| repo_name (string, 5–104 chars) | path (string, 4–248 chars) | content (string, 102–99.9k chars) |
|---|---|---|
tkchafin/scripts
|
findBreaksVCF.py
|
#!/usr/bin/python
import re
import sys
import os
import getopt
import vcf
def main():
params = parseArgs()
vfh = vcf.Reader(open(params.vcf, 'r'))
#grab contig sizes
contigs = dict()
for c,s in vfh.contigs.items():
contigs[s.id] = s.length
regions = list()
this_chrom = None
start = int()
stop = int()
count = 0
for rec in vfh:
if not this_chrom:
this_chrom = rec.CHROM
start = 1
stop = 1
count = 0
#If we entered new chromosome, submit old break
elif this_chrom != rec.CHROM:
t = tuple([this_chrom, start, contigs[this_chrom]])
regions.append(t)
this_chrom = rec.CHROM
start = 1
stop = 1
count = 0
		#if this site is a polymorphic SNP
if rec.is_snp and not rec.is_monomorphic:
#Check if parsimony-informative
if is_PIS(rec):
count+=1
#if this is the final PIS, submit region to list
if count == params.force:
stop = rec.POS
t = tuple([this_chrom, start, stop])
regions.append(t)
start = stop + 1
count = 0
t = tuple([this_chrom, start, contigs[this_chrom]])
regions.append(t)
print("Writing regions to out.regions...")
write_regions("out.regions", regions)
#Function to write list of region tuples, in GATK interval format (chrom:start-stop)
def write_regions(f, r):
	try:
		with open(f, 'w') as fh:
			for reg in r:
				ol = str(reg[0]) + ":" + str(reg[1]) + "-" + str(reg[2]) + "\n"
				fh.write(ol)
	except IOError as e:
		print("Could not write file %s: %s"%(f,e))
		sys.exit(1)
	except Exception as e:
		print("Unexpected error writing file %s: %s"%(f,e))
		sys.exit(1)
#Function to check whether a pyVCF record is parsimony-informative
def is_PIS(r):
	ref=0
	alt=0
	for call in r.samples:
		#gt_type is 0 for hom-ref, 1 for het, 2 for hom-alt, None for missing
		if call.gt_type is not None:
			if call.gt_type == 0:
				ref += 1
			elif call.gt_type == 1:
				ref += 1
				alt += 1
			elif call.gt_type == 2:
				alt += 1
	#parsimony-informative: both alleles observed in at least two samples each
	if ref >= 2 and alt >= 2:
		return(True)
	return(False)
#Object to parse command-line arguments
class parseArgs():
def __init__(self):
#Define options
try:
			options, remainder = getopt.getopt(sys.argv[1:], 'v:f:h', \
				["vcf=", "help", "force="])
except getopt.GetoptError as err:
print(err)
self.display_help("\nExiting because getopt returned non-zero exit status.")
#Default values for params
#Input params
self.vcf=None
self.force=100000
#First pass to see if help menu was called
for o, a in options:
if o in ("-h", "-help", "--help"):
self.display_help("Exiting because help menu was called.")
#Second pass to set all args.
for opt, arg_raw in options:
arg = arg_raw.replace(" ","")
arg = arg.strip()
opt = opt.replace("-","")
#print(opt,arg)
if opt in ('v', 'vcf'):
self.vcf = arg
elif opt in ('f','force'):
self.force=int(arg)
elif opt in ('h', 'help'):
pass
else:
assert False, "Unhandled option %r"%opt
		#Check mandatory options are set
if not self.vcf:
self.display_help("Must provide VCF file <-v,--vcf>")
def display_help(self, message=None):
if message is not None:
print()
print (message)
print ("\nfindBreaksVCF.py\n")
print ("Contact:Tyler K. Chafin, University of Arkansas,tkchafin@uark.edu")
print ("\nUsage: ", sys.argv[0], "-v <input.vcf> -f <100000>\n")
print ("Description: Breaks chromosomes into chunks of X parsimony-informative sites, for running MDL")
print("""
Arguments:
-v,--vcf : VCF file for parsing
-f,--force : Number of PIS to force a break
-h,--help : Displays help menu
""")
print()
sys.exit()
#Call main function
if __name__ == '__main__':
main()
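# Usage sketch (hypothetical filenames): split the chromosomes in input.vcf
# into regions of 100000 parsimony-informative sites each, writing GATK-style
# intervals such as "chr1:1-482913" to out.regions:
#   python findBreaksVCF.py -v input.vcf -f 100000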
|
spencerkclark/aospy-obj-lib
|
aospy_user/runs/cases.py
|
from aospy import Run
am2_control = Run(
name='am2_control',
description=(
'Preindustrial control simulation.'
),
data_in_direc=('/archive/Yi.Ming/sm2.1_fixed/'
'SM2.1U_Control-1860_lm2_aie_rerun6.YIM/pp'),
data_in_dur=5,
data_in_start_date='0001-01-01',
data_in_end_date='0080-12-31',
default_date_range=('0021-01-01', '0080-12-31'),
idealized=False
)
am2_tropics = Run(
name='am2_tropics',
description=(
'Anthropogenic sulfate aerosol forcing only in the'
' Northern Hemisphere tropics (EQ to 30N)'
),
data_in_direc=('/archive/Yi.Ming/sm2.1_fixed/'
'SM2.1U_Control-1860_lm2_aie2_tropical_rerun6.YIM/pp'),
data_in_dur=5,
data_in_start_date='0001-01-01',
data_in_end_date='0080-12-31',
default_date_range=('0021-01-01', '0080-12-31'),
idealized=False
)
am2_extratropics = Run(
name='am2_extratropics',
description=(
'Anthropogenic sulfate aerosol forcing only in the'
' Northern Hemisphere extratropics (30N to Pole)'
),
data_in_direc=('/archive/Yi.Ming/sm2.1_fixed/'
'SM2.1U_Control-1860_lm2_aie2_extropical_rerun6.YIM/pp'),
data_in_dur=5,
data_in_start_date='0001-01-01',
data_in_end_date='0080-12-31',
default_date_range=('0021-01-01', '0080-12-31'),
idealized=False
)
am2_tropics_and_extratropics = Run(
name='am2_tropics+extratropics',
description=(
'Anthropogenic sulfate aerosol forcing everywhere'
),
data_in_direc=('/archive/Yi.Ming/sm2.1_fixed/'
'SM2.1U_Control-1860_lm2_aie2_rerun6.YIM/pp'),
data_in_dur=5,
data_in_start_date='0001-01-01',
data_in_end_date='0080-12-31',
default_date_range=('0021-01-01', '0080-12-31'),
idealized=False
)
# REYOI Runs - First year is 1982; we throw that out as spinup;
# start analysis in 1983.
am2_HadISST_control = Run(
name='am2_HadISST_control',
description=(
'1981-2000 HadISST climatological annual cycle of SSTs and sea '
'ice repeated annually, with PD atmospheric composition.'
),
data_in_direc=('/archive/yim/siena_201203/m45_am2p14_1990/'
'gfdl.ncrc2-intel-prod/pp'),
data_in_dur=16,
data_in_start_date='1983-01-01',
data_in_end_date='1998-12-31',
default_date_range=('1983-01-01', '1998-12-31'),
idealized=False
)
am2_reyoi_control = Run(
name='am2_reyoi_control',
tags=['reyoi', 'cont'],
description='PI atmos and Reynolds OI climatological SSTs',
data_in_direc=('/archive/Spencer.Hill/am2/am2clim_reyoi/'
'gfdl.ncrc2-default-prod/pp'),
data_in_dur=1,
data_in_start_date='1982-01-01',
data_in_end_date='1998-12-31',
default_date_range=('1983-01-01', '1998-12-31'),
idealized=False
)
am2_reyoi_extratropics_full = Run(
name='am2_reyoi_extratropics_full',
description=(
'Full SST anomaly pattern applied to REYOI fixed SST climatology.'),
data_in_direc=('/archive/Spencer.Clark/am2/'
'am2clim_reyoi_extratropics_full/'
'gfdl.ncrc2-default-prod/pp'),
data_in_dur=17,
data_in_start_date='1982-01-01',
data_in_end_date='1998-12-31',
default_date_range=('1983-01-01', '1998-12-31'),
idealized=False
)
am2_reyoi_extratropics_sp = Run(
name='am2_reyoi_extratropics_sp',
description=(
'Spatial Pattern SST anomaly pattern applied to'
' REYOI fixed SST climatology.'),
data_in_direc=('/archive/Spencer.Clark/am2/'
'am2clim_reyoi_extratropics_sp/gfdl.ncrc2-default-prod/pp'),
data_in_dur=17,
data_in_start_date='1982-01-01',
data_in_end_date='1998-12-31',
default_date_range=('1983-01-01', '1998-12-31'),
idealized=False
)
am2_reyoi_tropics_sp_SI = Run(
name='am2_reyoi_tropics_sp_SI',
description=(
'Spatial Pattern SST anomaly pattern applied to REYOI fixed SST'
' climatology.'),
data_in_direc=('/archive/Spencer.Clark/am2/'
'am2clim_reyoi_tropics_sp_SI/gfdl.ncrc2-default-prod/pp'),
data_in_dur=17,
data_in_start_date='1982-01-01',
data_in_end_date='1998-12-31',
default_date_range=('1983-01-01', '1998-12-31'),
idealized=False
)
am2_reyoi_tropics_full = Run(
name='am2_reyoi_tropics_full',
description=(
'Full SST anomaly pattern applied to REYOI fixed SST climatology.'),
data_in_direc=('/archive/Spencer.Clark/am2/'
'am2clim_reyoi_tropics_full/gfdl.ncrc2-default-prod/pp'),
data_in_dur=17,
data_in_start_date='1982-01-01',
data_in_end_date='1998-12-31',
default_date_range=('1983-01-01', '1998-12-31'),
idealized=False
)
am2_reyoi_extratropics_sp_SI = Run(
name='am2_reyoi_extratropics_sp_SI',
description=(
'Spatial Pattern SST anomaly pattern applied to REYOI fixed'
' SST climatology. Fixed sea-ice.'),
data_in_direc=('/archive/Spencer.Clark/am2/'
'am2clim_reyoi_extratropics_sp_SI/'
'gfdl.ncrc2-default-prod/pp'),
data_in_dur=17,
data_in_start_date='1982-01-01',
data_in_end_date='1998-12-31',
default_date_range=('1983-01-01', '1998-12-31'),
idealized=False
)
am2_reyoi_extratropics_u = Run(
name='am2_reyoi_extratropics_u',
description=(
'Uniform SST anomaly pattern applied to REYOI fixed SST climatology.'),
data_in_direc=('/archive/Spencer.Clark/am2/'
'am2clim_reyoi_extratropics_u/gfdl.ncrc2-default-prod/pp'),
data_in_dur=17,
data_in_start_date='1982-01-01',
data_in_end_date='1998-12-31',
default_date_range=('1983-01-01', '1998-12-31'),
idealized=False
)
am2_reyoi_tropics_u = Run(
name='am2_reyoi_tropics_u',
description=(
'Uniform SST anomaly pattern applied to REYOI fixed SST climatology.'),
data_in_direc=('/archive/Spencer.Clark/am2/'
'am2clim_reyoi_tropics_u/gfdl.ncrc2-default-prod/pp'),
data_in_dur=17,
data_in_start_date='1982-01-01',
data_in_end_date='1998-12-31',
default_date_range=('1983-01-01', '1998-12-31'),
idealized=False
)
|
djo938/dumpFormat
|
dumpformat/test/test.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#Copyright (C) 2013 Jonathan Delvaux <dumpformat@djoproject.net>
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from dumpformat import dumpManager
#TODO test
#trying to save as .test.xml does not work; why?
class mltriesTest(unittest.TestCase):
def setUp(self):
self.d = dumpManager()
def test_init(self):
self.d.save("./test.xml")
def test_load(self):
pass
#TODO
#save then load and compare
if __name__ == '__main__':
unittest.main()
|
chrmoritz/zoxel
|
src/plugins/tool_extrude.py
|
# tool_extrude.py
# Extrusion tool.
# Copyright (c) 2015, Lennart Riecken
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from PySide import QtGui, QtCore
from tool import Tool, EventData, MouseButtons, KeyModifiers, Face
from plugin_api import register_plugin
class ExtrudeTool(Tool):
def __init__(self, api):
super(ExtrudeTool, self).__init__(api)
# Create our action / icon
self.action = QtGui.QAction(QtGui.QPixmap(":/images/gfx/icons/border-bottom-thick.png"), "Extrude", None)
        self.action.setStatusTip("Extrude region")
self.action.setCheckable(True)
self.action.setShortcut(QtGui.QKeySequence("Ctrl+0"))
# Register the tool
self.priority = 10
self.api.register_tool(self)
# Area tool helper
self._mouse = None
self._stamp = []
self.xdir = True
self.ydir = True
self.zdir = True
self.pastoffset = 0
self.fixeddirection = False
def drawstamp(self, data, dx, dy, dz):
for x, y, z, col in self._stamp:
tgt = data.voxels.get(x + dx, y + dy, z + dz)
if tgt == 0:
data.voxels.set(x + dx, y + dy, z + dz, col, True, 1)
data.voxels.completeUndoFill()
def on_drag_start(self, data):
if len(data.voxels._selection) > 0:
self._stamp = []
for x, y, z in data.voxels._selection:
col = data.voxels.get(x, y, z)
self._stamp.append((x, y, z, col))
self._mouse = (data.mouse_x, data.mouse_y)
if QtCore.Qt.Key_X in data.keys:
self.xdir = True
self.ydir = False
self.zdir = False
self.fixeddirection = True
elif QtCore.Qt.Key_Y in data.keys:
self.xdir = False
self.ydir = True
self.zdir = False
self.fixeddirection = True
elif QtCore.Qt.Key_Z in data.keys:
self.xdir = False
self.ydir = False
self.zdir = True
self.fixeddirection = True
else:
self.xdir = True
self.ydir = True
self.zdir = True
self.fixeddirection = False
self.pastoffset = 0
# When dragging, create the selection
def on_drag(self, data):
# In case the first click has missed a valid target.
if self._mouse is None or len(self._stamp) == 0:
return
dx = data.mouse_x - self._mouse[0]
dy = data.mouse_y - self._mouse[1]
# Work out some sort of vague translation between screen and voxels
sx = self.api.mainwindow.width() / data.voxels.width
sy = self.api.mainwindow.height() / data.voxels.height
dx = int(round(dx / float(sx)))
dy = int(round(dy / float(sy)))
if dx == 0 and dy == 0:
return
# Work out translation for x,y
ax, ay = self.api.mainwindow.display.view_axis()
tx = 0
ty = 0
tz = 0
tdx = 0
tdy = 0
tdz = 0
if ax == self.api.mainwindow.display.X_AXIS:
tdx = dx
if dx > 0:
tx = 1
elif dx < 0:
tx = -1
elif ax == self.api.mainwindow.display.Y_AXIS:
tdy = dx
if dx > 0:
ty = 1
elif dx < 0:
ty = -1
elif ax == self.api.mainwindow.display.Z_AXIS:
tdz = dx
if dx > 0:
tz = 1
elif dx < 0:
tz = -1
if ay == self.api.mainwindow.display.X_AXIS:
tdx = dy
if dy > 0:
tx = 1
elif dy < 0:
tx = -1
elif ay == self.api.mainwindow.display.Y_AXIS:
tdy = dy
if dy > 0:
ty = -1
elif dy < 0:
ty = 1
elif ay == self.api.mainwindow.display.Z_AXIS:
tdz = dy
if dy > 0:
tz = 1
elif dy < 0:
tz = -1
if self.fixeddirection:
if self.xdir:
if tx != 0:
self._mouse = (data.mouse_x, data.mouse_y)
self.pastoffset += tx
self.drawstamp(data, self.pastoffset, 0, 0)
elif self.ydir:
if ty != 0:
self._mouse = (data.mouse_x, data.mouse_y)
self.pastoffset += ty
self.drawstamp(data, 0, self.pastoffset, 0)
elif self.zdir:
if tz != 0:
self._mouse = (data.mouse_x, data.mouse_y)
self.pastoffset += tz
self.drawstamp(data, 0, 0, self.pastoffset)
else:
if tx != 0 and self.xdir and (not self.ydir or (abs(tdx) > abs(tdy) and abs(tdx) > abs(tdz))):
self._mouse = (data.mouse_x, data.mouse_y)
self.ydir = False
self.zdir = False
self.pastoffset += tx
self.drawstamp(data, self.pastoffset, 0, 0)
elif ty != 0 and self.ydir and (not self.zdir or abs(tdy) > abs(tdz)):
self._mouse = (data.mouse_x, data.mouse_y)
self.xdir = False
self.zdir = False
self.pastoffset += ty
self.drawstamp(data, 0, self.pastoffset, 0)
elif tz != 0 and self.zdir:
self._mouse = (data.mouse_x, data.mouse_y)
self.xdir = False
self.ydir = False
self.pastoffset += tz
self.drawstamp(data, 0, 0, self.pastoffset)
def on_drag_end(self, data):
data.voxels.clear_selection()
dx = self.pastoffset if self.xdir else 0
dy = self.pastoffset if self.ydir else 0
dz = self.pastoffset if self.zdir else 0
for x, y, z, col in self._stamp:
data.voxels.select(x + dx, y + dy, z + dz)
register_plugin(ExtrudeTool, "Extrude Tool", "1.0")
|
asoliveira/NumShip
|
scripts/entrada/padrao/plot-1cg.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import scipy as sp
#File containing the position data
arq = 'CurvaGiro/pos.dat'
#Axis limits
v = [-10,1000, 0, 1000]
#x-axis label
xl = r'y meters'
#y-axis label
yl = r'x meters'
x = sp.genfromtxt(arq)
a = plt.plot(x[:,2], x[:,1], 'k-')
plt.grid(True, 'both', color = '0.8', linestyle = '--', linewidth = 1)
plt.axis(v)
plt.xlabel(xl)
plt.ylabel(yl)
plt.show()
|
Abjad/abjad
|
abjad/__init__.py
|
from quicktions import Fraction
from . import (
_update,
deprecated,
enumerate,
format,
get,
illustrators,
io,
iterate,
iterpitches,
lyconst,
lyenv,
makers,
mutate,
persist,
string,
wf,
)
from ._version import __version__, __version_info__
from .bind import Wrapper, annotate, attach, detach
from .bundle import LilyPondFormatBundle, SlotContributions
from .configuration import (
Configuration,
list_all_classes,
list_all_functions,
yield_all_modules,
)
from .contextmanagers import (
ContextManager,
FilesystemState,
ForbidUpdate,
NullContextManager,
ProgressIndicator,
RedirectedStreams,
TemporaryDirectory,
TemporaryDirectoryChange,
Timer,
)
from .cyclictuple import CyclicTuple
from .duration import Duration, Multiplier, NonreducedFraction, Offset
from .dynamic import Dynamic
from .enums import (
Center,
Comparison,
Down,
Exact,
HorizontalAlignment,
Left,
Less,
Middle,
More,
Right,
Up,
VerticalAlignment,
)
from .exceptions import (
AssignabilityError,
ImpreciseMetronomeMarkError,
LilyPondParserError,
MissingMetronomeMarkError,
ParentageError,
PersistentIndicatorError,
SchemeParserFinishedError,
UnboundedTimeIntervalError,
WellformednessError,
)
from .format import lilypond
from .get import Lineage
from .illustrators import illustrate
from .indicators import (
Arpeggio,
Articulation,
BarLine,
BeamCount,
BendAfter,
BreathMark,
Clef,
ColorFingering,
Fermata,
Glissando,
KeyCluster,
KeySignature,
LaissezVibrer,
MarginMarkup,
MetronomeMark,
Mode,
Ottava,
RehearsalMark,
Repeat,
RepeatTie,
StaffChange,
StaffPosition,
StartBeam,
StartGroup,
StartHairpin,
StartMarkup,
StartPhrasingSlur,
StartPianoPedal,
StartSlur,
StartTextSpan,
StartTrillSpan,
StemTremolo,
StopBeam,
StopGroup,
StopHairpin,
StopPhrasingSlur,
StopPianoPedal,
StopSlur,
StopTextSpan,
StopTrillSpan,
Tie,
TimeSignature,
)
from .instruments import (
Accordion,
AltoFlute,
AltoSaxophone,
AltoTrombone,
AltoVoice,
BaritoneSaxophone,
BaritoneVoice,
BassClarinet,
BassFlute,
BassSaxophone,
BassTrombone,
BassVoice,
Bassoon,
Cello,
ClarinetInA,
ClarinetInBFlat,
ClarinetInEFlat,
Contrabass,
ContrabassClarinet,
ContrabassFlute,
ContrabassSaxophone,
Contrabassoon,
EnglishHorn,
Flute,
FrenchHorn,
Glockenspiel,
Guitar,
Harp,
Harpsichord,
Instrument,
Marimba,
MezzoSopranoVoice,
Oboe,
Percussion,
Piano,
Piccolo,
SopraninoSaxophone,
SopranoSaxophone,
SopranoVoice,
StringNumber,
TenorSaxophone,
TenorTrombone,
TenorVoice,
Trumpet,
Tuba,
Tuning,
Vibraphone,
Viola,
Violin,
Xylophone,
)
from .io import graph, show
from .label import ColorMap
from .lilypondfile import Block, LilyPondFile
from .lyproxy import (
LilyPondContext,
LilyPondEngraver,
LilyPondGrob,
LilyPondGrobInterface,
)
from .makers import LeafMaker, NoteMaker
from .markups import Markup
from .math import Infinity, NegativeInfinity
from .meter import Meter, MeterList, MetricAccentKernel
from .metricmodulation import MetricModulation
from .obgc import OnBeatGraceContainer, on_beat_grace_container
from .overrides import (
IndexedTweakManager,
IndexedTweakManagers,
Interface,
LilyPondLiteral,
LilyPondOverride,
LilyPondSetting,
OverrideInterface,
SettingInterface,
TweakInterface,
override,
setting,
tweak,
)
from .parentage import Parentage
from .parsers import parser
from .parsers.base import Parser
from .parsers.parse import parse
from .pattern import Pattern, PatternTuple
from .pcollections import (
IntervalClassSegment,
IntervalClassSet,
IntervalSegment,
IntervalSet,
PitchClassSegment,
PitchClassSet,
PitchRange,
PitchSegment,
PitchSet,
Segment,
Set,
TwelveToneRow,
)
from .pitch import (
Accidental,
Interval,
IntervalClass,
NamedInterval,
NamedIntervalClass,
NamedInversionEquivalentIntervalClass,
NamedPitch,
NamedPitchClass,
NumberedInterval,
NumberedIntervalClass,
NumberedInversionEquivalentIntervalClass,
NumberedPitch,
NumberedPitchClass,
Octave,
Pitch,
PitchClass,
PitchTyping,
)
from .ratio import NonreducedRatio, Ratio
from .score import (
AfterGraceContainer,
BeforeGraceContainer,
Chord,
Cluster,
Component,
Container,
Context,
DrumNoteHead,
Leaf,
MultimeasureRest,
Note,
NoteHead,
NoteHeadList,
Rest,
Score,
Skip,
Staff,
StaffGroup,
TremoloContainer,
Tuplet,
Voice,
)
from .select import LogicalTie, Selection
from .setclass import SetClass
from .spanners import (
beam,
glissando,
hairpin,
horizontal_bracket,
ottava,
phrasing_slur,
piano_pedal,
slur,
text_spanner,
tie,
trill_spanner,
)
from .tag import Line, Tag, activate, deactivate
from .timespan import OffsetCounter, Timespan, TimespanList
from .typedcollections import TypedCollection, TypedFrozenset, TypedList, TypedTuple
from .typings import (
DurationSequenceTyping,
DurationTyping,
IntegerPair,
IntegerSequence,
Number,
NumberPair,
PatternTyping,
Prototype,
RatioSequenceTyping,
RatioTyping,
Strings,
)
from .verticalmoment import (
VerticalMoment,
iterate_leaf_pairs,
iterate_pitch_pairs,
iterate_vertical_moments,
)
index = Pattern.index
index_all = Pattern.index_all
index_first = Pattern.index_first
index_last = Pattern.index_last
__all__ = [
"Accidental",
"Accordion",
"AfterGraceContainer",
"AltoFlute",
"AltoSaxophone",
"AltoTrombone",
"AltoVoice",
"Arpeggio",
"Articulation",
"AssignabilityError",
"BarLine",
"BaritoneSaxophone",
"BaritoneVoice",
"BassClarinet",
"BassFlute",
"BassSaxophone",
"BassTrombone",
"BassVoice",
"Bassoon",
"BeamCount",
"BeforeGraceContainer",
"BendAfter",
"Block",
"BreathMark",
"Cello",
"Center",
"Chord",
"ClarinetInA",
"ClarinetInBFlat",
"ClarinetInEFlat",
"Clef",
"Cluster",
"ColorFingering",
"ColorMap",
"Comparison",
"Component",
"Configuration",
"Container",
"Context",
"ContextManager",
"Contrabass",
"ContrabassClarinet",
"ContrabassFlute",
"ContrabassSaxophone",
"Contrabassoon",
"CyclicTuple",
"Down",
"DrumNoteHead",
"Duration",
"DurationSequenceTyping",
"DurationTyping",
"Dynamic",
"EnglishHorn",
"Exact",
"Expression",
"Fermata",
"FilesystemState",
"Flute",
"ForbidUpdate",
"Fraction",
"FrenchHorn",
"Glissando",
"Glockenspiel",
"Guitar",
"Harp",
"Harpsichord",
"HorizontalAlignment",
"ImpreciseMetronomeMarkError",
"IndexedTweakManager",
"IndexedTweakManagers",
"Infinity",
"Instrument",
"IntegerPair",
"IntegerSequence",
"Interface",
"Interval",
"IntervalClass",
"IntervalClassSegment",
"IntervalClassSet",
"IntervalSegment",
"IntervalSet",
"KeyCluster",
"KeySignature",
"LaissezVibrer",
"Leaf",
"LeafMaker",
"Left",
"Less",
"LilyPondContext",
"LilyPondEngraver",
"LilyPondFile",
"LilyPondFormatBundle",
"LilyPondGrob",
"LilyPondGrobInterface",
"LilyPondLiteral",
"LilyPondOverride",
"LilyPondParserError",
"LilyPondSetting",
"Line",
"Lineage",
"LogicalTie",
"MarginMarkup",
"Marimba",
"Markup",
"Meter",
"MeterList",
"MetricAccentKernel",
"MetricModulation",
"MetronomeMark",
"MezzoSopranoVoice",
"Middle",
"MissingMetronomeMarkError",
"Mode",
"More",
"MultimeasureRest",
"Multiplier",
"NamedInterval",
"NamedIntervalClass",
"NamedInversionEquivalentIntervalClass",
"NamedPitch",
"NamedPitchClass",
"NegativeInfinity",
"NonreducedFraction",
"NonreducedRatio",
"Note",
"NoteHead",
"NoteHeadList",
"NoteMaker",
"NullContextManager",
"Number",
"NumberPair",
"NumberedInterval",
"NumberedIntervalClass",
"NumberedInversionEquivalentIntervalClass",
"NumberedPitch",
"NumberedPitchClass",
"Oboe",
"Octave",
"Offset",
"OffsetCounter",
"OnBeatGraceContainer",
"Ottava",
"OverrideInterface",
"Parentage",
"ParentageError",
"Parser",
"Pattern",
"PatternTuple",
"PatternTyping",
"Percussion",
"PersistentIndicatorError",
"Piano",
"Piccolo",
"Pitch",
"PitchClass",
"PitchClassSegment",
"PitchClassSet",
"PitchRange",
"PitchSegment",
"PitchSet",
"PitchTyping",
"ProgressIndicator",
"Prototype",
"Ratio",
"RatioSequenceTyping",
"RatioTyping",
"RedirectedStreams",
"RehearsalMark",
"Repeat",
"RepeatTie",
"Rest",
"Right",
"SchemeParserFinishedError",
"Score",
"Segment",
"Selection",
"Set",
"SetClass",
"SettingInterface",
"Skip",
"SlotContributions",
"SopraninoSaxophone",
"SopranoSaxophone",
"SopranoVoice",
"Staff",
"StaffChange",
"StaffGroup",
"StaffPosition",
"StartBeam",
"StartGroup",
"StartHairpin",
"StartMarkup",
"StartPhrasingSlur",
"StartPianoPedal",
"StartSlur",
"StartTextSpan",
"StartTrillSpan",
"StemTremolo",
"StopBeam",
"StopGroup",
"StopHairpin",
"StopPhrasingSlur",
"StopPianoPedal",
"StopSlur",
"StopTextSpan",
"StopTrillSpan",
"StringNumber",
"Strings",
"Tag",
"TemporaryDirectory",
"TemporaryDirectoryChange",
"TenorSaxophone",
"TenorTrombone",
"TenorVoice",
"Tie",
"TimeSignature",
"Timer",
"Timespan",
"TimespanList",
"TremoloContainer",
"Trumpet",
"Tuba",
"Tuning",
"Tuplet",
"TweakInterface",
"TwelveToneRow",
"TypedCollection",
"TypedFrozenset",
"TypedList",
"TypedTuple",
"UnboundedTimeIntervalError",
"Up",
"VerticalAlignment",
"VerticalMoment",
"Vibraphone",
"Viola",
"Violin",
"Voice",
"WellformednessError",
"Wrapper",
"Xylophone",
"__version__",
"__version_info__",
"_update",
"activate",
"annotate",
"attach",
"beam",
"deactivate",
"deprecated",
"detach",
"enumerate",
"format",
"glissando",
"graph",
"hairpin",
"horizontal_bracket",
"illustrate",
"illustrators",
"index",
"index_all",
"index_first",
"index_last",
"get",
"io",
"iterate",
"iterate_leaf_pairs",
"iterate_pitch_pairs",
"iterate_vertical_moments",
"iterpitches",
"label",
"list_all_classes",
"list_all_functions",
"lilypond",
"lyconst",
"lyenv",
"makers",
"mutate",
"on_beat_grace_container",
"ottava",
"override",
"parse",
"parser",
"persist",
"phrasing_slur",
"piano_pedal",
"select",
"setting",
"show",
"slur",
"string",
"text_spanner",
"tie",
"trill_spanner",
"tweak",
"wf",
"yield_all_modules",
]
|
oser-cs/oser-website
|
tests/test_core/test_address.py
|
"""Address model tests."""
from core.models import Address
from core.factory import AddressFactory
from tests.utils import ModelTestCase
class AddressTest(ModelTestCase):
"""Test the Address model."""
model = Address
field_tests = {
'line1': {
'verbose_name': 'ligne 1',
'blank': False,
'max_length': 300,
},
'line2': {
'verbose_name': 'ligne 2',
'max_length': 300,
'blank': True,
'default': '',
},
'post_code': {
'verbose_name': 'code postal',
'blank': False,
'max_length': 20,
},
'city': {
'verbose_name': 'ville',
'blank': False,
'max_length': 100,
},
'country': {
'verbose_name': 'pays',
'blank': False,
'default': 'FR',
},
}
model_tests = {
'verbose_name': 'adresse',
}
@classmethod
def setUpTestData(cls):
cls.obj = AddressFactory.create(
line1='3 Rue Joliot Curie',
post_code='91190',
city='Gif-sur-Yvette',
)
def test_str(self):
expected = '3 Rue Joliot Curie, 91190 Gif-sur-Yvette, France'
self.assertEqual(expected, str(self.obj))
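    # A hypothetical sketch of the kind of check ModelTestCase (from
    # tests.utils, not shown in this dump) could run for each field_tests
    # entry, via Django's model _meta API:
    #   field = Address._meta.get_field('line1')
    #   assert field.verbose_name == 'ligne 1'
    #   assert field.max_length == 300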
|
twm/yarrharr
|
yarrharr/application.py
|
# Copyright © 2013, 2015, 2016, 2017, 2018, 2020, 2022 Tom Most <twm@freecog.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Additional permission under GNU GPL version 3 section 7
#
# If you modify this Program, or any covered work, by linking or
# combining it with OpenSSL (or a modified version of that library),
# containing parts covered by the terms of the OpenSSL License, the
# licensors of this Program grant you additional permission to convey
# the resulting work. Corresponding Source for a non-source form of
# such a combination shall include the source code for the parts of
# OpenSSL used as well as that of the covered work.
"""
Yarrharr production server via Twisted Web
"""
import io
import json
import logging
import os
import re
import sys
from base64 import b64encode
import attr
from django.conf import settings
from django.dispatch import receiver
from twisted.internet import defer
from twisted.internet.endpoints import serverFromString
from twisted.logger import (
FileLogObserver,
FilteringLogObserver,
ILogFilterPredicate,
Logger,
LogLevel,
PredicateResult,
formatEvent,
globalLogBeginner,
globalLogPublisher,
)
from twisted.python.filepath import FilePath
from twisted.web.resource import ErrorPage, NoResource, Resource
from twisted.web.server import Site
from twisted.web.static import File
from twisted.web.wsgi import WSGIResource
from zope.interface import implementer
from . import __version__
from .signals import schedule_changed
from .wsgi import application
log = Logger()
@attr.s
class CSPReport(object):
url = attr.ib()
referrer = attr.ib()
resource = attr.ib()
violatedDirective = attr.ib()
effectiveDirective = attr.ib()
source = attr.ib()
sample = attr.ib()
status = attr.ib()
policy = attr.ib()
disposition = attr.ib()
def __str__(self):
bits = []
for a in attr.fields(self.__class__):
value = getattr(self, a.name)
if value is None:
continue
bits.append("{}={!r}".format(a.name, value))
return "\n".join(bits)
@classmethod
def fromJSON(cls, data):
"""
Construct a :class:`CSPReport` from the serialization of a violation
per CSP Level 3 §5.3.
"""
if {"source-file", "line-number", "column-number"} <= data.keys():
source = "{source-file} {line-number}:{column-number}".format_map(data)
elif {"source-file", "line-number"} <= data.keys():
source = "{source-file} {line-number}".format_map(data)
else:
source = data.get("source-file")
return cls(
url=data["document-uri"],
referrer=data["referrer"] or None, # Always seems to be an empty string.
resource=data["blocked-uri"],
violatedDirective=data.get("violated-directive"),
effectiveDirective=data.get("effective-directive"),
policy=data["original-policy"],
disposition=data.get("disposition"),
status=data.get("status-code"),
sample=data.get("script-sample") or None,
source=source,
)
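    # A minimal sketch of the deprecated report serialization this parses
    # (field values are hypothetical):
    #   {"csp-report": {"document-uri": "https://example.com/",
    #                   "referrer": "", "blocked-uri": "eval",
    #                   "original-policy": "script-src 'self'",
    #                   "source-file": "https://example.com/app.js",
    #                   "line-number": 1, "column-number": 10}}
    # fromJSON receives the inner dict and renders source as
    # "https://example.com/app.js 1:10".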
class CSPReportLogger(Resource):
isLeaf = True
_log = Logger()
def render(self, request):
if request.method != b"POST":
request.setResponseCode(405)
request.setHeader("Allow", "POST")
return b"HTTP 405: Method Not Allowed\n"
if request.requestHeaders.getRawHeaders("Content-Type") != ["application/csp-report"]:
request.setResponseCode(415)
return b"HTTP 415: Only application/csp-report requests are accepted\n"
# Process the JSON text produced per
# https://w3c.github.io/webappsec-csp/#deprecated-serialize-violation
report = CSPReport.fromJSON(json.load(io.TextIOWrapper(request.content, encoding="utf-8"))["csp-report"])
if report.sample and report.sample.startswith(";(function installGlobalHook(window) {"):
# This seems to be a misbehavior in some Firefox extension.
# I cannot reproduce it with a clean profile.
return b""
if report.sample and report.sample == "call to eval() or related function blocked by CSP":
# This is caused by Tridactyl due to a Firefox issue. It's quite
# chatty so we'll disable for now, even though the message is
# generated by the browser and might indicate a script injection.
# See <https://github.com/cmcaine/tridactyl/issues/109> and
# <https://bugzilla.mozilla.org/show_bug.cgi?id=1267027>.
return b""
self._log.debug(
"Content Security Policy violation reported by {userAgent!r}:\n{report}",
userAgent=", ".join(request.requestHeaders.getRawHeaders("User-Agent", [])),
report=report,
)
return b"" # Browser ignores the response.
class FallbackResource(Resource):
"""
Resource which falls back to an alternative resource tree if it doesn't
have a matching child resource.
"""
def __init__(self, fallback):
Resource.__init__(self)
self.fallback = fallback
def render(self, request):
"""
Render this path with the fallback resource.
"""
return self.fallback.render(request)
def getChild(self, path, request):
"""
Dispatch unhandled requests to the fallback resource.
"""
# Mutate the request path such that it's like FallbackResource didn't handle
# the request at all. This is a bit of a nasty hack, since we're
# relying on the t.w.server implementation's behavior to not break when
# we do this. A better way would be to create a wrapper for the request object
request.postpath.insert(0, request.prepath.pop())
return self.fallback
class Static(Resource):
"""
    Serve up Yarrharr's static assets directory. In development, the files are
    served uncompressed and named like so::
main-afffb00fd22ca3ce0250.js
The second dot-delimited section is a hash of the file's contents or source
material. As the filename changes each time the content does, these files
are served with a long max-age and the ``immutable`` flag in the
`Cache-Control`_ header.
In production, each file has two pre-compressed variants: one with
a ``.gz`` extension, and one with a ``.br`` extension. For example::
main-afffb00fd22ca3ce0250.js
main-afffb00fd22ca3ce0250.js.br
main-afffb00fd22ca3ce0250.js.gz
The actual serving of the files is done by `twisted.web.static.File`, which
is fancy and supports range requests, conditional gets, etc.
.. note::
Several features used here are only available to HTTPS origins.
Cache-Control: immutable and Brotli compression both are in Firefox.
.. _cache-control: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control
"""
_dir = FilePath(settings.STATIC_ROOT)
_validName = re.compile(rb"^[a-zA-Z0-9]+-[a-zA-Z0-9]+(\.[a-z]+)+$")
# NOTE: RFC 7231 § 5.3.4 is not completely clear about whether
# content-coding tokens are case-sensitive or not. The "identity" token
# appears in EBNF and is therefore definitely case-insensitive, but the
# other tokens only appear in IANA registry tables in lowercase form. In
# contrast, the transfer-coding possibilities are clearly defined in EBNF
# so are definitely case-insensitive. For content-coding every implementer
# seems to agree on lowercase, so I'm not going to worry about it.
_brToken = re.compile(rb"(:?^|[\s,])br(:?$|[\s,;])")
_gzToken = re.compile(rb"(:?^|[\s,])(:?x-)?gzip(:?$|[\s,;])")
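    # For example, an Accept-Encoding header of b"gzip, deflate, br" matches
    # both token patterns above, while b"identity" matches neither, in which
    # case the uncompressed file is served.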
_contentTypes = {
b".js": "application/javascript",
b".css": "text/css",
b".map": "application/octet-stream",
b".ico": "image/x-icon",
b".svg": "image/svg+xml",
b".png": "image/png",
}
def _file(self, path, type, encoding=None):
"""
Construct a `twisted.web.static.File` customized to serve Yarrharr
static assets.
:param path: `twisted.internet.filepath.FilePath` instance
:returns: `twisted.web.resource.IResource`
"""
f = File(path.path)
f.type = type
f.encoding = encoding
return f
def getChild(self, path, request):
"""
Serve a file for the given path.
The Content-Type header is set based on the file extension.
A limited form of content negotiation is done based on the
Accept-Encoding header and the files on disk. Apart from the default of
``identity``, two encodings are supported:
* ``br``, which selects any Brotli-compressed ``.br`` variant of
the file.
        * ``gzip``, which selects any gzip-compressed ``.gz`` variant of the
          file. ``x-gzip`` is also supported.
qvalues are ignored as browsers don't use them. This may produce an
incorrect response if a variant is disabled like ``identity;q=0``.
"""
if not self._validName.match(path):
return NoResource("Not found.")
ext = path[path.rindex(b".") :]
try:
type = self._contentTypes[ext]
except KeyError:
return NoResource("Unknown type.")
acceptEncoding = request.getHeader(b"accept-encoding") or b"*"
file = None
if self._brToken.search(acceptEncoding):
br = self._dir.child(path + b".br")
if br.isfile():
file = self._file(br, type, "br")
if file is None and self._gzToken.search(acceptEncoding):
gz = self._dir.child(path + b".gz")
if gz.isfile():
file = self._file(gz, type, "gzip")
if file is None:
file = self._file(self._dir.child(path), type)
request.setHeader(b"Vary", b"accept-encoding")
request.setHeader(b"Cache-Control", b"public, max-age=31536000, immutable")
return file
class Root(FallbackResource):
"""
Root of the Yarrharr URL hierarchy.
"""
def __init__(self, reactor, threadpool):
wsgi = WSGIResource(reactor, threadpool, application)
FallbackResource.__init__(self, wsgi)
self.putChild(b"csp-report", CSPReportLogger())
self.putChild(b"static", Static())
# Handle requests for /favicon.ico and paths hit by script kiddies at
# the Twisted level so that they don't make it down to Django, which
# logs 404s as errors:
a404 = ErrorPage(404, "Not Found", "")
for path in (b"favicon.ico", b"index.php", b"wp-login.php"):
self.putChild(path, a404)
def getChildWithDefault(self, name, request):
# Disable the Referer header in some browsers. This is complemented by
# the injection of rel="noopener noreferrer" on all links by the HTML
# sanitizer.
request.setHeader(b"Referrer-Policy", b"same-origin")
request.setHeader(b"X-Content-Type-Options", b"nosniff")
request.setHeader(b"Cross-Origin-Opener-Policy", b"same-origin")
script_nonce = b64encode(os.urandom(32))
request.requestHeaders.setRawHeaders(b"Yarrharr-Script-Nonce", [script_nonce])
request.setHeader(
b"Content-Security-Policy",
(
# b"default-src 'none'; "
b"img-src *; "
b"script-src 'self' 'nonce-%s'; "
b"style-src 'self'; "
b"frame-ancestors 'none'; "
b"form-action 'self'; "
b"report-uri /csp-report"
)
% (script_nonce,),
)
return super().getChildWithDefault(name, request)
def updateFeeds(reactor, max_fetch=5):
"""
Poll any feeds due for a check.
"""
from .fetch import poll
def _failed(reason):
"""
Log unexpected errors and schedule a retry in one second.
"""
log.failure("Unexpected failure polling feeds", failure=reason)
return 1.0 # seconds until next poll
d = poll(reactor, max_fetch)
# Last gasp error handler to avoid terminating the LoopingCall.
d.addErrback(_failed)
return d
_txLevelToPriority = {
LogLevel.debug: "<7>",
LogLevel.info: "<6>",
LogLevel.warn: "<4>",
LogLevel.error: "<3>",
LogLevel.critical: "<2>",
}
def formatForSystemd(event):
# Events generated by twisted.python.log have a "system", while ones
# generated with twisted.logger have a "namespace" with similar
# meaning.
#
s = "[{}] ".format(event.get("log_system") or event.get("log_namespace") or "-")
s += formatEvent(event)
if not s:
return None
if "log_failure" in event:
try:
s += "\n" + event["log_failure"].getTraceback().rstrip("\n")
except: # noqa
pass
prefix = _txLevelToPriority.get(event.get("log_level")) or "<6>"
return prefix + s.replace("\n", "\n" + prefix + " ") + "\n"
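# For example, a warning event with namespace "yarrharr" and message "boom"
# becomes "<4>[yarrharr] boom\n"; each continuation line of a multi-line
# message gets the same "<4> " prefix so systemd keeps the priority.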
@implementer(ILogFilterPredicate)
def dropUnhandledHTTP2Shutdown(event):
"""
Suppress the log messages which result from an unhandled error in HTTP/2
connection shutdown. See #282 and Twisted #9462.
This log message is relayed from the :mod:`twisted.python.log` so the
fields are a little odd:
* ``'log_namespace'`` is ``'log_legacy'``, and there is a ``'system'``
field with a value of ``'-'``.
* ``'log_text'`` contains the actual log text, including a pre-formatted
traceback.
* ``'failure'`` used instead of ``'log_failure'``.
"""
if event.get("log_namespace") != "log_legacy":
return PredicateResult.maybe
if event.get("log_level") != LogLevel.critical:
return PredicateResult.maybe
if "failure" not in event or not event["failure"].check(AttributeError):
return PredicateResult.maybe
if event["log_text"].startswith("Unhandled Error") and "no attribute 'shutdown'" in event["log_text"]:
return PredicateResult.no
return PredicateResult.maybe
class TwistedLoggerLogHandler(logging.Handler):
publisher = globalLogPublisher
def _mapLevel(self, levelno):
"""
Convert a stdlib logging level into a Twisted :class:`LogLevel`.
"""
if levelno <= logging.DEBUG:
return LogLevel.debug
elif levelno <= logging.INFO:
return LogLevel.info
elif levelno <= logging.WARNING:
return LogLevel.warn
elif levelno <= logging.ERROR:
return LogLevel.error
return LogLevel.critical
def emit(self, record):
self.publisher(
{
"log_level": self._mapLevel(record.levelno),
"log_namespace": record.name,
"log_format": "{msg}",
"msg": self.format(record),
}
)
class AdaptiveLoopingCall(object):
"""
:class:`AdaptiveLoopingCall` invokes a function periodically. Each time it
is called it returns the time to wait until the next invocation.
:ivar _clock: :class:`IReactorTime` implementer
:ivar _f: The function to call.
:ivar _deferred: Deferred returned by :meth:`.start()`.
:ivar _call: `IDelayedCall` when waiting for the next poll period.
Otherwise `None`.
:ivar bool _poked: `True` when the function should be immediately invoked
again after it completes.
:ivar bool _stopped: `True` once `stop()` has been called.
"""
_deferred = None
_call = None
_poked = False
_stopped = False
def __init__(self, clock, f):
"""
:param clock: :class:`IReactorTime` provider to use when scheduling
calls.
:param f: The function to call when the loop is started. It must return
the number of seconds to wait before calling it again, or
a deferred for the same.
"""
self._clock = clock
self._f = f
def start(self):
"""
Call the function immediately, and schedule future calls according to
its result.
:returns:
:class:`Deferred` which will succeed when :meth:`stop()` is called
and the loop cleanly exits, or fail when the function produces
a failure.
"""
assert self._deferred is None
assert self._call is None
assert not self._stopped
self._deferred = d = defer.Deferred()
self._callIt()
return d
def stop(self):
self._stopped = True
if self._call:
self._call.cancel()
self._deferred.callback(self)
def poke(self):
"""
Run the function as soon as possible: either immediately or once it has
finished any current execution. This is a no-op if the service has been
stopped. Pokes coalesce if received while the function is executing.
"""
if self._stopped or self._poked:
return
if self._call:
self._call.cancel()
self._callIt()
else:
self._poked = True
def _callIt(self):
self._call = None
d = defer.maybeDeferred(self._f)
d.addCallback(self._schedule)
d.addErrback(self._failLoop)
def _schedule(self, seconds):
"""
Schedule the next call.
"""
assert isinstance(seconds, (int, float))
if self._stopped:
d, self._deferred = self._deferred, None
d.callback(self)
elif self._poked:
self._poked = False
self._callIt()
else:
self._call = self._clock.callLater(seconds, self._callIt)
def _failLoop(self, failure):
"""
Terminate the loop due to an unhandled failure.
"""
d, self._deferred = self._deferred, None
d.errback(failure)
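# Minimal usage sketch (assuming a running reactor): poll a function that asks
# to be called again in 60 seconds, and poke it on demand.
#   loop = AdaptiveLoopingCall(reactor, lambda: 60.0)
#   d = loop.start()  # fires when stop() is called, or fails with the loop
#   loop.poke()       # re-run the function as soon as possible
#   loop.stop()       # cleanly ends the loop, firing d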
def run():
from twisted.internet import reactor
root = logging.getLogger()
logging.getLogger("django").setLevel(logging.INFO)
logging.raiseExceptions = settings.DEBUG
logging._srcfile = None # Disable expensive collection of location information.
root.setLevel(logging.DEBUG if settings.DEBUG else logging.INFO)
root.addHandler(TwistedLoggerLogHandler())
observer = FilteringLogObserver(
FileLogObserver(sys.stdout, formatForSystemd),
[dropUnhandledHTTP2Shutdown],
)
globalLogBeginner.beginLoggingTo([observer], redirectStandardIO=False)
log.info("Yarrharr {version} starting", version=__version__)
factory = Site(Root(reactor, reactor.getThreadPool()), logPath=None)
endpoint = serverFromString(reactor, settings.SERVER_ENDPOINT)
reactor.addSystemEventTrigger("before", "startup", endpoint.listen, factory)
updateLoop = AdaptiveLoopingCall(reactor, lambda: updateFeeds(reactor))
loopEndD = updateLoop.start()
loopEndD.addErrback(lambda f: log.failure("Polling loop broke", f))
@receiver(schedule_changed)
def threadPollNow(sender, **kwargs):
"""
When the `schedule_changed` signal is sent poke the polling loop. If it
is sleeping this will cause it to poll immediately. Otherwise this will
cause it to run the poll function immediately once it returns (running
it again protects against races).
"""
log.debug("Immediate poll triggered by {sender}", sender=sender)
reactor.callFromThread(updateLoop.poke)
def stopUpdateLoop():
updateLoop.stop()
return loopEndD
reactor.addSystemEventTrigger("before", "shutdown", stopUpdateLoop)
reactor.run()
|
wdecoster/nanoget
|
nanoget/extraction_functions.py
|
import logging
from functools import reduce
import nanoget.utils as ut
import pandas as pd
import sys
import pysam
import re
from Bio import SeqIO
import concurrent.futures as cfutures
from itertools import repeat
def process_summary(summaryfile, **kwargs):
"""Extracting information from an albacore summary file.
Only reads which have a >0 length are returned.
The fields below may or may not exist, depending on the type of sequencing performed.
Fields 1-14 are for 1D sequencing.
Fields 1-23 for 2D sequencing.
Fields 24-27, 2-5, 22-23 for 1D^2 (1D2) sequencing
Fields 28-38 for barcoded workflows
1 filename
2 read_id
3 run_id
4 channel
5 start_time
6 duration
7 num_events
8 template_start
9 num_events_template
10 template_duration
11 num_called_template
12 sequence_length_template
13 mean_qscore_template
14 strand_score_template
15 complement_start
16 num_events_complement
17 complement_duration
18 num_called_complement
19 sequence_length_complement
20 mean_qscore_complement
21 strand_score_complement
22 sequence_length_2d
23 mean_qscore_2d
24 filename1
25 filename2
26 read_id1
27 read_id2
28 barcode_arrangement
29 barcode_score
30 barcode_full_arrangement
31 front_score
32 rear_score
33 front_begin_index
34 front_foundseq_length
35 rear_end_index
36 rear_foundseq_length
37 kit
38 variant
"""
logging.info("Nanoget: Collecting metrics from summary file {} for {} sequencing".format(
summaryfile, kwargs["readtype"]))
ut.check_existance(summaryfile)
if kwargs["readtype"] == "1D":
cols = ["channel", "start_time", "duration",
"sequence_length_template", "mean_qscore_template"]
elif kwargs["readtype"] in ["2D", "1D2"]:
cols = ["channel", "start_time", "duration", "sequence_length_2d", "mean_qscore_2d"]
if kwargs["barcoded"]:
cols.append("barcode_arrangement")
logging.info("Nanoget: Extracting metrics per barcode.")
try:
datadf = pd.read_csv(
filepath_or_buffer=summaryfile,
sep="\t",
usecols=cols,
)
except ValueError:
logging.error("Nanoget: did not find expected columns in summary file {}:\n {}".format(
summaryfile, ', '.join(cols)))
sys.exit("ERROR: expected columns in summary file {} not found:\n {}".format(
summaryfile, ', '.join(cols)))
if kwargs["barcoded"]:
datadf.columns = ["channelIDs", "time", "duration", "lengths", "quals", "barcode"]
else:
datadf.columns = ["channelIDs", "time", "duration", "lengths", "quals"]
logging.info("Nanoget: Finished collecting statistics from summary file {}".format(summaryfile))
return ut.reduce_memory_usage(datadf.loc[datadf["lengths"] != 0].copy())
def check_bam(bam, samtype="bam"):
"""Check if bam file is valid.
Bam file should:
- exists
- has an index (create if necessary)
- is sorted by coordinate
- has at least one mapped read
"""
ut.check_existance(bam)
samfile = pysam.AlignmentFile(bam, "rb")
if not samfile.has_index():
pysam.index(bam)
samfile = pysam.AlignmentFile(bam, "rb") # Need to reload the samfile after creating index
logging.info("Nanoget: No index for bam file could be found, created index.")
if not samfile.header['HD']['SO'] == 'coordinate':
logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam))
sys.exit("Please use a bam file sorted by coordinate.")
if samtype == "bam":
logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format(
bam, samfile.mapped, samfile.unmapped))
if samfile.mapped == 0:
logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam))
sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam))
return samfile
def process_ubam(bam, **kwargs):
"""Extracting metrics from unaligned bam format
Extracting lengths
"""
logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam))
samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
if not samfile.has_index():
pysam.index(bam)
# Need to reload the samfile after creating index
samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
logging.info("Nanoget: No index for bam file could be found, created index.")
datadf = pd.DataFrame(
data=[(read.query_name, ut.ave_qual(read.query_qualities), read.query_length)
for read in samfile.fetch(until_eof=True)],
columns=["readIDs", "quals", "lengths"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: ubam {} contains {} reads.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def process_bam(bam, **kwargs):
"""Combines metrics from bam after extraction.
Processing function: calls pool of worker functions
to extract from a bam file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from bam file {}.".format(bam))
samfile = check_bam(bam)
chromosomes = samfile.references
if len(chromosomes) > 100 or kwargs["huge"]:
logging.info("Nanoget: lots of contigs (>100) or --huge, not running in separate processes")
datadf = pd.DataFrame(
data=extract_from_bam(bam, None, kwargs["keep_supp"]),
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
else:
unit = chromosomes
with cfutures.ProcessPoolExecutor(max_workers=kwargs["threads"]) as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam,
repeat(bam),
unit,
repeat(kwargs["keep_supp"]))
for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info(f"Nanoget: bam {bam} contains {datadf['lengths'].size} primary alignments.")
return ut.reduce_memory_usage(datadf)
def process_cram(cram, **kwargs):
"""Combines metrics from cram after extraction.
Processing function: calls pool of worker functions
to extract from a cram file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from cram file {}.".format(cram))
samfile = check_bam(cram, samtype="cram")
chromosomes = samfile.references
if len(chromosomes) > 100:
unit = [None]
logging.info("Nanoget: lots of contigs (>100), not running in separate processes")
else:
unit = chromosomes
with cfutures.ProcessPoolExecutor(max_workers=kwargs["threads"]) as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam,
repeat(cram), unit, repeat(kwargs["keep_supp"]))
for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info(f"Nanoget: cram {cram} contains {datadf['lengths'].size} primary alignments.")
return ut.reduce_memory_usage(datadf)
def extract_from_bam(bam, chromosome, keep_supplementary=True):
"""Extracts metrics from bam.
Worker function per chromosome
loop over a bam file and create list with tuples containing metrics:
-qualities
-aligned qualities
-lengths
-aligned lengths
-mapping qualities
-edit distances to the reference genome scaled by read length
"""
samfile = pysam.AlignmentFile(bam, "rb")
if keep_supplementary:
return [
(read.query_name,
ut.ave_qual(read.query_qualities),
ut.ave_qual(read.query_alignment_qualities),
read.query_length,
read.query_alignment_length,
read.mapping_quality,
get_pID(read))
for read in samfile.fetch(reference=chromosome, multiple_iterators=True)
if not read.is_secondary and not read.is_unmapped]
else:
return [
(read.query_name,
ut.ave_qual(read.query_qualities),
ut.ave_qual(read.query_alignment_qualities),
read.query_length,
read.query_alignment_length,
read.mapping_quality,
get_pID(read))
for read in samfile.fetch(reference=chromosome, multiple_iterators=True)
if not read.is_secondary and not read.is_unmapped and not read.is_supplementary]
def get_pID(read):
"""Return the percent identity of a read.
based on the NM tag if present,
if not calculate from MD tag and CIGAR string
read.query_alignment_length can be zero in the case of ultra long reads aligned with minimap2 -L
"""
match = reduce(lambda x, y: x + y[1] if y[0] in (0, 7, 8) else x, read.cigartuples, 0)
ins = reduce(lambda x, y: x + y[1] if y[0] == 1 else x, read.cigartuples, 0)
delt = reduce(lambda x, y: x + y[1] if y[0] == 2 else x, read.cigartuples, 0)
alignment_length = match + ins + delt
try:
return (1 - read.get_tag("NM") / alignment_length) * 100
except KeyError:
try:
return 100 * (1 - (parse_MD(read.get_tag("MD")) + parse_CIGAR(read.cigartuples)) /
alignment_length)
except KeyError:
return None
except ZeroDivisionError:
return None
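# Worked example for the NM-based formula: cigartuples [(0, 95), (1, 3), (2, 2)]
# give match=95, ins=3, del=2, so alignment_length=100; with NM=10 the percent
# identity is (1 - 10/100) * 100 = 90.0.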
def parse_MD(MDlist):
"""Parse MD string to get number of mismatches and deletions."""
return sum([len(item) for item in re.split('[0-9^]', MDlist)])
def parse_CIGAR(cigartuples):
"""Count the insertions in the read using the CIGAR string."""
return sum([item[1] for item in cigartuples if item[0] == 1])
def handle_compressed_input(inputfq, file_type="fastq"):
"""Return handles from compressed files according to extension.
Check for which fastq input is presented and open a handle accordingly
Can read from compressed files (gz, bz2, bgz) or uncompressed
Relies on file extensions to recognize compression
"""
ut.check_existance(inputfq)
if inputfq.endswith(('.gz', 'bgz')):
import gzip
logging.info("Nanoget: Decompressing gzipped {} {}".format(file_type, inputfq))
return gzip.open(inputfq, 'rt')
elif inputfq.endswith('.bz2'):
import bz2
logging.info("Nanoget: Decompressing bz2 compressed {} {}".format(file_type, inputfq))
return bz2.open(inputfq, 'rt')
elif inputfq.endswith(('.fastq', '.fq', 'fasta', '.fa', '.fas')):
return open(inputfq, 'r')
else:
logging.error("INPUT ERROR: Unrecognized file extension {}".format(inputfq))
sys.exit('INPUT ERROR:\nUnrecognized file extension in {}\n'
'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))
def process_fasta(fasta, **kwargs):
"""Combine metrics extracted from a fasta file."""
logging.info("Nanoget: Starting to collect statistics from a fasta file.")
inputfasta = handle_compressed_input(fasta, file_type="fasta")
return ut.reduce_memory_usage(pd.DataFrame(
data=[len(rec) for rec in SeqIO.parse(inputfasta, "fasta")],
columns=["lengths"]
).dropna())
def process_fastq_plain(fastq, **kwargs):
"""Combine metrics extracted from a fastq file."""
logging.info("Nanoget: Starting to collect statistics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
return ut.reduce_memory_usage(pd.DataFrame(
data=[res for res in extract_from_fastq(inputfastq) if res],
columns=["quals", "lengths"]
).dropna())
def extract_from_fastq(fq):
"""Extract metrics from a fastq file.
Return average quality and read length
"""
for rec in SeqIO.parse(fq, "fastq"):
yield ut.ave_qual(rec.letter_annotations["phred_quality"]), len(rec)
def stream_fastq_full(fastq, threads):
"""Generator for returning metrics extracted from fastq.
Extract from a fastq file:
-readname
-average and median quality
    -read_length
"""
logging.info("Nanoget: Starting to collect full metrics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
with cfutures.ProcessPoolExecutor(max_workers=threads) as executor:
for results in executor.map(extract_all_from_fastq, SeqIO.parse(inputfastq, "fastq")):
yield results
logging.info("Nanoget: Finished collecting statistics from plain fastq file.")
def extract_all_from_fastq(rec):
"""Extract metrics from a fastq file.
Return identifier, read length, average quality and median quality
"""
return (rec.id,
len(rec),
ut.ave_qual(rec.letter_annotations["phred_quality"]),
None)
def info_to_dict(info):
"""Get the key-value pairs from the albacore/minknow fastq description and return dict"""
return {field.split('=')[0]: field.split('=')[1] for field in info.split(' ')[1:]}
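# Example: for a description like
# "read1 runid=abc ch=159 start_time=2016-07-15T14:23:22Z", info_to_dict
# returns {'runid': 'abc', 'ch': '159',
# 'start_time': '2016-07-15T14:23:22Z'} (the leading read id is skipped).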
def process_fastq_rich(fastq, **kwargs):
"""Extract metrics from a richer fastq file.
Extract information from fastq files generated by albacore or MinKNOW,
containing richer information in the header (key-value pairs)
read=<int> [72]
ch=<int> [159]
    start_time=<timestamp> [2016-07-15T14:23:22Z] # UTC ISO 8601 / RFC 3339 timestamp
Z indicates UTC time, T is the delimiter between date expression and time expression
dateutil.parser.parse("2016-07-15T14:23:22Z") imported as dparse
-> datetime.datetime(2016, 7, 15, 14, 23, 22, tzinfo=tzutc())
"""
logging.info("Nanoget: Starting to collect statistics from rich fastq file.")
inputfastq = handle_compressed_input(fastq)
res = []
for record in SeqIO.parse(inputfastq, "fastq"):
try:
read_info = info_to_dict(record.description)
res.append(
(ut.ave_qual(record.letter_annotations["phred_quality"]),
len(record),
read_info["ch"],
read_info["start_time"],
read_info["runid"]))
except KeyError:
logging.error("Nanoget: keyerror when processing record {}".format(record.description))
sys.exit("Unexpected fastq identifier:\n{}\n\n \
missing one or more of expected fields 'ch', 'start_time' or 'runid'".format(
record.description))
df = pd.DataFrame(
data=res,
columns=["quals", "lengths", "channelIDs", "timestamp", "runIDs"]).dropna()
df["channelIDs"] = df["channelIDs"].astype("int64")
return ut.reduce_memory_usage(df)
def readfq(fp):
"""Generator function adapted from https://github.com/lh3/readfq."""
last = None # this is a buffer keeping the last unprocessed line
while True: # mimic closure; is it a bad idea?
if not last: # the first record or a record following a fastq
for l in fp: # search for the start of the next record
if l[0] in '>@': # fasta/q header line
last = l[:-1] # save this line
break
if not last:
break
name, seqs, last = last[1:].partition(" ")[0], [], None
for l in fp: # read the sequence
if l[0] in '@+>':
last = l[:-1]
break
seqs.append(l[:-1])
if not last or last[0] != '+': # this is a fasta record
yield name, ''.join(seqs), None # yield a fasta record
if not last:
break
else: # this is a fastq record
seq, leng, seqs = ''.join(seqs), 0, []
for l in fp: # read the quality
seqs.append(l[:-1])
leng += len(l) - 1
if leng >= len(seq): # have read enough quality
last = None
yield name, seq, ''.join(seqs) # yield a fastq record
break
if last: # reach EOF before reading enough quality
yield name, seq, None # yield a fasta record instead
break
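# Usage sketch (the file name is illustrative):
#   with open("reads.fastq") as fp:
#       for name, seq, qual in readfq(fp):
#           ...  # qual is None when the record turned out to be fasta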
def fq_minimal(fq):
"""Minimal fastq metrics extractor.
    Quickly parse a fastq file, making strong assumptions about the format;
    there will be dragons if an unexpected format is used.
    Expects the fastq_rich format, but extracts only timestamp and length.
"""
try:
while True:
time = next(fq)[1:].split(" ")[4][11:-1]
length = len(next(fq))
next(fq)
next(fq)
yield time, length
except StopIteration:
yield None
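# Note: fq_minimal() consumes a raw line iterator (not SeqIO records), assuming
# exactly four lines per read and start_time as the fifth whitespace-separated
# header field; the [11:-1] slice strips the "start_time=" prefix and the
# trailing "Z" of the timestamp.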
def process_fastq_minimal(fastq, **kwargs):
"""Swiftly extract minimal features (length and timestamp) from a rich fastq file"""
infastq = handle_compressed_input(fastq)
try:
df = pd.DataFrame(
data=[rec for rec in fq_minimal(infastq) if rec],
columns=["timestamp", "lengths"]
)
except IndexError:
logging.error("Fatal: Incorrect file structure for fastq_minimal")
sys.exit("Error: file does not match expected structure for fastq_minimal")
return ut.reduce_memory_usage(df)
|
qpiel/python_estimation_source
|
Example/ExReadFermiCatalog.py
|
# author David Sanchez david.sanchez@lapp.in2p3.fr
# ------ Imports --------------- #
import numpy
from Plot.PlotLibrary import *
from Catalog.ReadFermiCatalog import *
from environ import FERMI_CATALOG_DIR
# ------------------------------ #
#look for this 2FGL source
source = "2FGL J1015.1+4925"
#source = "1FHL J2158.8-3013"
#source = "3FGL J2158.8-3013"
Cat = FermiCatalogReader(source,FERMI_CATALOG_DIR,"e2dnde","TeV")
#print some information
print "2FGL association ",Cat.Association('3FGL')
print "3FGL Name ",Cat.Association('2FHL','3FGL_name')
print "3FGL Var Index ",Cat.GetVarIndex("3FGL")
#create a spectrum for a given catalog and compute the model+butterfly
Cat.MakeSpectrum("3FGL",1e-4,0.3)
enerbut,but,enerphi,phi = Cat.Plot("3FGL")
Cat.MakeSpectrum("2FGL",1e-4,0.3)
enerbut2FGL,but2FGL,enerphi2FGL,phi2FGL = Cat.Plot("2FGL")
Cat.MakeSpectrum("2FHL",5e-2,2)
enerbut2FHL,but2FHL,enerphi2FHL,phi2FHL = Cat.Plot("2FHL")
# read DATA Point
em,ep,flux,dflux = Cat.GetDataPoints('3FGL') #energy in TeV since the user asked for that when creating Cat
ener = numpy.sqrt(em*ep)
dem = ener-em
dep = ep-ener
c=Cat.ReadPL('3FGL')[3]
dnde = (-c+1)*flux*numpy.power(ener*1e6,-c+2)/(numpy.power((ep*1e6),-c+1)-numpy.power((em*1e6),-c+1))*1.6e-6
ddnde = dnde*dflux/flux
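# The two lines above convert the integrated band flux into E^2 dN/dE at the
# geometric-mean energy, assuming a power law of index c inside the band (c is
# read from the 3FGL power-law parameters); the 1.6e-6 factor converts MeV to
# erg.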
#plot
import matplotlib.pyplot as plt
plt.loglog()
plt.plot(enerbut, but, 'b-',label = "3FGL")
plt.plot(enerphi,phi, 'b-')
plt.plot(enerbut2FGL,but2FGL,'g-',label = "2FGL")
plt.plot(enerphi2FGL,phi2FGL,'g-')
plt.plot(enerbut2FHL,but2FHL,'r-',label = "2FHL")
plt.plot(enerphi2FHL,phi2FHL,'r-')
plt.errorbar(ener, dnde, xerr= [dem,dep], yerr = ddnde,fmt='o')
plt.legend(loc = 3)
plt.ylabel('E2dN/dE(erg.cm-2.s-1)')
plt.xlabel('energy (TeV)')
plt.show()
|
squilter/ardupilot
|
libraries/AP_MSP/Tools/pymsp.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
author: Alex Apostoli
based on https://github.com/hkm95/python-multiwii
which is under GPLv3
"""
import struct
import time
import sys
import re
class MSPItem:
def __init__(self, name, fmt, fields):
self.name = name
self.format = fmt
self.fields = fields
if not isinstance(self.format, list):
self.format = [self.format]
self.fields = [self.fields]
self.values = {}
def parse(self, msp, dataSize):
'''parse data'''
ofs = msp.p
for i in range(len(self.format)):
fmt = self.format[i]
fields = self.fields[i].split(',')
if fmt[0] == '{':
# we have a repeat count from an earlier variable
right = fmt.find('}')
vname = fmt[1:right]
count = self.values[vname]
fmt = "%u%s" % (count, fmt[right+1:])
if fmt[0].isdigit():
repeat = int(re.search(r'\d+', fmt).group())
else:
repeat = None
fmt = "<" + fmt
fmt_size = struct.calcsize(fmt)
if dataSize < fmt_size:
raise Exception("Format %s needs %u bytes got %u for %s" % (self.name, fmt_size, dataSize, fmt))
values = list(struct.unpack(fmt, msp.inBuf[ofs:ofs+fmt_size]))
if repeat is not None:
for i in range(len(fields)):
self.values[fields[i]] = []
for j in range(repeat):
self.values[fields[i]].append(values[j*len(fields)])
else:
for i in range(len(fields)):
self.values[fields[i]] = values[i]
dataSize -= fmt_size
ofs += fmt_size
msp.by_name[self.name] = self
#print("Got %s" % self.name)
class PyMSP:
""" Multiwii Serial Protocol """
OSD_RSSI_VALUE = 0
OSD_MAIN_BATT_VOLTAGE = 1
OSD_CROSSHAIRS = 2
OSD_ARTIFICIAL_HORIZON = 3
OSD_HORIZON_SIDEBARS = 4
OSD_ITEM_TIMER_1 = 5
OSD_ITEM_TIMER_2 = 6
OSD_FLYMODE = 7
OSD_CRAFT_NAME = 8
OSD_THROTTLE_POS = 9
OSD_VTX_CHANNEL = 10
OSD_CURRENT_DRAW = 11
OSD_MAH_DRAWN = 12
OSD_GPS_SPEED = 13
OSD_GPS_SATS = 14
OSD_ALTITUDE = 15
OSD_ROLL_PIDS = 16
OSD_PITCH_PIDS = 17
OSD_YAW_PIDS = 18
OSD_POWER = 19
OSD_PIDRATE_PROFILE = 20
OSD_WARNINGS = 21
OSD_AVG_CELL_VOLTAGE = 22
OSD_GPS_LON = 23
OSD_GPS_LAT = 24
OSD_DEBUG = 25
OSD_PITCH_ANGLE = 26
OSD_ROLL_ANGLE = 27
OSD_MAIN_BATT_USAGE = 28
OSD_DISARMED = 29
OSD_HOME_DIR = 30
OSD_HOME_DIST = 31
OSD_NUMERICAL_HEADING = 32
OSD_NUMERICAL_VARIO = 33
OSD_COMPASS_BAR = 34
OSD_ESC_TMP = 35
OSD_ESC_RPM = 36
OSD_REMAINING_TIME_ESTIMATE = 37
OSD_RTC_DATETIME = 38
OSD_ADJUSTMENT_RANGE = 39
OSD_CORE_TEMPERATURE = 40
OSD_ANTI_GRAVITY = 41
OSD_G_FORCE = 42
OSD_MOTOR_DIAG = 43
OSD_LOG_STATUS = 44
OSD_FLIP_ARROW = 45
OSD_LINK_QUALITY = 46
OSD_FLIGHT_DIST = 47
OSD_STICK_OVERLAY_LEFT = 48
OSD_STICK_OVERLAY_RIGHT = 49
OSD_DISPLAY_NAME = 50
OSD_ESC_RPM_FREQ = 51
OSD_RATE_PROFILE_NAME = 52
OSD_PID_PROFILE_NAME = 53
OSD_PROFILE_NAME = 54
OSD_RSSI_DBM_VALUE = 55
OSD_RC_CHANNELS = 56
OSD_CAMERA_FRAME = 57
MSP_NAME =10
MSP_OSD_CONFIG =84
MSP_IDENT =100
MSP_STATUS =101
MSP_RAW_IMU =102
MSP_SERVO =103
MSP_MOTOR =104
MSP_RC =105
MSP_RAW_GPS =106
MSP_COMP_GPS =107
MSP_ATTITUDE =108
MSP_ALTITUDE =109
MSP_ANALOG =110
MSP_RC_TUNING =111
MSP_PID =112
MSP_BOX =113
MSP_MISC =114
MSP_MOTOR_PINS =115
MSP_BOXNAMES =116
MSP_PIDNAMES =117
MSP_WP =118
MSP_BOXIDS =119
MSP_SERVO_CONF =120
MSP_NAV_STATUS =121
MSP_NAV_CONFIG =122
MSP_MOTOR_3D_CONFIG =124
MSP_RC_DEADBAND =125
MSP_SENSOR_ALIGNMENT =126
MSP_LED_STRIP_MODECOLOR =127
MSP_VOLTAGE_METERS =128
MSP_CURRENT_METERS =129
MSP_BATTERY_STATE =130
MSP_MOTOR_CONFIG =131
MSP_GPS_CONFIG =132
MSP_COMPASS_CONFIG =133
MSP_ESC_SENSOR_DATA =134
MSP_GPS_RESCUE =135
MSP_GPS_RESCUE_PIDS =136
MSP_VTXTABLE_BAND =137
MSP_VTXTABLE_POWERLEVEL =138
MSP_MOTOR_TELEMETRY =139
MSP_SET_RAW_RC =200
MSP_SET_RAW_GPS =201
MSP_SET_PID =202
MSP_SET_BOX =203
MSP_SET_RC_TUNING =204
MSP_ACC_CALIBRATION =205
MSP_MAG_CALIBRATION =206
MSP_SET_MISC =207
MSP_RESET_CONF =208
MSP_SET_WP =209
MSP_SELECT_SETTING =210
MSP_SET_HEAD =211
MSP_SET_SERVO_CONF =212
MSP_SET_MOTOR =214
MSP_SET_NAV_CONFIG =215
MSP_SET_MOTOR_3D_CONFIG =217
MSP_SET_RC_DEADBAND =218
MSP_SET_RESET_CURR_PID =219
MSP_SET_SENSOR_ALIGNMENT =220
MSP_SET_LED_STRIP_MODECOLOR=221
MSP_SET_MOTOR_CONFIG =222
MSP_SET_GPS_CONFIG =223
MSP_SET_COMPASS_CONFIG =224
MSP_SET_GPS_RESCUE =225
MSP_SET_GPS_RESCUE_PIDS =226
MSP_SET_VTXTABLE_BAND =227
MSP_SET_VTXTABLE_POWERLEVEL=228
MSP_BIND =241
MSP_RTC =247
MSP_EEPROM_WRITE =250
MSP_DEBUGMSG =253
MSP_DEBUG =254
IDLE = 0
HEADER_START = 1
HEADER_M = 2
HEADER_ARROW = 3
HEADER_SIZE = 4
HEADER_CMD = 5
HEADER_ERR = 6
PIDITEMS = 10
MESSAGES = {
MSP_RAW_GPS: MSPItem('RAW_GPS', "BBiihH", "fix,numSat,Lat,Lon,Alt,Speed"),
MSP_IDENT: MSPItem('IDENT', "BBBI", "version,multiType,MSPVersion,multiCapability"),
MSP_RAW_IMU: MSPItem('RAW_IMU', "hhhhhhhhh", "AccX,AccY,AccZ,GyrX,GyrY,GyrZ,MagX,MagY,MagZ"),
MSP_SERVO: MSPItem('SERVO', "8h", "servo"),
MSP_MOTOR: MSPItem('MOTOR', "8h", "motor"),
MSP_RC: MSPItem('RC', "8h", "rc"),
MSP_COMP_GPS: MSPItem('COMP_GPS', "HhB", "distanceToHome,directionToHome,update"),
MSP_ATTITUDE: MSPItem('ATTITUDE', "hhh", "roll,pitch,yaw"),
MSP_ALTITUDE: MSPItem('ALTITUDE', "ih", "alt,vspeed"),
MSP_RC_TUNING: MSPItem('RC_TUNING', "BBBBBBB", "RC_Rate,RC_Expo,RollPitchRate,YawRate,DynThrPID,ThrottleMID,ThrottleExpo"),
MSP_BATTERY_STATE: MSPItem('BATTERY_STATE', "BHBHh", "cellCount,capacity,voltage,mah,current"),
MSP_RTC: MSPItem('RTC', "HBBBBBH", "year,mon,mday,hour,min,sec,millis"),
MSP_OSD_CONFIG: MSPItem("OSD_CONFIG",
["BBBBHBBH",
"{osd_item_count}H",
"B", "{stats_item_count}H",
"B", "{timer_count}H",
"HBIBBB"],
["feature,video_system,units,rssi_alarm,cap_alarm,unused1,osd_item_count,alt_alarm",
"osd_items",
"stats_item_count", "stats_items",
"timer_count", "timer_items",
"legacy_warnings,warnings_count,enabled_warnings,profiles,selected_profile,osd_overlay"]),
        MSP_PID: MSPItem("PID", "8PID", "P,I,D"),  # caution: "8PID" is not a valid struct format string, so parsing MSP_PID raises struct.error
MSP_MISC: MSPItem("MISC", "HHHHHII","intPowerTrigger,conf1,conf2,conf3,conf4,conf5,conf6"),
MSP_MOTOR_PINS: MSPItem("MOTOR_PINS", "8H","MP"),
MSP_ANALOG: MSPItem("ANALOG", "BHHHH", "dV,consumed_mah,rssi,current,volt"),
MSP_STATUS: MSPItem("STATUS", "HHHIBHHBBIB", "task_delta,i2c_err_count,sensor_status,mode_flags,nop_1,system_load,gyro_time,nop_2,nop_3,armed,extra"),
}
def __init__(self):
self.msp_name = {
'name':None
}
self.msp_osd_config = {}
self.inBuf = bytearray([0] * 255)
self.p = 0
self.c_state = self.IDLE
self.err_rcvd = False
self.checksum = 0
self.cmd = 0
self.offset=0
self.dataSize=0
self.servo = []
self.mot = []
self.RCChan = []
self.byteP = []
self.byteI = []
self.byteD = []
self.confINF = []
self.byteMP = []
self.confP = []
self.confI = []
self.confD = []
# parsed messages, indexed by name
self.by_name = {}
def get(self, fieldname):
'''get a field from a parsed message by Message.Field name'''
a = fieldname.split('.')
msgName = a[0]
fieldName = a[1]
if not msgName in self.by_name:
            # default to zero for simplicity of display
return 0
msg = self.by_name[msgName]
if not fieldName in msg.values:
raise Exception("Unknown field %s" % fieldName)
return msg.values[fieldName]
def read32(self):
'''signed 32 bit number'''
value, = struct.unpack("<i", self.inBuf[self.p:self.p+4])
self.p += 4
return value
def read32u(self):
'''unsigned 32 bit number'''
value, = struct.unpack("<I", self.inBuf[self.p:self.p+4])
self.p += 4
return value
def read16(self):
'''signed 16 bit number'''
value, = struct.unpack("<h", self.inBuf[self.p:self.p+2])
self.p += 2
return value
def read16u(self):
'''unsigned 16 bit number'''
value, = struct.unpack("<H", self.inBuf[self.p:self.p+2])
self.p += 2
return value
def read8(self):
'''unsigned 8 bit number'''
value, = struct.unpack("<B", self.inBuf[self.p:self.p+1])
self.p += 1
return value
def requestMSP (self, msp, payload = [], payloadinbytes = False):
if msp < 0:
return 0
checksum = 0
bf = ['$', 'M', '<']
        if payloadinbytes:
            pl_size = len(payload) & 0xFF
        else:
            pl_size = 2 * (len(payload) & 0xFF)
        bf.append(pl_size)
        checksum ^= (pl_size & 0xFF)
        bf.append(msp & 0xFF)
        checksum ^= (msp & 0xFF)
        if len(payload) > 0:
            # pack the payload as 16 bit words by default, or as raw bytes when
            # payloadinbytes is set, folding every packed byte into the checksum
            if payloadinbytes == False:
                packed = struct.pack('<%dh' % len(payload), *payload)
            else:
                packed = struct.pack('<%dB' % len(payload), *payload)
            for c in bytearray(packed):
                checksum ^= (c & 0xFF)
            bf = bf + payload
bf.append(checksum)
return bf
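    # Frame layout produced above (MSP v1 request): '$' 'M' '<', a size byte,
    # the command id, an optional payload and an XOR checksum over the size,
    # command and payload bytes. For example a zero-payload MSP_ATTITUDE (108)
    # request is $ M < 0x00 0x6C 0x6C, since checksum = 0x00 ^ 0x6C = 0x6C.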
def evaluateCommand(self, cmd, dataSize):
if cmd in self.MESSAGES:
# most messages are parsed from the MESSAGES list
self.MESSAGES[cmd].parse(self, dataSize)
elif cmd == self.MSP_NAME:
s = bytearray()
for i in range(0,dataSize,1):
b = self.read8()
if b == 0:
break
s.append(b)
self.msp_name['name'] = s.decode("utf-8")
elif cmd == self.MSP_ACC_CALIBRATION:
x = None
elif cmd == self.MSP_MAG_CALIBRATION:
x = None
elif cmd == self.MSP_BOX:
x = None
elif cmd == self.MSP_BOXNAMES:
x = None
elif cmd == self.MSP_PIDNAMES:
x = None
elif cmd == self.MSP_SERVO_CONF:
x = None
elif cmd == self.MSP_DEBUGMSG:
x = None
elif cmd == self.MSP_DEBUG:
x = None
else:
print("Unhandled command ", cmd, dataSize)
def parseMspData(self, buf):
for c in buf:
self.parseMspByte(c)
def parseMspByte(self, c):
if sys.version_info.major >= 3:
cc = chr(c)
ci = c
else:
cc = c
ci = ord(c)
if self.c_state == self.IDLE:
if cc == '$':
self.c_state = self.HEADER_START
else:
self.c_state = self.IDLE
elif self.c_state == self.HEADER_START:
if cc == 'M':
self.c_state = self.HEADER_M
else:
self.c_state = self.IDLE
elif self.c_state == self.HEADER_M:
if cc == '>':
self.c_state = self.HEADER_ARROW
elif cc == '!':
self.c_state = self.HEADER_ERR
else:
self.c_state = self.IDLE
elif self.c_state == self.HEADER_ARROW or self.c_state == self.HEADER_ERR:
self.err_rcvd = (self.c_state == self.HEADER_ERR)
#print (struct.unpack('<B',c)[0])
self.dataSize = ci
# reset index variables
self.p = 0
self.offset = 0
self.checksum = 0
self.checksum ^= ci
# the command is to follow
self.c_state = self.HEADER_SIZE
elif self.c_state == self.HEADER_SIZE:
#print (struct.unpack('<B',c)[0])
self.cmd = ci
self.checksum ^= ci
self.c_state = self.HEADER_CMD
elif self.c_state == self.HEADER_CMD and self.offset < self.dataSize:
#print (struct.unpack('<B',c)[0])
self.checksum ^= ci
self.inBuf[self.offset] = ci
self.offset += 1
elif self.c_state == self.HEADER_CMD and self.offset >= self.dataSize:
# compare calculated and transferred checksum
if ((self.checksum&0xFF) == ci):
if self.err_rcvd:
print("Vehicle didn't understand the request type")
else:
self.evaluateCommand(self.cmd, self.dataSize)
else:
print('"invalid checksum for command "+((int)(cmd&0xFF))+": "+(checksum&0xFF)+" expected, got "+(int)(c&0xFF))')
self.c_state = self.IDLE
def setPID(self):
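        # note: the confP/confI/confD gain lists and byte* lists are initialised
        # empty in __init__ and are not populated anywhere in this file; the
        # caller is expected to fill them before invoking setPID()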
self.sendRequestMSP(self.requestMSP(self.MSP_PID))
self.receiveData(self.MSP_PID)
time.sleep(0.04)
payload = []
for i in range(0, self.PIDITEMS, 1):
self.byteP[i] = int((round(self.confP[i] * 10)))
self.byteI[i] = int((round(self.confI[i] * 1000)))
self.byteD[i] = int((round(self.confD[i])))
# POS - 4 POSR - 5 NAVR - 6
self.byteP[4] = int((round(self.confP[4] * 100.0)))
self.byteI[4] = int((round(self.confI[4] * 100.0)))
self.byteP[5] = int((round(self.confP[5] * 10.0)))
self.byteI[5] = int((round(self.confI[5] * 100.0)))
self.byteD[5] = int((round(self.confD[5] * 10000.0))) / 10
self.byteP[6] = int((round(self.confP[6] * 10.0)))
self.byteI[6] = int((round(self.confI[6] * 100.0)))
self.byteD[6] = int((round(self.confD[6] * 10000.0))) / 10
for i in range(0, self.PIDITEMS, 1):
payload.append(self.byteP[i])
payload.append(self.byteI[i])
payload.append(self.byteD[i])
self.sendRequestMSP(self.requestMSP(self.MSP_SET_PID, payload, True), True)
def arm(self):
timer = 0
start = time.time()
while timer < 0.5:
data = [1500,1500,2000,1000]
self.sendRequestMSP(self.requestMSP(self.MSP_SET_RAW_RC,data))
time.sleep(0.05)
timer = timer + (time.time() - start)
start = time.time()
def disarm(self):
timer = 0
start = time.time()
while timer < 0.5:
data = [1500,1500,1000,1000]
self.sendRequestMSP(self.requestMSP(self.MSP_SET_RAW_RC,data))
time.sleep(0.05)
timer = timer + (time.time() - start)
start = time.time()
    def receiveIMU(self, duration):
        timer = 0
        start = time.time()
        while timer < duration:
            self.sendRequestMSP(self.requestMSP(self.MSP_RAW_IMU))
            self.receiveData(self.MSP_RAW_IMU)
            # RAW_IMU is unpacked with signed 16 bit ('h') formats by MSPItem,
            # so no manual two's complement correction is needed here
            print("accx: %f, accy: %f, accz: %f, gyrx: %f, gyry: %f, gyrz: %f " % (
                self.get('RAW_IMU.AccX'), self.get('RAW_IMU.AccY'), self.get('RAW_IMU.AccZ'),
                self.get('RAW_IMU.GyrX'), self.get('RAW_IMU.GyrY'), self.get('RAW_IMU.GyrZ')))
            time.sleep(0.04)
            timer = timer + (time.time() - start)
            start = time.time()
def calibrateIMU(self):
self.sendRequestMSP(self.requestMSP(self.MSP_ACC_CALIBRATION))
time.sleep(0.01)
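# A minimal parse round-trip sketch (Python 3 byte semantics assumed; the
# attitude values are illustrative): build a "$M>" reply frame for MSP_ATTITUDE
# by hand, feed it through the state machine above and read the decoded fields
# back via get(). The transport methods used elsewhere in this class
# (sendRequestMSP, receiveData) are expected to be provided by the surrounding
# tooling and are not defined in this file.
def _demo_parse_attitude():
    msp = PyMSP()
    payload = struct.pack('<hhh', 100, -50, 900)  # roll, pitch (0.1 deg), yaw (deg)
    frame = bytearray(b'$M>') + bytearray([len(payload), PyMSP.MSP_ATTITUDE]) + payload
    checksum = 0
    for b in frame[3:]:
        checksum ^= b
    frame.append(checksum)
    msp.parseMspData(frame)
    return (msp.get('ATTITUDE.roll'), msp.get('ATTITUDE.pitch'), msp.get('ATTITUDE.yaw'))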
|
hackersql/sq1map
|
comm1x/src/core/injections/controller/shell_options.py
|
#!/usr/bin/env python
# encoding: UTF-8
"""
This file is part of Commix Project (http://commixproject.com).
Copyright (c) 2014-2017 Anastasios Stasinopoulos (@ancst).
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
For more see the file 'readme/COPYING' for copying permission.
"""
import re
import os
import sys
import time
import urllib
import urlparse
from src.utils import menu
from src.utils import settings
from src.core.injections.controller import checks
from src.thirdparty.colorama import Fore, Back, Style, init
from src.core.shells import bind_tcp
from src.core.shells import reverse_tcp
from src.core.injections.results_based.techniques.classic import cb_injector
from src.core.injections.results_based.techniques.eval_based import eb_injector
from src.core.injections.semiblind.techniques.file_based import fb_injector
"""
Check for established connection
"""
def check_established_connection():
while True:
if settings.VERBOSITY_LEVEL == 1:
print ""
warn_msg = "Something went wrong with the reverse TCP connection."
warn_msg += " Please wait while checking state."
print settings.print_warning_msg(warn_msg)
time.sleep(10)
lines = os.popen('netstat -anta').read().split("\n")
found = False
for line in lines:
if "ESTABLISHED" in line and settings.LPORT in line.split():
found = True
        break
if not found:
return
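# For reference: the loop above only treats the connection as alive while some
# netstat line contains "ESTABLISHED" and settings.LPORT appears as its own
# whitespace-separated token, so it depends on how the local netstat formats
# its address columns.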
"""
Execute the bind / reverse TCP shell
"""
def execute_shell(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename, os_shell_option, payload, OUTPUT_TEXTFILE):
if settings.EVAL_BASED_STATE != False:
# Command execution results.
start = time.time()
response = eb_injector.injection(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename)
end = time.time()
diff = end - start
# Evaluate injection results.
shell = eb_injector.injection_results(response, TAG, cmd)
else:
# Command execution results.
start = time.time()
if settings.FILE_BASED_STATE == True:
response = fb_injector.injection(separator, payload, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, OUTPUT_TEXTFILE, alter_shell, filename)
else:
whitespace = settings.WHITESPACE[0]
if whitespace == " ":
whitespace = urllib.quote(whitespace)
response = cb_injector.injection(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename)
end = time.time()
diff = end - start
# Evaluate injection results.
shell = cb_injector.injection_results(response, TAG, cmd)
if settings.REVERSE_TCP and (int(diff) > 0 and int(diff) < 6):
check_established_connection()
else:
if settings.VERBOSITY_LEVEL == 1:
print ""
err_msg = "The " + os_shell_option.split("_")[0] + " "
err_msg += os_shell_option.split("_")[1].upper() + " connection has failed!"
print settings.print_critical_msg(err_msg)
"""
Configure the bind TCP shell
"""
def bind_tcp_config(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename, os_shell_option, go_back, go_back_again, payload, OUTPUT_TEXTFILE):
settings.BIND_TCP = True
# Set up RHOST / LPORT for the bind TCP connection.
bind_tcp.configure_bind_tcp()
if settings.BIND_TCP == False:
if settings.REVERSE_TCP == True:
os_shell_option = "reverse_tcp"
reverse_tcp_config(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename, os_shell_option, go_back, go_back_again, payload, OUTPUT_TEXTFILE)
return go_back, go_back_again
while True:
if settings.RHOST and settings.LPORT in settings.SHELL_OPTIONS:
result = checks.check_bind_tcp_options(settings.RHOST)
else:
cmd = bind_tcp.bind_tcp_options()
result = checks.check_bind_tcp_options(cmd)
if result != None:
if result == 0:
go_back_again = False
elif result == 1 or result == 2:
go_back_again = True
settings.BIND_TCP = False
elif result == 3:
settings.BIND_TCP = False
reverse_tcp_config(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename, os_shell_option, go_back, go_back_again, payload, OUTPUT_TEXTFILE)
return go_back, go_back_again
# execute bind TCP shell
execute_shell(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename, os_shell_option, payload, OUTPUT_TEXTFILE)
"""
Configure the reverse TCP shell
"""
def reverse_tcp_config(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename, os_shell_option, go_back, go_back_again, payload, OUTPUT_TEXTFILE):
settings.REVERSE_TCP = True
# Set up LHOST / LPORT for the reverse TCP connection.
reverse_tcp.configure_reverse_tcp()
if settings.REVERSE_TCP == False:
if settings.BIND_TCP == True:
os_shell_option = "bind_tcp"
bind_tcp_config(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename, os_shell_option, go_back, go_back_again, payload, OUTPUT_TEXTFILE)
return go_back, go_back_again
while True:
if settings.LHOST and settings.LPORT in settings.SHELL_OPTIONS:
result = checks.check_reverse_tcp_options(settings.LHOST)
else:
cmd = reverse_tcp.reverse_tcp_options()
result = checks.check_reverse_tcp_options(cmd)
if result != None:
if result == 0:
go_back_again = False
elif result == 1 or result == 2:
go_back_again = True
settings.REVERSE_TCP = False
elif result == 3:
settings.REVERSE_TCP = False
bind_tcp_config(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename, os_shell_option, go_back, go_back_again, payload, OUTPUT_TEXTFILE)
#reverse_tcp_config(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename, os_shell_option, go_back, go_back_again)
return go_back, go_back_again
# execute reverse TCP shell
execute_shell(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename, os_shell_option, payload, OUTPUT_TEXTFILE)
"""
Check commix shell options
"""
def check_option(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename, technique, go_back, no_result, timesec, go_back_again, payload, OUTPUT_TEXTFILE):
os_shell_option = checks.check_os_shell_options(cmd.lower(), technique, go_back, no_result)
if os_shell_option == "back" or os_shell_option == True or os_shell_option == False:
go_back = True
if os_shell_option == False:
go_back_again = True
return go_back, go_back_again
# The "os_shell" option
elif os_shell_option == "os_shell":
warn_msg = "You are already into the '" + os_shell_option + "' mode."
print settings.print_warning_msg(warn_msg)
return go_back, go_back_again
# The "bind_tcp" option
elif os_shell_option == "bind_tcp":
go_back, go_back_again = bind_tcp_config(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename, os_shell_option, go_back, go_back_again, payload, OUTPUT_TEXTFILE)
return go_back, go_back_again
# The "reverse_tcp" option
elif os_shell_option == "reverse_tcp":
go_back, go_back_again = reverse_tcp_config(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename, os_shell_option, go_back, go_back_again, payload, OUTPUT_TEXTFILE)
return go_back, go_back_again
# The "quit" option
elif os_shell_option == "quit":
sys.exit(0)
else:
return go_back, go_back_again
|
kickino/aws-scripts
|
glacier/glacier_push.py
|
#!/usr/bin/python2.7
from boto.glacier.layer1 import Layer1
from boto.glacier.concurrent import ConcurrentUploader
import sys
import os.path
from time import gmtime, strftime
access_key_id = "xxx"
secret_key = "xxx"
target_vault_name = "xxx"
inventory = "xxx"
# the file to be uploaded into the vault as an archive
fname = sys.argv[1]
# a description you give to the file
fdes = os.path.basename(sys.argv[1])
if not os.path.isfile(fname):
    print("Can't find the file to upload")
    sys.exit(-1)
# glacier uploader
glacier_layer1 = Layer1(aws_access_key_id=access_key_id, aws_secret_access_key=secret_key, is_secure=True)
uploader = ConcurrentUploader(glacier_layer1, target_vault_name, part_size=128*1024*1024, num_threads=1)
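# Note: with a 128 MiB part size and Glacier's 10,000-part limit per multipart
# upload, this supports archives up to roughly 1.25 TiB; raise part_size for
# anything larger.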
archive_id = uploader.upload(fname, fdes)
# write an inventory file
f = open(inventory, 'a+')
f.write(archive_id+'\t'+fdes+'\n')
f.close()
sys.exit(0)
|
Theyrealone/ExcelMapper
|
run.py
|
from shutil import copyfile
from datetime import datetime
from ExcelMapper.mapper import *
import xlrd
import xlsxwriter
row_rules_sheet1_t1 = {
'found "risk"': lambda data: 'risk' in data['type'],
'found "Risk"': lambda data: 'Risk' in data['type'],
'found "reward"(ignore letter casing)': lambda data: 'reward' in data['type'].lower() or 'reward' in data['type'].lower()}
row_rules_sheet1_t2 = row_rules_sheet1_t1
col_rules_sheet1_t1 = {
'"high"' : lambda data: 'high' in data['amount'],
#'return true' : lambda data: True,
'return false' : lambda data: False}
col_rules_sheet1_t2 = {
'found "low"' : lambda data: 'low' in data['amount'],
'return true' : lambda data: True,
'return false' : lambda data: False}
row_rules_sheet2_t3 = {
'found "fire"': lambda data: 'fire' in data['type'],
'found "Fire"': lambda data: 'Fire' in data['type'],
'found "damage"(ignore letter casing)': lambda data: 'damage' in data['type'].lower()}
col_rules_sheet2_t3 = {
'"low"' : lambda data: 'low' == data['amount'],
'"high"': lambda data: 'high' == data['amount']}
def main():
date = str(datetime.now().date())
print "Maping"
excel_template = xlrd.open_workbook("map.xlsx")
copyfile('map.xlsx', "map {}.xlsx".format(date))
excel_data = xlrd.open_workbook("data.xlsx")
t1_excel_mapper = create_mapper(wb=excel_template,table_index=1,row_rules=row_rules_sheet1_t1,col_rules=col_rules_sheet1_t1)
t1_output_data = t1_excel_mapper.run(excel_data)
t2_excel_mapper = create_mapper(wb=excel_template,table_index=2,row_rules=row_rules_sheet1_t2,col_rules=col_rules_sheet1_t2)
t2_output_data = t2_excel_mapper.run(excel_data)
t3_excel_mapper = create_mapper(wb=excel_template,table_index=3,row_rules=row_rules_sheet2_t3,col_rules=col_rules_sheet2_t3)
t3_output_data = t3_excel_mapper.run(excel_data)
workbook = xlsxwriter.Workbook('output {}.xlsx'.format(date))
worksheet = workbook.add_worksheet()
for (row,col),results in t1_output_data.iteritems():
worksheet.write(row, col,len(results))
for (row,col),results in t2_output_data.iteritems():
worksheet.write(row, col,len(results))
worksheet = workbook.add_worksheet()
for (row,col),results in t3_output_data.iteritems():
worksheet.write(row, col,len(results))
workbook.close()
print "Done."
def clone_sheet(to_clone_sheet,new_sheet):
for row in range(0,to_clone_sheet.nrows):
for col in range(0,to_clone_sheet.ncols):
new_sheet.write(row,col,to_clone_sheet.cell_value(row, col))
if __name__ == "__main__":
main()
|
davidam/python-examples
|
scikit/latentsemantic.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2019 David Arroyo Menéndez
# Author: David Arroyo Menéndez <davidam@gnu.org>
# Maintainer: David Arroyo Menéndez <davidam@gnu.org>
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with latentsemantic; see the file LICENSE. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,
#import pdb
import pandas as pd
#pdb.set_trace()
df = pd.read_csv('data/Reviews.csv')
print(df.head(3))
print(df['Text'].head(2))
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer()
print(tfidf)
print(tfidf.fit(df['Text']))
X = tfidf.transform(df['Text'])
print(X)
#print(df['Text'][1].head(2))
print([X[1, tfidf.vocabulary_['peanuts']]])
print([X[1, tfidf.vocabulary_['jumbo']]])
print([X[1, tfidf.vocabulary_['error']]])
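# X is a sparse document-term matrix: X[i, tfidf.vocabulary_[w]] is the TF-IDF
# weight of word w in review i (0.0 when the word does not occur in it).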
import numpy as np
df.dropna(inplace=True)
df = df[df['Score'] != 3]  # drop neutral reviews before binarising
df['Positivity'] = np.where(df['Score'] > 3, 1, 0)
cols = ['Id', 'ProductId', 'UserId', 'ProfileName', 'HelpfulnessNumerator', 'HelpfulnessDenominator', 'Score', 'Time', 'Summary']
df.drop(cols, axis=1, inplace=True)
print(df.head(3))
from sklearn.model_selection import train_test_split
X = df.Text
y = df.Positivity
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 0)
print("Train set has total {0} entries with {1:.2f}% negative, {2:.2f}% positive".format(len(X_train),
(len(X_train[y_train == 0]) / (len(X_train)*1.))*100,
(len(X_train[y_train == 1]) / (len(X_train)*1.))*100))
print("Test set has total {0} entries with {1:.2f}% negative, {2:.2f}% positive".format(len(X_test),
(len(X_test[y_test == 0]) / (len(X_test)*1.))*100,
(len(X_test[y_test == 1]) / (len(X_test)*1.))*100))
|
kugel-/peasy
|
plugins/peasypytest.py
|
from __future__ import print_function
import gettext
import gi
gi.require_version('Peas', '1.0')
from gi.repository import GObject
from gi.repository import Peas
from gi.repository import Peasy
from gi.repository import Geany
gettext.bindtextdomain("peasy", "/home/kugel/dev/geany.git/build-linux/dest/share/locale")
gettext.textdomain("peasy")
_ = gettext.gettext
class PeasyPyTester(Peasy.Plugin, Peasy.PluginHelp):
    __gtype_name__ = 'PeasyPyTester'
object = GObject.property(type=GObject.Object)
# why is this needed!?
plugin_info = GObject.property(type=Peas.PluginInfo)
doc = None
def on_closed(self, obj, d):
print(d.display_name() + " closed")
self.doc = None
def do_enable(self):
print("do_enable: " + gettext.dgettext("peasy", "Hello from %s!") % self.props.plugin_info.get_name())
self.doc = Geany.Document.new_file("foo")
self.geany_plugin.geany_data.object.connect("document-close", self.on_closed)
return True
def do_disable(self):
print("do_disable: " + _("%s says bye!") % self.props.plugin_info.get_name())
if (self.doc and self.doc.is_valid):
self.doc.close()
def do_help(self):
print("Help!!")
|
autosportlabs/RaceCapture_App
|
spacer.py
|
#
# Race Capture App
#
# Copyright (C) 2014-2017 Autosport Labs
#
# This file is part of the Race Capture App
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License for more details. You should
# have received a copy of the GNU General Public License along with
# this code. If not, see <http://www.gnu.org/licenses/>.
from kivy.uix.widget import Widget
class HorizontalSpacer(Widget):
def __init__(self, **kwargs):
super(HorizontalSpacer, self).__init__( **kwargs)
self.size_hint_y = None
self.height=0
class VerticalSpacer(Widget):
def __init__(self, **kwargs):
super(VerticalSpacer, self).__init__( **kwargs)
self.size_hint_x = None
self.width=0
|
frankosan/pypers
|
pypers/steps/picard/reordersam.py
|
import os
from pypers.core.step import CmdLineStep
class ReorderSam(CmdLineStep):
spec = {
"version": "0.0.1",
"descr": [
"Runs ReorderSam to reorder chromosomes into GATK order"
],
"args":
{
"inputs": [
{
"name" : "input_bam",
"type" : "file",
"iterable" : True,
"descr" : "the input bam file",
},
{
"name" : "reference",
"type" : "ref_genome",
"tool" : "reordersam",
"descr" : "Reference whole genome fasta"
}
],
"outputs": [
{
"name" : "output_bam",
"type" : "file",
"value" : "dummy",
"descr" : "the reordered output bam",
}
],
"params": [
{
"name" : "jvm_args",
"value" : "-Xmx{{jvm_memory}}g -Djava.io.tmpdir={{output_dir}}",
"descr" : "java virtual machine arguments",
"readonly" : True
}
]
},
"cmd": [
"/usr/bin/java {{jvm_args}} -jar /software/pypers/picard-tools/picard-tools-1.119/picard-tools-1.119/ReorderSam.jar",
" I={{input_bam}} O={{output_bam}} CREATE_INDEX=True R={{reference}}"
],
"requirements": {
"memory": '8'
}
}
def preprocess(self):
"""
Set output bam name
"""
file_name = os.path.basename(self.input_bam)
self.output_bam = file_name.replace('.bam','.reord.bam')
super(ReorderSam, self).preprocess()
|
thica/ORCA-Remote
|
src/scripts/tools/tool_repositorymanager/RepManager.py
|
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import annotations
import os
from typing import Union
from typing import List
from typing import Dict
from xml.etree.ElementTree import Element
from xml.etree.ElementTree import SubElement
from kivy.logger import Logger
from ORCA.ui.ShowErrorPopUp import ShowErrorPopUp
from ORCA.utils.TypeConvert import ToUnicode
from ORCA.utils.TypeConvert import EscapeUnicode
from ORCA.utils.Filesystem import AdjustPathToOs
from ORCA.utils.FileName import cFileName
from ORCA.utils.LogError import LogError
from ORCA.vars.Replace import ReplaceVars
from ORCA.vars.Access import SetVar
from ORCA.vars.Actions import Var_DelArray
from ORCA.utils.XML import XMLPrettify
from ORCA.utils.Path import cPath
from ORCA.download.RepManagerEntry import cRepManagerEntry
import ORCA.Globals as Globals
oRepositoryManager:Union[cRepositoryManager,None] = None
def RepositoryManager(oPathRepSource:cPath) -> None:
""" starts RepositoryManager, we make it global to avoid wrong garbage collection """
global oRepositoryManager
oRepositoryManager=cRepositoryManager(oPathRepSource)
oRepositoryManager.CollectAndUpload()
def CreateRepVarArray(uBaseLocalDir:str) -> None:
global oRepositoryManager
if oRepositoryManager:
oRepositoryManager.CreateRepVarArray(uBaseLocalDir)
class cRepositoryManager:
""" The Main repository manager class, which uploads all reps to the cloud """
def __init__(self,oPathRepSource) -> None:
super(cRepositoryManager, self).__init__()
self.aFiles:List[str] = []
self.aRepManagerEntries:List[cRepManagerEntry] = []
self.aZipFiles:List[Dict] = []
self.oPathRepSource:cPath = oPathRepSource
def CollectAndUpload(self) -> None:
""" Collects all Reps and uploads them """
try:
oPath:cPath = Globals.oPathTmp + "RepManager"
oPath.Delete()
self.GetOthers()
self.GetCodesets()
self.GetDefinitions()
self.GetSkins()
self.GetInterfaces()
self.GetLanguages()
self.GetSounds()
self.GetScripts()
self.GetWizardTemplates()
self.GetFonts()
self.CreateRepository()
except Exception as e:
uMsg=LogError(uMsg='Critical failure on Repository Manager ...' ,oException=e)
ShowErrorPopUp(uMessage=uMsg)
def GetOthers(self) -> None:
""" Gets all others reps """
del self.aFiles[:]
del self.aRepManagerEntries[:]
self.aFiles=(self.oPathRepSource + 'repositories/orca-remote/repositories/others').GetFileList(bSubDirs = False, bFullPath = True)
for uFn in self.aFiles:
oRepManagerEntry:cRepManagerEntry = cRepManagerEntry(oFileName=uFn)
if oRepManagerEntry.ParseFromXML():
if not oRepManagerEntry.oRepEntry.bSkip:
self.aRepManagerEntries.append(oRepManagerEntry)
else:
Logger.warning('Resource not ready for Repository Manager, skipped: '+uFn)
self.SaveRepositoryXML('others','Various ORCA resources')
def GetFonts(self) -> None:
""" Gets all others reps """
del self.aFiles[:]
del self.aRepManagerEntries[:]
aFontsFolders:List[str] = Globals.oPathFonts.GetFolderList(bFullPath=True)
for uFontFolder in aFontsFolders:
oFnFontDefinition:cFileName = cFileName(cPath(uFontFolder)) + "fonts.xml"
oRepManagerEntry:cRepManagerEntry = cRepManagerEntry(oFileName=oFnFontDefinition)
if oRepManagerEntry.ParseFromXML():
if not oRepManagerEntry.oRepEntry.bSkip:
self.aRepManagerEntries.append(oRepManagerEntry)
else:
                Logger.warning('Font not ready for Repository Manager, skipped: '+oFnFontDefinition.string)
self.SaveRepositoryXML('fonts','Font Resources')
def GetCodesets(self) -> None:
""" Gets all codeset reps """
del self.aFiles[:]
del self.aRepManagerEntries[:]
self.aFiles=Globals.oPathCodesets.GetFileList(bSubDirs = True, bFullPath = True)
for uFn in self.aFiles:
if uFn.lower().endswith('.xml'):
oRepManagerEntry:cRepManagerEntry=cRepManagerEntry(oFileName=uFn)
if oRepManagerEntry.ParseFromXML():
if not oRepManagerEntry.oRepEntry.bSkip:
self.aRepManagerEntries.append(oRepManagerEntry)
else:
Logger.warning('Codeset not ready for Repository Manager, skipped: '+uFn)
self.SaveRepositoryXML('codesets','Orca Genuine Codesets')
def GetSounds(self) -> None:
""" Gets all sounds reps """
del self.aFiles[:]
del self.aRepManagerEntries[:]
for uSound in Globals.oSound.aSoundsList:
oFnSound:cFileName = cFileName(Globals.oPathSoundsRoot + uSound) +"sounds.xml"
oRepManagerEntry:cRepManagerEntry = cRepManagerEntry(oFileName=oFnSound)
if oRepManagerEntry.ParseFromXML():
if not oRepManagerEntry.oRepEntry.bSkip:
self.aRepManagerEntries.append(oRepManagerEntry)
else:
                Logger.warning('Soundset not ready for Repository Manager, skipped: '+oFnSound.string)
self.SaveRepositoryXML('sounds','Orca Genuine Sounds')
def GetDefinitions(self) -> None:
""" Gets all definition reps """
del self.aFiles[:]
del self.aRepManagerEntries[:]
for uDefinitionName in Globals.aDefinitionList:
oFnFile:cFileName=cFileName().ImportFullPath(uFnFullName='%s/definitions/%s/definition.xml' % (Globals.oPathRoot.string, uDefinitionName))
oRepManagerEntry:cRepManagerEntry=cRepManagerEntry(oFileName=oFnFile)
if oRepManagerEntry.ParseFromXML():
if not oRepManagerEntry.oRepEntry.bSkip:
self.aRepManagerEntries.append(oRepManagerEntry)
else:
                Logger.warning('Definition not ready for Repository Manager, skipped: '+oFnFile.string)
self.SaveRepositoryXML('definitions','Orca Genuine Definitions')
def GetLanguages(self) -> None:
""" Gets all Language reps """
del self.aFiles[:]
del self.aRepManagerEntries[:]
for uLanguage in Globals.aLanguageList:
oFn:cFileName=cFileName().ImportFullPath(uFnFullName='%s/languages/%s/strings.xml' % (Globals.oPathRoot.string, uLanguage))
oRepManagerEntry:cRepManagerEntry=cRepManagerEntry(oFileName=oFn)
if oRepManagerEntry.ParseFromXML():
if not oRepManagerEntry.oRepEntry.bSkip:
self.aRepManagerEntries.append(oRepManagerEntry)
else:
                Logger.warning('Language not ready for Repository Manager, skipped: '+oFn.string)
self.SaveRepositoryXML('languages','Orca Genuine Language Files')
def GetInterfaces(self) -> None:
""" Gets all interface reps """
del self.aFiles[:]
del self.aRepManagerEntries[:]
for uInterFaceName in Globals.oInterFaces.aObjectNameList:
oFn:cFileName=cFileName().ImportFullPath(uFnFullName='%s/interfaces/%s/interface.py' % (Globals.oPathRoot.string, uInterFaceName))
oRepManagerEntry:cRepManagerEntry=cRepManagerEntry(oFileName=oFn)
if oRepManagerEntry.ParseFromSourceFile():
if not oRepManagerEntry.oRepEntry.bSkip:
self.aRepManagerEntries.append(oRepManagerEntry)
else:
                Logger.warning('Interface not ready for Repository Manager, skipped: '+oFn.string)
self.SaveRepositoryXML('interfaces','Orca Genuine Interfaces')
def GetScripts(self) -> None:
""" Gets all scripts reps """
del self.aFiles[:]
del self.aRepManagerEntries[:]
for uScriptName in Globals.oScripts.dScriptPathList:
oFn:cFileName=cFileName(Globals.oScripts.dScriptPathList[uScriptName])+'script.py'
oRepManagerEntry:cRepManagerEntry=cRepManagerEntry(oFileName=oFn)
if oRepManagerEntry.ParseFromSourceFile():
if not oRepManagerEntry.oRepEntry.bSkip:
self.aRepManagerEntries.append(oRepManagerEntry)
else:
                Logger.warning('Script not ready for Repository Manager, skipped: '+oFn.string)
self.SaveRepositoryXML('scripts','Orca Genuine Scripts')
def GetSkins(self) -> None:
""" Gets all skins reps """
del self.aFiles[:]
del self.aRepManagerEntries[:]
for uSkinName in Globals.aSkinList:
oFn:cFileName=cFileName().ImportFullPath(uFnFullName='%s/skins/%s/skin.xml' % (Globals.oPathRoot.string, uSkinName))
oRepManagerEntry:cRepManagerEntry=cRepManagerEntry(oFileName=oFn)
if oRepManagerEntry.ParseFromXML():
if not oRepManagerEntry.oRepEntry.bSkip:
self.aRepManagerEntries.append(oRepManagerEntry)
else:
                Logger.warning('Skin not ready for Repository Manager, skipped: '+oFn.string)
self.SaveRepositoryXML('skins','Orca Genuine Skins')
def GetWizardTemplates(self) -> None:
""" Gets all wizard reps """
del self.aFiles[:]
del self.aRepManagerEntries[:]
aDirs:List[str]=(Globals.oPathRoot + u'wizard templates').GetFolderList()
for uDirName in aDirs:
aDirsSub:List[str]=(Globals.oPathRoot + (u'wizard templates/' + uDirName)).GetFolderList()
for uDirsSub in aDirsSub:
oFn:cFileName=cFileName(Globals.oPathRoot + (u'wizard templates/' + uDirName + "/" + uDirsSub)) + (uDirsSub + ".xml")
oRepManagerEntry:cRepManagerEntry=cRepManagerEntry(oFileName=oFn)
if oRepManagerEntry.ParseFromXML():
if not oRepManagerEntry.oRepEntry.bSkip:
self.aRepManagerEntries.append(oRepManagerEntry)
else:
                    Logger.warning('Wizard Template not ready for Repository Manager, skipped: '+oFn.string)
self.SaveRepositoryXML('wizard templates','Wizard Templates')
def SaveRepositoryXML(self,uType:str,uDescription:str) -> None:
""" Saves the main repository directory xml """
oVal:Element
uContent:str
uRoot:str
oPath:cPath= Globals.oPathTmp + "RepManager"
oPath.Create()
oPath=oPath+"repositories"
oPath.Create()
oPath=oPath+uType
oPath.Create()
oFnXml:cFileName=cFileName(oPath) +'repository.xml'
oXMLRoot:Element = Element('repository')
oVal = SubElement(oXMLRoot,'version')
oVal.text = '1.00'
oVal = SubElement(oXMLRoot,'type')
oVal.text = uType
oVal = SubElement(oXMLRoot,'description')
oVal.text = uDescription
oXMLEntries:Element = SubElement(oXMLRoot,'entries')
for oEntry in self.aRepManagerEntries:
Logger.debug ('Saving Repository-Entry [%s]' % oEntry.oFnEntry.string)
oEntry.oRepEntry.WriteToXMLNode(oXMLNode=oXMLEntries)
for oSource in oEntry.oRepEntry.aSources:
bZipParentDir:bool = cPath.CheckIsDir(uCheckName=oSource.uLocal)
# Create according Zip
if bZipParentDir:
uUpper:str = os.path.basename(oSource.uSourceFile)
uFinalPath:str = uType
oDest:cFileName = cFileName().ImportFullPath(uFnFullName='%s/RepManager/repositories/%s/%s' % (Globals.oPathTmp.string, uFinalPath, uUpper))
uUpper1:str = os.path.split(os.path.abspath(oSource.uLocal))[0]
uRoot = AdjustPathToOs(uPath=ReplaceVars(uUpper1)+'/')
self.aZipFiles.append({'filename':oSource.uLocal,'dstfilename':oDest.string, 'removepath':uRoot, 'skipfiles':ToUnicode(oEntry.oRepEntry.aSkipFileNames)})
else:
uDest:str = AdjustPathToOs(uPath='%s/RepManager/repositories/%s/%s.zip' % (Globals.oPathTmp.string, uType, os.path.splitext(os.path.basename(oSource.uLocal))[0]))
uRoot = AdjustPathToOs(uPath=Globals.oPathRoot.string + "/" + oSource.uTargetPath)
self.aZipFiles.append({'filename':oSource.uLocal,'dstfilename':uDest, 'removepath':uRoot})
oFSFile = open(oFnXml.string, 'w')
uContent = XMLPrettify(oElem=oXMLRoot)
uContent = ReplaceVars(uContent)
oFSFile.write(EscapeUnicode(uContent))
oFSFile.close()
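    # The resulting repository.xml skeleton looks roughly like (entry contents
    # depend on WriteToXMLNode and are not shown here):
    #   <repository>
    #     <version>1.00</version>
    #     <type>...</type>
    #     <description>...</description>
    #     <entries>...</entries>
    #   </repository>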
def CreateRepository(self) -> None:
self.CreateZipVarArray()
SetVar(uVarName="REPMAN_BASELOCALDIR", oVarValue=(Globals.oPathTmp + "RepManager").string)
Globals.oTheScreen.AddActionToQueue(aActions=[{'string': 'call Create Repository'}])
return
def CreateZipVarArray(self) -> None:
SetVar(uVarName="REPMAN_ZIPCNTFILES", oVarValue= str(len(self.aZipFiles)))
Var_DelArray("REPMAN_ZIPSOUREFILENAMES[]")
Var_DelArray("REPMAN_ZIPDESTFILENAMES[]")
Var_DelArray("REPMAN_ZIPREMOVEPATH[]")
Var_DelArray("REPMAN_ZIPSKIPFILES[]")
Var_DelArray("REPMAN_ZIPTYPE[]")
i:int=0
for dZipFile in self.aZipFiles:
uIndex:str = str(i) + "]"
SetVar(uVarName="REPMAN_ZIPSOURCEFILENAMES[" + uIndex ,oVarValue=dZipFile['filename'])
SetVar(uVarName="REPMAN_ZIPDESTFILENAMES[" + uIndex ,oVarValue=dZipFile['dstfilename'])
SetVar(uVarName="REPMAN_ZIPREMOVEPATH[" + uIndex ,oVarValue=dZipFile['removepath'])
uSkipFiles:str = dZipFile.get('skipfiles',None)
if uSkipFiles is not None:
SetVar(uVarName="REPMAN_ZIPSKIPFILES[" + uIndex, oVarValue=dZipFile['skipfiles'])
SetVar(uVarName="REPMAN_ZIPTYPE[" + uIndex,oVarValue= "folder")
else:
SetVar(uVarName="REPMAN_ZIPTYPE[" + uIndex,oVarValue= "file")
i += 1
# noinspection PyMethodMayBeStatic
def CreateRepVarArray(self,uBaseLocalDir:str) -> None:
aLocalFiles:List[str] = cPath(uBaseLocalDir).GetFileList(bSubDirs=True, bFullPath=True)
SetVar(uVarName="REPMAN_LOCALBASENAME", oVarValue=uBaseLocalDir)
SetVar(uVarName="REPMAN_CNTFILES", oVarValue= str(len(aLocalFiles)))
Var_DelArray("REPMAN_LOCALFILENAMES[]")
i:int=0
for uLocalFile in aLocalFiles:
uIndex:str = str(i) + "]"
SetVar(uVarName="REPMAN_LOCALFILENAMES[" + uIndex ,oVarValue=uLocalFile)
i += 1
|
etrombly/snort-rule-tool
|
snort-rule-tool.py
|
#!/usr/bin/env python2
import sys
import math
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5 import QtCore
from mainwindow import Ui_MainWindow
from scapy.all import *
""" dump any string, ascii or encoded, to formatted hex output """
def dumpString(src, length=16):
FILTER = ''.join([(len(repr(chr(x))) == 3) and chr(x) or '.' for x in range(256)])
result = []
for i in xrange(0, len(src), length):
chars = src[i:i+length]
hex = ' '.join(["%02x" % ord(x) for x in chars])
printable = ''.join(["%s" % ((ord(x) <= 127 and FILTER[ord(x)]) or '.') for x in chars])
result.append(["%-*s" % (length*3, hex), "%s" % (printable,)])
return result
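# For example (illustrative), dumpString("GET /") returns one row per 16 input
# bytes, here [["47 45 54 20 2f" padded to 48 columns, "GET /"]]: the hex dump
# and its printable rendering side by side.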
class Snort(QtWidgets.QMainWindow):
def __init__(self):
super(Snort, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.show()
self.index = 0
self.comboBoxes = [self.ui.srcCombo, self.ui.srcPortCombo, self.ui.destCombo, self.ui.destPortCombo]
self.defaultFmt = self.ui.hexColumn.currentCharFormat()
#setup scrollbars to be synced
self.hexSlider = self.ui.hexColumn.verticalScrollBar()
self.textSlider = self.ui.textColumn.verticalScrollBar()
self.hexSlider.valueChanged.connect(self.syncScroll)
self.textSlider.valueChanged.connect(self.syncScroll)
self.ui.packetBox.valueChanged.connect(self.changePacket)
self.ui.actionOpen.triggered.connect(self.openPCAP)
self.ui.contentEdit.textChanged.connect(self.contentChanged)
self.ui.flowCheck.stateChanged.connect(self.flowChecked)
self.ui.streamButton.clicked.connect(self.assembleStream)
self.ui.flowCombo.currentTextChanged.connect(self.buildRule)
self.ui.actionCombo.currentTextChanged.connect(self.buildRule)
self.ui.protoCombo.currentTextChanged.connect(self.buildRule)
self.ui.srcCombo.currentTextChanged.connect(self.buildRule)
self.ui.srcPortCombo.currentTextChanged.connect(self.buildRule)
self.ui.dirCombo.currentTextChanged.connect(self.buildRule)
self.ui.destCombo.currentTextChanged.connect(self.buildRule)
self.ui.destPortCombo.currentTextChanged.connect(self.buildRule)
self.streams = []
def syncScroll(self, value):
self.textSlider.setValue(value)
self.hexSlider.setValue(value)
def changePacket(self):
self.index = self.ui.packetBox.value() - 1
self.readPacket()
def findStreams(self):
tcp_streams = self.packets.filter(lambda p: p.haslayer(TCP))
self.streams = []
for syn in tcp_streams.filter(lambda p: p[TCP].flags & 0x02):
            for synack in tcp_streams.filter(lambda p: (p[TCP].flags & 0x12) == 0x12 and p[TCP].ack == syn.seq + 1):
ack = tcp_streams.filter(lambda p: p[TCP].flags & 0x10 and p[TCP].ack == synack.seq + 1)
if ack:
srcport = syn[TCP].sport
dstport = syn[TCP].dport
L3 = IP
try:
#try underlayer
foot = syn[TCP].underlayer
srcip = foot.src
dstip = foot.dst
if type(foot) == IPv6:
L3 = IPv6
except:
#try other, but upper layer
if IPv6 in syn:
srcip = syn[IPv6].src
dstip = syn[IPv6].dst
L3 = IPv6
                    elif IP in syn:
srcip = syn[IP].src
dstip = syn[IP].dst
else:
continue
ip_pair = (srcip,dstip)
port_pair = (srcport,dstport)
filtered_stream = tcp_streams.filter(lambda p: p[TCP].dport in port_pair and \
p[TCP].sport in port_pair and \
p[L3].src in ip_pair and \
p[L3].dst in ip_pair)
assembled_stream = [syn,synack,ack[0]]
while True:
client_next_seq = assembled_stream[-1][TCP].seq
server_next_seq = assembled_stream[-1][TCP].ack
next = filtered_stream.filter(lambda p: p.seq in (client_next_seq,server_next_seq) and \
not p in assembled_stream)
if not next:
break
for pkt in next:
assembled_stream.append(pkt)
self.streams.append(PacketList(assembled_stream))
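    # findStreams() reassembles each TCP session naively: pair SYN, SYN-ACK and
    # ACK by their seq/ack numbers, filter the capture down to that address and
    # port pair, then chain packets whose seq matches the last packet's seq or
    # ack until no successor is found.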
def assembleStream(self):
pkt = self.packets[self.index]
self.ui.hexColumn.clear()
self.ui.textColumn.clear()
        thisStream = None
        for stream in self.streams:
            if pkt in stream:
                thisStream = stream
                break
        if thisStream is None:
            return
streamText = "".join([str(packet) for packet in thisStream])
payload = dumpString(streamText)
for line in payload:
self.ui.hexColumn.appendPlainText(line[0])
self.ui.textColumn.appendPlainText(line[1])
def readPacket(self):
self.clearAll()
pkt = self.packets[self.index]
payload = dumpString(str(pkt))
for line in payload:
self.ui.hexColumn.appendPlainText(line[0])
self.ui.textColumn.appendPlainText(line[1])
if IP in pkt:
self.ui.protoCombo.setCurrentText("ip")
self.ui.srcCombo.insertItem(0, pkt[IP].src)
self.ui.destCombo.insertItem(0,pkt[IP].dst)
srcip = pkt[IP].src
if IPv6 in pkt:
self.ui.protoCombo.setCurrentText("ip")
self.ui.srcCombo.insertItem(0, pkt[IPv6].src)
self.ui.destCombo.insertItem(0,pkt[IPv6].dst)
srcip = pkt[IPv6].src
if TCP in pkt:
self.ui.protoCombo.setCurrentText("tcp")
self.ui.srcPortCombo.insertItem(0, str(pkt[TCP].sport))
self.ui.destPortCombo.insertItem(0, str(pkt[TCP].dport))
for stream in self.streams:
if pkt in stream:
self.ui.flowCheck.setChecked(True)
self.ui.streamButton.setEnabled(True)
client = stream[0]
if IP in client:
layer = IP
else:
layer = IPv6
if srcip == client[layer].src:
self.ui.flowCombo.setCurrentText("to_server")
elif srcip == client[layer].dst:
self.ui.flowCombo.setCurrentText("to_client")
if UDP in pkt:
self.ui.protoCombo.setCurrentText("udp")
self.ui.srcPortCombo.insertItem(0, str(pkt[UDP].sport))
self.ui.destPortCombo.insertItem(0, str(pkt[UDP].dport))
if ICMP in pkt:
self.ui.protoCombo.setCurrentText("icmp")
for combo in self.comboBoxes:
combo.setCurrentIndex(0)
self.buildRule()
self.textSlider.setValue(0)
def openPCAP(self):
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open PCAP',filter='Packet Captures (*.cap *.pcap)')
if filename:
self.file = filename[0]
self.packets = rdpcap(self.file)
self.findStreams()
self.ui.packetBox.setRange(1, len(self.packets))
self.readPacket()
def contentChanged(self):
content = self.ui.contentEdit.text()
hexContent = self.ui.hexColumn.toPlainText().replace("\n", "")
textContent = self.ui.textColumn.toPlainText().replace("\n", "")
if self.ui.nocaseCheck.isChecked():
content = content.lower()
textContent = textContent.lower()
cursor = QtGui.QTextCursor(self.ui.hexColumn.document())
cursor.setPosition(0, QtGui.QTextCursor.MoveAnchor)
cursor.setPosition(self.ui.hexColumn.document().characterCount() - 1, QtGui.QTextCursor.KeepAnchor)
cursor.setCharFormat(self.defaultFmt)
cursor2 = QtGui.QTextCursor(self.ui.textColumn.document())
cursor2.setPosition(0, QtGui.QTextCursor.MoveAnchor)
cursor2.setPosition(self.ui.textColumn.document().characterCount() - 1, QtGui.QTextCursor.KeepAnchor)
cursor2.setCharFormat(self.defaultFmt)
matchPointer = 0
endPointer = 0
start = 0
end = 0
match = False
origContent = content
while content:
if content.startswith("|"):
if content.count("|") > 1:
content = content[1:]
index = content.index("|")
search = content[0:index]
content = content[index + 1:]
else:
search = content[1:]
content = None
if search and \
(not match and search in hexContent[endPointer:]) or \
(match and hexContent[endPointer:endPointer + len(search)] == search):
if not match:
end = hexContent[endPointer:].index(search) + len(search) + endPointer
start = hexContent[endPointer:].index(search) + endPointer
match = True
matchPointer = end
else:
end = end + len(search)
endPointer = end
elif match:
content = origContent
match = False
start = 0
end = 0
endPointer = matchPointer
matchPointer = 0
else:
break
else:
if "|" in content:
search = content[0:content.index("|")]
content = content[content.index("|"):]
else:
search = content
content = None
textPointer = int(math.ceil(endPointer / 3.0))
if search and \
(not match and search in textContent[textPointer:]) or \
(match and textContent[textPointer:len(search) + textPointer] == search):
if not match:
end = ((textContent[textPointer:].index(search) + len(search)) * 3) + endPointer
start = (textContent[textPointer:].index(search) * 3) + endPointer
match = True
matchPointer = end
else:
end = end + (len(search) * 3) + 1
endPointer = end
elif match:
content = origContent
match = False
start = 0
end = 0
endPointer = matchPointer
matchPointer = 0
else:
break
if match:
start = start + (start / 47)
end = end + (end / 47)
fmt = QtGui.QTextCharFormat()
fmt.setForeground(QtCore.Qt.red)
cursor.setPosition(start, QtGui.QTextCursor.MoveAnchor)
cursor.setPosition(end, QtGui.QTextCursor.KeepAnchor)
cursor.setCharFormat(fmt)
cursor2.setPosition(start / 3, QtGui.QTextCursor.MoveAnchor)
cursor2.setPosition(math.ceil(end / 3.0), QtGui.QTextCursor.KeepAnchor)
cursor2.setCharFormat(fmt)
self.ui.depthEdit.setText(str(int(math.ceil(end / 3.0))))
self.ui.offsetEdit.setText(str(start / 3))
def clearAll(self):
for combo in self.comboBoxes:
combo.clear()
combo.addItem("any")
self.ui.destPortCombo.addItem("any")
self.ui.hexColumn.clear()
self.ui.textColumn.clear()
self.ui.ruleText.clear()
self.ui.contentEdit.clear()
self.ui.flowCheck.setChecked(False)
self.ui.flowCombo.setCurrentText("established")
self.ui.flowCombo.setEnabled(False)
self.ui.streamButton.setEnabled(False)
self.ui.depthCheck.setChecked(False)
self.ui.depthEdit.clear()
self.ui.depthEdit.setEnabled(False)
self.ui.offsetCheck.setChecked(False)
self.ui.offsetEdit.clear()
self.ui.offsetEdit.setEnabled(False)
self.ui.distanceCheck.setChecked(False)
self.ui.distanceEdit.clear()
self.ui.distanceEdit.setEnabled(False)
self.ui.withinCheck.setChecked(False)
self.ui.withinEdit.clear()
self.ui.withinEdit.setEnabled(False)
self.ui.nocaseCheck.setChecked(False)
def flowChecked(self):
self.ui.flowCombo.setEnabled(self.ui.flowCheck.isChecked())
self.buildRule()
def buildRule(self):
self.ui.ruleText.clear()
options = ""
if self.ui.flowCheck.isChecked():
options += "flow: %s;" % (self.ui.flowCombo.currentText(), )
rule = "%s %s %s %s %s %s %s {%s}" % (
self.ui.actionCombo.currentText(),
self.ui.protoCombo.currentText(),
self.ui.srcCombo.currentText(),
self.ui.srcPortCombo.currentText(),
self.ui.dirCombo.currentText(),
self.ui.destCombo.currentText(),
self.ui.destPortCombo.currentText(),
options)
self.ui.ruleText.appendPlainText(rule)
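# Example rule text produced by buildRule (illustrative values):
#   alert tcp 192.168.1.5 any -> 10.0.0.2 80 {flow: to_server;}
# The braces come from the format string above; classic Snort syntax wraps the
# options in parentheses instead.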
def main():
app = QtWidgets.QApplication(sys.argv)
snort = Snort()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
elainenaomi/sciwonc-dataflow-examples
|
sbbd2016/experiments/1-postgres/3_workflow_full_10files_primary_nosh_nors_annot_with_proj_3s/pegasus.bDkvI/pegasus-4.6.0/lib/python2.7/dist-packages/Pegasus/tools/kickstart_parser.py
|
#!/usr/bin/env python
"""
Pegasus utility functions for parsing a kickstart output file and returning the requested information
"""
##
# Copyright 2007-2010 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# Revision : $Revision: 2012 $
# Import Python modules
from xml.parsers import expat
import re
import sys
import logging
import traceback
import os
# Regular expressions used in the kickstart parser
re_parse_props = re.compile(r'(\S+)\s*=\s*([^",]+)')
re_parse_quoted_props = re.compile(r'(\S+)\s*=\s*"([^"]+)"')
logger = logging.getLogger(__name__)
class Parser:
"""
This class is used to parse a kickstart output file, and return
requested information.
"""
def __init__(self, filename):
"""
This function initializes the Parser class with the kickstart
output file that should be parsed.
"""
self._kickstart_output_file = filename
self._parsing_job_element = False
self._parsing_arguments = False
self._parsing_main_job = False
self._parsing_machine = False
self._parsing_stdout = False
self._parsing_stderr = False
self._parsing_data = False
self._parsing_cwd = False
self._parsing_final_statcall = False
self._record_number = 0
self._arguments = []
self._stdout = ""
self._stderr = ""
self._cwd = ""
self._lfn = "" # filename parsed from statcall record
self._keys = {}
self._ks_elements = {}
self._fh = None
self._open_error = False
def open(self):
"""
This function opens a kickstart output file.
"""
try:
self._fh = open(self._kickstart_output_file)
except:
# Error opening file
self._fh = None
self._open_error = True
return False
# Open succeeded
self._open_error = False
return True
def close(self):
"""
This function closes the kickstart output file.
"""
try:
self._fh.close()
except:
return False
return True
def read_record(self):
"""
This function reads an invocation record from the kickstart
output file. We also look for the struct at the end of a file
containing multiple records. It returns a string containing
the record, or None if it is not found.
"""
buffer = ""
#valid token that is parsed
token = ""
self._record_number += 1
logger.debug( "Started reading record number %d from kickstart file %s" %( self._record_number, self._kickstart_output_file))
# First, we find the beginning <invocation xmlns....
while True:
line = self._fh.readline()
if line == '':
# End of file, record not found
return None
if line.find("<invocation") != -1:
token = "<invocation"
break
if ( line.find("[cluster-task") != -1 ):
token = "[cluster-task"
break
if ( line.find("[cluster-summary") != -1 ):
token = "[cluster-summary"
break
if ( line.find("[seqexec-task") != -1 ):
#deprecated token
token = "[seqexec-task"
break
if ( line.find("[seqexec-summary") != -1 ):
#deprecated token
token = "[seqexec-summary"
break
# Found something!
#if line.find("<invocation") >= 0:
if token == "<invocation" :
# Found invocation record
start = line.find("<invocation")
buffer = line[start:]
end = buffer.find("</invocation>")
# Check if we have everything in a single line
if end >= 0:
end = end + len("</invocation>")
return buffer[:end]
#elif line.find("[seqexec-summary") >= 0:
elif ( token == "[cluster-summary" or token == "[seqexec-summary" ):
# Found line with cluster jobs summary
start = line.find(token)
buffer = line[start:]
end = buffer.find("]")
if end >= 0:
end = end + len("]")
return buffer[:end]
# clustered record should be in a single line!
logger.warning("%s: %s line is malformed... ignoring it..." % (self._kickstart_output_file, token ))
return ""
#elif line.find("[seqexec-task") >= 0:
elif ( token == "[cluster-task" or token == "[seqexec-task" ):
# Found line with task information
start = line.find( token )
buffer = line[start:]
end = buffer.find("]")
if end >= 0:
end = end + len("]")
return buffer[:end]
# task record should be in a single line!
logger.warning("%s: %s line is malformed... ignoring it..." % (self._kickstart_output_file, token))
return ""
else:
return ""
# Ok, now continue reading the file until we get a full record
buffer = [buffer]
while True:
line = self._fh.readline()
if line == '':
# End of file, record not found
return None
#buffer = buffer + line
buffer.append( line )
if line.find("</invocation>") >= 0:
break
# Now, we got it, let's make sure
end = line.find("</invocation>")
if end == -1:
return ""
#end = end + len("</invocation>")
invocation = "".join(buffer)
#print invocation
logger.debug( "Finished reading record number %d from kickstart file %s" %( self._record_number, self._kickstart_output_file))
return invocation
#return buffer[:end]
def is_invocation_record(self, buffer=''):
"""
Returns True if buffer contains an invocation record.
"""
if buffer.find("<invocation") == -1:
return False
return True
def is_task_record(self, buffer=''):
"""
Returns True if buffer contains a task record.
"""
if ( buffer.find("[seqexec-task") != -1 or buffer.find( "[cluster-task" ) != -1 ):
return True
return False
def is_clustered_record(self, buffer=''):
"""
Returns True if buffer contains a clustered record.
"""
if ( buffer.find("[seqexec-summary") != -1 or buffer.find( "[cluster-summary" ) != -1):
return True
return False
def start_element(self, name, attrs):
"""
Function called by the parser every time a new element starts
"""
# Keep track if we are parsing the main job element
if name == "mainjob":
self._parsing_main_job = True
if name == "machine":
self._parsing_machine = True
# Keep track if we are inside one of the job elements
if (name == "setup" or name == "prejob" or
name == "mainjob" or name == "postjob" or name == "cleanup"):
self._parsing_job_element = True
if name == "argument-vector" and name in self._ks_elements:
# Start parsing arguments
self._parsing_arguments = True
elif name == "cwd" and name in self._ks_elements:
# Start parsing cwd
self._parsing_cwd = True
elif name == "data":
# Start parsing data for stdout and stderr output
self._parsing_data = True
elif name == "file" and name in self._ks_elements:
if self._parsing_main_job == True :
# Special case for name inside the mainjob element (will change this later)
for my_element in self._ks_elements[name]:
if my_element in attrs:
self._keys[my_element] = attrs[my_element]
elif name == "ram" and name in self._ks_elements:
if self._parsing_machine == True:
# Special case for ram inside the machine element (will change this later)
for my_element in self._ks_elements[name]:
if my_element in attrs:
self._keys[my_element] = attrs[my_element]
elif name == "uname" and name in self._ks_elements:
if self._parsing_machine == True:
# Special case for uname inside the machine element (will change this later)
for my_element in self._ks_elements[name]:
if my_element in attrs:
self._keys[my_element] = attrs[my_element]
elif name == "statcall":
if "id" in attrs:
if attrs["id"] == "stdout" and "stdout" in self._ks_elements:
self._parsing_stdout = True
elif attrs["id"] == "stderr" and "stderr" in self._ks_elements:
self._parsing_stderr = True
elif attrs["id"] == "final" :
self._parsing_final_statcall = True
self._lfn = attrs["lfn"]
elif name == "statinfo":
if self._parsing_final_statcall is True:
statinfo = {}
for my_element in self._ks_elements[name]:
if my_element in attrs:
statinfo[my_element] = attrs[my_element]
if "outputs" not in self._keys:
self._keys[ "outputs" ] = {} #a dictionary indexed by lfn
lfn = self._lfn
if lfn is None or not statinfo:
logger.warning( "Malformed/Empty stat record for output lfn %s %s" %(lfn, statinfo))
self._keys["outputs"][lfn] = statinfo
elif name == "usage" and name in self._ks_elements:
if self._parsing_job_element:
# Special case for handling utime and stime, which need to be added
for my_element in self._ks_elements[name]:
if my_element in attrs:
if my_element in self._keys:
try:
self._keys[my_element] = self._keys[my_element] + float(attrs[my_element])
except ValueError:
logger.warning("cannot convert element %s to float!" % (my_element))
else:
try:
self._keys[my_element] = float(attrs[my_element])
except ValueError:
logger.warning("cannot convert element %s to float!" % (my_element))
else:
# For all other elements, check if we want them
if name in self._ks_elements:
for my_element in self._ks_elements[name]:
if my_element in attrs:
self._keys[my_element] = attrs[my_element]
def end_element(self, name):
"""
Function called by the parser whenever we reach the end of an element
"""
# Stop parsing argument-vector and cwd when we reach the end of those elements
if name == "argument-vector":
self._parsing_arguments = False
elif name == "cwd":
self._parsing_cwd = False
elif name == "mainjob":
self._parsing_main_job = False
elif name == "machine":
self._parsing_machine = False
elif name == "statcall":
if self._parsing_stdout == True:
self._parsing_stdout = False
if self._parsing_stderr == True:
self._parsing_stderr = False
if self._parsing_final_statcall == True:
self._parsing_final_statcall = False
elif name == "data":
self._parsing_data = False
# Now, see if we left one of the job elements
if (name == "setup" or name == "prejob" or
name == "mainjob" or name == "postjob" or name == "cleanup"):
self._parsing_job_element = False
def char_data(self, data=''):
"""
Function called by the parser whenever there's character data in an element
"""
if self._parsing_cwd == True:
self._cwd += data
if self._parsing_arguments == True:
self._arguments.append(data.strip())
if self._parsing_stdout == True and self._parsing_data == True:
self._stdout += data
if self._parsing_stderr == True and self._parsing_data == True:
self._stderr += data
def parse_invocation_record(self, buffer=''):
"""
Parses the xml record in buffer, returning the desired keys.
"""
# Initialize variables
self._parsing_arguments = False
self._parsing_main_job = False
self._parsing_machine = False
self._parsing_stdout = False
self._parsing_stderr = False
self._parsing_data = False
self._parsing_cwd = False
self._arguments = []
self._stdout = ""
self._stderr = ""
self._cwd = ""
self._keys = {}
# Check if we have an invocation record
if self.is_invocation_record(buffer) == False:
return self._keys
# Add invocation key to our response
self._keys["invocation"] = True
# Prepend XML header
buffer = '<?xml version="1.0" encoding="ISO-8859-1"?>\n' + buffer
# Create parser
self._my_parser = expat.ParserCreate()
self._my_parser.StartElementHandler = self.start_element
self._my_parser.EndElementHandler = self.end_element
self._my_parser.CharacterDataHandler = self.char_data
# Parse everything!
output = self._my_parser.Parse(buffer)
# Add cwd, arguments, stdout, and stderr to keys
if "cwd" in self._ks_elements:
self._keys["cwd"] = self._cwd
if "argument-vector" in self._ks_elements:
self._keys["argument-vector"] = " ".join(self._arguments)
if "stdout" in self._ks_elements:
self._keys["stdout"] = self._stdout
if "stderr" in self._ks_elements:
self._keys["stderr"] = self._stderr
return self._keys
def parse_clustered_record(self, buffer=''):
"""
Parses the clustered record in buffer, returning all found keys
"""
self._keys = {}
# Check if we have an invocation record
if self.is_clustered_record(buffer) == False:
return self._keys
# Add clustered key to our response
self._keys["clustered"] = True
# Parse all quoted properties
for my_key, my_val in re_parse_quoted_props.findall(buffer):
self._keys[my_key] = my_val
# And add unquoted properties as well
for my_key, my_val in re_parse_props.findall(buffer):
self._keys[my_key] = my_val
return self._keys
def parse_task_record(self, buffer=''):
"""
Parses the task record in buffer, returning all found keys
"""
self._keys = {}
# Check if we have an invocation record
if self.is_task_record(buffer) == False:
return self._keys
# Add task key to our response
self._keys["task"] = True
# Parse all quoted properties
for my_key, my_val in re_parse_quoted_props.findall(buffer):
self._keys[my_key] = my_val
# And add unquoted properties as well
for my_key, my_val in re_parse_props.findall(buffer):
self._keys[my_key] = my_val
return self._keys
def parse(self, keys_dict, tasks=True, clustered=True):
"""
This function parses the kickstart output file, looking for
the keys specified in the keys_dict variable. It returns a
list of dictionaries containing the found keys. Look at the
parse_stampede function for details about how to pass keys
using the keys_dict structure. The function will return an
empty list if no records are found or if an error happens.
"""
my_reply = []
# Place keys_dict in the _ks_elements
self._ks_elements = keys_dict
# Try to open the file
if self.open() == False:
return my_reply
logger.debug( "Started reading records from kickstart file %s" %(self._kickstart_output_file))
self._record_number = 0
# Read first record
my_buffer = self.read_record()
# Loop while we still have record to read
while my_buffer is not None:
if self.is_invocation_record(my_buffer) == True:
# We have an invocation record, parse it!
try:
my_record = self.parse_invocation_record(my_buffer)
except:
logger.warning("KICKSTART-PARSE-ERROR --> error parsing invocation record in file %s"
% (self._kickstart_output_file))
logger.warning(traceback.format_exc())
# Found error parsing this file, return empty reply
my_reply = []
# Finish the loop
break
my_reply.append(my_record)
elif self.is_clustered_record(my_buffer) == True:
# Check if we want clustered records too
if clustered:
# Clustered records are seqexec summary records for clustered jobs
# We have a clustered record, parse it!
my_reply.append(self.parse_clustered_record(my_buffer))
elif self.is_task_record(my_buffer) == True:
# Check if we want task records too
if tasks:
# We have a clustered record, parse it!
my_reply.append(self.parse_task_record(my_buffer))
else:
# We have something else, this shouldn't happen!
# Just skip it
pass
# Read next record
my_buffer = self.read_record()
# Lastly, close the file
self.close()
return my_reply
def parse_stampede(self):
"""
This function works similarly to the parse function above,
but does not require a keys_dict parameter as it uses a
built-in list of keys specifically used in the Stampede
schema.
"""
stampede_elements = {"invocation": ["hostname", "resource", "user", "hostaddr", "transformation", "derivation"],
"mainjob": ["duration", "start"],
"usage": ["utime", "stime"],
"ram": ["total"],
"uname": ["system", "release", "machine"],
"file": ["name"],
"status": ["raw"],
"regular": ["exitcode"],
"argument-vector": [],
"cwd": [],
"stdout": [],
"stderr": [],
"statinfo": ["lfn", "size", "ctime", "user" ]}
return self.parse(stampede_elements, tasks=True, clustered=True)
def parse_stdout_stderr(self):
"""
This function extracts the stdout and stderr from a kickstart output file.
It returns an array containing the output for each task in a job.
"""
stdout_stderr_elements = {"invocation": ["hostname", "resource", "derivation", "transformation"],
"file": ["name"],
"regular": ["exitcode"],
"failure": ["error"],
"argument-vector": [],
"cwd": [],
"stdout": [],
"stderr": []}
return self.parse(stdout_stderr_elements, tasks=False, clustered=False)
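# A sketch of a custom keys_dict (hypothetical, mirroring the built-in
# dictionaries above): each key names an XML element of the kickstart record,
# the value lists the attributes to copy out, and an empty list marks an
# element whose character data is collected instead.
#
#     custom_keys = {"invocation": ["hostname", "user"],
#                    "usage": ["utime", "stime"],
#                    "cwd": []}
#     records = Parser("job.out.000").parse(custom_keys, tasks=False,
#                                           clustered=False)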
if __name__ == "__main__":
# Let's run a test!
print "Testing kickstart output file parsing..."
# Make sure we have an argument
if len(sys.argv) < 2:
print "For testing, please give a kickstart output filename!"
sys.exit(1)
# Create parser class
p = Parser(sys.argv[1])
# Parse file according to the Stampede schema
output = p.parse_stampede()
# Print output
for record in output:
print record
|
flexi-framework/hopr
|
tools/blockgridgenerator/main.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys,os
from PyQt5 import QtCore, QtGui, QtWidgets, uic
from model import MainModel
from view import MainView
class App(QtWidgets.QApplication):
def __init__(self, scriptpath, sys_argv):
super(App, self).__init__(sys_argv)
self.model = MainModel()
self.main_view = MainView(self.model, scriptpath)
self.main_view.show() #Maximized()
self.model.gridChanged.emit()
if __name__ == '__main__':
scriptpath = os.path.dirname(os.path.abspath(sys.argv[0]))
app = App(scriptpath, sys.argv)
sys.exit(app.exec_())
|
bkaganyildiz/StreamBasedNotification
|
StreamBasedNotifs/capture/consumer.py
|
from channels.auth import channel_session_user_from_http
from .models import Stream, Notification
import redis
import ast
from .task import sendNotifications, send_notifications
from channels import Group
import json
redis_con = redis.Redis('demo.scorebeyond.com', 8007)
subs = redis_con.pubsub()
subs.subscribe('test')
@channel_session_user_from_http
def ws_connect(message):
'''Capture redis stream and save it into database'''
Group('stream').add(message.reply_channel)
for message in subs.listen():
if message['type'] == "message":
data1 = ast.literal_eval(message['data'])
print data1['name']
if Notification.objects.filter(event_name=data1['name']):
notif = Notification.objects.get(event_name=data1['name'])
if notif.no_delay:
send_notifications(data1)
else:
sendNotifications(data1, capture=notif.delay)
if not Stream.objects.filter(name=data1['name']):
type_list = []
if not data1['info']:
Stream.objects.create(name=data1['name'], info="")
else:
for k, v in data1['info'].iteritems():
type_list.append(k+":"+type(v).__name__)
Stream.objects.create(name=data1['name'], info=','.join(type_list))
Group('stream').send({
'text': json.dumps({
'event_name': data1['name'],
'blueprint': ','.join(type_list),
})
})
else:
print message
|
alvcarmona/efficiencycalculatorweb
|
effcalculator/effcalculator/urls.py
|
"""effcalculator URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls import url, include
from django.contrib.auth import views as auth_views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api/', include('api.urls')),
url(r'^', include('frontend.urls'))
]
|
joaormatos/anaconda
|
Anaconda/pyglet/canvas/win32.py
|
#!/usr/bin/python
# $Id:$
from base import Display, Screen, ScreenMode, Canvas
from pyglet.libs.win32 import _kernel32, _user32, types, constants
from pyglet.libs.win32.constants import *
from pyglet.libs.win32.types import *
class Win32Display(Display):
def get_screens(self):
screens = []
def enum_proc(hMonitor, hdcMonitor, lprcMonitor, dwData):
r = lprcMonitor.contents
width = r.right - r.left
height = r.bottom - r.top
screens.append(
Win32Screen(self, hMonitor, r.left, r.top, width, height))
return True
enum_proc_type = WINFUNCTYPE(BOOL, HMONITOR, HDC, POINTER(RECT), LPARAM)
enum_proc_ptr = enum_proc_type(enum_proc)
_user32.EnumDisplayMonitors(NULL, NULL, enum_proc_ptr, 0)
return screens
class Win32Screen(Screen):
_initial_mode = None
def __init__(self, display, handle, x, y, width, height):
super(Win32Screen, self).__init__(display, x, y, width, height)
self._handle = handle
def get_matching_configs(self, template):
canvas = Win32Canvas(self.display, 0, _user32.GetDC(0))
configs = template.match(canvas)
# XXX deprecate config's being screen-specific
for config in configs:
config.screen = self
return configs
def get_device_name(self):
info = MONITORINFOEX()
info.cbSize = sizeof(MONITORINFOEX)
_user32.GetMonitorInfoW(self._handle, byref(info))
return info.szDevice
def get_modes(self):
device_name = self.get_device_name()
i = 0
modes = []
while True:
mode = DEVMODE()
mode.dmSize = sizeof(DEVMODE)
r = _user32.EnumDisplaySettingsW(device_name, i, byref(mode))
if not r:
break
modes.append(Win32ScreenMode(self, mode))
i += 1
return modes
def get_mode(self):
mode = DEVMODE()
mode.dmSize = sizeof(DEVMODE)
_user32.EnumDisplaySettingsW(self.get_device_name(),
ENUM_CURRENT_SETTINGS,
byref(mode))
return Win32ScreenMode(self, mode)
def set_mode(self, mode):
assert mode.screen is self
if not self._initial_mode:
self._initial_mode = self.get_mode()
r = _user32.ChangeDisplaySettingsExW(self.get_device_name(),
byref(mode._mode),
None,
CDS_FULLSCREEN,
None)
if r == DISP_CHANGE_SUCCESSFUL:
self.width = mode.width
self.height = mode.height
def restore_mode(self):
if self._initial_mode:
self.set_mode(self._initial_mode)
class Win32ScreenMode(ScreenMode):
def __init__(self, screen, mode):
super(Win32ScreenMode, self).__init__(screen)
self._mode = mode
self.width = mode.dmPelsWidth
self.height = mode.dmPelsHeight
self.depth = mode.dmBitsPerPel
self.rate = mode.dmDisplayFrequency
class Win32Canvas(Canvas):
def __init__(self, display, hwnd, hdc):
super(Win32Canvas, self).__init__(display)
self.hwnd = hwnd
self.hdc = hdc
|
isislovecruft/switzerland
|
switzerland/lib/shrunk_scapy/data.py
|
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
import re
from dadict import DADict
from error import log_loading
############
## Consts ##
############
ETHER_ANY = "\x00"*6
ETHER_BROADCAST = "\xff"*6
ETH_P_ALL = 3
ETH_P_IP = 0x800
ETH_P_ARP = 0x806
ETH_P_IPV6 = 0x86dd
# From net/if_arp.h
ARPHDR_ETHER = 1
ARPHDR_METRICOM = 23
ARPHDR_PPP = 512
ARPHDR_LOOPBACK = 772
ARPHDR_TUN = 65534
# From net/ipv6.h on Linux (+ Additions)
IPV6_ADDR_UNICAST = 0x01
IPV6_ADDR_MULTICAST = 0x02
IPV6_ADDR_CAST_MASK = 0x0F
IPV6_ADDR_LOOPBACK = 0x10
IPV6_ADDR_GLOBAL = 0x00
IPV6_ADDR_LINKLOCAL = 0x20
IPV6_ADDR_SITELOCAL = 0x40 # deprecated since Sept. 2004 by RFC 3879
IPV6_ADDR_SCOPE_MASK = 0xF0
#IPV6_ADDR_COMPATv4 = 0x80 # deprecated; i.e. ::/96
#IPV6_ADDR_MAPPED = 0x1000 # i.e.; ::ffff:0.0.0.0/96
IPV6_ADDR_6TO4 = 0x0100 # Added to have more specific info (should be 0x0101 ?)
IPV6_ADDR_UNSPECIFIED = 0x10000
MTU = 1600
# file parsing to get some values :
def load_protocols(filename):
spaces = re.compile("[ \t]+|\n")
dct = DADict(_name=filename)
try:
for l in open(filename):
try:
shrp = l.find("#")
if shrp >= 0:
l = l[:shrp]
l = l.strip()
if not l:
continue
lt = tuple(re.split(spaces, l))
if len(lt) < 2 or not lt[0]:
continue
dct[lt[0]] = int(lt[1])
except Exception,e:
log_loading.info("Couldn't parse file [%s]: line [%r] (%s)" % (filename,l,e))
except IOError:
log_loading.info("Can't open /etc/protocols file")
return dct
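# Sketch of the expected /etc/protocols format (per the parsing above): a line
# such as "tcp    6    TCP    # transmission control protocol" should yield
# dct["tcp"] = 6 -- the comment is stripped and the first two
# whitespace-separated fields are kept.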
IP_PROTOS=load_protocols("/etc/protocols")
def load_ethertypes(filename):
spaces = re.compile("[ \t]+|\n")
dct = DADict(_name=filename)
try:
f=open(filename)
for l in f:
try:
shrp = l.find("#")
if shrp >= 0:
l = l[:shrp]
l = l.strip()
if not l:
continue
lt = tuple(re.split(spaces, l))
if len(lt) < 2 or not lt[0]:
continue
dct[lt[0]] = int(lt[1], 16)
except Exception,e:
log_loading.info("Couldn't parse file [%s]: line [%r] (%s)" % (filename,l,e))
f.close()
except IOError,msg:
pass
return dct
ETHER_TYPES=load_ethertypes("/etc/ethertypes")
def load_services(filename):
spaces = re.compile("[ \t]+|\n")
tdct=DADict(_name="%s-tcp"%filename)
udct=DADict(_name="%s-udp"%filename)
try:
f=open(filename)
for l in f:
try:
shrp = l.find("#")
if shrp >= 0:
l = l[:shrp]
l = l.strip()
if not l:
continue
lt = tuple(re.split(spaces, l))
if len(lt) < 2 or not lt[0]:
continue
if lt[1].endswith("/tcp"):
tdct[lt[0]] = int(lt[1].split('/')[0])
elif lt[1].endswith("/udp"):
udct[lt[0]] = int(lt[1].split('/')[0])
except Exception,e:
log_loading.warning("Couldn't file [%s]: line [%r] (%s)" % (filename,l,e))
f.close()
except IOError:
log_loading.info("Can't open /etc/services file")
return tdct,udct
TCP_SERVICES,UDP_SERVICES=load_services("/etc/services")
class ManufDA(DADict):
def fixname(self, val):
return val
def _get_manuf_couple(self, mac):
oui = ":".join(mac.split(":")[:3]).upper()
return self.__dict__.get(oui,(mac,mac))
def _get_manuf(self, mac):
return self._get_manuf_couple(mac)[1]
def _get_short_manuf(self, mac):
return self._get_manuf_couple(mac)[0]
def _resolve_MAC(self, mac):
oui = ":".join(mac.split(":")[:3]).upper()
if oui in self:
return ":".join([self[oui][0]]+ mac.split(":")[3:])
return mac
def load_manuf(filename):
try:
manufdb=ManufDA(_name=filename)
for l in open(filename):
try:
l = l.strip()
if not l or l.startswith("#"):
continue
oui,shrt=l.split()[:2]
i = l.find("#")
if i < 0:
lng=shrt
else:
lng = l[i+2:]
manufdb[oui] = shrt,lng
except Exception,e:
log_loading.warning("Couldn't parse one line from [%s] [%r] (%s)" % (filename, l, e))
except IOError:
#log_loading.warning("Couldn't open [%s] file" % filename)
pass
return manufdb
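# Sketch of the manuf file format this parser expects: a line like
# "00:00:0C\tCisco # CISCO SYSTEMS, INC." should produce
# manufdb["00:00:0C"] = ("Cisco", "CISCO SYSTEMS, INC.") -- short name from
# the second field, long name from the text following "# ".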
#####################
## knowledge bases ##
#####################
class KnowledgeBase:
def __init__(self, filename):
self.filename = filename
self.base = None
def lazy_init(self):
self.base = ""
def reload(self, filename = None):
if filename is not None:
self.filename = filename
oldbase = self.base
self.base = None
self.lazy_init()
if self.base is None:
self.base = oldbase
def get_base(self):
if self.base is None:
self.lazy_init()
return self.base
|
jjgomera/pychemqt
|
lib/EoS/Cubic/RKTwu.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
r"""Pychemqt, Chemical Engineering Process simulator
Copyright (C) 2009-2017, Juan José Gómez Romera <jjgomera@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>."""
from math import exp
from scipy.constants import R
from lib.EoS.Cubic.RK import RK
class RKTwu(RK):
r"""Equation of state of Redlich-Kwong with a modified alpha temperature
dependence by Twu, (1995) [1]_.
.. math::
\begin{array}[t]{l}
P = \frac{RT}{V-b}-\frac{a}{V\left(V+b\right)}\\
a = 0.427480263354\frac{R^2T_c^2}{P_c}\alpha\\
b = 0.086640349965\frac{RT_c}{P_c}\\
\alpha = alpha^{(0)} + \omega\left(\alpha^{(1)}-\alpha^{(0)}\right)\\
\alpha^{(0)} = T_r^{-0.201158} \exp{0.141599\left(1-T_r^{2.29528}
\right)}\\
\alpha^{(1)} = T_r^{-0.660145} \exp{0.500315\left(1-T_r^{2.63165}
\right)}\\
\end{array}
"""
__title__ = "Twu-Redlich-Kwong (1995)"
__status__ = "RKTwu"
__doi__ = {
"autor": "Twu, C.H., Coon, J.E., Cunningham, J.R.",
"title": "A New Generalized Alpha Function for a Cubic Equation of "
"State Part 2. Redlich-Kwong equation",
"ref": "Fluid Phase Equilibria 105 (1995) 61-69",
"doi": "10.1016/0378-3812(94)02602-w"},
def _lib(self, cmp, T):
"""Modified parameteres correlations"""
a = 0.42748023354*R**2*cmp.Tc**2/cmp.Pc
alfa = self._alpha(cmp, T)
b = 0.086640349965*R*cmp.Tc/cmp.Pc
return a*alfa, b
def _alpha(self, cmp, T):
"""Modified α expression"""
Tr = T/cmp.Tc
if Tr <= 1:
alpha0 = Tr**(-0.201158)*exp(0.141599*(1-Tr**2.29528)) # Eq 17
alpha1 = Tr**(-0.660145)*exp(0.500315*(1-Tr**2.63165)) # Eq 18
else:
alpha0 = Tr**(-1.10)*exp(0.441411*(1-Tr**(-1.30))) # Eq 19
alpha1 = Tr**(-2.31278)*exp(0.03258*(1-Tr**(-10.3128))) # Eq 20
# Eq 15
alpha = alpha0 + cmp.f_acent*(alpha1-alpha0)
return alpha
if __name__ == "__main__":
from lib.mezcla import Mezcla
mix = Mezcla(5, ids=[4], caudalMolar=1, fraccionMolar=[1])
eq = RKTwu(300, 9.9742e5, mix)
print('%0.0f %0.1f' % (eq.Vg.ccmol, eq.Vl.ccmol))
eq = RKTwu(300, 42.477e5, mix)
print('%0.1f' % (eq.Vl.ccmol))
|
gaeun/open-event-orga-server
|
app/views/users/export.py
|
from flask import Blueprint
from flask import flash
from flask import make_response, render_template
from flask_login import current_user
from markupsafe import Markup
from app.helpers.data_getter import DataGetter
from app.helpers.auth import AuthManager
from app.helpers.exporters.ical import ICalExporter
from app.helpers.exporters.pentabarfxml import PentabarfExporter
from app.helpers.exporters.xcal import XCalExporter
from app.helpers.permission_decorators import can_access
event_export = Blueprint('event_export', __name__, url_prefix='/events/<int:event_id>/export')
@event_export.route('/')
@can_access
def display_export_view(event_id):
event = DataGetter.get_event(event_id)
export_jobs = DataGetter.get_export_jobs(event_id)
user = current_user
if not AuthManager.is_verified_user():
flash(Markup("Your account is unverified. "
"Please verify by clicking on the confirmation link that has been emailed to you."
'<br>Did not get the email? Please <a href="/resend_email/" class="alert-link"> '
'click here to resend the confirmation.</a>'))
return render_template(
'gentelella/admin/event/export/export.html', event=event, export_jobs=export_jobs,
current_user=user
)
@event_export.route('/pentabarf.xml')
@can_access
def pentabarf_export_view(event_id):
response = make_response(PentabarfExporter.export(event_id))
response.headers["Content-Type"] = "application/xml"
response.headers["Content-Disposition"] = "attachment; filename=pentabarf.xml"
return response
@event_export.route('/calendar.ical')
@can_access
def ical_export_view(event_id):
response = make_response(ICalExporter.export(event_id))
response.headers["Content-Type"] = "text/calendar"
response.headers["Content-Disposition"] = "attachment; filename=calendar.ics"
return response
@event_export.route('/calendar.xcs')
@can_access
def xcal_export_view(event_id):
response = make_response(XCalExporter.export(event_id))
response.headers["Content-Type"] = "text/calendar"
response.headers["Content-Disposition"] = "attachment; filename=calendar.xcs"
return response
|
mm10ws/ImPy
|
ImAdd.py
|
__author__ = 'Mayur M'
import ImgIO
def add(image1, image2): # add two images together
if image1.width == image2.width and image1.height == image2.height:
return_red = []
return_green = []
return_blue = []
for i in range(0, len(image1.red)):
tmp_r = image1.red[i] + image2.red[i] # adding the RGB values
tmp_g = image1.green[i] + image2.green[i]
tmp_b = image1.blue[i] + image2.blue[i]
if 0 <= tmp_r <= 255:
return_red.append(tmp_r)
else:
return_red.append(tmp_r % 256) # wrap values around on 8-bit overflow
if 0 <= tmp_g <= 255:
return_green.append(tmp_g)
else:
return_green.append(tmp_g % 256) # wrap values around on 8-bit overflow
if 0 <= tmp_b <= 255:
return_blue.append(tmp_b)
else:
return_blue.append(tmp_b % 256) # wrap values around on 8-bit overflow
return return_red, return_green, return_blue
else:
print "Error: image dimensions do not match!"
def main(): # test case
print('start!!!!!')
ima = ImgIO.ImgIO()
imb = ImgIO.ImgIO()
ima.read_image("y.jpg")
imb.read_image("test1.png")
add_r, add_g, add_b = add(ima, imb)
imc = ImgIO.ImgIO()
imc.read_list(add_r, add_g, add_b, "final1.png", ima.width, ima.height)
imc.write_image("final1.png")
if __name__ == '__main__':
main()
|
junzis/pyModeS
|
pyModeS/decoder/allcall.py
|
"""
Decode all-call reply messages, with downlink format 11
"""
from pyModeS import common
def _checkdf(func):
"""Ensure downlink format is 11."""
def wrapper(msg):
df = common.df(msg)
if df != 11:
raise RuntimeError(
"Incorrect downlink format, expect 11, got {}".format(df)
)
return func(msg)
return wrapper
@_checkdf
def icao(msg):
"""Decode transponder code (ICAO address).
Args:
msg (str): 14 hexdigits string
Returns:
string: ICAO address
"""
return common.icao(msg)
@_checkdf
def interrogator(msg):
"""Decode interrogator identifier code.
Args:
msg (str): 14 hexdigits string
Returns:
int: interrogator identifier code
"""
# The CRC remainder contains the CL and IC fields: the top three bits are the CL field and the last four bits are the IC field.
remainder = common.crc(msg)
if remainder > 79:
IC = "corrupt IC"
elif remainder < 16:
IC="II"+str(remainder)
else:
IC="SI"+str(remainder-16)
return IC
@_checkdf
def capability(msg):
"""Decode transponder capability.
Args:
msg (str): 14 hexdigits string
Returns:
int, str: transponder capability, description
"""
msgbin = common.hex2bin(msg)
ca = common.bin2int(msgbin[5:8])
if ca == 0:
text = "level 1 transponder"
elif ca == 4:
text = "level 2 transponder, ability to set CA to 7, on ground"
elif ca == 5:
text = "level 2 transponder, ability to set CA to 7, airborne"
elif ca == 6:
text = "evel 2 transponder, ability to set CA to 7, either airborne or ground"
elif ca == 7:
text = "Downlink Request value is 0,or the Flight Status is 2, 3, 4 or 5, either airborne or on the ground"
else:
text = None
return ca, text
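# Minimal usage sketch (the message value below is a placeholder, not a real
# capture): any 14-hexdigit DF11 all-call reply can be passed to the three
# decoders above.
#
#     msg = "..."  # 14 hexdigits, downlink format 11
#     print(icao(msg), interrogator(msg), capability(msg))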
|
aouyar/pybackup
|
pybackup/plugins/mysql.py
|
"""pybackup - Backup Plugin for MySQL Database
"""
import os
from pybackup import errors
from pybackup import utils
from pybackup.logmgr import logger
from pybackup.plugins import BackupPluginBase
from pysysinfo.mysql import MySQLinfo
__author__ = "Ali Onur Uyar"
__copyright__ = "Copyright 2011, Ali Onur Uyar"
__credits__ = []
__license__ = "GPL"
__version__ = "0.5"
__maintainer__ = "Ali Onur Uyar"
__email__ = "aouyar at gmail.com"
__status__ = "Development"
class PluginMySQL(BackupPluginBase):
"""Class for backups of MySQL Database.
"""
_extOpts = {'filename_dump_db': 'Filename for MySQL dump files.',
'db_host': 'MySQL Database Server Name or IP.',
'db_port': 'MySQL Database Server Port.',
'db_user': 'MySQL Database Server User.',
'db_password': 'MySQL Database Server Password.',
'db_list': 'List of databases. (All databases by default.)',}
_extReqOptList = ()
_extDefaults = {'cmd_mysqldump': 'mysqldump',
'filename_dump_db': 'mysql_dump',}
def __init__(self, global_conf, job_conf):
"""Constructor
@param global_conf: Dictionary of general configuration options.
@param job_conf: Dictionary of job configuration options.
"""
BackupPluginBase.__init__(self, global_conf, job_conf)
self._connArgs = []
for (opt, key) in (('-h', 'db_host'),
('-P', 'db_port'),
('-u', 'db_user')):
val = self._conf.get(key)
if val is not None:
self._connArgs.extend([opt, val])
self._env = os.environ.copy()
db_password = self._conf.get('db_password')
if db_password is not None:
self._env['MYSQL_PWD'] = db_password
def dumpDatabase(self, db, data=True):
if data:
dump_type = 'data'
dump_desc = 'MySQL Database Contents'
else:
dump_type = 'db'
dump_desc = 'MySQL Database Container'
dump_filename = "%s_%s_%s.dump.%s" % (self._conf['filename_dump_db'],
db, dump_type,
self._conf['suffix_compress'])
dump_path = os.path.join(self._conf['job_path'], dump_filename)
args = [self._conf['cmd_mysqldump'],]
args.extend(self._connArgs)
if db in ('information_schema', 'mysql'):
args.append('--skip-lock-tables')
if not data:
args.extend(['--no-create-info', '--no-data' ,'--databases'])
args.append(db)
logger.info("Starting dump of %s: %s"
" Backup: %s", dump_desc, db, dump_path)
returncode, out, err = self._execBackupCmd(args, #@UnusedVariable
self._env,
out_path=dump_path,
out_compress=True)
if returncode == 0:
logger.info("Finished dump of %s: %s"
" Backup: %s", dump_desc, db, dump_path)
else:
raise errors.BackupError("Dump of %s for %s failed "
"with error code: %s"
% (dump_desc, db, returncode),
*utils.splitMsg(err))
def dumpDatabases(self):
if 'db_list' not in self._conf:
try:
my = MySQLinfo(host=self._conf.get('db_host'),
port=self._conf.get('db_port'),
user=self._conf.get('db_user'),
password=self._conf.get('db_password'))
self._conf['db_list'] = my.getDatabases()
del my
except Exception, e:
raise errors.BackupError("Connection to MySQL Server "
"for querying database list failed.",
"Error Message: %s" % str(e))
logger.info("Starting dump of %d MySQL Databases.",
len(self._conf['db_list']))
for db in self._conf['db_list']:
self.dumpDatabase(db, False)
self.dumpDatabase(db, True)
logger.info("Finished dump of MySQL Databases.")
def dumpFull(self):
self.dumpDatabases()
description = "Plugin for backups of MySQL Database."
methodList = (('mysql_dump_full', PluginMySQL, 'dumpFull'),
('mysql_dump_databases', PluginMySQL, 'dumpDatabases'),)
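# Hypothetical job_conf sketch (keys taken from _extOpts and _extDefaults
# above; the values are placeholders, not defaults shipped with the plugin):
#
#     job_conf = {'db_host': 'localhost', 'db_port': '3306',
#                 'db_user': 'backup', 'db_password': '...',
#                 'filename_dump_db': 'mysql_dump'}
#
# plus whatever generic options (job_path, suffix_compress, ...) the base
# plugin requires.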
|
ReservoirWebs/GrowChinook
|
Test_test.py
|
#!/usr/bin/python
import os
import glob
import cgi
import PrintPages_test as pt
address = cgi.escape(os.environ["REMOTE_ADDR"])
script = "Main Model Form"
pt.write_log_entry(script, address)
pt.print_header('GrowChinook', 'Std')
pt.print_full_form(None, None, 'in', 'RunModel.py')
extension = 'csv'
os.chdir('uploads')
result = glob.glob('*.' + extension)
print('''
{}
</div>
</body>
'''.format(result))
print ('</html>')
|
ShengRang/c4f
|
leetcode/combination-sum-ii.py
|
# coding: utf-8
class Solution(object):
@staticmethod
def dfs(candidates, target, vis, res, cur_idx, sum):
if sum > target:
return
if sum == target:
ans = [candidates[i] for i in cur_idx if i >= 0]
res.append(ans)
return
if sum < target:
for i, v in enumerate(candidates):
if sum + v > target:
break
if i != cur_idx[-1] + 1 and candidates[i] == candidates[i-1]:
continue
if i >= cur_idx[-1] and (not vis[i]):
vis[i] = 1
cur_idx.append(i)
Solution.dfs(candidates, target, vis, res, cur_idx, sum+v)
vis[i] = 0
cur_idx.pop()
def combinationSum2(self, candidates, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
"""
candidates = sorted(candidates)
n = len(candidates)
res = []
cur_idx = [-1]
vis = [0 for _ in candidates]
Solution.dfs(candidates, target, vis, res, cur_idx, 0)
# return map(list, list(res))
return res
s = Solution()
print s.combinationSum2([10,1,2,7,6,1,5], 8)
print s.combinationSum2([2,5,2,1,2], 5)
|
RYLF/pythontip
|
33.py
|
# coding=utf-8
# Title: Large-exponent modular power
# Given two positive integers a (0 < a < 100000) and n (0 <= n <= 100000000000), compute (a^n) % 20132013 and print the result
import math
# Test
a,n =10000,10000000
# Answer
ret = 1  # remainder accumulator
def PowerMod(a, n, ret):
if n == 0:
return ret
if n % 2: # n is odd
ret = ret * a % 20132013
return PowerMod(a*a%20132013, n/2, ret) # halve n: a^n % m = ((a^2 % m)^(n/2)) % m
print PowerMod(a, n, ret)
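# Sanity check (a sketch): Python's built-in three-argument pow should agree,
# i.e. PowerMod(a, n, 1) == pow(a, n, 20132013) for any valid a and n.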
|
vasili-v/ctauto
|
test/test_parser.py
|
import unittest
from ctauto.exceptions import CTAutoMissingEndOfMetablockError, \
CTAutoBrokenEndOfMetablockError, \
CTAutoInvalidMetablockError, \
CTAutoInvalidIdError, \
CTAutoMissingEndOfStringError, \
CTAutoInvalidStringError, \
CTAutoIncompleteEscapeSequence, \
CTAutoInvalidEscapeSequence, \
CTAutoTrailingCharacterAfterQuotedText, \
CTAutoInvalidNumberError
from ctauto.blocks import Block, MetaBlock
from ctauto.tokens import SimpleTextToken, QuotedTextToken, NumericToken, \
DotToken, LeftSquareBracketToken, RightSquareBracketToken
from ctauto.parser import EndOfFileCharacter, Parser, TemplateParser
_TEST_CONTENT = "<% metacode 1 %>\n" \
"#include <stdio.h>\n" \
"\n" \
"int main(void)\n" \
"{\n" \
" <% metacode 2 %>\n" \
" // <% metacode 3 %>\n" \
" return 0;\n" \
" <% metacode 4 . [ 1 ] %>\n" \
"}\n"
class TestParser(unittest.TestCase):
def test_parse(self):
class TestParser(Parser):
def reset(self, content, source):
self.source = source
self.content = content
self.indexes = []
self.characters = []
return self.first
def finalize(self):
return self.indexes, self.characters
def first(self, index, character):
self.indexes.append(index)
self.characters.append(character)
return self.second
def second(self, index, character):
self.indexes.append(index)
self.characters.append(character)
return self.third
def third(self, index, character):
if character is EndOfFileCharacter:
self.indexes.append(index)
self.characters.append(character)
return
self.indexes[-1] = index
self.characters[-1] = character
return self.third
parser = TestParser()
indexes, characters = parser.parse(_TEST_CONTENT, "test")
self.assertEqual(parser.source, "test")
self.assertEqual(parser.content, _TEST_CONTENT)
length = len(_TEST_CONTENT)
self.assertEqual(indexes, [0, length-1, length])
self.assertEqual(characters, ['<', '\n', EndOfFileCharacter])
class TestTemplateParser(unittest.TestCase):
def test_template_parse(self):
parser = TemplateParser()
blocks = parser.parse(_TEST_CONTENT, "test")
self.assertEqual(parser.source, "test")
self.assertEqual(parser.content, _TEST_CONTENT)
self.assertEqual(len(blocks), 8)
block = blocks[0]
self.assertIsInstance(block, MetaBlock)
self.assertEqual(block.content, " metacode 1 ")
self.assertEqual(block.tokens,
[SimpleTextToken(1, "metacode"),
NumericToken(1, "1")])
block = blocks[1]
self.assertIsInstance(block, Block)
self.assertEqual(block.content, "\n"
"#include <stdio.h>\n"
"\n"
"int main(void)\n"
"{\n"
" ")
block = blocks[2]
self.assertIsInstance(block, MetaBlock)
self.assertEqual(block.content, " metacode 2 ")
self.assertEqual(block.tokens,
[SimpleTextToken(6, "metacode"),
NumericToken(6, "2")])
block = blocks[3]
self.assertIsInstance(block, Block)
self.assertEqual(block.content, "\n"
" // ")
block = blocks[4]
self.assertIsInstance(block, MetaBlock)
self.assertEqual(block.content, " metacode 3 ")
self.assertEqual(block.tokens,
[SimpleTextToken(7, "metacode"),
NumericToken(7, "3")])
block = blocks[5]
self.assertIsInstance(block, Block)
self.assertEqual(block.content, "\n"
" return 0;\n"
" ")
block = blocks[6]
self.assertIsInstance(block, MetaBlock)
self.assertEqual(block.content, " metacode 4 . [ 1 ] ")
self.assertEqual(block.tokens,
[SimpleTextToken(9, "metacode"),
NumericToken(9, "4"),
DotToken(9),
LeftSquareBracketToken(9),
NumericToken(9, "1"),
RightSquareBracketToken(9)])
block = blocks[7]
self.assertIsInstance(block, Block)
self.assertEqual(block.content, "\n"
"}\n")
def test_invalid_ends_of_metablock(self):
parser = TemplateParser()
with self.assertRaises(CTAutoMissingEndOfMetablockError):
parser.parse("<% %", "test")
with self.assertRaises(CTAutoBrokenEndOfMetablockError):
parser.parse("<% %!", "test")
def test_invalid_metablock(self):
parser = TemplateParser()
with self.assertRaises(CTAutoInvalidMetablockError):
parser.parse("<% ! %>", "test")
def test_end_of_metablock_while_skipping_whitespaces(self):
parser = TemplateParser()
with self.assertRaises(CTAutoMissingEndOfMetablockError):
parser.parse(" <% ", "test")
def test_multiline_metablock(self):
parser = TemplateParser()
blocks = parser.parse("<%\tx\n\ty\n\tz\n\tt%>", "test")
self.assertEqual(blocks[0].tokens,
[SimpleTextToken(1, "x"),
SimpleTextToken(2, "y"),
SimpleTextToken(3, "z"),
SimpleTextToken(4, "t")])
def test_simple_text_token(self):
parser = TemplateParser()
blocks = parser.parse("<%test%>", "test")
self.assertEqual(blocks[0].tokens, [SimpleTextToken(1, "test")])
blocks = parser.parse("<% test %>", "test")
self.assertEqual(blocks[0].tokens, [SimpleTextToken(1, "test")])
with self.assertRaises(CTAutoMissingEndOfMetablockError):
parser.parse("<%s test", "test")
with self.assertRaises(CTAutoInvalidIdError):
parser.parse("<%s test! %>", "test")
def test_quoted_text_token(self):
parser = TemplateParser()
blocks = parser.parse("<%\"test\"%>", "test")
self.assertEqual(blocks[0].tokens, [QuotedTextToken(1, "test")])
blocks = parser.parse("<% \"test \\\\ \\\"test\\\" \\n \\t \\r \\a\" %>", "test")
self.assertEqual(blocks[0].tokens, [QuotedTextToken(1, "test \\ \"test\" \n \t \r \\a")])
with self.assertRaises(CTAutoMissingEndOfStringError):
parser.parse("<%\"test%>", "test")
with self.assertRaises(CTAutoInvalidStringError):
parser.parse("<%\"test\n%>", "test")
with self.assertRaises(CTAutoIncompleteEscapeSequence):
parser.parse("<% \"test \\", "test")
with self.assertRaises(CTAutoInvalidEscapeSequence):
parser.parse("<% \"test \\\n test\" %>", "test")
with self.assertRaises(CTAutoMissingEndOfMetablockError):
parser.parse("<% \"test\"", "test")
with self.assertRaises(CTAutoTrailingCharacterAfterQuotedText):
parser.parse("<% \"test\"test %>", "test")
def test_numeric_token(self):
parser = TemplateParser()
blocks = parser.parse("<% 1234567890 %>", "test")
self.assertEqual(blocks[0].tokens, [NumericToken(1, "1234567890")])
blocks = parser.parse("<%1234567890%>", "test")
self.assertEqual(blocks[0].tokens, [NumericToken(1, "1234567890")])
with self.assertRaises(CTAutoMissingEndOfMetablockError):
parser.parse("<%1234567890", "test")
with self.assertRaises(CTAutoInvalidNumberError):
parser.parse("<% 1234567890test %>", "test")
def test_simple_token_as_terminator(self):
parser = TemplateParser()
blocks = parser.parse("<% test.test %>", "test")
self.assertEqual(blocks[0].tokens,
[SimpleTextToken(1, "test"),
DotToken(1),
SimpleTextToken(1, "test")])
blocks = parser.parse("<% 1234567890[test %>", "test")
self.assertEqual(blocks[0].tokens,
[NumericToken(1, "1234567890"),
LeftSquareBracketToken(1),
SimpleTextToken(1, "test")])
blocks = parser.parse("<% \"test\"]test %>", "test")
self.assertEqual(blocks[0].tokens,
[QuotedTextToken(1, "test"),
RightSquareBracketToken(1),
SimpleTextToken(1, "test")])
test_suite = unittest.TestSuite([unittest.defaultTestLoader.loadTestsFromTestCase(TestParser),
unittest.defaultTestLoader.loadTestsFromTestCase(TestTemplateParser)])
if __name__ == '__main__':
unittest.main()
|
mattsch/Sickbeard
|
cherrypy/test/test_logging.py
|
"""Basic tests for the CherryPy core: request handling."""
from cherrypy.test import test
test.prefer_parent_path()
import os
localDir = os.path.dirname(__file__)
import cherrypy
access_log = os.path.join(localDir, "access.log")
error_log = os.path.join(localDir, "error.log")
# Some unicode strings.
tartaros = u'\u03a4\u1f71\u03c1\u03c4\u03b1\u03c1\u03bf\u03c2'
erebos = u'\u0388\u03c1\u03b5\u03b2\u03bf\u03c2.com'
def setup_server():
class Root:
def index(self):
return "hello"
index.exposed = True
def uni_code(self):
cherrypy.request.login = tartaros
cherrypy.request.remote.name = erebos
uni_code.exposed = True
def slashes(self):
cherrypy.request.request_line = r'GET /slashed\path HTTP/1.1'
slashes.exposed = True
def whitespace(self):
# User-Agent = "User-Agent" ":" 1*( product | comment )
# comment = "(" *( ctext | quoted-pair | comment ) ")"
# ctext = <any TEXT excluding "(" and ")">
# TEXT = <any OCTET except CTLs, but including LWS>
# LWS = [CRLF] 1*( SP | HT )
cherrypy.request.headers['User-Agent'] = 'Browzuh (1.0\r\n\t\t.3)'
whitespace.exposed = True
def as_string(self):
return "content"
as_string.exposed = True
def as_yield(self):
yield "content"
as_yield.exposed = True
def error(self):
raise ValueError()
error.exposed = True
error._cp_config = {'tools.log_tracebacks.on': True}
root = Root()
cherrypy.config.update({'log.error_file': error_log,
'log.access_file': access_log,
})
cherrypy.tree.mount(root)
from cherrypy.test import helper, logtest
class AccessLogTests(helper.CPWebCase, logtest.LogCase):
logfile = access_log
def testNormalReturn(self):
self.markLog()
self.getPage("/as_string",
headers=[('Referer', 'http://www.cherrypy.org/'),
('User-Agent', 'Mozilla/5.0')])
self.assertBody('content')
self.assertStatus(200)
intro = '%s - - [' % self.interface()
self.assertLog(-1, intro)
if [k for k, v in self.headers if k.lower() == 'content-length']:
self.assertLog(-1, '] "GET %s/as_string HTTP/1.1" 200 7 '
'"http://www.cherrypy.org/" "Mozilla/5.0"'
% self.prefix())
else:
self.assertLog(-1, '] "GET %s/as_string HTTP/1.1" 200 - '
'"http://www.cherrypy.org/" "Mozilla/5.0"'
% self.prefix())
def testNormalYield(self):
self.markLog()
self.getPage("/as_yield")
self.assertBody('content')
self.assertStatus(200)
intro = '%s - - [' % self.interface()
self.assertLog(-1, intro)
if [k for k, v in self.headers if k.lower() == 'content-length']:
self.assertLog(-1, '] "GET %s/as_yield HTTP/1.1" 200 7 "" ""' %
self.prefix())
else:
self.assertLog(-1, '] "GET %s/as_yield HTTP/1.1" 200 - "" ""'
% self.prefix())
def testEscapedOutput(self):
# Test unicode in access log pieces.
self.markLog()
self.getPage("/uni_code")
self.assertStatus(200)
self.assertLog(-1, repr(tartaros.encode('utf8'))[1:-1])
# Test the erebos value. Included inline for your enlightenment.
# Note the 'r' prefix--those backslashes are literals.
self.assertLog(-1, r'\xce\x88\xcf\x81\xce\xb5\xce\xb2\xce\xbf\xcf\x82')
# Test backslashes in output.
self.markLog()
self.getPage("/slashes")
self.assertStatus(200)
self.assertLog(-1, r'"GET /slashed\\path HTTP/1.1"')
# Test whitespace in output.
self.markLog()
self.getPage("/whitespace")
self.assertStatus(200)
# Again, note the 'r' prefix.
self.assertLog(-1, r'"Browzuh (1.0\r\n\t\t.3)"')
class ErrorLogTests(helper.CPWebCase, logtest.LogCase):
logfile = error_log
def testTracebacks(self):
# Test that tracebacks get written to the error log.
self.markLog()
ignore = helper.webtest.ignored_exceptions
ignore.append(ValueError)
try:
self.getPage("/error")
self.assertInBody("raise ValueError()")
self.assertLog(0, 'HTTP Traceback (most recent call last):')
self.assertLog(-3, 'raise ValueError()')
finally:
ignore.pop()
if __name__ == '__main__':
helper.testmain()
|
jeremiah-c-leary/vhdl-style-guide
|
vsg/rules/generic/rule_002.py
|
from vsg.rules import token_indent
from vsg import token
lTokens = []
lTokens.append(token.generic_clause.generic_keyword)
class rule_002(token_indent):
'''
This rule checks the indent of the **generic** keyword.
**Violation**
.. code-block:: vhdl
entity fifo is
generic (
entity fifo is
generic (
**Fix**
.. code-block:: vhdl
entity fifo is
generic (
entity fifo is
generic (
'''
def __init__(self):
token_indent.__init__(self, 'generic', '002', lTokens)
|
sramkrishna/eidisi
|
scripts/meson_post_install.py
|
#!/usr/bin/env python3
import os
import pathlib
import sysconfig
import compileall
import subprocess
prefix = pathlib.Path(os.environ.get('MESON_INSTALL_PREFIX', '/usr/local'))
datadir = prefix / 'share'
destdir = os.environ.get('DESTDIR', '')
if not destdir:
print('Compiling gsettings schemas...')
subprocess.call(['glib-compile-schemas', str(datadir / 'glib-2.0' / 'schemas')])
print('Updating icon cache...')
subprocess.call(['gtk-update-icon-cache', '-qtf', str(datadir / 'icons' / 'hicolor')])
print('Updating desktop database...')
subprocess.call(['update-desktop-database', '-q', str(datadir / 'applications')])
print('Compiling python bytecode...')
moduledir = sysconfig.get_path('purelib', vars={'base': str(prefix)})
compileall.compile_dir(destdir + os.path.join(moduledir, 'eidisi'), optimize=2)
|
testmana2/test
|
Plugins/VcsPlugins/vcsMercurial/HgCopyDialog.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010 - 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing a dialog to enter the data for a copy or rename operation.
"""
from __future__ import unicode_literals
import os.path
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QDialog, QDialogButtonBox
from E5Gui.E5PathPicker import E5PathPickerModes
from .Ui_HgCopyDialog import Ui_HgCopyDialog
class HgCopyDialog(QDialog, Ui_HgCopyDialog):
"""
Class implementing a dialog to enter the data for a copy or rename
operation.
"""
def __init__(self, source, parent=None, move=False):
"""
Constructor
@param source name of the source file/directory (string)
@param parent parent widget (QWidget)
@param move flag indicating a move operation (boolean)
"""
super(HgCopyDialog, self).__init__(parent)
self.setupUi(self)
self.source = source
if os.path.isdir(self.source):
self.targetPicker.setMode(E5PathPickerModes.DirectoryMode)
else:
self.targetPicker.setMode(E5PathPickerModes.SaveFileMode)
if move:
self.setWindowTitle(self.tr('Mercurial Move'))
else:
self.forceCheckBox.setEnabled(False)
self.sourceEdit.setText(source)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
msh = self.minimumSizeHint()
self.resize(max(self.width(), msh.width()), msh.height())
def getData(self):
"""
Public method to retrieve the copy data.
@return the target name (string) and a flag indicating
the operation should be enforced (boolean)
"""
target = self.targetPicker.text()
if not os.path.isabs(target):
sourceDir = os.path.dirname(self.sourceEdit.text())
target = os.path.join(sourceDir, target)
return target, self.forceCheckBox.isChecked()
@pyqtSlot(str)
def on_targetPicker_textChanged(self, txt):
"""
Private slot to handle changes of the target.
@param txt contents of the target edit (string)
"""
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(
os.path.isabs(txt) or os.path.dirname(txt) == "")
|
trungnt13/BAY2-uef17
|
utils.py
|
# ===========================================================================
# This file contain some utilities for the course
# ===========================================================================
from __future__ import print_function, division, absolute_import
import os
import sys
import time
import shutil
from six.moves.urllib.request import urlopen
from six.moves.urllib.error import URLError, HTTPError
import tarfile
import platform
import numpy as np
# Under Python 2, 'urlretrieve' relies on FancyURLopener from legacy
# urllib module, known to have issues with proxy management
if sys.version_info[0] == 2:
def urlretrieve(url, filename, reporthook=None, data=None):
'''
This function is adapted from: https://github.com/fchollet/keras
Original work Copyright (c) 2014-2015 keras contributors
'''
def chunk_read(response, chunk_size=8192, reporthook=None):
total_size = response.info().get('Content-Length').strip()
total_size = int(total_size)
count = 0
while 1:
chunk = response.read(chunk_size)
if not chunk:
break
count += 1
if reporthook:
reporthook(count, chunk_size, total_size)
yield chunk
response = urlopen(url, data)
with open(filename, 'wb') as fd:
for chunk in chunk_read(response, reporthook=reporthook):
fd.write(chunk)
else:
from six.moves.urllib.request import urlretrieve
class Progbar(object):
'''
This function is adapted from: https://github.com/fchollet/keras
Original work Copyright (c) 2014-2015 keras contributors
Modified work Copyright 2016-2017 TrungNT
'''
def __init__(self, target, title=''):
'''
@param target: total number of steps expected
'''
self.width = 39
self.target = target
self.sum_values = {}
self.unique_values = []
self.start = time.time()
self.total_width = 0
self.seen_so_far = 0
self.title = title
def update(self, current, values=[]):
'''
@param current: index of current step
@param values: list of tuples (name, value_for_last_step).
The progress bar will display averages for these values.
'''
for k, v in values:
if k not in self.sum_values:
self.sum_values[k] = [v * (current - self.seen_so_far), current - self.seen_so_far]
self.unique_values.append(k)
else:
self.sum_values[k][0] += v * (current - self.seen_so_far)
self.sum_values[k][1] += (current - self.seen_so_far)
self.seen_so_far = current
now = time.time()
prev_total_width = self.total_width
sys.stdout.write("\b" * prev_total_width)
sys.stdout.write("\r")
numdigits = int(np.floor(np.log10(self.target))) + 1
barstr = '%s %%%dd/%%%dd [' % (self.title, numdigits, numdigits)
bar = barstr % (current, self.target)
prog = float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += ('=' * (prog_width - 1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.' * (self.width - prog_width))
bar += ']'
sys.stdout.write(bar)
self.total_width = len(bar)
if current:
time_per_unit = (now - self.start) / current
else:
time_per_unit = 0
eta = time_per_unit * (self.target - current)
info = ''
if current < self.target:
info += ' - ETA: %ds' % eta
else:
info += ' - %ds' % (now - self.start)
for k in self.unique_values:
info += ' - %s:' % k
if type(self.sum_values[k]) is list:
avg = self.sum_values[k][0] / max(1, self.sum_values[k][1])
if abs(avg) > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
else:
info += ' %s' % self.sum_values[k]
self.total_width += len(info)
if prev_total_width > self.total_width:
info += ((prev_total_width - self.total_width) * " ")
sys.stdout.write(info)
if current >= self.target:
if "Linux" in platform.platform():
sys.stdout.write("\n\n")
else:
sys.stdout.write("\n")
sys.stdout.flush()
def add(self, n, values=[]):
self.update(self.seen_so_far + n, values)
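# Usage sketch (hypothetical numbers): track 100 steps and average a metric.
#
#     pb = Progbar(100, title='train')
#     pb.update(10, values=[('loss', 0.5)])
#     pb.add(5, values=[('loss', 0.4)])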
def get_file(fname, origin, untar=False, datadir=None):
'''
This function is adapted from: https://github.com/fchollet/keras
Original work Copyright (c) 2014-2015 keras contributors
Modified work Copyright 2016-2017 TrungNT
Return
------
file path of the downloaded file
'''
# ====== check valid datadir ====== #
if datadir is None:
datadir = os.path.join(os.path.expanduser('~'), '.bay2')
if not os.path.exists(datadir):
os.mkdir(datadir)
elif not os.path.exists(datadir):
raise ValueError('Cannot find folder at path:' + str(datadir))
# ====== download the file ====== #
if untar:
untar_fpath = os.path.join(datadir, fname)
fpath = untar_fpath + '.tar.gz'
else:
fpath = os.path.join(datadir, fname)
if not os.path.exists(fpath):
print('Downloading data from', origin)
global _progbar
_progbar = None
def dl_progress(count, block_size, total_size):
global _progbar
if _progbar is None:
_progbar = Progbar(total_size)
else:
_progbar.update(count * block_size)
error_msg = 'URL fetch failure on {}: {} -- {}'
try:
try:
urlretrieve(origin, fpath, dl_progress)
except URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except (Exception, KeyboardInterrupt) as e:
if os.path.exists(fpath):
os.remove(fpath)
raise
_progbar = None
if untar:
if not os.path.exists(untar_fpath):
print('Untaring file...')
tfile = tarfile.open(fpath, 'r:gz')
try:
tfile.extractall(path=datadir)
except (Exception, KeyboardInterrupt) as e:
if os.path.exists(untar_fpath):
if os.path.isfile(untar_fpath):
os.remove(untar_fpath)
else:
shutil.rmtree(untar_fpath)
raise
tfile.close()
return untar_fpath
return fpath
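# Illustrative sketch (not part of the original module): fetching and unpacking
# an archive with get_file. The URL and names are placeholders.
#
#   path = get_file('mydata', 'http://example.com/mydata.tar.gz', untar=True)
#   # downloads to ~/.bay2/mydata.tar.gz and returns the ~/.bay2/mydata folder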
|
microwaveabletoaster/dunces-and-dungeons
|
dunces-and-dungeons.py
|
from dungeon.dungeon import Dungeon, Hub
from entity.player.players import Player, Party
import entity.item.items as items
import sys, os
import base
import web.server
try:
import dill
except ImportError:
dill = None
PARTY = Party()
class Manager:
def __init__(self):
self.checked = False
def get_current_release(self):
latest = None
try:
import requests
latest = requests.get('https://api.github.com/repos/microwaveabletoaster/dunces-and-dungeons/releases/latest').json()['tag_name']
except:
base.put("could not reach the update service :'(")
return latest
def update_check(self):
base.put('checking for update...')
latest = self.get_current_release()
if latest:
if latest == self.RELEASE_ID:
base.put('you\'re up to date!')
else:
base.put("---------------=====UPDATE!!=====-----------\nan update to dunces and dungeons has been released! \ngo download it now from here: https://github.com/microwaveabletoaster/dunces-and-dungeons/releases \nit probably contains super important bugfixes and or more neat features, so don't dawdle!! \n\n<3 the team\n")
self.checked = True
def main(self,webbed=False):
self.webbed = webbed
if webbed: # ha amphibian joke
base.IS_WEB_VERSION = True
base.SERVER = web.server
web.server.party = PARTY
print 'MOVED ON'
base.BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ver = []
with open('%s/version.dunce' % base.BASE_DIR, 'r+') as f:
contents = f.read()
if contents == '':
base.put('writing')
contents = self.get_current_release().replace('.', ' ').replace('v', '')
f.write(contents)
ver = contents.split(' ')
self.RELEASE_ID = ('v%s.%s.%s' % (ver[0],ver[1],ver[2])).strip()
if not self.checked:
self.update_check()
go = True
intro = """
______ _ _______ _______ _______
( __ \ |\ /|( ( /|( ____ \( ____ \( ____ \\
| ( \ )| ) ( || \ ( || ( \/| ( \/| ( \/
| | ) || | | || \ | || | | (__ | (_____
| | | || | | || (\ \) || | | __) (_____ )
| | ) || | | || | \ || | | ( ) |
| (__/ )| (___) || ) \ || (____/\| (____/\/\____) |
(______/ (_______)|/ )_)(_______/(_______/\_______)
_______ _ ______
( ___ )( ( /|( __ \\
| ( ) || \ ( || ( \ )
| (___) || \ | || | ) |
| ___ || (\ \) || | | |
| ( ) || | \ || | ) |
| ) ( || ) \ || (__/ )
|/ \||/ )_)(______/
______ _ _______ _______ _______ _ _______
( __ \ |\ /|( ( /|( ____ \( ____ \( ___ )( ( /|( ____ \\
| ( \ )| ) ( || \ ( || ( \/| ( \/| ( ) || \ ( || ( \/
| | ) || | | || \ | || | | (__ | | | || \ | || (_____
| | | || | | || (\ \) || | ____ | __) | | | || (\ \) |(_____ )
| | ) || | | || | \ || | \_ )| ( | | | || | \ | ) |
| (__/ )| (___) || ) \ || (___) || (____/\| (___) || ) \ |/\____) |
(______/ (_______)|/ )_)(_______)(_______/(_______)|/ )_)\_______)
copyleft (c) 2016 John Dikeman and Cameron Egger
"""
base.put(intro)
cho = 0
# most of this code is super redundant cause cho is hardcoded but do i care? nope lol.
if cho is not None:
if cho == 0:
self.new_game()
if cho == 1:
li = []
if os.path.exists('%s/saves/' % base.BASE_DIR):
for dirpath, dirname, filename in os.walk('%s/saves/' % base.BASE_DIR):
for fi in filename:
if '.dunce' in fi:
li.append(fi)
else:
base.put('no saves to choose from!')
op = base.make_choice(li,"savefile")
if dill:
if op is not None:
go = False
base.put('loading session')
dill.load_session('%s/saves/%s' % (base.BASE_DIR,li[op]))
else:
base.put('save/load support is disabled because you haven\'t installed dill!')
def new_game(self):
# PARTY.current_dungeon.start()
if self.webbed:
party_size = base.get_input('enter the size of your party: ')
if int(party_size) == 0:
base.put("you can't play with zero people, dingus")
sys.exit()
# creating all the players in the party
for a in range(int(party_size)):
name = base.get_input('enter the name of player %d: ' % a)
PARTY.add_player(Player(name))
base.put('Game Start')
base.put(PARTY.to_str())
dungeon = Hub(PARTY)
PARTY.hub = dungeon
PARTY.current_dungeon = dungeon
PARTY.current_dungeon.start()
while(PARTY.end):
PARTY.handle_player_turn()
if(PARTY.end):
PARTY.current_dungeon.handle_monster_turn()
base.put("\n\n------------=========GAME OVER=========------------")
else:
party_size = base.get_input('enter the size of your party: ')
if int(party_size) == 0:
base.put("you can't play with zero people, dingus")
sys.exit()
# creating all the players in the party
for a in range(int(party_size)):
name = base.get_input('enter the name of player %d: ' % a)
PARTY.add_player(Player(name))
base.put('Game Start')
base.put(PARTY.to_str())
dungeon = Hub(PARTY)
PARTY.hub = dungeon
PARTY.current_dungeon = dungeon
PARTY.current_dungeon.start()
while(PARTY.end):
PARTY.handle_player_turn()
if(PARTY.end):
PARTY.current_dungeon.handle_monster_turn()
base.put("\n\n------------=========GAME OVER=========------------")
if __name__ == '__main__':
game = Manager()
try:
if sys.argv[1] == 'web':
print 'initializing web server. point your browser to http://localhost:5000.'
game.main(True)
else:
game.main()
except IndexError:
game.main()
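# Usage sketch (not part of the original file): launching from a terminal,
# based on the sys.argv check above; the port comes from the startup message.
#
#   python dunces-and-dungeons.py        # terminal mode
#   python dunces-and-dungeons.py web    # web mode at http://localhost:5000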
|
satriaphd/bgc-learn
|
bgc-learn.py
|
import os
import sys
import shutil
import straight.plugin
import numpy as np
import pkg_resources
from os import path
from core import utils
from core import argparser
from core import log
from core import parser
def main():
## Parse arguments
ap = argparser.init_arg_parser()
options = ap.parse_args()
## Collect input gbks from folder
input_files = []
if not path.isdir(options["input_folder"]):
log.error("Specified folder didn't exist '%s'" % (options["input_folder"]))
sys.exit(1)
else:
for filename in os.listdir(options["input_folder"]):
filepath = path.join(options["input_folder"], filename)
if not path.isdir(filepath):
ext = path.splitext(filepath)[1][1:]
if ext in ["gbk"]:
input_files.append(filename)
## Initial check parameters
metadata = {}
if options["mode"] == "train":
## check and load metadata file
if not path.exists(options["training_metadata"]):
log.error("Specified file didn't exist '%s'" % (options["training_metadata"]))
sys.exit(1)
else:
metadata = parser.parse_training_metadata(options["training_metadata"])
options["single_values"] = [[]] * len(input_files)
options["train_set"] = []
options["test_set"] = []
# remove GBKs not listed in metadata
input_files[:] = [bgc for bgc in input_files if utils.get_bgc_name(bgc) in metadata["bgc"]]
# features
if "features" not in options:
if "features" not in metadata:
options["features"] = [{"name": plugin.name, "params": [], "subs": [sub for sub in plugin.features]} for plugin in utils.load_plugins("feature_extraction")]
else:
options["features"] = metadata["features"]
# algorithm mode (classification / regression)
if metadata["mode"] == "CLASSIFICATION":
options["algo_mode"] = "classification"
if "algorithm" not in options:
if "algorithm" not in metadata:
options["algorithm"] = {"name": "svm", "params": []}
else:
options["algorithm"] = metadata["algorithm"]
elif metadata["mode"] == "REGRESSION":
options["algo_mode"] = "regression"
if "algorithm" not in options:
if "algorithm" not in metadata:
options["algorithm"] = {"name": "linear_regression", "params": []}
else:
options["algorithm"] = metadata["algorithm"]
else:
log.error("Incorrect metadata file format '%s'" % (options["training_metadata"]))
sys.exit(1)
# single values (from right hand side of data column) & train/test set distribution
for i, fp in enumerate(input_files):
bgc_id = utils.get_bgc_name(fp)
if bgc_id in metadata["bgc"]:
idx_meta = metadata["bgc"].index(bgc_id)
options["single_values"][i] = metadata["single_values"][idx_meta]
if idx_meta in metadata["train_set"]:
options["train_set"].append(i)
if idx_meta in metadata["test_set"]:
options["test_set"].append(i)
else:
log.error("'%s' is not included in your metadata" % (bgc_id))
sys.exit(1)
# pair values for training set (from its own table from the metadata)
options["train_pair_values"] = [[None] * len(options["train_set"]) for _ in range(len(options["train_set"]))]
for i, idx1 in enumerate(options["train_set"]):
for j, idx2 in enumerate(options["train_set"]):
if len(metadata["train_pair_values"]) > i and len(metadata["train_pair_values"][i]) > j:
options["train_pair_values"][i][j] = metadata["train_pair_values"][i][j]
# pair values for test set (from its own table from the metadata)
options["test_pair_values"] = [[None] * len(options["test_set"]) for _ in range(len(options["test_set"]))]
for i, idx1 in enumerate(options["test_set"]):
for j, idx2 in enumerate(options["test_set"]):
if len(metadata["test_pair_values"]) > i and len(metadata["test_pair_values"][i]) > j:
options["test_pair_values"][i][j] = metadata["test_pair_values"][i][j]
if options["mode"] == "predict":
## check and load model file
print "..."
## further checks..
algo_type = utils.get_algo_type(options["algorithm"]["name"])
if algo_type not in ["classification", "regression"]:
log.error("Selected algorithm '%s' did not exist" % (algo["name"]))
sys.exit(1)
if options["algo_mode"] != algo_type:
log.error("Selected algorithm '%s' is for %s, but the provided data is for %s." % (options["algorithm"]["name"], algo_type, options["algo_mode"]))
sys.exit(1)
options["features_scope"] = ""
for idx, feature in enumerate(options["features"]):
for plugin in utils.load_plugins("feature_extraction"):
if plugin.name == feature["name"]:
if len(options["features_scope"]) > 0 and plugin.scope != options["features_scope"]:
log.error("You selected features of different scope ('%s:%s', '%s:%s'). Please select only combination of features with the same scope." % (feature["name"], plugin.scope, options["features"][idx - 1]["name"], options["features_scope"]))
sys.exit(1)
options["features_scope"] = plugin.scope
break
if len(feature["subs"]) < 1:
for plugin in utils.load_plugins("feature_extraction"):
if plugin.name == feature["name"]:
feature["subs"].extend(plugin.features)
break
for sub in feature["subs"]:
for plugin in utils.load_plugins("feature_extraction"):
if plugin.name == feature["name"]:
if sub not in plugin.features:
log.error("Feature unknown: '%s'" % sub)
sys.exit(1)
## Check output folder
if not options["output_folder"]:
options["output_folder"] = path.join(os.getcwd(), path.basename(options["input_folder"]))
if path.isdir(options["output_folder"]):
# output folder exists; probably a disrupted job
if not options["continue"] and not options["overwrite"]:
log.error("Output folder '%s' exists. Previous run? Use --continue to continue, or --overwrite to start over." % options["output_folder"])
sys.exit(1)
elif options["overwrite"]:
shutil.rmtree(options["output_folder"])
os.makedirs(options["output_folder"])
elif options["reset_preprocesses"]:
bgcjsonpath = path.join(options["output_folder"], "bgcjson")
if path.exists(bgcjsonpath):
shutil.rmtree(bgcjsonpath)
else:
os.makedirs(options["output_folder"])
## Parse gbks
## TODO: multi-threading?
log.info("Started preprocessing input files..")
utils.print_progress(0, len(input_files), prefix='Preprocessing input GBKs..', suffix='', decimals=1)
for i, filename in enumerate(input_files):
filepath = path.join(options["input_folder"], filename)
if not (path.exists(path.join(options["output_folder"], "bgcjson", "%s.bgcjson" % utils.get_bgc_name(filepath)))):
bgc = parser.parse_gbk(filepath)
if bgc is not None:
utils.save_bgcjson(bgc, options["output_folder"])
utils.print_progress(i + 1, len(input_files), prefix='Preprocessing input GBKs..', suffix='', decimals=1, bar_length=100)
log.info("Finished preprocessing input files..")
## Do feature extraction
# step 1: make folder structure & index file
feature_folder = utils.create_feature_folder(input_files, options["output_folder"])
# step 2: traverse FE modules and run algorithms, then save the results
feature_extraction_plugins = []
for plugin in utils.load_plugins("feature_extraction"):
if ("features" not in options) or (plugin.name in [feature["name"] for feature in options["features"]]):
feature_extraction_plugins.append(plugin)
# calculate features
options["feature_values"] = {}
if options["features_scope"] == "pair":
log.info("Started feature extraction for all BGC pairs..")
nrcomb = len(input_files) * (len(input_files) - 1) / 2
count = 0
utils.print_progress(0, nrcomb, prefix='Feature extraction..', suffix='', decimals=1)
for i, fn1 in enumerate(input_files):
for j, fn2 in enumerate(input_files):
if i < j:
bgc1 = parser.parse_bgcjson(path.join(options["output_folder"], "bgcjson", "%s.bgcjson" % utils.get_bgc_name(fn1)))
bgc2 = parser.parse_bgcjson(path.join(options["output_folder"], "bgcjson", "%s.bgcjson" % utils.get_bgc_name(fn2)))
for plugin in feature_extraction_plugins:
if plugin.name not in options["feature_values"]:
options["feature_values"][plugin.name] = {}
results = plugin.calculate(bgc1, bgc2)
options["feature_values"][plugin.name]["%d+%d" % (i, j)] = [float(result) for result in results]
count += 1
utils.print_progress(count, nrcomb, prefix='Feature extraction..', suffix='', decimals=1)
elif options["features_scope"] == "single":
log.info("Started feature extraction for all BGCs..")
count = 0
utils.print_progress(0, len(input_files), prefix='Feature extraction..', suffix='', decimals=1)
for i, fn in enumerate(input_files):
bgc = parser.parse_bgcjson(path.join(options["output_folder"], "bgcjson", "%s.bgcjson" % utils.get_bgc_name(fn)))
for plugin in feature_extraction_plugins:
if plugin.name not in options["feature_values"]:
options["feature_values"][plugin.name] = {}
results = plugin.calculate(bgc)
options["feature_values"][plugin.name]["%d" % (i)] = [float(result) for result in results]
count += 1
utils.print_progress(count, len(input_files), prefix='Feature extraction..', suffix='', decimals=1)
else:
log.error("Invalid features scope: '%s'" % options["features_scope"])
sys.exit(1)
## Load features & value matrix
features_rows = []
if options["features_scope"] == "pair":
for i, fn1 in enumerate(input_files):
for j, fn2 in enumerate(input_files):
if i < j:
features_rows.append([i, j])
elif options["features_scope"] == "single":
for i in xrange(0, len(input_files)):
features_rows.append([i])
else:
log.error("Invalid features scope: '%s'" % options["features_scope"])
sys.exit(1)
if "features_columns" not in options:
options["features_columns"] = []
for feature in options["features"]:
for sub in feature["subs"]:
options["features_columns"].append("%s.%s" % (feature["name"], sub))
features_matrix = {}
for row_ids in ["+".join([str(row_id) for row_id in row_ids]) for row_ids in features_rows]:
row = [None] * len(options["features_columns"])
for plugin in feature_extraction_plugins:
plugin_folder = path.join(feature_folder, plugin.name)
values = options["feature_values"][plugin.name][row_ids]
if (len(values) != len(plugin.features)):
# technically impossible to reach this, unless output from calculate != #of results expected
log.error("...")
sys.exit(1)
else:
for n, col in enumerate(plugin.features):
colname = ("%s.%s" % (plugin.name, col))
if colname in options["features_columns"]:
row[options["features_columns"].index(colname)] = values[n]
features_matrix[row_ids] = row
## Execute algorithms & save results
if options["mode"] == "train":
## Fetch feature & values training matrix
training_matrix = []
training_target = []
training_rownames = []
if options["features_scope"] == "pair":
for i, idx1 in enumerate(options["train_set"]):
for j, idx2 in enumerate(options["train_set"]):
if idx1 < idx2:
training_matrix.append(features_matrix["%d+%d" % (idx1, idx2)])
training_rownames.append("%s+%s" % (utils.get_bgc_name(input_files[idx1]), utils.get_bgc_name(input_files[idx2])))
if options["algo_mode"] == "classification":
class1 = options["single_values"][idx1].split(",")
class2 = options["single_values"][idx2].split(",")
training_target.append(int(len(set(class1) & set(class2)) > 0))
elif options["algo_mode"] == "regression":
training_target.append(float(options["train_pair_values"][i][j]))
elif options["features_scope"] == "single":
for idx in options["train_set"]:
training_matrix.append(features_matrix["%d" % (idx)])
training_rownames.append("%s" % (utils.get_bgc_name(input_files[idx1])))
training_target.append(options["single_values"][idx])
training_matrix = np.array(training_matrix)
training_target = np.array(training_target)
## Fetch feature & values testing matrix
testing_matrix = []
testing_target = []
testing_rownames = []
if options["features_scope"] == "pair":
for i, idx1 in enumerate(options["test_set"]):
for j, idx2 in enumerate(options["test_set"]):
if idx1 < idx2:
testing_matrix.append(features_matrix["%d+%d" % (idx1, idx2)])
testing_rownames.append("%s+%s" % (utils.get_bgc_name(input_files[idx1]), utils.get_bgc_name(input_files[idx2])))
if options["algo_mode"] == "classification":
class1 = options["single_values"][idx1].split(",")
class2 = options["single_values"][idx2].split(",")
testing_target.append(int(len(set(class1) & set(class2)) > 0))
elif options["algo_mode"] == "regression":
testing_target.append(float(options["test_pair_values"][i][j]))
elif options["features_scope"] == "single":
for idx in options["test_set"]:
testing_matrix.append(features_matrix["%d" % (idx)])
testing_rownames.append("%s" % (utils.get_bgc_name(input_files[idx1])))
testing_target.append(options["single_values"][idx])
testing_matrix = np.array(testing_matrix)
testing_target = np.array(testing_target)
## Load the training model
module = None
for plugin in utils.load_plugins(options["algo_mode"]):
if plugin.name == options["algorithm"]["name"]:
module = plugin
break
if module is None:
log.error("Failed to load module: '%s.%s'" % (options["algo_mode"], options["algorithm"]["name"]))
sys.exit(1)
else:
log.info("Training model...")
classifier = module.train(training_matrix, training_target, options["algorithm"]["params"])
# save model & its metadata to file
model_metadata = {
"mode": options["algo_mode"],
"algorithm": options["algorithm"],
"features": options["features"],
"columns": options["features_columns"],
"training_data_count": len(training_matrix),
"environment": {
"bgc-learn": utils.get_version(),
"scikit-learn": pkg_resources.get_distribution("scikit-learn").version,
"numpy": pkg_resources.get_distribution("numpy").version,
"scipy": pkg_resources.get_distribution("scipy").version,
}
}
save_name = utils.save_result_model(classifier, model_metadata, options["output_folder"])
# calculate accuracies & save summaries
result_training = ({}, [])
if len(training_matrix) > 0:
result_training = module.test(training_matrix, training_target, classifier)
utils.save_result_testing("training-%s" % (save_name), training_rownames, options["features_columns"], training_matrix, training_target, result_training, options["output_folder"])
result_testing = ({}, [])
if len(testing_matrix) > 0:
result_testing = module.test(testing_matrix, testing_target, classifier)
utils.save_result_testing("testing-%s" % (save_name), testing_rownames, options["features_columns"], testing_matrix, testing_target, result_testing, options["output_folder"])
elif options["mode"] == "predict":
print "..."
## Cleanup
log.info("Cleaning up..")
shutil.rmtree(feature_folder) # remove feature folder
## done
log.info("Analysis done. your result is available inside the folder '%s'." % options["output_folder"])
if __name__ == "__main__":
main()
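# Illustrative note (not part of the original file): pairwise feature values
# are keyed as "i+j" with i < j, so N input files yield N*(N-1)/2 rows,
# matching nrcomb above. A made-up example for N = 4:
#
#   keys = ["%d+%d" % (i, j) for i in range(4) for j in range(4) if i < j]
#   # -> ['0+1', '0+2', '0+3', '1+2', '1+3', '2+3']   (6 == 4*3/2 rows)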
|
ebigelow/LOTlib
|
LOTlib/Hypotheses/Lexicon/RecursiveLexicon.py
|
from SimpleLexicon import SimpleLexicon
from LOTlib.Evaluation.EvaluationException import RecursionDepthException
class RecursiveLexicon(SimpleLexicon):
"""
A lexicon where word meanings can call each other, analogous to the relationship between a RecursiveLOTHypothesis and a LOTHypothesis.
To achieve this, we require the LOTHypotheses in self.value to take a "recurse" call that is always passed in by
default here on __call__ as the first argument.
This throws a RecursionDepthException when it gets too deep.
See Examples.EvenOdd
"""
def __init__(self, recursive_depth_bound=10, *args, **kwargs):
self.recursive_depth_bound = recursive_depth_bound
SimpleLexicon.__init__(self, *args, **kwargs)
def __call__(self, word, *args):
"""
Wrap self.recursive_call in as the first argument so that we don't have to include it in the grammar. This way, we can use self(word, X, Y) as above.
"""
self.recursive_call_depth = 0
return self.value[word](self.recursive_call, *args) # pass in "self" as lex, using the recursive version
def recursive_call(self, word, *args):
"""
This gets called internally on recursive calls. It keeps track of the depth so we can escape when the recursion bound is exceeded.
"""
self.recursive_call_depth += 1
if self.recursive_call_depth > self.recursive_depth_bound:
raise RecursionDepthException
# print ">>>", self.value[word]
return self.value[word](self.recursive_call, *args)
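# Illustrative sketch (not part of the original module): a word meaning gets
# recursive_call as its first argument, so a self-referential meaning bottoms
# out or raises RecursionDepthException. The 'even' entry is hypothetical and
# assumes callables may be assigned to self.value directly:
#
#   lex = RecursiveLexicon(recursive_depth_bound=10)
#   lex.value['even'] = lambda recurse, n: True if n == 0 else not recurse('even', n - 1)
#   lex('even', 4)     # -> True; lex('even', 20) would exceed the bound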
|
kanishkarj/Rave
|
Qt_Designer_files/main_design.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main.ui'
#
# Created by: PyQt4 UI code generator 4.12.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(764, 593)
MainWindow.setMinimumSize(QtCore.QSize(650, 500))
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.mediaView = QtGui.QFrame(self.centralwidget)
self.mediaView.setGeometry(QtCore.QRect(0, 0, 461, 231))
self.mediaView.setStyleSheet(_fromUtf8(""))
self.mediaView.setFrameShape(QtGui.QFrame.StyledPanel)
self.mediaView.setFrameShadow(QtGui.QFrame.Raised)
self.mediaView.setObjectName(_fromUtf8("mediaView"))
self.subtitle = QtGui.QLabel(self.centralwidget)
self.subtitle.setGeometry(QtCore.QRect(250, 240, 261, 17))
font = QtGui.QFont()
font.setPointSize(12)
self.subtitle.setFont(font)
self.subtitle.setStyleSheet(_fromUtf8("color:white;"))
self.subtitle.setText(_fromUtf8(""))
self.subtitle.setObjectName(_fromUtf8("subtitle"))
self.controlView = QtGui.QWidget(self.centralwidget)
self.controlView.setGeometry(QtCore.QRect(30, 270, 661, 130))
self.controlView.setMinimumSize(QtCore.QSize(510, 130))
self.controlView.setMaximumSize(QtCore.QSize(16777215, 130))
self.controlView.setObjectName(_fromUtf8("controlView"))
self.verticalLayout = QtGui.QVBoxLayout(self.controlView)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.gridLayout_8 = QtGui.QGridLayout()
self.gridLayout_8.setMargin(1)
self.gridLayout_8.setObjectName(_fromUtf8("gridLayout_8"))
self.timeDone = QtGui.QLabel(self.controlView)
self.timeDone.setMinimumSize(QtCore.QSize(60, 0))
self.timeDone.setMaximumSize(QtCore.QSize(60, 16777215))
self.timeDone.setAlignment(QtCore.Qt.AlignCenter)
self.timeDone.setObjectName(_fromUtf8("timeDone"))
self.gridLayout_8.addWidget(self.timeDone, 0, 0, 1, 1)
self.seekBar = QtGui.QSlider(self.controlView)
self.seekBar.setMinimumSize(QtCore.QSize(365, 18))
self.seekBar.setMaximumSize(QtCore.QSize(16777215, 18))
self.seekBar.setOrientation(QtCore.Qt.Horizontal)
self.seekBar.setObjectName(_fromUtf8("seekBar"))
self.gridLayout_8.addWidget(self.seekBar, 0, 1, 1, 1)
self.timeLeft = QtGui.QLabel(self.controlView)
self.timeLeft.setMinimumSize(QtCore.QSize(60, 18))
self.timeLeft.setMaximumSize(QtCore.QSize(60, 18))
self.timeLeft.setAlignment(QtCore.Qt.AlignCenter)
self.timeLeft.setObjectName(_fromUtf8("timeLeft"))
self.gridLayout_8.addWidget(self.timeLeft, 0, 2, 1, 1)
self.verticalLayout.addLayout(self.gridLayout_8)
self.gridLayout_4 = QtGui.QGridLayout()
self.gridLayout_4.setMargin(1)
self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
self.muteButton = QtGui.QPushButton(self.controlView)
self.muteButton.setMinimumSize(QtCore.QSize(30, 30))
self.muteButton.setMaximumSize(QtCore.QSize(30, 30))
self.muteButton.setText(_fromUtf8(""))
self.muteButton.setObjectName(_fromUtf8("muteButton"))
self.gridLayout_4.addWidget(self.muteButton, 0, 4, 1, 1)
self.expansionWidget_3 = QtGui.QWidget(self.controlView)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.expansionWidget_3.sizePolicy().hasHeightForWidth())
self.expansionWidget_3.setSizePolicy(sizePolicy)
self.expansionWidget_3.setObjectName(_fromUtf8("expansionWidget_3"))
self.gridLayout_7 = QtGui.QGridLayout(self.expansionWidget_3)
self.gridLayout_7.setMargin(0)
self.gridLayout_7.setObjectName(_fromUtf8("gridLayout_7"))
self.gridLayout_4.addWidget(self.expansionWidget_3, 0, 1, 1, 1)
self.volumeBar = QtGui.QSlider(self.controlView)
self.volumeBar.setMinimumSize(QtCore.QSize(175, 0))
self.volumeBar.setMaximumSize(QtCore.QSize(100, 16777215))
self.volumeBar.setOrientation(QtCore.Qt.Horizontal)
self.volumeBar.setObjectName(_fromUtf8("volumeBar"))
self.gridLayout_4.addWidget(self.volumeBar, 0, 5, 1, 1)
self.mediaSettingsWidget = QtGui.QWidget(self.controlView)
self.mediaSettingsWidget.setMinimumSize(QtCore.QSize(140, 60))
self.mediaSettingsWidget.setMaximumSize(QtCore.QSize(140, 60))
self.mediaSettingsWidget.setObjectName(_fromUtf8("mediaSettingsWidget"))
self.horizontalLayout_6 = QtGui.QHBoxLayout(self.mediaSettingsWidget)
self.horizontalLayout_6.setMargin(0)
self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6"))
self.fullscreenButton = QtGui.QPushButton(self.mediaSettingsWidget)
self.fullscreenButton.setMinimumSize(QtCore.QSize(30, 30))
self.fullscreenButton.setMaximumSize(QtCore.QSize(30, 30))
self.fullscreenButton.setText(_fromUtf8(""))
self.fullscreenButton.setObjectName(_fromUtf8("fullscreenButton"))
self.horizontalLayout_6.addWidget(self.fullscreenButton)
self.playlistButton = QtGui.QPushButton(self.mediaSettingsWidget)
self.playlistButton.setMinimumSize(QtCore.QSize(30, 30))
self.playlistButton.setMaximumSize(QtCore.QSize(30, 30))
self.playlistButton.setText(_fromUtf8(""))
self.playlistButton.setObjectName(_fromUtf8("playlistButton"))
self.horizontalLayout_6.addWidget(self.playlistButton)
self.stopButton = QtGui.QPushButton(self.mediaSettingsWidget)
self.stopButton.setMinimumSize(QtCore.QSize(30, 30))
self.stopButton.setMaximumSize(QtCore.QSize(30, 30))
self.stopButton.setText(_fromUtf8(""))
self.stopButton.setObjectName(_fromUtf8("stopButton"))
self.horizontalLayout_6.addWidget(self.stopButton)
self.gridLayout_4.addWidget(self.mediaSettingsWidget, 0, 0, 1, 1)
self.mediaControlWidget = QtGui.QWidget(self.controlView)
self.mediaControlWidget.setMinimumSize(QtCore.QSize(225, 70))
self.mediaControlWidget.setMaximumSize(QtCore.QSize(225, 70))
self.mediaControlWidget.setObjectName(_fromUtf8("mediaControlWidget"))
self.horizontalLayout_7 = QtGui.QHBoxLayout(self.mediaControlWidget)
self.horizontalLayout_7.setMargin(0)
self.horizontalLayout_7.setObjectName(_fromUtf8("horizontalLayout_7"))
self.previous = QtGui.QPushButton(self.mediaControlWidget)
self.previous.setMinimumSize(QtCore.QSize(40, 40))
self.previous.setMaximumSize(QtCore.QSize(40, 40))
self.previous.setText(_fromUtf8(""))
self.previous.setObjectName(_fromUtf8("previous"))
self.horizontalLayout_7.addWidget(self.previous)
self.playState = QtGui.QPushButton(self.mediaControlWidget)
self.playState.setMinimumSize(QtCore.QSize(50, 50))
self.playState.setMaximumSize(QtCore.QSize(50, 50))
self.playState.setText(_fromUtf8(""))
icon = QtGui.QIcon.fromTheme(_fromUtf8("play-2.svg"))
self.playState.setIcon(icon)
self.playState.setObjectName(_fromUtf8("playState"))
self.horizontalLayout_7.addWidget(self.playState)
self.next = QtGui.QPushButton(self.mediaControlWidget)
self.next.setMinimumSize(QtCore.QSize(40, 40))
self.next.setMaximumSize(QtCore.QSize(40, 40))
self.next.setText(_fromUtf8(""))
self.next.setObjectName(_fromUtf8("next"))
self.horizontalLayout_7.addWidget(self.next)
self.gridLayout_4.addWidget(self.mediaControlWidget, 0, 2, 1, 1)
self.expansionWidget_4 = QtGui.QWidget(self.controlView)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.expansionWidget_4.sizePolicy().hasHeightForWidth())
self.expansionWidget_4.setSizePolicy(sizePolicy)
self.expansionWidget_4.setObjectName(_fromUtf8("expansionWidget_4"))
self.gridLayout_4.addWidget(self.expansionWidget_4, 0, 3, 1, 1)
self.verticalLayout.addLayout(self.gridLayout_4)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 764, 29))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menuFile = QtGui.QMenu(self.menubar)
self.menuFile.setObjectName(_fromUtf8("menuFile"))
self.menuPlayback = QtGui.QMenu(self.menubar)
self.menuPlayback.setObjectName(_fromUtf8("menuPlayback"))
self.menuSpeed = QtGui.QMenu(self.menuPlayback)
self.menuSpeed.setObjectName(_fromUtf8("menuSpeed"))
self.menu_Subtitles = QtGui.QMenu(self.menubar)
self.menu_Subtitles.setObjectName(_fromUtf8("menu_Subtitles"))
self.menu_Audio = QtGui.QMenu(self.menubar)
self.menu_Audio.setObjectName(_fromUtf8("menu_Audio"))
self.menu_Video = QtGui.QMenu(self.menubar)
self.menu_Video.setObjectName(_fromUtf8("menu_Video"))
MainWindow.setMenuBar(self.menubar)
self.actionOpen_File = QtGui.QAction(MainWindow)
self.actionOpen_File.setShortcutContext(QtCore.Qt.WindowShortcut)
self.actionOpen_File.setObjectName(_fromUtf8("actionOpen_File"))
self.actionExit = QtGui.QAction(MainWindow)
self.actionExit.setObjectName(_fromUtf8("actionExit"))
self.actionOpen_Multiple_Files = QtGui.QAction(MainWindow)
self.actionOpen_Multiple_Files.setObjectName(_fromUtf8("actionOpen_Multiple_Files"))
self.actionAdd_Subtitle_File = QtGui.QAction(MainWindow)
self.actionAdd_Subtitle_File.setObjectName(_fromUtf8("actionAdd_Subtitle_File"))
self.actionJump_Forward = QtGui.QAction(MainWindow)
self.actionJump_Forward.setObjectName(_fromUtf8("actionJump_Forward"))
self.actionJump_Backward = QtGui.QAction(MainWindow)
self.actionJump_Backward.setObjectName(_fromUtf8("actionJump_Backward"))
self.actionX0_5 = QtGui.QAction(MainWindow)
self.actionX0_5.setObjectName(_fromUtf8("actionX0_5"))
self.actionX_1 = QtGui.QAction(MainWindow)
self.actionX_1.setObjectName(_fromUtf8("actionX_1"))
self.actionX_2 = QtGui.QAction(MainWindow)
self.actionX_2.setObjectName(_fromUtf8("actionX_2"))
self.actionX_4 = QtGui.QAction(MainWindow)
self.actionX_4.setObjectName(_fromUtf8("actionX_4"))
self.actionX_8 = QtGui.QAction(MainWindow)
self.actionX_8.setObjectName(_fromUtf8("actionX_8"))
self.actionAdd_Subtitle_Track = QtGui.QAction(MainWindow)
self.actionAdd_Subtitle_Track.setObjectName(_fromUtf8("actionAdd_Subtitle_Track"))
self.actionPlay = QtGui.QAction(MainWindow)
self.actionPlay.setObjectName(_fromUtf8("actionPlay"))
self.actionPause = QtGui.QAction(MainWindow)
self.actionPause.setObjectName(_fromUtf8("actionPause"))
self.actionStop = QtGui.QAction(MainWindow)
self.actionStop.setObjectName(_fromUtf8("actionStop"))
self.actionPrevious = QtGui.QAction(MainWindow)
self.actionPrevious.setObjectName(_fromUtf8("actionPrevious"))
self.actionNext = QtGui.QAction(MainWindow)
self.actionNext.setObjectName(_fromUtf8("actionNext"))
self.actionJump_to_specific_time = QtGui.QAction(MainWindow)
self.actionJump_to_specific_time.setObjectName(_fromUtf8("actionJump_to_specific_time"))
self.actionIncrease_Volume = QtGui.QAction(MainWindow)
self.actionIncrease_Volume.setObjectName(_fromUtf8("actionIncrease_Volume"))
self.actionDecrease_Volume = QtGui.QAction(MainWindow)
self.actionDecrease_Volume.setObjectName(_fromUtf8("actionDecrease_Volume"))
self.actionMute = QtGui.QAction(MainWindow)
self.actionMute.setObjectName(_fromUtf8("actionMute"))
self.actionFullscreen = QtGui.QAction(MainWindow)
self.actionFullscreen.setCheckable(False)
self.actionFullscreen.setObjectName(_fromUtf8("actionFullscreen"))
self.actionShift_forward_by_1_second = QtGui.QAction(MainWindow)
self.actionShift_forward_by_1_second.setObjectName(_fromUtf8("actionShift_forward_by_1_second"))
self.actionShift_backward_by_1_second = QtGui.QAction(MainWindow)
self.actionShift_backward_by_1_second.setObjectName(_fromUtf8("actionShift_backward_by_1_second"))
self.menuFile.addAction(self.actionOpen_File)
self.menuFile.addAction(self.actionOpen_Multiple_Files)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionExit)
self.menuSpeed.addAction(self.actionX0_5)
self.menuSpeed.addAction(self.actionX_1)
self.menuSpeed.addAction(self.actionX_2)
self.menuSpeed.addAction(self.actionX_4)
self.menuSpeed.addAction(self.actionX_8)
self.menuPlayback.addAction(self.actionJump_Forward)
self.menuPlayback.addAction(self.actionJump_Backward)
self.menuPlayback.addAction(self.menuSpeed.menuAction())
self.menuPlayback.addSeparator()
self.menuPlayback.addAction(self.actionPlay)
self.menuPlayback.addAction(self.actionStop)
self.menuPlayback.addSeparator()
self.menuPlayback.addAction(self.actionPrevious)
self.menuPlayback.addAction(self.actionNext)
self.menuPlayback.addSeparator()
self.menuPlayback.addAction(self.actionJump_to_specific_time)
self.menu_Subtitles.addAction(self.actionAdd_Subtitle_Track)
self.menu_Subtitles.addSeparator()
self.menu_Subtitles.addAction(self.actionShift_forward_by_1_second)
self.menu_Subtitles.addAction(self.actionShift_backward_by_1_second)
self.menu_Audio.addAction(self.actionIncrease_Volume)
self.menu_Audio.addAction(self.actionDecrease_Volume)
self.menu_Audio.addAction(self.actionMute)
self.menu_Audio.addSeparator()
self.menu_Video.addAction(self.actionFullscreen)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuPlayback.menuAction())
self.menubar.addAction(self.menu_Subtitles.menuAction())
self.menubar.addAction(self.menu_Audio.menuAction())
self.menubar.addAction(self.menu_Video.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
self.timeDone.setText(_translate("MainWindow", "00:00:00", None))
self.timeLeft.setText(_translate("MainWindow", "00:00:00", None))
self.muteButton.setToolTip(_translate("MainWindow", "volume", None))
self.fullscreenButton.setToolTip(_translate("MainWindow", "Fullscreen", None))
self.playlistButton.setToolTip(_translate("MainWindow", "Playlist", None))
self.stopButton.setToolTip(_translate("MainWindow", "Stop", None))
self.previous.setToolTip(_translate("MainWindow", "Previous", None))
self.playState.setToolTip(_translate("MainWindow", "Play/Pause", None))
self.next.setToolTip(_translate("MainWindow", "Next", None))
self.menuFile.setTitle(_translate("MainWindow", "&Media", None))
self.menuPlayback.setTitle(_translate("MainWindow", "P&layback", None))
self.menuSpeed.setTitle(_translate("MainWindow", "&Speed", None))
self.menu_Subtitles.setTitle(_translate("MainWindow", "&Subtitles", None))
self.menu_Audio.setTitle(_translate("MainWindow", "&Audio ", None))
self.menu_Video.setTitle(_translate("MainWindow", "&Video", None))
self.actionOpen_File.setText(_translate("MainWindow", "&Open File", None))
self.actionOpen_File.setShortcut(_translate("MainWindow", "Ctrl+O", None))
self.actionExit.setText(_translate("MainWindow", "&Exit", None))
self.actionExit.setShortcut(_translate("MainWindow", "Ctrl+Q", None))
self.actionOpen_Multiple_Files.setText(_translate("MainWindow", "Open &Multiple Files", None))
self.actionOpen_Multiple_Files.setShortcut(_translate("MainWindow", "Ctrl+Shift+O", None))
self.actionAdd_Subtitle_File.setText(_translate("MainWindow", "&Add Subtitle File", None))
self.actionJump_Forward.setText(_translate("MainWindow", "&Jump Forward", None))
self.actionJump_Forward.setShortcut(_translate("MainWindow", "Ctrl+Shift++", None))
self.actionJump_Backward.setText(_translate("MainWindow", "Jump &Backward", None))
self.actionJump_Backward.setShortcut(_translate("MainWindow", "Ctrl+Shift+-", None))
self.actionX0_5.setText(_translate("MainWindow", "&x 0.5", None))
self.actionX_1.setText(_translate("MainWindow", "&Normal Speed", None))
self.actionX_2.setText(_translate("MainWindow", "x &2", None))
self.actionX_4.setText(_translate("MainWindow", "x &4", None))
self.actionX_8.setText(_translate("MainWindow", "x &8", None))
self.actionAdd_Subtitle_Track.setText(_translate("MainWindow", "&Add Subtitle Track", None))
self.actionPlay.setText(_translate("MainWindow", "&Play/Pause", None))
self.actionPlay.setShortcut(_translate("MainWindow", "Space", None))
self.actionPause.setText(_translate("MainWindow", "Pause", None))
self.actionPause.setShortcut(_translate("MainWindow", "Space", None))
self.actionStop.setText(_translate("MainWindow", "St&op", None))
self.actionStop.setShortcut(_translate("MainWindow", "Ctrl+Shift+S", None))
self.actionPrevious.setText(_translate("MainWindow", "P&revious", None))
self.actionPrevious.setShortcut(_translate("MainWindow", "Ctrl+Shift+Left", None))
self.actionNext.setText(_translate("MainWindow", "&Next", None))
self.actionNext.setShortcut(_translate("MainWindow", "Ctrl+Shift+Right", None))
self.actionJump_to_specific_time.setText(_translate("MainWindow", "J&ump to specific time", None))
self.actionJump_to_specific_time.setShortcut(_translate("MainWindow", "Ctrl+T", None))
self.actionIncrease_Volume.setText(_translate("MainWindow", "&Increase Volume", None))
self.actionIncrease_Volume.setShortcut(_translate("MainWindow", "Ctrl+Up", None))
self.actionDecrease_Volume.setText(_translate("MainWindow", "&Decrease Volume", None))
self.actionDecrease_Volume.setShortcut(_translate("MainWindow", "Ctrl+Down", None))
self.actionMute.setText(_translate("MainWindow", "&Mute", None))
self.actionMute.setShortcut(_translate("MainWindow", "M", None))
self.actionFullscreen.setText(_translate("MainWindow", "&Fullscreen", None))
self.actionFullscreen.setShortcut(_translate("MainWindow", "F", None))
self.actionShift_forward_by_1_second.setText(_translate("MainWindow", "&Shift Forward By 1 Second", None))
self.actionShift_forward_by_1_second.setShortcut(_translate("MainWindow", "H", None))
self.actionShift_backward_by_1_second.setText(_translate("MainWindow", "Shift &Backward By 1 Second", None))
self.actionShift_backward_by_1_second.setShortcut(_translate("MainWindow", "G", None))
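# Usage sketch (not part of the generated file): the standard PyQt pattern
# applies this Ui class to a QMainWindow instance.
#
#   import sys
#   app = QtGui.QApplication(sys.argv)
#   window = QtGui.QMainWindow()
#   ui = Ui_MainWindow()
#   ui.setupUi(window)
#   window.show()
#   sys.exit(app.exec_())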
|
SergeySatskiy/codimension
|
codimension/utils/config.py
|
# -*- coding: utf-8 -*-
#
# codimension - graphics python two-way code editor and analyzer
# Copyright (C) 2010-2017 Sergey Satskiy <sergey.satskiy@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""A few constants which do not depend on other project files"""
# Default encoding for the cases when:
# - the encoding could not be detected
# - ascii was detected (utf-8 replaces it to be on the safe side)
DEFAULT_ENCODING = 'utf-8'
# File encoding used for various settings and project files
SETTINGS_ENCODING = 'utf-8'
# Directory to store Codimension settings and projects
CONFIG_DIR = '.codimension3'
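# Illustrative sketch (not part of the original module): how a caller might
# resolve the settings directory from CONFIG_DIR; the helper name is made up.
#
#   import os.path
#   def config_dir_path():
#       return os.path.join(os.path.expanduser('~'), CONFIG_DIR)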
|
MLOrsini/ProjetWEB
|
INSport/tableaubord/migrations/0042_auto_20170507_2352.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-07 23:52
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tableaubord', '0041_auto_20170507_2344'),
]
operations = [
migrations.AlterField(
model_name='evenement',
name='dateheure',
field=models.DateTimeField(default=datetime.datetime(2017, 5, 7, 23, 52, 53, 961581), verbose_name='Date/heure evenement '),
),
]
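# Note (not part of the original migration): datetime.datetime(2017, 5, 7, ...)
# freezes the moment makemigrations ran as the column default. When "now at
# insert time" is intended, the usual alternative is a callable, e.g.:
#
#   from django.utils import timezone
#   field = models.DateTimeField(default=timezone.now,
#                                verbose_name='Date/heure evenement ')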
|
CopyChat/Plotting
|
Python/download_era-interim.py
|
#!/usr/bin/python
import os
from ecmwfapi import ECMWFDataServer
server = ECMWFDataServer()
time=["06"]
year=["2013"]
param=["129.128","130.128","131.128","132.128","157.128","151.128"]
nam=["hgt","air","uwnd","vwnd","rhum","psl"]
#month=["01","02","03","04","05","06","07","08","09","10","11","12"]
for y in year:
#for m in month:
for p in range(len(param)):
for t in time:
date=y+"-01-01/to/"+y+"-12-31"
print date
print nam[p]+"."+y+"."+t+".nc"
os.system('echo "############################################################# ^_^"')
server.retrieve({
'dataset' : "interim",
'levelist' : "1/2/3/5/7/10/20/30/50/70/100/125/150/175/200/225/250/300/350/400/450/500/550/600/650/700/750/775/800/825/850/875/900/925/950/975/1000",
'step' : "0",
'number' : "all",
'levtype' : "pl", # set to "sl" for surface level
'date' : date,
'time' : t ,
'origin' : "all",
'type' : "an",
'param' : "129.128/130.128/131.128/132.128/157.128",
'param' : param[p],
'area' : "0/0/-40/100", # Four values as North/West/South/East
'grid' : "1.5/1.5", # Two values: West-East/North-South increments
'format' : "netcdf", # if grib, just comment this line
'target' : nam[p]+"."+y+"."+t+".nc"
})
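# Illustrative sketch (not part of the original script): a surface-level
# request would drop 'levelist' and use levtype "sfc" (MARS naming), e.g.
# for mean sea level pressure (151.128) only:
#
#   server.retrieve({
#       'dataset' : "interim",
#       'levtype' : "sfc",
#       'date'    : "2013-01-01/to/2013-12-31",
#       'time'    : "06",
#       'type'    : "an",
#       'param'   : "151.128",
#       'grid'    : "1.5/1.5",
#       'format'  : "netcdf",
#       'target'  : "psl.2013.06.sfc.nc",
#   })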
|
antonszilasi/honeybeex
|
honeybeex/honeybee/radiance/command/gensky.py
|
# coding=utf-8
from _commandbase import RadianceCommand
from ..datatype import RadiancePath, RadianceTuple
from ..parameters.gensky import GenskyParameters
import os
class Gensky(RadianceCommand):
u"""
gensky - Generate a RADIANCE description of the sky.
The attributes for this class and their data descriptors are given below.
Please note that the first two inputs for each descriptor are for internal
naming purposes only.
Attributes:
outputName: An optional name for output file name (Default: 'untitled').
monthDayHour: A tuple containing inputs for month, day and hour.
genskyParameters: Radiance parameters for gensky. If None Default
parameters will be set. You can use self.genskyParameters to view,
add or remove the parameters before executing the command.
Usage:
from honeybee.radiance.parameters.gensky import GenSkyParameters
from honeybee.radiance.command.gensky import GenSky
# create and modify genskyParameters. In this case a sunny sky without
# a sun will be generated.
gnskyParam = GenSkyParameters()
gnskyParam.sunnySkyNoSun = True
# create the gensky Command.
gnsky = GenSky(monthDayHour=(1,1,11), genskyParameters=gnskyParam,
outputName = r'd:/sunnyWSun_010111.sky' )
# run gensky
gnsky.execute()
"""
monthDayHour = RadianceTuple('monthDayHour', 'month day hour', tupleSize=3,
testType=False)
outputFile = RadiancePath('outputFile', descriptiveName='output sky file',
relativePath=None, checkExists=False)
def __init__(self, outputName='untitled', monthDayHour=None,
genskyParameters=None):
"""Init command."""
RadianceCommand.__init__(self)
self.outputFile = outputName if outputName.lower().endswith(".sky") \
else outputName + ".sky"
"""results file for sky (Default: untitled)"""
self.monthDayHour = monthDayHour
self.genskyParameters = genskyParameters
@classmethod
def fromSkyType(cls, outputName='untitled', monthDayHour=(1, 21, 12),
skyType=0, latitude=None, longitude=None, meridian=None):
"""Create a sky by sky type.
Args:
outputName: An optional name for output file name (Default: 'untitled').
monthDayHour: A tuple containing inputs for month, day and hour.
skyType: An integer between 0-5 for the CIE sky type.
0: [+s] Sunny with sun, 1: [-s] Sunny without sun,
2: [+i] Intermediate with sun, 3: [-i] Intermediate with no sun,
4: [-c] Cloudy overcast sky, 5: [-u] Uniform cloudy sky
latitude: [-a] A float number to indicate site latitude. Negative
angle indicates south latitude.
longitude: [-o] A float number to indicate site longitude. Negative
angle indicates east longitude.
meridian: [-m] A float number to indicate site meridian west of
Greenwich.
"""
_skyParameters = GenskyParameters(latitude=latitude, longitude=longitude,
meridian=meridian)
# modify parameters based on sky type
try:
skyType = int(skyType)
except (TypeError, ValueError):
raise ValueError("skyType should be an integer between 0-5.")
assert 0 <= skyType <= 5, "Sky type should be an integer between 0-5."
if skyType == 0:
_skyParameters.sunnySky = True
elif skyType == 1:
_skyParameters.sunnySky = False
elif skyType == 2:
_skyParameters.intermSky = True
elif skyType == 3:
_skyParameters.intermSky = False
elif skyType == 4:
_skyParameters.cloudySky = True
elif skyType == 5:
_skyParameters.uniformCloudySky = True
return cls(outputName=outputName, monthDayHour=monthDayHour,
genskyParameters=_skyParameters)
@classmethod
def createUniformSkyfromIlluminanceValue(cls, outputName="untitled",
illuminanceValue=10000):
"""Uniform CIE sky based on illuminance value.
Attributes:
outputName: An optional name for output file name (Default: 'untitled').
illuminanceValue: Desired illuminance value in lux
"""
assert float(illuminanceValue) >= 0, "Illuminance value can't be negative."
_skyParameters = GenskyParameters(zenithBrightHorzDiff=illuminanceValue / 179.0)
return cls(outputName=outputName, genskyParameters=_skyParameters)
@classmethod
def fromRadiationValues(cls):
"""Create a sky based on sky radiation values."""
raise NotImplementedError()
@property
def genskyParameters(self):
"""Get and set genskyParameters."""
return self.__genskyParameters
@genskyParameters.setter
def genskyParameters(self, genskyParam):
self.__genskyParameters = genskyParam if genskyParam is not None \
else GenskyParameters()
assert hasattr(self.genskyParameters, "isRadianceParameters"), \
"input genskyParameters is not a valid parameters type."
def toRadString(self, relativePath=False):
"""Return full command as a string."""
# generate the name from self.weaFile
radString = "%s %s %s > %s" % (
self.normspace(os.path.join(self.radbinPath, 'gensky')),
self.monthDayHour.toRadString().replace("-monthDayHour ", ""),
self.genskyParameters.toRadString(),
self.normspace(self.outputFile.toRadString())
)
return radString
@property
def inputFiles(self):
"""Input files for this command."""
return None
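# Illustrative sketch (not part of the original module): building a cloudy
# overcast sky (type 4) via the classmethod; the output name is a placeholder.
#
#   sky = Gensky.fromSkyType(outputName='overcast', monthDayHour=(6, 21, 12),
#                            skyType=4)
#   sky.execute()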
|
bradrowesf/AnkiDeckMaker
|
tests/notetranslator_tests.py
|
from nose.tools import *
from DeckMaker.notetranslator import NoteTranslator
def setup():
print "SETUP!"
def teardown():
print "TEAR DOWN!"
def test_basic():
t = NoteTranslator()
assert_equal(t.GetMidiCodeForHumans("E5"),64)
assert_equal(t.GetMidiCodeForHumans("C1"),12)
assert_equal(t.GetMidiCodeForHumans("Ab6"),80)
assert_equal(t.GetMidiCodeForHumans("Gb7"),90)
assert_equal(t.GetMidiCodeForHumans("D#2"),27)
pass
def test_hex():
t = NoteTranslator()
assert_equal(t.GetHexString(t.GetMidiCodeForHumans("E5")),"40")
assert_equal(t.GetHexString(t.GetMidiCodeForHumans("C1")),"c")
assert_equal(t.GetHexString(t.GetMidiCodeForHumans("Ab6")),"50")
assert_equal(t.GetHexString(t.GetMidiCodeForHumans("Gb7")),"5a")
assert_equal(t.GetHexString(t.GetMidiCodeForHumans("D#2")),"1b")
pass
def test_GetTriadCodes():
t = NoteTranslator()
assert_equal(t.GetTriadCodes( t.GetMidiCodeForHumans("C4"), "minor", 3),[48, 53, 56])
assert_equal(t.GetTriadCodes( t.GetMidiCodeForHumans("Ab2"), "major", 2),[32, 40, 35])
assert_equal(t.GetTriadCodes( t.GetMidiCodeForHumans("G#6"), "minor", 1),[80, 83, 87])
def test_GetTriadHexCodeStrings():
t = NoteTranslator()
assert_equal(t.GetTriadHexCodeStrings( t.GetMidiCodeForHumans("C4"), "major", 1),['30', '34', '37'])
assert_equal(t.GetTriadHexCodeStrings( t.GetMidiCodeForHumans("Ab2"), "major", 2),['20', '28', '23'])
assert_equal(t.GetTriadHexCodeStrings( t.GetMidiCodeForHumans("G#6"), "minor", 1),['50', '53', '57'])
|
CaesarTjalbo/musictagger
|
mp3names/model_classes.py
|
# -*- coding: utf-8 -*-
import sys
import os
import logging
import random
import PyQt4
from PyQt4.QtCore import *
#from PyQt4.QtCore import QAbstractTableModel
import constants
class Model(QAbstractTableModel):
keys = list()
modelType = None
def __init__(self, parent = None):
''' '''
self.log = logging.getLogger('Model')
#self.log.debug('__init__ start')
super(QAbstractTableModel, self).__init__(parent)
def rowCount(self, parent = None):
''' '''
#self.log.debug('rowCount start')
#self.log.debug('rowCount end')
if hasattr(self, 'album') and self.album:
if hasattr(self.album, 'rows'):
return len(self.album.rows)
return 0
def columnCount(self, parent = None):
''' '''
#self.log.debug('columnCount start')
#self.log.debug('columnCount end')
return len(self.keys)
def data(self, index, role = None):
''' '''
#self.log.debug('data start')
if index.isValid():
if 0 <= index.row() < len(self.album.rows):
if role == Qt.DisplayRole or role == Qt.ToolTipRole or role == Qt.EditRole:
return self.album.rows[index.row()][self.keys[index.column()]]
#self.log.debug('data end')
return QVariant()
def setData(self, index, value, role):
''' '''
#self.log.debug('setData start')
if index.isValid() and role == Qt.EditRole:
key = self.keys[index.column()]
row = index.row()
value = unicode(value.toString())
self.album.rows[index.row()][key] = value
self.emit(SIGNAL('dataChanged'), index, index)
#self.log.debug('setData end')
return True
def headerData(self, section, orientation, role):
''' '''
#self.log.debug('headerData start' + str(section))
if section >= 0 and section < len(self.keys):
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return self.keys[section]
#self.log.debug('headerData end ')
return QVariant()
def flags(self, index):
''' '''
#self.log.debug('flags start')
if self.modelType == constants.ModelType.ModelTypeFinal:
return super(QAbstractTableModel, self).flags(index) | Qt.ItemIsEditable
#self.log.debug('flags end')
return super(QAbstractTableModel, self).flags(index)
def getModelType(self):
''' '''
#self.log.debug('getModelType start')
#self.log.debug('getModelType end')
return self.modelType
#def getState(self):
#''' '''
##self.log.debug('getState start')
##self.log.debug('getState end')
#return None
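# Usage sketch (not part of the original module): Model subclasses plug into
# Qt's model/view machinery; a hypothetical viewer (QtGui import assumed)
# might do:
#
#   app = PyQt4.QtGui.QApplication([])
#   view = PyQt4.QtGui.QTableView()
#   view.setModel(SomeModelSubclass())   # a subclass providing keys and album
#   view.show()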
|
byDimasik/Magic_Ping
|
client.py
|
import socket
import argparse
import sys
import magic_ping
import os
import settings
import signal
import logging
import struct
logging.basicConfig(format=u'%(levelname)-8s [%(asctime)s] %(message)s', level=logging.DEBUG, filename=u'client.log')
# Handle CTRL+C
def signal_handler(sig, frame):
print("\nSTOP CLIENT.")
logging.info("STOP CLIENT.")
exit(0)
# Command-line argument parser
def create_cmd_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', required=True, type=argparse.FileType(mode='rb'))
parser.add_argument('-a', '--address', required=True)
parser.add_argument('-c', '--cypher', action='store_const', const=True)
return parser
signal.signal(signal.SIGINT, signal_handler)
if __name__ == '__main__':
p = create_cmd_parser()
arguments = p.parse_args(sys.argv[1:])
file = arguments.file
file_name = file.name
file_size = os.stat(file_name).st_size
address = arguments.address
ID = 1
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
packet_number = 1
data = file_name.encode()
if arguments.cypher:
data = struct.pack('b', 1) + data
else:
data = struct.pack('b', 0) + data
logging.debug("Start sending file to %s" % address)
magic_ping.send_ping(s, address, ID, data, packet_number)
print('start sending')
already_sent = 0 # size of the part already sent
while True:
data = file.read(settings.DATA_SIZE)
if arguments.cypher:
data = [a ^ b for (a, b) in zip(data, settings.KEY)] # XOR-encrypt with the key
data = bytes(data)
if not data:
break
already_sent += len(data)
packet_number += 1
magic_ping.send_ping(s, address, ID, data, packet_number)
logging.info('Sent: %.2f %%' % (already_sent / file_size * 100))
print('Sent: %.2f %%' % (already_sent / file_size * 100))
magic_ping.send_ping(s, address, ID, bytes(0), packet_number=0)
logging.debug("Packets sent: %d" % packet_number)
print("send:", packet_number)
file.close()
client_address, packet_number, checksum = magic_ping.receive_ping(s, ID, {}) # verify the transfer completed correctly
if checksum and settings.md5_checksum(file_name) != checksum.decode():
logging.warning("Файл передался с ошибками!!!")
print("Файл передался с ошибками!!!")
s.close()
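# Note (not part of the original client): XOR with the same key is its own
# inverse, which is why the receiver can decrypt with the shared settings.KEY.
# Made-up byte values:
#
#   key = bytes([0x5a, 0x3c])
#   plain = bytes([0x10, 0x20])
#   enc = bytes(a ^ b for a, b in zip(plain, key))
#   assert bytes(a ^ b for a, b in zip(enc, key)) == plain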
|
cloudnull/tribble-api
|
tribble/api/views/zones_rest.py
|
# =============================================================================
# Copyright [2013] [Kevin Carter]
# License Information :
# This software has no warranty, it is provided 'as is'. It is your
# responsibility to validate the behavior of the routines and its accuracy
# using the code provided. Consult the GNU General Public license for further
# details (see GNU General Public License).
# http://www.gnu.org/licenses/gpl.html
# =============================================================================
import logging
import traceback
import flask
from tribble.api import application
from tribble.api import utils
from tribble.common.db import db_proc
from tribble.common.db import zone_status
from tribble.common import rpc
from tribble.common import system_config
mod = flask.Blueprint('zones', __name__)
LOG = logging.getLogger('tribble-api')
CONFIG = system_config.ConfigurationSetup()
DEFAULT = CONFIG.config_args()
DB = application.DB
@mod.route('/v1/schematics/<sid>/zones', methods=['GET'])
def zones_get(sid):
"""Return a list of zones.
Method is accessible with GET /v1/schematics/<sid>/zones
:param sid: ``str`` # schematic ID
:return json, status: ``tuple``
"""
parsed_data = utils.zone_basic_handler(sid=sid)
if parsed_data[0] is False:
return utils.return_msg(msg=parsed_data[1], status=parsed_data[2])
else:
_success, schematic, zones, user_id = parsed_data
LOG.debug('%s %s %s %s', _success, schematic, zones, user_id)
try:
return_zones = []
for zone in zones:
dzone = utils.pop_ts(zone.__dict__)
instances = db_proc.get_instances(zon=zone)
if instances:
dzone['instance_quantity'] = len(instances)
return_zones.append(dzone)
except Exception:
LOG.error(traceback.format_exc())
return utils.return_msg(msg='Unexpected Error', status=500)
else:
return utils.return_msg(msg=return_zones, status=200)
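# Illustrative sketch (not part of the original module): exercising the
# endpoint above; host, port, and the schematic ID are placeholders.
#
#   import requests
#   resp = requests.get('http://localhost:5000/v1/schematics/SID123/zones')
#   print resp.status_code, resp.json()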
@mod.route('/v1/schematics/<sid>/zones/<zid>', methods=['GET'])
def zone_get(sid, zid):
"""Return a zone.
Method is accessible with GET /v1/schematics/<sid>/zones/<zid>
:param sid: ``str`` # schematic ID
:param zid: ``str`` # Zone ID
:return json, status: ``tuple``
"""
parsed_data = utils.zone_basic_handler(sid=sid, zid=zid)
if parsed_data[0] is False:
return utils.return_msg(msg=parsed_data[1], status=parsed_data[2])
else:
_success, schematic, zone, user_id = parsed_data
_zone = utils.pop_ts(temp=zone.__dict__)
instances = db_proc.get_instances(zon=zone)
if instances:
_zone['instances'] = [
utils.pop_ts(temp=instance.__dict__) for instance in instances
]
LOG.debug('%s %s %s %s', _success, schematic, zone, user_id)
return utils.return_msg(msg=_zone, status=200)
@mod.route('/v1/schematics/<sid>/zones/<zid>', methods=['DELETE'])
def zone_delete(sid=None, zid=None):
"""Delete a Zone.
Method is accessible with DELETE /v1/schematics/<sid>/zones/<zid>
:param sid: ``str`` # schematic ID
:param zid: ``str`` # Zone ID
:return json, status: ``tuple``
"""
parsed_data = utils.zone_basic_handler(sid=sid, zid=zid)
if parsed_data[0] is False:
return utils.return_msg(msg=parsed_data[1], status=parsed_data[2])
else:
_success, schematic, zone, user_id = parsed_data
if zone.zone_state == 'BUILDING':
build_response = (
'Zone Delete can not be performed because Zone "%s" has a'
' Pending Status' % zone.id
)
return utils.return_msg(msg=build_response, status=200)
LOG.debug('%s %s %s %s', _success, schematic, zone, user_id)
try:
config = db_proc.get_configmanager(skm=schematic)
instances = db_proc.get_instances(zon=zone)
packet = utils.build_cell(
job='zone_delete',
schematic=schematic,
zone=zone,
config=config
)
packet['uuids'] = [instance.instance_id for instance in instances]
rpc.default_publisher(message=packet)
sess = DB.session
zone_status.ZoneState(cell=packet).delete()
except Exception:
LOG.error(traceback.format_exc())
return utils.return_msg(msg='unexpected error', status=500)
else:
db_proc.commit_session(session=sess)
return utils.return_msg(msg='deletes received', status=203)
@mod.route('/v1/schematics/<sid>/zones/<zid>/purge', methods=['DELETE'])
def zone_purge(sid=None, zid=None):
"""purge a Zone.
This is used to remove all indication of a zone without attempting to
disconnect or otherwise clean up the zone or any of its may be attached
instances.
Method is accessible with DELETE /v1/schematics/<sid>/zones/<zid>/purge
:param sid: ``str`` # schematic ID
:param zid: ``str`` # Zone ID
:return json, status: ``tuple``
"""
parsed_data = utils.zone_basic_handler(sid=sid, zid=zid)
if parsed_data[0] is False:
return utils.return_msg(msg=parsed_data[1], status=parsed_data[2])
else:
_success, schematic, zone, user_id = parsed_data
LOG.debug('%s %s %s %s', _success, schematic, zone, user_id)
try:
sess = DB.session
db_proc.delete_item(session=sess, item=zone)
except Exception:
LOG.error(traceback.format_exc())
return utils.return_msg(msg='unexpected error', status=500)
else:
db_proc.commit_session(session=sess)
return utils.return_msg(
msg='zone %s was purged' % zone.id, status=203
)
@mod.route('/v1/schematics/<sid>/zones/<zid>', methods=['PUT'])
def zone_put(sid=None, zid=None):
"""Update a Zone.
Method is accessible with PUT /v1/schematics/<sid>/zones/<zid>
:param sid: ``str`` # schematic ID
:param zid: ``str`` # Zone ID
:return json, status: ``tuple``
"""
parsed_data = utils.zone_data_handler(sid=sid)
if parsed_data[0] is False:
return utils.return_msg(msg=parsed_data[1], status=parsed_data[2])
else:
_success, schematic, payload, user_id = parsed_data
LOG.debug('%s %s %s %s', _success, schematic, payload, user_id)
zone = db_proc.get_zones_by_id(skm=schematic, zid=zid)
if not zone:
return utils.return_msg(msg='no zones found', status=404)
try:
sess = DB.session
sess = db_proc.put_zone(
session=sess,
zon=zone,
put=payload
)
except Exception:
LOG.error(traceback.format_exc())
return utils.return_msg(msg='unexpected error', status=500)
else:
db_proc.commit_session(session=sess)
return utils.return_msg(msg='updates received', status=201)
@mod.route('/v1/schematics/<sid>/zones', methods=['POST'])
def zone_post(sid=None):
"""Post a Zone.
Method is accessible with POST /v1/schematics/<sid>/zones
:param sid: ``str`` # schematic ID
:return json, status: ``tuple``
"""
parsed_data = utils.zone_data_handler(sid=sid, check_for_zone=True)
if parsed_data[0] is False:
return utils.return_msg(msg=parsed_data[1], status=parsed_data[2])
else:
_success, schematic, payload, user_id = parsed_data
LOG.debug('%s %s %s %s', _success, schematic, payload, user_id)
config = db_proc.get_configmanager(skm=schematic)
try:
sess = DB.session
for _zn in payload['zones']:
ssh_user = _zn.get('ssh_user')
pub = _zn.get('ssh_key_pub')
pri = _zn.get('ssh_key_pri')
key_name = _zn.get('key_name')
ssh_key = db_proc.post_instanceskeys(
pub=pub,
pri=pri,
sshu=ssh_user,
key_name=key_name
)
db_proc.add_item(session=sess, item=ssh_key)
zone = db_proc.post_zones(
skm=schematic,
zon=_zn,
ssh=ssh_key
)
db_proc.add_item(session=sess, item=zone)
packet = utils.build_cell(
job='build',
schematic=schematic,
zone=zone,
sshkey=ssh_key,
config=config
)
rpc.default_publisher(message=packet)
except Exception:
LOG.error(traceback.format_exc())
return utils.return_msg(msg='Unexpected Error', status=500)
else:
db_proc.commit_session(session=sess)
        msg = 'Application requests have been received for Schematic %s' % sid
return utils.return_msg(msg=msg, status=200)
@mod.route('/v1/schematics/<sid>/zones/<zid>/redeploy', methods=['POST'])
def redeploy_zone(sid=None, zid=None):
"""Redploy a zone.
This method will interate over an existing zone and ensure that all things
known in the zone are built and in an active state.
Method is accessible with POST /v1/schematics/<sid>/zones
:param sid: ``str`` # schematic ID
:param zid: ``str`` # Zone ID
:return json, status: ``tuple``
"""
parsed_data = utils.zone_basic_handler(sid=sid, zid=zid)
if parsed_data[0] is False:
return utils.return_msg(msg=parsed_data[1], status=parsed_data[2])
else:
_success, schematic, zone, user_id = parsed_data
LOG.debug('%s %s %s %s', _success, schematic, zone, user_id)
config = db_proc.get_configmanager(skm=schematic)
key = db_proc.get_instanceskeys(zon=zone)
ints = db_proc.get_instances(zon=zone)
base_qty = int(zone.quantity)
numr_qty = len(ints)
if base_qty > numr_qty:
difference = base_qty - numr_qty
packet = utils.build_cell(
job='redeploy_build',
schematic=schematic,
zone=zone,
sshkey=key,
config=config
)
packet['quantity'] = difference
LOG.debug(packet)
rpc.default_publisher(message=packet)
msg = 'Building %s Instances for Zone %s' % (difference, zone.id)
return utils.return_msg(msg=msg, status=200)
elif base_qty < numr_qty:
difference = numr_qty - base_qty
packet = utils.build_cell(
job='redeploy_delete',
schematic=schematic,
zone=zone,
sshkey=key,
config=config
)
instances = [ins.instance_id for ins in ints]
remove_instances = instances[:difference]
packet['uuids'] = remove_instances
LOG.debug(packet)
remove_ids = [
ins for ins in ints
if ins.instance_id in remove_instances
]
try:
sess = DB.session
for instance_id in remove_ids:
db_proc.delete_item(session=sess, item=instance_id)
except Exception:
LOG.error(traceback.format_exc())
return utils.return_msg(msg='Unexpected Error', status=500)
else:
rpc.default_publisher(message=packet)
db_proc.commit_session(session=sess)
msg = 'Removing %s Instances for Zone %s' % (difference, zone.id)
return utils.return_msg(msg=msg, status=200)
else:
return utils.return_msg(msg='nothing to do', status=200)
@mod.route('/v1/schematics/<sid>/zones/<zid>/resetstate', methods=['POST'])
def reset_zone_state(sid=None, zid=None):
r"""Reset the state of a zone to active.
This method will reset the state of an existing zone no matter the current
state. The new state after invoking this method will be set to
"ACTIVE RESET"
    Method is accessible with POST /v1/schematics/<sid>/zones/<zid>/resetstate
:param sid: ``str`` # schematic ID
:param zid: ``str`` # Zone ID
:return json, status: ``tuple``
"""
parsed_data = utils.zone_basic_handler(sid=sid, zid=zid)
if parsed_data[0] is False:
return utils.return_msg(msg=parsed_data[1], status=parsed_data[2])
else:
_success, schematic, zone, user_id = parsed_data
LOG.debug('%s %s %s %s', _success, schematic, zone, user_id)
cell = {'zone_state': 'ACTIVE RESET'}
try:
sess = DB.session
db_proc.put_zone(session=sess, zon=zone, put=cell)
except Exception:
LOG.error(traceback.format_exc())
return utils.return_msg(msg='unexpected error', status=500)
else:
db_proc.commit_session(session=sess)
return utils.return_msg(
msg='Zone State for %s has been Reset' % zid, status=200
)
|
PlushBeaver/FanFicFare
|
fanficfare/adapters/adapter_literotica.py
|
# -*- coding: utf-8 -*-
# Copyright 2013 Fanficdownloader team, 2015 FanFicFare team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import logging
logger = logging.getLogger(__name__)
import re
import urllib2
import urlparse
from bs4.element import Comment
from ..htmlcleanup import stripHTML
from .. import exceptions as exceptions
from base_adapter import BaseSiteAdapter, makeDate
class LiteroticaSiteAdapter(BaseSiteAdapter):
def __init__(self, config, url):
BaseSiteAdapter.__init__(self, config, url)
logger.debug("LiteroticaComAdapter:__init__ - url='%s'" % url)
self.decode = ["utf8",
"Windows-1252"] # 1252 is a superset of iso-8859-1.
# Most sites that claim to be
# iso-8859-1 (and some that claim to be
# utf8) are really windows-1252.
# Each adapter needs to have a unique site abbreviation.
self.story.setMetadata('siteabbrev','litero')
# normalize to first chapter. Not sure if they ever have more than 2 digits.
storyId = self.parsedUrl.path.split('/',)[2]
# replace later chapters with first chapter but don't remove numbers
# from the URL that disambiguate stories with the same title.
        storyId = re.sub(r"-ch-?\d\d", "", storyId)
self.story.setMetadata('storyId', storyId)
## accept m(mobile)url, but use www.
url = re.sub("^(www|german|spanish|french|dutch|italian|romanian|portuguese|other)\.i",
"\1",
url)
## strip ?page=...
url = re.sub("\?page=.*$", "", url)
## set url
self._setURL(url)
# The date format will vary from site to site.
# http://docs.python.org/library/datetime.html#strftime-strptime-behavior
self.dateformat = "%m/%d/%y"
@staticmethod
def getSiteDomain():
return 'literotica.com'
@classmethod
def getAcceptDomains(cls):
return ['www.literotica.com',
'www.i.literotica.com',
'german.literotica.com',
'german.i.literotica.com',
'spanish.literotica.com',
'spanish.i.literotica.com',
'french.literotica.com',
'french.i.literotica.com',
'dutch.literotica.com',
'dutch.i.literotica.com',
'italian.literotica.com',
'italian.i.literotica.com',
'romanian.literotica.com',
'romanian.i.literotica.com',
'portuguese.literotica.com',
'portuguese.i.literotica.com',
'other.literotica.com',
'other.i.literotica.com']
@classmethod
def getSiteExampleURLs(cls):
return "http://www.literotica.com/s/story-title https://www.literotica.com/s/story-title http://portuguese.literotica.com/s/story-title http://german.literotica.com/s/story-title"
def getSiteURLPattern(self):
return r"https?://(www|german|spanish|french|dutch|italian|romanian|portuguese|other)(\.i)?\.literotica\.com/s/([a-zA-Z0-9_-]+)"
def getCategories(self, soup):
if self.getConfig("use_meta_keywords"):
categories = soup.find("meta", {"name":"keywords"})['content'].split(', ')
categories = [c for c in categories if not self.story.getMetadata('title') in c]
if self.story.getMetadata('author') in categories:
categories.remove(self.story.getMetadata('author'))
logger.debug("Meta = %s" % categories)
for category in categories:
# logger.debug("\tCategory=%s" % category)
# self.story.addToList('category', category.title())
self.story.addToList('eroticatags', category.title())
def extractChapterUrlsAndMetadata(self):
"""
NOTE: Some stories can have versions,
e.g. /my-story-ch-05-version-10
NOTE: If two stories share the same title, a running index is added,
e.g.: /my-story-ch-02-1
Strategy:
* Go to author's page, search for the current story link,
* If it's in a tr.root-story => One-part story
* , get metadata and be done
* If it's in a tr.sl => Chapter in series
* Search up from there until we find a tr.ser-ttl (this is the
story)
* Gather metadata
* Search down from there for all tr.sl until the next
tr.ser-ttl, foreach
* Chapter link is there
"""
if not (self.is_adult or self.getConfig("is_adult")):
raise exceptions.AdultCheckRequired(self.url)
logger.debug("Chapter/Story URL: <%s> " % self.url)
try:
data1 = self._fetchUrl(self.url)
soup1 = self.make_soup(data1)
#strip comments from soup
[comment.extract() for comment in soup1.findAll(text=lambda text:isinstance(text, Comment))]
except urllib2.HTTPError, e:
if e.code == 404:
raise exceptions.StoryDoesNotExist(self.url)
else:
raise e
if "This submission is awaiting moderator's approval" in data1:
raise exceptions.StoryDoesNotExist("This submission is awaiting moderator's approval. %s"%self.url)
# author
a = soup1.find("span", "b-story-user-y")
self.story.setMetadata('authorId', urlparse.parse_qs(a.a['href'].split('?')[1])['uid'][0])
authorurl = a.a['href']
if authorurl.startswith('//'):
authorurl = self.parsedUrl.scheme+':'+authorurl
self.story.setMetadata('authorUrl', authorurl)
self.story.setMetadata('author', a.text)
# get the author page
try:
dataAuth = self._fetchUrl(authorurl)
soupAuth = self.make_soup(dataAuth)
#strip comments from soup
[comment.extract() for comment in soupAuth.findAll(text=lambda text:isinstance(text, Comment))]
# logger.debug(soupAuth)
except urllib2.HTTPError, e:
if e.code == 404:
raise exceptions.StoryDoesNotExist(authorurl)
else:
raise e
## Find link to url in author's page
## site has started using //domain.name/asdf urls remove https?: from front
## site has started putting https back on again.
storyLink = soupAuth.find('a', href=re.compile(r'(https?:)?'+re.escape(self.url[self.url.index(':')+1:])))
# storyLink = soupAuth.find('a', href=self.url)#[self.url.index(':')+1:])
if storyLink is not None:
# pull the published date from the author page
# default values from single link. Updated below if multiple chapter.
logger.debug("Found story on the author page.")
date = storyLink.parent.parent.findAll('td')[-1].text
self.story.setMetadata('datePublished', makeDate(date, self.dateformat))
self.story.setMetadata('dateUpdated',makeDate(date, self.dateformat))
if storyLink is not None:
urlTr = storyLink.parent.parent
if "sl" in urlTr['class']:
isSingleStory = False
else:
isSingleStory = True
else:
raise exceptions.FailedToDownload("Couldn't find story <%s> on author's page <%s>" % (self.url, authorurl))
if isSingleStory:
# self.chapterUrls = [(soup1.h1.string, self.url)]
# self.story.setMetadata('title', soup1.h1.string)
self.story.setMetadata('title', storyLink.text.strip('/'))
logger.debug('Title: "%s"' % storyLink.text.strip('/'))
self.story.setMetadata('description', urlTr.findAll("td")[1].text)
self.story.addToList('category', urlTr.findAll("td")[2].text)
# self.story.addToList('eroticatags', urlTr.findAll("td")[2].text)
date = urlTr.findAll('td')[-1].text
self.story.setMetadata('datePublished', makeDate(date, self.dateformat))
self.story.setMetadata('dateUpdated',makeDate(date, self.dateformat))
self.chapterUrls = [(storyLink.text, self.url)]
averrating = stripHTML(storyLink.parent)
## title (0.00)
averrating = averrating[averrating.rfind('(')+1:averrating.rfind(')')]
try:
self.story.setMetadata('averrating', float(averrating))
except:
pass
# self.story.setMetadata('averrating',averrating)
# parse out the list of chapters
else:
seriesTr = urlTr.previousSibling
while 'ser-ttl' not in seriesTr['class']:
seriesTr = seriesTr.previousSibling
m = re.match("^(?P<title>.*?):\s(?P<numChapters>\d+)\sPart\sSeries$", seriesTr.find("strong").text)
self.story.setMetadata('title', m.group('title'))
seriesTitle = m.group('title')
## Walk the chapters
chapterTr = seriesTr.nextSibling
self.chapterUrls = []
dates = []
descriptions = []
ratings = []
chapters = []
while chapterTr is not None and 'sl' in chapterTr['class']:
description = "%d. %s" % (len(descriptions)+1,stripHTML(chapterTr.findAll("td")[1]))
description = stripHTML(chapterTr.findAll("td")[1])
chapterLink = chapterTr.find("td", "fc").find("a")
self.story.addToList('eroticatags', chapterTr.findAll("td")[2].text)
pub_date = makeDate(chapterTr.findAll('td')[-1].text, self.dateformat)
dates.append(pub_date)
chapterTr = chapterTr.nextSibling
chapter_title = chapterLink.text
if self.getConfig("clean_chapter_titles"):
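                    # Hypothetical examples of this cleanup: with series
                    # title "My Story", "My Story Ch. 03" -> "Chapter 3",
                    # "My Story Pt. 2" -> "Part 2", and
                    # "My Story: The End" -> "The End".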
logger.debug('\tChapter Name: "%s"' % chapterLink.string)
logger.debug('\tChapter Name: "%s"' % chapterLink.text)
if chapterLink.text.lower().startswith(seriesTitle.lower()):
chapter = chapterLink.text[len(seriesTitle):].strip()
logger.debug('\tChapter: "%s"' % chapter)
if chapter == '':
chapter_title = 'Chapter %d' % (len(self.chapterUrls) + 1)
else:
separater_char = chapter[0]
logger.debug('\tseparater_char: "%s"' % separater_char)
chapter = chapter[1:].strip() if separater_char in [":", "-"] else chapter
logger.debug('\tChapter: "%s"' % chapter)
if chapter.lower().startswith('ch.'):
chapter = chapter[len('ch.'):]
try:
chapter_title = 'Chapter %d' % int(chapter)
except:
chapter_title = 'Chapter %s' % chapter
elif chapter.lower().startswith('pt.'):
chapter = chapter[len('pt.'):]
try:
chapter_title = 'Part %d' % int(chapter)
except:
chapter_title = 'Part %s' % chapter
elif separater_char in [":", "-"]:
chapter_title = chapter
# if chapter_title == '':
# chapter_title = chapterLink.string
# pages include full URLs.
chapurl = chapterLink['href']
if chapurl.startswith('//'):
chapurl = self.parsedUrl.scheme + ':' + chapurl
logger.debug("Chapter URL: " + chapurl)
logger.debug("Chapter Title: " + chapter_title)
logger.debug("Chapter description: " + description)
chapters.append((chapter_title, chapurl, description, pub_date))
# self.chapterUrls.append((chapter_title, chapurl))
numrating = stripHTML(chapterLink.parent)
## title (0.00)
numrating = numrating[numrating.rfind('(')+1:numrating.rfind(')')]
try:
ratings.append(float(numrating))
except:
pass
chapters = sorted(chapters, key=lambda chapter: chapter[3])
for i, chapter in enumerate(chapters):
self.chapterUrls.append((chapter[0], chapter[1]))
descriptions.append("%d. %s" % (i + 1, chapter[2]))
        ## Set the oldest date as publication date, the newest as update
        ## date (chapters were sorted by date above)
        self.story.setMetadata('datePublished', chapters[0][3])
        self.story.setMetadata('dateUpdated', chapters[-1][3])
## Set description to joint chapter descriptions
self.setDescription(authorurl,"<p>"+"</p>\n<p>".join(descriptions)+"</p>")
if len(ratings) > 0:
self.story.setMetadata('averrating','%4.2f' % (sum(ratings) / float(len(ratings))))
# normalize on first chapter URL.
self._setURL(self.chapterUrls[0][1])
# reset storyId to first chapter.
self.story.setMetadata('storyId',self.parsedUrl.path.split('/',)[2])
self.story.setMetadata('numChapters', len(self.chapterUrls))
self.story.setMetadata('category', soup1.find('div', 'b-breadcrumbs').findAll('a')[1].string)
self.getCategories(soup1)
# self.story.setMetadata('description', soup1.find('meta', {'name': 'description'})['content'])
return
def getPageText(self, raw_page, url):
logger.debug('Getting page text')
# logger.debug(soup)
raw_page = raw_page.replace('<div class="b-story-body-x x-r15"><div><p>','<div class="b-story-body-x x-r15"><div>')
# logger.debug("\tChapter text: %s" % raw_page)
page_soup = self.make_soup(raw_page)
[comment.extract() for comment in page_soup.findAll(text=lambda text:isinstance(text, Comment))]
story2 = page_soup.find('div', 'b-story-body-x').div
# logger.debug("getPageText- name div div...")
# logger.debug(soup)
# story2.append(page_soup.new_tag('br'))
div = self.utf8FromSoup(url, story2)
# logger.debug(div)
fullhtml = unicode(div)
# logger.debug(fullhtml)
fullhtml = re.sub(r'<br />\s*<br />', r'</p><p>', fullhtml)
fullhtml = re.sub(r'^<div>', r'', fullhtml)
fullhtml = re.sub(r'</div>$', r'', fullhtml)
fullhtml = re.sub(r'(<p><br/></p>\s+)+$', r'', fullhtml)
# logger.debug(fullhtml)
return fullhtml
def getChapterText(self, url):
logger.debug('Getting chapter text from: %s' % url)
raw_page = self._fetchUrl(url)
page_soup = self.make_soup(raw_page)
pages = page_soup.find('select', {'name' : 'page'})
page_nums = [page.text for page in pages.findAll('option')] if pages else 0
fullhtml = ""
self.getCategories(page_soup)
if self.getConfig("description_in_chapter"):
chapter_description = page_soup.find("meta", {"name" : "description"})['content']
logger.debug("\tChapter description: %s" % chapter_description)
fullhtml += '<p><b>Description:</b> %s</p><hr />' % chapter_description
fullhtml += self.getPageText(raw_page, url)
if pages:
for page_no in xrange(2, len(page_nums) + 1):
page_url = url + "?page=%s" % page_no
logger.debug("page_url= %s" % page_url)
raw_page = self._fetchUrl(page_url)
fullhtml += self.getPageText(raw_page, url)
# fullhtml = self.utf8FromSoup(url, bs.BeautifulSoup(fullhtml))
# fullhtml = re.sub(r'^<div>', r'', fullhtml)
# fullhtml = re.sub(r'</div>$', r'', fullhtml)
# if None == div:
# raise exceptions.FailedToDownload("Error downloading Chapter: %s! Missing required element!" % url)
return fullhtml
def getClass():
return LiteroticaSiteAdapter
|
parpg/parpg
|
parpg/components/container.py
|
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from copy import deepcopy
from base import Base
class Container(Base):
"""
Component that allows an entity to contain one or more child entities.
"""
def __init__(self):
Base.__init__(self, children=list, max_bulk=int)
@property
def saveable_fields(self):
fields = self.fields.keys()
fields.remove("children")
return fields
class BulkLimitError(Exception):
"""Error that gets raised when the item would exceed the
bulk limit of the container."""
def __init__(self, bulk, max_bulk):
self.bulk = bulk
self.max_bulk = max_bulk
def __str__(self):
return "Item would exceed the bulk limit of the container."
class NoFreeSlotError(Exception):
"""Error that gets raised when the container has no free slots."""
def __str__(self):
return "Container can't hold any more items."
def get_free_slot(container):
"""Returns the first slot of the container that is not occupied."""
index = 0
for child in container.children:
if not child:
return index
index += 1
raise NoFreeSlotError
def get_total_bulk(container):
"""Returns the bulk of all items in the container."""
total_bulk = 0
for child in container.children:
if child:
total_bulk += child.bulk
return total_bulk
def get_total_weight(container):
"""Returns the weight of all items in the container."""
total_weight = 0
for child in container.children:
if child:
total_weight += child.weight
return total_weight
def get_item(container, slot_or_type):
"""Returns the item that is in the slot, or has the given type."""
if type(slot_or_type) == int:
if len(container.children) >= (slot_or_type + 1):
return container.children[slot_or_type]
else:
for child in container.children:
if child and child.item_type == slot_or_type:
return child
return None
def remove_item(container, slot_or_type):
"""Removes the item at the given slot, or with the given type."""
if type(slot_or_type) == int:
item = get_item(container, slot_or_type)
if item:
container.children[slot_or_type] = None
item.container = None
item.slot = -1
else:
for child in container.children:
if child and child.item_type == slot_or_type:
container.children[child.slot] = None
child.container = None
child.slot = -1
def take_item(container, slot_or_type):
"""Moves the item at the given slot, or with the given type,
out of the container and returns it."""
item = get_item(container, slot_or_type)
if item:
remove_item(container, slot_or_type)
return item
def put_item(container, item, slot=-1):
"""Puts the item at the given slot in the container.
Returns the item previously at the slot."""
if slot == -1:
slot = get_free_slot(container)
total_bulk = get_total_bulk(container)
total_bulk += item.bulk
old_item = get_item(container, slot)
if old_item:
total_bulk -= old_item.bulk
if total_bulk > container.max_bulk:
raise BulkLimitError(total_bulk, container.max_bulk)
remove_item(container, slot)
container.children[slot] = item
if item.container:
remove_item(item.container, item.slot)
item.container = container
item.slot = slot
return old_item
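# Usage sketch (illustrative, not part of the original module). It assumes
# Container() can be instantiated bare and that its `children` list can be
# sized by hand; items just need the attributes the helpers rely on
# (bulk, weight, item_type, container, slot).
if __name__ == '__main__':
    class _DemoItem(object):
        def __init__(self, item_type, bulk, weight):
            self.item_type = item_type
            self.bulk = bulk
            self.weight = weight
            self.container = None
            self.slot = -1
    backpack = Container()
    backpack.children = [None] * 3  # three empty slots
    backpack.max_bulk = 10
    apple = _DemoItem('apple', bulk=2, weight=1)
    put_item(backpack, apple)  # lands in the first free slot
    assert get_total_bulk(backpack) == 2
    assert take_item(backpack, 'apple') is apple
    assert get_item(backpack, 'apple') is None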
|
Madpilot0/EVE-Farm
|
app.py
|
#!/usr/bin/env python
from flask import Flask, render_template, request, jsonify, session, redirect, escape, url_for
import MySQLdb
import bcrypt
from esipy import App
from esipy import EsiClient
from esipy import EsiSecurity
from esipy.exceptions import APIException
import time
import json
import requests
import datetime
import math
app = Flask(__name__)
class ServerError(Exception):pass
class DB:
conn = None
def connect(self):
config = {}
execfile("config.conf",config)
self.conn = MySQLdb.connect(
host=config['dbHost'],
user=config['dbUser'],
passwd=config['dbPass'],
db=config['dbBase']
)
self.conn.autocommit(True)
self.conn.set_character_set('utf8')
def query(self, sql, args=None):
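        # Lazy (re)connect: if execute fails because the connection was
        # never opened or has gone away, connect once and retry the query.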
try:
cursor = self.conn.cursor()
cursor.execute(sql,args)
except (AttributeError, MySQLdb.OperationalError):
self.connect()
cursor = self.conn.cursor()
cursor.execute(sql,args)
return cursor
if __name__ == '__main__':
config = {}
execfile("config.conf",config)
serverIP = config['serverIP']
serverPort = config['serverPort']
rounds = 10
debug = config['debug']
cer = config['ssl_cer']
key = config['ssl_key']
context = (cer,key)
app.secret_key = config['appKey']
esi_app = App.create('https://esi.tech.ccp.is/latest/swagger.json?datasource=tranquility')
security = EsiSecurity(
app=esi_app,
redirect_uri=config['callbackURL'],
client_id=config['clientID'],
secret_key=config['secretKey']
)
client = EsiClient(security=security)
scopes = ['esi-location.read_location.v1','esi-skills.read_skillqueue.v1','esi-skills.read_skills.v1','esi-clones.read_clones.v1']
db = DB()
def profit():
extractorID = "40519"
injectorID = "40520"
plexID = "44992"
priceList = []
url = "http://api.eve-central.com/api/marketstat/json?regionlimit=10000002&typeid="
try:
prices = requests.get(url+extractorID).json()[0]
extractorPrice = prices['buy']['fivePercent']
extractorPricen= prices['sell']['fivePercent']
prices = requests.get(url+injectorID).json()[0]
injectorPrice = prices['sell']['fivePercent']
injectorPricen= prices['buy']['fivePercent']
prices = requests.get(url+plexID).json()[0]
plexPrice = prices['buy']['fivePercent']
plexPricen= prices['sell']['fivePercent']
injectorsMonth = 3.888
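        # 3.888 injectors/month follows from the 2700 SP/hour training
        # rate assumed elsewhere in this app: 2700 * 24 * 30 = 1,944,000
        # SP per month, divided by the 500,000 SP one extractor holds.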
profit = round(((injectorsMonth * (injectorPrice - extractorPrice)) - (plexPrice * 500))/1000000,2)
nonoptimal = round(((injectorsMonth * (injectorPricen - extractorPricen)) - (plexPricen * 500))/1000000,2)
return "<a href='https://market.madpilot.nl/static/graph/farm-month.png'>Projected profits: (min)"+str(nonoptimal)+"mil - (max)"+str(profit)+"mil </a>"
except:
return "<a href='https://market.madpilot.nl/static/graph/farm-month.png'>Projected profits: (min)"+str(0)+"mil - (max)"+str(0)+"mil </a>"
def isk(extractors):
extractorID = "40519"
injectorID = "40520"
plexID = "44992"
priceList = []
url = "http://api.eve-central.com/api/marketstat/json?regionlimit=10000002&typeid="
try:
prices = requests.get(url+extractorID).json()[0]
extractorPrice = prices['buy']['fivePercent']
extractorPricen= prices['sell']['fivePercent']
prices = requests.get(url+injectorID).json()[0]
injectorPrice = prices['sell']['fivePercent']
injectorPricen= prices['buy']['fivePercent']
prices = requests.get(url+plexID).json()[0]
plexPrice = prices['buy']['fivePercent']
plexPricen= prices['sell']['fivePercent']
maxProfit = round(((injectorPrice - extractorPrice) * extractors)/1000000,2)
minProfit = round(((injectorPricen - extractorPricen) * extractors)/1000000,2)
except:
maxProfit = 0
minProfit = 0
return [maxProfit, minProfit]
def isReady(char_id):
checkDelay = 1800
cur = db.query("SELECT UNIX_TIMESTAMP(updated) FROM cache_table WHERE character_id = %s",[char_id])
lastChecked = cur.fetchone()
curTime = int(time.time())
if lastChecked:
lastCheckedEpoch = lastChecked[0]
if (curTime - lastCheckedEpoch) < checkDelay:
print("Checktime is less than "+str(checkDelay)+" Seconds (current: "+str(curTime - lastCheckedEpoch)+"). Skipping")
return False
return True
return True
@app.route('/')
def index():
error = None
if 'username' not in session:
error = "Not logged in"
return redirect(url_for('login', error=error))
secure = security.get_auth_uri(scopes=scopes)
cur = db.query("SELECT id FROM users WHERE user = %s;", [session['username']])
for row in cur.fetchall():
userID = row[0]
characters = []
cur = db.query("SELECT character_id, access_token, refresh_token, expires, expires_in, added, updated FROM characters WHERE owner_id = %s;", [userID])
allSP = 0
extractableSP = 0
numExtract = 0
for row in cur.fetchall():
epoch = round(time.time())
expires = row[3] - row[4] - epoch
if expires < 0:
expires = 0
refresh = {u'access_token': row[1], u'refresh_token': row[2], u'expires_in': expires}
security.update_token(refresh)
ready = isReady(row[0])
if not ready:
cur = db.query("SELECT * FROM cache_table WHERE character_id=%s",[row[0]])
cache = cur.fetchall()[0]
#Get character name
charName = esi_app.op['get_characters_names'](character_ids=[row[0]])
result = client.request(charName)
charName = json.loads(result.raw)[0].get('character_name')
print "Character "+charName
#Get character location
if ready:
charLocation = esi_app.op['get_characters_character_id_location'](character_id=row[0])
result = client.request(charLocation)
location = json.loads(result.raw)
sol = esi_app.op['get_universe_systems_system_id'](system_id=location.get('solar_system_id'))
sol = json.loads(client.request(sol).raw).get('name')
cur = db.query("INSERT INTO cache_table (character_id,char_location) VALUES (%s,%s) ON DUPLICATE KEY UPDATE char_location=%s",[row[0],result.raw,result.raw])
else:
location = json.loads(cache[3])
sol = esi_app.op['get_universe_systems_system_id'](system_id=location.get('solar_system_id'))
sol = json.loads(client.request(sol).raw).get('name')
#Get current training skill + queue
if ready:
charTrain = esi_app.op['get_characters_character_id_skillqueue'](character_id=row[0])
result = client.request(charTrain)
training = json.loads(result.raw)
cur = db.query("INSERT INTO cache_table (character_id,char_queue) VALUES (%s,%s) ON DUPLICATE KEY UPDATE char_queue=%s",[row[0],result.raw,result.raw])
else:
training = json.loads(cache[4])
currentlyTrainingStart = training[0].get('start_date')
currentlyTrainingEnd = training[0].get('finish_date')
startTrainEpoch = int(time.mktime(time.strptime(currentlyTrainingStart, "%Y-%m-%dT%H:%M:%SZ")))
endTrainEpoch = int(time.mktime(time.strptime(currentlyTrainingEnd, "%Y-%m-%dT%H:%M:%SZ")))
if endTrainEpoch < epoch:
while endTrainEpoch < epoch and len(training)>1:
del training[0]
currentlyTrainingStart = training[0].get('start_date')
currentlyTrainingEnd = training[0].get('finish_date')
startTrainEpoch = int(time.mktime(time.strptime(currentlyTrainingStart, "%Y-%m-%dT%H:%M:%SZ")))
endTrainEpoch = int(time.mktime(time.strptime(currentlyTrainingEnd, "%Y-%m-%dT%H:%M:%SZ")))
trainedSpCur = training[0].get('training_start_sp') - training[0].get('level_start_sp')
endQueue = training[-1].get('finish_date')
currentlyTraining = training[0].get('skill_id')
currentlyTrainingLevel = training[0].get('finished_level')
curSkillStartSP = training[0].get('level_start_sp')
curSkillEndSP = training[0].get('level_end_sp')
curSkillSP = curSkillEndSP - curSkillStartSP
#Get currently training name
skillName = esi_app.op['get_universe_types_type_id'](type_id=currentlyTraining)
result = client.request(skillName)
skillName = json.loads(result.raw).get('name')
#Get character total sp
if ready:
charSkill = esi_app.op['get_characters_character_id_skills'](character_id=row[0])
result = client.request(charSkill)
sp = json.loads(result.raw)
totalSp = sp.get('total_sp')
cur = db.query("INSERT INTO cache_table (character_id,char_skills) VALUES (%s,%s) ON DUPLICATE KEY UPDATE char_skills=%s",[row[0],result.raw,result.raw])
else:
sp = json.loads(cache[5])
totalSp = sp.get('total_sp')
#Get current training skill rank
        skillRank = esi_app.op['get_universe_types_type_id'](type_id=currentlyTraining)
result = client.request(skillRank)
skillDogma = json.loads(result.raw).get('dogma_attributes')
print skillDogma
skillRank = 5
# for skill in skillDogma:
# if skill.get('attribute_id') == 275:
# skillRank = skill.get('value')
# break;
startTrainEpoch = int(time.mktime(time.strptime(currentlyTrainingStart, "%Y-%m-%dT%H:%M:%SZ")))
endTrainEpoch = int(time.mktime(time.strptime(currentlyTrainingEnd, "%Y-%m-%dT%H:%M:%SZ")))
totalTrainTime = endTrainEpoch - startTrainEpoch
trainedTime = epoch - startTrainEpoch
# skillStartSP = (250 * math.pow(5.65685,currentlyTrainingLevel-1))
# skillEndSP = (250 * math.pow(5.65685,currentlyTrainingLevel))
spPerSec = float(curSkillSP) / float(totalTrainTime)
trainedSP = int(spPerSec * trainedTime)
totalSp += trainedSP
allSP += totalSp
#Prettify dates
timeLeftCurrent = datetime.datetime.strptime(currentlyTrainingEnd, "%Y-%m-%dT%H:%M:%SZ").replace(microsecond=0) - datetime.datetime.now().replace(microsecond=0)
endQueueLeft = datetime.datetime.strptime(endQueue, "%Y-%m-%dT%H:%M:%SZ").replace(microsecond=0) - datetime.datetime.now().replace(microsecond=0)
currentlyTrainingEnd = time.strftime("%Y-%m-%d %H:%M",time.gmtime(int(time.mktime(time.strptime(currentlyTrainingEnd, "%Y-%m-%dT%H:%M:%SZ")))))
endQueue = time.strftime("%Y-%m-%d %H:%M",time.gmtime(int(time.mktime(time.strptime(endQueue, "%Y-%m-%dT%H:%M:%SZ")))))
#Get Cybernetics skill
for skill in sp.get('skills'):
if skill.get('skill_id') == 3411:
cyberLevel = skill.get('current_skill_level')
break;
#Get character attributes
#Assume 2700(max) for now, until attributes are added to ESI
startTime = time.mktime(time.strptime(currentlyTrainingStart, "%Y-%m-%dT%H:%M:%SZ"))
timeDone = epoch - startTime
spAdded = int(timeDone / 60 / 60 * 2700)
if totalSp > 5500000:
exSP = totalSp - 5000000
extractableSP += exSP
exSP = int(exSP / 500000)
numExtract += exSP
totalSp = format(totalSp, "8,d")
queueStatus = None
if endTrainEpoch < epoch:
queueStatus = "Queue empty!"
characters.append(
{
"characterName": charName,
"characterID": row[0],
"characterImage": "https://image.eveonline.com/Character/"+str(row[0])+"_64.jpg",
"totalSP": totalSp,
"characterLocation": sol,
"currentEnd":currentlyTrainingEnd,
"queueEnd": endQueue,
"currentlyTraining": currentlyTraining,
"timeLeftCurrent": timeLeftCurrent,
"endQueueLeft": endQueueLeft,
"currentlyTrainingLevel": currentlyTrainingLevel,
"currentlyTrainingName": skillName,
"cyberneticsLevel": cyberLevel,
"queueStatus": queueStatus
})
print "----------"
allSP = format(allSP, "8,d")
extractableSP = format(extractableSP, "8,d")
stats = [{
"allSP": allSP,
"exSP": extractableSP,
"numEx": numExtract
}]
profits = isk(numExtract)
return render_template('index.html',secUrl=secure, characters=characters, stats=stats, profit=profit(), profits=profits)
@app.route('/login', methods=['GET', 'POST'])
def login():
    error = request.args.get('error')
if 'username' in session:
return redirect(url_for('index'))
try:
if request.method == 'POST':
username = request.form['username']
cur = db.query("SELECT COUNT(1) FROM users WHERE user = %s", [username])
if not cur.fetchone()[0]:
raise ServerError('Incorrect username / password')
password = request.form['password']
cur = db.query("SELECT pass FROM users WHERE user = %s;", [username])
for row in cur.fetchall():
if bcrypt.hashpw(password.encode('utf-8'), row[0]) == row[0]:
session['username'] = request.form['username']
return redirect(url_for('index'))
raise ServerError('Incorrect username / password')
except ServerError as e:
error = str(e)
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
session.pop('username', None)
return redirect(url_for('index'))
@app.route('/register', methods=['GET', 'POST'])
def register():
error = None
if 'username' not in session:
try:
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
email = request.form['email']
if not username or not password or not email:
raise ServerError('Fill in all fields please')
password = bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt(rounds))
cur = db.query("INSERT INTO users (`user`, `pass`, `email`) VALUES (%s,%s,%s)", [username, password, email])
except ServerError as e:
error = str(e)
return render_template('register.html', error=error)
if config['registerEnabled']:
return render_template('register.html')
error = "Registration is disabled by the admin"
return redirect(url_for('login', error=error))
if session['username'] == 'admin':
return render_template('register.html')
error = "Only available to admins"
return redirect(url_for('login', error=error))
@app.route('/userPage', methods=['GET','POST'])
def userPage():
return "User! :-)"
@app.route('/oauth')
def oauth():
code = request.args.get('code')
if not code:
return redirect(url_for('index'))
token = security.auth(code)
access_token = token['access_token']
refresh_token = token['refresh_token']
expires_in = token['expires_in']
cur = db.query("SELECT id FROM users WHERE user = %s;", [session['username']])
for row in cur.fetchall():
userID = row[0]
verify = security.verify()
charID = verify.get('CharacterID')
print userID
print charID
print token
print token['access_token']
print token['refresh_token']
print token['expires_in']
epoch = round(time.time())
expires = epoch + int(expires_in)
cur = db.query("INSERT INTO characters (owner_id, character_id, access_token, refresh_token, expires, expires_in) VALUES (%s,%s,%s,%s,%s,%s) ON DUPLICATE KEY UPDATE access_token=%s, refresh_token=%s, expires=%s, expires_in=%s",[userID,charID,access_token,refresh_token,expires,int(expires_in),access_token,refresh_token,expires,int(expires_in)])
return redirect(url_for('index'))
if __name__ == '__main__':
if config['ssl']:
app.run(
host=serverIP,
port=serverPort,
ssl_context=context,
debug=debug
)
else:
app.run(
host=serverIP,
port=serverPort,
debug=debug
)
|
stuckj/dupeguru
|
qt/base/details_dialog.py
|
# Created By: Virgil Dupras
# Created On: 2010-02-05
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QDialog
from .details_table import DetailsModel
class DetailsDialog(QDialog):
def __init__(self, parent, app, **kwargs):
super().__init__(parent, Qt.Tool, **kwargs)
self.app = app
self.model = app.model.details_panel
self._setupUi()
# To avoid saving uninitialized geometry on appWillSavePrefs, we track whether our dialog
# has been shown. If it has, we know that our geometry should be saved.
self._shown_once = False
self.app.prefs.restoreGeometry('DetailsWindowRect', self)
self.tableModel = DetailsModel(self.model)
# tableView is defined in subclasses
self.tableView.setModel(self.tableModel)
self.model.view = self
self.app.willSavePrefs.connect(self.appWillSavePrefs)
def _setupUi(self): # Virtual
pass
def show(self):
self._shown_once = True
super().show()
#--- Events
def appWillSavePrefs(self):
if self._shown_once:
self.app.prefs.saveGeometry('DetailsWindowRect', self)
#--- model --> view
def refresh(self):
self.tableModel.beginResetModel()
self.tableModel.endResetModel()
|
mmartinortiz/pyePuck
|
examples/line_follower.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# line_follower.py
#
# Copyright 2011 Manuel Martín Ortiz <mmartinortiz@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
# -- Line Follower --
#
# Program to follow a black line over a white floor
#
# Requirements:
# * ePuck with Webots' firmware 1.4.2
# * ePuck.py Library (>= 0.9)
# * Bluetooth device
from ePuck import ePuck
import time
import sys
import re
# You can use this dictionary to associate an ePuck ID with its MAC Address
epucks = {
'1797' : '10:00:E8:6C:A2:B6',
'1903' : '10:00:E8:6C:A1:C7'
}
def log(text):
""" Show @text in standart output with colors """
blue = '\033[1;34m'
off = '\033[1;m'
print(''.join((blue, '[Log] ', off, str(text))))
def error(text):
red = '\033[1;31m'
off = '\033[1;m'
print(''.join((red, '[Error] ', off, str(text))))
def main(mac):
global_speed = 180
fs_speed = 0.6
threshold = 1000
    log('Connecting with ePuck')
try:
# First, create an ePuck object.
# If you want debug information:
#~ robot = ePuck(mac, debug = True)
# else:
robot = ePuck(mac)
# Second, connect to it
robot.connect()
        # You can enable various sensors at the same time. Take a look
        # at DIC_SENSORS to know the names of the sensors
robot.enable('floor', 'proximity')
leds_on = [0] * 8
        log('Connection complete. CTRL+C to stop')
log('Library version: ' + robot.version)
times_got = []
except Exception, e:
error(e)
sys.exit(1)
try:
while True:
            # Important: when you execute 'step()', all sensors
            # and actuators are updated. All changes you make on the ePuck
            # will be effective after this method, not before
robot.step()
# Now, we can get updated information from the sensors
floor_sensors = robot.get_floor_sensors()
prox_sensors = robot.get_proximity()
# line_follower
delta = floor_sensors[2] - floor_sensors[0]
l_speed = global_speed - fs_speed * delta
r_speed = global_speed + fs_speed * delta
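            # Proportional steering: the floor sensors read higher over
            # white than over the black line, so when the line drifts
            # toward one side that sensor's value drops, delta changes
            # sign, and the opposing wheel-speed offsets steer the robot
            # back over the line.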
            # Now, we set the motor speed. Remember that we need to execute
            # 'step()' to make this command effective
robot.set_motors_speed(l_speed, r_speed)
# leds on/off
for index, s in enumerate(prox_sensors):
if int(s) > threshold and leds_on[index] == 0:
# Switch On
robot.set_led(index, 1)
leds_on[index] = 1
elif int(s) < threshold and leds_on[index] == 1:
# Switch Off
robot.set_led(index, 0)
leds_on[index] = 0
except KeyboardInterrupt:
        log('Stopping the robot. Bye!')
robot.close()
sys.exit()
except Exception, e:
error(e)
return 0
if __name__ == '__main__':
X = '([a-fA-F0-9]{2}[:|\-]?){6}'
if len(sys.argv) < 2:
error("Usage: " + sys.argv[0] + " ePuck_ID | MAC Address")
sys.exit()
robot_id = sys.argv[1]
if epucks.has_key(robot_id):
main(epucks[robot_id])
    elif re.match(X, robot_id) is not None:
main(robot_id)
else:
        error('You have to indicate the MAC address of the robot')
|
UCNA/main
|
Scripts/plotters/LarmorClipping.py
|
#!/sw/bin/python2.7
import sys
sys.path.append("..")
from ucnacore.PyxUtils import *
from math import *
from ucnacore.LinFitter import *
#from UCNAUtils import *
from bisect import bisect
from calib.FieldMapGen import *
def clip_function(y,rho,h,R):
sqd = sqrt(rho**2-y**2)
if sqd==0:
sqd = 1e-10
return h*rho**2/R*atan(y/sqd)+2*sqd/(3*R)*(3*h*y/2+rho**2-y**2)
def survival_fraction(h,rho,R):
d = R-h
if d < -rho:
return 1
if h <= -rho:
return 0
c1 = 0
if d < rho:
sqd = sqrt(rho**2-d**2)
c1 = pi/2*rho**2-d*sqd-rho**2*atan(d/sqd)
return ( c1 + clip_function(min(h,rho),rho,h,R)
- clip_function(max(h-R,-rho),rho,h,R))/(pi*rho**2)
def radial_clip_function(r,rho,h,R):
return r**2*(3*h-2*r)/(6*R**2)
def radial_survival_fraction(h,rho,R):
d = h-R
if d > rho:
return 1
if h <= 0:
return 0
c1 = 0
if d > 0:
c1 = (h-R)**2
return ( c1 + radial_clip_function(min(h,rho),rho,h,R) - radial_clip_function(max(d,0),rho,h,R) )/(rho**2)
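# Sanity checks implied by the definitions above (illustrative):
# survival_fraction(h, rho, R) returns 1 once h > R + rho and 0 once
# h <= -rho; radial_survival_fraction likewise returns 1 for h > R + rho
# and 0 for h <= 0.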
class rot3:
def __init__(self,t1,t2,t3,s=1.0):
self.c1,self.s1 = cos(t1),sin(t1)
self.c2,self.s2 = cos(t2),sin(t2)
self.c3,self.s3 = cos(t3),sin(t3)
self.s = s
def __call__(self,(x,y,z)):
x,y = self.c1*x+self.s1*y,self.c1*y-self.s1*x
y,z = self.c2*y+self.s2*z,self.c2*z-self.s2*y
z,x = self.c3*z+self.s3*x,self.c3*x-self.s3*z
return self.s*x,self.s*y,self.s*z
class path3d:
def __init__(self):
self.pts = []
self.sty = []
self.endsty = []
self.breakunder = False
self.nopatch = False
def addpt(self,(x,y,z),s=1):
self.pts.append((x*s,y*s,z*s))
def apply(self,transf):
self.pts = [transf(xyz) for xyz in self.pts]
def finish(self):
self.p = path.path()
self.p.append(path.moveto(self.pts[0][0],self.pts[0][1]))
for g in self.pts[1:]:
self.p.append(path.lineto(g[0],g[1]))
self.patchpts = []
self.underpts = []
def nearestpt(self,(x,y)):
d0 = 1e20
n = None
for i in range(len(self.pts)):
d1 = (self.pts[i][0]-x)**2+(self.pts[i][1]-y)**2
if d1 < d0:
d0 = d1
n = i
return n
def znear(self,(x,y)):
return self.pts[self.nearestpt((x,y))][2]
def znearc(self,c):
x,y = self.p.at(c)
x,y = 100*x.t,100*y.t
return self.znear((x,y))
def addPatch(self,c,z):
self.patchpts.append((c,z))
def drawto(self,cnvs):
cnvs.stroke(self.p,self.sty)
def interleave(p3d1,p3d2):
print "Finding intersection points..."
is1,is2 = p3d1.p.intersect(p3d2.p)
print "determining patch z..."
assert len(is1)==len(is2)
for i in range(len(is1)):
z1 = p3d1.znearc(is1[i])
z2 = p3d2.znearc(is2[i])
if z1>z2:
p3d1.addPatch(is1[i],z1)
p3d2.underpts.append(is2[i])
else:
p3d2.addPatch(is2[i],z2)
p3d1.underpts.append(is1[i])
print "done."
def drawInterleaved(c,ps):
print "Drawing base curves..."
for p in ps:
p.p = p.p.normpath()
if p.breakunder:
splits = []
for s in p.underpts:
splits += [s-p.breakunder*0.5,s+p.breakunder*0.5]
psplit = p.p.split(splits)
for seg in psplit[0::2]:
c.stroke(seg,p.sty)
else:
c.stroke(p.p,p.sty+p.endsty)
print "Preparing patches..."
patches = []
for (pn,p) in enumerate(ps):
if p.nopatch:
continue
p.patchpts.sort()
splits = []
for s in p.patchpts:
splits += [s[0]-0.05,s[0]+0.05]
psplit = p.p.split(splits)
patches += [ (patch[1],pn,psplit[2*n+1]) for n,patch in enumerate(p.patchpts) ]
patches.sort()
print "Patching intersections..."
for p in patches:
c.stroke(p[2],ps[p[1]].sty)
print "Done."
def fieldPath(fmap,z0,z1,c,cmax,npts=50):
pfield = path3d()
for z in unifrange(z0,z1,npts):
Bdens = c/sqrt(fmap(z)+0.0001)
if abs(Bdens) < cmax:
pfield.addpt((0,Bdens,z))
return pfield
def larmor_unif(fT,theta,KE,t):
b = electron_beta(KE)
z = t*b*cos(theta)*3e8 # m
r = 3.3e-6*b*(KE+511)*sin(theta)/fT # m
f = 2.8e10*fT # Hz
return r*cos(2*pi*f*t),r*sin(2*pi*f*t),z
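# Unit notes for the constants above (derivable from r = p/(eB) and
# f = eB/(2*pi*m_e)): 3.3e-6 gives the electron gyroradius in metres per
# keV/c of transverse momentum per tesla, and 2.8e10 Hz/T is the electron
# cyclotron frequency.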
def larmor_step(p,pt2_per_B,fT):
nu = 2.8e10*fT*2*pi # angular frequency, Hz
pt = sqrt(fT*pt2_per_B) # transverse momentum component, keV
if p<=pt:
return 0,nu
pl = sqrt(p**2-pt**2) # longitudinal momentum, keV
vz = pl/sqrt(p*p+511*511)*3e8; # z velocity, m/s
return vz,nu
def larmorPath(fmap,p,pt2_per_B,z0,z1,dt,theta=0):
lpath = path3d()
z = z0
vz = 1
while z0 <= z <= z1 and vz>0:
fT = fmap(z) # magnetic field, T
r = 3.3e-6*sqrt(pt2_per_B/fT) # larmor radius, m
lpath.addpt((r*cos(theta),r*sin(theta),z))
# step to next point
vz,nu = larmor_step(p,pt2_per_B,fmap(z))
theta += nu*dt
z += vz*dt
return lpath
def plot_larmor_trajectory():
fmap = fieldMap()
fmap.addFlat(-1.0,0.01,1.0)
fmap.addFlat(0.015,1.0,0.6)
#fmap.addFlat(-1.0,0.01,0.6)
#fmap.addFlat(0.08,1.0,1.0)
fT = fmap(0)
theta = 1.4
KE = 511.
#rot = rot3(0,0.0,-pi/2-0.2,500)
rot = rot3(0,0.0,-pi/2+0.2,500)
tm = 1e-9
doFinal = True
plarmor = larmorPath(fmap,500,495**2/fmap(0),0,0.02,5e-13,3*pi/4)
plarmor.apply(rot)
#plarmor.sty = [style.linewidth.thick,rgb.red]
plarmor.sty = [style.linewidth.thick]
plarmor.endsty = [deco.earrow()]
plarmor.finish()
x0,y0 = plarmor.p.at(plarmor.p.begin())
fieldlines = []
w = 0.0025
cmagf = canvas.canvas()
for o in unifrange(-w,w,20):
pf = fieldPath(fmap,-0.002,0.022,o,1.02*w)
if len(pf.pts) < 10:
continue
pf.apply(rot)
pf.finish()
pf.breakunder = 0.07
pf.nopatch = True
#pf.sty=[style.linewidth.thin,rgb.blue]
pf.sty=[style.linewidth.thin] # field line color/style
fieldlines.append(pf)
pf.drawto(cmagf)
if doFinal:
interleave(plarmor,pf)
#cmagf.stroke(path.circle(x0,y0,0.07),[deco.filled([rgb.green])])
cmagf.stroke(path.circle(x0,y0,0.07),[deco.filled([rgb.white]),style.linewidth.Thick])
cmagf.writetofile("/Users/michael/Desktop/Bfield.pdf")
c = canvas.canvas()
if doFinal:
drawInterleaved(c,[plarmor,]+fieldlines)
else:
plarmor.drawto(c)
for pf in fieldlines:
pf.drawto(c)
#c.stroke(path.circle(x0,y0,0.07),[deco.filled([rgb.green])])
c.stroke(path.circle(x0,y0,0.07),[deco.filled([rgb.white]),style.linewidth.Thick])
c.writetofile("/Users/michael/Desktop/larmor_spiral.pdf")
def plot_spectrometer_field():
fmap = fieldMap()
fmap.addFlat(-3,-2.8,0.01)
fmap.addFlat(-2.3,-2.1,0.6)
fmap.addFlat(-1.6,1.6,1.0)
fmap.addFlat(2.1,2.3,0.6)
fmap.addFlat(2.8,3,0.01)
rot = rot3(0.0,0.0,-pi/2.,10.)
w = 0.25
cmagf = canvas.canvas()
for o in unifrange(-w,w,20):
pf = fieldPath(fmap,-2.6,2.6,o,w,400)
pf.apply(rot)
#if len(pf.pts) < 10:
# continue
pf.finish()
#pf.sty=[style.linewidth.thin,rgb.blue]
pf.sty=[style.linewidth.thin] # field line color/style
pf.drawto(cmagf)
cmagf.writetofile("/Users/michael/Desktop/Bfield.pdf")
def larmor_clipping_plot():
gSurv=graph.graphxy(width=20,height=10,
x=graph.axis.lin(title="Source offset [mm]"),
y=graph.axis.lin(title="",min=0,max=1),
key = graph.key.key(pos="bl"))
gSurv.texrunner.set(lfs='foils17pt')
rho = 1.5
h0 = 9.5
gdat = [ [h0-h,survival_fraction(h,rho,2*3.3),survival_fraction(h,rho,2*3.3/2)] for h in unifrange(h0-10,h0,100) ]
gdat = [ g+[0.5*(g[2]<=1e-3)+(g[2]>1e-3)*(g[1]/(g[2]+1e-6)),] for g in gdat]
gSurv.plot(graph.data.points(gdat,x=1,y=3,title="500keV line survival"),[graph.style.line([style.linewidth.Thick,rgb.blue])])
gSurv.plot(graph.data.points(gdat,x=1,y=2,title="1MeV line survival"),[graph.style.line([style.linewidth.Thick,rgb.red])])
gSurv.plot(graph.data.points(gdat,x=1,y=4,title="1MeV:500keV survival ratio"),[graph.style.line([style.linewidth.Thick])])
gSurv.writetofile("/Users/michael/Desktop/survival_%g.pdf"%rho)
def radial_clipping_plot():
gSurv=graph.graphxy(width=20,height=10,
x=graph.axis.lin(title="Source spot radius [mm]",min=0,max=9.5),
y=graph.axis.lin(title="",min=0,max=1),
key = graph.key.key(pos="bl"))
gSurv.texrunner.set(lfs='foils17pt')
h = 9.5
gdat = [ [rho,radial_survival_fraction(h,rho,3.3),radial_survival_fraction(h,rho,3.3/2.0)] for rho in unifrange(0.,9.5,200) ]
gdat = [ g+[0.5*(g[2]<=1e-3)+(g[2]>1e-3)*(g[1]/(g[2]+1e-6)),] for g in gdat]
gSurv.plot(graph.data.points(gdat,x=1,y=3,title="500keV line survival"),[graph.style.line([style.linewidth.Thick,rgb.blue])])
gSurv.plot(graph.data.points(gdat,x=1,y=2,title="1MeV line survival"),[graph.style.line([style.linewidth.Thick,rgb.red])])
gSurv.plot(graph.data.points(gdat,x=1,y=4,title="1MeV:500keV survival ratio"),[graph.style.line([style.linewidth.Thick])])
gSurv.writetofile("/Users/michael/Desktop/survival_radial.pdf")
if __name__ == "__main__":
#larmor_clipping_plot()
#radial_clipping_plot()
#plot_larmor_trajectory()
plot_spectrometer_field()
|
VPAC/pbs_python
|
src/PBSQuery.py
|
#
# Authors: Roy Dragseth (roy.dragseth@cc.uit.no)
# Bas van der Vlies (basv@sara.nl)
#
# SVN INFO:
# $Id$
#
"""
Usage: from PBSQuery import PBSQuery
This class gets the info from the pbs_server via the pbs.py module
for the several batch objects. All get..() functions return a dictionary
with id as key and batch object as value
There are four batch objects:
- server
- queue
- job
- node
Each object can be handled as a dictionary and has several member
functions. The second parameter is a python list and can be used if you
are only interested in certain resources, see example
There are the following functions for PBSQuery:
job -
getjob(job_id, attributes=<default is all>)
getjobs(attributes=<default is all>)
node -
getnode(node_id, attributes=<default is all>)
getnodes(attributes=<default is all>)
queue -
getqueue(queue_id, attributes=<default is all>)
getqueues(attributes=<default is all>)
server -
get_serverinfo(attributes=<default is all>)
Here is an example of how to use the module:
from PBSQuery import PBSQuery
p = PBSQuery()
nodes = p.getnodes()
for name,node in nodes.items():
print name
if node.is_free():
print node, node['state']
l = [ 'state', 'np' ]
nodes = p.getnodes(l)
for name,node in nodes.items():
print node, node['state']
The parameter 'attributes' is a python list of resources that
you are interested in, eg: only show state of nodes
l = list()
l.append('state')
nodes = p.getnodes(l)
"""
import pbs
import UserDict
import string
import sys
import re
import types
class PBSError(Exception):
def __init__(self, msg=''):
self.msg = msg
Exception.__init__(self, msg)
def __repr__(self):
return self.msg
__str__ = __repr__
class PBSQuery:
# a[key] = value, key and value are data type string
#
OLD_DATA_STRUCTURE = False
def __init__(self, server=None):
if not server:
self.server = pbs.pbs_default()
else:
self.server = server
self._connect()
        ## this is needed for getjob: a jobid is made of:
# sequence_number.server (is not self.server)
#
self.job_server_id = list(self.get_serverinfo())[0]
self._disconnect()
def _connect(self):
"""Connect to the PBS/Torque server"""
self.con = pbs.pbs_connect(self.server)
if self.con < 0:
str = "Could not make a connection with %s\n" %(self.server)
raise PBSError(str)
def _disconnect(self):
"""Close the PBS/Torque connection"""
pbs.pbs_disconnect(self.con)
self.attribs = 'NULL'
def _list_2_attrib(self, list):
"""Convert a python list to an attrib list suitable for pbs"""
self.attribs = pbs.new_attrl( len(list) )
i = 0
for attrib in list:
            # So we can use Resource
attrib = attrib.split('.')
self.attribs[i].name = attrib[0]
i = i + 1
def _pbsstr_2_list(self, str, delimiter):
"""Convert a string to a python list and use delimiter as spit char"""
l = sting.splitfields(str, delimiter)
if len(l) > 1:
return l
def _list_2_dict(self, l, class_func):
"""
Convert a pbsstat function list to a class dictionary, The
data structure depends on the function new_data_structure().
Default data structure is:
class[key] = value, Where key and value are of type string
Future release, can be set by new_data_structure():
- class[key] = value where value can be:
1. a list of values of type string
2. a dictionary with as list of values of type string. If
values contain a '=' character
eg:
print node['np']
>> [ '2' ]
print node['status']['arch']
>> [ 'x86_64' ]
"""
self.d = {}
for item in l:
new = class_func()
self.d[item.name] = new
new.name = item.name
for a in item.attribs:
if self.OLD_DATA_STRUCTURE:
if a.resource:
key = '%s.%s' %(a.name, a.resource)
else:
key = '%s' %(a.name)
new[key] = a.value
else:
values = string.split(a.value, ',')
sub_dict = string.split(a.value, '=')
                    # We must create sub dicts, only for specified
# key values
#
if a.name in ['status', 'Variable_List']:
for v in values:
tmp_l = v.split('=')
                            ## Support for multiple EVENT messages in format [key=value:]+
# format eg: message=EVENT:sample.time=1288864220.003,EVENT:kernel=upgrade,cputotals.user=0
# message=ERROR <text>
#
if tmp_l[0] in ['message']:
if tmp_l[1].startswith('EVENT:'):
tmp_d = dict()
new['event'] = class_func(tmp_d)
message_list = v.split(':')
for event_type in message_list[1:]:
tmp_l = event_type.split('=')
new['event'][ tmp_l[0] ] = tmp_l[1:]
else:
## ERROR message
#
new['error'] = tmp_l [1:]
elif tmp_l[0].startswith('EVENT:'):
message_list = v.split(':')
for event_type in message_list[1:]:
tmp_l = event_type.split('=')
new['event'][ tmp_l[0] ] = tmp_l[1:]
else:
## Check if we already added the key
#
if new.has_key(a.name):
new[a.name][ tmp_l[0] ] = tmp_l[1:]
else:
tmp_d = dict()
tmp_d[ tmp_l[0] ] = tmp_l[1:]
new[a.name] = class_func(tmp_d)
else:
## Check if it is a resource type variable, eg:
# - Resource_List.(nodes, walltime, ..)
#
if a.resource:
if new.has_key(a.name):
new[a.name][a.resource] = values
else:
tmp_d = dict()
tmp_d[a.resource] = values
new[a.name] = class_func(tmp_d)
else:
# Simple value
#
new[a.name] = values
self._free(l)
def _free(self, memory):
"""
        freeing up used memory
"""
pbs.pbs_statfree(memory)
def _statserver(self, attrib_list=None):
"""Get the server config from the pbs server"""
if attrib_list:
self._list_2_attrib(attrib_list)
else:
self.attribs = 'NULL'
self._connect()
serverinfo = pbs.pbs_statserver(self.con, self.attribs, 'NULL')
self._disconnect()
self._list_2_dict(serverinfo, server)
def get_serverinfo(self, attrib_list=None):
self._statserver(attrib_list)
return self.d
def _statqueue(self, queue_name='', attrib_list=None):
"""Get the queue config from the pbs server"""
if attrib_list:
self._list_2_attrib(attrib_list)
else:
self.attribs = 'NULL'
self._connect()
queues = pbs.pbs_statque(self.con, queue_name, self.attribs, 'NULL')
self._disconnect()
self._list_2_dict(queues, queue)
def getqueue(self, name, attrib_list=None):
self._statqueue(name, attrib_list)
try:
return self.d[name]
except KeyError, detail:
return self.d
def getqueues(self, attrib_list=None):
self._statqueue('', attrib_list)
return self.d
def _statnode(self, select='', attrib_list=None, property=None):
"""Get the node config from the pbs server"""
if attrib_list:
self._list_2_attrib(attrib_list)
else:
self.attribs = 'NULL'
if property:
select = ':%s' %(property)
self._connect()
nodes = pbs.pbs_statnode(self.con, select, self.attribs, 'NULL')
self._disconnect()
self._list_2_dict(nodes, node)
def getnode(self, name, attrib_list=None):
self._statnode(name, attrib_list)
try:
return self.d[name]
except KeyError, detail:
return self.d
def getnodes(self, attrib_list=None):
self._statnode('', attrib_list)
return self.d
def getnodes_with_property(self, property, attrib_list=None):
self._statnode('', attrib_list, property)
return self.d
def _statjob(self, job_name='', attrib_list=None):
"""Get the job config from the pbs server"""
if attrib_list:
self._list_2_attrib(attrib_list)
else:
self.attribs = 'NULL'
self._connect()
jobs = pbs.pbs_statjob(self.con, job_name, self.attribs, 'NULL')
self._disconnect()
self._list_2_dict(jobs, job)
def getjob(self, name, attrib_list=None):
## To make sure we use the full name of a job; Changes a name
# like 1234567 into 1234567.job_server_id
#
if len(name.split('.')) == 1 :
name = name.split('.')[0] + "." + self.job_server_id
self._statjob(name, attrib_list)
try:
return self.d[name]
except KeyError, detail:
return self.d
def getjobs(self, attrib_list=None):
self._statjob('', attrib_list)
return self.d
def get_server_name(self):
return self.server
def new_data_structure(self):
"""
Use the new data structure. This is now the default.
"""
self.OLD_DATA_STRUCTURE = False
def old_data_structure(self):
"""
Use the old data structure. This function is obsolete and
will be removed in a future release.
"""
self.OLD_DATA_STRUCTURE = True
class _PBSobject(UserDict.UserDict):
TRUE = 1
FALSE = 0
def __init__(self, dictin = None):
UserDict.UserDict.__init__(self)
self.name = None
if dictin:
if dictin.has_key('name'):
self.name = dictin['name']
del dictin['name']
self.data = dictin
def get_value(self, key):
if self.has_key(key):
return self[key]
else:
return None
def __repr__(self):
return repr(self.data)
def __str__(self):
return str(self.data)
def __getattr__(self, name):
"""
Override the class attribute get method. Return the value
from the UserDict.
"""
try:
return self.data[name]
except KeyError:
error = 'Attribute key error: %s' %(name)
raise PBSError(error)
## Disabled for this moment, BvdV 16 July 2010
#
#def __setattr__(self, name, value):
# """
# override the class attribute set method only when the UserDict
# has set its class attribute
# """
# if self.__dict__.has_key('data'):
# self.data[name] = value
# else:
# self.__dict__[name] = value
def __iter__(self):
return iter(self.data.keys())
def uniq(self, list):
"""Filter out unique items of a list"""
uniq_items = {}
for item in list:
uniq_items[item] = 1
return uniq_items.keys()
def return_value(self, key):
"""Function that returns a value independent of new or old data structure"""
if isinstance(self[key], types.ListType):
return self[key][0]
else:
return self[key]
class job(_PBSobject):
"""PBS job class"""
def is_running(self):
value = self.return_value('job_state')
# PBS job_state 'R' means running ('Q' is merely queued)
if value == 'R':
return self.TRUE
else:
return self.FALSE
def get_nodes(self, unique=None):
"""
Returns a list of the nodes which run this job
format:
* exec_host: gb-r10n14/5+gb-r10n14/4+gb-r10n14/3+gb-r10n14/2+gb-r10n14/1+gb-r10n14/0
* split on '+' and, if unique is set, also split on '/'
"""
nodes = self.get_value('exec_host')
if isinstance(nodes, str):
if nodes:
nodelist = string.split(nodes,'+')
if not unique:
return nodelist
else:
l = list()
for n in nodelist:
t = string.split(n,'/')
if t[0] not in l:
l.append(t[0])
return l
else:
return list()
else:
l = list()
for n in nodes:
nlist = string.split(n,'+')
if unique:
for entry in nlist:
t = string.split(entry,'/')
if t[0] not in l:
l.append(t[0])
else:
l += nlist
return l
class node(_PBSobject):
"""PBS node class"""
def is_free(self):
"""Check if node is free"""
value = self.return_value('state')
if value == 'free':
return self.TRUE
else:
return self.FALSE
def has_job(self):
"""Does the node run a job"""
try:
a = self['jobs']
return self.TRUE
except KeyError, detail:
return self.FALSE
def get_jobs(self, unique=None):
"""Returns a list of the currently running job-id('s) on the node"""
jobs = self.get_value('jobs')
if jobs:
if isinstance(jobs, str):
jlist = re.compile('[^\\ /]\\d+[^/.]').findall( jobs )
if not unique:
return jlist
else:
return self.uniq(jlist)
else:
job_re = re.compile('^(?:\d+/)?(.+)')
l = list()
if unique:
for j in jobs:
jobstr = job_re.findall(j.strip())[0]
if jobstr not in l:
l.append(jobstr)
return l
else:
return jobs
return list()
class queue(_PBSobject):
"""PBS queue class"""
def is_enabled(self):
value = self.return_value('enabled')
if value == 'True':
return self.TRUE
else:
return self.FALSE
def is_execution(self):
value = self.return_value('queue_type')
if value == 'Execution':
return self.TRUE
else:
return self.FALSE
class server(_PBSobject):
"""PBS server class"""
def get_version(self):
return self.get_value('pbs_version')
def main():
p = PBSQuery()
serverinfo = p.get_serverinfo()
for server in serverinfo.keys():
print server, ' version: ', serverinfo[server].get_version()
for resource in serverinfo[server].keys():
print '\t ', resource, ' = ', serverinfo[server][resource]
queues = p.getqueues()
for queue in queues.keys():
print queue
if queues[queue].is_execution():
print '\t ', queues[queue]
if queues[queue].has_key('acl_groups'):
print '\t acl_groups: yes'
else:
print '\t acl_groups: no'
jobs = p.getjobs()
for name,job in jobs.items():
if job.is_running():
print job
l = ['state']
nodes = p.getnodes(l)
for name,node in nodes.items():
if node.is_free():
print node
if __name__ == "__main__":
main()
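# A minimal interactive sketch (hypothetical node/job ids; requires a reachable
# PBS server). With the default new data structure, plain attributes come back
# as lists and resourced attributes as sub-dicts, as built by _list_2_dict():
#
#   >>> p = PBSQuery()
#   >>> node = p.getnode('gb-r10n14')
#   >>> node['state']                # e.g. ['free']
#   >>> job = p.getjob('1234567')    # short ids expand to 1234567.<job_server_id>
#   >>> job.get_nodes(unique=True)   # e.g. ['gb-r10n14']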
|
yeyanchao/calibre
|
src/calibre/gui2/preferences/look_feel_ui.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/yc/code/calibre/calibre/src/calibre/gui2/preferences/look_feel.ui'
#
# Created: Thu Oct 25 16:54:55 2012
# by: PyQt4 UI code generator 4.8.5
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(820, 519)
Form.setWindowTitle(_("Form"))
self.gridLayout_2 = QtGui.QGridLayout(Form)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.tabWidget = QtGui.QTabWidget(Form)
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.tab = QtGui.QWidget()
self.tab.setObjectName(_fromUtf8("tab"))
self.gridLayout_9 = QtGui.QGridLayout(self.tab)
self.gridLayout_9.setObjectName(_fromUtf8("gridLayout_9"))
self.label_7 = QtGui.QLabel(self.tab)
self.label_7.setText(_("Choose &language (requires restart):"))
self.label_7.setObjectName(_fromUtf8("label_7"))
self.gridLayout_9.addWidget(self.label_7, 2, 0, 1, 1)
self.opt_language = QtGui.QComboBox(self.tab)
self.opt_language.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToMinimumContentsLengthWithIcon)
self.opt_language.setMinimumContentsLength(20)
self.opt_language.setObjectName(_fromUtf8("opt_language"))
self.gridLayout_9.addWidget(self.opt_language, 2, 1, 1, 1)
self.opt_systray_icon = QtGui.QCheckBox(self.tab)
self.opt_systray_icon.setText(_("Enable system &tray icon (needs restart)"))
self.opt_systray_icon.setObjectName(_fromUtf8("opt_systray_icon"))
self.gridLayout_9.addWidget(self.opt_systray_icon, 3, 0, 1, 1)
self.label_17 = QtGui.QLabel(self.tab)
self.label_17.setText(_("User Interface &layout (needs restart):"))
self.label_17.setObjectName(_fromUtf8("label_17"))
self.gridLayout_9.addWidget(self.label_17, 1, 0, 1, 1)
self.opt_gui_layout = QtGui.QComboBox(self.tab)
self.opt_gui_layout.setMaximumSize(QtCore.QSize(250, 16777215))
self.opt_gui_layout.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToMinimumContentsLengthWithIcon)
self.opt_gui_layout.setMinimumContentsLength(20)
self.opt_gui_layout.setObjectName(_fromUtf8("opt_gui_layout"))
self.gridLayout_9.addWidget(self.opt_gui_layout, 1, 1, 1, 1)
self.opt_disable_animations = QtGui.QCheckBox(self.tab)
self.opt_disable_animations.setToolTip(_("Disable all animations. Useful if you have a slow/old computer."))
self.opt_disable_animations.setText(_("Disable &animations"))
self.opt_disable_animations.setObjectName(_fromUtf8("opt_disable_animations"))
self.gridLayout_9.addWidget(self.opt_disable_animations, 3, 1, 1, 1)
self.opt_disable_tray_notification = QtGui.QCheckBox(self.tab)
self.opt_disable_tray_notification.setText(_("Disable ¬ifications in system tray"))
self.opt_disable_tray_notification.setObjectName(_fromUtf8("opt_disable_tray_notification"))
self.gridLayout_9.addWidget(self.opt_disable_tray_notification, 4, 0, 1, 1)
self.opt_show_splash_screen = QtGui.QCheckBox(self.tab)
self.opt_show_splash_screen.setText(_("Show &splash screen at startup"))
self.opt_show_splash_screen.setObjectName(_fromUtf8("opt_show_splash_screen"))
self.gridLayout_9.addWidget(self.opt_show_splash_screen, 4, 1, 1, 1)
self.groupBox_2 = QtGui.QGroupBox(self.tab)
self.groupBox_2.setTitle(_("&Toolbar"))
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.gridLayout_8 = QtGui.QGridLayout(self.groupBox_2)
self.gridLayout_8.setObjectName(_fromUtf8("gridLayout_8"))
self.opt_toolbar_icon_size = QtGui.QComboBox(self.groupBox_2)
self.opt_toolbar_icon_size.setObjectName(_fromUtf8("opt_toolbar_icon_size"))
self.gridLayout_8.addWidget(self.opt_toolbar_icon_size, 0, 1, 1, 1)
self.label_5 = QtGui.QLabel(self.groupBox_2)
self.label_5.setText(_("&Icon size:"))
self.label_5.setObjectName(_fromUtf8("label_5"))
self.gridLayout_8.addWidget(self.label_5, 0, 0, 1, 1)
self.opt_toolbar_text = QtGui.QComboBox(self.groupBox_2)
self.opt_toolbar_text.setObjectName(_fromUtf8("opt_toolbar_text"))
self.gridLayout_8.addWidget(self.opt_toolbar_text, 1, 1, 1, 1)
self.label_8 = QtGui.QLabel(self.groupBox_2)
self.label_8.setText(_("Show &text under icons:"))
self.label_8.setObjectName(_fromUtf8("label_8"))
self.gridLayout_8.addWidget(self.label_8, 1, 0, 1, 1)
self.gridLayout_9.addWidget(self.groupBox_2, 7, 0, 1, 2)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_9.addItem(spacerItem, 8, 0, 1, 1)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label_2 = QtGui.QLabel(self.tab)
self.label_2.setText(_("Interface font:"))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.horizontalLayout.addWidget(self.label_2)
self.font_display = QtGui.QLineEdit(self.tab)
self.font_display.setReadOnly(True)
self.font_display.setObjectName(_fromUtf8("font_display"))
self.horizontalLayout.addWidget(self.font_display)
self.gridLayout_9.addLayout(self.horizontalLayout, 6, 0, 1, 1)
self.change_font_button = QtGui.QPushButton(self.tab)
self.change_font_button.setText(_("Change &font (needs restart)"))
self.change_font_button.setObjectName(_fromUtf8("change_font_button"))
self.gridLayout_9.addWidget(self.change_font_button, 6, 1, 1, 1)
self.label_widget_style = QtGui.QLabel(self.tab)
self.label_widget_style.setText(_("User interface &style (needs restart):"))
self.label_widget_style.setObjectName(_fromUtf8("label_widget_style"))
self.gridLayout_9.addWidget(self.label_widget_style, 0, 0, 1, 1)
self.opt_ui_style = QtGui.QComboBox(self.tab)
self.opt_ui_style.setObjectName(_fromUtf8("opt_ui_style"))
self.gridLayout_9.addWidget(self.opt_ui_style, 0, 1, 1, 1)
self.opt_book_list_tooltips = QtGui.QCheckBox(self.tab)
self.opt_book_list_tooltips.setText(_("Show &tooltips in the book list"))
self.opt_book_list_tooltips.setObjectName(_fromUtf8("opt_book_list_tooltips"))
self.gridLayout_9.addWidget(self.opt_book_list_tooltips, 5, 0, 1, 1)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(I("lt.png"))), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tabWidget.addTab(self.tab, icon, _fromUtf8(""))
self.tab_4 = QtGui.QWidget()
self.tab_4.setObjectName(_fromUtf8("tab_4"))
self.gridLayout_12 = QtGui.QGridLayout(self.tab_4)
self.gridLayout_12.setObjectName(_fromUtf8("gridLayout_12"))
self.label_3 = QtGui.QLabel(self.tab_4)
self.label_3.setText(_("Note that <b>comments</b> will always be displayed at the end, regardless of the position you assign here."))
self.label_3.setWordWrap(True)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout_12.addWidget(self.label_3, 2, 1, 1, 1)
self.opt_use_roman_numerals_for_series_number = QtGui.QCheckBox(self.tab_4)
self.opt_use_roman_numerals_for_series_number.setText(_("Use &Roman numerals for series"))
self.opt_use_roman_numerals_for_series_number.setChecked(True)
self.opt_use_roman_numerals_for_series_number.setObjectName(_fromUtf8("opt_use_roman_numerals_for_series_number"))
self.gridLayout_12.addWidget(self.opt_use_roman_numerals_for_series_number, 0, 1, 1, 1)
self.groupBox = QtGui.QGroupBox(self.tab_4)
self.groupBox.setTitle(_("Select displayed metadata"))
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.gridLayout_3 = QtGui.QGridLayout(self.groupBox)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.df_up_button = QtGui.QToolButton(self.groupBox)
self.df_up_button.setToolTip(_("Move up"))
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(_fromUtf8(I("arrow-up.png"))), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.df_up_button.setIcon(icon1)
self.df_up_button.setObjectName(_fromUtf8("df_up_button"))
self.gridLayout_3.addWidget(self.df_up_button, 0, 1, 1, 1)
self.df_down_button = QtGui.QToolButton(self.groupBox)
self.df_down_button.setToolTip(_("Move down"))
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(_fromUtf8(I("arrow-down.png"))), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.df_down_button.setIcon(icon2)
self.df_down_button.setObjectName(_fromUtf8("df_down_button"))
self.gridLayout_3.addWidget(self.df_down_button, 2, 1, 1, 1)
self.field_display_order = QtGui.QListView(self.groupBox)
self.field_display_order.setAlternatingRowColors(True)
self.field_display_order.setObjectName(_fromUtf8("field_display_order"))
self.gridLayout_3.addWidget(self.field_display_order, 0, 0, 3, 1)
spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_3.addItem(spacerItem1, 1, 1, 1, 1)
self.gridLayout_12.addWidget(self.groupBox, 2, 0, 2, 1)
self.hboxlayout = QtGui.QHBoxLayout()
self.hboxlayout.setObjectName(_fromUtf8("hboxlayout"))
self.label = QtGui.QLabel(self.tab_4)
self.label.setText(_("Default author link template:"))
self.label.setObjectName(_fromUtf8("label"))
self.hboxlayout.addWidget(self.label)
self.opt_default_author_link = QtGui.QLineEdit(self.tab_4)
self.opt_default_author_link.setToolTip(_("<p>Enter a template to be used to create a link for\n"
"an author in the books information dialog. This template will\n"
"be used when no link has been provided for the author using\n"
"Manage Authors. You can use the values {author} and\n"
"{author_sort}, and any template function."))
self.opt_default_author_link.setObjectName(_fromUtf8("opt_default_author_link"))
self.hboxlayout.addWidget(self.opt_default_author_link)
self.gridLayout_12.addLayout(self.hboxlayout, 0, 0, 1, 1)
self.opt_bd_show_cover = QtGui.QCheckBox(self.tab_4)
self.opt_bd_show_cover.setText(_("Show &cover in the book details panel"))
self.opt_bd_show_cover.setObjectName(_fromUtf8("opt_bd_show_cover"))
self.gridLayout_12.addWidget(self.opt_bd_show_cover, 1, 0, 1, 2)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(_fromUtf8(I("book.png"))), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tabWidget.addTab(self.tab_4, icon3, _fromUtf8(""))
self.tab_2 = QtGui.QWidget()
self.tab_2.setObjectName(_fromUtf8("tab_2"))
self.gridLayout_10 = QtGui.QGridLayout(self.tab_2)
self.gridLayout_10.setObjectName(_fromUtf8("gridLayout_10"))
self.opt_categories_using_hierarchy = EditWithComplete(self.tab_2)
self.opt_categories_using_hierarchy.setToolTip(_("A comma-separated list of categories in which items containing\n"
"periods are displayed in the tag browser trees. For example, if\n"
"this box contains \'tags\' then tags of the form \'Mystery.English\'\n"
"and \'Mystery.Thriller\' will be displayed with English and Thriller\n"
"both under \'Mystery\'. If \'tags\' is not in this box,\n"
"then the tags will be displayed each on their own line."))
self.opt_categories_using_hierarchy.setObjectName(_fromUtf8("opt_categories_using_hierarchy"))
self.gridLayout_10.addWidget(self.opt_categories_using_hierarchy, 3, 2, 1, 3)
self.label_9 = QtGui.QLabel(self.tab_2)
self.label_9.setText(_("Tags browser category &partitioning method:"))
self.label_9.setObjectName(_fromUtf8("label_9"))
self.gridLayout_10.addWidget(self.label_9, 0, 0, 1, 2)
self.opt_tags_browser_partition_method = QtGui.QComboBox(self.tab_2)
self.opt_tags_browser_partition_method.setToolTip(_("Choose how tag browser subcategories are displayed when\n"
"there are more items than the limit. Select by first\n"
"letter to see an A, B, C list. Choose partitioned to\n"
"have a list of fixed-sized groups. Set to disabled\n"
"if you never want subcategories"))
self.opt_tags_browser_partition_method.setObjectName(_fromUtf8("opt_tags_browser_partition_method"))
self.gridLayout_10.addWidget(self.opt_tags_browser_partition_method, 0, 2, 1, 1)
self.label_10 = QtGui.QLabel(self.tab_2)
self.label_10.setText(_("&Collapse when more items than:"))
self.label_10.setObjectName(_fromUtf8("label_10"))
self.gridLayout_10.addWidget(self.label_10, 0, 3, 1, 1)
self.opt_tags_browser_collapse_at = QtGui.QSpinBox(self.tab_2)
self.opt_tags_browser_collapse_at.setToolTip(_("If a Tag Browser category has more than this number of items, it is divided\n"
"up into subcategories. If the partition method is set to disable, this value is ignored."))
self.opt_tags_browser_collapse_at.setMaximum(10000)
self.opt_tags_browser_collapse_at.setObjectName(_fromUtf8("opt_tags_browser_collapse_at"))
self.gridLayout_10.addWidget(self.opt_tags_browser_collapse_at, 0, 4, 1, 1)
spacerItem2 = QtGui.QSpacerItem(690, 252, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_10.addItem(spacerItem2, 5, 0, 1, 5)
self.label_8111 = QtGui.QLabel(self.tab_2)
self.label_8111.setText(_("Categories not to partition:"))
self.label_8111.setObjectName(_fromUtf8("label_8111"))
self.gridLayout_10.addWidget(self.label_8111, 1, 2, 1, 1)
self.opt_tag_browser_dont_collapse = EditWithComplete(self.tab_2)
self.opt_tag_browser_dont_collapse.setToolTip(_("A comma-separated list of categories that are not to\n"
"be partitioned even if the number of items is larger than\n"
"the value shown above. This option can be used to\n"
"avoid collapsing hierarchical categories that have only\n"
"a few top-level elements."))
self.opt_tag_browser_dont_collapse.setObjectName(_fromUtf8("opt_tag_browser_dont_collapse"))
self.gridLayout_10.addWidget(self.opt_tag_browser_dont_collapse, 1, 3, 1, 2)
self.opt_show_avg_rating = QtGui.QCheckBox(self.tab_2)
self.opt_show_avg_rating.setText(_("Show &average ratings in the tags browser"))
self.opt_show_avg_rating.setChecked(True)
self.opt_show_avg_rating.setObjectName(_fromUtf8("opt_show_avg_rating"))
self.gridLayout_10.addWidget(self.opt_show_avg_rating, 2, 0, 1, 5)
self.label_81 = QtGui.QLabel(self.tab_2)
self.label_81.setText(_("Categories with &hierarchical items:"))
self.label_81.setObjectName(_fromUtf8("label_81"))
self.gridLayout_10.addWidget(self.label_81, 3, 0, 1, 1)
self.opt_tag_browser_old_look = QtGui.QCheckBox(self.tab_2)
self.opt_tag_browser_old_look.setText(_("Use &alternating row colors in the Tag Browser"))
self.opt_tag_browser_old_look.setObjectName(_fromUtf8("opt_tag_browser_old_look"))
self.gridLayout_10.addWidget(self.opt_tag_browser_old_look, 4, 0, 1, 5)
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(_fromUtf8(I("tags.png"))), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tabWidget.addTab(self.tab_2, icon4, _fromUtf8(""))
self.tab_3 = QtGui.QWidget()
self.tab_3.setObjectName(_fromUtf8("tab_3"))
self.gridLayout_11 = QtGui.QGridLayout(self.tab_3)
self.gridLayout_11.setObjectName(_fromUtf8("gridLayout_11"))
self.opt_separate_cover_flow = QtGui.QCheckBox(self.tab_3)
self.opt_separate_cover_flow.setText(_("Show cover &browser in a separate window (needs restart)"))
self.opt_separate_cover_flow.setObjectName(_fromUtf8("opt_separate_cover_flow"))
self.gridLayout_11.addWidget(self.opt_separate_cover_flow, 0, 0, 1, 2)
self.label_6 = QtGui.QLabel(self.tab_3)
self.label_6.setText(_("&Number of covers to show in browse mode (needs restart):"))
self.label_6.setObjectName(_fromUtf8("label_6"))
self.gridLayout_11.addWidget(self.label_6, 1, 0, 1, 1)
self.opt_cover_flow_queue_length = QtGui.QSpinBox(self.tab_3)
self.opt_cover_flow_queue_length.setObjectName(_fromUtf8("opt_cover_flow_queue_length"))
self.gridLayout_11.addWidget(self.opt_cover_flow_queue_length, 1, 1, 1, 1)
spacerItem3 = QtGui.QSpacerItem(690, 283, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_11.addItem(spacerItem3, 4, 0, 1, 2)
self.opt_cb_fullscreen = QtGui.QCheckBox(self.tab_3)
self.opt_cb_fullscreen.setText(_("When showing cover browser in separate window, show it &fullscreen"))
self.opt_cb_fullscreen.setObjectName(_fromUtf8("opt_cb_fullscreen"))
self.gridLayout_11.addWidget(self.opt_cb_fullscreen, 2, 0, 1, 2)
self.fs_help_msg = QtGui.QLabel(self.tab_3)
self.fs_help_msg.setStyleSheet(_fromUtf8("margin-left: 1.5em"))
self.fs_help_msg.setText(_("You can press the %s keys to toggle full screen mode."))
self.fs_help_msg.setWordWrap(True)
self.fs_help_msg.setObjectName(_fromUtf8("fs_help_msg"))
self.gridLayout_11.addWidget(self.fs_help_msg, 3, 0, 1, 2)
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(_fromUtf8(I("cover_flow.png"))), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tabWidget.addTab(self.tab_3, icon5, _fromUtf8(""))
self.gridLayout_2.addWidget(self.tabWidget, 0, 0, 1, 1)
self.label_7.setBuddy(self.opt_language)
self.label_17.setBuddy(self.opt_gui_layout)
self.label_5.setBuddy(self.opt_toolbar_icon_size)
self.label_8.setBuddy(self.opt_toolbar_text)
self.label_2.setBuddy(self.font_display)
self.label_widget_style.setBuddy(self.opt_ui_style)
self.label.setBuddy(self.opt_default_author_link)
self.label_9.setBuddy(self.opt_tags_browser_partition_method)
self.label_10.setBuddy(self.opt_tags_browser_collapse_at)
self.label_8111.setBuddy(self.opt_tag_browser_dont_collapse)
self.label_81.setBuddy(self.opt_categories_using_hierarchy)
self.label_6.setBuddy(self.opt_cover_flow_queue_length)
self.retranslateUi(Form)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _("Main Interface"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4), _("Book Details"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _("Tag Browser"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _("Cover Browser"))
from calibre.gui2.complete2 import EditWithComplete
|
qbeenslee/Nepenthes-Server
|
config/configuration.py
|
# coding:utf-8
"""
Author : qbeenslee
Created : 2014/12/12
"""
import re
# Client ID
CLIENT_ID = "TR5kVmYeMEh9M"
'''
Transmission token format:
cipher$iterations$salt$digest
For example:
====start====
md5$23$YUXQ_-2GfwhzVpt5IQWp$3ebb6e78bf7d0c1938578855982e2b1c
====end====
'''
MATCH_PWD = r"md5\$(\d\d)\$([a-zA-Z0-9_\-]{20})\$([a-f0-9]{32})"
REMATCH_PWD = re.compile(MATCH_PWD)
# Supported upload file (MIME) types
SUPPORT_IMAGE_TYPE_LIST = ['image/gif', 'image/jpeg', 'image/png', 'image/bmp', 'image/x-png',
'application/octet-stream']
# Maximum upload size
MAX_UPLOAD_FILE_SIZE = 10485760 # 10*1024*1024 =10M
# Minimum and maximum image dimensions
MIN_IMAGE_SIZE = {'w': 10, 'h': 10}
MAX_IMAGE_SIZE = {'w': 4000, 'h': 4000}
# Image crop sizes (thumbnails)
THUMB_SIZE_SMALL = {'w': 100, 'h': 100, 'thumb': 's'}
THUMB_SIZE_NORMAL = {'w': 480, 'h': 480, 'thumb': 'n'}
THUMB_SIZE_LARGE = {'w': 3000, 'h': 3000, 'thumb': 'l'}
THUMB_SIZE_ORIGIN = {'w': 0, 'h': 0, 'thumb': 'r'}
MAX_SHARE_DESCRIPTION_SIZE = 140
NOW_ANDROID_VERSION_CODE = 7
NOW_VERSION_DOWNLOAD_URL = "/static/download/nepenthes-beta0.9.3.apk"
MAX_RAND_EMAIL_CODE = 99999
MIN_RAND_EMAIL_CODE = 10000
# Location precision
PRECISION = 12
LOACTION_PRECISION = 4
PAGE_SIZE = 10
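# A minimal sketch showing how REMATCH_PWD parses the token format documented
# above; the sample token is the one from the docstring.
if __name__ == '__main__':
    match = REMATCH_PWD.match("md5$23$YUXQ_-2GfwhzVpt5IQWp$3ebb6e78bf7d0c1938578855982e2b1c")
    if match:
        iterations, salt, digest = match.groups()
        print("iterations=%s salt=%s digest=%s" % (iterations, salt, digest))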
|
UnbDroid/robomagellan
|
Codigos/Raspberry/desenvolvimentoRos/devel/lib/python2.7/dist-packages/tf2_ros/__init__.py
|
# -*- coding: utf-8 -*-
# generated from catkin/cmake/template/__init__.py.in
# keep symbol table as clean as possible by deleting all unnecessary symbols
from os import path as os_path
from sys import path as sys_path
from pkgutil import extend_path
__extended_path = "/home/pi/Documents/desenvolvimentoRos/src/tf2_ros/src".split(";")
for p in reversed(__extended_path):
sys_path.insert(0, p)
del p
del sys_path
__path__ = extend_path(__path__, __name__)
del extend_path
__execfiles = []
for p in __extended_path:
src_init_file = os_path.join(p, __name__ + '.py')
if os_path.isfile(src_init_file):
__execfiles.append(src_init_file)
else:
src_init_file = os_path.join(p, __name__, '__init__.py')
if os_path.isfile(src_init_file):
__execfiles.append(src_init_file)
del src_init_file
del p
del os_path
del __extended_path
for __execfile in __execfiles:
with open(__execfile, 'r') as __fh:
exec(__fh.read())
del __fh
del __execfile
del __execfiles
|
timbotron/ICLS
|
framework.py
|
# This file is part of ICLS.
#
# ICLS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ICLS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ICLS. If not, see <http://www.gnu.org/licenses/>.
import xml.dom.minidom
from time import strftime, strptime
from sys import exit
from textwrap import wrap
from os import path
def colorize(the_color='blue',entry='',new_line=0):
color={'gray':30,'green':32,'red':31,'blue':34,'magenta':35,'cyan':36,'white':37,'highgreen':42,'highblue':44,'highred':41,'highgray':47}
if new_line==1:
new_line='\n'
else:
new_line=''
return_me='\033[1;'+str(color[the_color])+'m'+entry+'\033[1;m'+new_line
return return_me
def getText(nodelist):
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
# If the error is one that halts things, stop the script
def aws_print_error(error_obj):
error_code=getText(xml.dom.minidom.parseString(error_obj[2]).documentElement.getElementsByTagName('Code')[0].childNodes)
error_message=getText(xml.dom.minidom.parseString(error_obj[2]).documentElement.getElementsByTagName('Message')[0].childNodes)
error_message=colorize('red',"ERROR",1)+colorize('red',"AWS Error Code: ")+error_code+colorize('red',"\nError Message: ")+error_message
print error_message
exit()
return True
def print_error(error_text):
error_message=colorize('red',"ERROR",1)+colorize('red',"\nError Message: ")+error_text
print error_message
exit()
return True
#takes an entry, and makes it pretty!
def makeover(entry,ismonochrome=False):
if ismonochrome==False:
output=colorize('gray','========================================',1)
output+=colorize('cyan',entry['entry'],1)
output+=colorize('cyan',strftime("%H:%M %m.%d.%Y", strptime(entry['date'],"%Y-%m-%dT%H:%M:%S+0000")),1)
output+=colorize('gray','ID: '+entry.name,0)
else:
output="========================================\n"
output+=entry['entry']+"\n"
output+=strftime("%H:%M %m.%d.%Y", strptime(entry['date'],"%Y-%m-%dT%H:%M:%S+0000"))+"\n"
output+='ID: '+entry.name
return output
# If help was flagged during parsing, print the help text and exit. TODO: read it from a markdown file.
def print_help():
filepath = path.join(path.dirname(path.abspath(__file__)), 'DOCUMENTATION.mkd')
f = open(filepath,'r')
print f.read()
f.close()
exit()
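# A minimal sketch of colorize() (illustrative only; the escape codes render on
# ANSI-capable terminals).
if __name__ == '__main__':
    print colorize('green', 'OK', 1) + colorize('red', 'something went wrong')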
|
iLoop2/ResInsight
|
ThirdParty/Ert/devel/python/python/ert/enkf/enkf_main.py
|
# Copyright (C) 2012 Statoil ASA, Norway.
#
# The file 'ecl_kw.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
from ert.cwrap import BaseCClass, CWrapper
from ert.enkf import AnalysisConfig, EclConfig, EnkfObs, EnKFState, LocalConfig, ModelConfig, EnsembleConfig, PlotConfig, SiteConfig, ENKF_LIB, EnkfSimulationRunner, EnkfFsManager, ErtWorkflowList, PostSimulationHook
from ert.enkf.enums import EnkfInitModeEnum
from ert.util import SubstitutionList, Log
class EnKFMain(BaseCClass):
def __init__(self, model_config, strict=True):
c_ptr = EnKFMain.cNamespace().bootstrap(model_config, strict, False)
super(EnKFMain, self).__init__(c_ptr)
self.__simulation_runner = EnkfSimulationRunner(self)
self.__fs_manager = EnkfFsManager(self)
@classmethod
def createCReference(cls, c_pointer, parent=None):
obj = super(EnKFMain, cls).createCReference(c_pointer, parent)
obj.__simulation_runner = EnkfSimulationRunner(obj)
obj.__fs_manager = EnkfFsManager(obj)
return obj
@staticmethod
def createNewConfig(config_file, storage_path, case_name, dbase_type, num_realizations):
EnKFMain.cNamespace().create_new_config(config_file, storage_path, case_name, dbase_type, num_realizations)
def getRealisation(self , iens):
""" @rtype: EnKFState """
if 0 <= iens < self.getEnsembleSize():
return EnKFMain.cNamespace().iget_state(self, iens).setParent(self)
else:
raise IndexError("iens value: %d is invalid. Valid range: [0, %d)" % (iens, self.getEnsembleSize()))
def set_eclbase(self, eclbase):
EnKFMain.cNamespace().set_eclbase(self, eclbase)
def umount(self):
self.__fs_manager.umount()
def free(self):
self.umount()
EnKFMain.cNamespace().free(self)
def getEnsembleSize(self):
""" @rtype: int """
return EnKFMain.cNamespace().get_ensemble_size(self)
def resizeEnsemble(self, value):
EnKFMain.cNamespace().resize_ensemble(self, value)
def ensembleConfig(self):
""" @rtype: EnsembleConfig """
return EnKFMain.cNamespace().get_ens_config(self).setParent(self)
def analysisConfig(self):
""" @rtype: AnalysisConfig """
return EnKFMain.cNamespace().get_analysis_config(self).setParent(self)
def getModelConfig(self):
""" @rtype: ModelConfig """
return EnKFMain.cNamespace().get_model_config(self).setParent(self)
def logh(self):
""" @rtype: Log """
return EnKFMain.cNamespace().get_logh(self).setParent(self)
def local_config(self):
""" @rtype: LocalConfig """
return EnKFMain.cNamespace().get_local_config(self).setParent(self)
def siteConfig(self):
""" @rtype: SiteConfig """
return EnKFMain.cNamespace().get_site_config(self).setParent(self)
def eclConfig(self):
""" @rtype: EclConfig """
return EnKFMain.cNamespace().get_ecl_config(self).setParent(self)
def plotConfig(self):
""" @rtype: PlotConfig """
return EnKFMain.cNamespace().get_plot_config(self).setParent(self)
def set_datafile(self, datafile):
EnKFMain.cNamespace().set_datafile(self, datafile)
def get_schedule_prediction_file(self):
schedule_prediction_file = EnKFMain.cNamespace().get_schedule_prediction_file(self)
return schedule_prediction_file
def set_schedule_prediction_file(self, file):
EnKFMain.cNamespace().set_schedule_prediction_file(self, file)
def getDataKW(self):
""" @rtype: SubstitutionList """
return EnKFMain.cNamespace().get_data_kw(self)
def clearDataKW(self):
EnKFMain.cNamespace().clear_data_kw(self)
def addDataKW(self, key, value):
EnKFMain.cNamespace().add_data_kw(self, key, value)
def getMountPoint(self):
return EnKFMain.cNamespace().get_mount_point(self)
def del_node(self, key):
EnKFMain.cNamespace().del_node(self, key)
def getObservations(self):
""" @rtype: EnkfObs """
return EnKFMain.cNamespace().get_obs(self).setParent(self)
def load_obs(self, obs_config_file):
EnKFMain.cNamespace().load_obs(self, obs_config_file)
def reload_obs(self):
EnKFMain.cNamespace().reload_obs(self)
def get_pre_clear_runpath(self):
pre_clear = EnKFMain.cNamespace().get_pre_clear_runpath(self)
return pre_clear
def set_pre_clear_runpath(self, value):
EnKFMain.cNamespace().set_pre_clear_runpath(self, value)
def iget_keep_runpath(self, iens):
ikeep = EnKFMain.cNamespace().iget_keep_runpath(self, iens)
return ikeep
def iset_keep_runpath(self, iens, keep_runpath):
EnKFMain.cNamespace().iset_keep_runpath(self, iens, keep_runpath)
def get_templates(self):
return EnKFMain.cNamespace().get_templates(self).setParent(self)
def get_site_config_file(self):
site_conf_file = EnKFMain.cNamespace().get_site_config_file(self)
return site_conf_file
def getUserConfigFile(self):
""" @rtype: str """
config_file = EnKFMain.cNamespace().get_user_config_file(self)
return config_file
def getHistoryLength(self):
return EnKFMain.cNamespace().get_history_length(self)
def getMemberRunningState(self, ensemble_member):
""" @rtype: EnKFState """
return EnKFMain.cNamespace().iget_state(self, ensemble_member).setParent(self)
def get_observations(self, user_key, obs_count, obs_x, obs_y, obs_std):
EnKFMain.cNamespace().get_observations(self, user_key, obs_count, obs_x, obs_y, obs_std)
def get_observation_count(self, user_key):
return EnKFMain.cNamespace().get_observation_count(self, user_key)
def getEnkfSimulationRunner(self):
""" @rtype: EnkfSimulationRunner """
return self.__simulation_runner
def getEnkfFsManager(self):
""" @rtype: EnkfFsManager """
return self.__fs_manager
def getWorkflowList(self):
""" @rtype: ErtWorkflowList """
return EnKFMain.cNamespace().get_workflow_list(self).setParent(self)
def getPostSimulationHook(self):
""" @rtype: PostSimulationHook """
return EnKFMain.cNamespace().get_qc_module(self)
def exportField(self, keyword, path, iactive, file_type, report_step, state, enkfFs):
"""
@type keyword: str
@type path: str
@type iactive: BoolVector
@type file_type: EnkfFieldFileFormatEnum
@type report_step: int
@type state: EnkfStateType
@type enkfFs: EnkfFs
"""
assert isinstance(keyword, str)
return EnKFMain.cNamespace().export_field_with_fs(self, keyword, path, iactive, file_type, report_step, state, enkfFs)
def loadFromForwardModel(self, realization, iteration, fs):
EnKFMain.cNamespace().load_from_forward_model(self, iteration, realization, fs)
def submitSimulation(self , run_arg):
EnKFMain.cNamespace().submit_simulation( self , run_arg)
def getRunContextENSEMPLE_EXPERIMENT(self , fs , iactive , init_mode = EnkfInitModeEnum.INIT_CONDITIONAL , iteration = 0):
return EnKFMain.cNamespace().alloc_run_context_ENSEMBLE_EXPERIMENT( self , fs , iactive , init_mode , iteration )
##################################################################
cwrapper = CWrapper(ENKF_LIB)
cwrapper.registerType("enkf_main", EnKFMain)
cwrapper.registerType("enkf_main_ref", EnKFMain.createCReference)
EnKFMain.cNamespace().bootstrap = cwrapper.prototype("c_void_p enkf_main_bootstrap(char*, bool, bool)")
EnKFMain.cNamespace().free = cwrapper.prototype("void enkf_main_free(enkf_main)")
EnKFMain.cNamespace().get_ensemble_size = cwrapper.prototype("int enkf_main_get_ensemble_size( enkf_main )")
EnKFMain.cNamespace().get_ens_config = cwrapper.prototype("ens_config_ref enkf_main_get_ensemble_config( enkf_main )")
EnKFMain.cNamespace().get_model_config = cwrapper.prototype("model_config_ref enkf_main_get_model_config( enkf_main )")
EnKFMain.cNamespace().get_local_config = cwrapper.prototype("local_config_ref enkf_main_get_local_config( enkf_main )")
EnKFMain.cNamespace().get_analysis_config = cwrapper.prototype("analysis_config_ref enkf_main_get_analysis_config( enkf_main)")
EnKFMain.cNamespace().get_site_config = cwrapper.prototype("site_config_ref enkf_main_get_site_config( enkf_main)")
EnKFMain.cNamespace().get_ecl_config = cwrapper.prototype("ecl_config_ref enkf_main_get_ecl_config( enkf_main)")
EnKFMain.cNamespace().get_plot_config = cwrapper.prototype("plot_config_ref enkf_main_get_plot_config( enkf_main)")
EnKFMain.cNamespace().set_eclbase = cwrapper.prototype("ui_return_obj enkf_main_set_eclbase( enkf_main, char*)")
EnKFMain.cNamespace().set_datafile = cwrapper.prototype("void enkf_main_set_data_file( enkf_main, char*)")
EnKFMain.cNamespace().get_schedule_prediction_file = cwrapper.prototype("char* enkf_main_get_schedule_prediction_file( enkf_main )")
EnKFMain.cNamespace().set_schedule_prediction_file = cwrapper.prototype("void enkf_main_set_schedule_prediction_file( enkf_main , char*)")
EnKFMain.cNamespace().get_data_kw = cwrapper.prototype("subst_list_ref enkf_main_get_data_kw(enkf_main)")
EnKFMain.cNamespace().clear_data_kw = cwrapper.prototype("void enkf_main_clear_data_kw(enkf_main)")
EnKFMain.cNamespace().add_data_kw = cwrapper.prototype("void enkf_main_add_data_kw(enkf_main, char*, char*)")
EnKFMain.cNamespace().resize_ensemble = cwrapper.prototype("void enkf_main_resize_ensemble(enkf_main, int)")
EnKFMain.cNamespace().del_node = cwrapper.prototype("void enkf_main_del_node(enkf_main, char*)")
EnKFMain.cNamespace().get_obs = cwrapper.prototype("enkf_obs_ref enkf_main_get_obs(enkf_main)")
EnKFMain.cNamespace().load_obs = cwrapper.prototype("void enkf_main_load_obs(enkf_main, char*)")
EnKFMain.cNamespace().reload_obs = cwrapper.prototype("void enkf_main_reload_obs(enkf_main)")
EnKFMain.cNamespace().get_pre_clear_runpath = cwrapper.prototype("bool enkf_main_get_pre_clear_runpath(enkf_main)")
EnKFMain.cNamespace().set_pre_clear_runpath = cwrapper.prototype("void enkf_main_set_pre_clear_runpath(enkf_main, bool)")
EnKFMain.cNamespace().iget_keep_runpath = cwrapper.prototype("int enkf_main_iget_keep_runpath(enkf_main, int)")
EnKFMain.cNamespace().iset_keep_runpath = cwrapper.prototype("void enkf_main_iset_keep_runpath(enkf_main, int, int_vector)")
EnKFMain.cNamespace().get_templates = cwrapper.prototype("ert_templates_ref enkf_main_get_templates(enkf_main)")
EnKFMain.cNamespace().get_site_config_file = cwrapper.prototype("char* enkf_main_get_site_config_file(enkf_main)")
EnKFMain.cNamespace().get_history_length = cwrapper.prototype("int enkf_main_get_history_length(enkf_main)")
EnKFMain.cNamespace().get_observations = cwrapper.prototype("void enkf_main_get_observations(enkf_main, char*, int, long*, double*, double*)")
EnKFMain.cNamespace().get_observation_count = cwrapper.prototype("int enkf_main_get_observation_count(enkf_main, char*)")
EnKFMain.cNamespace().iget_state = cwrapper.prototype("enkf_state_ref enkf_main_iget_state(enkf_main, int)")
EnKFMain.cNamespace().get_workflow_list = cwrapper.prototype("ert_workflow_list_ref enkf_main_get_workflow_list(enkf_main)")
EnKFMain.cNamespace().get_qc_module = cwrapper.prototype("qc_module_ref enkf_main_get_qc_module(enkf_main)")
EnKFMain.cNamespace().fprintf_config = cwrapper.prototype("void enkf_main_fprintf_config(enkf_main)")
EnKFMain.cNamespace().create_new_config = cwrapper.prototype("void enkf_main_create_new_config(char* , char*, char* , char* , int)")
EnKFMain.cNamespace().get_user_config_file = cwrapper.prototype("char* enkf_main_get_user_config_file(enkf_main)")
EnKFMain.cNamespace().get_mount_point = cwrapper.prototype("char* enkf_main_get_mount_root( enkf_main )")
EnKFMain.cNamespace().export_field = cwrapper.prototype("bool enkf_main_export_field(enkf_main, char*, char*, bool_vector, enkf_field_file_format_enum, int, enkf_state_type_enum)")
EnKFMain.cNamespace().export_field_with_fs = cwrapper.prototype("bool enkf_main_export_field_with_fs(enkf_main, char*, char*, bool_vector, enkf_field_file_format_enum, int, enkf_state_type_enum, enkf_fs_manager)")
EnKFMain.cNamespace().load_from_forward_model = cwrapper.prototype("void enkf_main_load_from_forward_model_from_gui(enkf_main, int, bool_vector, enkf_fs)")
EnKFMain.cNamespace().submit_simulation = cwrapper.prototype("void enkf_main_isubmit_job(enkf_main , run_arg)")
EnKFMain.cNamespace().alloc_run_context_ENSEMBLE_EXPERIMENT= cwrapper.prototype("ert_run_context_obj enkf_main_alloc_ert_run_context_ENSEMBLE_EXPERIMENT( enkf_main , enkf_fs , bool_vector , enkf_init_mode_enum , int)")
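# A minimal usage sketch (assumes an ERT model configuration file on disk;
# 'config.ert' is a hypothetical name):
#
#   ert = EnKFMain("config.ert")
#   print ert.getEnsembleSize()
#   ert.free()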
|
Uruwolf/pyshop
|
products/admin.py
|
'''
This file is part of pyShop
pyShop is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pyShop is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pyShop. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) Steve "Uru" West 2012 <uruwolf@gmail.com>
'''
from products.models import Catergory, Product
from django.contrib import admin
#Catergories get the generic admin treatment
admin.site.register(Catergory)
class ProductAdmin(admin.ModelAdmin):
'''Contains the admin panel settings for product objects
Currently set to display the name and catergory,
be filterable by catergory
and searchable via name and description.'''
list_display = ('name', 'catergory')
list_filter = ['catergory']
search_fields = ['name', 'description']
admin.site.register(Product, ProductAdmin)
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/pyshared/oneconf/packagesethandler.py
|
# Copyright (C) 2010 Canonical
#
# Authors:
# Didier Roche <didrocks@ubuntu.com>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import hashlib
import json
import logging
import os
LOG = logging.getLogger(__name__)
from oneconf.hosts import Hosts, HostError
from oneconf.distributor import get_distro
from oneconf.paths import ONECONF_CACHE_DIR, PACKAGE_LIST_PREFIX
class PackageSetHandler(object):
"""
Direct access to database for getting and updating the list
"""
def __init__(self, hosts=None):
self.hosts = hosts
if not hosts:
self.hosts = Hosts()
self.distro = get_distro()
self.last_storage_sync = None
# create cache for storage package list, indexed by hostid
self.package_list = {}
def update(self):
'''update the store with package list'''
hostid = self.hosts.current_host['hostid']
LOG.debug("Updating package list")
newpkg_list = self.distro.compute_local_packagelist()
LOG.debug("Creating the checksum")
checksum = hashlib.sha224(str(newpkg_list)).hexdigest()
LOG.debug("Package list need refresh")
self.package_list[hostid] = {'valid': True, 'package_list': newpkg_list}
with open(os.path.join(self.hosts.get_currenthost_dir(), '%s_%s' % (PACKAGE_LIST_PREFIX, hostid)), 'w') as f:
json.dump(self.package_list[hostid]['package_list'], f)
if self.hosts.current_host['packages_checksum'] != checksum:
self.hosts.current_host['packages_checksum'] = checksum
self.hosts.save_current_host()
LOG.debug("Update done")
def get_packages(self, hostid=None, hostname=None, only_manual=False):
'''get all installed packages from the storage'''
hostid = self.hosts.get_hostid_from_context(hostid, hostname)
LOG.debug ("Request for package list for %s with only manual packages reduced scope to: %s", hostid, only_manual)
package_list = self._get_installed_packages(hostid)
if only_manual:
package_list = [package_elem for package_elem in package_list if package_list[package_elem]["auto"] == False]
return package_list
def _get_installed_packages(self, hostid):
'''get installed packages from the storage or cache
Return: up-to-date package_list'''
need_reload = False
try:
if self.package_list[hostid]['valid']:
LOG.debug("Hit cache for package list")
package_list = self.package_list[hostid]['package_list']
else:
need_reload = True
except KeyError:
need_reload = True
if need_reload:
self.package_list[hostid] = {'valid': True, 'package_list': self._get_packagelist_from_store(hostid)}
return self.package_list[hostid]['package_list']
def diff(self, distant_hostid=None, distant_hostname=None):
'''get a diff of the current package state against another host
This function can be used to diff all packages installed on both computers.
Return: (packages_to_install (packages in distant_hostid not in local_hostid),
packages_to_remove (packages in local_hostid not in distant_hostid))
'''
distant_hostid = self.hosts.get_hostid_from_context(distant_hostid, distant_hostname)
LOG.debug("Collecting all installed packages on this system")
local_package_list = set(self.get_packages(self.hosts.current_host['hostid'], False))
LOG.debug("Collecting all installed packages on the other system")
distant_package_list = set(self.get_packages(distant_hostid, False))
LOG.debug("Comparing")
packages_to_install = [x for x in distant_package_list if x not in local_package_list]
packages_to_remove = [x for x in local_package_list if x not in distant_package_list]
# for Dbus which doesn't like empty list
if not packages_to_install:
packages_to_install = ''
if not packages_to_remove:
packages_to_remove = ''
return(packages_to_install, packages_to_remove)
def _get_packagelist_from_store(self, hostid):
'''load the package list for the given host from the on-disk store'''
LOG.debug('get package list from store for hostid: %s' % hostid)
# load current content in cache
try:
with open(os.path.join(self.hosts.get_currenthost_dir(), '%s_%s' % (PACKAGE_LIST_PREFIX, hostid)), 'r') as f:
# can be none in corrupted null file
pkg_list = json.load(f)
except (IOError, ValueError):
LOG.warning ("no valid package list stored for hostid: %s" % hostid)
pkg_list = None
if pkg_list is None:
pkg_list = {}
# there is no way that no package is installed in current host
# At least, there is oneconf ;) Ask for refresh
if hostid == self.hosts.current_host['hostid']:
LOG.debug ("Processing first update for current host")
self.update()
pkg_list = self.package_list[hostid]['package_list']
return pkg_list
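# A minimal sketch (assumes a configured OneConf host store; 'other-laptop' is
# a hypothetical hostname). diff() returns two lists of package names, each
# replaced by '' when empty as the D-Bus workaround above notes:
#
#   handler = PackageSetHandler()
#   handler.update()
#   to_install, to_remove = handler.diff(distant_hostname='other-laptop')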
|
the-new-sky/BlogInPy
|
markdown/extensions/smartLegend.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import markdown
from markdown.treeprocessors import Treeprocessor
from markdown.blockprocessors import BlockProcessor
import re
from markdown import util
import xml.etree.ElementTree as ET
import copy
from markdown.inlinepatterns import IMAGE_LINK_RE
class InFigureParser(object):
def transform(self, parent, element, legend, index, InP = False):
if InP:
lelems = list(element.iter())
oldImg = lelems[-1]
element.remove(oldImg)
else:
oldImg = element
nFig = util.etree.Element("figure")
nFigCaption = util.etree.Element("figcaption")
contentLegend = legend.items()
for el in legend:
legend.remove(el)
nFigCaption.append(el)
nFig.append(oldImg)
nFig.append(nFigCaption)
parent.remove(element)
parent.remove(legend)
parent.insert(index, nFig)
class FigureParser(InFigureParser):
def __init__(self, ignoringImg):
InFigureParser.__init__(self)
self.ignoringImg = ignoringImg
self.ree = re.compile(r"^" + IMAGE_LINK_RE + r"(\n|$)")
def detect(self, element, type):
if element == None:
return False
lelems = list(element.iter())
#print repr(element.text)
return (type == "unknown" or type == "Figure") \
and element.tag=="p" \
and( ( element.text is not None \
and self.ree.search(element.text)) \
or ( (element.text is None or element.text.strip() == "") \
and (len(lelems) == 1 or (len(lelems)==2 and lelems[0] is element)) \
and lelems[-1].tag == "img" \
and (lelems[-1].attrib["src"] not in self.ignoringImg)))
def transform(self, parent, element, legend, index):
InFigureParser.transform(self, parent, element, legend, index, True)
class EquationParser(InFigureParser):
def detect(self, element, type):
if element == None:
return False
lelems = list(element.iter())
return (type == "unknown" or type == "Equation") \
and element.tag=="p" \
and (element.text is None or element.text.strip() == "") \
and (len(lelems) == 1 or (len(lelems)==2 and lelems[0] is element)) \
and lelems[-1].tag == "mathjax"
def transform(self, parent, element, legend, index):
InFigureParser.transform(self, parent, element, legend, index, True)
class CodeParser(InFigureParser):
def __init__(self, md):
self.md = md
def detect(self, element, type):
if element == None:
return False
if (type == "unknown" or type == "Code") and element.tag=="p" :
hs = self.md.htmlStash
for i in range(hs.html_counter):
if element.text == hs.get_placeholder(i) :
Teste = ET.fromstring(hs.rawHtmlBlocks[i][0].encode('utf-8'))
if Teste is not None and Teste.tag=="table" and "class" in Teste.attrib and Teste.attrib["class"] == "codehilitetable":
return True
else:
return False
return False
class QuoteParser(InFigureParser):
def detect(self, element, type):
if element == None:
return False
return (type == "unknown" or type == "Source") and element.tag=="blockquote"
class TableParser(object):
def detect(self, element, type):
if element == None:
return False
return (type == "unknown" or type == "Table") and element.tag=="table"
def transform(self, parent, element, legend, index):
parent.remove(legend)
cap = util.etree.Element('caption')
contentLegend = legend.items()
for el in legend:
legend.remove(el)
cap.append(el)
element.insert(0, cap)
class VideoParser(InFigureParser):
def detect(self, element, type):
if element == None:
return False
lelems = list(element.iter())
return (type == "unknown" or type == "Video") \
and element.tag=="iframe"
class SmartLegendProcessor(Treeprocessor):
def __init__(self, parser, configs, md):
Treeprocessor.__init__(self, parser)
self.configs = configs
self.processors = ( FigureParser(configs["IGNORING_IMG"]),
EquationParser(),
CodeParser(md),
TableParser(),
VideoParser(),
QuoteParser())
def run(self, root):
root = self.parse_legend(root)
root = self.parse_autoimg(root)
return root
def parse_legend(self, root):
elemsToInspect = [root]
while len(elemsToInspect) > 0:
elem = elemsToInspect.pop()
Restart=True
while Restart:
Restart = False
precedent = None
i=0
for nelem in elem:
if nelem.tag in self.configs["PARENTS"] and nelem not in elemsToInspect:
elemsToInspect.append(nelem)
if nelem.tag == "customlegend" and precedent is not None : # and len(list(nelem.itertext())) == 0 :
proc = self.detectElement(precedent, nelem.attrib["type"])
if proc is not None:
proc.transform(elem, precedent, nelem, i-1)
Restart = True
break
precedent = nelem
i+=1
return root
def parse_autoimg(self, root):
elemsToInspect = [root]
while len(elemsToInspect) > 0:
elem = elemsToInspect.pop()
Restart=True
while Restart:
Restart = False
i=0
for nelem in elem:
if nelem.tag in self.configs["PARENTS"] and nelem not in elemsToInspect:
elemsToInspect.append(nelem)
#Auto Legend for image
if nelem.tag == 'p' and len(list(nelem.itertext())) == 0 :
lelems = list(nelem.iter())
if (len(lelems) == 1 or (len(lelems)==2 and lelems[0] is nelem)) \
and lelems[-1].tag == "img" \
and lelems[-1].attrib["alt"] != "" \
and not (lelems[-1].attrib["src"] in self.configs["IGNORING_IMG"]):
oldImg = lelems[-1]
nelem.remove(oldImg)
nFig = util.etree.Element("figure")
nFigCaption = util.etree.Element("figcaption")
nFigCaption.text = oldImg.attrib["alt"]
oldImg.attrib["alt"]=""
nFig.append(oldImg)
nFig.append(nFigCaption)
nelem.insert(i-1, nFig)
Restart = True
break
i+=1
return root
def detectElement(self, elem, legend):
for proc in self.processors:
if proc.detect(elem, legend) :
return proc
return None
class LegendProcessor(BlockProcessor):
def __init__(self, parser, md, configs):
BlockProcessor.__init__(self, parser)
self.md = md
self.configs = configs
self.processors = ( FigureParser(configs["IGNORING_IMG"]),
EquationParser(),
CodeParser(md),
TableParser(),
VideoParser(),
QuoteParser())
self.RE = re.compile(r'(^|(?<=\n))((?P<typelegend>Figure|Table|Code|Equation|Video|Source)\s?)*\:\s?(?P<txtlegend>.*?)(\n|$)')
def detectElement(self, elem, legend):
for proc in self.processors:
if proc.detect(elem, legend) :
return proc
return None
def test(self, parent, block):
mLeg = self.RE.search(block)
return bool(mLeg)
def test_complete(self, parent, block):
mLeg = self.RE.search(block)
gd = mLeg.groupdict()
if gd["typelegend"] is None:
type = "unknown"
else:
type = gd["typelegend"]
sibling = self.lastChild(parent)
return self.detectElement(sibling, type) is not None
def run(self, parent, blocks):
block = blocks.pop(0)
mLeg = self.RE.search(block)
before = block[:mLeg.start()]
after = block[mLeg.end():]
contentStart = block[mLeg.start():mLeg.start("txtlegend")]
cpp = None
if before:
cpp = copy.copy(parent)
self.parser.parseBlocks(cpp, [before])
else:
cpp = parent
if not self.test_complete(cpp, block):
blocks.insert(0, block)
return False
elif before:
self.parser.parseBlocks(parent, [before])
nLegend = util.etree.Element("customlegend")
self.parser.parseChunk(nLegend, mLeg.group('txtlegend'))
gd = mLeg.groupdict()
if gd["typelegend"] is None:
nLegend.set("type", "unknown")
else:
nLegend.set("type", gd["typelegend"])
nLegend.set("rawStart", contentStart)
parent.append(nLegend)
if after:
blocks.insert(0,after)
class SmartLegendExtension(markdown.extensions.Extension):
def __init__(self, configs={}):
self.configs = {
"IGNORING_IMG" : [],
"PARENTS" : [],
}
for key, value in configs.items():
self.configs[key] = value
if "div" not in self.configs["PARENTS"]:
self.configs["PARENTS"].append("div")
pass
def extendMarkdown(self, md, md_globals):
md.registerExtension(self)
md.treeprocessors.add('smart-legend', SmartLegendProcessor(md.parser,self.configs, md),"_end")
md.parser.blockprocessors.add('legend-processor', LegendProcessor(md.parser,md, self.configs),"_begin")
def makeExtension(configs={}):
return SmartLegendExtension(configs=configs)
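# A minimal usage sketch (assumes Python-Markdown 2.x, whose processor
# registries expose the add() method used in extendMarkdown above; the input
# text is illustrative):
#
#   md = markdown.Markdown(extensions=[SmartLegendExtension()])
#   html = md.convert("![](img.png)\n\nFigure: a sample caption")
#   # the image paragraph comes back wrapped in <figure>/<figcaption>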
|
mgautierfr/devparrot
|
devparrot/core/ui/statusBar.py
|
# This file is part of DevParrot.
#
# Author: Matthieu Gautier <matthieu.gautier@devparrot.org>
#
# DevParrot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DevParrot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DevParrot. If not, see <http://www.gnu.org/licenses/>.
#
#
# Copyright 2011-2013 Matthieu Gautier
import tkinter, tkinter.ttk
import logging
from devparrot.core import session, userLogging
class StatusBar(tkinter.Frame, logging.Handler):
def __init__(self, parent):
tkinter.Frame.__init__(self, parent)
logging.Handler.__init__(self)
self.pack(side=tkinter.BOTTOM, fill=tkinter.X)
self['relief'] = 'sunken'
session.userLogger.addHandler(self)
self.label = tkinter.Label(self)
self.label.pack(side='left', fill=tkinter.BOTH, expand=True)
self.defaultColor = self['background']
self.label['anchor'] = 'nw'
separator = tkinter.ttk.Separator(self, orient="vertical")
separator.pack(side='left', fill='y')
self.insertLabel = tkinter.ttk.Label(self)
self.insertLabel.pack(side='right', expand=False, fill="none")
session.eventSystem.connect('mark_set', self.on_mark_set)
self.currentLevel = 0
self.callbackId = 0
def flush(self):
"""overide logging.Handler.flush"""
pass
def clear(self):
self.currentLevel = 0
self.label['text'] = ""
self.label['background'] = self.defaultColor
self.callbackId = 0
def emit(self,record):
"""overide logging.Handler.emit"""
if record.levelno >= self.currentLevel:
self.currentLevel = record.levelno
self.label['text'] = record.getMessage()
if self.currentLevel == userLogging.INFO:
self.label['background'] = session.config.get('ok_color')
if self.currentLevel == userLogging.ERROR:
self.label['background'] = session.config.get('error_color')
if self.currentLevel == userLogging.INVALID:
self.label['background'] = session.config.get('invalid_color')
if self.callbackId:
self.after_cancel(self.callbackId)
self.callbackId = self.after(5000, self.clear)
def on_mark_set(self, model, name, index):
if name == "insert":
if model.sel_isSelection():
self.insertLabel['text'] = "[%s:%s]"%(model.index("sel.first"), model.index("sel.last"))
else:
self.insertLabel['text'] = str(model.index("insert"))
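# A minimal sketch (assumes a running Tk root and an initialized devparrot
# session). StatusBar doubles as a logging.Handler: user-level records show up
# in the bar, colored by level, and are cleared after 5 seconds:
#
#   bar = StatusBar(rootWindow)
#   session.userLogger.info("saved")   # shown with the 'ok_color' background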
|
michellab/SireUnitTests
|
unittests/SireIO/test_mol2.py
|
from Sire.Base import *
from Sire.IO import *
from Sire.Mol import *
from glob import glob
from nose.tools import assert_equal, assert_almost_equal
# Check that we have Mol2 support in this version of Sire.
has_mol2 = True
try:
p = Mol2()
except:
# No Mol2 support.
has_mol2 = False
# General test of ability to read and write Mol2 files.
# All Mol2 files in the "../io/" directory are parsed.
# Once the input file is parsed we then check that the parser constructs a
# Sire Molecule from the parsed data. Following this, we then check that the
# parser can convert the molecule back into the correct data format, ready to
# be written to file.
def test_read_write(verbose=False):
if not has_mol2:
return
# Glob all of the Mol2 files in the example file directory.
mol2files = glob('../io/*mol2')
# Loop over all test files.
for file in mol2files:
# Test in parallel and serial mode.
for use_par in [True, False]:
if verbose:
print("Reading Mol2 file: %s" % file)
print("Parallel = %s" % use_par)
# Parse the file into a Mol2 object.
p = Mol2(file, {"parallel" : wrap(use_par)})
if verbose:
print("Constructing molecular system...")
# Construct a Sire molecular system.
s = p.toSystem()
if verbose:
print("Reconstructing Mol2 data from molecular system...")
# Now re-parse the molecular system.
p = Mol2(s, {"parallel" : wrap(use_par)})
if verbose:
print("Passed!\n")
# Specific atom coordinate data validation test for file "../io/complex.mol2".
def test_atom_coords(verbose=False):
if not has_mol2:
return
# Test atoms.
atoms = ["N", "CA", "C", "O", "CB"]
# Test coordinates.
coords = [[ -2.9880, -2.0590, -2.6220],
[ -3.8400, -2.0910, -7.4260],
[ -6.4250, -3.9190, -10.9580],
[ -6.1980, -6.0090, -14.2910],
[ -9.8700, -6.5500, -15.2480]]
# Test in parallel and serial mode.
for use_par in [True, False]:
if verbose:
print("Reading Mol2 file: ../io/complex.mol2")
print("Parallel = %s" % use_par)
# Parse the Mol2 file.
p = Mol2('../io/complex.mol2', {"parallel" : wrap(use_par)})
if verbose:
print("Constructing molecular system...")
# Create a molecular system.
s = p.toSystem()
# Get the first molecule.
m = s[MolIdx(0)]
if verbose:
print("Checking atomic coordinates...")
# Loop over all of the atoms.
for i in range(0, len(atoms)):
# Extract the atom from the residue "i + 1".
a = m.atom(AtomName(atoms[i]) + ResNum(i+1))
# Extract the atom coordinates.
c = a.property("coordinates")
# Validate parsed coordinates against known values.
assert_almost_equal( c[0], coords[i][0] )
assert_almost_equal( c[1], coords[i][1] )
assert_almost_equal( c[2], coords[i][2] )
if verbose:
print("Passed!\n")
# Residue and chain validation test for file "../io/complex.mol2".
def test_residues(verbose=False):
if not has_mol2:
return
# Test in parallel and serial mode.
for use_par in [True, False]:
if verbose:
print("Reading Mol2 file: ../io/complex.mol2")
print("Parallel = %s" % use_par)
# Parse the Mol2 file.
p = Mol2('../io/complex.mol2', {"parallel" : wrap(use_par)})
if verbose:
print("Constructing molecular system...")
# Create a molecular system.
s = p.toSystem()
# Get the two molecules.
m1 = s[MolIdx(0)]
m2 = s[MolIdx(1)]
# Get the chains from the molecules.
c1 = m1.chains()
c2 = m2.chains()
if verbose:
print("Checking chain and residue data...")
# Check the number of chains in each molecule.
assert_equal( len(c1), 3 )
assert_equal( len(c2), 1 )
# Check the number of residues in each chain of the first molecule.
assert_equal( len(c1[0].residues()), 118 )
assert_equal( len(c1[1].residues()), 114 )
assert_equal( len(c1[2].residues()), 118 )
# Check the number of residues in the single chain of the second molecule.
assert_equal( len(c2[0].residues()), 1 )
# Check some specific residue names in chains of the first molecule.
assert_equal( c1[0].residues()[0].name().toString(), "ResName('PRO1')" )
assert_equal( c1[1].residues()[1].name().toString(), "ResName('MET2')" )
assert_equal( c1[1].residues()[2].name().toString(), "ResName('PHE3')" )
if verbose:
print("Passed!\n")
if __name__ == "__main__":
test_read_write(True)
test_atom_coords(True)
test_residues(True)
|
valesi/electrum
|
gui/kivy/uix/dialogs/create_restore.py
|
''' Dialogs and widgets responsible for the creation and restoration of
accounts are defined here.
Namely: CreateAccountDialog, CreateRestoreDialog, RestoreSeedDialog
'''
from functools import partial
from kivy.app import App
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.properties import ObjectProperty, StringProperty, OptionProperty
from kivy.core.window import Window
from electrum_gui.kivy.uix.dialogs import EventsDialog
from electrum.i18n import _
Builder.load_string('''
#:import Window kivy.core.window.Window
#:import _ electrum.i18n._
<WizardTextInput@TextInput>
border: 4, 4, 4, 4
font_size: '15sp'
padding: '15dp', '15dp'
background_color: (1, 1, 1, 1) if self.focus else (0.454, 0.698, 0.909, 1)
foreground_color: (0.31, 0.31, 0.31, 1) if self.focus else (0.835, 0.909, 0.972, 1)
hint_text_color: self.foreground_color
background_active: 'atlas://gui/kivy/theming/light/create_act_text_active'
background_normal: 'atlas://gui/kivy/theming/light/create_act_text_active'
size_hint_y: None
height: '48sp'
<WizardButton@Button>:
root: None
size_hint: 1, None
height: '48sp'
on_press: if self.root: self.root.dispatch('on_press', self)
on_release: if self.root: self.root.dispatch('on_release', self)
<-WizardDialog>
text_color: .854, .925, .984, 1
#auto_dismiss: False
size_hint: None, None
canvas.before:
Color:
rgba: 0, 0, 0, .9
Rectangle:
size: Window.size
Color:
rgba: .239, .588, .882, 1
Rectangle:
size: Window.size
crcontent: crcontent
# add electrum icon
BoxLayout:
orientation: 'vertical' if self.width < self.height else 'horizontal'
padding:
min(dp(42), self.width/8), min(dp(60), self.height/9.7),\
min(dp(42), self.width/8), min(dp(72), self.height/8)
spacing: '27dp'
GridLayout:
id: grid_logo
cols: 1
pos_hint: {'center_y': .5}
size_hint: 1, .42
#height: self.minimum_height
Image:
id: logo_img
mipmap: True
allow_stretch: True
size_hint: 1, None
height: '110dp'
source: 'atlas://gui/kivy/theming/light/electrum_icon640'
Widget:
size_hint: 1, None
height: 0 if stepper.opacity else dp(15)
Label:
color: root.text_color
opacity: 0 if stepper.opacity else 1
text: 'ELECTRUM'
size_hint: 1, None
height: self.texture_size[1] if self.opacity else 0
font_size: '33sp'
font_name: 'data/fonts/tron/Tr2n.ttf'
Image:
id: stepper
allow_stretch: True
opacity: 0
source: 'atlas://gui/kivy/theming/light/stepper_left'
size_hint: 1, None
height: grid_logo.height/2.5 if self.opacity else 0
Widget:
size_hint: None, None
size: '5dp', '5dp'
GridLayout:
cols: 1
id: crcontent
spacing: '13dp'
<CreateRestoreDialog>
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text:
_("Wallet file not found!!")+"\\n\\n" +\
_("Do you want to create a new wallet ")+\
_("or restore an existing one?")
Widget:
size_hint: 1, None
height: dp(15)
GridLayout:
id: grid
orientation: 'vertical'
cols: 1
spacing: '14dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
id: create
text: _('Create a new seed')
root: root
WizardButton:
id: restore
text: _('I already have a seed')
root: root
<RestoreSeedDialog>
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: "[b]ENTER YOUR SEED PHRASE[/b]"
GridLayout:
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardTextInput:
id: text_input_seed
size_hint: 1, None
height: '110dp'
hint_text:
_('Enter your seedphrase')
on_text: root._trigger_check_seed()
Label:
font_size: '12sp'
text_size: self.width, None
size_hint: 1, None
height: self.texture_size[1]
halign: 'justify'
valign: 'middle'
text: root.message
on_ref_press:
import webbrowser
webbrowser.open('https://electrum.org/faq.html#seed')
GridLayout:
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
id: back
text: _('Back')
root: root
Button:
id: scan
text: _('QR')
on_release: root.scan_seed()
WizardButton:
id: next
text: _('Next')
root: root
<ShowSeedDialog>
spacing: '12dp'
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: "[b]PLEASE WRITE DOWN YOUR SEED PHRASE[/b]"
GridLayout:
id: grid
cols: 1
pos_hint: {'center_y': .5}
size_hint_y: None
height: dp(180)
orientation: 'vertical'
Button:
border: 4, 4, 4, 4
halign: 'justify'
valign: 'middle'
font_size: self.width/15
text_size: self.width - dp(24), self.height - dp(12)
#size_hint: 1, None
#height: self.texture_size[1] + dp(24)
color: .1, .1, .1, 1
background_normal: 'atlas://gui/kivy/theming/light/white_bg_round_top'
background_down: self.background_normal
text: root.seed_text
Label:
rows: 1
size_hint: 1, .7
id: but_seed
border: 4, 4, 4, 4
halign: 'justify'
valign: 'middle'
font_size: self.width/21
text: root.message
text_size: self.width - dp(24), self.height - dp(12)
GridLayout:
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
id: back
text: _('Back')
root: root
WizardButton:
id: confirm
text: _('Confirm')
root: root
''')
class WizardDialog(EventsDialog):
''' Abstract dialog to be used as the base for all Create Account Dialogs
'''
crcontent = ObjectProperty(None)
def __init__(self, **kwargs):
super(WizardDialog, self).__init__(**kwargs)
self.action = kwargs.get('action')
_trigger_size_dialog = Clock.create_trigger(self._size_dialog)
Window.bind(size=_trigger_size_dialog,
rotation=_trigger_size_dialog)
_trigger_size_dialog()
Window.softinput_mode = 'pan'
def _size_dialog(self, dt):
app = App.get_running_app()
if app.ui_mode[0] == 'p':
self.size = Window.size
else:
#tablet
if app.orientation[0] == 'p':
#portrait
self.size = Window.size[0]/1.67, Window.size[1]/1.4
else:
self.size = Window.size[0]/2.5, Window.size[1]
def add_widget(self, widget, index=0):
if not self.crcontent:
super(WizardDialog, self).add_widget(widget)
else:
self.crcontent.add_widget(widget, index=index)
def on_dismiss(self):
app = App.get_running_app()
if app.wallet is None and self._on_release is not None:
print "on dismiss: stopping app"
app.stop()
else:
Window.softinput_mode = 'below_target'
class CreateRestoreDialog(WizardDialog):
''' Initial Dialog for creating or restoring seed'''
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(app.dispatch, 'on_back')
class ShowSeedDialog(WizardDialog):
seed_text = StringProperty('')
message = StringProperty('')
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
stepper = self.ids.stepper
stepper.opacity = 1
stepper.source = 'atlas://gui/kivy/theming/light/stepper_full'
self._back = _back = partial(self.ids.back.dispatch, 'on_release')
class RestoreSeedDialog(WizardDialog):
message = StringProperty('')
def __init__(self, **kwargs):
super(RestoreSeedDialog, self).__init__(**kwargs)
self._test = kwargs['test']
self._trigger_check_seed = Clock.create_trigger(self.check_seed)
def check_seed(self, dt):
self.ids.next.disabled = not bool(self._test(self.get_seed_text()))
def get_seed_text(self):
ti = self.ids.text_input_seed
text = unicode(ti.text).strip()
text = ' '.join(text.split())
return text
def scan_seed(self):
def on_complete(text):
self.ids.text_input_seed.text = text
app = App.get_running_app()
app.scan_qr(on_complete)
def on_parent(self, instance, value):
if value:
tis = self.ids.text_input_seed
tis.focus = True
tis._keyboard.bind(on_key_down=self.on_key_down)
stepper = self.ids.stepper
stepper.opacity = 1
stepper.source = ('atlas://gui/kivy/theming'
'/light/stepper_restore_seed')
self._back = _back = partial(self.ids.back.dispatch,
'on_release')
app = App.get_running_app()
#app.navigation_higherarchy.append(_back)
def on_key_down(self, keyboard, keycode, key, modifiers):
if keycode[0] in (13, 271):
self.on_enter()
return True
def on_enter(self):
#self._remove_keyboard()
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
def _remove_keyboard(self):
tis = self.ids.text_input_seed
if tis._keyboard:
tis._keyboard.unbind(on_key_down=self.on_key_down)
tis.focus = False
def close(self):
self._remove_keyboard()
app = App.get_running_app()
#if self._back in app.navigation_higherarchy:
# app.navigation_higherarchy.pop()
# self._back = None
super(RestoreSeedDialog, self).close()
|
fr500/mgba
|
src/platform/python/mgba/core.py
|
# Copyright (c) 2013-2016 Jeffrey Pfau
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from ._pylib import ffi, lib
from . import tile
from cached_property import cached_property
def find(path):
core = lib.mCoreFind(path.encode('UTF-8'))
if core == ffi.NULL:
return None
return Core._init(core)
def findVF(vf):
core = lib.mCoreFindVF(vf.handle)
if core == ffi.NULL:
return None
return Core._init(core)
def loadPath(path):
core = find(path)
if not core or not core.loadFile(path):
return None
return core
def loadVF(vf):
core = findVF(vf)
if not core or not core.loadROM(vf):
return None
return core
def needsReset(f):
def wrapper(self, *args, **kwargs):
if not self._wasReset:
raise RuntimeError("Core must be reset first")
return f(self, *args, **kwargs)
return wrapper
class Core(object):
if hasattr(lib, 'PLATFORM_GBA'):
PLATFORM_GBA = lib.PLATFORM_GBA
if hasattr(lib, 'PLATFORM_GB'):
PLATFORM_GB = lib.PLATFORM_GB
def __init__(self, native):
self._core = native
self._wasReset = False
@cached_property
def tiles(self):
return tile.TileView(self)
@classmethod
def _init(cls, native):
core = ffi.gc(native, native.deinit)
success = bool(core.init(core))
if not success:
raise RuntimeError("Failed to initialize core")
if hasattr(cls, 'PLATFORM_GBA') and core.platform(core) == cls.PLATFORM_GBA:
from .gba import GBA
return GBA(core)
if hasattr(cls, 'PLATFORM_GB') and core.platform(core) == cls.PLATFORM_GB:
from .gb import GB
return GB(core)
return Core(core)
def _deinit(self):
self._core.deinit(self._core)
def loadFile(self, path):
return bool(lib.mCoreLoadFile(self._core, path.encode('UTF-8')))
def isROM(self, vf):
return bool(self._core.isROM(vf.handle))
def loadROM(self, vf):
return bool(self._core.loadROM(self._core, vf.handle))
def loadSave(self, vf):
return bool(self._core.loadSave(self._core, vf.handle))
def loadTemporarySave(self, vf):
return bool(self._core.loadTemporarySave(self._core, vf.handle))
def loadPatch(self, vf):
return bool(self._core.loadPatch(self._core, vf.handle))
def autoloadSave(self):
return bool(lib.mCoreAutoloadSave(self._core))
def autoloadPatch(self):
return bool(lib.mCoreAutoloadPatch(self._core))
def platform(self):
return self._core.platform(self._core)
def desiredVideoDimensions(self):
width = ffi.new("unsigned*")
height = ffi.new("unsigned*")
self._core.desiredVideoDimensions(self._core, width, height)
return width[0], height[0]
def setVideoBuffer(self, image):
self._core.setVideoBuffer(self._core, image.buffer, image.stride)
def reset(self):
self._core.reset(self._core)
self._wasReset = True
@needsReset
def runFrame(self):
self._core.runFrame(self._core)
@needsReset
def runLoop(self):
self._core.runLoop(self._core)
@needsReset
def step(self):
self._core.step(self._core)
@staticmethod
def _keysToInt(*args, **kwargs):
keys = 0
if 'raw' in kwargs:
keys = kwargs['raw']
for key in args:
keys |= 1 << key
return keys
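# For example (illustrative): _keysToInt(0, 3, raw=0x10) starts from the raw
# mask 0b10000 and sets bits 0 and 3, giving 0b11001 (i.e. 25).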
def setKeys(self, *args, **kwargs):
self._core.setKeys(self._core, self._keysToInt(*args, **kwargs))
def addKeys(self, *args, **kwargs):
self._core.addKeys(self._core, self._keysToInt(*args, **kwargs))
def clearKeys(self, *args, **kwargs):
self._core.clearKeys(self._core, self._keysToInt(*args, **kwargs))
@needsReset
def frameCounter(self):
return self._core.frameCounter(self._core)
def frameCycles(self):
return self._core.frameCycles(self._core)
def frequency(self):
return self._core.frequency(self._core)
def getGameTitle(self):
title = ffi.new("char[16]")
self._core.getGameTitle(self._core, title)
return ffi.string(title, 16).decode("ascii")
def getGameCode(self):
code = ffi.new("char[12]")
self._core.getGameCode(self._core, code)
return ffi.string(code, 12).decode("ascii")
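# Usage sketch (illustrative; the ROM path is hypothetical): find a core for
# a file, load it, and run one frame. reset() must be called before
# runFrame(), as enforced by the needsReset decorator above.
#
#     core = loadPath("game.gba")
#     if core:
#         core.reset()
#         core.runFrame()
#         print(core.getGameTitle())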
|
arkharin/OpenCool
|
scr/logic/components/expansion_valve/theoretical.py
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Define the Expansion Valve component.
"""
from scr.logic.components.component import Component as Cmp
from scr.logic.components.component import ComponentInfo as CmpInfo
from scr.logic.components.component import component, fundamental_equation
def update_saved_data_to_last_version(orig_data, orig_version):
return orig_data
@component('theoretical_expansion_valve', CmpInfo.EXPANSION_VALVE, 1, update_saved_data_to_last_version)
class Theoretical(Cmp):
def __init__(self, id_, inlet_nodes_id, outlet_nodes_id, component_data):
super().__init__(id_, inlet_nodes_id, outlet_nodes_id, component_data)
""" Fundamental properties equations """
@fundamental_equation()
# The function name is arbitrary. Returns a two-element list with each side of the equation evaluated.
def _eval_intrinsic_equations(self):
id_inlet_node = self.get_id_inlet_nodes()[0]
inlet_node = self.get_inlet_node(id_inlet_node)
id_outlet_node = self.get_id_outlet_nodes()[0]
outlet_node = self.get_outlet_node(id_outlet_node)
h_in = inlet_node.enthalpy()
h_out = outlet_node.enthalpy()
return [h_in / 1000.0, h_out / 1000.0]
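# In use, the solver drives the two returned sides toward equality, i.e.
# h_in == h_out: the theoretical expansion valve is isenthalpic. The
# division by 1000.0 presumably just rescales the residual (J/kg -> kJ/kg)
# for numerical conditioning.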
|
KWierso/treeherder
|
tests/etl/conftest.py
|
import datetime
import pytest
from tests.test_utils import create_generic_job
from treeherder.model.models import Push
@pytest.fixture
def perf_push(test_repository):
return Push.objects.create(
repository=test_repository,
revision='1234abcd',
author='foo@bar.com',
time=datetime.datetime.now())
@pytest.fixture
def perf_job(perf_push, failure_classifications, generic_reference_data):
return create_generic_job('myfunguid', perf_push.repository,
perf_push.id, generic_reference_data)
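# Usage sketch (illustrative): pytest injects these fixtures into any test
# that names them as arguments, e.g.
#
#     def test_uses_perf_push(perf_push):
#         assert perf_push.revision == '1234abcd'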
|
jgmize/kuma
|
kuma/wiki/models.py
|
import hashlib
import json
import sys
import traceback
from datetime import datetime, timedelta
from functools import wraps
from uuid import uuid4
import newrelic.agent
import waffle
from constance import config
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models, transaction
from django.db.models import signals
from django.utils.decorators import available_attrs
from django.utils.functional import cached_property
from django.utils.translation import ugettext, ugettext_lazy as _
from pyquery import PyQuery
from taggit.managers import TaggableManager
from taggit.models import ItemBase, TagBase
from taggit.utils import edit_string_for_tags, parse_tags
from tidings.models import NotificationsMixin
from kuma.core.cache import memcache
from kuma.core.exceptions import ProgrammingError
from kuma.core.i18n import get_language_mapping
from kuma.core.urlresolvers import reverse
from kuma.search.decorators import register_live_index
from kuma.spam.models import AkismetSubmission, SpamAttempt
from . import kumascript
from .constants import (DEKI_FILE_URL, DOCUMENT_LAST_MODIFIED_CACHE_KEY_TMPL,
KUMA_FILE_URL, REDIRECT_CONTENT, REDIRECT_HTML,
TEMPLATE_TITLE_PREFIX)
from .content import parse as parse_content
from .content import (Extractor, H2TOCFilter, H3TOCFilter, SectionTOCFilter,
get_content_sections, get_seo_description)
from .exceptions import (DocumentRenderedContentNotAvailable,
DocumentRenderingInProgress, PageMoveError,
SlugCollision, UniqueCollision)
from .jobs import DocumentContributorsJob, DocumentZoneStackJob
from .managers import (DeletedDocumentManager, DocumentAdminManager,
DocumentManager, RevisionIPManager,
TaggedDocumentManager, TransformManager)
from .signals import render_done
from .templatetags.jinja_helpers import absolutify
from .utils import tidy_content
def cache_with_field(field_name):
"""Decorator for generated content methods.
If the backing model field is null, or kwarg force_fresh is True, call the
decorated method to generate and return the content.
Otherwise, just return the value in the backing model field.
"""
def decorator(fn):
@wraps(fn, assigned=available_attrs(fn))
def wrapper(self, *args, **kwargs):
force_fresh = kwargs.pop('force_fresh', False)
# Try getting the value using the DB field.
field_val = getattr(self, field_name)
if field_val is not None and not force_fresh:
return field_val
# DB field is blank, or we're forced to generate it fresh.
field_val = fn(self, force_fresh=force_fresh)
setattr(self, field_name, field_val)
return field_val
return wrapper
return decorator
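# Example (sketch; Page and build_toc are hypothetical): the backing field
# must be a nullable column, and the decorated method must tolerate the
# force_fresh kwarg that the wrapper passes through.
#
#     class Page(models.Model):
#         toc_html = models.TextField(null=True, blank=True)
#
#         @cache_with_field('toc_html')
#         def get_toc_html(self, *args, **kwargs):
#             return build_toc(self.html)   # runs when toc_html is None or force_fresh=True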
def _inherited(parent_attr, direct_attr):
"""Return a descriptor delegating to an attr of the original document.
If `self` is a translation, the descriptor delegates to the attribute
`parent_attr` from the original document. Otherwise, it delegates to the
attribute `direct_attr` from `self`.
Use this only on a reference to another object, like a ManyToMany or a
ForeignKey. Using it on a normal field won't work well, as it'll preclude
the use of that field in QuerySet field lookups. Also, ModelForms that are
passed instance=this_obj won't see the inherited value.
"""
getter = lambda self: (getattr(self.parent, parent_attr)
if self.parent and self.parent.id != self.id
else getattr(self, direct_attr))
setter = lambda self, val: (setattr(self.parent, parent_attr, val)
if self.parent and self.parent.id != self.id
else setattr(self, direct_attr, val))
return property(getter, setter)
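# Example (sketch; the field names are hypothetical): on a translation,
# `category` reads and writes parent.category, while on an original
# document it falls back to the local column.
#
#     category = _inherited('category', 'category_direct')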
def valid_slug_parent(slug, locale):
slug_bits = slug.split('/')
slug_bits.pop()
parent = None
if slug_bits:
parent_slug = '/'.join(slug_bits)
try:
parent = Document.objects.get(locale=locale, slug=parent_slug)
except Document.DoesNotExist:
raise Exception(
ugettext('Parent %s does not exist.' % (
'%s/%s' % (locale, parent_slug))))
return parent
class DocumentTag(TagBase):
"""A tag indexing a document"""
class Meta:
verbose_name = _('Document Tag')
verbose_name_plural = _('Document Tags')
def tags_for(cls, model, instance=None, **extra_filters):
"""
Sadly copied from taggit to work around the issue of not being
able to use the TaggedItemBase class that has tag field already
defined.
"""
kwargs = extra_filters or {}
if instance is not None:
kwargs.update({
'%s__content_object' % cls.tag_relname(): instance
})
return cls.tag_model().objects.filter(**kwargs)
kwargs.update({
'%s__content_object__isnull' % cls.tag_relname(): False
})
return cls.tag_model().objects.filter(**kwargs).distinct()
class TaggedDocument(ItemBase):
"""Through model, for tags on Documents"""
content_object = models.ForeignKey('Document')
tag = models.ForeignKey(DocumentTag, related_name="%(app_label)s_%(class)s_items")
objects = TaggedDocumentManager()
@classmethod
def tags_for(cls, *args, **kwargs):
return tags_for(cls, *args, **kwargs)
class DocumentAttachment(models.Model):
"""
Intermediary between Documents and Attachments. Allows storing the
user who attached a file to a document, and a (unique for that
document) name for referring to the file from the document.
"""
file = models.ForeignKey(
'attachments.Attachment',
related_name='document_attachments',
)
document = models.ForeignKey(
'wiki.Document',
related_name='attached_files',
)
attached_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True)
name = models.TextField()
# whether or not this attachment was uploaded for the document
is_original = models.BooleanField(
verbose_name=_('uploaded to the document'),
default=False,
)
# whether or not this attachment is linked in the document's content
is_linked = models.BooleanField(
verbose_name=_('linked in the document content'),
default=False,
)
class Meta:
db_table = 'attachments_documentattachment'
def __unicode__(self):
return u'"%s" for document "%s"' % (self.file, self.document)
def clean(self):
if self.pk and (self.document.files.through.objects.exclude(pk=self.pk)
.exists()):
raise ValidationError(
_("Attachment %(attachment_id)s can't be attached "
"multiple times to document %(document_id)s") %
{'attachment_id': self.pk, 'document_id': self.document.pk}
)
@register_live_index
class Document(NotificationsMixin, models.Model):
"""A localized knowledgebase document, not revision-specific."""
TOC_FILTERS = {
1: SectionTOCFilter,
2: H2TOCFilter,
3: H3TOCFilter,
4: SectionTOCFilter
}
title = models.CharField(max_length=255, db_index=True)
slug = models.CharField(max_length=255, db_index=True)
# NOTE: Documents are indexed by tags, but tags are edited in Revisions.
# Also, using a custom through table to isolate Document tags from those
# used in other models and apps. (Works better than namespaces, for
# completion and such.)
tags = TaggableManager(through=TaggedDocument)
# Is this document a template or not?
is_template = models.BooleanField(default=False, editable=False,
db_index=True)
# Is this a redirect or not?
is_redirect = models.BooleanField(default=False, editable=False,
db_index=True)
# Is this document localizable or not?
is_localizable = models.BooleanField(default=True, db_index=True)
locale = models.CharField(
max_length=7,
choices=settings.LANGUAGES,
default=settings.WIKI_DEFAULT_LANGUAGE,
db_index=True,
)
# Latest approved revision. L10n dashboard depends on this being so (rather
# than being able to set it to earlier approved revisions).
current_revision = models.ForeignKey(
'Revision',
null=True,
related_name='current_for+',
)
# The Document I was translated from. NULL if this doc is in the default
# locale or it is nonlocalizable. TODO: validate against
# settings.WIKI_DEFAULT_LANGUAGE.
parent = models.ForeignKey(
'self',
related_name='translations',
null=True,
blank=True,
)
parent_topic = models.ForeignKey(
'self',
related_name='children',
null=True,
blank=True,
)
# The files attached to the document, represented by a custom intermediate
# model so we can store some metadata about the relation
files = models.ManyToManyField(
'attachments.Attachment',
through=DocumentAttachment,
)
# JSON representation of Document for API results, built on save
json = models.TextField(editable=False, blank=True, null=True)
# Raw HTML of approved revision's wiki markup
html = models.TextField(editable=False)
# Cached result of kumascript and other offline processors (if any)
rendered_html = models.TextField(editable=False, blank=True, null=True)
# Errors (if any) from the last rendering run
rendered_errors = models.TextField(editable=False, blank=True, null=True)
# Whether or not to automatically defer rendering of this page to a queued
# offline task. Generally used for complex pages that need time
defer_rendering = models.BooleanField(default=False, db_index=True)
# Timestamp when this document was last scheduled for a render
render_scheduled_at = models.DateTimeField(null=True, db_index=True)
# Timestamp when a render for this document was last started
render_started_at = models.DateTimeField(null=True, db_index=True)
# Timestamp when this document was last rendered
last_rendered_at = models.DateTimeField(null=True, db_index=True)
# Maximum age (in seconds) before this document needs re-rendering
render_max_age = models.IntegerField(blank=True, null=True)
# Time after which this document needs re-rendering
render_expires = models.DateTimeField(blank=True, null=True, db_index=True)
# Whether this page is deleted.
deleted = models.BooleanField(default=False, db_index=True)
# Last modified time for the document. Should be equal-to or greater than
# the current revision's created field
modified = models.DateTimeField(auto_now=True, null=True, db_index=True)
body_html = models.TextField(editable=False, blank=True, null=True)
quick_links_html = models.TextField(editable=False, blank=True, null=True)
zone_subnav_local_html = models.TextField(editable=False,
blank=True, null=True)
toc_html = models.TextField(editable=False, blank=True, null=True)
summary_html = models.TextField(editable=False, blank=True, null=True)
summary_text = models.TextField(editable=False, blank=True, null=True)
uuid = models.UUIDField(default=uuid4, editable=False)
class Meta(object):
unique_together = (
('parent', 'locale'),
('slug', 'locale'),
)
permissions = (
('view_document', 'Can view document'),
('add_template_document', 'Can add Template:* document'),
('change_template_document', 'Can change Template:* document'),
('move_tree', 'Can move a tree of documents'),
('purge_document', 'Can permanently delete document'),
('restore_document', 'Can restore deleted document'),
)
objects = DocumentManager()
deleted_objects = DeletedDocumentManager()
admin_objects = DocumentAdminManager()
def __unicode__(self):
return u'%s (%s)' % (self.get_absolute_url(), self.title)
@cache_with_field('body_html')
def get_body_html(self, *args, **kwargs):
html = self.rendered_html and self.rendered_html or self.html
sections_to_hide = ('Quick_Links', 'Subnav')
doc = parse_content(html)
for sid in sections_to_hide:
doc = doc.replaceSection(sid, '<!-- -->')
doc.injectSectionIDs()
doc.annotateLinks(base_url=settings.SITE_URL)
return doc.serialize()
@cache_with_field('quick_links_html')
def get_quick_links_html(self, *args, **kwargs):
return self.get_section_content('Quick_Links')
@cache_with_field('zone_subnav_local_html')
def get_zone_subnav_local_html(self, *args, **kwargs):
return self.get_section_content('Subnav')
@cache_with_field('toc_html')
def get_toc_html(self, *args, **kwargs):
if not self.current_revision:
return ''
toc_depth = self.current_revision.toc_depth
if not toc_depth:
return ''
html = self.rendered_html and self.rendered_html or self.html
return (parse_content(html)
.injectSectionIDs()
.filter(self.TOC_FILTERS[toc_depth])
.serialize())
@cache_with_field('summary_html')
def get_summary_html(self, *args, **kwargs):
return self.get_summary(strip_markup=False)
@cache_with_field('summary_text')
def get_summary_text(self, *args, **kwargs):
return self.get_summary(strip_markup=True)
def regenerate_cache_with_fields(self):
"""Regenerate fresh content for all the cached fields"""
# TODO: Maybe @cache_with_field can build a registry over which this
# method can iterate?
self.get_body_html(force_fresh=True)
self.get_quick_links_html(force_fresh=True)
self.get_zone_subnav_local_html(force_fresh=True)
self.get_toc_html(force_fresh=True)
self.get_summary_html(force_fresh=True)
self.get_summary_text(force_fresh=True)
def get_zone_subnav_html(self):
"""
Search from self up through DocumentZone stack, returning the first
zone nav HTML found.
"""
src = self.get_zone_subnav_local_html()
if src:
return src
for zone in DocumentZoneStackJob().get(self.pk):
src = zone.document.get_zone_subnav_local_html()
if src:
return src
def get_section_content(self, section_id, ignore_heading=True):
"""
Convenience method to extract the rendered content for a single section
"""
if self.rendered_html:
content = self.rendered_html
else:
content = self.html
return self.extract.section(content, section_id, ignore_heading)
def calculate_etag(self, section_id=None):
"""Calculate an etag-suitable hash for document content or a section"""
if not section_id:
content = self.html
else:
content = self.extract.section(self.html, section_id)
return '"%s"' % hashlib.sha1(content.encode('utf8')).hexdigest()
def current_or_latest_revision(self):
"""Returns current revision if there is one, else the last created
revision."""
rev = self.current_revision
if not rev:
revs = self.revisions.order_by('-created')
if revs.exists():
rev = revs[0]
return rev
@property
def is_rendering_scheduled(self):
"""Does this have a rendering scheduled?"""
if not self.render_scheduled_at:
return False
# Check whether a scheduled rendering has waited for too long. Assume
# failure, in this case, and allow another scheduling attempt.
timeout = config.KUMA_DOCUMENT_RENDER_TIMEOUT
max_duration = timedelta(seconds=timeout)
duration = datetime.now() - self.render_scheduled_at
if duration > max_duration:
return False
if not self.last_rendered_at:
return True
return self.render_scheduled_at > self.last_rendered_at
@property
def is_rendering_in_progress(self):
"""Does this have a rendering in progress?"""
if not self.render_started_at:
# No start time, so False.
return False
# Check whether an in-progress rendering has gone on for too long.
# Assume failure, in this case, and allow another rendering attempt.
timeout = config.KUMA_DOCUMENT_RENDER_TIMEOUT
max_duration = timedelta(seconds=timeout)
duration = datetime.now() - self.render_started_at
if duration > max_duration:
return False
if not self.last_rendered_at:
# No rendering ever, so in progress.
return True
# Finally, if the render start is more recent than last completed
# render, then we have one in progress.
return self.render_started_at > self.last_rendered_at
@newrelic.agent.function_trace()
def get_rendered(self, cache_control=None, base_url=None):
"""Attempt to get rendered content for this document"""
# No rendered content yet, so schedule the first render.
if not self.rendered_html:
try:
self.schedule_rendering(cache_control, base_url)
except DocumentRenderingInProgress:
# Unable to trigger a rendering right now, so we bail.
raise DocumentRenderedContentNotAvailable
# If we have a cache_control directive, try scheduling a render.
if cache_control:
try:
self.schedule_rendering(cache_control, base_url)
except DocumentRenderingInProgress:
pass
# Parse JSON errors, if available.
errors = None
try:
errors = (self.rendered_errors and
json.loads(self.rendered_errors) or None)
except ValueError:
pass
# If the above resulted in an immediate render, we might have content.
if not self.rendered_html:
if errors:
return ('', errors)
else:
# But, no such luck, so bail out.
raise DocumentRenderedContentNotAvailable
return (self.rendered_html, errors)
def schedule_rendering(self, cache_control=None, base_url=None):
"""
Attempt to schedule rendering. Honor the deferred_rendering field to
decide between an immediate or a queued render.
"""
# Avoid scheduling a rendering if already scheduled or in progress.
if self.is_rendering_scheduled or self.is_rendering_in_progress:
return False
# Note when the rendering was scheduled. Kind of a hack, doing a quick
# update and setting the local property rather than doing a save()
now = datetime.now()
Document.objects.filter(pk=self.pk).update(render_scheduled_at=now)
self.render_scheduled_at = now
if (waffle.switch_is_active('wiki_force_immediate_rendering') or
not self.defer_rendering):
# Attempt an immediate rendering.
self.render(cache_control, base_url)
else:
# Attempt to queue a rendering. If celery.conf.ALWAYS_EAGER is
# True, this is also an immediate rendering.
from . import tasks
tasks.render_document.delay(self.pk, cache_control, base_url)
def render(self, cache_control=None, base_url=None, timeout=None):
"""
Render content using kumascript and any other services necessary.
"""
if not base_url:
base_url = settings.SITE_URL
# Disallow rendering while another is in progress.
if self.is_rendering_in_progress:
raise DocumentRenderingInProgress
# Note when the rendering was started. Kind of a hack, doing a quick
# update and setting the local property rather than doing a save()
now = datetime.now()
Document.objects.filter(pk=self.pk).update(render_started_at=now)
self.render_started_at = now
# Perform rendering and update document
if not config.KUMASCRIPT_TIMEOUT:
# A timeout of 0 should shortcircuit kumascript usage.
self.rendered_html, self.rendered_errors = self.html, []
else:
self.rendered_html, errors = kumascript.get(self, cache_control,
base_url,
timeout=timeout)
self.rendered_errors = errors and json.dumps(errors) or None
# Regenerate the cached content fields
self.regenerate_cache_with_fields()
# Finally, note the end time of rendering and update the document.
self.last_rendered_at = datetime.now()
# If this rendering took longer than we'd like, mark it for deferred
# rendering in the future.
timeout = config.KUMA_DOCUMENT_FORCE_DEFERRED_TIMEOUT
max_duration = timedelta(seconds=timeout)
duration = self.last_rendered_at - self.render_started_at
if duration >= max_duration:
self.defer_rendering = True
# TODO: Automatically clear the defer_rendering flag if the rendering
# time falls under the limit? Probably safer to require manual
# intervention to free docs from deferred jail.
if self.render_max_age:
# If there's a render_max_age, automatically update render_expires
self.render_expires = (datetime.now() +
timedelta(seconds=self.render_max_age))
else:
# Otherwise, just clear the expiration time as a one-shot
self.render_expires = None
self.save()
render_done.send(sender=self.__class__, instance=self)
def get_summary(self, strip_markup=True, use_rendered=True):
"""
Attempt to get the document summary from rendered content, with
fallback to raw HTML
"""
if use_rendered and self.rendered_html:
src = self.rendered_html
else:
src = self.html
return get_seo_description(src, self.locale, strip_markup)
def build_json_data(self):
html = self.rendered_html and self.rendered_html or self.html
content = parse_content(html).injectSectionIDs().serialize()
sections = get_content_sections(content)
translations = []
if self.pk:
for translation in self.other_translations:
revision = translation.current_revision
if revision.summary:
summary = revision.summary
else:
summary = translation.get_summary(strip_markup=False)
translations.append({
'last_edit': revision.created.isoformat(),
'locale': translation.locale,
'localization_tags': list(revision.localization_tags
.names()),
'review_tags': list(revision.review_tags.names()),
'summary': summary,
'tags': list(translation.tags.names()),
'title': translation.title,
'url': translation.get_absolute_url(),
'uuid': str(translation.uuid)
})
if self.current_revision:
review_tags = list(self.current_revision.review_tags.names())
localization_tags = list(self.current_revision
.localization_tags
.names())
last_edit = self.current_revision.created.isoformat()
if self.current_revision.summary:
summary = self.current_revision.summary
else:
summary = self.get_summary(strip_markup=False)
else:
review_tags = []
localization_tags = []
last_edit = ''
summary = ''
if not self.pk:
tags = []
else:
tags = list(self.tags.names())
now_iso = datetime.now().isoformat()
if self.modified:
modified = self.modified.isoformat()
else:
modified = now_iso
return {
'title': self.title,
'label': self.title,
'url': self.get_absolute_url(),
'id': self.id,
'uuid': str(self.uuid),
'slug': self.slug,
'tags': tags,
'review_tags': review_tags,
'localization_tags': localization_tags,
'sections': sections,
'locale': self.locale,
'summary': summary,
'translations': translations,
'modified': modified,
'json_modified': now_iso,
'last_edit': last_edit
}
def get_json_data(self, stale=True):
"""Returns a document in object format for output as JSON.
The stale parameter, when True, accepts stale cached data even after
the document has been modified."""
# Have parsed data & don't care about freshness? Here's a quick out..
curr_json_data = getattr(self, '_json_data', None)
if curr_json_data and stale:
return curr_json_data
# Attempt to parse the current contents of self.json, taking care in
# case it's empty or broken JSON.
self._json_data = {}
if self.json:
try:
self._json_data = json.loads(self.json)
except (TypeError, ValueError):
pass
# Try to get ISO 8601 datestamps for the doc and the json
json_lmod = self._json_data.get('json_modified', '')
doc_lmod = self.modified.isoformat()
# If there's no parsed data or the data is stale & we care, it's time
# to rebuild the cached JSON data.
if (not self._json_data) or (not stale and doc_lmod > json_lmod):
self._json_data = self.build_json_data()
self.json = json.dumps(self._json_data)
Document.objects.filter(pk=self.pk).update(json=self.json)
return self._json_data
@cached_property
def extract(self):
return Extractor(self)
def natural_key(self):
return (self.locale, self.slug)
@staticmethod
def natural_key_hash(keys):
natural_key = u'/'.join(keys)
return hashlib.md5(natural_key.encode('utf8')).hexdigest()
@cached_property
def natural_cache_key(self):
return self.natural_key_hash(self.natural_key())
def _existing(self, attr, value):
"""Return an existing doc (if any) in this locale whose `attr` attr is
equal to mine."""
return Document.objects.filter(locale=self.locale, **{attr: value})
def _raise_if_collides(self, attr, exception):
"""Raise an exception if a page of this title/slug already exists."""
if self.id is None or hasattr(self, 'old_' + attr):
# If I am new or my title/slug changed...
existing = self._existing(attr, getattr(self, attr))
if existing.exists():
raise exception(existing[0])
def clean(self):
"""Translations can't be localizable."""
self._clean_is_localizable()
def _clean_is_localizable(self):
"""is_localizable == allowed to have translations. Make sure that isn't
violated.
For default language (en-US), is_localizable means it can have
translations. Enforce:
* is_localizable=True if it has translations
* if has translations, unable to make is_localizable=False
For non-default languages, is_localizable must be False.
"""
if self.locale != settings.WIKI_DEFAULT_LANGUAGE:
self.is_localizable = False
# Can't save this translation if parent not localizable
if (self.parent and self.parent.id != self.id and
not self.parent.is_localizable):
raise ValidationError('"%s": parent "%s" is not localizable.' % (
unicode(self), unicode(self.parent)))
# Can't make not localizable if it has translations
# This only applies to documents that already exist, hence self.pk
if self.pk and not self.is_localizable and self.translations.exists():
raise ValidationError('"%s": document has %s translations but is '
'not localizable.' %
(unicode(self), self.translations.count()))
def _attr_for_redirect(self, attr, template):
"""Return the slug or title for a new redirect.
`template` is a Python string template with "old" and "number" tokens
used to create the variant.
"""
def unique_attr():
"""Return a variant of getattr(self, attr) such that there is no
Document of my locale with string attribute `attr` equal to it.
Never returns the original attr value.
"""
# "My God, it's full of race conditions!"
i = 1
while True:
new_value = template % dict(old=getattr(self, attr), number=i)
if not self._existing(attr, new_value).exists():
return new_value
i += 1
old_attr = 'old_' + attr
if hasattr(self, old_attr):
# My slug (or title) is changing; we can reuse it for the redirect.
return getattr(self, old_attr)
else:
# Come up with a unique slug (or title):
return unique_attr()
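# For example (illustrative): with a template like '%(old)s Redirect %(number)s'
# and a current slug of 'MyPage', unique_attr() tries 'MyPage Redirect 1',
# 'MyPage Redirect 2', ... until no document in this locale uses the value.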
def revert(self, revision, user, comment=None):
"""
Reverts the given revision by creating a new one.
- Sets its comment to the given value and points the new revision
to the old revision
- Keeps review tags
- Make new revision the current one of the document
"""
# remember the current revision's primary key for later
old_revision_pk = revision.pk
# get a list of review tag names for later
old_review_tags = list(revision.review_tags.names())
with transaction.atomic():
# reset primary key
revision.pk = None
# add a sensible comment
revision.comment = ("Revert to revision of %s by %s" %
(revision.created, revision.creator))
if comment:
revision.comment = u'%s: "%s"' % (revision.comment, comment)
revision.created = datetime.now()
revision.creator = user
if revision.document.original.pk == self.pk:
revision.based_on_id = old_revision_pk
revision.save()
# set review tags
if old_review_tags:
revision.review_tags.set(*old_review_tags)
# populate model instance with fresh data from database
revision.refresh_from_db()
# make this new revision the current one for the document
revision.make_current()
return revision
def revise(self, user, data, section_id=None):
"""Given a dict of changes to make, build and save a new Revision to
revise this document"""
curr_rev = self.current_revision
new_rev = Revision(creator=user, document=self, content=self.html)
for n in ('title', 'slug', 'render_max_age'):
setattr(new_rev, n, getattr(self, n))
if curr_rev:
new_rev.toc_depth = curr_rev.toc_depth
original_doc = curr_rev.document.original
if original_doc == self:
new_rev.based_on = curr_rev
else:
new_rev.based_on = original_doc.current_revision
# Accept optional field edits...
new_title = data.get('title', False)
new_rev.title = new_title and new_title or self.title
new_tags = data.get('tags', False)
new_rev.tags = (new_tags and new_tags or
edit_string_for_tags(self.tags.all()))
new_review_tags = data.get('review_tags', False)
if new_review_tags:
review_tags = new_review_tags
elif curr_rev:
review_tags = edit_string_for_tags(curr_rev.review_tags.all())
else:
review_tags = ''
new_rev.summary = data.get('summary', '')
# To add comment, when Technical/Editorial review completed
new_rev.comment = data.get('comment', '')
# Accept HTML edits, optionally by section
new_html = data.get('content', data.get('html', False))
if new_html:
if not section_id:
new_rev.content = new_html
else:
content = parse_content(self.html)
new_rev.content = (content.replaceSection(section_id, new_html)
.serialize())
# Finally, commit the revision changes and return the new rev.
new_rev.save()
new_rev.review_tags.set(*parse_tags(review_tags))
return new_rev
@cached_property
def last_modified_cache_key(self):
return DOCUMENT_LAST_MODIFIED_CACHE_KEY_TMPL % self.natural_cache_key
def fill_last_modified_cache(self):
"""
Convert python datetime to Unix epoch seconds. This is more
easily digested by the cache, and is more compatible with other
services that might spy on Kuma's cache entries (eg. KumaScript)
"""
modified_epoch = self.modified.strftime('%s')
memcache.set(self.last_modified_cache_key, modified_epoch)
return modified_epoch
def save(self, *args, **kwargs):
self.is_template = self.slug.startswith(TEMPLATE_TITLE_PREFIX)
self.is_redirect = bool(self.get_redirect_url())
try:
# Check if the slug would collide with an existing doc
self._raise_if_collides('slug', SlugCollision)
except UniqueCollision as err:
if err.existing.get_redirect_url() is not None:
# If the existing doc is a redirect, delete it and clobber it.
err.existing.delete()
else:
raise err
# These are too important to leave to a (possibly omitted) is_valid
# call:
self._clean_is_localizable()
if not self.parent_topic and self.parent:
# If this is a translation without a topic parent, try to get one.
self.acquire_translated_topic_parent()
super(Document, self).save(*args, **kwargs)
# Refresh the cached last-modified timestamp.
self.fill_last_modified_cache()
def delete(self, *args, **kwargs):
if waffle.switch_is_active('wiki_error_on_delete'):
# bug 863692: Temporary while we investigate disappearing pages.
raise Exception("Attempt to delete document %s: %s" %
(self.id, self.title))
else:
if self.is_redirect or 'purge' in kwargs:
if 'purge' in kwargs:
kwargs.pop('purge')
return super(Document, self).delete(*args, **kwargs)
signals.pre_delete.send(sender=self.__class__,
instance=self)
if not self.deleted:
Document.objects.filter(pk=self.pk).update(deleted=True)
memcache.delete(self.last_modified_cache_key)
signals.post_delete.send(sender=self.__class__, instance=self)
def purge(self):
if waffle.switch_is_active('wiki_error_on_delete'):
# bug 863692: Temporary while we investigate disappearing pages.
raise Exception("Attempt to purge document %s: %s" %
(self.id, self.title))
else:
if not self.deleted:
raise Exception("Attempt tp purge non-deleted document %s: %s" %
(self.id, self.title))
self.delete(purge=True)
def restore(self):
"""
Restores a logically deleted document by reverting the deleted
boolean to False. Sends pre_save and post_save Django signals so
listeners observe the change as they would a normal save.
"""
if not self.deleted:
raise Exception("Document is not deleted, cannot be restored.")
signals.pre_save.send(sender=self.__class__, instance=self)
Document.deleted_objects.filter(pk=self.pk).update(deleted=False)
signals.post_save.send(sender=self.__class__, instance=self)
def _post_move_redirects(self, new_slug, user, title):
"""
Create and return a Document and a Revision to serve as
redirects once this page has been moved.
"""
redirect_doc = Document(locale=self.locale,
title=self.title,
slug=self.slug,
is_localizable=False)
content = REDIRECT_CONTENT % {
'href': reverse('wiki.document',
args=[new_slug],
locale=self.locale),
'title': title,
}
redirect_rev = Revision(content=content,
is_approved=True,
toc_depth=self.current_revision.toc_depth,
creator=user)
return redirect_doc, redirect_rev
def _moved_revision(self, new_slug, user, title=None):
"""
Create and return a Revision which is a copy of this
Document's current Revision, as it will exist at a moved
location.
"""
moved_rev = self.current_revision
# Shortcut trick for getting an object with all the same
# values, but making Django think it's new.
moved_rev.id = None
moved_rev.creator = user
moved_rev.created = datetime.now()
moved_rev.slug = new_slug
if title:
moved_rev.title = title
return moved_rev
def _get_new_parent(self, new_slug):
"""
Get this moved Document's parent doc if a Document
exists at the appropriate slug and locale.
"""
return valid_slug_parent(new_slug, self.locale)
def _move_conflicts(self, new_slug):
"""
Given a new slug to be assigned to this document, check
whether there is an existing, non-redirect, Document at that
slug in this locale. Any redirect existing there will be
deleted.
This is necessary since page moving is a background task, and
a Document may come into existence at the target slug after
the move is requested.
"""
existing = None
try:
existing = Document.objects.get(locale=self.locale,
slug=new_slug)
except Document.DoesNotExist:
pass
if existing is not None:
if existing.is_redirect:
existing.delete()
else:
raise Exception("Requested move would overwrite a non-redirect page.")
def _tree_conflicts(self, new_slug):
"""
Given a new slug to be assigned to this document, return a
list of documents (if any) which would be overwritten by
moving this document or any of its children in that fashion.
"""
conflicts = []
try:
existing = Document.objects.get(locale=self.locale, slug=new_slug)
if not existing.is_redirect:
conflicts.append(existing)
except Document.DoesNotExist:
pass
for child in self.get_descendants():
child_title = child.slug.split('/')[-1]
try:
slug = '/'.join([new_slug, child_title])
existing = Document.objects.get(locale=self.locale, slug=slug)
if not existing.get_redirect_url():
conflicts.append(existing)
except Document.DoesNotExist:
pass
return conflicts
def _move_tree(self, new_slug, user=None, title=None):
"""
Move this page and all its children.
"""
# Page move is a 10-step process.
#
# Step 1: Sanity check. Has a page been created at this slug
# since the move was requested? If not, OK to go ahead and
# change our slug.
self._move_conflicts(new_slug)
if user is None:
user = self.current_revision.creator
if title is None:
title = self.title
# Step 2: stash our current review tags, since we want to
# preserve them.
review_tags = list(self.current_revision.review_tags.names())
# Step 3: Create (but don't yet save) a Document and Revision
# to leave behind as a redirect from old location to new.
redirect_doc, redirect_rev = self._post_move_redirects(new_slug,
user,
title)
# Step 4: Update our breadcrumbs.
new_parent = self._get_new_parent(new_slug)
# If we found a Document at what will be our parent slug, set
# it as our parent_topic. If we didn't find one, then we no
# longer have a parent_topic (since our original parent_topic
# would already have moved if it were going to).
self.parent_topic = new_parent
# Step 5: Save this Document.
self.slug = new_slug
self.save()
# Step 6: Create (but don't yet save) a copy of our current
# revision, but with the new slug and title (if title is
# changing too).
moved_rev = self._moved_revision(new_slug, user, title)
# Step 7: Save the Revision that actually moves us.
moved_rev.save(force_insert=True)
# Step 8: Save the review tags.
moved_rev.review_tags.set(*review_tags)
# Step 9: Save the redirect.
redirect_doc.save()
redirect_rev.document = redirect_doc
redirect_rev.save()
# Finally, step 10: recurse through all of our children.
for child in self.children.filter(locale=self.locale):
# Save the original slug and locale so we can use them in
# the error message if something goes wrong.
old_child_slug, old_child_locale = child.slug, child.locale
child_title = child.slug.split('/')[-1]
try:
child._move_tree('/'.join([new_slug, child_title]), user)
except PageMoveError:
# A child move already caught this and created the
# correct exception + error message, so just propagate
# it up.
raise
except Exception as e:
# One of the immediate children of this page failed to
# move.
exc_class, exc_message, exc_tb = sys.exc_info()
message = """
Failure occurred while attempting to move document
with id %(doc_id)s.
That document can be viewed at:
https://developer.mozilla.org/%(locale)s/docs/%(slug)s
The exception raised was:
Exception type: %(exc_class)s
Exception message: %(exc_message)s
Full traceback:
%(traceback)s
""" % {'doc_id': child.id,
'locale': old_child_locale,
'slug': old_child_slug,
'exc_class': exc_class,
'exc_message': exc_message,
'traceback': traceback.format_exc()}
raise PageMoveError(message)
def repair_breadcrumbs(self):
"""
Temporary method while we work out the real issue behind
translation/breadcrumb mismatches (bug 900961).
Basically just walks up the tree of topical parents, calling
acquire_translated_topic_parent() for as long as there's a
language mismatch.
"""
if (not self.parent_topic or
self.parent_topic.locale != self.locale):
self.acquire_translated_topic_parent()
if self.parent_topic:
self.parent_topic.repair_breadcrumbs()
def acquire_translated_topic_parent(self):
"""
This normalizes topic breadcrumb paths between locales.
Attempt to acquire a topic parent from a translation of our translation
parent's topic parent, auto-creating a stub document if necessary.
"""
if not self.parent:
# Bail, if this is not in fact a translation.
return
parent_topic = self.parent.parent_topic
if not parent_topic:
# Bail, if the translation parent has no topic parent
return
try:
# Look for an existing translation of the topic parent
new_parent = parent_topic.translations.get(locale=self.locale)
except Document.DoesNotExist:
try:
# No luck. As a longshot, let's try looking for the same slug.
new_parent = Document.objects.get(locale=self.locale,
slug=parent_topic.slug)
if not new_parent.parent:
# HACK: This same-slug/different-locale doc should probably
# be considered a translation. Let's correct that on the
# spot.
new_parent.parent = parent_topic
new_parent.save()
except Document.DoesNotExist:
# Finally, let's create a translated stub for a topic parent
new_parent = Document.objects.get(pk=parent_topic.pk)
new_parent.pk = None
new_parent.current_revision = None
new_parent.parent_topic = None
new_parent.parent = parent_topic
new_parent.locale = self.locale
new_parent.save()
if parent_topic.current_revision:
# Don't forget to clone a current revision
new_rev = Revision.objects.get(pk=parent_topic.current_revision.pk)
new_rev.pk = None
new_rev.document = new_parent
# HACK: Let's auto-add tags that flag this as a topic stub
stub_tags = '"TopicStub","NeedsTranslation"'
stub_l10n_tags = ['inprogress']
if new_rev.tags:
new_rev.tags = '%s,%s' % (new_rev.tags, stub_tags)
else:
new_rev.tags = stub_tags
new_rev.save()
new_rev.localization_tags.add(*stub_l10n_tags)
# Finally, assign the new default parent topic
self.parent_topic = new_parent
self.save()
@property
def content_parsed(self):
if not self.current_revision:
return None
return self.current_revision.content_parsed
def populate_attachments(self):
"""
File attachments are stored at the DB level and synced here
with the document's HTML content.
We find them by regex-searching over the HTML for URLs that match the
file URL patterns.
"""
mt_files = DEKI_FILE_URL.findall(self.html)
kuma_files = KUMA_FILE_URL.findall(self.html)
params = None
if mt_files:
# We have at least some MindTouch files.
params = models.Q(mindtouch_attachment_id__in=mt_files)
if kuma_files:
# We also have some kuma files. Use an OR query.
params = params | models.Q(id__in=kuma_files)
if kuma_files and not params:
# We have only kuma files.
params = models.Q(id__in=kuma_files)
Attachment = apps.get_model('attachments', 'Attachment')
if params:
found_attachments = Attachment.objects.filter(params)
else:
# If no files found, return an empty Attachment queryset.
found_attachments = Attachment.objects.none()
# Delete all document-attachments-relations for attachments that
# weren't originally uploaded for the document to populate the list
# again below
self.attached_files.filter(is_original=False).delete()
# Reset the linked status for all attachments that are left
self.attached_files.all().update(is_linked=False)
# Go through the attachments discovered in the HTML and
# create linked attachments
"""
three options of state:
- linked in the document, but not originally uploaded
- linked in the document and originally uploaded
- not linked in the document, but originally uploaded
"""
populated = []
for attachment in (found_attachments.only('pk', 'current_revision')
.iterator()):
revision = attachment.current_revision
relation, created = self.files.through.objects.update_or_create(
file_id=attachment.pk,
document_id=self.pk,
defaults={
'attached_by': revision.creator,
'name': revision.filename,
'is_linked': True,
},
)
populated.append((relation, created))
return populated
@property
def show_toc(self):
return self.current_revision and self.current_revision.toc_depth
@cached_property
def language(self):
return get_language_mapping()[self.locale.lower()]
def get_absolute_url(self, endpoint='wiki.document'):
"""
Build the absolute URL to this document from its full path
"""
return reverse(endpoint, locale=self.locale, args=[self.slug])
def get_edit_url(self):
return self.get_absolute_url(endpoint='wiki.edit')
def get_redirect_url(self):
"""
If I am a redirect, return the absolute URL to which I redirect.
Otherwise, return None.
"""
# If a document starts with REDIRECT_HTML and contains any <a> tags
# with hrefs, return the href of the first one. This trick saves us
# from having to parse the HTML every time.
if REDIRECT_HTML in self.html:
anchors = PyQuery(self.html)('a[href].redirect')
if anchors:
url = anchors[0].get('href')
# allow explicit domain and *not* '//'
# i.e allow "https://developer...." and "/en-US/docs/blah"
if len(url) > 1:
if url.startswith(settings.SITE_URL):
return url
elif url[0] == '/' and url[1] != '/':
return url
elif len(url) == 1 and url[0] == '/':
return url
def get_topic_parents(self):
"""Build a list of parent topics from self to root"""
curr, parents = self, []
while curr.parent_topic:
curr = curr.parent_topic
parents.append(curr)
return parents
def allows_revision_by(self, user):
"""
Return whether `user` is allowed to create new revisions of me.
The motivation behind this method is that templates and other types of
docs may have different permissions.
"""
if (self.slug.startswith(TEMPLATE_TITLE_PREFIX) and
not user.has_perm('wiki.change_template_document')):
return False
return True
def allows_editing_by(self, user):
"""
Return whether `user` is allowed to edit document-level metadata.
If the Document doesn't have a current_revision (nothing approved) then
all the Document fields are still editable. Once there is an approved
Revision, the Document fields can only be edited by privileged users.
"""
if (self.slug.startswith(TEMPLATE_TITLE_PREFIX) and
not user.has_perm('wiki.change_template_document')):
return False
return (not self.current_revision or
user.has_perm('wiki.change_document'))
def translated_to(self, locale):
"""
Return the translation of me to the given locale.
If there is no such Document, return None.
"""
if self.locale != settings.WIKI_DEFAULT_LANGUAGE:
            raise NotImplementedError('translated_to() is implemented only '
                                      'on Documents in the default language '
                                      'so far.')
try:
return Document.objects.get(locale=locale, parent=self)
except Document.DoesNotExist:
return None
@property
def original(self):
"""
Return the document I was translated from or, if none, myself.
"""
return self.parent or self
@cached_property
def other_translations(self):
"""
Return a list of Documents - other translations of this Document
"""
if self.parent is None:
return self.translations.all().order_by('locale')
else:
translations = (self.parent.translations.all()
.exclude(id=self.id)
.order_by('locale'))
pks = list(translations.values_list('pk', flat=True))
return Document.objects.filter(pk__in=[self.parent.pk] + pks)
@property
def parents(self):
"""
Return the list of topical parent documents above this one,
or an empty list if none exist.
"""
if self.parent_topic is None:
return []
current_parent = self.parent_topic
parents = [current_parent]
while current_parent.parent_topic is not None:
parents.insert(0, current_parent.parent_topic)
current_parent = current_parent.parent_topic
return parents
def is_child_of(self, other):
"""
Circular dependency detection -- if someone tries to set
this as a parent of a document it's a child of, they're gonna
have a bad time.
"""
return other.id in (d.id for d in self.parents)
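    # Hedged example: with a topic chain root -> guide -> page,
    # page.is_child_of(root) is True, so a caller can refuse to set `page`
    # as root's parent_topic and avoid creating a cycle.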
# This is a method, not a property, because it can do a lot of DB
# queries and so should look scarier. It's not just named
# 'children' because that's taken already by the reverse relation
# on parent_topic.
def get_descendants(self, limit=None, levels=0):
"""
Return a list of all documents which are children
(grandchildren, great-grandchildren, etc.) of this one.
"""
results = []
if (limit is None or levels < limit) and self.children.exists():
for child in self.children.all().filter(locale=self.locale):
results.append(child)
                results.extend(child.get_descendants(limit, levels + 1))
return results
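    # Hedged example: for a topic tree A -> [B, C] with B -> [D],
    # A.get_descendants() walks depth-first and returns [B, D, C], while
    # A.get_descendants(limit=1) stops at the direct children [B, C].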
def is_watched_by(self, user):
"""
Return whether `user` is notified of edits to me.
"""
from .events import EditDocumentEvent
return EditDocumentEvent.is_notifying(user, self)
def tree_is_watched_by(self, user):
"""Return whether `user` is notified of edits to me AND sub-pages."""
from .events import EditDocumentInTreeEvent
return EditDocumentInTreeEvent.is_notifying(user, self)
def parent_trees_watched_by(self, user):
"""
Return any and all of this document's parents that are watched by the
given user.
"""
return [doc for doc in self.parents if doc.tree_is_watched_by(user)]
@cached_property
def contributors(self):
return DocumentContributorsJob().get(self.pk)
@cached_property
def zone_stack(self):
return DocumentZoneStackJob().get(self.pk)
def get_full_url(self):
return absolutify(self.get_absolute_url())
class DocumentDeletionLog(models.Model):
"""
Log of who deleted a Document, when, and why.
"""
# We store the locale/slug because it's unique, and also because a
# ForeignKey would delete this log when the Document gets purged.
locale = models.CharField(
max_length=7,
choices=settings.LANGUAGES,
default=settings.WIKI_DEFAULT_LANGUAGE,
db_index=True,
)
slug = models.CharField(max_length=255, db_index=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL)
timestamp = models.DateTimeField(auto_now=True)
reason = models.TextField()
def __unicode__(self):
return "/%(locale)s/%(slug)s deleted by %(user)s" % {
'locale': self.locale,
'slug': self.slug,
'user': self.user
}
class DocumentZone(models.Model):
"""
Model object declaring a content zone root at a given Document, provides
attributes inherited by the topic hierarchy beneath it.
"""
document = models.OneToOneField(Document, related_name='zone')
styles = models.TextField(null=True, blank=True)
url_root = models.CharField(
max_length=255, null=True, blank=True, db_index=True,
help_text="alternative URL path root for documents under this zone")
def __unicode__(self):
return u'DocumentZone %s (%s)' % (self.document.get_absolute_url(),
self.document.title)
class ReviewTag(TagBase):
"""A tag indicating review status, mainly for revisions"""
class Meta:
verbose_name = _('Review Tag')
verbose_name_plural = _('Review Tags')
class LocalizationTag(TagBase):
"""A tag indicating localization status, mainly for revisions"""
class Meta:
verbose_name = _('Localization Tag')
verbose_name_plural = _('Localization Tags')
class ReviewTaggedRevision(ItemBase):
"""Through model, just for review tags on revisions"""
content_object = models.ForeignKey('Revision')
tag = models.ForeignKey(ReviewTag, related_name="%(app_label)s_%(class)s_items")
@classmethod
def tags_for(cls, *args, **kwargs):
return tags_for(cls, *args, **kwargs)
class LocalizationTaggedRevision(ItemBase):
"""Through model, just for localization tags on revisions"""
content_object = models.ForeignKey('Revision')
tag = models.ForeignKey(LocalizationTag, related_name="%(app_label)s_%(class)s_items")
@classmethod
def tags_for(cls, *args, **kwargs):
return tags_for(cls, *args, **kwargs)
class Revision(models.Model):
"""A revision of a localized knowledgebase document"""
# Depth of table-of-contents in document display.
TOC_DEPTH_NONE = 0
TOC_DEPTH_ALL = 1
TOC_DEPTH_H2 = 2
TOC_DEPTH_H3 = 3
TOC_DEPTH_H4 = 4
TOC_DEPTH_CHOICES = (
(TOC_DEPTH_NONE, _(u'No table of contents')),
(TOC_DEPTH_ALL, _(u'All levels')),
(TOC_DEPTH_H2, _(u'H2 and higher')),
(TOC_DEPTH_H3, _(u'H3 and higher')),
(TOC_DEPTH_H4, _('H4 and higher')),
)
document = models.ForeignKey(Document, related_name='revisions')
# Title and slug in document are primary, but they're kept here for
# revision history.
title = models.CharField(max_length=255, null=True, db_index=True)
slug = models.CharField(max_length=255, null=True, db_index=True)
summary = models.TextField() # wiki markup
content = models.TextField() # wiki markup
tidied_content = models.TextField(blank=True) # wiki markup tidied up
# Keywords are used mostly to affect search rankings. Moderators may not
# have the language expertise to translate keywords, so we put them in the
# Revision so the translators can handle them:
keywords = models.CharField(max_length=255, blank=True)
# Tags are stored in a Revision as a plain CharField, because Revisions are
# not indexed by tags. This data is retained for history tracking.
tags = models.CharField(max_length=255, blank=True)
# Tags are (ab)used as status flags and for searches, but the through model
# should constrain things from getting expensive.
review_tags = TaggableManager(through=ReviewTaggedRevision)
localization_tags = TaggableManager(through=LocalizationTaggedRevision)
toc_depth = models.IntegerField(choices=TOC_DEPTH_CHOICES,
default=TOC_DEPTH_ALL)
# Maximum age (in seconds) before this document needs re-rendering
render_max_age = models.IntegerField(blank=True, null=True)
created = models.DateTimeField(default=datetime.now, db_index=True)
comment = models.CharField(max_length=255)
creator = models.ForeignKey(settings.AUTH_USER_MODEL,
related_name='created_revisions')
is_approved = models.BooleanField(default=True, db_index=True)
# The default locale's rev that was current when the Edit button was hit to
# create this revision. Used to determine whether localizations are out of
# date.
based_on = models.ForeignKey('self', null=True, blank=True)
# TODO: limit_choices_to={'document__locale':
# settings.WIKI_DEFAULT_LANGUAGE} is a start but not sufficient.
is_mindtouch_migration = models.BooleanField(default=False, db_index=True,
help_text="Did this revision come from MindTouch?")
objects = TransformManager()
def get_absolute_url(self):
"""Build the absolute URL to this revision"""
return reverse('wiki.revision',
locale=self.document.locale,
args=[self.document.slug, self.pk])
def _based_on_is_clean(self):
"""Return a tuple: (the correct value of based_on, whether the old
value was correct).
based_on must be an approved revision of the English version of the
document if there are any such revisions, any revision if no
approved revision exists, and None otherwise. If based_on is not
already set when this is called, the return value defaults to the
current_revision of the English document.
"""
# TODO(james): This could probably be simplified down to "if
# based_on is set, it must be a revision of the original document."
original = self.document.original
base = original.current_or_latest_revision()
has_approved = original.revisions.filter(is_approved=True).exists()
if (original.current_revision or not has_approved):
if (self.based_on and self.based_on.document != original):
# based_on is set and points to the wrong doc.
return base, False
# Else based_on is valid; leave it alone.
elif self.based_on:
return None, False
return self.based_on, True
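    # Hedged example: for a translation whose based_on points at a revision
    # of some other document, this returns (correct_base, False) and clean()
    # repairs it; for a well-formed translation it returns (self.based_on, True).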
def clean(self):
"""Ensure based_on is valid."""
# All of the cleaning herein should be unnecessary unless the user
# messes with hidden form data.
try:
self.document and self.document.original
except Document.DoesNotExist:
# For clean()ing forms that don't have a document instance behind
# them yet
self.based_on = None
else:
based_on, is_clean = self._based_on_is_clean()
if not is_clean:
if self.document.parent:
# Restoring translation source, so base on current_revision
self.based_on = self.document.parent.current_revision
else:
old = self.based_on
self.based_on = based_on # Guess a correct value.
locale = settings.LOCALES[settings.WIKI_DEFAULT_LANGUAGE].native
error = ugettext(
'A revision must be based on a revision of the '
'%(locale)s document. Revision ID %(id)s does '
'not fit those criteria.')
raise ValidationError(error %
{'locale': locale, 'id': old.id})
def save(self, *args, **kwargs):
_, is_clean = self._based_on_is_clean()
if not is_clean: # No more Mister Nice Guy
# TODO(erik): This error message ignores non-translations.
raise ProgrammingError('Revision.based_on must be None or refer '
'to a revision of the default-'
'language document. It was %s' %
self.based_on)
if not self.title:
self.title = self.document.title
if not self.slug:
self.slug = self.document.slug
super(Revision, self).save(*args, **kwargs)
# When a revision is approved, update document metadata and re-cache
# the document's html content
if self.is_approved:
self.make_current()
def make_current(self):
"""
Make this revision the current one for the document
"""
self.document.title = self.title
self.document.slug = self.slug
self.document.html = self.content_cleaned
self.document.render_max_age = self.render_max_age
self.document.current_revision = self
# Since Revision stores tags as a string, we need to parse them first
# before setting on the Document.
self.document.tags.set(*parse_tags(self.tags))
self.document.save()
# Re-create all document-attachment relations since they are based
# on the actual HTML content
self.document.populate_attachments()
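    # Hedged sketch of the publish flow (names are illustrative): because
    # save() calls make_current() for approved revisions, creating an
    # approved revision is enough to update the document:
    #
    #     rev = Revision(document=doc, content='<p>Hi</p>', creator=user,
    #                    is_approved=True)
    #     rev.save()  # -> make_current() -> doc.html, doc.current_revision updated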
def __unicode__(self):
return u'[%s] %s #%s' % (self.document.locale,
self.document.title,
self.id)
def get_section_content(self, section_id):
"""Convenience method to extract the content for a single section"""
return self.document.extract.section(self.content, section_id)
def get_tidied_content(self, allow_none=False):
"""
Return the revision content parsed and cleaned by tidy.
First, check in denormalized db field. If it's not available, schedule
an asynchronous task to store it.
allow_none -- To prevent CPU-hogging calls, return None instead of
calling tidy_content in-process.
"""
# we may be lucky and have the tidied content already denormalized
# in the database, if so return it
if self.tidied_content:
tidied_content = self.tidied_content
else:
if allow_none:
if self.pk:
from .tasks import tidy_revision_content
tidy_revision_content.delay(self.pk, refresh=False)
tidied_content = None
else:
tidied_content, errors = tidy_content(self.content)
if self.pk:
Revision.objects.filter(pk=self.pk).update(
tidied_content=tidied_content)
self.tidied_content = tidied_content or ''
return tidied_content
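    # Hedged usage sketch: request-path callers can pass allow_none=True so
    # that a missing denormalized value schedules the Celery task instead of
    # running tidy in-process:
    #
    #     content = rev.get_tidied_content(allow_none=True)
    #     if content is None:
    #         pass  # tidy_revision_content was queued; retry on a later request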
@property
def content_cleaned(self):
if self.document.is_template:
return self.content
else:
return Document.objects.clean_content(self.content)
@cached_property
def previous(self):
return self.get_previous()
def get_previous(self):
"""
Returns the previous approved revision or None.
"""
try:
return self.document.revisions.filter(
is_approved=True,
created__lt=self.created,
).order_by('-created')[0]
except IndexError:
return None
@cached_property
def needs_editorial_review(self):
return self.review_tags.filter(name='editorial').exists()
@cached_property
def needs_technical_review(self):
return self.review_tags.filter(name='technical').exists()
@cached_property
def localization_in_progress(self):
return self.localization_tags.filter(name='inprogress').exists()
@property
def translation_age(self):
return abs((datetime.now() - self.created).days)
class RevisionIP(models.Model):
"""
IP Address for a Revision including User-Agent string and Referrer URL.
"""
revision = models.ForeignKey(
Revision
)
ip = models.CharField(
_('IP address'),
max_length=40,
editable=False,
db_index=True,
blank=True,
null=True,
)
user_agent = models.TextField(
_('User-Agent'),
editable=False,
blank=True,
)
referrer = models.TextField(
_('HTTP Referrer'),
editable=False,
blank=True,
)
data = models.TextField(
editable=False,
blank=True,
null=True,
verbose_name=_('Data submitted to Akismet')
)
objects = RevisionIPManager()
def __unicode__(self):
return '%s (revision %d)' % (self.ip or 'No IP', self.revision.id)
class RevisionAkismetSubmission(AkismetSubmission):
"""
The Akismet submission per wiki document revision.
Stores only a reference to the submitted revision.
"""
revision = models.ForeignKey(
Revision,
related_name='akismet_submissions',
null=True,
blank=True,
verbose_name=_('Revision'),
# don't delete the akismet submission but set the revision to null
on_delete=models.SET_NULL,
)
class Meta:
verbose_name = _('Akismet submission')
verbose_name_plural = _('Akismet submissions')
def __unicode__(self):
if self.revision:
return (
u'%(type)s submission by %(sender)s (Revision %(revision_id)d)' % {
'type': self.get_type_display(),
'sender': self.sender,
'revision_id': self.revision.id,
}
)
else:
return (
u'%(type)s submission by %(sender)s (no revision)' % {
'type': self.get_type_display(),
'sender': self.sender,
}
)
class EditorToolbar(models.Model):
creator = models.ForeignKey(settings.AUTH_USER_MODEL,
related_name='created_toolbars')
default = models.BooleanField(default=False)
name = models.CharField(max_length=100)
code = models.TextField(max_length=2000)
def __unicode__(self):
return self.name
class DocumentSpamAttempt(SpamAttempt):
"""
The wiki document specific spam attempt.
    Stores title, slug and locale of the document revision to be able
to see where it happens. Stores data sent to Akismet so that staff can
review Akismet's spam detection for false positives.
"""
title = models.CharField(
verbose_name=_('Title'),
max_length=255,
)
slug = models.CharField(
verbose_name=_('Slug'),
max_length=255,
)
document = models.ForeignKey(
Document,
related_name='spam_attempts',
null=True,
blank=True,
verbose_name=_('Document (optional)'),
on_delete=models.SET_NULL,
)
data = models.TextField(
editable=False,
blank=True,
null=True,
verbose_name=_('Data submitted to Akismet')
)
reviewed = models.DateTimeField(
_('reviewed'),
blank=True,
null=True,
)
NEEDS_REVIEW = 0
HAM = 1
SPAM = 2
REVIEW_UNAVAILABLE = 3
AKISMET_ERROR = 4
REVIEW_CHOICES = (
(NEEDS_REVIEW, _('Needs Review')),
(HAM, _('Ham / False Positive')),
(SPAM, _('Confirmed as Spam')),
(REVIEW_UNAVAILABLE, _('Review Unavailable')),
(AKISMET_ERROR, _('Akismet Error')),
)
review = models.IntegerField(
choices=REVIEW_CHOICES,
default=NEEDS_REVIEW,
verbose_name=_("Review of Akismet's classification as spam"),
)
reviewer = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name='documentspam_reviewed',
blank=True,
null=True,
verbose_name=_('Staff reviewer'),
)
def __unicode__(self):
return u'%s (%s)' % (self.slug, self.title)
|
Metronus/metronus
|
Metronus-Project/metronus_app/model/goalEvolution.py
|
from django.db import models
from metronus_app.model.actor import Actor
from metronus_app.model.task import Task
class GoalEvolution(models.Model):
"""
    Each time the goal or the price per unit/hour of a task is changed, a new entry is created in the log.
Maybe should have been named TaskLog, but...
"""
task_id = models.ForeignKey(Task)
registryDate = models.DateTimeField(auto_now=True)
actor_id = models.ForeignKey(Actor)
production_goal = models.FloatField(blank=True, null=True)
goal_description = models.CharField(blank=True, max_length=100, default="")
price_per_unit = models.FloatField(null=True, blank=True)
price_per_hour = models.FloatField(null=True, blank=True)
    def __unicode__(self):
        # production_goal is a FloatField (possibly None); __unicode__ must
        # return text, so coerce it rather than returning the raw float.
        return u'%s' % self.production_goal
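# Hedged usage sketch (not part of the original app): since every goal/price
# change appends a row, a task's full history is a simple ordered query:
#
#     GoalEvolution.objects.filter(task_id=my_task).order_by('-registryDate')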
|
hipsterware/scoreline
|
toys/corpscore.py
|
#!/usr/bin/env python
import csv
import json
import sys
import click
def score(company, sexbiases):
"""
Given a company record with board of directors and executive names,
return our guess of the % of governance that is male.
Since names are not always unambiguous determinants of sex, we also
return an error bound, with 0.0 being perfect and 1.0 being possibly 100%
wrong.
"""
men = 0
error = 0.0
governors = company['board'] + company['executives']
# Get all governor names, de-duping since board/exec team may overlap
names = set([governor.get('name', '') for governor in governors])
for name in names:
first_name = name.split(' ')[0].strip().title()
bias = sexbiases.get(first_name, 0.0) # Assume male if not known, with maximal error bound
if bias <= 0.0:
men += 1
error += 1.0 - abs(bias)
count = len(names)
    # Use true division so Python 2 doesn't silently truncate the ratio.
    return (men / float(count), error / float(count))
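# A minimal worked example (hedged; the data is invented): with biases
# {'Mary': 1.0, 'John': -1.0}, Mary is counted as female and John as male
# (bias <= 0.0), both with zero error since |bias| == 1.0 for each name:
#
#     company = {'board': [{'name': 'Mary Smith'}],
#                'executives': [{'name': 'John Doe'}]}
#     score(company, {'Mary': 1.0, 'John': -1.0})  # -> (0.5, 0.0)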
@click.command()
@click.option('--companies', type=click.File(mode='rt'), required=True, help="A companies data file, as created by symbol-to-company-details.")
@click.option('--sexbiases', type=click.File(mode='rt'), required=True, help="A sex bias CSV datafile, as in first_name_sex_bias.csv")
def corpscore(companies, sexbiases):
sexbias_reader = csv.reader(sexbiases)
sexbiases = dict([item[0], float(item[1])] for item in sexbias_reader)
fieldnames = ['symbol', 'url', 'percent_men', 'error', 'description']
writer = csv.DictWriter(sys.stdout, fieldnames=fieldnames)
writer.writeheader()
for company_json in companies:
company = json.loads(company_json)
percent_men, error = score(company, sexbiases)
writer.writerow({
'symbol': company['symbol'],
'url': company.get('url'),
'percent_men': percent_men,
'error': error,
'description': company.get('description'),
})
sys.stdout.flush()
if __name__ == '__main__':
corpscore()
|
ViaFerrata/DL_pipeline_TauAppearance
|
scraps/multi_gpu/multi_gpu.py
|
# -*- coding: utf-8 -*-
"""
Multi-gpu code for Keras/TF.
From https://github.com/avolkov1/keras_experiments
"""
# MODIFIED. Inspiration taken from the ref link below.
# ref: https://raw.githubusercontent.com/kuza55/keras-extras/master/utils/multi_gpu.py @IgnorePep8
# The inspirational one carried license:
# Apache License
# Version 2.0, January 2004
# For further info refer to: https://github.com/kuza55/keras-extras
#
# Also used https://github.com/fchollet/keras/issues/2436 which was just
# posted as code snippets in a forum.
import sys
# import time
from itertools import chain
import warnings
from .multi_gpu_utils import Capturing
from keras import backend as KB
from keras.layers.core import Lambda
from keras.models import Model
from keras.layers.merge import Concatenate # , Average)
# import keras.layers as KL
import keras.optimizers as KO
if KB.backend() == 'tensorflow':
# Monkey patch Keras back-end to use Function with enqueue.
# import keras_exp._patch_tf_backend as tfbpatch
# tfbpatch.patch()
# from keras_exp._patch_tf_backend import patch as tfbpatch
# tfbpatch()
import tensorflow as tf
from tensorflow.python.client import device_lib
try:
from tensorflow.contrib import nccl
have_nccl = True
print('NCCL support available', file=sys.stderr)
except ImportError:
have_nccl = False
print('WARNING: NCCL support not available', file=sys.stderr)
from tensorflow.python.ops import data_flow_ops
_DEBUG = False
__all__ = ('get_available_gpus', 'make_parallel', 'print_mgpu_modelsummary',
'ModelMGPU')
def get_available_gpus(ngpus=-1):
'''
    :param int ngpus: Maximum number of GPUs to use. Default -1 means all gpus.
:returns: List of gpu devices. Ex.: ['/gpu:0', '/gpu:1', ...]
'''
local_device_protos = device_lib.list_local_devices()
gpus_list = [x.name for x in local_device_protos if x.device_type == 'GPU']
return gpus_list[:ngpus] if ngpus > -1 else gpus_list
def print_mgpu_modelsummary(model):
'''Prints the summary for a multi-GPU keras model.
:param model: Keras model.
:type model: Model
'''
# print json.dumps(model.get_config(), indent=2) # DEBUG
print('\nMULTI-GPU MODEL: {}'.format(model.name))
print(model.summary())
for layer in model.layers:
# print 'layer:', layer, '\ttype:', type(layer)
if isinstance(layer, Model):
submodel = layer
print('\n\tSUBMODEL SUMMARY: {}'.format(layer.name))
with Capturing() as msum:
minfo = submodel.summary()
print('\t{}\n\t{}\n'.format('\n\t'.join(msum), minfo))
def all_sync_params(tower_params, devices, usenccl=True):
"""Assigns the params from the first tower to all others"""
if len(devices) == 1:
return tf.no_op()
sync_ops = []
if have_nccl and usenccl:
for param_on_devices in zip(*tower_params):
# print('PARAM_ON_DEVICES: {}'.format(param_on_devices)) # DEBUG
# Note: param_on_devices is [paramX_gpu0, paramX_gpu1, ...]
param0 = param_on_devices[0]
send_op, received_tensors = nccl.broadcast(param0, devices[1:])
sync_ops.append(send_op)
for device, param, received in zip(devices[1:],
param_on_devices[1:],
received_tensors):
with tf.device(device):
sync_op = param.assign(received)
sync_ops.append(sync_op)
else:
params0 = tower_params[0]
for device, params in zip(devices, tower_params):
with tf.device(device):
for param, param0 in zip(params, params0):
sync_op = param.assign(param0.read_value())
sync_ops.append(sync_op)
return tf.group(*sync_ops)
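# Hedged note mirroring how _run_initsync below uses this: the sync op is
# typically built once, right after variable initialization, and run in the
# backend session:
#
#     sync_op = all_sync_params(tower_params, gdev_list, usenccl=False)
#     KB.get_session().run(sync_op)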
# def stage(tensors):
# """Stages the given tensors in a StagingArea for asynchronous put/get.
# """
# stage_area = data_flow_ops.StagingArea(
# dtypes=[tensor.dtype for tensor in tensors],
# shapes=[tensor.get_shape() for tensor in tensors])
# put_op = stage_area.put(tensors)
# get_tensors = stage_area.get()
# if not isinstance(get_tensors, list):
# get_tensors = [get_tensors]
# # print('GET_TENSORS: {}'.format(get_tensors)) # DEBUG
#
# get_tensors = [tf.reshape(gt, t.get_shape())
# for (gt, t) in zip(get_tensors, tensors)]
# return put_op, get_tensors
class ModelMGPU(Model):
'''Override load and save methods of the multi-gpu model. The load and
save should correspond to the serial model's load and save.
    If there are other idiosyncrasies to handle for the multi-gpu model case then
these can be handled in this subclass. A serial model should always be
instantiated prior to wrapping it or converting it to a multi-GPU model.
This multi-gpu implementation uses data-parallelism.
A copy-constructor is not implemented so optionally pass any additional
parameters besides inputs/outputs as args/kwargs to initialize the
multi-gpu model the same way as the serial model. Typically not needed.
Currently, it seems that using NCCL and synchronizing/averaging gradients
slows multi-gpu processing down.
.. seealso::
Refer to :func:`make_parallel` docstring for scenarios when
out-of-memory errors might occur and workaround.
Kwargs:
:param Model serial_model: Serial i.e. non-multi GPU Keras model. REQUIRED.
:param list gdev_list: List of gpu devices i.e. ['/gpu:0', '/gpu:1', ...]
Use function get_available_gpus to get the list of available gpus.
This can be a list of strings or list of instances of tf.DeviceSpec.
REQUIRED.
:param str ps_device: Parameter server device to use.
:param bool usenccl: Use the contrib.nccl Tensorflow library for initial
        parameter synchronization and gradients averaging. Note, the model's
        usenccl option overrides the optimizer's usenccl option.
Default: False
Raises RuntimeError if specified True and a non-multi-gpu optimizer is
passed during compile stage.
:param bool initsync: Synchronize initial Variables i.e. weights,
biases, etc. Default: True
:param bool syncopt: Synchronize gradients. Requires a multi-gpu optimizer.
Default: False
:param bool enqueue: Use StagingArea in the multi-GPU model. Could
potentially speed up Host-to-Device transfers.
Produces a warning that kwargs are ignored for Tensorflow. The
        _patch_tf_backend module monkey patches the Function in
tensorflow_backend to use the enqueue_ops option.
Default: False
'''
def __init__(self, *args, **kwargs):
# :param model_creator: Callable that returns a serial i.e. non-multi
# GPU Keras model i.e. a keras.models.Model model. REQUIRED.
# Suggestion, use partial from functools to setup model_creator.
# try:
# model_creator = kwargs.pop('model_creator')
# except KeyError:
# raise RuntimeError('Keyword argument "model_creator" required '
# 'for ModelMGPU.')
try:
smodel = kwargs.pop('serial_model')
except KeyError:
raise RuntimeError('Keyword argument "serial_model" required '
'for ModelMGPU.')
# SET STATE: Instance of serial model for checkpointing
self._smodel = smodel # model_creator()
try:
gdev_list = kwargs.pop('gdev_list')
except KeyError:
raise RuntimeError('Keyword argument "gdev_list" required '
'for ModelMGPU.')
self._gdev_list = gdev_list
mname = kwargs.pop('name', self._smodel.name)
kwargs['name'] = mname
self._ps_device = kwargs.pop('ps_device', '/cpu:0')
self._initsync = kwargs.pop('initsync', True)
self._usenccl = kwargs.pop('usenccl', False)
self._syncopt = kwargs.pop('syncopt', False)
self._enqueue = kwargs.pop('enqueue', False)
if self._enqueue:
            warnings.warn('Enqueue option to use StagingArea currently does '
                          'not work.', UserWarning)
# NOTE: To use staging have to patch keras tensorflow_backend.Function.
# Function implementation in keras_exp.multigpu._patch_tf_backend
self._enqueue_ops = []
self._tower_params = [] # For init/sync'ing of parameters.
self._init_make_dataparallel(gdev_list, *args,
**kwargs)
def __getattribute__(self, attrname):
'''Override load and save methods to be used from the serial-model. The
serial-model holds references to the weights in the multi-gpu model.
'''
# return Model.__getattribute__(self, attrname)
if 'load' in attrname or 'save' in attrname:
return getattr(self._smodel, attrname)
return super(ModelMGPU, self).__getattribute__(attrname)
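    # Hedged example: because of this delegation, checkpointing a multi-GPU
    # model transparently uses the serial model's weights (path illustrative):
    #
    #     mgpu_model.save_weights('ckpt.h5')  # resolves to _smodel.save_weights
    #     mgpu_model.load_weights('ckpt.h5')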
# ref: https://github.com/fchollet/keras/issues/2436
def _init_make_dataparallel(self, gdev_list, *args, **kwargs):
'''Uses data-parallelism to convert a serial model to multi-gpu. Refer
to make_parallel doc.
'''
gpucopy_ops = []
def slice_batch(x, ngpus, part, dev):
'''Divide the input batch into [ngpus] slices, and obtain slice
no. [part]. i.e. if len(x)=10, then slice_batch(x, 2, 1) will
return x[5:].
'''
sh = KB.shape(x)
L = sh[0] // ngpus
if part == ngpus - 1:
xslice = x[part * L:]
else:
xslice = x[part * L:(part + 1) * L]
# tf.split fails if batch size is not divisible by ngpus. Error:
# InvalidArgumentError (see above for traceback): Number of
# ways to split should evenly divide the split dimension
# xslice = tf.split(x, ngpus)[part]
if not self._enqueue:
return xslice
# Did not see any benefit.
with tf.device(dev):
# if self._stager is None:
stager = data_flow_ops.StagingArea(
dtypes=[xslice.dtype], shapes=[xslice.shape])
stage = stager.put([xslice])
gpucopy_ops.append(stage)
# xslice_stage = stager.get()
return stager.get()
ngpus = len(gdev_list)
if ngpus < 2:
raise RuntimeError('Number of gpus < 2. Require two or more GPUs '
'for multi-gpu model parallelization.')
model = self._smodel
noutputs = len(self._smodel.outputs)
global_scope = tf.get_variable_scope()
towers = [[] for _ in range(noutputs)]
for idev, dev in enumerate(gdev_list):
# TODO: The last slice could cause a gradient calculation outlier
            # when averaging gradients. Maybe ensure ahead of time that the
# batch_size is evenly divisible by number of GPUs, or maybe don't
# use the last slice.
with tf.device(self._ps_device):
slices = [] # multi-input case
for ix, x in enumerate(model.inputs):
slice_g = Lambda(
slice_batch, # lambda shape: shape,
# lambda shape: x.shape.as_list(),
name='stage_cpuSliceIn{}_Dev{}'.format(ix, idev),
arguments={'ngpus': ngpus, 'part': idev,
'dev': dev})(x)
slices.append(slice_g)
# print('SLICE_G: {}'.format(slice_g)) # DEBUG
# print('SLICES: {}'.format(slices)) # DEBUG
# with tf.variable_scope('GPU_%i' % idev), \
# tf.variable_scope(global_scope, reuse=idev > 0), \
# tf.variable_scope('GPU_{}'.format(idev),
# reuse=idev > 0) as var_scope, \
with tf.device(dev), \
tf.variable_scope(global_scope, reuse=idev > 0), \
tf.name_scope('tower_%i' % idev):
# NOTE: Currently not using model_creator. Did not observe
# any benefit in such an implementation.
# Instantiate model under device context. More complicated.
# Need to use optimizer synchronization in this scenario.
# model_ = model_creator()
# If using NCCL without re-instantiating the model then must
# set the colocate_gradients_with_ops to False in optimizer.
# if idev == 0:
# # SET STATE: Instance of serial model for checkpointing
# self._smodel = model_ # for ability to checkpoint
# Handle multi-output case
modeltower = model(slices)
if not isinstance(modeltower, list):
modeltower = [modeltower]
for imt, mt in enumerate(modeltower):
towers[imt].append(mt)
params = mt.graph._collections['trainable_variables']
# params = model_.trainable_weights
# params = tf.get_collection(
# tf.GraphKeys.TRAINABLE_VARIABLES, scope=var_scope.name)
# params = modeltower.graph._collections['trainable_variables']
# print('PARAMS: {}'.format(params)) # DEBUG
self._tower_params.append(params)
with tf.device(self._ps_device):
# merged = Concatenate(axis=0)(towers)
merged = [Concatenate(axis=0)(tw) for tw in towers]
# self._enqueue_ops.append(tf.group(*gpucopy_ops))
self._enqueue_ops += gpucopy_ops
kwargs['inputs'] = model.inputs
kwargs['outputs'] = merged
super(ModelMGPU, self).__init__(*args, **kwargs)
def compile(self, *args, **kwargs):
'''Refer to Model.compile docstring for parameters. Override
functionality is documented below.
:override compile: Override Model.compile method to check for options
that the optimizer is multi-gpu enabled, and synchronize initial
variables.
'''
initsync = self._initsync
usenccl = self._usenccl
opt = kwargs['optimizer']
# if isinstance(opt, str):
if not isinstance(opt, KO.Optimizer):
opt = KO.get(opt)
kwargs['optimizer'] = opt
if self._syncopt and not getattr(opt, 'ismgpu', False):
raise RuntimeError(
'Multi-GPU synchronization model requires a multi-GPU '
'optimizer. Instead got: {}'.format(opt))
opt.usenccl = usenccl
if self._enqueue_ops:
# Produces a warning that kwargs are ignored for Tensorflow. Patch
# Function in tensorflow_backend to use the enqueue_ops option.
kwargs['fetches'] = self._enqueue_ops
super(ModelMGPU, self).compile(*args, **kwargs)
if initsync:
self._run_initsync()
def _run_initsync(self):
# tparams = [list(chain(*tp)) for tp in self._tower_params]
tparams = self._tower_params
        # Check to prevent unnecessarily re-initializing and
        # synchronizing, i.e. when the model loads the weights.
for v in chain.from_iterable(tparams):
if getattr(v, '_keras_initialized', False):
return
KB.manual_variable_initialization(True)
sess = KB.get_session()
KB.manual_variable_initialization(False)
# glob_variables = tf.global_variables()
# sess.run(tf.variables_initializer(glob_variables))
# Initialize on GPU0 and sync to other GPUs
init_op = tf.variables_initializer(tparams[0])
# init_op = tf.variables_initializer(self._tower_params[0])
# init_op = tf.variables_initializer(self.trainable_weights)
sess.run(init_op)
        # Important if using model_creator. Not necessary if the model instance is
# reused in which case the model layers are shared between slices
# and are automatically sync'd.
sync_op = all_sync_params(tparams, self._gdev_list,
usenccl=self._usenccl)
sess.run(sync_op)
for v in chain.from_iterable(tparams):
v._keras_initialized = True
# Data-parallel ref: https://github.com/fchollet/keras/issues/2436
# Tower-parallel:
# ref: https://medium.com/autonomous-agents/multi-gpu-training-of-large-sparse-matrix-on-wide-neuralnetwork-cac7afc52ffe @IgnorePep8
# ref: https://gist.github.com/vvpreetham/1379cc4e208ea33ce3e615067e92fc5e
def make_parallel(serial_model, gdev_list, ps_device='/cpu:0', usenccl=False,
initsync=True, syncopt=False, enqueue=False,
model_class=ModelMGPU):
'''Given a keras model, return an equivalent model which parallelizes
the computation over multiple GPUs listed in the gdev_list.
Data-Parallel:
Each GPU gets a slice of the input batch, applies the model on that slice
and later the outputs of the models are concatenated to a single tensor,
hence the user sees a model that behaves the same as the original.
If getting an out-of-memory (OOM) error when scaling the batch size by the
    number of GPUs, there might be input layer(s) in the serial model that run
    additional special operations (such as a transformation of some sort) on the
    1st GPU as enumerated by Tensorflow. This was an observed behavior for
    Embedding layers. The workaround is to pin such layers to the CPU, or
    simply pin the instantiation of the serial model to CPU. The parallelization
will move the operations to GPU.
:Example:
if mgpu_flag:
with tf.device('/cpu:0'):
# define the serial model.
model_serial = get_model_serial()
gdev_list = get_available_gpus()
model = make_parallel(model_serial, gdev_list)
else:
model = def_model_serial()
:param Model serial_model: Serial i.e. non-multi GPU Keras model.
:param list gdev_list: List of gpu devices i.e. ['/gpu:0', '/gpu:1', ...]
Use function get_available_gpus to get the list of available gpus.
This can be a list of strings or list of instances of tf.DeviceSpec.
:param str ps_device: Parameter server device to use.
:param bool usenccl: Use the contrib.nccl Tensorflow library for initial
parameter synchronization and gradients averaging. Note, the model's
        usenccl option overrides the optimizer's usenccl option.
Default: False
:param bool initsync: Synchronize initial Variables i.e. weights,
biases, etc. Default: True
:param bool syncopt: Synchronize gradients. Requires a multi-gpu optimizer.
Default: False
:param bool enqueue: Use StagingArea in the multi-GPU model. Could
potentially speed up Host-to-Device transfers.
Produces a warning that kwargs are ignored for Tensorflow. The
        _patch_tf_backend module monkey patches the Function in
tensorflow_backend to use the enqueue_ops option.
Default: False
:param model_class: Class object to instantiate for multi-gpu models. This
is needed when the ModelMGPU is mixed-in with other classes.
Default: ModelMGPU
:returns: Multi-GPU parallelized model. If ngpus < 2 then do nothing and
return the provided serial_model.
:rtype: ModelMGPU
'''
ngpus = len(gdev_list)
if ngpus < 2:
return serial_model # model_creator()
return model_class(
serial_model=serial_model, gdev_list=gdev_list,
ps_device=ps_device,
enqueue=enqueue, usenccl=usenccl,
initsync=initsync, syncopt=syncopt)
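# Hedged usage sketch complementing the docstring example above (optimizer,
# loss and batch size are illustrative): after wrapping, compile and fit as
# usual; each GPU receives a slice of the batch.
#
#     gdev_list = get_available_gpus()
#     model = make_parallel(serial_model, gdev_list)
#     model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
#     model.fit(x_train, y_train, batch_size=64 * len(gdev_list))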
####### old version following
# # MODIFIED. Inspiration taken from the ref link below.
# # ref: https://raw.githubusercontent.com/kuza55/keras-extras/master/utils/multi_gpu.py @IgnorePep8
# # The inspirational one carried license:
# # Apache License
# # Version 2.0, January 2004
# # For further info refer to: https://github.com/kuza55/keras-extras
# #
# # Also used https://github.com/fchollet/keras/issues/2436 which was just
# # posted as code snippets in a forum.
# from __future__ import print_function
#
# import sys
#
# try:
# from cStringIO import StringIO
# except ImportError:
# # Python 3 compat.
# from io import StringIO
#
# from itertools import chain
#
# from keras import backend as KB
# from keras.layers.core import Lambda
# from keras.models import Model
# from keras.layers.merge import Concatenate
#
# from .multi_gpu_mixin_models import ModelDataflowMixin
#
#
# if KB.backend() == 'tensorflow':
# # Monkey patch Keras back-end to use Function with enqueue.
# # import keras_exp._patch_tf_backend as tfbpatch
# # tfbpatch.patch()
# from .multi_gpu_patch_tf_backend import patch as tfbpatch
# tfbpatch()
#
# import tensorflow as tf
# from tensorflow.python.client import device_lib
#
# try:
# from tensorflow.contrib import nccl
# have_nccl = True
# print('NCCL support available', file=sys.stderr)
# except ImportError:
# have_nccl = False
# print('WARNING: NCCL support not available', file=sys.stderr)
#
# from tensorflow.python.ops import data_flow_ops
#
#
# _DEBUG = False
#
# __all__ = ('get_available_gpus', 'make_parallel', 'print_mgpu_modelsummary',
# 'ModelMGPU')
#
#
# # TODO: Move to some utils module
# class Capturing(list):
# def __enter__(self):
# self._stdout = sys.stdout
# sys.stdout = self._stringio = StringIO()
# return self
#
# def __exit__(self, *args):
# self.extend(self._stringio.getvalue().splitlines())
# del self._stringio # free up some memory
# sys.stdout = self._stdout
#
#
# def get_available_gpus(ngpus=-1):
# '''
# :param int ngpus: Maximum number of GPUs to use. Default -1 means all gpus.
# :returns: List of gpu devices. Ex.: ['/gpu:0', '/gpu:1', ...]
# '''
# local_device_protos = device_lib.list_local_devices()
# gpus_list = [x.name for x in local_device_protos if x.device_type == 'GPU']
# return gpus_list[:ngpus] if ngpus > -1 else gpus_list
#
#
# def print_mgpu_modelsummary(model):
# '''Prints the summary for a multi-GPU keras model.
# :param model: Keras model.
# :type model: Model
# '''
# # print json.dumps(model.get_config(), indent=2) # DEBUG
# print('\nMULTI-GPU MODEL: {}'.format(model.name))
# print(model.summary())
# for layer in model.layers:
# # print 'layer:', layer, '\ttype:', type(layer)
# if isinstance(layer, Model):
# submodel = layer
# print('\n\tSUBMODEL SUMMARY: {}'.format(layer.name))
# with Capturing() as msum:
# minfo = submodel.summary()
# print('\t{}\n\t{}\n'.format('\n\t'.join(msum), minfo))
#
#
# def all_sync_params(tower_params, devices, usenccl=True):
# """Assigns the params from the first tower to all others"""
# if len(devices) == 1:
# return tf.no_op()
# sync_ops = []
# if have_nccl and usenccl:
# for param_on_devices in zip(*tower_params):
# # print('PARAM_ON_DEVICES: {}'.format(param_on_devices)) # DEBUG
# # Note: param_on_devices is [paramX_gpu0, paramX_gpu1, ...]
# param0 = param_on_devices[0]
# send_op, received_tensors = nccl.broadcast(param0, devices[1:])
# sync_ops.append(send_op)
# for device, param, received in zip(devices[1:],
# param_on_devices[1:],
# received_tensors):
# with tf.device(device):
# sync_op = param.assign(received)
# sync_ops.append(sync_op)
# else:
# params0 = tower_params[0]
# for device, params in zip(devices, tower_params):
# with tf.device(device):
# for param, param0 in zip(params, params0):
# sync_op = param.assign(param0.read_value())
# sync_ops.append(sync_op)
#
# return tf.group(*sync_ops)
#
#
# # Data-parallel ref: https://github.com/fchollet/keras/issues/2436
# # Tower-parallel:
# # ref: https://medium.com/autonomous-agents/multi-gpu-training-of-large-sparse-matrix-on-wide-neuralnetwork-cac7afc52ffe @IgnorePep8
# # ref: https://gist.github.com/vvpreetham/1379cc4e208ea33ce3e615067e92fc5e
# def make_parallel(serial_model, gdev_list, ps_device='/cpu:0', usenccl=False,
# initsync=True, syncopt=False, enqueue=False):
# '''Given a keras [model], return an equivalent model which parallelizes
# the computation over [ngpus] GPUs.
# Data-Parallel:
# Each GPU gets a slice of the input batch, applies the model on that slice
# and later the outputs of the models are concatenated to a single tensor,
# hence the user sees a model that behaves the same as the original.
# :param Model serial_model: Serial i.e. non-multi GPU Keras model.
# :param list gdev_list: List of gpu devices i.e. ['/gpu:0', '/gpu:1', ...]
# Use function get_available_gpus to get the list of available gpus.
# :param str ps_device: Parameter server device to use.
# :param bool usenccl: Use the contrib.nccl Tensorflow library for initial
# parameter synchronization and gradients averaging. Note, the model's
# usenccl option overrides the optimizer's usenccl option.
# Default: False
# :param bool initsync: Synchronize initial Variables i.e. weights,
# biases, etc. Default: True
# :param bool syncopt: Synchronize gradients. Requires a multi-gpu optimizer.
# Default: False
# :param bool enqueue: Use StagingArea in the multi-GPU model. Could
# potentially speed up Host-to-Device transfers.
# Produces a warning that kwargs are ignored for Tensorflow. The
# _patch_tf_backend module monkey patches the Function in
# tensorflow_backend to use the enqueue_ops option.
# Default: False
# :returns: Multi-GPU parallelized model. If ngpus < 2 then do nothing and
# return the provided serial_model.
# :rtype: ModelMGPU
# '''
# ngpus = len(gdev_list)
# if ngpus < 2:
# return serial_model # model_creator()
#
# return ModelMGPU(serial_model=serial_model, gdev_list=gdev_list,
# ps_device=ps_device,
# enqueue=enqueue, usenccl=usenccl,
# initsync=initsync, syncopt=syncopt)
#
#
# # def stage(tensors):
# # """Stages the given tensors in a StagingArea for asynchronous put/get.
# # """
# # stage_area = data_flow_ops.StagingArea(
# # dtypes=[tensor.dtype for tensor in tensors],
# # shapes=[tensor.get_shape() for tensor in tensors])
# # put_op = stage_area.put(tensors)
# # get_tensors = stage_area.get()
# # if not isinstance(get_tensors, list):
# # get_tensors = [get_tensors]
# # # print('GET_TENSORS: {}'.format(get_tensors)) # DEBUG
# #
# # get_tensors = [tf.reshape(gt, t.get_shape())
# # for (gt, t) in zip(get_tensors, tensors)]
# # return put_op, get_tensors
#
#
# class ModelMGPU(ModelDataflowMixin, Model):
# '''Override load and save methods of the multi-gpu model. The load and
# save should correspond to the serial model's load and save.
# If there are other idiosyncrasies to handle for the multi-gpu model case then
# these can be handled in this subclass. A serial model should always be
# instantiated prior to wrapping it or converting it to a multi-GPU model.
# This multi-gpu implementation uses data-parallelism.
# A copy-constructor is not implemented so optionally pass any additional
# parameters besides inputs/outputs as args/kwargs to initialize the
# multi-gpu model the same way as the serial model. Typically not needed.
# Currently, it seems that using NCCL and synchronizing/averaging gradients
# slows multi-gpu processing down.
# Kwargs:
# :param Model serial_model: Serial i.e. non-multi GPU Keras model. REQUIRED.
# :param list gdev_list: List of gpu devices i.e. ['/gpu:0', '/gpu:1', ...]
# Use function get_available_gpus to get the list of available gpus.
# REQUIRED.
# :param str ps_device: Parameter server device to use.
# :param bool usenccl: Use the contrib.nccl Tensorflow library for initial
# parameter synchronization and gradients averaging. Note, the model's
# usenccl option overrides the optimizer's usenccl option.
# Default: False
# Raises RuntimeError if specified True and a non-multi-gpu optimizer is
# passed during compile stage.
# :param bool initsync: Synchronize initial Variables i.e. weights,
# biases, etc. Default: True
# :param bool syncopt: Synchronize gradients. Requires a multi-gpu optimizer.
# Default: False
# :param bool enqueue: Use StagingArea in the multi-GPU model. Could
# potentially speed up Host-to-Device transfers.
# Produces a warning that kwargs are ignored for Tensorflow. The
# _patch_tf_backend module monkey patches the Function in
# tensorflow_backend to use the enqueue_ops option.
# Default: False
# '''
# def __init__(self, *args, **kwargs):
# # :param model_creator: Callable that returns a serial i.e. non-multi
# # GPU Keras model i.e. a keras.models.Model model. REQUIRED.
# # Suggestion, use partial from functools to setup model_creator.
# # try:
# # model_creator = kwargs.pop('model_creator')
# # except KeyError:
# # raise RuntimeError('Keyword argument "model_creator" required '
# # 'for ModelMGPU.')
#
# try:
# smodel = kwargs.pop('serial_model')
# except KeyError:
# raise RuntimeError('Keyword argument "serial_model" required '
# 'for ModelMGPU.')
#
# # SET STATE: Instance of serial model for checkpointing
# self._smodel = smodel # model_creator()
#
# try:
# gdev_list = kwargs.pop('gdev_list')
# except KeyError:
# raise RuntimeError('Keyword argument "gdev_list" required '
# 'for ModelMGPU.')
# self._gdev_list = gdev_list
#
# mname = kwargs.pop('name', self._smodel.name)
# kwargs['name'] = mname
#
# self._ps_device = kwargs.pop('ps_device', '/cpu:0')
# self._initsync = kwargs.pop('initsync', True)
# self._usenccl = kwargs.pop('usenccl', False)
# self._syncopt = kwargs.pop('syncopt', False)
# self._enqueue = kwargs.pop('enqueue', False)
#
# # NOTE: To use staging have to patch keras tensorflow_backend.Function.
# # Function implementation in keras_exp.multigpu._patch_tf_backend
# self._enqueue_ops = []
#
# self._tower_params = [] # For init/sync'ing of parameters.
# self._init_make_dataparallel(gdev_list, *args,
# **kwargs)
#
# def __getattribute__(self, attrname):
# '''Override load and save methods to be used from the serial-model. The
# serial-model holds references to the weights in the multi-gpu model.
# '''
# # return Model.__getattribute__(self, attrname)
# if 'load' in attrname or 'save' in attrname:
# return getattr(self._smodel, attrname)
#
# return super(ModelMGPU, self).__getattribute__(attrname)
#
# # ref: https://github.com/fchollet/keras/issues/2436
# def _init_make_dataparallel(self, gdev_list, *args, **kwargs):
# '''Uses data-parallelism to convert a serial model to multi-gpu. Refer
# to make_parallel doc.
# '''
# gpucopy_ops = []
#
# def slice_batch(x, ngpus, part, dev):
# '''Divide the input batch into [ngpus] slices, and obtain slice
# no. [part]. i.e. if len(x)=10, then slice_batch(x, 2, 1) will
# return x[5:].
# '''
# sh = KB.shape(x)
# L = sh[0] // ngpus
# if part == ngpus - 1:
# xslice = x[part * L:]
# else:
# xslice = x[part * L:(part + 1) * L]
#
# # tf.split fails if batch size is not divisible by ngpus. Error:
# # InvalidArgumentError (see above for traceback): Number of
# # ways to split should evenly divide the split dimension
# # xslice = tf.split(x, ngpus)[part]
#
# if not self._enqueue:
# return xslice
#
# # Did not see any benefit.
# with tf.device(dev):
# # if self._stager is None:
# stager = data_flow_ops.StagingArea(
# dtypes=[xslice.dtype], shapes=[xslice.shape])
# stage = stager.put([xslice])
# gpucopy_ops.append(stage)
# # xslice_stage = stager.get()
# return stager.get()
#
# ngpus = len(gdev_list)
# if ngpus < 2:
# raise RuntimeError('Number of gpus < 2. Require two or more GPUs '
# 'for multi-gpu model parallelization.')
#
# model_ = model = self._smodel
# global_scope = tf.get_variable_scope()
# towers = []
# for idev, dev in enumerate(gdev_list):
# # TODO: The last slice could cause a gradient calculation outlier
# # when averaging gradients. Maybe ensure ahead of time that the
# # batch_size is evenly divisible by number of GPUs, or maybe don't
# # use the last slice.
# with tf.device(self._ps_device):
# slices = [] # multi-input case
# for ix, x in enumerate(model.inputs):
# slice_g = Lambda(
# slice_batch, # lambda shape: shape,
# lambda shape: x.shape.as_list(),
# name='stage_cpuSliceIn{}_Dev{}'.format(ix, idev),
# arguments={'ngpus': ngpus, 'part': idev,
# 'dev': dev})(x)
# slices.append(slice_g)
# # print('SLICE_G: {}'.format(slice_g)) # DEBUG
# # print('SLICES: {}'.format(slices)) # DEBUG
#
# # with tf.variable_scope('GPU_%i' % idev), \
# # tf.variable_scope(global_scope, reuse=idev > 0), \
# # tf.variable_scope('GPU_{}'.format(idev),
# # reuse=idev > 0) as var_scope, \
# with tf.device(dev), \
# tf.variable_scope(global_scope, reuse=idev > 0), \
# tf.name_scope('tower_%i' % idev):
# # NOTE: Currently not using model_creator. Did not observe
# # any benefit in such an implementation.
# # Instantiate model under device context. More complicated.
# # Need to use optimizer synchronization in this scenario.
# # model_ = model_creator()
# # If using NCCL without re-instantiating the model then must
# # set the colocate_gradients_with_ops to False in optimizer.
# # if idev == 0:
# # # SET STATE: Instance of serial model for checkpointing
# # self._smodel = model_ # for ability to checkpoint
#
# modeltower = model_(slices)
# towers.append(modeltower)
#
# # params = model_.trainable_weights
# # params = tf.get_collection(
# # tf.GraphKeys.TRAINABLE_VARIABLES, scope=var_scope.name)
# params = modeltower.graph._collections['trainable_variables']
# # print('PARAMS: {}'.format(params)) # DEBUG
#
# self._tower_params.append(params)
#
# with tf.device(self._ps_device):
# merged = Concatenate(axis=0)(towers)
# # print('MERGED: {}'.format(merged)) # DEBUG
#
# # self._enqueue_ops.append(tf.group(*gpucopy_ops))
# self._enqueue_ops += gpucopy_ops
#
# kwargs['inputs'] = model.inputs
# kwargs['outputs'] = merged
# super(ModelMGPU, self).__init__(*args, **kwargs)
#
# def compile(self, *args, **kwargs):
# '''Refer to Model.compile docstring for parameters. Override
# functionality is documented below.
# :override compile: Override Model.compile method to check for options
# that the optimizer is multi-gpu enabled, and synchronize initial
# variables.
# '''
# initsync = self._initsync
# usenccl = self._usenccl
#
# opt = kwargs['optimizer']
# if self._syncopt and not getattr(opt, 'ismgpu', False):
# raise RuntimeError(
# 'Multi-GPU synchronization model requires a multi-GPU '
# 'optimizer. Instead got: {}'.format(opt))
#
# opt.usenccl = usenccl
#
# if self._enqueue_ops:
# # Produces a warning that kwargs are ignored for Tensorflow. Patch
# # Function in tensorflow_backend to use the enqueue_ops option.
# kwargs['enqueue_ops'] = self._enqueue_ops
#
# super(ModelMGPU, self).compile(*args, **kwargs)
#
# if initsync:
# self._run_initsync()
#
# def _run_initsync(self):
# # tparams = [list(chain(*tp)) for tp in self._tower_params]
# tparams = self._tower_params
#
# # Check to prevent unnecessarily re-initializing and
# # synchronizing, i.e. when the model loads the weights.
# for v in chain.from_iterable(tparams):
# if getattr(v, '_keras_initialized', False):
# return
#
# KB.manual_variable_initialization(True)
# sess = KB.get_session()
# KB.manual_variable_initialization(False)
#
# # glob_variables = tf.global_variables()
# # sess.run(tf.variables_initializer(glob_variables))
#
# # Initialize on GPU0 and sync to other GPUs
# init_op = tf.variables_initializer(tparams[0])
# # init_op = tf.variables_initializer(self._tower_params[0])
# # init_op = tf.variables_initializer(self.trainable_weights)
# sess.run(init_op)
#
# # Important if using model_creator. Not necessary if the model instance is
# # reused in which case the model layers are shared between slices
# # and are automatically sync'd.
# sync_op = all_sync_params(tparams, self._gdev_list,
# usenccl=self._usenccl)
# sess.run(sync_op)
#
# for v in chain.from_iterable(tparams):
# v._keras_initialized = True
|
co-ment/comt
|
src/cm/migrations/0003_update_keys_to_textversion.py
|
from south.db import db
from django.db import models
from cm.models import *
class Migration:
def forwards(self, orm):
"Write your forwards migration here"
for tv in orm.TextVersion.objects.all():
tv.key = orm.TextVersion.objects._gen_key()
tv.adminkey = orm.TextVersion.objects._gen_adminkey()
tv.save()
def backwards(self, orm):
"Write your backwards migration here"
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75'})
},
'cm.activity': {
'comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cm.Comment']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': 'None', 'max_length': '15', 'null': 'True', 'blank': 'True'}),
'originator_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'originator_activity'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'text': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cm.Text']", 'null': 'True', 'blank': 'True'}),
'text_version': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cm.TextVersion']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cm.attachment': {
'adminkey': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '1000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'text_version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.TextVersion']"})
},
'cm.comment': {
'adminkey': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'content_html': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'end_offset': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'end_wrapper': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'format': ('django.db.models.fields.CharField', [], {'default': "'markdown'", 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'reply_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.Comment']", 'null': 'True', 'blank': 'True'}),
'start_offset': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_wrapper': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tags': ('tagging.fields.TagField', [], {}),
'text_version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.TextVersion']"}),
'title': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cm.configuration': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.TextField', [], {}),
'raw_value': ('django.db.models.fields.TextField', [], {})
},
'cm.email': {
'bcc': ('django.db.models.fields.TextField', [], {}),
'body': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'from_email': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'subject': ('django.db.models.fields.TextField', [], {}),
'to': ('django.db.models.fields.TextField', [], {})
},
'cm.notification': {
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'adminkey': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.Text']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cm.role': {
'anon': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'global_scope': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']"})
},
'cm.text': {
'adminkey': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'last_text_version': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'related_text'", 'null': 'True', 'to': "orm['cm.TextVersion']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'private_feed_key': ('django.db.models.fields.CharField', [], {'null': 'True', 'default': 'None', 'max_length': '20', 'blank': 'True', 'unique': 'True', 'db_index': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'title': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cm.textversion': {
'adminkey': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'format': ('django.db.models.fields.CharField', [], {'default': "'markdown'", 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'mod_posteriori': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'tags': ('tagging.fields.TagField', [], {'max_length': '1000'}),
'text': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.Text']"}),
'title': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cm.userprofile': {
'adminkey': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'allow_contact': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_email_error': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_suspended': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_temp': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'preferred_language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'cm.userrole': {
'Meta': {'unique_together': "(('role', 'user', 'text'),)"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.Role']", 'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.Text']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['cm']
|
odoousers2014/LibrERP
|
l10n_it_sale/wizard/confirmation.py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2013-2014 Didotech SRL (info at didotech.com)
# All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from openerp.osv import orm, fields
import decimal_precision as dp
import netsvc
from tools import ustr
class sale_order_confirm(orm.TransientModel):
_inherit = "sale.order.confirm"
_columns = {
'cig': fields.char('CIG', size=64, help="Codice identificativo di gara"),
'cup': fields.char('CUP', size=64, help="Codice unico di Progetto")
}
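# CIG ("Codice identificativo di gara") is the Italian public-tender
# identification code; CUP ("Codice unico di progetto") is the unique
# public-project code. Both are copied verbatim onto the confirmed sale
# order and its pickings in sale_order_confirmated() below.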
# def default_get(self, cr, uid, fields, context=None):
# sale_order_obj = self.pool['sale.order']
# if context is None:
# context = {}
#
# res = super(sale_order_confirm, self).default_get(cr, uid, fields, context=context)
# sale_order_data = sale_order_obj.browse(cr, uid, context['active_ids'][0], context)
#
# res['cup'] = sale_order_data.cup
# res['cig'] = sale_order_data.cig
#
# return res
def sale_order_confirmated(self, cr, uid, ids, context=None):
sale_order_obj = self.pool['sale.order']
result = super(sale_order_confirm, self).sale_order_confirmated(cr, uid, ids, context=context)
sale_order_confirm_data = self.browse(cr, uid, ids[0], context=context)
if result.get('res_id'):
sale_order_obj.write(cr, uid, result['res_id'], {
'cig': sale_order_confirm_data.cig,
'cup': sale_order_confirm_data.cup,
}, context=context)
else:
sale_order_obj.write(cr, uid, context['active_ids'][0], {
'cig': sale_order_confirm_data.cig,
'cup': sale_order_confirm_data.cup,
}, context=context)
for order in sale_order_obj.browse(cr, uid, [result.get('res_id') or context['active_ids'][0]], context=context):
# partner = self.pool['res.partner'].browse(cr, uid, order.partner_id.id)
picking_obj = self.pool['stock.picking']
picking_ids = picking_obj.search(cr, uid, [('sale_id', '=', order.id)], context=context)
for picking_id in picking_ids:
picking_obj.write(cr, uid, picking_id, {
'cig': sale_order_confirm_data.cig or '',
'cup': sale_order_confirm_data.cup or ''
}, context=context)
return result
|
fidelcoria/AYFM-Scheduling
|
ScheduleParsing/scripts/schedule/convert_docx.py
|
#https://python-docx.readthedocs.io/en/latest/user/install.html
#python-docx may be installed with pip if you have it available:
# pip install python-docx
#python-docx can also be installed using easy_install, although this is discouraged:
# easy_install python-docx
#If neither pip nor easy_install is available, it can be installed manually by downloading the distribution from PyPI, unpacking the tarball, and running setup.py:
# tar xvzf python-docx-{version}.tar.gz
# cd python-docx-{version}
# python setup.py install
#python-docx depends on the lxml package. Both pip and easy_install will take care of satisfying those dependencies for you, but if you use this last method you will need to install those yourself.
from docx import Document
import datetime
from schedule.Assignment import *
#indices for row cells in the template table
DATE = 0
SECTION_A_PARTICIPANTS = 1
SECTION_A_LESSON = 2
SECTION_B_PARTICIPANTS = 3
SECTION_B_LESSON = 4
# csv column header
HEADER = 'Date,Type,Assignee,Householder,Lesson,Classroom'
def getWeekDate(weekHeaderRow, year, month):
'''extract the date from a week-header row of the table'''
raw_date = weekHeaderRow.cells[DATE].text.strip()
if raw_date == '':
return raw_date
# convert to nice date format
# split up to look for a number for the day of the month
date_parts = raw_date.split()
# list comprehension to select numeric strings
numericParts = [part for part in date_parts if part.isnumeric()]
# assume there will only be one
day = int( numericParts[0] )
# this is the format for MySQL date
date = '{:%Y-%m-%d}'.format( datetime.date(year, month, day) )
return date
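# For illustration: if a week-header row's date cell reads "Sunday January 7",
# the only numeric token is "7", so getWeekDate(row, 2018, 1) yields
# '2018-01-07' (the MySQL date format noted above).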
def parseAssignmentRow(row, date, aType):
'''parse an assignment row from the table; returns a list of Assignment objects'''
assignments = []
# participants for first assgn
participants = row.cells[SECTION_A_PARTICIPANTS].text.strip()
if participants != '': # if there are participants
assgn = Assignment() # new empty assignment
# begin populating fields
assgn.date = date
assgn.type = aType
assgn.lesson = row.cells[SECTION_A_LESSON].text.strip()
assgn.section = SECTION_A
# be sure to strip each element of the array `students` in case split leaves white space
students = participants.split('\n') # assume at most 2 elements, and at least one
# the assignee should come 1st
assgn.assignee = students[0].strip()
if len(students) > 1: # the householder second
# '> 1' in case there is an additional helper (will be ignored)
assgn.hholder = students[1].strip()
assignments.append( assgn )
# the same for the second
participants = row.cells[SECTION_B_PARTICIPANTS].text.strip() # participants for second assgn
if participants != '':
assgn = Assignment() # new empty assignment
assgn.date = date
assgn.type = aType
assgn.lesson = row.cells[SECTION_B_LESSON].text.strip()
assgn.section = SECTION_B
students = participants.split('\n')
assgn.assignee = students[0].strip()
if len(students) > 1:
assgn.hholder = students[1].strip()
assignments.append( assgn )
return assignments
def to_csv(path, year, month):
'''convert the docx schedule at path (year and month as ints) into a csv file'''
docxsched = Document(path)
#find tables in the doc
tables = docxsched.tables
if len(tables) != 1:
raise ValueError('expected exactly one table in the document, found %d' % len(tables))
#select the first table
table = tables[0]
# Assume everything else is as expected
#there are 5 weeks for every schedule
# pick up the date
# then the type
# if a name and a lesson are found write the csv String
# if only a name is found write the csv string
# if no name is found continue looking for date or type (which ever appears first)
csvSched = [] #this is an array of strings for the csv file
row_iter = iter(table.rows[1:]) #skipping the first row (header)
row = next(row_iter)
# The first week of every month is different (has only 1 assgn)
date = getWeekDate(row, year, month)
#advance to the only participation for first week (Reading)
row = next(row_iter)
assgnRow = parseAssignmentRow(row, date, READING)
for assgn in assgnRow:
csvSched.append(assgn.makeCSV() + '\n')
# Now continue with the remaining 4 weeks
for week in range(4):
row = next(row_iter)
# Extract date for this week
date = getWeekDate(row, year, month)
if date == '': # no date will be available for assignments
continue
for assgnType in TYPES:
row = next(row_iter)
assgnRow = parseAssignmentRow(row, date, assgnType)
for assgn in assgnRow:
# print(assgn)
csvSched.append( assgn.makeCSV() + '\n' )
#path will only work when called from main.py
csvfilename = '../csv/%d-%d.csv' % (year, month)
with open(csvfilename, encoding='utf-8', mode='w') as parsed:
parsed.write(HEADER+'\n')
for line in csvSched:
parsed.write(line)
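# A typical invocation (hypothetical input path; the CSV lands in ../csv/
# relative to the working directory, as noted above):
#
#     to_csv('../docx/2018-1.docx', 2018, 1)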
if __name__ == '__main__':
print('Running as main. Doing nothing.')
|
emilydolson/forestcat
|
pyrobot/system/serial/serialwin32.py
|
#! python
#Python Serial Port Extension for Win32, Linux, BSD, Jython
#serial driver for win32
#see __init__.py
#
#(C) 2001-2003 Chris Liechti <cliechti@gmx.net>
# this is distributed under a free software license, see license.txt
import win32file # The base COM port and file IO functions.
import win32event # We use events and the WaitFor[Single|Multiple]Objects functions.
import win32con # constants.
from serialutil import *
VERSION = "$Revision: 1527 $".split()[1] #extract CVS version
#from winbase.h. these should really be in win32con
MS_CTS_ON = 16
MS_DSR_ON = 32
MS_RING_ON = 64
MS_RLSD_ON = 128
def device(portnum):
"""Turn a port number into a device name"""
#the "//./COMx" format is required for devices >= 9
#not all versions of Windows seem to support this properly
#so that the first few ports are used with the DOS device name
if portnum < 9:
return 'COM%d' % (portnum+1) #numbers are transformed to a string
else:
return r'\\.\COM%d' % (portnum+1)
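# For example: device(0) -> 'COM1', device(8) -> 'COM9', and
# device(9) -> r'\\.\COM10' (the first port number that needs the UNC form).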
class Serial(SerialBase):
"""Serial port implemenation for Win32. This implemenatation requires a
win32all installation."""
BAUDRATES = (50,75,110,134,150,200,300,600,1200,1800,2400,4800,9600,
19200,38400,57600,115200)
def open(self):
"""Open port with current settings. This may throw a SerialException
if the port cannot be opened."""
if self._port is None:
raise SerialException("Port must be configured before it can be used.")
self.hComPort = None
try:
self.hComPort = win32file.CreateFile(self.portstr,
win32con.GENERIC_READ | win32con.GENERIC_WRITE,
0, # exclusive access
None, # no security
win32con.OPEN_EXISTING,
win32con.FILE_ATTRIBUTE_NORMAL | win32con.FILE_FLAG_OVERLAPPED,
None)
except Exception, msg:
self.hComPort = None #'cause __del__ is called anyway
raise SerialException("could not open port: %s" % msg)
# Setup a 4k buffer
win32file.SetupComm(self.hComPort, 4096, 4096)
#Save original timeout values:
self._orgTimeouts = win32file.GetCommTimeouts(self.hComPort)
self._reconfigurePort()
# Clear buffers:
# Remove anything that was there
win32file.PurgeComm(self.hComPort,
win32file.PURGE_TXCLEAR | win32file.PURGE_TXABORT |
win32file.PURGE_RXCLEAR | win32file.PURGE_RXABORT)
self._overlappedRead = win32file.OVERLAPPED()
self._overlappedRead.hEvent = win32event.CreateEvent(None, 1, 0, None)
self._overlappedWrite = win32file.OVERLAPPED()
self._overlappedWrite.hEvent = win32event.CreateEvent(None, 0, 0, None)
self._isOpen = True
def _reconfigurePort(self):
"""Set commuication parameters on opened port."""
if not self.hComPort:
raise SerialException("Can only operate on a valid port handle")
#Set Windows timeout values
#timeouts is a tuple with the following items:
#(ReadIntervalTimeout,ReadTotalTimeoutMultiplier,
# ReadTotalTimeoutConstant,WriteTotalTimeoutMultiplier,
# WriteTotalTimeoutConstant)
if self._timeout is None:
timeouts = (0, 0, 0, 0, 0)
elif self._timeout == 0:
timeouts = (win32con.MAXDWORD, 0, 0, 0, 0)
else:
timeouts = (0, 0, int(self._timeout*1000), 0, 0)
win32file.SetCommTimeouts(self.hComPort, timeouts)
win32file.SetCommMask(self.hComPort, win32file.EV_ERR)
# Setup the connection info.
# Get state and modify it:
comDCB = win32file.GetCommState(self.hComPort)
comDCB.BaudRate = self._baudrate
if self._bytesize == FIVEBITS:
comDCB.ByteSize = 5
elif self._bytesize == SIXBITS:
comDCB.ByteSize = 6
elif self._bytesize == SEVENBITS:
comDCB.ByteSize = 7
elif self._bytesize == EIGHTBITS:
comDCB.ByteSize = 8
else:
raise ValueError("Unsupported number of data bits: %r" % self._bytesize)
if self._parity == PARITY_NONE:
comDCB.Parity = win32file.NOPARITY
comDCB.fParity = 0 # Dis/Enable Parity Check
elif self._parity == PARITY_EVEN:
comDCB.Parity = win32file.EVENPARITY
comDCB.fParity = 1 # Dis/Enable Parity Check
elif self._parity == PARITY_ODD:
comDCB.Parity = win32file.ODDPARITY
comDCB.fParity = 1 # Dis/Enable Parity Check
else:
raise ValueError("Unsupported parity mode: %r" % self._parity)
if self._stopbits == STOPBITS_ONE:
comDCB.StopBits = win32file.ONESTOPBIT
elif self._stopbits == STOPBITS_TWO:
comDCB.StopBits = win32file.TWOSTOPBITS
else:
raise ValueError("Unsupported number of stop bits: %r" % self._stopbits)
comDCB.fBinary = 1 # Enable Binary Transmission
# Char. w/ Parity-Err are replaced with 0xff (if fErrorChar is set to TRUE)
if self._rtscts:
comDCB.fRtsControl = win32file.RTS_CONTROL_HANDSHAKE
comDCB.fDtrControl = win32file.DTR_CONTROL_HANDSHAKE
else:
comDCB.fRtsControl = win32file.RTS_CONTROL_ENABLE
comDCB.fDtrControl = win32file.DTR_CONTROL_ENABLE
comDCB.fOutxCtsFlow = self._rtscts
comDCB.fOutxDsrFlow = self._rtscts
comDCB.fOutX = self._xonxoff
comDCB.fInX = self._xonxoff
comDCB.fNull = 0
comDCB.fErrorChar = 0
comDCB.fAbortOnError = 0
win32file.SetCommState(self.hComPort, comDCB)
#~ def __del__(self):
#~ self.close()
def close(self):
"""Close port"""
if self._isOpen:
if self.hComPort:
#Restore original timeout values:
win32file.SetCommTimeouts(self.hComPort, self._orgTimeouts)
#Close COM-Port:
win32file.CloseHandle(self.hComPort)
self.hComPort = None
self._isOpen = False
def makeDeviceName(self, port):
return device(port)
# - - - - - - - - - - - - - - - - - - - - - - - -
def inWaiting(self):
"""Return the number of characters currently in the input buffer."""
flags, comstat = win32file.ClearCommError(self.hComPort)
return comstat.cbInQue
def read(self, size=1):
"""Read size bytes from the serial port. If a timeout is set it may
return less characters as requested. With no timeout it will block
until the requested number of bytes is read."""
if not self.hComPort: raise portNotOpenError
if size > 0:
win32event.ResetEvent(self._overlappedRead.hEvent)
flags, comstat = win32file.ClearCommError(self.hComPort)
if self.timeout == 0:
n = min(comstat.cbInQue, size)
if n > 0:
rc, buf = win32file.ReadFile(self.hComPort, win32file.AllocateReadBuffer(n), self._overlappedRead)
win32event.WaitForSingleObject(self._overlappedRead.hEvent, win32event.INFINITE)
read = str(buf)
else:
read = ''
else:
rc, buf = win32file.ReadFile(self.hComPort, win32file.AllocateReadBuffer(size), self._overlappedRead)
n = win32file.GetOverlappedResult(self.hComPort, self._overlappedRead, 1)
read = str(buf[:n])
else:
read = ''
return read
def write(self, s):
"""Output the given string over the serial port."""
if not self.hComPort: raise portNotOpenError
#print repr(s),
if s:
err, n = win32file.WriteFile(self.hComPort, s, self._overlappedWrite)
if err: #will be ERROR_IO_PENDING:
# Wait for the write to complete.
win32event.WaitForSingleObject(self._overlappedWrite.hEvent, win32event.INFINITE)
def flushInput(self):
"""Clear input buffer, discarding all that is in the buffer."""
if not self.hComPort: raise portNotOpenError
win32file.PurgeComm(self.hComPort, win32file.PURGE_RXCLEAR | win32file.PURGE_RXABORT)
def flushOutput(self):
"""Clear output buffer, aborting the current output and
discarding all that is in the buffer."""
if not self.hComPort: raise portNotOpenError
win32file.PurgeComm(self.hComPort, win32file.PURGE_TXCLEAR | win32file.PURGE_TXABORT)
def sendBreak(self):
"""Send break condition."""
if not self.hComPort: raise portNotOpenError
import time
win32file.SetCommBreak(self.hComPort)
#TODO: how to set the correct duration??
time.sleep(0.020)
win32file.ClearCommBreak(self.hComPort)
def setRTS(self,level=1):
"""Set terminal status line: Request To Send"""
if not self.hComPort: raise portNotOpenError
if level:
win32file.EscapeCommFunction(self.hComPort, win32file.SETRTS)
else:
win32file.EscapeCommFunction(self.hComPort, win32file.CLRRTS)
def setDTR(self,level=1):
"""Set terminal status line: Data Terminal Ready"""
if not self.hComPort: raise portNotOpenError
if level:
win32file.EscapeCommFunction(self.hComPort, win32file.SETDTR)
else:
win32file.EscapeCommFunction(self.hComPort, win32file.CLRDTR)
def getCTS(self):
"""Read terminal status line: Clear To Send"""
if not self.hComPort: raise portNotOpenError
return MS_CTS_ON & win32file.GetCommModemStatus(self.hComPort) != 0
def getDSR(self):
"""Read terminal status line: Data Set Ready"""
if not self.hComPort: raise portNotOpenError
return MS_DSR_ON & win32file.GetCommModemStatus(self.hComPort) != 0
def getRI(self):
"""Read terminal status line: Ring Indicator"""
if not self.hComPort: raise portNotOpenError
return MS_RING_ON & win32file.GetCommModemStatus(self.hComPort) != 0
def getCD(self):
"""Read terminal status line: Carrier Detect"""
if not self.hComPort: raise portNotOpenError
return MS_RLSD_ON & win32file.GetCommModemStatus(self.hComPort) != 0
#Test function only!!
if __name__ == '__main__':
print __name__
s = Serial()
print s
s = Serial(0)
print s
s.baudrate = 19200
s.databits = 7
s.close()
s.port = 3
s.open()
print s
|
bameda/monarch
|
back/settings/common.py
|
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
ADMINS = (
("David Barragán", "bameda@dbarragan.com"),
)
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0q)_&-!hu%%en55a&cx!a2c^7aiw*7*+^zg%_&vk9&4&-4&qg#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
# Media files
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'monarch.base',
'monarch.documents',
'monarch.users',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'monarch.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'TEMPLATE_DEBUG': False,
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
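# These are shared defaults ("common" settings). A deployment would typically
# extend them from a sibling module; a hypothetical local.py sketch:
#
#     from .common import *
#     DEBUG = True
#     DATABASES['default']['NAME'] = os.path.join(BASE_DIR, 'dev.sqlite3')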
|
ztp-at/RKSV
|
librksv/depparser.py
|
###########################################################################
# Copyright 2017 ZT Prentner IT GmbH (www.ztp.at)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
"""
This module provides functions to parse a DEP.
"""
from builtins import int
from builtins import range
from .gettext_helper import _
import copy
import ijson
from math import ceil
from six import string_types
from . import utils
from . import receipt
class DEPException(utils.RKSVVerifyException):
"""
An exception that is thrown if something is wrong with a DEP.
"""
pass
class DEPParseException(DEPException):
"""
Indicates that an error occurred while parsing the DEP.
"""
def __init__(self, msg):
super(DEPParseException, self).__init__(msg)
self._initargs = (msg,)
class MalformedDEPException(DEPParseException):
"""
Indicates that the DEP is not properly formed.
"""
def __init__(self, msg=None, groupidx=None):
if msg is None:
super(MalformedDEPException, self).__init__(_("Malformed DEP"))
else:
if groupidx is None:
super(MalformedDEPException, self).__init__(
_('{}.').format(msg))
else:
super(MalformedDEPException, self).__init__(
_("In group {}: {}.").format(groupidx, msg))
self._initargs = (msg, groupidx)
class MissingDEPElementException(MalformedDEPException):
"""
Indicates that an element in the DEP is missing.
"""
def __init__(self, elem, groupidx=None):
super(MissingDEPElementException, self).__init__(
_("Element \"{}\" missing").format(elem),
groupidx)
self._initargs = (elem, groupidx)
class MalformedDEPElementException(MalformedDEPException):
"""
Indicates that an element in the DEP is malformed.
"""
def __init__(self, elem, detail=None, groupidx=None):
if detail is None:
super(MalformedDEPElementException, self).__init__(
_("Element \"{}\" malformed").format(elem),
groupidx)
else:
super(MalformedDEPElementException, self).__init__(
_("Element \"{}\" malformed: {}").format(elem, detail),
groupidx)
self._initargs = (elem, detail, groupidx)
class DuplicateDEPElementException(MalformedDEPException):
"""
Indicates that an element in the DEP is redundant.
"""
def __init__(self, elem, groupidx=None):
super(DuplicateDEPElementException, self).__init__(
_("Duplicate element \"{}\"").format(elem),
groupidx)
self._initargs = (elem, groupidx)
class MalformedCertificateException(DEPParseException):
"""
Indicates that a certificate in the DEP is not properly formed.
"""
def __init__(self, cert):
super(MalformedCertificateException, self).__init__(
_("Certificate \"{}\" malformed.").format(cert))
self._initargs = (cert,)
class DEPState(object):
def __init__(self, upper = None):
self.upper = upper
def parse(self, prefix, event, value):
raise NotImplementedError("Please implement this yourself.")
def ready(self):
return False
def getChunk(self):
raise NotImplementedError("Please implement this yourself.")
def needCrt(self):
return None
def setCrt(self, cert, cert_chain):
raise NotImplementedError("Please implement this yourself.")
class DEPStateWithData(DEPState):
def __init__(self, chunksize, upper = None):
super(DEPStateWithData, self).__init__(upper)
self.chunksize = chunksize
if upper:
self.chunk = self.upper.chunk
else:
self.chunk = list()
def currentChunksize(self):
return sum(len(recs) for recs, cert, cert_chain in self.chunk)
def ready(self):
if self.chunksize == 0:
return False
return self.currentChunksize() >= self.chunksize
def getChunk(self):
if self.currentChunksize() <= 0:
return []
# Note that we only copy the groups (of which there are hopefully few)
# FIXME: but still...
ret = copy.copy(self.chunk)
del self.chunk[:]
return ret
class DEPStateWithIncompleteData(DEPStateWithData):
class WIPData(object):
def __init__(self):
self.cert = None
self.cert_chain = None
self.recs = list()
def __init__(self, chunksize, upper, idx):
super(DEPStateWithIncompleteData, self).__init__(chunksize, upper)
if hasattr(upper, 'wip'):
self.wip = upper.wip
else:
self.wip = DEPStateWithIncompleteData.WIPData()
self.idx = idx
def needCrt(self):
if self.wip.cert is None or self.wip.cert_chain is None:
return self.idx
return None
def setCrt(self, cert, cert_chain):
self.wip.cert = cert
self.wip.cert_chain = cert_chain
def mergeIntoChunk(self):
if len(self.wip.recs) > 0:
clist = self.wip.cert_chain
if clist is None:
clist = list()
self.chunk.append((self.wip.recs, self.wip.cert, clist))
self.wip.recs = list()
def ready(self):
if self.chunksize == 0:
return False
return self.currentChunksize() + len(self.wip.recs) >= self.chunksize
def getChunk(self):
self.mergeIntoChunk()
return super(DEPStateWithIncompleteData, self).getChunk()
class DEPStateRoot(DEPStateWithData):
def __init__(self, chunksize):
super(DEPStateRoot, self).__init__(chunksize)
self.root_seen = False
def parse(self, prefix, event, value):
if prefix == '' and event == 'start_map' and value is None:
if self.root_seen:
raise MalformedDEPException(_('Duplicate DEP root'))
self.root_seen = True
return DEPStateRootMap(self.chunksize, self)
raise MalformedDEPException(_('Malformed DEP root'))
class DEPStateRootMap(DEPStateWithData):
def __init__(self, chunksize, upper):
super(DEPStateRootMap, self).__init__(chunksize, upper)
self.groups_seen = False
def parse(self, prefix, event, value):
if prefix == '' and event == 'end_map':
if not self.groups_seen:
raise MissingDEPElementException('Belege-Gruppe')
return self.upper
if prefix == 'Belege-Gruppe':
if event != 'start_array':
raise MalformedDEPException(_('Malformed DEP root'))
if self.groups_seen:
raise DuplicateDEPElementException('Belege-Gruppe')
self.groups_seen = True
return DEPStateBGList(self.chunksize, self)
# TODO: handle other elements
return self
class DEPStateBGList(DEPStateWithData):
def __init__(self, chunksize, upper):
super(DEPStateBGList, self).__init__(chunksize, upper)
self.curIdx = 0
def parse(self, prefix, event, value):
if prefix == 'Belege-Gruppe' and event == 'end_array':
return self.upper
if prefix == 'Belege-Gruppe.item' and event == 'start_map':
nextState = DEPStateGroup(self.chunksize, self, self.curIdx)
self.curIdx += 1
return nextState
raise MalformedDEPElementException('Belege-Gruppe')
class DEPStateGroup(DEPStateWithIncompleteData):
def __init__(self, chunksize, upper, idx):
super(DEPStateGroup, self).__init__(chunksize, upper, idx)
self.recs_seen = False
self.cert_seen = False
self.cert_list_seen = False
def parse(self, prefix, event, value):
if prefix == 'Belege-Gruppe.item' and event == 'end_map':
if not self.cert_seen:
raise MissingDEPElementException('Signaturzertifikat', self.idx)
if not self.cert_list_seen:
raise MissingDEPElementException('Zertifizierungsstellen', self.idx)
if not self.recs_seen:
raise MissingDEPElementException('Belege-kompakt', self.idx)
self.mergeIntoChunk()
return self.upper
if prefix == 'Belege-Gruppe.item.Signaturzertifikat':
if self.cert_seen:
raise DuplicateDEPElementException('Signaturzertifikat', self.idx)
if event != 'string':
raise MalformedDEPElementException('Signaturzertifikat',
_('not a string'), self.idx)
self.cert_seen = True
self.wip.cert = parseDEPCert(value) if value != '' else None
elif prefix == 'Belege-Gruppe.item.Zertifizierungsstellen':
if self.cert_list_seen:
raise DuplicateDEPElementException('Zertifizierungsstellen', self.idx)
if event != 'start_array':
raise MalformedDEPElementException('Zertifizierungsstellen',
_('not a list'), self.idx)
self.wip.cert_chain = list()
self.cert_list_seen = True
return DEPStateCertList(self.chunksize, self, self.idx)
elif prefix == 'Belege-Gruppe.item.Belege-kompakt':
if self.recs_seen:
raise DuplicateDEPElementException('Belege-kompakt', self.idx)
if event != 'start_array':
raise MalformedDEPElementException('Belege-kompakt',
_('not a list'), self.idx)
self.recs_seen = True
return DEPStateReceiptList(self.chunksize, self, self.idx)
# TODO: handle other elements
return self
class DEPStateCertList(DEPStateWithIncompleteData):
def parse(self, prefix, event, value):
if prefix == 'Belege-Gruppe.item.Zertifizierungsstellen' and event == 'end_array':
return self.upper
if prefix == 'Belege-Gruppe.item.Zertifizierungsstellen.item' \
and event == 'string':
self.wip.cert_chain.append(parseDEPCert(value))
return self
raise MalformedDEPElementException('Zertifizierungsstellen', self.idx)
class DEPStateReceiptList(DEPStateWithIncompleteData):
def parse(self, prefix, event, value):
if prefix == 'Belege-Gruppe.item.Belege-kompakt' and event == 'end_array':
return self.upper
if prefix == 'Belege-Gruppe.item.Belege-kompakt.item' \
and event == 'string':
self.wip.recs.append(shrinkDEPReceipt(value))
return self
raise MalformedDEPElementException('Belege-kompakt', self.idx)
def shrinkDEPReceipt(rec, idx = None):
"""
Encode a JWS receipt string to a bytes representation. This takes up less
memory.
:param rec: The receipt JWS as a string.
:param idx: The index of the group in the DEP to which the receipt belongs
or None if it is unknown. This is only used to generate error messages.
:return: The receipt JWS as a byte array.
"""
try:
return rec.encode('utf-8')
except TypeError:
if idx is None:
raise MalformedDEPElementException(_('Receipt \"{}\"').format(rec))
else:
raise MalformedDEPElementException(_('Receipt \"{}\"').format(rec), groupidx=idx)
def expandDEPReceipt(rec, idx = None):
"""
Decodes a receipt JWS byte array to a regular string.
:param rec: The receipt JWS as a byte array.
:param idx: The index of the group in the DEP to which the receipt belongs
or None if it is unknown. This is only used to generate error messages.
:return: The receipt JWS as a string.
"""
try:
return rec.decode('utf-8')
except UnicodeDecodeError:
if idx is None:
raise MalformedDEPElementException(_('Receipt \"{}\"').format(rec))
else:
raise MalformedDEPElementException(_('Receipt \"{}\"').format(rec), groupidx=idx)
def parseDEPCert(cert_str):
"""
Turns a certificate string as used in a DEP into a certificate object.
:param cert_str: A certificate in PEM format without header and footer
and on a single line.
:return: A cryptography certificate object.
:throws: MalformedCertificateException
"""
if not isinstance(cert_str, string_types):
raise MalformedCertificateException(cert_str)
try:
return utils.loadCert(utils.addPEMCertHeaders(cert_str))
except ValueError:
raise MalformedCertificateException(cert_str)
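# For illustration (assumption: the string is the single-line base64 body of a
# PEM certificate, i.e. the part between the BEGIN/END CERTIFICATE lines):
#
#     cert = parseDEPCert('MIIB...')  # -> cryptography certificate object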
class DEPParserI(object):
"""
The base class for DEP parsers. This interface allows reading a DEP in
small chunks without having to store it in memory entirely. Do not use this
directly, use one of the subclasses.
"""
def parse(self, chunksize = 0):
"""
This function parses a DEP and yields chunks of at most chunksize
receipts. A chunk is a list of group tuples. Every group tuple consists
of a list of receipt JWS as byte arrays, a certificate object
containing the certificate used to sign the receipts (or None) and a
list of certificate objects with the certificates used to sign the
first certificate (or an empty list) in that order.
If the chunksize is non-zero, every chunk is guaranteed to contain at
most chunksize receipts in total (over all groups). Otherwise, the
maximum number of receipts is implementation dependent. Every yielded
chunk is guaranteed to contain at least one group tuple.
:param chunksize: A positive number specifying the maximum number of
receipts in one chunk or zero.
:yield: One chunk at a time as described above.
:throws: DEPParseException
"""
raise NotImplementedError("Please implement this yourself.")
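# A minimal consumption sketch (illustration only, not part of the original
# module). Any DEPParserI implementation can be walked like this; the chunk
# layout is the one documented in parse() above.
def _exampleConsumeChunks(parser, chunksize=1000):
    for chunk in parser.parse(chunksize):
        for recs, cert, cert_chain in chunk:
            for rec in recs:
                jws = expandDEPReceipt(rec)
                # ... verify or otherwise process the receipt JWS here ...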
class IncrementalDEPParser(DEPParserI):
"""
A DEP parser that reads a DEP from a file descriptor. Do not use this
directly, use one of the subclasses or the fromFd() method which will return
an appropriate parser object.
"""
def __init__(self, fd):
# skipBOM checks if we can seek, so no harm in doing it to a non-file
self.startpos = utils.skipBOM(fd)
self.fd = fd
@staticmethod
def fromFd(fd, need_certs=True):
"""
Returns a new IncrementalDEPParser object using the specified file
descriptor. If chunks don't necessarily have to contain the DEP group
certificates (because, for example, no signature verification is
performed), the need_certs parameter can be set to False. In this case
fromFd() will return a CertlessStreamDEPParser. If need_certs is True,
it will return a FileDEPParser for a seekable file descriptor and a
StreamDEPParser for a non-seekable one.
:param fd: The file descriptor to use.
:param need_certs: Whether chunks need to contain the group
certificates.
:return: An IncrementalDEPParser object using fd as data source.
"""
if not need_certs:
return CertlessStreamDEPParser(fd)
try:
fd.tell()
return FileDEPParser(fd)
except IOError:
return StreamDEPParser(fd)
def _needCerts(self, state, chunksize, groupidx):
raise NotImplementedError("Please implement this yourself.")
def parse(self, chunksize = 0):
parser = ijson.parse(self.fd)
state = DEPStateRoot(chunksize)
got_something = False
try:
for prefix, event, value in parser:
nextState = state.parse(prefix, event, value)
if state.ready():
needed = state.needCrt()
if needed is not None:
self._needCerts(state, chunksize, needed)
yield state.getChunk()
got_something = True
state = nextState
# The entire DEP is parsed, get the rest.
# We should have found any certs here, so no check needed.
last = state.getChunk()
if len(last) > 0:
yield last
elif not got_something:
raise MalformedDEPException(_('No receipts found'))
except ijson.JSONError as e:
raise DEPParseException(_('Malformed JSON: {}.').format(e))
class StreamDEPParser(IncrementalDEPParser):
"""
A DEP parser that reads a DEP from a stream type file descriptor. Such a
file descriptor is not seekable. The parse() method will raise an exception
if an element needed to construct a chunk was not read by the time the
chunk has to be yielded. It will not perform any look-ahead operations
because all receipts read until the missing elements are found would need
to be stored in memory, thus defeating the purpose of the parser API.
A chunksize of zero for the parse() method will cause all receipts in the
DEP to be returned in a single chunk.
"""
def _needCerts(self, state, chunksize, groupidx):
raise MalformedDEPException(
_("Element \"Signaturzertifikat\" or \"Zertifizierungsstellen\" missing"),
groupidx)
def parse(self, chunksize = 0):
return super(StreamDEPParser, self).parse(chunksize)
class CertlessStreamDEPParser(StreamDEPParser):
"""
This DEP parser behaves identically to StreamDEPParser, except for the
fact, that it will not raise an exception if a DEP element needed to
construct the current chunk has not been read yet. Instead, the yielded
chunk will have these elements set to None (for Signaturzertifikat) and the
empty list (for Zertifizierungsstellen) respectively.
Note that the parser will still not tolerate if the elements are missing
altogether.
"""
def _needCerts(self, state, chunksize, groupidx):
# Do nothing, we don't really care about certs.
# The parser will still fail if they are outright missing, but we are ok
# with returning chunks without certs even though the DEP contains some.
pass
class FileDEPParser(IncrementalDEPParser):
"""
A DEP parser that reads a DEP from a seekable file. If DEP elements needed
to construct the current chunk are missing, this parser will perform an
additional parsing pass to locate these elements before returning the
chunk. If the total number of such elements is less than the given
chunksize, they will be cached in memory to avoid having to do even more
parsing passes.
A chunksize of zero for the parse() method will cause all receipts in the
DEP to be returned in a single chunk.
"""
def __getItems(self, prefix, chunksize):
if prefix in self.cache:
return self.cache[prefix]
# cache miss, gotta parse the JSON again
ofs = self.fd.tell()
self.fd.seek(self.startpos)
items = list(ijson.items(self.fd, prefix))
self.fd.seek(ofs)
if chunksize == 0 or len(items) <= chunksize:
self.cache[prefix] = items
return items
def _needCerts(self, state, chunksize, groupidx):
cert_str = self.__getItems(
'Belege-Gruppe.item.Signaturzertifikat', chunksize)[groupidx]
cert_str_list = self.__getItems(
'Belege-Gruppe.item.Zertifizierungsstellen', chunksize)[groupidx]
cert = parseDEPCert(cert_str) if cert_str != '' else None
cert_list = [ parseDEPCert(cs) for cs in cert_str_list ]
state.setCrt(cert, cert_list)
def parse(self, chunksize = 0):
self.fd.seek(self.startpos)
self.cache = dict()
return super(FileDEPParser, self).parse(chunksize)
def totalRecsInDictDEP(dep):
def _nrecs(group):
try:
recs = group['Belege-kompakt']
if not isinstance(recs, list):
return 0
return len(recs)
except (TypeError, KeyError):
return 0
bg = dep.get('Belege-Gruppe', [])
if not isinstance(bg, list):
return 0
return sum(_nrecs(g) for g in bg)
class DictDEPParser(DEPParserI):
"""
A DEP parser that accepts an already parsed dictionary data structure and
yields chunks of the requested size. This parser is intended to parse DEPs
that are already completely in memory anyway but emulates the parser API
for compatibility.
If the chunksize is zero and the nparts parameter equals 1, the parse()
method will return each group in the DEP in its own chunk.
If the chunksize is zero and the nparts parameter is greater than 1, the
parse() method will try to evenly distribute the receipts over nparts
chunks. It will then yield at most nparts chunks.
"""
def __init__(self, dep, nparts = 1):
self.dep = dep
self.nparts = nparts
def _parseDEPGroup(self, group, idx):
if not isinstance(group, dict):
raise MalformedDEPElementException('Belege-Gruppe', idx)
if 'Belege-kompakt' not in group:
raise MissingDEPElementException('Belege-kompakt', idx)
if 'Signaturzertifikat' not in group:
raise MissingDEPElementException('Signaturzertifikat', idx)
if 'Zertifizierungsstellen' not in group:
raise MissingDEPElementException('Zertifizierungsstellen', idx)
cert_str = group['Signaturzertifikat']
cert_str_list = group['Zertifizierungsstellen']
if not isinstance(cert_str, string_types):
raise MalformedDEPElementException('Signaturzertifikat',
_('not a string'), idx)
if not isinstance(cert_str_list, list):
raise MalformedDEPElementException('Zertifizierungsstellen',
_('not a list'), idx)
# check the list type up front: a generator over a non-list would only
# fail lazily (or never, since creating it already calls iter())
if not isinstance(group['Belege-kompakt'], list):
raise MalformedDEPElementException('Belege-kompakt',
_('not a list'), idx)
receipts = (shrinkDEPReceipt(r) for r in group['Belege-kompakt'])
cert = parseDEPCert(cert_str) if cert_str != '' else None
cert_list = [ parseDEPCert(cs) for cs in cert_str_list ]
return receipts, cert, cert_list
def _groupChunkGen(self, chunksize, groups):
if chunksize == 0:
groupidx = 0
for group in groups:
recgen, cert, certs = self._parseDEPGroup(group, groupidx)
recs = list(recgen)
if len(recs) > 0:
yield [(recs, cert, certs)]
groupidx += 1
return
chunk = list()
chunklen = 0
groupidx = 0
for group in groups:
recgen, cert, cert_list = self._parseDEPGroup(group, groupidx)
nextrecs = list()
for rec in recgen:
nextrecs.append(rec)
chunklen += 1
if chunklen >= chunksize:
chunk.append((nextrecs, cert, cert_list))
yield chunk
nextrecs = list()
chunk = list()
chunklen = 0
if len(nextrecs) > 0:
chunk.append((nextrecs, cert, cert_list))
groupidx += 1
if chunklen > 0:
yield chunk
def parse(self, chunksize = 0):
if not isinstance(self.dep, dict):
raise MalformedDEPException(_('Malformed DEP root'))
if 'Belege-Gruppe' not in self.dep:
raise MissingDEPElementException('Belege-Gruppe')
bg = self.dep['Belege-Gruppe']
if not isinstance(bg, list) or not bg:
raise MalformedDEPElementException('Belege-Gruppe')
if self.nparts > 1 and not chunksize:
nrecs = totalRecsInDictDEP(self.dep)
chunksize = int(ceil(float(nrecs) / self.nparts))
got_something = False
for chunk in self._groupChunkGen(chunksize, bg):
yield chunk
got_something = True
if not got_something:
raise MalformedDEPException(_('No receipts found'))
class FullFileDEPParser(DEPParserI):
"""
This parser behaves like DictDEPParser but accepts a file descriptor from
which to read the JSON instead of an already parsed dictionary structure.
The file is read in its entirety on the first call to parse() and JSON
parsed contents are kept in memory. Subsequent calls reuse these contents.
"""
def __init__(self, fd, nparts = 1):
self.fd = fd
self.nparts = nparts
self.dictParser = None
def parse(self, chunksize = 0):
if not self.dictParser:
try:
dep = utils.readJsonStream(self.fd)
except (IOError, UnicodeDecodeError, ValueError) as e:
raise DEPParseException(_('Malformed JSON: {}.').format(e))
self.dictParser = DictDEPParser(dep, self.nparts)
return self.dictParser.parse(chunksize)
def receiptGroupAdapter(depgen):
"""
Adapts a chunk generator, as returned by DEPParserI.parse(), into a
generator of single group tuples, decoding each receipt JWS byte array
via receipt.Receipt.fromJWSString() on the way.
"""
for chunk in depgen:
for recs, cert, cert_list in chunk:
rec_tuples = [ receipt.Receipt.fromJWSString(expandDEPReceipt(r))
for r in recs ]
recs = None
yield (rec_tuples, cert, cert_list)
rec_tuples = None
chunk = None
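# End-to-end sketch (illustration only): parse an in-memory DEP dict and walk
# the decoded receipt groups. Assumes dep is a dict of the shape accepted by
# DictDEPParser.
#
#     parser = DictDEPParser(dep)
#     for rec_tuples, cert, cert_chain in receiptGroupAdapter(parser.parse(500)):
#         ...  # rec_tuples holds receipt.Receipt.fromJWSString() results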
|
goyal-sidd/BLT
|
website/migrations/0037_auto_20170813_0319.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-08-13 03:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('website', '0036_auto_20170813_0049'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='follows',
field=models.ManyToManyField(blank=True, related_name='follower', to='website.UserProfile'),
),
]
|
madprof/alpha-hub
|
prototype/pool.py
|
# |ALPHA| Hub - an authorization server for alpha-ioq3
# See files README and COPYING for copyright and licensing details.
"""
A throwaway thread pool with thread-local storage.
Throwaway Tasks
===============
A throwaway task is one you'd *like* to get done, but it's
not a big deal if it doesn't actually get done.
Scary as it may be, throwaway tasks are quite common in the
wild. They usually compute for a while and then *end* with
*one* operation that permanently changes the state of the
world: think of committing to a database or sending a lone
network packet.
Tasks that iterate while modifying the world, or tasks that
need to apply more than one operation to change the world
consistently, are *not* throwaway. You have been warned.
This thread pool assumes that all tasks are throwaway. It
doesn't care if they finish and it certainly doesn't care
to tell anyone that a task is done. Also, while some pools
go to great lengths to cope with blocked threads, this one
assumes that your application is broken if no progress can
be made for a certain amount of time; see add() below.
Thread-local Storage
====================
Threads frequently require some local storage of their own;
for example, each thread may need to hold its own database
connection.
This thread pool automatically equips each worker with local
storage; see __init__() and add() below.
"""
import inspect as I
import logging as L
import Queue as Q
import threading as T
class _NullHandler(L.Handler):
"""Logging handler that does nothing."""
def emit(self, _record):
pass
L.getLogger("com.urbanban.threading.throwaway.pool").addHandler(_NullHandler())
class _Worker(T.Thread):
"""Worker thread, don't instantiate directly!"""
def __init__(self, task_queue, init_local=None):
"""Initialize and start a new worker."""
super(_Worker, self).__init__()
assert isinstance(task_queue, Q.Queue)
assert init_local is None or callable(init_local)
self.__task_queue = task_queue
self.__init_local = init_local
self.daemon = True
self.start()
def run(self):
"""Worker thread main loop."""
storage = self.__make_local()
self.__run_forever(storage)
def __make_local(self):
"""Create and initialize thread-local storage."""
storage = T.local()
if self.__init_local is not None:
self.__init_local(storage)
return storage
def __run_forever(self, storage):
"""Grab the next task and run it."""
while True:
task = self.__task_queue.get()
self.__run_task(task, storage)
self.__task_queue.task_done()
def __run_task(self, task, storage):
"""Run a single task."""
func, args, kwargs = task
required_args, _, _, _ = I.getargspec(func)
try:
if '_tp_local' in required_args:
func(_tp_local=storage, *args, **kwargs)
else:
func(*args, **kwargs)
except Exception as exc:
L.exception(
"exception %s during %s ignored by thread pool",
exc, func
)
class ThreadPool(object):
"""The thread pool."""
def __init__(self, num_threads=4, max_tasks=16, timeout=32,
init_local=None, stack_size=None):
"""
Initialize and start a new thread pool.
Exactly num_threads will be spawned. At most max_tasks
can be queued before add() blocks; add() blocks for at
most timeout seconds before raising an exception.
You can pass a callable with one argument as init_local
to initialize thread-local storage for each thread; see
add() below for how to access thread-local storage from
your tasks. For example:
import sqlite3
...
def init_local(local):
local.connection = sqlite3.connect("some.db")
...
pool = ThreadPool(init_local=init_local)
"""
assert num_threads > 0
assert max_tasks > 0
assert timeout > 0
# TODO: undocumented and probably a very bad idea
assert stack_size is None or stack_size > 16*4096
if stack_size is not None:
T.stack_size(stack_size)
self.__queue = Q.Queue(max_tasks)
self.__timeout = timeout
for _ in range(num_threads):
_Worker(self.__queue, init_local)
def add(self, func, *args, **kwargs):
"""
Add a task.
A task consists of a callable func and arguments for
func. For example:
def task(some, argu, ments=None):
...
pool.add(task, act, ual, ments=parameters)
You can access thread-local storage by requiring the
special "_tp_local" argument for func. For example:
def task(_tp_local, some, argu, ments=None):
_tp_local.connection.rollback()
...
_tp_local.connection.commit()
...
pool.add(task, act, ual, ments=parameters)
"""
assert callable(func)
self.__queue.put((func, args, kwargs), True, self.__timeout)
def test():
"""Simple example and test case."""
from random import uniform
from time import sleep
from signal import pause
def init_local(local):
"""A silly local. :-D"""
local.x = uniform(0, 1)
local.y = 0
L.info("init_local local.x %s", local.x)
def task(number, _tp_local):
"""A silly task. :-D"""
L.info("task %s thread local.x %s", number, _tp_local.x)
L.info("task %s started", number)
sleep(uniform(1, 4))
L.info("task %s finished", number)
_tp_local.y += 1
L.info("thread %s has finished %s tasks", _tp_local.x, _tp_local.y)
pool = ThreadPool(init_local=init_local)
L.info("starting to add tasks to pool")
for i in range(32):
pool.add(task, i)
L.info("all tasks added, press CTRL-C to exit")
pause()
if __name__ == "__main__":
L.basicConfig(level=L.DEBUG,
format="%(asctime)s - %(levelname)s - %(message)s")
test()
|
abramhindle/UnnaturalCodeFork
|
python/testdata/launchpad/scripts/process-one-mail.py
|
#!/usr/bin/python -S
#
# Copyright 2011 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Process one email message, read from stdin."""
import _pythonpath
import sys
from lp.services.config import config
from lp.services.mail.helpers import save_mail_to_librarian
from lp.services.mail.incoming import handle_one_mail
from lp.services.mail.signedmessage import signed_message_from_string
from lp.services.scripts.base import LaunchpadScript
class ProcessMail(LaunchpadScript):
usage = """%prog [options] [MAIL_FILE]
Process one incoming email, read from the specified file or from stdin.
Any mail generated in response is printed to stdout.
""" + __doc__
def main(self):
self.txn.begin()
# NB: This somewhat duplicates handleMail, but there it's mixed in
# with handling a mailbox, which we're avoiding here.
if len(self.args) >= 1:
            from_file = open(self.args[0], 'rb')
else:
from_file = sys.stdin
self.logger.debug("reading message from %r" % (from_file,))
raw_mail = from_file.read()
self.logger.debug("got %d bytes" % len(raw_mail))
file_alias = save_mail_to_librarian(raw_mail)
self.logger.debug("saved to librarian as %r" % (file_alias,))
parsed_mail = signed_message_from_string(raw_mail)
# Kinda kludgey way to cause sendmail to just print it.
config.sendmail_to_stdout = True
handle_one_mail(
self.logger, parsed_mail,
file_alias, file_alias.http_url,
signature_timestamp_checker=None)
self.logger.debug("mail handling complete")
self.txn.commit()
if __name__ == '__main__':
script = ProcessMail('process-one-mail', dbuser=config.processmail.dbuser)
# No need to lock; you can run as many as you want as they use no global
# resources (like a mailbox).
script.run(use_web_security=True)
|
shyba/cryptosync
|
cryptosync/tests/test_webserver.py
|
from twisted.trial.unittest import TestCase
from mock import Mock
from twisted.web.test.test_web import DummyRequest
from twisted.web.http import OK, NOT_FOUND
from cryptosync.resources import make_site
def make_request(uri='', method='GET', args=None):
    # Avoid a mutable default argument; fall back to a fresh dict per call.
    site = make_site(authenticator=Mock())
    request = DummyRequest(uri.split('/'))
    request.method = method
    request.args = args if args is not None else {}
resource = site.getResourceFor(request)
request.render(resource)
request.data = "".join(request.written)
return request
class RootResourceResponseCodesTestCase(TestCase):
def test_root_resource_ok(self):
request = make_request()
self.assertEquals(request.responseCode, OK)
def test_root_resource_not_found_url(self):
request = make_request(uri='shouldneverfindthisthing')
self.assertEquals(request.responseCode, NOT_FOUND)
class AuthResourceTestCase(TestCase):
def _try_auth(self, credentials, expected):
request = make_request(uri='/auth/', method='POST', args=credentials)
self.assertEquals(request.responseCode, OK)
self.assertEquals(request.data, expected)
def test_auth_success_with_good_parameters(self):
credentials = {'username': 'myself', 'password': 'somethingawesome'}
self._try_auth(credentials, '{"status": "success"}')
def test_auth_failure_with_missing_parameters(self):
credentials = {'username': 'myself', 'password': 'somethingawesome'}
for (k, v) in credentials.items():
self._try_auth({k: v}, '{"status": "failure"}')
|
joshbohde/megaminer-framework
|
server/networking/Filter.py
|
"""
Copyright (C) 2008 by Steven Wallace
snwallace@gmail.com
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the
Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
from __future__ import with_statement
import struct
import threading
import sys, traceback, time
def cascadeSetIn(a, b):
a.setIn(b)
return b
class NetworkException(Exception):
pass
class Filter:
def __init__(self, *args):
self.input = None
self.output = None
self.server = False
self.master = None
self.initialized = threading.Event()
self.wlock = threading.Lock()
self.rlock = threading.Lock()
self.init_lock = threading.Lock()
self._init(*args)
def _init(self, *args):
pass
def disconnect(self):
if self.input:
self.input.disconnect()
def begin(self):
with self.init_lock:
if not self.initialized.isSet():
self._begin()
if self.input:
if not self.initialized.isSet():
self.initialized.wait()
self.input.begin()
def _begin(self):
self.initialized.set()
def end(self):
if self.output:
self.output.end()
def setIn(self, input = None):
self.input = input
if input:
input.setOut(self)
def setOut(self, output = None):
self.output = output
def readIn(self, data):
self.writeOut(data)
def readOut(self, data):
with self.rlock:
self._readOut(data)
def _readOut(self, data):
self.writeIn(data)
def writeIn(self, data):
if self.input:
self.input.readOut(data)
def writeOut(self, data):
self.initialized.wait()
with self.wlock:
self._writeOut(data)
def _writeOut(self, data):
if self.output:
self.output.readIn(data)
def error(self, error):
raise NetworkException(error)
class PacketizerFilter(Filter):
def _init(self):
self.received = ""
    def _readOut(self, data):
        self.received += data
        # Frames are length-prefixed: a 4-byte big-endian length followed by
        # the payload. Deliver complete frames and keep any partial remainder.
        while len(self.received) > 3:
            length, = struct.unpack("!i", self.received[:4])
            if length + 4 <= len(self.received):
                self.writeIn(self.received[4:length + 4])
                self.received = self.received[length + 4:]
            else:
                return
def _writeOut(self, data):
Filter._writeOut(self, struct.pack("!i",len(data))+data)
class CompressionFilter(Filter):
    def _init(self):
        self.algorithms = {}
        self.otherAlgorithms = []
        # Register whichever compression modules are importable; each key is
        # the one-byte algorithm identifier exchanged on the wire.
        try:
            import zlib
            self.algorithms['z'] = zlib
        except ImportError:
            pass
        try:
            import bz2
            self.algorithms['b'] = bz2
        except ImportError:
            pass
        try:
            import noCompress
            self.algorithms['n'] = noCompress
        except ImportError:
            pass
def _begin(self):
if self.server:
self._writeOut(''.join(self.algorithms.keys()))
    def _readOut(self, data):
        if not self.initialized.isSet():
            # Handshake: each peer announces the algorithms it supports as a
            # string of one-byte identifiers. The server sends first (from
            # _begin); the client replies with its own list on receipt.
            if self.server:
                self.otherAlgorithms = [i for i in data]
                self.initialized.set()
                self.begin()
            else:
                self.otherAlgorithms = [i for i in data]
                self._writeOut(''.join(self.algorithms.keys()))
                self.initialized.set()
                self.begin()
        else:
            # After the handshake every frame starts with the identifier of
            # the algorithm used to compress the rest of the frame.
            algorithm = data[0]
            if algorithm not in self.algorithms:
                self.error("UNKNOWN COMPRESSION ALGORITHM " + data)
            self.writeIn(self.algorithms[algorithm].decompress(data[1:]))
    def _writeOut(self, data):
        if not self.initialized.isSet():
            # Still handshaking: send the algorithm list uncompressed.
            Filter._writeOut(self, data)
        else:
            # Try every mutually supported algorithm and keep whichever yields
            # the smallest payload; 'n' (no compression) is the fallback.
            algorithm = 'n'
            newData = data
            for i in self.otherAlgorithms:
                if i in self.algorithms:
                    tmpData = self.algorithms[i].compress(data, 9)
                    if len(tmpData) < len(newData):
                        newData = tmpData
                        algorithm = i
            Filter._writeOut(self, ''.join((algorithm, newData)))
class EncryptionFilter(Filter):
pass #TODO
class TCPFilter(Filter):
def _init(self, connection = None):
self.connection = connection
def _writeOut(self, data):
if self.connection:
try:
self.connection.send(data)
except:
pass
def poll(self):
try:
data = self.connection.recv(4096)
if data:
self.readOut(data)
else:
self.disconnect()
except:
print "bleh!"
traceback.print_exc(file=sys.stdout)
self.disconnect()
def disconnect(self):
self.master.remove(self.connection)
if self.connection:
self.connection.close()
Filter.disconnect(self)
def end(self):
self.disconnect()
|
hip-odoo/odoo
|
addons/delivery/models/delivery_carrier.py
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from odoo import api, fields, models, _
from odoo.exceptions import UserError, ValidationError
from odoo.tools.safe_eval import safe_eval
_logger = logging.getLogger(__name__)
class DeliveryCarrier(models.Model):
_name = 'delivery.carrier'
_inherits = {'product.product': 'product_id'}
_description = "Carrier"
_order = 'sequence, id'
''' A Shipping Provider
In order to add your own external provider, follow these steps:
1. Create your model MyProvider that _inherit 'delivery.carrier'
2. Extend the selection of the field "delivery_type" with a pair
('<my_provider>', 'My Provider')
3. Add your methods:
<my_provider>_get_shipping_price_from_so
<my_provider>_send_shipping
            <my_provider>_get_tracking_link
<my_provider>_cancel_shipment
(they are documented hereunder)
'''
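    # A minimal sketch (hypothetical provider name and flat-rate values, not a
    # real integration) of the subclass those steps describe:
    #
    #     class MyProvider(models.Model):
    #         _inherit = 'delivery.carrier'
    #         delivery_type = fields.Selection(
    #             selection_add=[('my_provider', 'My Provider')])
    #
    #         def my_provider_get_shipping_price_from_so(self, orders):
    #             return [15.0 for order in orders]  # flat rate per order
    #
    #         def my_provider_send_shipping(self, pickings):
    #             return [{'exact_price': 15.0, 'tracking_number': 'TRACK123'}
    #                     for picking in pickings]
    #
    #         def my_provider_get_tracking_link(self, pickings):
    #             return ['http://example.com/track/TRACK123'
    #                     for picking in pickings]
    #
    #         def my_provider_cancel_shipment(self, pickings):
    #             pass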
# -------------------------------- #
# Internals for shipping providers #
# -------------------------------- #
sequence = fields.Integer(help="Determine the display order", default=10)
# This field will be overwritten by internal shipping providers by adding their own type (ex: 'fedex')
delivery_type = fields.Selection([('fixed', 'Fixed Price'), ('base_on_rule', 'Based on Rules')], string='Provider', default='fixed', required=True)
product_type = fields.Selection(related='product_id.type', default='service')
product_sale_ok = fields.Boolean(related='product_id.sale_ok', default=False)
product_id = fields.Many2one('product.product', string='Delivery Product', required=True, ondelete="cascade")
price = fields.Float(compute='get_price')
available = fields.Boolean(compute='get_price')
    free_if_more_than = fields.Boolean('Free if Order total is more than', help="If the order total exceeds a certain amount, the customer benefits from free shipping", default=False)
amount = fields.Float(string='Amount', help="Amount of the order to benefit from a free shipping, expressed in the company currency")
country_ids = fields.Many2many('res.country', 'delivery_carrier_country_rel', 'carrier_id', 'country_id', 'Countries')
state_ids = fields.Many2many('res.country.state', 'delivery_carrier_state_rel', 'carrier_id', 'state_id', 'States')
zip_from = fields.Char('Zip From')
zip_to = fields.Char('Zip To')
price_rule_ids = fields.One2many('delivery.price.rule', 'carrier_id', 'Pricing Rules', copy=True)
    fixed_price = fields.Float(compute='_compute_fixed_price', inverse='_set_product_fixed_price', store=True, string='Fixed Price', help="Keep empty if the pricing depends on the advanced pricing per destination")
integration_level = fields.Selection([('rate', 'Get Rate'), ('rate_and_ship', 'Get Rate and Create Shipment')], string="Integration Level", default='rate_and_ship', help="Action while validating Delivery Orders")
prod_environment = fields.Boolean("Environment", help="Set to True if your credentials are certified for production.")
margin = fields.Integer(help='This percentage will be added to the shipping price.')
_sql_constraints = [
('margin_not_under_100_percent', 'CHECK (margin >= -100)', 'Margin cannot be lower than -100%'),
]
@api.one
def toggle_prod_environment(self):
self.prod_environment = not self.prod_environment
@api.multi
def install_more_provider(self):
return {
'name': 'New Providers',
'view_mode': 'kanban',
'res_model': 'ir.module.module',
'domain': [['name', 'ilike', 'delivery_']],
'type': 'ir.actions.act_window',
'help': _('''<p class="oe_view_nocontent">
Buy Odoo Enterprise now to get more providers.
</p>'''),
}
@api.multi
def name_get(self):
display_delivery = self.env.context.get('display_delivery', False)
order_id = self.env.context.get('order_id', False)
if display_delivery and order_id:
order = self.env['sale.order'].browse(order_id)
currency = order.pricelist_id.currency_id.name or ''
res = []
for carrier_id in self.ids:
try:
r = self.read([carrier_id], ['name', 'price'])[0]
res.append((r['id'], r['name'] + ' (' + (str(r['price'])) + ' ' + currency + ')'))
except ValidationError:
r = self.read([carrier_id], ['name'])[0]
res.append((r['id'], r['name']))
else:
res = super(DeliveryCarrier, self).name_get()
return res
@api.depends('product_id.list_price', 'product_id.product_tmpl_id.list_price')
def _compute_fixed_price(self):
for carrier in self:
carrier.fixed_price = carrier.product_id.list_price
def _set_product_fixed_price(self):
for carrier in self:
carrier.product_id.list_price = carrier.fixed_price
@api.one
def get_price(self):
SaleOrder = self.env['sale.order']
self.available = False
self.price = False
order_id = self.env.context.get('order_id')
if order_id:
# FIXME: temporary hack until we refactor the delivery API in master
order = SaleOrder.browse(order_id)
if self.delivery_type not in ['fixed', 'base_on_rule']:
try:
computed_price = self.get_shipping_price_from_so(order)[0]
self.available = True
except ValidationError as e:
# No suitable delivery method found, probably configuration error
_logger.info("Carrier %s: %s, not found", self.name, e.name)
computed_price = 0.0
else:
carrier = self.verify_carrier(order.partner_shipping_id)
if carrier:
try:
computed_price = carrier.get_price_available(order)
self.available = True
except UserError as e:
# No suitable delivery method found, probably configuration error
_logger.info("Carrier %s: %s", carrier.name, e.name)
computed_price = 0.0
else:
computed_price = 0.0
self.price = computed_price * (1.0 + (float(self.margin) / 100.0))
# -------------------------- #
# API for external providers #
# -------------------------- #
# TODO define and handle exceptions that could be thrown by providers
def get_shipping_price_from_so(self, orders):
''' For every sale order, compute the price of the shipment
:param orders: A recordset of sale orders
:return list: A list of floats, containing the estimated price for the shipping of the sale order
'''
self.ensure_one()
if hasattr(self, '%s_get_shipping_price_from_so' % self.delivery_type):
return getattr(self, '%s_get_shipping_price_from_so' % self.delivery_type)(orders)
def send_shipping(self, pickings):
''' Send the package to the service provider
:param pickings: A recordset of pickings
:return list: A list of dictionaries (one per picking) containing of the form::
{ 'exact_price': price,
'tracking_number': number }
'''
self.ensure_one()
if hasattr(self, '%s_send_shipping' % self.delivery_type):
return getattr(self, '%s_send_shipping' % self.delivery_type)(pickings)
def get_tracking_link(self, pickings):
''' Ask the tracking link to the service provider
:param pickings: A recordset of pickings
:return list: A list of string URLs, containing the tracking links for every picking
'''
self.ensure_one()
if hasattr(self, '%s_get_tracking_link' % self.delivery_type):
return getattr(self, '%s_get_tracking_link' % self.delivery_type)(pickings)
def cancel_shipment(self, pickings):
''' Cancel a shipment
:param pickings: A recordset of pickings
'''
self.ensure_one()
if hasattr(self, '%s_cancel_shipment' % self.delivery_type):
return getattr(self, '%s_cancel_shipment' % self.delivery_type)(pickings)
@api.onchange('state_ids')
def onchange_states(self):
self.country_ids = [(6, 0, self.country_ids.ids + self.state_ids.mapped('country_id.id'))]
@api.onchange('country_ids')
def onchange_countries(self):
self.state_ids = [(6, 0, self.state_ids.filtered(lambda state: state.id in self.country_ids.mapped('state_ids').ids).ids)]
@api.multi
def verify_carrier(self, contact):
self.ensure_one()
if self.country_ids and contact.country_id not in self.country_ids:
return False
if self.state_ids and contact.state_id not in self.state_ids:
return False
if self.zip_from and (contact.zip or '') < self.zip_from:
return False
if self.zip_to and (contact.zip or '') > self.zip_to:
return False
return self
@api.multi
def create_price_rules(self):
PriceRule = self.env['delivery.price.rule']
for record in self:
# If using advanced pricing per destination: do not change
if record.delivery_type == 'base_on_rule':
continue
# Not using advanced pricing per destination: override lines
if record.delivery_type == 'base_on_rule' and not (record.fixed_price is not False or record.free_if_more_than):
record.price_rule_ids.unlink()
# Check that float, else 0.0 is False
if not (record.fixed_price is not False or record.free_if_more_than):
continue
if record.delivery_type == 'fixed':
PriceRule.search([('carrier_id', '=', record.id)]).unlink()
line_data = {
'carrier_id': record.id,
'variable': 'price',
'operator': '>=',
}
# Create the delivery price rules
if record.free_if_more_than:
line_data.update({
'max_value': record.amount,
'standard_price': 0.0,
'list_base_price': 0.0,
})
PriceRule.create(line_data)
if record.fixed_price is not False:
line_data.update({
'max_value': 0.0,
'standard_price': record.fixed_price,
'list_base_price': record.fixed_price,
})
PriceRule.create(line_data)
return True
@api.model
def create(self, vals):
res = super(DeliveryCarrier, self).create(vals)
res.create_price_rules()
return res
@api.multi
def write(self, vals):
res = super(DeliveryCarrier, self).write(vals)
self.create_price_rules()
return res
@api.multi
def get_price_available(self, order):
self.ensure_one()
total = weight = volume = quantity = 0
total_delivery = 0.0
for line in order.order_line:
if line.state == 'cancel':
continue
if line.is_delivery:
total_delivery += line.price_total
if not line.product_id or line.is_delivery:
continue
qty = line.product_uom._compute_quantity(line.product_uom_qty, line.product_id.uom_id)
weight += (line.product_id.weight or 0.0) * qty
volume += (line.product_id.volume or 0.0) * qty
quantity += qty
total = (order.amount_total or 0.0) - total_delivery
total = order.currency_id.with_context(date=order.date_order).compute(total, order.company_id.currency_id)
return self.get_price_from_picking(total, weight, volume, quantity)
def get_price_from_picking(self, total, weight, volume, quantity):
price = 0.0
criteria_found = False
price_dict = {'price': total, 'volume': volume, 'weight': weight, 'wv': volume * weight, 'quantity': quantity}
        for line in self.price_rule_ids:
            # Evaluates an expression such as "price>=100.0" against the
            # totals computed for the order.
            test = safe_eval(line.variable + line.operator + str(line.max_value), price_dict)
if test:
price = line.list_base_price + line.list_price * price_dict[line.variable_factor]
criteria_found = True
break
if not criteria_found:
raise UserError(_("Selected product in the delivery method doesn't fulfill any of the delivery carrier(s) criteria."))
return price
|
suutari/shoop
|
shuup_tests/core/test_payments_api.py
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
import json
from rest_framework.test import APIClient
from rest_framework import status
from shuup.core.models import Order
from shuup.testing.factories import (
create_order_with_product, get_default_product,
get_default_shop, get_default_supplier
)
def create_order():
shop = get_default_shop()
product = get_default_product()
supplier = get_default_supplier()
order = create_order_with_product(
product,
shop=shop,
supplier=supplier,
quantity=1,
taxless_base_unit_price=10,
)
order.cache_prices()
order.save()
return order
def get_client(admin_user):
client = APIClient()
client.force_authenticate(user=admin_user)
return client
def get_create_payment_url(order_pk):
return "/api/shuup/order/%s/create_payment/" % order_pk
def get_set_fully_paid_url(order_pk):
return "/api/shuup/order/%s/set_fully_paid/" % order_pk
def get_order_url(order_pk):
return "/api/shuup/order/%s/" % order_pk
def test_create_payment(admin_user):
order = create_order()
client = get_client(admin_user)
payment_identifier = "some_identifier"
data = {
"amount_value": 1,
"payment_identifier": payment_identifier,
"description": "some_payment"
}
response = client.post(
get_create_payment_url(order.pk),
data,
format="json"
)
assert response.status_code == status.HTTP_201_CREATED
assert order.get_total_paid_amount().value == 1
response = client.get(
get_order_url(order.pk),
format="json"
)
assert response.status_code == status.HTTP_200_OK
order_data = json.loads(response.content.decode("utf-8"))
payments = order_data["payments"]
assert len(payments) == 1
assert payments[0]["payment_identifier"] == payment_identifier
def test_set_fully_paid(admin_user):
order = create_order()
client = get_client(admin_user)
data = {
"payment_identifier": 1,
"description": "some_payment"
}
order_pk = order.pk
response = client.post(
get_set_fully_paid_url(order_pk),
data,
format="json"
)
assert response.status_code == status.HTTP_201_CREATED
order = Order.objects.get(pk=order_pk)
assert bool(order.is_paid())
currently_paid_amount = order.get_total_paid_amount()
    # Make sure that the API works with already fully paid orders
response = client.post(
"/api/shuup/order/%s/set_fully_paid/" % order_pk,
data,
format="json"
)
assert response.status_code == status.HTTP_200_OK
order = Order.objects.get(pk=order_pk)
assert bool(order.is_paid())
assert currently_paid_amount == order.get_total_paid_amount()
def test_set_paid_from_partially_paid_order(admin_user):
order = create_order()
client = get_client(admin_user)
data = {
"amount_value": 1,
"payment_identifier": 1,
"description": "some_payment"
}
response = client.post(
get_create_payment_url(order.pk),
data,
format="json"
)
assert response.status_code == status.HTTP_201_CREATED
assert order.get_total_paid_amount().value == 1
data = {
"payment_identifier": 2,
"description": "some_payment"
}
order_pk = order.pk
response = client.post(
get_set_fully_paid_url(order_pk),
data,
format="json"
)
assert response.status_code == status.HTTP_201_CREATED
order = Order.objects.get(pk=order_pk)
assert bool(order.is_paid())
assert bool(order.get_total_paid_amount() == order.taxful_total_price.amount)
|
moschlar/SAUCE
|
sauce/controllers/crc/selectors.py
|
'''
@since: 2015-01-07
@author: moschlar
'''
import sqlalchemy.types as sqlat
import tw2.core as twc
import tw2.bootstrap.forms as twb
import tw2.jqplugins.chosen.widgets as twjc
import sprox.widgets.tw2widgets.widgets as sw
from sprox.sa.widgetselector import SAWidgetSelector
from sprox.sa.validatorselector import SAValidatorSelector, Email
from sauce.widgets.widgets import (LargeMixin, SmallMixin, AdvancedWysihtml5,
MediumTextField, SmallTextField, CalendarDateTimePicker)
from sauce.widgets.validators import AdvancedWysihtml5BleachValidator
class ChosenPropertyMultipleSelectField(LargeMixin, twjc.ChosenMultipleSelectField, sw.PropertyMultipleSelectField):
search_contains = True
def _validate(self, value, state=None):
value = super(ChosenPropertyMultipleSelectField, self)._validate(value, state)
if self.required and not value:
raise twc.ValidationError('Please select at least one value')
else:
return value
class ChosenPropertySingleSelectField(SmallMixin, twjc.ChosenSingleSelectField, sw.PropertySingleSelectField):
search_contains = True
class MyWidgetSelector(SAWidgetSelector):
'''Custom WidgetSelector for SAUCE
Primarily uses fields from tw2.bootstrap.forms and tw2.jqplugins.chosen.
'''
text_field_limit = 256
default_multiple_select_field_widget_type = ChosenPropertyMultipleSelectField
default_single_select_field_widget_type = ChosenPropertySingleSelectField
default_name_based_widgets = {
'name': MediumTextField,
'subject': MediumTextField,
'_url': MediumTextField,
'user_name': MediumTextField,
'email_address': MediumTextField,
'_display_name': MediumTextField,
'description': AdvancedWysihtml5,
'message': AdvancedWysihtml5,
}
def __init__(self, *args, **kwargs):
self.default_widgets.update({
sqlat.String: MediumTextField,
sqlat.Integer: SmallTextField,
sqlat.Numeric: SmallTextField,
sqlat.DateTime: CalendarDateTimePicker,
sqlat.Date: twb.CalendarDatePicker,
sqlat.Time: twb.CalendarTimePicker,
sqlat.Binary: twb.FileField,
sqlat.BLOB: twb.FileField,
sqlat.PickleType: MediumTextField,
sqlat.Enum: twjc.ChosenSingleSelectField,
})
super(MyWidgetSelector, self).__init__(*args, **kwargs)
def select(self, field):
widget = super(MyWidgetSelector, self).select(field)
if (issubclass(widget, sw.TextArea)
and hasattr(field.type, 'length')
and (field.type.length is None or field.type.length < self.text_field_limit)):
widget = MediumTextField
return widget
class MyValidatorSelector(SAValidatorSelector):
_name_based_validators = {
'email_address': Email,
'description': AdvancedWysihtml5BleachValidator,
'message': AdvancedWysihtml5BleachValidator,
}
# def select(self, field):
# print 'MyValidatorSelector', 'select', field
# return super(MyValidatorSelector, self).select(field)
|
naresh21/synergetics-edx-platform
|
cms/djangoapps/contentstore/courseware_index.py
|
""" Code to allow module store to interface with courseware index """
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
from datetime import timedelta
import logging
import re
from six import add_metaclass
from django.conf import settings
from django.utils.translation import ugettext_lazy, ugettext as _
from django.core.urlresolvers import resolve
from contentstore.course_group_config import GroupConfiguration
from course_modes.models import CourseMode
from eventtracking import tracker
from openedx.core.lib.courses import course_image_url
from search.search_engine_base import SearchEngine
from xmodule.annotator_mixin import html_to_text
from xmodule.modulestore import ModuleStoreEnum
from xmodule.library_tools import normalize_key_for_search
# REINDEX_AGE is the default amount of time that we look back for changes
# that might have happened. If we are provided with the time at which
# indexing was triggered, then it is safe to index only the items changed
# within this window before that time, i.e. REINDEX_AGE is how far back
# from the trigger point to look when deciding what to re-index.
REINDEX_AGE = timedelta(0, 60)  # 60 seconds
log = logging.getLogger('edx.modulestore')
def strip_html_content_to_text(html_content):
""" Gets only the textual part for html content - useful for building text to be searched """
# Removing HTML-encoded non-breaking space characters
text_content = re.sub(r"(\s| |//)+", " ", html_to_text(html_content))
# Removing HTML CDATA
text_content = re.sub(r"<!\[CDATA\[.*\]\]>", "", text_content)
# Removing HTML comments
text_content = re.sub(r"<!--.*-->", "", text_content)
return text_content
def indexing_is_enabled():
"""
Checks to see if the indexing feature is enabled
"""
return settings.FEATURES.get('ENABLE_COURSEWARE_INDEX', False)
class SearchIndexingError(Exception):
""" Indicates some error(s) occured during indexing """
def __init__(self, message, error_list):
super(SearchIndexingError, self).__init__(message)
self.error_list = error_list
@add_metaclass(ABCMeta)
class SearchIndexerBase(object):
"""
Base class to perform indexing for courseware or library search from different modulestores
"""
__metaclass__ = ABCMeta
INDEX_NAME = None
DOCUMENT_TYPE = None
ENABLE_INDEXING_KEY = None
INDEX_EVENT = {
'name': None,
'category': None
}
@classmethod
def indexing_is_enabled(cls):
"""
Checks to see if the indexing feature is enabled
"""
return settings.FEATURES.get(cls.ENABLE_INDEXING_KEY, False)
@classmethod
@abstractmethod
def normalize_structure_key(cls, structure_key):
""" Normalizes structure key for use in indexing """
@classmethod
@abstractmethod
def _fetch_top_level(cls, modulestore, structure_key):
""" Fetch the item from the modulestore location """
@classmethod
@abstractmethod
def _get_location_info(cls, normalized_structure_key):
""" Builds location info dictionary """
@classmethod
def _id_modifier(cls, usage_id):
""" Modifies usage_id to submit to index """
return usage_id
@classmethod
def remove_deleted_items(cls, searcher, structure_key, exclude_items):
"""
        Remove any item that is present in the search index but absent from
        the updated list of indexed items.
"""
response = searcher.search(
doc_type=cls.DOCUMENT_TYPE,
field_dictionary=cls._get_location_info(structure_key),
exclude_dictionary={"id": list(exclude_items)}
)
result_ids = [result["data"]["id"] for result in response["results"]]
searcher.remove(cls.DOCUMENT_TYPE, result_ids)
@classmethod
def index(cls, modulestore, structure_key, triggered_at=None, reindex_age=REINDEX_AGE):
"""
Process course for indexing
Arguments:
modulestore - modulestore object to use for operations
structure_key (CourseKey|LibraryKey) - course or library identifier
triggered_at (datetime) - provides time at which indexing was triggered;
useful for index updates - only things changed recently from that date
(within REINDEX_AGE above ^^) will have their index updated, others skip
updating their index but are still walked through in order to identify
which items may need to be removed from the index
If None, then a full reindex takes place
Returns:
Number of items that have been added to the index
"""
error_list = []
searcher = SearchEngine.get_search_engine(cls.INDEX_NAME)
if not searcher:
return
structure_key = cls.normalize_structure_key(structure_key)
location_info = cls._get_location_info(structure_key)
        # Wrap the counter in a dictionary: Python 2 has no `nonlocal`, so
        # rebinding a bare integer inside the nested `prepare_item_index`
        # would create a new local variable instead of updating this one.
indexed_count = {
"count": 0
}
# indexed_items is a list of all the items that we wish to remain in the
# index, whether or not we are planning to actually update their index.
# This is used in order to build a query to remove those items not in this
# list - those are ready to be destroyed
indexed_items = set()
# items_index is a list of all the items index dictionaries.
# it is used to collect all indexes and index them using bulk API,
# instead of per item index API call.
items_index = []
def get_item_location(item):
"""
Gets the version agnostic item location
"""
return item.location.version_agnostic().replace(branch=None)
def prepare_item_index(item, skip_index=False, groups_usage_info=None):
"""
Add this item to the items_index and indexed_items list
Arguments:
item - item to add to index, its children will be processed recursively
skip_index - simply walk the children in the tree, the content change is
older than the REINDEX_AGE window and would have been already indexed.
This should really only be passed from the recursive child calls when
this method has determined that it is safe to do so
Returns:
item_content_groups - content groups assigned to indexed item
"""
is_indexable = hasattr(item, "index_dictionary")
item_index_dictionary = item.index_dictionary() if is_indexable else None
# if it's not indexable and it does not have children, then ignore
if not item_index_dictionary and not item.has_children:
return
item_content_groups = None
if item.category == "split_test":
split_partition = item.get_selected_partition()
for split_test_child in item.get_children():
if split_partition:
for group in split_partition.groups:
group_id = unicode(group.id)
child_location = item.group_id_to_child.get(group_id, None)
if child_location == split_test_child.location:
groups_usage_info.update({
unicode(get_item_location(split_test_child)): [group_id],
})
for component in split_test_child.get_children():
groups_usage_info.update({
unicode(get_item_location(component)): [group_id]
})
if groups_usage_info:
item_location = get_item_location(item)
item_content_groups = groups_usage_info.get(unicode(item_location), None)
item_id = unicode(cls._id_modifier(item.scope_ids.usage_id))
indexed_items.add(item_id)
if item.has_children:
# determine if it's okay to skip adding the children herein based upon how recently any may have changed
skip_child_index = skip_index or \
(triggered_at is not None and (triggered_at - item.subtree_edited_on) > reindex_age)
children_groups_usage = []
for child_item in item.get_children():
if modulestore.has_published_version(child_item):
children_groups_usage.append(
prepare_item_index(
child_item,
skip_index=skip_child_index,
groups_usage_info=groups_usage_info
)
)
if None in children_groups_usage:
item_content_groups = None
if skip_index or not item_index_dictionary:
return
item_index = {}
# if it has something to add to the index, then add it
try:
item_index.update(location_info)
item_index.update(item_index_dictionary)
item_index['id'] = item_id
if item.start:
item_index['start_date'] = item.start
item_index['content_groups'] = item_content_groups if item_content_groups else None
item_index.update(cls.supplemental_fields(item))
items_index.append(item_index)
indexed_count["count"] += 1
return item_content_groups
except Exception as err: # pylint: disable=broad-except
# broad exception so that index operation does not fail on one item of many
log.warning('Could not index item: %s - %r', item.location, err)
error_list.append(_('Could not index item: {}').format(item.location))
try:
with modulestore.branch_setting(ModuleStoreEnum.RevisionOption.published_only):
structure = cls._fetch_top_level(modulestore, structure_key)
groups_usage_info = cls.fetch_group_usage(modulestore, structure)
# First perform any additional indexing from the structure object
cls.supplemental_index_information(modulestore, structure)
# Now index the content
for item in structure.get_children():
prepare_item_index(item, groups_usage_info=groups_usage_info)
searcher.index(cls.DOCUMENT_TYPE, items_index)
cls.remove_deleted_items(searcher, structure_key, indexed_items)
except Exception as err: # pylint: disable=broad-except
# broad exception so that index operation does not prevent the rest of the application from working
log.exception(
"Indexing error encountered, courseware index may be out of date %s - %r",
structure_key,
err
)
error_list.append(_('General indexing error occurred'))
if error_list:
raise SearchIndexingError('Error(s) present during indexing', error_list)
return indexed_count["count"]
@classmethod
def _do_reindex(cls, modulestore, structure_key):
"""
(Re)index all content within the given structure (course or library),
tracking the fact that a full reindex has taken place
"""
indexed_count = cls.index(modulestore, structure_key)
if indexed_count:
cls._track_index_request(cls.INDEX_EVENT['name'], cls.INDEX_EVENT['category'], indexed_count)
return indexed_count
@classmethod
def _track_index_request(cls, event_name, category, indexed_count):
"""Track content index requests.
Arguments:
event_name (str): Name of the event to be logged.
category (str): category of indexed items
indexed_count (int): number of indexed items
Returns:
None
"""
data = {
"indexed_count": indexed_count,
'category': category,
}
tracker.emit(
event_name,
data
)
@classmethod
def fetch_group_usage(cls, modulestore, structure): # pylint: disable=unused-argument
"""
Base implementation of fetch group usage on course/library.
"""
return None
@classmethod
def supplemental_index_information(cls, modulestore, structure):
"""
Perform any supplemental indexing given that the structure object has
already been loaded. Base implementation performs no operation.
Arguments:
modulestore - modulestore object used during the indexing operation
structure - structure object loaded during the indexing job
Returns:
None
"""
pass
@classmethod
def supplemental_fields(cls, item): # pylint: disable=unused-argument
"""
Any supplemental fields that get added to the index for the specified
item. Base implementation returns an empty dictionary
"""
return {}
class CoursewareSearchIndexer(SearchIndexerBase):
"""
Class to perform indexing for courseware search from different modulestores
"""
INDEX_NAME = "courseware_index"
DOCUMENT_TYPE = "courseware_content"
ENABLE_INDEXING_KEY = 'ENABLE_COURSEWARE_INDEX'
INDEX_EVENT = {
'name': 'edx.course.index.reindexed',
'category': 'courseware_index'
}
UNNAMED_MODULE_NAME = ugettext_lazy("(Unnamed)")
@classmethod
def normalize_structure_key(cls, structure_key):
""" Normalizes structure key for use in indexing """
return structure_key
@classmethod
def _fetch_top_level(cls, modulestore, structure_key):
""" Fetch the item from the modulestore location """
return modulestore.get_course(structure_key, depth=None)
@classmethod
def _get_location_info(cls, normalized_structure_key):
""" Builds location info dictionary """
return {"course": unicode(normalized_structure_key), "org": normalized_structure_key.org}
@classmethod
def do_course_reindex(cls, modulestore, course_key):
"""
(Re)index all content within the given course, tracking the fact that a full reindex has taken place
"""
return cls._do_reindex(modulestore, course_key)
@classmethod
def fetch_group_usage(cls, modulestore, structure):
groups_usage_dict = {}
groups_usage_info = GroupConfiguration.get_content_groups_usage_info(modulestore, structure).items()
groups_usage_info.extend(
GroupConfiguration.get_content_groups_items_usage_info(
modulestore,
structure
).items()
)
if groups_usage_info:
for name, group in groups_usage_info:
for module in group:
view, args, kwargs = resolve(module['url']) # pylint: disable=unused-variable
usage_key_string = unicode(kwargs['usage_key_string'])
if groups_usage_dict.get(usage_key_string, None):
groups_usage_dict[usage_key_string].append(name)
else:
groups_usage_dict[usage_key_string] = [name]
return groups_usage_dict
@classmethod
def supplemental_index_information(cls, modulestore, structure):
"""
Perform additional indexing from loaded structure object
"""
CourseAboutSearchIndexer.index_about_information(modulestore, structure)
@classmethod
def supplemental_fields(cls, item):
"""
Add location path to the item object
Once we've established the path of names, the first name is the course
name, and the next 3 names are the navigable path within the edx
application. Notice that we stop at that level because a full path to
deep children would be confusing.
"""
location_path = []
parent = item
while parent is not None:
path_component_name = parent.display_name
if not path_component_name:
path_component_name = unicode(cls.UNNAMED_MODULE_NAME)
location_path.append(path_component_name)
parent = parent.get_parent()
location_path.reverse()
return {
"course_name": location_path[0],
"location": location_path[1:4]
}
class LibrarySearchIndexer(SearchIndexerBase):
"""
Base class to perform indexing for library search from different modulestores
"""
INDEX_NAME = "library_index"
DOCUMENT_TYPE = "library_content"
ENABLE_INDEXING_KEY = 'ENABLE_LIBRARY_INDEX'
INDEX_EVENT = {
'name': 'edx.library.index.reindexed',
'category': 'library_index'
}
@classmethod
def normalize_structure_key(cls, structure_key):
""" Normalizes structure key for use in indexing """
return normalize_key_for_search(structure_key)
@classmethod
def _fetch_top_level(cls, modulestore, structure_key):
""" Fetch the item from the modulestore location """
return modulestore.get_library(structure_key, depth=None)
@classmethod
def _get_location_info(cls, normalized_structure_key):
""" Builds location info dictionary """
return {"library": unicode(normalized_structure_key)}
@classmethod
def _id_modifier(cls, usage_id):
""" Modifies usage_id to submit to index """
return usage_id.replace(library_key=(usage_id.library_key.replace(version_guid=None, branch=None)))
@classmethod
def do_library_reindex(cls, modulestore, library_key):
"""
(Re)index all content within the given library, tracking the fact that a full reindex has taken place
"""
return cls._do_reindex(modulestore, library_key)
class AboutInfo(object):
""" About info structure to contain
1) Property name to use
    2) Where to add in the index (using the bitwise flags defined below)
    3) Where to source the property's value
"""
# Bitwise Flags for where to index the information
#
# ANALYSE - states that the property text contains content that we wish to be able to find matched within
# e.g. "joe" should yield a result for "I'd like to drink a cup of joe"
#
# PROPERTY - states that the property text should be a property of the indexed document, to be returned with the
# results: search matches will only be made on exact string matches
# e.g. "joe" will only match on "joe"
#
# We are using bitwise flags because one may want to add the property to EITHER or BOTH parts of the index
# e.g. university name is desired to be analysed, so that a search on "Oxford" will match
# property values "University of Oxford" and "Oxford Brookes University",
# but it is also a useful property, because within a (future) filtered search a user
# may have chosen to filter courses from "University of Oxford"
#
# see https://wiki.python.org/moin/BitwiseOperators for information about bitwise shift operator used below
#
ANALYSE = 1 << 0 # Add the information to the analysed content of the index
PROPERTY = 1 << 1 # Add the information as a property of the object being indexed (not analysed)
def __init__(self, property_name, index_flags, source_from):
self.property_name = property_name
self.index_flags = index_flags
self.source_from = source_from
def get_value(self, **kwargs):
""" get the value for this piece of information, using the correct source """
return self.source_from(self, **kwargs)
def from_about_dictionary(self, **kwargs):
""" gets the value from the kwargs provided 'about_dictionary' """
about_dictionary = kwargs.get('about_dictionary', None)
if not about_dictionary:
raise ValueError("Context dictionary does not contain expected argument 'about_dictionary'")
return about_dictionary.get(self.property_name, None)
def from_course_property(self, **kwargs):
""" gets the value from the kwargs provided 'course' """
course = kwargs.get('course', None)
if not course:
raise ValueError("Context dictionary does not contain expected argument 'course'")
return getattr(course, self.property_name, None)
def from_course_mode(self, **kwargs):
""" fetches the available course modes from the CourseMode model """
course = kwargs.get('course', None)
if not course:
raise ValueError("Context dictionary does not contain expected argument 'course'")
return [mode.slug for mode in CourseMode.modes_for_course(course.id)]
# Source location options - either from the course or the about info
FROM_ABOUT_INFO = from_about_dictionary
FROM_COURSE_PROPERTY = from_course_property
FROM_COURSE_MODE = from_course_mode
class CourseAboutSearchIndexer(object):
"""
Class to perform indexing of about information from course object
"""
DISCOVERY_DOCUMENT_TYPE = "course_info"
INDEX_NAME = CoursewareSearchIndexer.INDEX_NAME
# List of properties to add to the index - each item in the list is an instance of AboutInfo object
ABOUT_INFORMATION_TO_INCLUDE = [
AboutInfo("advertised_start", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("announcement", AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("start", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("end", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("effort", AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("display_name", AboutInfo.ANALYSE, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("overview", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("title", AboutInfo.ANALYSE | AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("university", AboutInfo.ANALYSE | AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("number", AboutInfo.ANALYSE | AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("short_description", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("description", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("key_dates", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("video", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("course_staff_short", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("course_staff_extended", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("requirements", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("syllabus", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("textbook", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("faq", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("more_info", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("ocw_links", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("enrollment_start", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("enrollment_end", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("org", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("modes", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_MODE),
AboutInfo("language", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
]
@classmethod
def index_about_information(cls, modulestore, course):
"""
Add the given course to the course discovery index
Arguments:
modulestore - modulestore object to use for operations
course - course object from which to take properties, locate about information
"""
searcher = SearchEngine.get_search_engine(cls.INDEX_NAME)
if not searcher:
return
course_id = unicode(course.id)
course_info = {
'id': course_id,
'course': course_id,
'content': {},
'image_url': course_image_url(course),
}
# load data for all of the 'about' modules for this course into a dictionary
about_dictionary = {
item.location.name: item.data
for item in modulestore.get_items(course.id, qualifiers={"category": "about"})
}
about_context = {
"course": course,
"about_dictionary": about_dictionary,
}
for about_information in cls.ABOUT_INFORMATION_TO_INCLUDE:
# Broad exception handler so that a single bad property does not scupper the collection of others
try:
section_content = about_information.get_value(**about_context)
except: # pylint: disable=bare-except
section_content = None
log.warning(
"Course discovery could not collect property %s for course %s",
about_information.property_name,
course_id,
exc_info=True,
)
if section_content:
if about_information.index_flags & AboutInfo.ANALYSE:
analyse_content = section_content
if isinstance(section_content, basestring):
analyse_content = strip_html_content_to_text(section_content)
course_info['content'][about_information.property_name] = analyse_content
if about_information.index_flags & AboutInfo.PROPERTY:
course_info[about_information.property_name] = section_content
# Broad exception handler to protect around and report problems with indexing
try:
searcher.index(cls.DISCOVERY_DOCUMENT_TYPE, [course_info])
except: # pylint: disable=bare-except
log.exception(
"Course discovery indexing error encountered, course discovery index may be out of date %s",
course_id,
)
raise
log.debug(
"Successfully added %s course to the course discovery index",
course_id
)
@classmethod
def _get_location_info(cls, normalized_structure_key):
""" Builds location info dictionary """
return {"course": unicode(normalized_structure_key), "org": normalized_structure_key.org}
@classmethod
def remove_deleted_items(cls, structure_key):
""" Remove item from Course About Search_index """
searcher = SearchEngine.get_search_engine(cls.INDEX_NAME)
if not searcher:
return
response = searcher.search(
doc_type=cls.DISCOVERY_DOCUMENT_TYPE,
field_dictionary=cls._get_location_info(structure_key)
)
result_ids = [result["data"]["id"] for result in response["results"]]
searcher.remove(cls.DISCOVERY_DOCUMENT_TYPE, result_ids)
|
avanzosc/avanzosc6.1
|
avanzosc_net_weight_in_lots/__init__.py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Avanzosc - Avanced Open Source Consulting
# Copyright (C) 2011 - 2013 Avanzosc <http://www.avanzosc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import stock_production_lot_ext
import stock_picking_ext
import stock_move_ext
import purchase_order_ext
import stock_move_split_ext
|
ROB-Seismology/oq-hazardlib
|
openquake/hazardlib/gsim/atkinson_wald_2007.py
|
# coding: utf-8
# The Hazard Library
# Copyright (C) 2012 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`AtkinsonWald2007`.
"""
from __future__ import division
import numpy as np
from openquake.hazardlib.gsim.base import IPE
from openquake.hazardlib import const
from openquake.hazardlib.imt import MMI
class AtkinsonWald2007(IPE):
"""
Implements IPE developed by Atkinson and Wald (2007)
California, USA
MS!
"""
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.ACTIVE_SHALLOW_CRUST
DEFINED_FOR_INTENSITY_MEASURE_TYPES = set([
MMI
])
DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.AVERAGE_HORIZONTAL
DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([
const.StdDev.TOTAL
])
# TODO !
REQUIRES_SITES_PARAMETERS = set(('vs30', ))
REQUIRES_RUPTURE_PARAMETERS = set(('mag',))
REQUIRES_DISTANCES = set(('rrup', ))
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
h = 14.0
R = np.sqrt(dists.rrup**2 + h**2)
B = np.zeros_like(dists.rrup)
B[R > 30.] = np.log10(R / 30.)[R > 30.]
        mean_mmi = (12.27 + 2.270 * (rup.mag - 6)
                    + 0.1304 * (rup.mag - 6)**2
                    - 1.30 * np.log10(R)
                    - 0.0007070 * R
                    + 1.95 * B
                    - 0.577 * rup.mag * np.log10(R))
mean_mmi += self.compute_site_term(sites)
mean_mmi = mean_mmi.clip(min=1, max=12)
stddevs = np.zeros_like(dists.rrup)
stddevs.fill(0.4)
stddevs = stddevs.reshape(1, len(stddevs))
return mean_mmi, stddevs
def compute_site_term(self, sites):
# TODO !
return 0
|
tpltnt/ircensus
|
ircensus_channel_bot.py
|
#!/usr/bin/env python3
"""
A simple bot to gather some census data in IRC channels.
It is intended to sit in a channel and collect the data for statistics.
:author: tpltnt
:license: AGPLv3
"""
import irc.bot
import irc.strings
from irc.client import ip_numstr_to_quad, ip_quad_to_numstr, ServerConnection
class CensusBot(irc.bot.SingleServerIRCBot):
"""
The class implementing the census bot.
"""
def __init__(self, channel, nickname, server, port=6667):
"""
The constructor for the CensusBot class.
:param channel: name of the channel to join
:type channel: str
:param nickname: nick of the bot (to use)
:type nickname: str
:param server: FQDN of the server to use
:type server: str
:param port: port to use when connecting to the server
:type port: int
"""
        if not channel.startswith('#'):
            channel = '#' + channel
irc.bot.SingleServerIRCBot.__init__(self, [(server, port)], nickname, nickname)
self.channel = channel
    def on_nicknameinuse(self, connection, event):
"""
Change own nickname if already in use.
:param connection: connection to the server
:type connection: irc.client.ServerConnection
:param event: event to react to
        :type event: irc.client.Event
:raises: TypeError
"""
if not isinstance(connection, ServerConnection):
raise TypeError("'connection' is not of type 'ServerConnection'")
connection.nick(connection.get_nickname() + "_")
def main():
import sys
if len(sys.argv) != 4:
print("Usage: " + sys.argv[0] + " <server[:port]> <channel> <nickname>")
sys.exit(1)
server = sys.argv[1].split(":", 1)
host = server[0]
if len(server) == 2:
try:
port = int(server[1])
except ValueError:
print("Error: Erroneous port.")
sys.exit(1)
else:
port = 6667
channel = sys.argv[2]
nickname = sys.argv[3]
    bot = CensusBot(channel, nickname, host, port)
bot.start()
if __name__ == "__main__":
main()
|
pculture/unisubs
|
utils/one_time_data.py
|
# Amara, universalsubtitles.org
#
# Copyright (C) 2017 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
import uuid
from django.conf import settings
from django.core.cache import cache
from django.urls import reverse
def _mk_key(token):
return "one-time-data-" + token
def set_one_time_data(data):
token = str(uuid.uuid4())
key = _mk_key(token)
cache.set(key, data, 60)
return '{}://{}{}'.format(settings.DEFAULT_PROTOCOL,
settings.HOSTNAME,
reverse("one_time_url", kwargs={"token": token}))
def get_one_time_data(token):
key = _mk_key(token)
data = cache.get(key)
# It seems like Brightcove wants to hit it twice
# cache.delete(key)
return data
|
hasgeek/funnel
|
migrations/versions/3a6b2ab00e3e_session_proposal_one.py
|
"""Make session:proposal 1:1.
Revision ID: 3a6b2ab00e3e
Revises: 4dbf686f4380
Create Date: 2013-11-09 13:51:58.343243
"""
# revision identifiers, used by Alembic.
revision = '3a6b2ab00e3e'
down_revision = '4dbf686f4380'
from alembic import op
def upgrade():
op.create_unique_constraint('session_proposal_id_key', 'session', ['proposal_id'])
def downgrade():
op.drop_constraint('session_proposal_id_key', 'session', 'unique')
|
franek/weboob
|
weboob/tools/capabilities/gallery/genericcomicreader.py
|
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Noé Rubinstein
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import re
from weboob.capabilities.gallery import ICapGallery, BaseGallery, BaseImage
from weboob.tools.backend import BaseBackend
from weboob.tools.browser import BaseBrowser, BasePage
__all__ = ['GenericComicReaderBackend']
class DisplayPage(BasePage):
def get_page(self, gallery):
src = self.document.xpath(self.browser.params['img_src_xpath'])[0]
return BaseImage(src,
gallery=gallery,
url=src)
def page_list(self):
return self.document.xpath(self.browser.params['page_list_xpath'])
class GenericComicReaderBrowser(BaseBrowser):
def __init__(self, browser_params, *args, **kwargs):
self.params = browser_params
BaseBrowser.__init__(self, *args, **kwargs)
def iter_gallery_images(self, gallery):
self.location(gallery.url)
assert self.is_on_page(DisplayPage)
for p in self.page.page_list():
if 'page_to_location' in self.params:
self.location(self.params['page_to_location'] % p)
else:
self.location(p)
assert self.is_on_page(DisplayPage)
yield self.page.get_page(gallery)
def fill_image(self, image, fields):
if 'data' in fields:
image.data = self.readurl(image.url)
class GenericComicReaderBackend(BaseBackend, ICapGallery):
NAME = 'genericcomicreader'
MAINTAINER = u'Noé Rubinstein'
EMAIL = 'noe.rubinstein@gmail.com'
VERSION = '0.f'
DESCRIPTION = 'Generic comic reader backend; subclasses implement specific sites'
LICENSE = 'AGPLv3+'
BROWSER = GenericComicReaderBrowser
BROWSER_PARAMS = {}
ID_REGEXP = None
URL_REGEXP = None
ID_TO_URL = None
PAGES = {}
def create_default_browser(self):
b = self.create_browser(self.BROWSER_PARAMS)
b.PAGES = self.PAGES
try:
b.DOMAIN = self.DOMAIN
except AttributeError:
pass
return b
def iter_gallery_images(self, gallery):
with self.browser:
return self.browser.iter_gallery_images(gallery)
def get_gallery(self, _id):
match = re.match(r'^%s$' % self.URL_REGEXP, _id)
if match:
_id = match.group(1)
else:
match = re.match(r'^%s$' % self.ID_REGEXP, _id)
if match:
_id = match.group(0)
else:
return None
gallery = BaseGallery(_id, url=(self.ID_TO_URL % _id))
with self.browser:
return gallery
def fill_gallery(self, gallery, fields):
gallery.title = gallery.id
def fill_image(self, image, fields):
with self.browser:
self.browser.fill_image(image, fields)
OBJECTS = {
BaseGallery: fill_gallery,
BaseImage: fill_image}
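# A minimal sketch (hypothetical site, XPaths and regexps, not a shipped
# backend) of the site-specific subclass this generic module expects:
#
#     class ExampleComicBackend(GenericComicReaderBackend):
#         NAME = 'examplecomic'
#         DESCRIPTION = 'example.com comic reader'
#         DOMAIN = 'www.example.com'
#         BROWSER_PARAMS = dict(
#             img_src_xpath="//img[@id='comic-page']/@src",
#             page_list_xpath="//select[@id='pages']/option/@value")
#         ID_REGEXP = r'[\w-]+'
#         URL_REGEXP = r'http://www\.example\.com/series/([\w-]+)'
#         ID_TO_URL = 'http://www.example.com/series/%s'
#         PAGES = {r'http://www\.example\.com/series/.+': DisplayPage}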
|
dsanders11/django-newsletter
|
test_project/test_project/settings.py
|
import os

test_dir = os.path.dirname(__file__)

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(test_dir, 'db.sqlite3'),
    }
}

INSTALLED_APPS = [
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.auth',
    'django.contrib.messages',
    'django.contrib.admin',
    'django.contrib.sites',
    'django.contrib.staticfiles',
    'imperavi',
    'tinymce',
    'newsletter'
]

# Imperavi is not compatible with Django 1.9+
import django
if django.VERSION >= (1, 9):
    INSTALLED_APPS.remove('imperavi')

MIDDLEWARE = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
]

ROOT_URLCONF = 'test_project.urls'

FIXTURE_DIRS = [os.path.join(test_dir, 'fixtures'), ]

SITE_ID = 1

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
        'DIRS': [os.path.join(test_dir, 'templates')],
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

# Enable time-zone support
USE_TZ = True
TIME_ZONE = 'UTC'

# Required for django-webtest to work
STATIC_URL = '/static/'

# Random secret key, regenerated at every startup (fine for a test project)
import random
key_chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
SECRET_KEY = ''.join([
    random.SystemRandom().choice(key_chars) for i in range(50)
])

# Logs all newsletter app messages to the console
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
        },
    },
    'loggers': {
        'newsletter': {
            'handlers': ['console'],
            'propagate': True,
        },
    },
}

DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
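
# A usage sketch (not in the original file): one plausible way to run the
# newsletter test suite against these settings, assuming the directory
# containing the test_project package is on PYTHONPATH:
#
#   DJANGO_SETTINGS_MODULE=test_project.settings django-admin test newsletter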
|
abramhindle/UnnaturalCodeFork
|
python/testdata/launchpad/lib/lp/services/webapp/tests/test_navigation.py
|
# Copyright 2010 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

__metaclass__ = type

from zope.component import (
    ComponentLookupError,
    getMultiAdapter,
    )
from zope.configuration import xmlconfig
from zope.interface import (
    implements,
    Interface,
    )
from zope.publisher.interfaces.browser import (
    IBrowserPublisher,
    IDefaultBrowserLayer,
    )
from zope.testing.cleanup import cleanUp

from lp.services.webapp import Navigation
from lp.testing import TestCase


class TestNavigationDirective(TestCase):

    def test_default_layer(self):
        # By default all navigation classes are registered for
        # IDefaultBrowserLayer.
        directive = """
            <browser:navigation
                module="%(this)s" classes="ThingNavigation"/>
            """ % dict(this=this)
        xmlconfig.string(zcml_configure % directive)
        navigation = getMultiAdapter(
            (Thing(), DefaultBrowserLayer()), IBrowserPublisher, name='')
        self.assertIsInstance(navigation, ThingNavigation)

    def test_specific_layer(self):
        # If we specify a layer when registering a navigation class, it will
        # only be available on that layer.
        directive = """
            <browser:navigation
                module="%(this)s" classes="OtherThingNavigation"
                layer="%(this)s.IOtherLayer" />
            """ % dict(this=this)
        xmlconfig.string(zcml_configure % directive)
        self.assertRaises(
            ComponentLookupError,
            getMultiAdapter,
            (Thing(), DefaultBrowserLayer()), IBrowserPublisher, name='')
        navigation = getMultiAdapter(
            (Thing(), OtherLayer()), IBrowserPublisher, name='')
        self.assertIsInstance(navigation, OtherThingNavigation)

    def test_multiple_navigations_for_single_context(self):
        # It is possible to have multiple navigation classes for a given
        # context class as long as they are registered for different layers.
        directive = """
            <browser:navigation
                module="%(this)s" classes="ThingNavigation"/>
            <browser:navigation
                module="%(this)s" classes="OtherThingNavigation"
                layer="%(this)s.IOtherLayer" />
            """ % dict(this=this)
        xmlconfig.string(zcml_configure % directive)
        navigation = getMultiAdapter(
            (Thing(), DefaultBrowserLayer()), IBrowserPublisher, name='')
        other_navigation = getMultiAdapter(
            (Thing(), OtherLayer()), IBrowserPublisher, name='')
        self.assertNotEqual(navigation, other_navigation)

    def tearDown(self):
        TestCase.tearDown(self)
        cleanUp()


class DefaultBrowserLayer:
    implements(IDefaultBrowserLayer)


class IThing(Interface):
    pass


class Thing(object):
    implements(IThing)


class ThingNavigation(Navigation):
    usedfor = IThing


class OtherThingNavigation(Navigation):
    usedfor = IThing


class IOtherLayer(Interface):
    pass


class OtherLayer:
    implements(IOtherLayer)


this = "lp.services.webapp.tests.test_navigation"
zcml_configure = """
    <configure xmlns:browser="http://namespaces.zope.org/browser">
      <include package="lp.services.webapp" file="meta.zcml" />
      %s
    </configure>
    """
|