repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
def main(request, response):
    """WPT support file: answer CORS preflight requests and record them.

    Modes, selected via query parameters:
      * check=...&token=... -- report whether a preflight carrying this token
        was seen ("1") or not ("0"); check=keep re-stashes the token so the
        same check can succeed again later.
      * token=... (on an OPTIONS request) -- record the token in the server
        stash so a later "check" request can find it.
    """
    headers = [("Content-Type", "text/plain")]
    if "check" in request.GET:
        token = request.GET.first("token")
        value = request.server.stash.take(token)
        if value is None:
            body = "0"
        else:
            if request.GET.first("check", None) == "keep":
                # Put the token back so repeated checks keep succeeding.
                request.server.stash.put(token, value)
            body = "1"
        return headers, body
    if request.method == "OPTIONS":
        if "Access-Control-Request-Method" not in request.headers:
            response.set_error(400, "No Access-Control-Request-Method header")
            return "ERROR: No access-control-request-method in preflight!"
        headers.append(("Access-Control-Allow-Methods",
                        request.headers['Access-Control-Request-Method']))
        if "max_age" in request.GET:
            headers.append(("Access-Control-Max-Age", request.GET['max_age']))
        if "token" in request.GET:
            # Mark this preflight as seen; consumed by the "check" mode above.
            request.server.stash.put(request.GET.first("token"), 1)
    headers.append(("Access-Control-Allow-Origin", "*"))
    headers.append(("Access-Control-Allow-Headers", "x-print"))
    body = request.headers.get("x-print", "NO")
    return headers, body
| mpl-2.0 |
jnovinger/django | tests/admin_changelist/admin.py | 247 | 3931 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.core.paginator import Paginator
from .models import Child, Event, Parent, Swallow
# Stand-alone admin site used by the changelist tests (kept separate from the
# default django.contrib.admin site so test registrations don't leak).
site = admin.AdminSite(name="admin")
site.register(User, UserAdmin)
class CustomPaginator(Paginator):
    # Test double: deliberately ignores the requested page_size and always
    # paginates 5 per page with 2 orphans.
    def __init__(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
        super(CustomPaginator, self).__init__(queryset, 5, orphans=2,
                                              allow_empty_first_page=allow_empty_first_page)
class EventAdmin(admin.ModelAdmin):
    # Changelist column backed by a callable rather than a model field.
    list_display = ['event_date_func']

    def event_date_func(self, event):
        # Expose Event.date under a different column name.
        return event.date


site.register(Event, EventAdmin)
class ParentAdmin(admin.ModelAdmin):
    # Filter and search across a reverse relation (Parent -> Child).
    list_filter = ['child__name']
    search_fields = ['child__name']
class ChildAdmin(admin.ModelAdmin):
    list_display = ['name', 'parent']
    list_per_page = 10
    list_filter = ['parent', 'age']

    def get_queryset(self, request):
        # NOTE(review): select_related() is passed a field path rather than a
        # plain relation name -- presumably exercised deliberately by the
        # changelist tests; confirm against the test suite.
        return super(ChildAdmin, self).get_queryset(request).select_related("parent__name")
class CustomPaginationAdmin(ChildAdmin):
    # Same as ChildAdmin but paginated with the fixed-size CustomPaginator.
    paginator = CustomPaginator
class FilteredChildAdmin(admin.ModelAdmin):
    list_display = ['name', 'parent']
    list_per_page = 10

    def get_queryset(self, request):
        # Restrict the changelist to objects whose name contains 'filtered'.
        return super(FilteredChildAdmin, self).get_queryset(request).filter(
            name__contains='filtered')
# Admins exercising list_filter/search_fields over many-to-many and
# related-model field paths.
class BandAdmin(admin.ModelAdmin):
    list_filter = ['genres']


class GroupAdmin(admin.ModelAdmin):
    list_filter = ['members']


class ConcertAdmin(admin.ModelAdmin):
    list_filter = ['group__members']
    search_fields = ['group__members__name']


class QuartetAdmin(admin.ModelAdmin):
    list_filter = ['members']


class ChordsBandAdmin(admin.ModelAdmin):
    list_filter = ['members']


class InvitationAdmin(admin.ModelAdmin):
    list_display = ('band', 'player')
    # Only 'player' is eagerly joined; 'band' is fetched per row.
    list_select_related = ('player',)
class DynamicListDisplayChildAdmin(admin.ModelAdmin):
    """Admin whose displayed columns depend on the requesting user."""

    list_display = ('parent', 'name', 'age')

    def get_list_display(self, request):
        # Hide the 'parent' column from the special 'noparents' user.
        display = super(DynamicListDisplayChildAdmin, self).get_list_display(request)
        if request.user.username == 'noparents':
            display = [column for column in display if column != 'parent']
        return display
class DynamicListDisplayLinksChildAdmin(admin.ModelAdmin):
    list_display = ('parent', 'name', 'age')
    list_display_links = ['parent', 'name']

    def get_list_display_links(self, request, list_display):
        # Override the statically declared links entirely at request time.
        return ['age']


site.register(Child, DynamicListDisplayChildAdmin)
class NoListDisplayLinksParentAdmin(admin.ModelAdmin):
    # Disable changelist links entirely.
    list_display_links = None


site.register(Parent, NoListDisplayLinksParentAdmin)
class SwallowAdmin(admin.ModelAdmin):
    actions = None  # prevent ['action_checkbox'] + list(list_display)
    list_display = ('origin', 'load', 'speed', 'swallowonetoone')


site.register(Swallow, SwallowAdmin)
class DynamicListFilterChildAdmin(admin.ModelAdmin):
    """Admin whose sidebar filters depend on the requesting user."""

    list_filter = ('parent', 'name', 'age')

    def get_list_filter(self, request):
        # Drop the 'parent' filter for the special 'noparents' user.
        filters = super(DynamicListFilterChildAdmin, self).get_list_filter(request)
        if request.user.username == 'noparents':
            filters = [item for item in filters if item != 'parent']
        return filters
class DynamicSearchFieldsChildAdmin(admin.ModelAdmin):
    """Admin that extends its search fields at request time."""

    search_fields = ('name',)

    def get_search_fields(self, request):
        # Start from the statically declared fields and add 'age'.
        fields = super(DynamicSearchFieldsChildAdmin, self).get_search_fields(request)
        fields += ('age',)
        return fields
class EmptyValueChildAdmin(admin.ModelAdmin):
    # Admin-wide placeholder shown for empty values in the changelist.
    empty_value_display = '-empty-'
    list_display = ('name', 'age_display', 'age')

    def age_display(self, obj):
        return obj.age
    # Column-specific placeholder, overriding the admin-wide one above.
    age_display.empty_value_display = '†'
| bsd-3-clause |
BarlowHall/irreplay | relay/onOffRelay.py | 1 | 1317 | #!/usr/bin/python
"""
irreplay is a commandline utility that uses lirc to receive a sequence of
IR remote button presses and plays back that sequence.
Copyright (C) 2017 Alex Barlow-Hall
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
import RPi.GPIO as gpio
import sys
pin = 4
numOfArgs = len(sys.argv)
if numOfArgs == 2:
data = int(sys.argv[1])
gpio.setmode(gpio.BCM)
gpio.setwarnings(False)
gpio.setup(pin, gpio.OUT)
if data == 1:
gpio.output(pin, gpio.HIGH)
elif data == 0:
gpio.output(pin, gpio.LOW)
elif numOfArgs > 2:
print "ArgumentError: Too many arguments given! (only needs one)"
else:
print "ArgumentError: No argument given! (needs only one!)"
| gpl-2.0 |
Jusedawg/SickRage | lib/hachoir_metadata/program.py | 94 | 3646 | from hachoir_metadata.metadata import RootMetadata, registerExtractor
from hachoir_parser.program import ExeFile
from hachoir_metadata.safe import fault_tolerant, getValue
class ExeMetadata(RootMetadata):
    """Extract metadata (title, author, version, ...) from Windows
    executables, handling both Portable Executable (PE) and the older
    New Executable (NE) formats."""

    # Version-info keys copied verbatim into metadata attributes.
    KEY_TO_ATTR = {
        u"ProductName": "title",
        u"LegalCopyright": "copyright",
        u"LegalTrademarks": "copyright",
        u"LegalTrademarks1": "copyright",
        u"LegalTrademarks2": "copyright",
        u"CompanyName": "author",
        u"BuildDate": "creation_date",
        u"FileDescription": "title",
        u"ProductVersion": "version",
    }
    # Version-info keys that carry no useful metadata and are not even worth
    # a comment entry.
    SKIP_KEY = set((u"InternalName", u"OriginalFilename", u"FileVersion", u"BuildVersion"))

    def extract(self, exe):
        # Entry point: dispatch on the executable flavour.
        if exe.isPE():
            self.extractPE(exe)
        elif exe.isNE():
            self.extractNE(exe)

    def extractNE(self, exe):
        # NE: use the header plus the optional string version info.
        if "ne_header" in exe:
            self.useNE_Header(exe["ne_header"])
        if "info" in exe:
            self.useNEInfo(exe["info"])

    @fault_tolerant
    def useNEInfo(self, info):
        # Locate the StringFileInfo node and read its key/value pairs.
        for node in info.array("node"):
            if node["name"].value == "StringFileInfo":
                self.readVersionInfo(node["node[0]"])

    def extractPE(self, exe):
        # Read information from headers
        if "pe_header" in exe:
            self.usePE_Header(exe["pe_header"])
        if "pe_opt_header" in exe:
            self.usePE_OptHeader(exe["pe_opt_header"])

        # Use PE resource
        resource = exe.getResource()
        if resource and "version_info/node[0]" in resource:
            for node in resource.array("version_info/node[0]/node"):
                if getValue(node, "name") == "StringFileInfo" \
                and "node[0]" in node:
                    self.readVersionInfo(node["node[0]"])

    @fault_tolerant
    def useNE_Header(self, hdr):
        # Describe the NE flavour (DLL vs Windows 3.x application).
        if hdr["is_dll"].value:
            self.format_version = u"New-style executable: Dynamic-link library (DLL)"
        elif hdr["is_win_app"].value:
            self.format_version = u"New-style executable: Windows 3.x application"
        else:
            self.format_version = u"New-style executable for Windows 3.x"

    @fault_tolerant
    def usePE_Header(self, hdr):
        self.creation_date = hdr["creation_date"].value
        self.comment = "CPU: %s" % hdr["cpu"].display
        if hdr["is_dll"].value:
            self.format_version = u"Portable Executable: Dynamic-link library (DLL)"
        else:
            self.format_version = u"Portable Executable: Windows application"

    @fault_tolerant
    def usePE_OptHeader(self, hdr):
        self.comment = "Subsystem: %s" % hdr["subsystem"].display

    def readVersionInfo(self, info):
        """Copy the StringFileInfo key/value pairs into metadata attributes."""
        values = {}
        for node in info.array("node"):
            if "value" not in node or "name" not in node:
                continue
            value = node["value"].value.strip(" \0")
            if not value:
                continue
            key = node["name"].value
            values[key] = value

        if "ProductName" in values and "FileDescription" in values:
            # Make sure that FileDescription is set before ProductName
            # as title value
            # NOTE(review): both assignments are kept deliberately --
            # presumably the metadata attribute accumulates multiple values
            # rather than being overwritten; confirm against the Metadata
            # base class.
            self.title = values["FileDescription"]
            self.title = values["ProductName"]
            del values["FileDescription"]
            del values["ProductName"]

        for key, value in values.iteritems():
            if key in self.KEY_TO_ATTR:
                setattr(self, self.KEY_TO_ATTR[key], value)
            elif key not in self.SKIP_KEY:
                # Unknown but possibly interesting keys become comments.
                self.comment = "%s=%s" % (key, value)


registerExtractor(ExeFile, ExeMetadata)
| gpl-3.0 |
nowopen/scrapy | scrapy/downloadermiddlewares/redirect.py | 47 | 4320 | import logging
from six.moves.urllib.parse import urljoin
from scrapy.http import HtmlResponse
from scrapy.utils.response import get_meta_refresh
from scrapy.utils.python import to_native_str
from scrapy.exceptions import IgnoreRequest, NotConfigured
logger = logging.getLogger(__name__)
class BaseRedirectMiddleware(object):
    """Shared plumbing for redirect middlewares: settings handling, redirect
    TTL bookkeeping and construction of the redirected request."""

    enabled_setting = 'REDIRECT_ENABLED'

    def __init__(self, settings):
        if not settings.getbool(self.enabled_setting):
            raise NotConfigured

        self.max_redirect_times = settings.getint('REDIRECT_MAX_TIMES')
        self.priority_adjust = settings.getint('REDIRECT_PRIORITY_ADJUST')

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler.settings)

    def _redirect(self, redirected, request, spider, reason):
        """Return *redirected* with redirect bookkeeping copied over, or raise
        IgnoreRequest once the redirect budget is exhausted."""
        # Per-request TTL (initialised from the global maximum) and the count
        # of redirects already followed in this chain.
        ttl = request.meta.setdefault('redirect_ttl', self.max_redirect_times)
        redirects = request.meta.get('redirect_times', 0) + 1

        if ttl and redirects <= self.max_redirect_times:
            redirected.meta['redirect_times'] = redirects
            redirected.meta['redirect_ttl'] = ttl - 1
            redirected.meta['redirect_urls'] = request.meta.get('redirect_urls', []) + \
                [request.url]
            # Preserve dedup behaviour and bump scheduling priority so the
            # chain completes promptly.
            redirected.dont_filter = request.dont_filter
            redirected.priority = request.priority + self.priority_adjust
            logger.debug("Redirecting (%(reason)s) to %(redirected)s from %(request)s",
                         {'reason': reason, 'redirected': redirected, 'request': request},
                         extra={'spider': spider})
            return redirected
        else:
            logger.debug("Discarding %(request)s: max redirections reached",
                         {'request': request}, extra={'spider': spider})
            raise IgnoreRequest("max redirections reached")

    def _redirect_request_using_get(self, request, redirect_url):
        # Browser-style behaviour: retry the redirect target as a body-less
        # GET, dropping the body-describing headers.
        redirected = request.replace(url=redirect_url, method='GET', body='')
        redirected.headers.pop('Content-Type', None)
        redirected.headers.pop('Content-Length', None)
        return redirected
class RedirectMiddleware(BaseRedirectMiddleware):
    """Handle redirection of requests based on response status and meta-refresh html tag"""

    def process_response(self, request, response, spider):
        # Honour the various opt-outs: per-request flag, spider-level and
        # per-request handled-status lists.
        if (request.meta.get('dont_redirect', False) or
                response.status in getattr(spider, 'handle_httpstatus_list', []) or
                response.status in request.meta.get('handle_httpstatus_list', []) or
                request.meta.get('handle_httpstatus_all', False)):
            return response

        # 308 Permanent Redirect (RFC 7538) behaves like 307 but is permanent.
        allowed_status = (301, 302, 303, 307, 308)
        if 'Location' not in response.headers or response.status not in allowed_status:
            return response

        # HTTP header is ascii or latin1, redirected url will be percent-encoded utf-8
        location = to_native_str(response.headers['location'].decode('latin1'))

        redirected_url = urljoin(request.url, location)

        # 301/307/308 (and any HEAD request) must preserve method and body;
        # 302/303 on a non-HEAD request are retried as a body-less GET.
        if response.status in (301, 307, 308) or request.method == 'HEAD':
            redirected = request.replace(url=redirected_url)
            return self._redirect(redirected, request, spider, response.status)

        redirected = self._redirect_request_using_get(request, redirected_url)
        return self._redirect(redirected, request, spider, response.status)
class MetaRefreshMiddleware(BaseRedirectMiddleware):
    """Follow <meta http-equiv="refresh"> redirects found in HTML responses."""

    enabled_setting = 'METAREFRESH_ENABLED'

    def __init__(self, settings):
        super(MetaRefreshMiddleware, self).__init__(settings)
        # Prefer the legacy REDIRECT_MAX_METAREFRESH_DELAY setting, falling
        # back to METAREFRESH_MAXDELAY.
        self._maxdelay = settings.getint('REDIRECT_MAX_METAREFRESH_DELAY',
                                         settings.getint('METAREFRESH_MAXDELAY'))

    def process_response(self, request, response, spider):
        if request.meta.get('dont_redirect', False) or request.method == 'HEAD' or \
                not isinstance(response, HtmlResponse):
            return response

        # The guard above already ensured response is an HtmlResponse, so the
        # previous redundant isinstance() re-check has been removed.
        interval, url = get_meta_refresh(response)
        if url and interval < self._maxdelay:
            redirected = self._redirect_request_using_get(request, url)
            return self._redirect(redirected, request, spider, 'meta refresh')

        return response
| bsd-3-clause |
foursquare/pants | contrib/cpp/tests/python/pants_test/contrib/cpp/test_cpp_toolchain.py | 2 | 1686 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import unittest
from contextlib import contextmanager
from pants.util.contextutil import environment_as, temporary_dir
from pants.util.dirutil import chmod_plus_x, touch
from pants.contrib.cpp.toolchain.cpp_toolchain import CppToolchain
class CppToolchainTest(unittest.TestCase):
    """Unit tests for CppToolchain tool discovery and registration."""

    @contextmanager
    def tool(self, name):
        """Yield the path of an executable stub named *name*, with its parent
        directory temporarily prepended to PATH."""
        with temporary_dir() as tool_root:
            tool_path = os.path.join(tool_root, name)
            touch(tool_path)
            chmod_plus_x(tool_path)
            new_path = os.pathsep.join([tool_root] + os.environ.get('PATH', '').split(os.pathsep))
            with environment_as(PATH=new_path):
                yield tool_path

    def test_default_compiler_from_environ(self):
        # The compiler named by $CXX should be resolved from PATH.
        with self.tool('g++') as tool_path:
            with environment_as(CXX='g++'):
                self.assertEqual(CppToolchain().compiler, tool_path)
                self.assertEqual(CppToolchain().compiler,
                                 CppToolchain().register_tool(name='compiler', tool=tool_path))

    def test_invalid_compiler(self):
        # A compiler that cannot be resolved must raise on access.
        cpp_toolchain = CppToolchain(compiler='not-a-command')
        with self.assertRaises(CppToolchain.Error):
            cpp_toolchain.compiler

    def test_tool_registration(self):
        with self.tool('good-tool') as tool_path:
            self.assertEqual(tool_path, CppToolchain().register_tool(name='foo', tool='good-tool'))

    def test_invalid_tool_registration(self):
        with self.assertRaises(CppToolchain.Error):
            CppToolchain().register_tool('not-a-command')
| apache-2.0 |
TaiSakuma/AlphaTwirl | tests/ROOT/performance_read_branch.py | 1 | 2204 | #!/usr/bin/env python
# Tai Sakuma <tai.sakuma@cern.ch>
##__________________________________________________________________||
import os, sys
import timeit
import array
import ROOT
##__________________________________________________________________||
# Input ntuple used by the benchmark (site-specific path) and its tree name.
inputPath = '/Users/sakuma/work/cms/c150130_RA1_data/80X/MC/20160708_B01_MCMiniAODv2_SM/AtLogic_MCMiniAODv2_SM/TTJets_HT2500toInf_madgraphMLM/treeProducerSusyAlphaT/tree.root'
treeName = 'tree'

##__________________________________________________________________||
def simplest_way():
    # Baseline: read every branch of every entry and access met_pt through
    # PyROOT's pythonized attribute lookup.
    inputFile = ROOT.TFile.Open(inputPath)
    tree = inputFile.Get(treeName)
    for i in xrange(tree.GetEntries()):
        if tree.GetEntry(i) <= 0: break
        tree.met_pt

##__________________________________________________________________||
def use_SetBranchStatus():
    # Disable all branches except met_pt so GetEntry() deserializes less data.
    inputFile = ROOT.TFile.Open(inputPath)
    tree = inputFile.Get(treeName)
    tree.SetBranchStatus("*", 0)
    tree.SetBranchStatus("met_pt", 1)
    for i in xrange(tree.GetEntries()):
        if tree.GetEntry(i) <= 0: break
        tree.met_pt

##__________________________________________________________________||
def use_GetLeaf():
    # Same branch selection, but read the value via TLeaf::GetValue().
    inputFile = ROOT.TFile.Open(inputPath)
    tree = inputFile.Get(treeName)
    tree.SetBranchStatus("*", 0)
    tree.SetBranchStatus("met_pt", 1)
    for i in xrange(tree.GetEntries()):
        if tree.GetEntry(i) <= 0: break
        tree.GetLeaf('met_pt').GetValue()

##__________________________________________________________________||
def use_SetBranchAddress():
    # Bind met_pt to a pre-allocated C buffer and read it by index.
    # NOTE(review): assumes the branch holds a double ('d') -- confirm.
    inputFile = ROOT.TFile.Open(inputPath)
    tree = inputFile.Get(treeName)
    tree.SetBranchStatus("*", 0)
    tree.SetBranchStatus("met_pt", 1)
    met_pt = array.array('d', [ 0 ])
    tree.SetBranchAddress("met_pt" , met_pt)
    for i in xrange(tree.GetEntries()):
        if tree.GetEntry(i) <= 0: break
        met_pt[0]

##__________________________________________________________________||
# Time each strategy once and print the result.
ways = ['simplest_way', 'use_SetBranchStatus', 'use_GetLeaf', 'use_SetBranchAddress']
for w in ways:
    print w, ':',
    print timeit.timeit(w + '()', number = 1, setup = 'from __main__ import ' + w)

##__________________________________________________________________||
| bsd-3-clause |
EttusResearch/gnuradio | gr-uhd/examples/python/usrp_wfm_rcv_pll.py | 58 | 14217 | #!/usr/bin/env python
#
# Copyright 2005-2007,2011,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, audio, uhd
from gnuradio import blocks
from gnuradio import filter
from gnuradio import analog
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from gnuradio.wxgui import slider, powermate
from gnuradio.wxgui import stdgui2, fftsink2, form, scopesink2
from optparse import OptionParser
import sys
import wx
class wfm_rx_block (stdgui2.std_top_block):
    """wxGUI flowgraph: receive broadcast WFM stereo from a UHD source using
    a PLL-based demodulator, with FFT displays and frequency / volume / gain
    / stereo-squelch controls (plus optional Powermate knob support)."""

    def __init__(self,frame,panel,vbox,argv):
        stdgui2.std_top_block.__init__ (self,frame,panel,vbox,argv)

        parser=OptionParser(option_class=eng_option)
        parser.add_option("-a", "--args", type="string", default="",
                          help="UHD device address args [default=%default]")
        parser.add_option("", "--spec", type="string", default=None,
                          help="Subdevice of UHD device where appropriate")
        parser.add_option("-A", "--antenna", type="string", default=None,
                          help="select Rx Antenna where appropriate")
        parser.add_option("-f", "--freq", type="eng_float", default=100.1e6,
                          help="set frequency to FREQ", metavar="FREQ")
        parser.add_option("-g", "--gain", type="eng_float", default=None,
                          help="set gain in dB (default is midpoint)")
        parser.add_option("-s", "--squelch", type="eng_float", default=0,
                          help="set squelch level (default is 0)")
        parser.add_option("-V", "--volume", type="eng_float", default=None,
                          help="set volume (default is midpoint)")
        parser.add_option("-O", "--audio-output", type="string", default="default",
                          help="pcm device name. E.g., hw:0,0 or surround51 or /dev/dsp")
        parser.add_option("", "--freq-min", type="eng_float", default=87.9e6,
                          help="Set a minimum frequency [default=%default]")
        parser.add_option("", "--freq-max", type="eng_float", default=108.1e6,
                          help="Set a maximum frequency [default=%default]")
        (options, args) = parser.parse_args()
        if len(args) != 0:
            parser.print_help()
            sys.exit(1)

        self.frame = frame
        self.panel = panel

        # UI state: current volume (dB), knob mode ("FREQ" or "VOL"),
        # currently tuned frequency (Hz).
        self.vol = 0
        self.state = "FREQ"
        self.freq = 0

        self.fm_freq_min = options.freq_min
        self.fm_freq_max = options.freq_max

        # build graph
        self.u = uhd.usrp_source(device_addr=options.args, stream_args=uhd.stream_args('fc32'))

        # Set the subdevice spec
        if(options.spec):
            self.u.set_subdev_spec(options.spec, 0)

        # Set the antenna
        if(options.antenna):
            self.u.set_antenna(options.antenna, 0)

        usrp_rate = 320e3
        demod_rate = 320e3
        audio_rate = 48e3
        audio_decim = 10

        self.u.set_samp_rate(usrp_rate)
        dev_rate = self.u.get_samp_rate()

        # Polyphase channel filter: resample the device rate to the
        # demodulator's expected rate.
        nfilts = 32
        chan_coeffs = filter.firdes.low_pass_2(nfilts,            # gain
                                               nfilts*usrp_rate,  # sampling rate
                                               90e3,              # passband cutoff
                                               30e3,              # stopband cutoff
                                               70)                # stopband attenuation
        rrate = usrp_rate / dev_rate
        self.chan_filt = filter.pfb.arb_resampler_ccf(rrate, chan_coeffs, nfilts)

        self.guts = analog.wfm_rcv_pll(demod_rate, audio_decim)

        # Resample each demodulated channel down to the sound card rate.
        chan_rate = audio_rate / (demod_rate/audio_decim)
        self.rchan_filt = filter.pfb.arb_resampler_fff(chan_rate)
        self.lchan_filt = filter.pfb.arb_resampler_fff(chan_rate)

        # FIXME rework {add,multiply}_const_* to handle multiple streams
        self.volume_control_l = blocks.multiply_const_ff(self.vol)
        self.volume_control_r = blocks.multiply_const_ff(self.vol)

        # sound card as final sink
        self.audio_sink = audio.sink (int (audio_rate),
                                      options.audio_output,
                                      False)  # ok_to_block

        # now wire it all together
        self.connect (self.u, self.chan_filt, self.guts)
        self.connect((self.guts, 0), self.lchan_filt,
                     self.volume_control_l, (self.audio_sink,0))
        self.connect((self.guts, 1), self.rchan_filt,
                     self.volume_control_r, (self.audio_sink,1))
        # NOTE(review): bare except -- presumably guards an AttributeError
        # when the demod lacks squelch support; consider narrowing.
        try:
            self.guts.stereo_carrier_pll_recovery.squelch_enable(True)
        except:
            print "FYI: This implementation of the stereo_carrier_pll_recovery has no squelch implementation yet"

        self._build_gui(vbox, usrp_rate, demod_rate, audio_rate)

        if options.gain is None:
            # if no gain was specified, use the mid-point in dB
            g = self.u.get_gain_range()
            options.gain = float(g.start()+g.stop())/2.0

        if options.volume is None:
            g = self.volume_range()
            options.volume = float(g[0]+g[1])/2

        # Sanity-check the hardware against the requested FM band.
        frange = self.u.get_freq_range()
        if(frange.start() > self.fm_freq_max or frange.stop() < self.fm_freq_min):
            sys.stderr.write("Radio does not support required frequency range.\n")
            sys.exit(1)
        if(options.freq < self.fm_freq_min or options.freq > self.fm_freq_max):
            sys.stderr.write("Requested frequency is outside of required frequency range.\n")
            sys.exit(1)

        # set initial values
        self.set_gain(options.gain)
        self.set_vol(options.volume)
        try:
            self.guts.stereo_carrier_pll_recovery.set_lock_threshold(options.squelch)
        except:
            print "FYI: This implementation of the stereo_carrier_pll_recovery has no squelch implementation yet"
        if not(self.set_freq(options.freq)):
            self._set_status_msg("Failed to set initial frequency")

    def _set_status_msg(self, msg, which=0):
        # Write *msg* into status-bar field *which*.
        self.frame.GetStatusBar().SetStatusText(msg, which)

    def _build_gui(self, vbox, usrp_rate, demod_rate, audio_rate):
        """Create the FFT/scope displays (some disabled with 'if 0:') and the
        frequency/volume/gain/squelch control form at the bottom."""

        def _form_set_freq(kv):
            return self.set_freq(kv['freq'])

        if 1:
            self.src_fft = fftsink2.fft_sink_c(self.panel, title="Data from USRP",
                                               fft_size=512, sample_rate=usrp_rate,
                                               ref_scale=32768.0, ref_level=0, y_divs=12)
            self.connect (self.u, self.src_fft)
            vbox.Add (self.src_fft.win, 4, wx.EXPAND)

        if 1:
            post_fm_demod_fft = fftsink2.fft_sink_f(self.panel, title="Post FM Demod",
                                                    fft_size=512, sample_rate=demod_rate,
                                                    y_per_div=10, ref_level=0)
            self.connect (self.guts.fm_demod, post_fm_demod_fft)
            vbox.Add (post_fm_demod_fft.win, 4, wx.EXPAND)

        if 0:
            post_stereo_carrier_generator_fft = fftsink2.fft_sink_c (self.panel, title="Post Stereo_carrier_generator",
                                                                     fft_size=512, sample_rate=audio_rate,
                                                                     y_per_div=10, ref_level=0)
            self.connect (self.guts.stereo_carrier_generator, post_stereo_carrier_generator_fft)
            vbox.Add (post_stereo_carrier_generator_fft.win, 4, wx.EXPAND)

        if 0:
            post_deemphasis_left = fftsink2.fft_sink_f (self.panel, title="Post_Deemphasis_Left",
                                                        fft_size=512, sample_rate=audio_rate,
                                                        y_per_div=10, ref_level=0)
            self.connect (self.guts.deemph_Left, post_deemphasis_left)
            vbox.Add (post_deemphasis_left.win, 4, wx.EXPAND)

        if 0:
            post_deemphasis_right = fftsink2.fft_sink_f(self.panel, title="Post_Deemphasis_Right",
                                                        fft_size=512, sample_rate=audio_rate,
                                                        y_per_div=10, ref_level=-20)
            # NOTE(review): this taps deemph_Left although the display is the
            # "right" channel -- looks like a copy/paste slip in this disabled
            # debug view; confirm before enabling.
            self.connect (self.guts.deemph_Left, post_deemphasis_right)
            vbox.Add (post_deemphasis_right.win, 4, wx.EXPAND)

        if 0:
            LmR_fft = fftsink2.fft_sink_f(self.panel, title="LmR",
                                          fft_size=512, sample_rate=audio_rate,
                                          y_per_div=10, ref_level=-20)
            self.connect (self.guts.LmR_real,LmR_fft)
            vbox.Add (LmR_fft.win, 4, wx.EXPAND)

        if 0:
            self.scope = scopesink2.scope_sink_f(self.panel, sample_rate=demod_rate)
            self.connect (self.guts.fm_demod,self.scope)
            vbox.Add (self.scope.win,4,wx.EXPAND)

        # control area form at bottom
        self.myform = myform = form.form()

        hbox = wx.BoxSizer(wx.HORIZONTAL)
        hbox.Add((5,0), 0)
        myform['freq'] = form.float_field(
            parent=self.panel, sizer=hbox, label="Freq", weight=1,
            callback=myform.check_input_and_call(_form_set_freq, self._set_status_msg))

        hbox.Add((5,0), 0)
        myform['freq_slider'] = \
            form.quantized_slider_field(parent=self.panel, sizer=hbox, weight=3,
                                        range=(self.fm_freq_min, self.fm_freq_max, 0.1e6),
                                        callback=self.set_freq)
        hbox.Add((5,0), 0)
        vbox.Add(hbox, 0, wx.EXPAND)

        hbox = wx.BoxSizer(wx.HORIZONTAL)
        hbox.Add((5,0), 0)

        myform['volume'] = \
            form.quantized_slider_field(parent=self.panel, sizer=hbox, label="Volume",
                                        weight=3, range=self.volume_range(),
                                        callback=self.set_vol)
        hbox.Add((5,0), 1)

        g = self.u.get_gain_range()
        myform['gain'] = \
            form.quantized_slider_field(parent=self.panel, sizer=hbox, label="Gain",
                                        weight=3, range=(g.start(), g.stop(), g.step()),
                                        callback=self.set_gain)
        hbox.Add((5,0), 0)

        myform['sqlch_thrsh'] = \
            form.quantized_slider_field(parent=self.panel, sizer=hbox, label="Stereo Squelch Threshold",
                                        weight=3, range=(0.0,1.0,0.01),
                                        callback=self.set_squelch)
        hbox.Add((5,0), 0)
        vbox.Add(hbox, 0, wx.EXPAND)

        # Optional Griffin Powermate / Contour knob support.
        try:
            self.knob = powermate.powermate(self.frame)
            self.rot = 0
            powermate.EVT_POWERMATE_ROTATE (self.frame, self.on_rotate)
            powermate.EVT_POWERMATE_BUTTON (self.frame, self.on_button)
        except:
            print "FYI: No Powermate or Contour Knob found"

    def on_rotate (self, event):
        """Knob rotation: tune in 100 kHz steps or step the volume, depending
        on the current knob mode (3 detents per step)."""
        self.rot += event.delta
        if (self.state == "FREQ"):
            if self.rot >= 3:
                self.set_freq(self.freq + .1e6)
                self.rot -= 3
            elif self.rot <=-3:
                self.set_freq(self.freq - .1e6)
                self.rot += 3
        else:
            step = self.volume_range()[2]
            if self.rot >= 3:
                self.set_vol(self.vol + step)
                self.rot -= 3
            elif self.rot <=-3:
                self.set_vol(self.vol - step)
                self.rot += 3

    def on_button (self, event):
        # Knob button press toggles between FREQ and VOL mode.
        if event.value == 0:  # button up
            return
        self.rot = 0
        if self.state == "FREQ":
            self.state = "VOL"
        else:
            self.state = "FREQ"
        self.update_status_bar ()

    def set_vol (self, vol):
        """Clamp *vol* to the allowed range and apply it to both channels."""
        g = self.volume_range()
        self.vol = max(g[0], min(g[1], vol))
        # Slider works in dB; convert to a linear multiplier.
        self.volume_control_l.set_k(10**(self.vol/10))
        self.volume_control_r.set_k(10**(self.vol/10))
        self.myform['volume'].set_value(self.vol)
        self.update_status_bar ()

    def set_squelch(self,squelch_threshold):
        try:
            self.guts.stereo_carrier_pll_recovery.set_lock_threshold(squelch_threshold);
        except:
            print "FYI: This implementation of the stereo_carrier_pll_recovery has no squelch implementation yet"

    def set_freq(self, target_freq):
        """
        Set the center frequency we're interested in.

        Args:
            target_freq: frequency in Hz

        Returns:
            bool: True if the device accepted the frequency.
        """
        r = self.u.set_center_freq(target_freq)

        if r:
            self.freq = target_freq
            self.myform['freq'].set_value(target_freq)         # update displayed value
            self.myform['freq_slider'].set_value(target_freq)  # update displayed value
            self.update_status_bar()
            self._set_status_msg("OK", 0)
            return True

        self._set_status_msg("Failed", 0)
        return False

    def set_gain(self, gain):
        self.myform['gain'].set_value(gain)  # update displayed value
        self.u.set_gain(gain)

    def update_status_bar (self):
        # Refresh the status bar and re-center the source FFT display.
        msg = "Volume:%r Setting:%s" % (self.vol, self.state)
        self._set_status_msg(msg, 1)
        self.src_fft.set_baseband_freq(self.freq)

    def volume_range(self):
        # (min dB, max dB, step)
        return (-20.0, 0.0, 0.5)
# Launch the wx application with the receiver flowgraph.
if __name__ == '__main__':
    app = stdgui2.stdapp (wfm_rx_block, "USRP WFM RX")
    app.MainLoop ()
| gpl-3.0 |
boundlessgeo/QGIS | python/plugins/db_manager/dlg_sql_window.py | 6 | 24639 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : brush.tyler@gmail.com
The content of this file is based on
- PG_Manager by Martin Dobias (GPLv2 license)
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import zip
from builtins import next
from builtins import str
from hashlib import md5
import os
from qgis.PyQt.QtCore import Qt, pyqtSignal, QDir
from qgis.PyQt.QtWidgets import QDialog, QWidget, QAction, QApplication, QInputDialog, QStyledItemDelegate, QTableWidgetItem, QFileDialog
from qgis.PyQt.QtGui import QKeySequence, QCursor, QClipboard, QIcon, QStandardItemModel, QStandardItem
from qgis.PyQt.Qsci import QsciAPIs
from qgis.core import (
QgsProject,
QgsApplication,
QgsTask,
QgsSettings,
QgsMapLayerType
)
from qgis.utils import OverrideCursor
from .db_plugins.plugin import BaseError
from .db_plugins.postgis.plugin import PGDatabase
from .dlg_db_error import DlgDbError
from .dlg_query_builder import QueryBuilderDlg
try:
from qgis.gui import QgsCodeEditorSQL # NOQA
except:
from .sqledit import SqlEdit
from qgis import gui
gui.QgsCodeEditorSQL = SqlEdit
from .ui.ui_DlgSqlWindow import Ui_DbManagerDlgSqlWindow as Ui_Dialog
import re
class DlgSqlWindow(QWidget, Ui_Dialog):
    # Emitted when the preset-name line edit changes, so the host window can
    # retitle its tab.
    nameChanged = pyqtSignal(str)
    # Maximum number of entries kept per connection in the query history.
    QUERY_HISTORY_LIMIT = 20
    def __init__(self, iface, db, parent=None):
        """Build the SQL window for connection *db* and wire up its widgets."""
        QWidget.__init__(self, parent)
        self.mainWindow = parent
        self.iface = iface
        self.db = db
        self.dbType = db.connection().typeNameString()
        self.connectionName = db.connection().connectionName()
        self.filter = ""
        self.modelAsync = None
        self.allowMultiColumnPk = isinstance(db, PGDatabase)  # at the moment only PostgreSQL allows a primary key to span multiple columns, SpatiaLite doesn't
        self.aliasSubQuery = isinstance(db, PGDatabase)  # only PostgreSQL requires subqueries to be aliases
        self.setupUi(self)
        self.setWindowTitle(
            self.tr(u"{0} - {1} [{2}]").format(self.windowTitle(), self.connectionName, self.dbType))

        self.defaultLayerName = self.tr('QueryLayer')

        if self.allowMultiColumnPk:
            self.uniqueColumnCheck.setText(self.tr("Column(s) with unique values"))
        else:
            self.uniqueColumnCheck.setText(self.tr("Column with unique values"))

        self.editSql.setFocus()
        self.editSql.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
        self.editSql.setMarginVisible(True)
        self.initCompleter()

        # Per-connection query history persisted through QgsSettings.
        settings = QgsSettings()
        self.history = settings.value('DB_Manager/queryHistory/' + self.dbType, {self.connectionName: []})
        if self.connectionName not in self.history:
            self.history[self.connectionName] = []

        self.queryHistoryWidget.setVisible(False)
        self.queryHistoryTableWidget.verticalHeader().hide()
        self.queryHistoryTableWidget.doubleClicked.connect(self.insertQueryInEditor)
        self.populateQueryHistory()
        self.btnQueryHistory.toggled.connect(self.showHideQueryHistory)

        # Execution controls: cancel button and progress bar start disabled.
        self.btnCancel.setEnabled(False)
        self.btnCancel.clicked.connect(self.executeSqlCanceled)
        self.btnCancel.setShortcut(QKeySequence.Cancel)
        self.progressBar.setEnabled(False)
        self.progressBar.setRange(0, 100)
        self.progressBar.setValue(0)
        self.progressBar.setFormat("")
        self.progressBar.setAlignment(Qt.AlignCenter)

        # allow copying results
        copyAction = QAction("copy", self)
        self.viewResult.addAction(copyAction)
        copyAction.setShortcuts(QKeySequence.Copy)
        copyAction.triggered.connect(self.copySelectedResults)

        self.btnExecute.clicked.connect(self.executeSql)
        self.btnSetFilter.clicked.connect(self.setFilter)
        self.btnClear.clicked.connect(self.clearSql)

        # Saved-preset management.
        self.presetStore.clicked.connect(self.storePreset)
        self.presetSaveAsFile.clicked.connect(self.saveAsFilePreset)
        self.presetLoadFile.clicked.connect(self.loadFilePreset)
        self.presetDelete.clicked.connect(self.deletePreset)
        self.presetCombo.activated[str].connect(self.loadPreset)
        self.presetCombo.activated[str].connect(self.presetName.setText)

        self.updatePresetsCombobox()

        self.geomCombo.setEditable(True)
        self.geomCombo.lineEdit().setReadOnly(True)

        self.uniqueCombo.setEditable(True)
        self.uniqueCombo.lineEdit().setReadOnly(True)
        self.uniqueModel = QStandardItemModel(self.uniqueCombo)
        self.uniqueCombo.setModel(self.uniqueModel)
        if self.allowMultiColumnPk:
            self.uniqueCombo.setItemDelegate(QStyledItemDelegate())
            self.uniqueModel.itemChanged.connect(self.uniqueChanged)  # react to the (un)checking of an item
            self.uniqueCombo.lineEdit().textChanged.connect(self.uniqueTextChanged)  # there are other events that change the displayed text and some of them can not be caught directly

        # hide the load query as layer if feature is not supported
        self._loadAsLayerAvailable = self.db.connector.hasCustomQuerySupport()
        self.loadAsLayerGroup.setVisible(self._loadAsLayerAvailable)
        if self._loadAsLayerAvailable:
            self.layerTypeWidget.hide()  # show if load as raster is supported
            self.loadLayerBtn.clicked.connect(self.loadSqlLayer)
            self.getColumnsBtn.clicked.connect(self.fillColumnCombos)
            self.loadAsLayerGroup.toggled.connect(self.loadAsLayerToggled)
            self.loadAsLayerToggled(False)

        self._createViewAvailable = self.db.connector.hasCreateSpatialViewSupport()
        self.btnCreateView.setVisible(self._createViewAvailable)
        if self._createViewAvailable:
            self.btnCreateView.clicked.connect(self.createView)

        self.queryBuilderFirst = True
        self.queryBuilderBtn.setIcon(QIcon(":/db_manager/icons/sql.gif"))
        self.queryBuilderBtn.clicked.connect(self.displayQueryBuilder)

        self.presetName.textChanged.connect(self.nameChanged)
def insertQueryInEditor(self, item):
    """Insert the query text carried by *item* into the SQL editor."""
    self.editSql.insertText(item.data(Qt.DisplayRole))
def showHideQueryHistory(self, visible):
    """Show or hide the query-history panel."""
    panel = self.queryHistoryWidget
    panel.setVisible(visible)
def populateQueryHistory(self):
    """Rebuild the query-history table from self.history.

    Each record is inserted at row 0, so iterating the stored list
    front-to-back puts the newest entry at the top of the view.
    """
    self.queryHistoryTableWidget.clearContents()
    self.queryHistoryTableWidget.setRowCount(0)
    dictlist = self.history[self.connectionName]
    if not dictlist:
        return
    for i in range(len(dictlist)):
        self.queryHistoryTableWidget.insertRow(0)
        queryItem = QTableWidgetItem(dictlist[i]['query'])
        rowsItem = QTableWidgetItem(str(dictlist[i]['rows']))
        durationItem = QTableWidgetItem(str(dictlist[i]['secs']))
        # row 0 is the row just inserted above
        self.queryHistoryTableWidget.setItem(0, 0, queryItem)
        self.queryHistoryTableWidget.setItem(0, 1, rowsItem)
        self.queryHistoryTableWidget.setItem(0, 2, durationItem)
    self.queryHistoryTableWidget.resizeColumnsToContents()
    self.queryHistoryTableWidget.resizeRowsToContents()
def writeQueryHistory(self, sql, affectedRows, secs):
    """Append one record to the per-connection history and persist it."""
    entries = self.history[self.connectionName]
    # keep the history bounded: drop the oldest entry first
    if len(entries) >= self.QUERY_HISTORY_LIMIT:
        entries.pop(0)
    settings = QgsSettings()
    entries.append({'query': sql,
                    'rows': affectedRows,
                    'secs': secs})
    settings.setValue('DB_Manager/queryHistory/' + self.dbType, self.history)
    self.populateQueryHistory()
def getQueryHash(self, name):
    """Return a stable settings key for *name*: 'q' + MD5 hex digest."""
    digest = md5(name.encode('utf8')).hexdigest()
    return 'q' + digest
def updatePresetsCombobox(self):
    """Reload the preset combobox with all saved query names, sorted."""
    self.presetCombo.clear()
    project = QgsProject.instance()
    entries = project.subkeyList('DBManager', 'savedQueries')
    names = [project.readEntry('DBManager', 'savedQueries/' + entry + '/name')[0]
             for entry in entries]
    for name in sorted(names):
        self.presetCombo.addItem(name)
    self.presetCombo.setCurrentIndex(-1)
def storePreset(self):
    """Save the current SQL under the name typed in the preset line edit."""
    query = self._getSqlQuery()
    if query == "":
        return
    name = str(self.presetName.text())
    key = 'savedQueries/' + self.getQueryHash(name)
    project = QgsProject.instance()
    project.writeEntry('DBManager', key + '/name', name)
    project.writeEntry('DBManager', key + '/query', query)
    # select the preset in the combo, adding it if it is new
    index = self.presetCombo.findText(name)
    if index == -1:
        self.presetCombo.addItem(name)
        self.presetCombo.setCurrentIndex(self.presetCombo.count() - 1)
    else:
        self.presetCombo.setCurrentIndex(index)
def saveAsFilePreset(self):
    """Save the current SQL query to a .sql file chosen by the user.

    Remembers the directory of the chosen file in QgsSettings so the
    next save/load dialog opens there.
    """
    settings = QgsSettings()
    # Read the same key that is written below.  The original read a
    # misspelled 'DB_Manager/lastDirSQLFIle' key, so the directory
    # stored on save was never restored.
    lastDir = settings.value('DB_Manager/lastDirSQLFile', "")
    query = self._getSqlQuery()
    if query == "":
        return
    filename, _ = QFileDialog.getSaveFileName(
        self,
        self.tr('Save SQL Query'),
        lastDir,
        # Qt name filters separate glob patterns with spaces, not commas
        self.tr("SQL File (*.sql *.SQL)"))
    if filename:
        if not filename.lower().endswith('.sql'):
            filename += ".sql"
        with open(filename, 'w') as f:
            f.write(query)
        lastDir = os.path.dirname(filename)
        settings.setValue('DB_Manager/lastDirSQLFile', lastDir)
def loadFilePreset(self):
    """Load a SQL query from a .sql file into the editor."""
    settings = QgsSettings()
    # Use the same key that saveAsFilePreset writes (the original read
    # a misspelled 'DB_Manager/lastDirSQLFIle' key).
    lastDir = settings.value('DB_Manager/lastDirSQLFile', "")
    filename, _ = QFileDialog.getOpenFileName(
        self,
        self.tr("Load SQL Query"),
        lastDir,
        # Qt name filters separate glob patterns with spaces, not commas
        self.tr("SQL File (*.sql *.SQL)"))
    if filename:
        with open(filename, 'r') as f:
            self.editSql.clear()
            for line in f:
                self.editSql.insertText(line)
        lastDir = os.path.dirname(filename)
        settings.setValue('DB_Manager/lastDirSQLFile', lastDir)
def deletePreset(self):
    """Remove the currently selected preset from the project and the combo."""
    combo = self.presetCombo
    name = combo.currentText()
    key = 'savedQueries/' + self.getQueryHash(name)
    QgsProject.instance().removeEntry('DBManager', key)
    combo.removeItem(combo.findText(name))
    combo.setCurrentIndex(-1)
def loadPreset(self, name):
    """Load the stored query named *name* into the SQL editor."""
    key = 'savedQueries/' + self.getQueryHash(name) + '/query'
    query = QgsProject.instance().readEntry('DBManager', key)[0]
    self.editSql.setText(query)
def loadAsLayerToggled(self, checked):
    """Sync the 'load as layer' group state; populate combos when enabled."""
    self.loadAsLayerGroup.setChecked(checked)
    self.loadAsLayerWidget.setVisible(checked)
    if checked:
        self.fillColumnCombos()
def clearSql(self):
    """Clear the SQL editor, give it focus and drop any stored filter."""
    editor = self.editSql
    editor.clear()
    editor.setFocus()
    self.filter = ""
def updateUiWhileSqlExecution(self, status):
    """Toggle the UI between 'query running' and idle states.

    status=True disables everything except the Cancel button and puts
    the progress bar into busy (indeterminate) mode; status=False
    restores the normal UI.
    """
    if status:
        # disable all tabs except the current one
        for i in range(0, self.mainWindow.tabs.count()):
            if i != self.mainWindow.tabs.currentIndex():
                self.mainWindow.tabs.setTabEnabled(i, False)
        self.mainWindow.menuBar.setEnabled(False)
        self.mainWindow.toolBar.setEnabled(False)
        self.mainWindow.tree.setEnabled(False)
        for w in self.findChildren(QWidget):
            w.setEnabled(False)
        # Cancel stays clickable while the query runs
        self.btnCancel.setEnabled(True)
        self.progressBar.setEnabled(True)
        # range (0, 0) makes the progress bar show a busy indicator
        self.progressBar.setRange(0, 0)
    else:
        for i in range(0, self.mainWindow.tabs.count()):
            if i != self.mainWindow.tabs.currentIndex():
                self.mainWindow.tabs.setTabEnabled(i, True)
        self.mainWindow.refreshTabs()
        self.mainWindow.menuBar.setEnabled(True)
        self.mainWindow.toolBar.setEnabled(True)
        self.mainWindow.tree.setEnabled(True)
        for w in self.findChildren(QWidget):
            w.setEnabled(True)
        self.btnCancel.setEnabled(False)
        self.progressBar.setRange(0, 100)
        self.progressBar.setEnabled(False)
def executeSqlCanceled(self):
    """Disable Cancel and ask the running query model to abort."""
    self.btnCancel.setEnabled(False)
    self.modelAsync.cancel()
def executeSqlCompleted(self):
    """Slot run when the async SQL task finishes (success, error or cancel)."""
    self.updateUiWhileSqlExecution(False)
    with OverrideCursor(Qt.WaitCursor):
        if self.modelAsync.task.status() == QgsTask.Complete:
            model = self.modelAsync.model
            quotedCols = []
            # show the result set and summary line
            self.viewResult.setModel(model)
            self.lblResult.setText(self.tr("{0} rows, {1:.3f} seconds").format(model.affectedRows(), model.secs()))
            cols = self.viewResult.model().columnNames()
            for col in cols:
                quotedCols.append(self.db.connector.quoteId(col))
            self.setColumnCombos(cols, quotedCols)
            self.writeQueryHistory(self.modelAsync.task.sql, model.affectedRows(), model.secs())
            self.update()
        elif not self.modelAsync.canceled:
            # task failed (not user-cancelled): report and reset the combos
            DlgDbError.showError(self.modelAsync.error, self)
            self.uniqueModel.clear()
            self.geomCombo.clear()
def executeSql(self):
    """Run the editor's SQL asynchronously through the QGIS task manager."""
    sql = self._getExecutableSqlQuery()
    if sql == "":
        return
    # delete the old model
    old_model = self.viewResult.model()
    self.viewResult.setModel(None)
    if old_model:
        old_model.deleteLater()
    try:
        self.modelAsync = self.db.sqlResultModelAsync(sql, self)
        self.modelAsync.done.connect(self.executeSqlCompleted)
        self.updateUiWhileSqlExecution(True)
        QgsApplication.taskManager().addTask(self.modelAsync.task)
    except Exception as e:
        # creating/scheduling the task failed: report and reset the combos
        DlgDbError.showError(e, self)
        self.uniqueModel.clear()
        self.geomCombo.clear()
        return
def _getSqlLayer(self, _filter):
    """Create a map layer from the current query, or None on failure.

    _filter: subset string applied to the new layer.
    """
    hasUniqueField = self.uniqueColumnCheck.checkState() == Qt.Checked
    if hasUniqueField:
        if self.allowMultiColumnPk:
            # collect every checked column into a composite key
            checkedCols = []
            for item in self.uniqueModel.findItems("*", Qt.MatchWildcard):
                if item.checkState() == Qt.Checked:
                    checkedCols.append(item.data())
            uniqueFieldName = ",".join(checkedCols)
        elif self.uniqueCombo.currentIndex() >= 0:
            uniqueFieldName = self.uniqueModel.item(self.uniqueCombo.currentIndex()).data()
        else:
            uniqueFieldName = None
    else:
        uniqueFieldName = None
    hasGeomCol = self.hasGeometryCol.checkState() == Qt.Checked
    if hasGeomCol:
        geomFieldName = self.geomCombo.currentText()
    else:
        geomFieldName = None
    query = self._getExecutableSqlQuery()
    if query == "":
        return None
    # remove a trailing ';' from query if present
    if query.strip().endswith(';'):
        query = query.strip()[:-1]
    layerType = QgsMapLayerType.VectorLayer if self.vectorRadio.isChecked() else QgsMapLayerType.RasterLayer
    # get a new layer name that is not yet used in the project
    names = []
    for layer in list(QgsProject.instance().mapLayers().values()):
        names.append(layer.name())
    layerName = self.layerNameEdit.text()
    if layerName == "":
        layerName = self.defaultLayerName
    newLayerName = layerName
    index = 1
    while newLayerName in names:
        index += 1
        newLayerName = u"%s_%d" % (layerName, index)
    # create the layer
    layer = self.db.toSqlLayer(query, geomFieldName, uniqueFieldName, newLayerName, layerType,
                               self.avoidSelectById.isChecked(), _filter)
    if layer.isValid():
        return layer
    else:
        e = BaseError(self.tr("There was an error creating the SQL layer, please check the logs for further information."))
        DlgDbError.showError(e, self)
        return None
def loadSqlLayer(self):
    """Build a layer from the current query and add it to the project."""
    with OverrideCursor(Qt.WaitCursor):
        layer = self._getSqlLayer(self.filter)
        if layer is not None:
            QgsProject.instance().addMapLayers([layer], True)
def fillColumnCombos(self):
    """Run the query with zero rows to discover its columns, then fill the combos."""
    query = self._getExecutableSqlQuery()
    if query == "":
        return
    with OverrideCursor(Qt.WaitCursor):
        # remove a trailing ';' from query if present
        if query.strip().endswith(';'):
            query = query.strip()[:-1]
        # get all the columns
        quotedCols = []
        connector = self.db.connector
        if self.aliasSubQuery:
            # get a new alias that does not already occur in the query
            aliasIndex = 0
            while True:
                alias = "_subQuery__%d" % aliasIndex
                # match the alias as a whole word, optionally double-quoted
                escaped = re.compile('\\b("?)' + re.escape(alias) + '\\1\\b')
                if not escaped.search(query):
                    break
                aliasIndex += 1
            sql = u"SELECT * FROM (%s\n) AS %s LIMIT 0" % (str(query), connector.quoteId(alias))
        else:
            sql = u"SELECT * FROM (%s\n) WHERE 1=0" % str(query)
        c = None
        try:
            c = connector._execute(None, sql)
            cols = connector._get_cursor_columns(c)
            for col in cols:
                quotedCols.append(connector.quoteId(col))
        except BaseError as e:
            # probe query failed: report and reset the combos
            DlgDbError.showError(e, self)
            self.uniqueModel.clear()
            self.geomCombo.clear()
            return
        finally:
            if c:
                c.close()
                del c
        self.setColumnCombos(cols, quotedCols)
def setColumnCombos(self, cols, quotedCols):
    """Fill the geometry and unique-id combos from the result columns.

    cols / quotedCols are parallel lists of plain and quoted column
    names.  Previously selected values are preserved when the same
    column still exists; otherwise sensible defaults are applied.
    """
    # get sensible default columns. do this before sorting in case there's
    # hints in the column order (e.g., id is more likely to be first)
    try:
        defaultGeomCol = next(col for col in cols if col in ['geom', 'geometry', 'the_geom', 'way'])
    except StopIteration:  # narrowed from a bare except: next() only raises this here
        defaultGeomCol = None
    try:
        defaultUniqueCol = [col for col in cols if 'id' in col][0]
    except IndexError:  # narrowed from a bare except: [0] on an empty list
        defaultUniqueCol = None
    colNames = sorted(zip(cols, quotedCols))
    newItems = []
    uniqueIsFilled = False
    for (col, quotedCol) in colNames:
        item = QStandardItem(col)
        item.setData(quotedCol)
        item.setEnabled(True)
        item.setCheckable(self.allowMultiColumnPk)
        item.setSelectable(not self.allowMultiColumnPk)
        if self.allowMultiColumnPk:
            # keep the check state of columns that were already listed
            matchingItems = self.uniqueModel.findItems(col)
            if matchingItems:
                item.setCheckState(matchingItems[0].checkState())
                uniqueIsFilled = uniqueIsFilled or matchingItems[0].checkState() == Qt.Checked
            else:
                item.setCheckState(Qt.Unchecked)
        newItems.append(item)
    if self.allowMultiColumnPk:
        self.uniqueModel.clear()
        self.uniqueModel.appendColumn(newItems)
        self.uniqueChanged()
    else:
        previousUniqueColumn = self.uniqueCombo.currentText()
        self.uniqueModel.clear()
        self.uniqueModel.appendColumn(newItems)
        if self.uniqueModel.findItems(previousUniqueColumn):
            self.uniqueCombo.setEditText(previousUniqueColumn)
            uniqueIsFilled = True
    oldGeometryColumn = self.geomCombo.currentText()
    self.geomCombo.clear()
    self.geomCombo.addItems(cols)
    self.geomCombo.setCurrentIndex(self.geomCombo.findText(oldGeometryColumn, Qt.MatchExactly))
    # set sensible default columns if the columns are not already set
    try:
        if self.geomCombo.currentIndex() == -1:
            self.geomCombo.setCurrentIndex(cols.index(defaultGeomCol))
    except ValueError:  # defaultGeomCol is None or not present in cols
        pass
    # guard against defaultUniqueCol being None: findItems(None) would raise
    if defaultUniqueCol is not None and not uniqueIsFilled:
        items = self.uniqueModel.findItems(defaultUniqueCol)
        if items:
            if self.allowMultiColumnPk:
                items[0].setCheckState(Qt.Checked)
            else:
                self.uniqueCombo.setEditText(defaultUniqueCol)
def copySelectedResults(self):
    """Copy the selected result rows (tab separated) to the clipboard."""
    if not self.viewResult.selectedIndexes():
        return
    model = self.viewResult.model()
    # convert to string using tab as separator
    rows = [model.headerToString("\t")]
    for idx in self.viewResult.selectionModel().selectedRows():
        rows.append(model.rowToString(idx.row(), "\t"))
    text = "\n".join(rows)
    QApplication.clipboard().setText(text, QClipboard.Selection)
    QApplication.clipboard().setText(text, QClipboard.Clipboard)
def initCompleter(self):
    """(Re)build the SQL autocompletion API for the editor's lexer."""
    dictionary = None
    if self.db:
        dictionary = self.db.connector.getSqlDictionary()
    if not dictionary:
        # use the generic sql dictionary
        from .sql_dictionary import getSqlDictionary
        dictionary = getSqlDictionary()
    # flatten all word lists and drop duplicates
    words = set()
    for value in dictionary.values():
        words.update(value)
    api = QsciAPIs(self.editSql.lexer())
    for word in words:
        api.add(word)
    api.prepare()
    self.editSql.lexer().setAPIs(api)
def displayQueryBuilder(self):
    """Open the SQL query builder dialog and apply the accepted query."""
    builder = QueryBuilderDlg(self.iface, self.db, self, reset=self.queryBuilderFirst)
    self.queryBuilderFirst = False
    if builder.exec_() == QDialog.Accepted:
        self.editSql.setText(builder.query)
def createView(self):
    """Ask for a view name and create a spatial view from the current query."""
    name, ok = QInputDialog.getText(None, self.tr("View Name"), self.tr("View name"))
    if not ok:
        return
    try:
        self.db.connector.createSpatialView(name, self._getExecutableSqlQuery())
    except BaseError as e:
        DlgDbError.showError(e, self)
def _getSqlQuery(self):
sql = self.editSql.selectedText()
if len(sql) == 0:
sql = self.editSql.text()
return sql
def _getExecutableSqlQuery(self):
sql = self._getSqlQuery()
# Clean it up!
lines = []
for line in sql.split('\n'):
if not line.strip().startswith('--'):
lines.append(line)
sql = ' '.join(lines)
return sql.strip()
def uniqueChanged(self):
    """React to an item being (un)checked by refreshing the combobox text."""
    self.uniqueTextChanged(None)
def uniqueTextChanged(self, text):
    """Keep the combo's display text equal to the list of checked columns.

    Other events can change the displayed text, so whenever new text
    appears we recompute the correct label and restore it if needed.
    """
    checked = [item.text()
               for item in self.uniqueModel.findItems("*", Qt.MatchWildcard)
               if item.checkState() == Qt.Checked]
    label = ", ".join(checked)
    if text != label:
        self.uniqueCombo.setEditText(label)
def setFilter(self):
    """Let the user edit the layer filter through the QGIS query builder."""
    from qgis.gui import QgsQueryBuilder
    layer = self._getSqlLayer("")
    if not layer:
        return
    builder = QgsQueryBuilder(layer)
    builder.setSql(self.filter)
    accepted = builder.exec_()
    if accepted:
        self.filter = builder.sql()
    layer.deleteLater()
| gpl-2.0 |
michalkurka/h2o-3 | h2o-py/tests/testdir_algos/gbm/pyunit_grid_f2_f0point5_metrics_PUBDEV-6753.py | 2 | 3992 | from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.grid.grid_search import H2OGridSearch
def grid_f0point5_metrics():
    """Check that sorting a grid by F0.5 matches each model's own metric.

    The value reported in ``sorted_metric_table()`` for each of the three
    grid models must equal the F0.5 recomputed on that model's
    validation data.
    """
    gbm_grid1 = train_grid()
    gbm_gridper_f0point5 = gbm_grid1.get_grid(sort_by='f0point5', decreasing=True)
    print(gbm_gridper_f0point5)
    sorted_metric_table_f0point5 = gbm_gridper_f0point5.sorted_metric_table()
    errorMsg = "Expected that metric value from sorted_metric_table is equal to corresponding metric for the model"
    # Compare the value the grid was sorted with against the metric
    # recomputed from each model (the original repeated this comparison
    # verbatim for models 0..2).
    for rank in range(3):
        print("Model %d:" % rank)
        model = gbm_gridper_f0point5.models[rank]
        metric_valid = model.F0point5(valid=True)
        print(metric_valid)
        assert float(metric_valid[0][1]) == float(sorted_metric_table_f0point5['f0point5'][rank]), errorMsg
def grid_f2_metrics():
    """Check that sorting a grid by F2 matches each model's own metric.

    The value reported in ``sorted_metric_table()`` for each of the three
    grid models must equal the F2 recomputed on that model's validation
    data.
    """
    gbm_grid1 = train_grid()
    gbm_gridper_f2 = gbm_grid1.get_grid(sort_by='f2', decreasing=True)
    print(gbm_gridper_f2)
    sorted_metric_table_f2 = gbm_gridper_f2.sorted_metric_table()
    errorMsg = "Expected that metric value from sorted_metric_table is equal to corresponding metric for the model"
    # Compare the value the grid was sorted with against the metric
    # recomputed from each model (the original repeated this comparison
    # verbatim for models 0..2).
    for rank in range(3):
        print("Model %d:" % rank)
        model = gbm_gridper_f2.models[rank]
        metric_valid = model.F2(valid=True)
        print(metric_valid)
        assert float(metric_valid[0][1]) == float(sorted_metric_table_f2['f2'][rank]), errorMsg
def train_grid():
    """Train a small cartesian GBM grid on the higgs sample data and return it."""
    # Import a sample binary outcome dataset into H2O
    data = h2o.import_file(pyunit_utils.locate("smalldata/testng/higgs_train_5k.csv"))
    test = h2o.import_file(pyunit_utils.locate("smalldata/testng/higgs_test_5k.csv"))
    # Identify predictors and response
    y = "response"
    x = data.columns
    x.remove(y)
    # For binary classification, response should be a factor
    data[y] = data[y].asfactor()
    test[y] = test[y].asfactor()
    # Split data into train & validation
    splits = data.split_frame(seed=1)
    train, valid = splits[0], splits[1]
    # GBM hyperparameters
    hyper_params = {
        'learn_rate': [0.01],
        'max_depth': [3],
        'sample_rate': [0.8],
        'col_sample_rate': [0.2, 0.5, 1.0],
    }
    # Train and validate a cartesian grid of GBMs
    grid = H2OGridSearch(model=H2OGradientBoostingEstimator,
                         grid_id='gbm_grid1',
                         hyper_params=hyper_params)
    grid.train(x=x, y=y,
               training_frame=train,
               validation_frame=valid,
               ntrees=100,
               seed=1)
    return grid
# Execute both grid-sorting consistency checks as a pyunit test suite.
pyunit_utils.run_tests([
    grid_f0point5_metrics,
    grid_f2_metrics
])
| apache-2.0 |
felixonmars/pytest-django | tests/test_fixtures.py | 9 | 12437 | """Tests for user-visible fixtures.
Not quite all fixtures are tested here, the db and transactional_db
fixtures are tested in test_database.
"""
from __future__ import with_statement
import pytest
from django.conf import settings as real_settings
from django.test.client import Client, RequestFactory
from django.test.testcases import connections_support_transactions
from pytest_django.lazy_django import get_django_version
from pytest_django_test.app.models import Item
from pytest_django_test.compat import force_text, HTTPError, urlopen
from pytest_django_test.db_helpers import noop_transactions
def test_client(client):
    # the ``client`` fixture must be a Django test Client
    assert isinstance(client, Client)
@pytest.mark.django_db
def test_admin_client(admin_client):
    # ``admin_client`` is logged in as a superuser, so the admin-only
    # view must answer with the admin greeting
    assert isinstance(admin_client, Client)
    resp = admin_client.get('/admin-required/')
    assert force_text(resp.content) == 'You are an admin'
def test_admin_client_no_db_marker(admin_client):
    # same as above but without the django_db marker: the admin_client
    # fixture must request database access itself
    assert isinstance(admin_client, Client)
    resp = admin_client.get('/admin-required/')
    assert force_text(resp.content) == 'You are an admin'
@pytest.mark.django_db
def test_admin_user(admin_user, django_user_model):
    # ``admin_user`` must be an instance of the active user model
    assert isinstance(admin_user, django_user_model)
def test_admin_user_no_db_marker(admin_user, django_user_model):
    # the admin_user fixture must work without an explicit django_db marker
    assert isinstance(admin_user, django_user_model)
def test_rf(rf):
    # the ``rf`` fixture must be a Django RequestFactory
    assert isinstance(rf, RequestFactory)
class TestSettings:
    """Tests for the settings fixture, order matters"""

    def test_modify_existing(self, settings):
        # changes made through the fixture are mirrored in the real settings
        assert settings.SECRET_KEY == 'foobar'
        assert real_settings.SECRET_KEY == 'foobar'
        settings.SECRET_KEY = 'spam'
        assert settings.SECRET_KEY == 'spam'
        assert real_settings.SECRET_KEY == 'spam'

    def test_modify_existing_again(self, settings):
        # the change from the previous test must have been rolled back
        assert settings.SECRET_KEY == 'foobar'
        assert real_settings.SECRET_KEY == 'foobar'

    def test_new(self, settings):
        # new attributes set through the fixture appear on the real settings
        assert not hasattr(settings, 'SPAM')
        assert not hasattr(real_settings, 'SPAM')
        settings.SPAM = 'ham'
        assert settings.SPAM == 'ham'
        assert real_settings.SPAM == 'ham'

    def test_new_again(self, settings):
        # the attribute added in test_new must be gone again
        assert not hasattr(settings, 'SPAM')
        assert not hasattr(real_settings, 'SPAM')

    def test_deleted(self, settings):
        # deleting through the fixture removes the real attribute too
        assert hasattr(settings, 'SECRET_KEY')
        assert hasattr(real_settings, 'SECRET_KEY')
        del settings.SECRET_KEY
        assert not hasattr(settings, 'SECRET_KEY')
        assert not hasattr(real_settings, 'SECRET_KEY')

    def test_deleted_again(self, settings):
        # the deletion from the previous test must have been restored
        assert hasattr(settings, 'SECRET_KEY')
        assert hasattr(real_settings, 'SECRET_KEY')
class TestLiveServer:
    """Tests for the live_server fixture (real HTTP server + test DB)."""

    # live_server needs Django's LiveServerTestCase machinery (1.4+)
    pytestmark = [
        pytest.mark.skipif(get_django_version() < (1, 4),
                           reason="Django > 1.3 required"),
    ]

    def test_url(self, live_server):
        # str()/force_text of the fixture is the server URL
        assert live_server.url == force_text(live_server)

    def test_transactions(self, live_server):
        # live_server implies transactional_db, so transactions are real
        if not connections_support_transactions():
            pytest.skip('transactions required for this test')

        assert not noop_transactions()

    def test_db_changes_visibility(self, live_server):
        # rows created in the test must be visible to the server thread
        response_data = urlopen(live_server + '/item_count/').read()
        assert force_text(response_data) == 'Item count: 0'
        Item.objects.create(name='foo')
        response_data = urlopen(live_server + '/item_count/').read()
        assert force_text(response_data) == 'Item count: 1'

    def test_fixture_db(self, db, live_server):
        Item.objects.create(name='foo')
        response_data = urlopen(live_server + '/item_count/').read()
        assert force_text(response_data) == 'Item count: 1'

    def test_fixture_transactional_db(self, transactional_db, live_server):
        Item.objects.create(name='foo')
        response_data = urlopen(live_server + '/item_count/').read()
        assert force_text(response_data) == 'Item count: 1'

    @pytest.fixture
    def item(self):
        # This has not requested database access explicitly, but the
        # live_server fixture auto-uses the transactional_db fixture.
        Item.objects.create(name='foo')

    def test_item(self, item, live_server):
        pass

    @pytest.fixture
    def item_db(self, db):
        return Item.objects.create(name='foo')

    def test_item_db(self, item_db, live_server):
        response_data = urlopen(live_server + '/item_count/').read()
        assert force_text(response_data) == 'Item count: 1'

    @pytest.fixture
    def item_transactional_db(self, transactional_db):
        return Item.objects.create(name='foo')

    def test_item_transactional_db(self, item_transactional_db, live_server):
        response_data = urlopen(live_server + '/item_count/').read()
        assert force_text(response_data) == 'Item count: 1'

    @pytest.mark.skipif(get_django_version() >= (1, 7),
                        reason="Django < 1.7 required")
    def test_serve_static(self, live_server, settings):
        """
        Test that the LiveServer serves static files by default.
        """
        response_data = urlopen(live_server + '/static/a_file.txt').read()
        assert force_text(response_data) == 'bla\n'

    @pytest.mark.django_project(extra_settings="""
        INSTALLED_APPS = [
            'django.contrib.auth',
            'django.contrib.contenttypes',
            'django.contrib.sessions',
            'django.contrib.sites',
            'django.contrib.staticfiles',
            'tpkg.app',
        ]
        STATIC_URL = '/static/'
        """)
    def test_serve_static_with_staticfiles_app(self, django_testdir, settings):
        """
        LiveServer always serves statics with ``django.contrib.staticfiles``
        handler.
        """
        django_testdir.create_test_module("""
            import pytest
            try:
                from django.utils.encoding import force_text
            except ImportError:
                from django.utils.encoding import force_unicode as force_text
            try:
                from urllib2 import urlopen, HTTPError
            except ImportError:
                from urllib.request import urlopen, HTTPError

            class TestLiveServer:
                def test_a(self, live_server, settings):
                    assert ('django.contrib.staticfiles'
                            in settings.INSTALLED_APPS)
                    response_data = urlopen(
                        live_server + '/static/a_file.txt').read()
                    assert force_text(response_data) == 'bla\\n'
            """)
        result = django_testdir.runpytest('--tb=short', '-v')
        result.stdout.fnmatch_lines(['*test_a*PASSED*'])
        assert result.ret == 0

    @pytest.mark.skipif(get_django_version() < (1, 7),
                        reason="Django >= 1.7 required")
    def test_serve_static_dj17_without_staticfiles_app(self, live_server,
                                                       settings):
        """
        Because ``django.contrib.staticfiles`` is not installed
        LiveServer can not serve statics with django >= 1.7 .
        """
        with pytest.raises(HTTPError):
            urlopen(live_server + '/static/a_file.txt').read()
@pytest.mark.django_project(extra_settings="""
    AUTH_USER_MODEL = 'app.MyCustomUser'
    INSTALLED_APPS = [
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.sessions',
        'django.contrib.sites',
        'tpkg.app',
    ]
    ROOT_URLCONF = 'tpkg.app.urls'
    """)
@pytest.mark.skipif(get_django_version() < (1, 5),
                    reason="Django >= 1.5 required")
def test_custom_user_model(django_testdir):
    # Build a throw-away project using a custom AUTH_USER_MODEL and check
    # that the admin_client fixture still works against it.
    django_testdir.create_app_file("""
        from django.contrib.auth.models import AbstractUser
        from django.db import models

        class MyCustomUser(AbstractUser):
            identifier = models.CharField(unique=True, max_length=100)

            USERNAME_FIELD = 'identifier'
        """, 'models.py')
    django_testdir.create_app_file("""
        try:
            from django.conf.urls import patterns  # Django >1.4
        except ImportError:
            from django.conf.urls.defaults import patterns  # Django 1.3

        urlpatterns = patterns(
            '',
            (r'admin-required/', 'tpkg.app.views.admin_required_view'),
        )
        """, 'urls.py')
    django_testdir.create_app_file("""
        from django.http import HttpResponse
        from django.template import Template
        from django.template.context import Context


        def admin_required_view(request):
            if request.user.is_staff:
                return HttpResponse(
                    Template('You are an admin').render(Context()))
            return HttpResponse(
                Template('Access denied').render(Context()))
        """, 'views.py')
    django_testdir.makepyfile("""
        from pytest_django_test.compat import force_text
        from tpkg.app.models import MyCustomUser

        def test_custom_user_model(admin_client):
            resp = admin_client.get('/admin-required/')
            assert force_text(resp.content) == 'You are an admin'
        """)
    # migration for the custom user model (needed on Django >= 1.7)
    django_testdir.create_app_file('', 'migrations/__init__.py')
    django_testdir.create_app_file("""
        # -*- coding: utf-8 -*-
        from __future__ import unicode_literals

        from django.db import models, migrations
        import django.utils.timezone
        import django.core.validators


        class Migration(migrations.Migration):

            dependencies = [
                ('auth', '0001_initial'),
            ]

            operations = [
                migrations.CreateModel(
                    name='MyCustomUser',
                    fields=[
                        ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                        ('password', models.CharField(max_length=128, verbose_name='password')),
                        ('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),
                        ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                        ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, max_length=30, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, verbose_name='username')),
                        ('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
                        ('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
                        ('email', models.EmailField(max_length=254, verbose_name='email address', blank=True)),
                        ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                        ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                        ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                        ('identifier', models.CharField(unique=True, max_length=100)),
                        ('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups')),
                        ('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
                    ],
                    options={
                        'abstract': False,
                        'verbose_name': 'user',
                        'verbose_name_plural': 'users',
                    },
                    bases=None,
                ),
            ]
        """, 'migrations/0001_initial.py')  # noqa

    result = django_testdir.runpytest('-s')
    result.stdout.fnmatch_lines(['*1 passed*'])
    assert result.ret == 0
| bsd-3-clause |
Titulacion-Sistemas/PythonTitulacion-EV | Lib/encodings/gb18030.py | 816 | 1031 | #
# gb18030.py: Python Unicode Codec for GB18030
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_cn, codecs
import _multibytecodec as mbc
codec = _codecs_cn.getcodec('gb18030')
class Codec(codecs.Codec):
    # Stateless encode/decode taken directly from the C gb18030 codec.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    # C codec object used by the multibyte incremental machinery
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    # C codec object used by the multibyte incremental machinery
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # C codec backing the stream reader
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # C codec backing the stream writer
    codec = codec
def getregentry():
    """Return the CodecInfo entry used to register the gb18030 codec."""
    stateless = Codec()
    return codecs.CodecInfo(
        name='gb18030',
        encode=stateless.encode,
        decode=stateless.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| mit |
sgerhart/ansible | lib/ansible/modules/network/cli/cli_config.py | 24 | 12544 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: cli_config
version_added: "2.7"
author: "Trishna Guha (@trishnaguha)"
short_description: Push text based configuration to network devices over network_cli
description:
- This module provides platform agnostic way of pushing text based
configuration to network devices over network_cli connection plugin.
options:
config:
description:
- The config to be pushed to the network device. This argument
is mutually exclusive with C(rollback) and either one of the
option should be given as input. The config should have
indentation that the device uses.
type: 'str'
commit:
description:
- The C(commit) argument instructs the module to push the
configuration to the device. This is mapped to module check mode.
type: 'bool'
replace:
description:
- If the C(replace) argument is set to C(yes), it will replace
the entire running-config of the device with the C(config)
argument value. For NXOS devices, C(replace) argument takes
path to the file on the device that will be used for replacing
the entire running-config. Nexus 9K devices only support replace.
Use I(net_put) or I(nxos_file_copy) module to copy the flat file
to remote device and then use set the fullpath to this argument.
type: 'str'
rollback:
description:
- The C(rollback) argument instructs the module to rollback the
current configuration to the identifier specified in the
argument. If the specified rollback identifier does not
exist on the remote device, the module will fail. To rollback
to the most recent commit, set the C(rollback) argument to 0.
This option is mutually exclusive with C(config).
commit_comment:
description:
- The C(commit_comment) argument specifies a text string to be used
when committing the configuration. If the C(commit) argument
is set to False, this argument is silently ignored. This argument
is only valid for the platforms that support commit operation
with comment.
type: 'str'
defaults:
description:
- The I(defaults) argument will influence how the running-config
is collected from the device. When the value is set to true,
the command used to collect the running-config is append with
the all keyword. When the value is set to false, the command
is issued without the all keyword.
default: 'no'
type: 'bool'
multiline_delimiter:
description:
- This argument is used when pushing a multiline configuration
element to the device. It specifies the character to use as
the delimiting character. This only applies to the configuration
action.
type: 'str'
diff_replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the C(diff_replace) argument is set to I(line)
then the modified lines are pushed to the device in configuration
mode. If the argument is set to I(block) then the entire command
block is pushed to the device in configuration mode if any
line is not correct. Note that this parameter will be ignored if
the platform has onbox diff support.
choices: ['line', 'block', 'config']
diff_match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If C(diff_match)
is set to I(line), commands are matched line by line. If C(diff_match)
is set to I(strict), command lines are matched with respect to position.
If C(diff_match) is set to I(exact), command lines must be an equal match.
Finally, if C(diff_match) is set to I(none), the module will not attempt
to compare the source configuration with the running configuration on the
remote device. Note that this parameter will be ignored if the platform
has onbox diff support.
choices: ['line', 'strict', 'exact', 'none']
diff_ignore_lines:
description:
- Use this argument to specify one or more lines that should be
ignored during the diff. This is used for lines in the configuration
that are automatically updated by the system. This argument takes
a list of regular expressions or exact line matches.
Note that this parameter will be ignored if the platform has onbox
diff support.
"""
EXAMPLES = """
- name: configure device with config
cli_config:
config: "{{ lookup('template', 'basic/config.j2') }}"
- name: configure device with config with defaults enabled
cli_config:
config: "{{ lookup('template', 'basic/config.j2') }}"
defaults: yes
- name: Use diff_match
cli_config:
config: "{{ lookup('file', 'interface_config') }}"
diff_match: none
- name: nxos replace config
cli_config:
replace: 'bootflash:nxoscfg'
- name: commit with comment
cli_config:
config: set system host-name foo
commit_comment: this is a test
"""
RETURN = """
commands:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['interface Loopback999', 'no shutdown']
"""
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils._text import to_text
def validate_args(module, capabilities):
    """Abort the module run if a supplied option is unsupported.

    Each user-visible option maps to a ``device_operations`` capability
    flag reported by the platform; the first option that was supplied
    but is not supported aborts the run via ``module.fail_json``.

    :param module: AnsibleModule instance; ``module.params`` holds the
        user-supplied options.
    :param capabilities: capability document from the connection; only
        the ``device_operations`` section is consulted.
    """
    device_operations = capabilities['device_operations']
    # (parameter, capability flag) pairs, in the same order as the
    # historical hand-written chain so the first failure reported is
    # unchanged.
    checks = (
        ('replace', 'supports_replace'),
        ('rollback', 'supports_rollback'),
        ('commit_comment', 'supports_commit_comment'),
        ('defaults', 'supports_defaults'),
        ('multiline_delimiter', 'supports_multiline_delimiter'),
        ('diff_replace', 'supports_diff_replace'),
        ('diff_match', 'supports_diff_match'),
        ('diff_ignore_lines', 'supports_diff_ignore_lines'),
    )
    for param, capability in checks:
        value = module.params[param]
        if param == 'rollback':
            # rollback is an integer id where 0 is legitimate, so it is
            # "supplied" whenever it is not None.
            supplied = value is not None
        else:
            supplied = bool(value)
        if supplied and not device_operations[capability]:
            module.fail_json(
                msg='%s is not supported on this platform' % param)
def run(module, capabilities, connection, candidate, running):
    """Apply the candidate configuration (or a rollback) to the device.

    Chooses one of three strategies, in order:
    1. roll back to a saved configuration id, if requested;
    2. push the candidate as-is when the platform diffs on-box;
    3. diff locally (config and banner) and push only the changes.

    :param module: AnsibleModule instance (params, check mode, warnings).
    :param capabilities: capability document; ``device_operations``
        flags select the strategy.
    :param connection: persistent connection proxy to the device.
    :param candidate: candidate configuration (text or list of lines).
    :param running: current device configuration for local diffing.
    :return: result dict with ``changed`` and, in diff mode, ``diff``.
    """
    result = {}
    resp = {}
    config_diff = []
    banner_diff = {}

    replace = module.params['replace']
    rollback_id = module.params['rollback']
    commit_comment = module.params['commit_comment']
    multiline_delimiter = module.params['multiline_delimiter']
    diff_replace = module.params['diff_replace']
    diff_match = module.params['diff_match']
    diff_ignore_lines = module.params['diff_ignore_lines']

    # In check mode nothing is committed to the device.
    commit = not module.check_mode

    # 'replace' may arrive as a boolean-ish string; normalize those, but
    # leave any other value (e.g. a file path on nxos) untouched.
    if replace in ('yes', 'true', 'True'):
        replace = True
    elif replace in ('no', 'false', 'False'):
        replace = False

    if rollback_id is not None:
        resp = connection.rollback(rollback_id, commit)
        if 'diff' in resp:
            result['changed'] = True

    elif capabilities['device_operations']['supports_onbox_diff']:
        # The device computes the diff itself; local diff options are
        # meaningless here, so warn if any were supplied.
        if diff_replace:
            module.warn('diff_replace is ignored as the device supports onbox diff')
        if diff_match:
            # NOTE: message previously misspelled the option as 'diff_mattch'.
            module.warn('diff_match is ignored as the device supports onbox diff')
        if diff_ignore_lines:
            module.warn('diff_ignore_lines is ignored as the device supports onbox diff')

        if not isinstance(candidate, list):
            candidate = candidate.strip('\n').splitlines()

        kwargs = {'candidate': candidate, 'commit': commit, 'replace': replace,
                  'comment': commit_comment}
        resp = connection.edit_config(**kwargs)

        if 'diff' in resp:
            result['changed'] = True

    elif capabilities['device_operations']['supports_generate_diff']:
        # Compute the diff locally, then push only the changed lines.
        kwargs = {'candidate': candidate, 'running': running}
        if diff_match:
            kwargs.update({'diff_match': diff_match})
        if diff_replace:
            kwargs.update({'diff_replace': diff_replace})
        if diff_ignore_lines:
            kwargs.update({'diff_ignore_lines': diff_ignore_lines})

        diff_response = connection.get_diff(**kwargs)

        config_diff = diff_response.get('config_diff')
        banner_diff = diff_response.get('banner_diff')

        if config_diff:
            if isinstance(config_diff, list):
                candidate = config_diff
            else:
                candidate = config_diff.splitlines()

            kwargs = {'candidate': candidate, 'commit': commit, 'replace': replace,
                      'comment': commit_comment}
            connection.edit_config(**kwargs)
            result['changed'] = True

        if banner_diff:
            candidate = json.dumps(banner_diff)

            kwargs = {'candidate': candidate, 'commit': commit}
            if multiline_delimiter:
                kwargs.update({'multiline_delimiter': multiline_delimiter})
            connection.edit_banner(**kwargs)
            result['changed'] = True

    if module._diff:
        if 'diff' in resp:
            # The device (or rollback) already produced a diff; use it.
            result['diff'] = {'prepared': resp['diff']}
        else:
            # Assemble the diff from the locally computed pieces.
            diff = ''
            if config_diff:
                if isinstance(config_diff, list):
                    diff += '\n'.join(config_diff)
                else:
                    diff += config_diff
            if banner_diff:
                diff += json.dumps(banner_diff)
            result['diff'] = {'prepared': diff}

    return result
def main():
    """Entry point: build the module, validate options against the
    platform's capabilities, fetch the running config and delegate the
    actual change to run().
    """
    argument_spec = dict(
        config=dict(type='str'),
        commit=dict(type='bool'),
        replace=dict(type='str'),
        rollback=dict(type='int'),
        commit_comment=dict(type='str'),
        defaults=dict(default=False, type='bool'),
        multiline_delimiter=dict(type='str'),
        diff_replace=dict(choices=['line', 'block', 'config']),
        diff_match=dict(choices=['line', 'strict', 'exact', 'none']),
        diff_ignore_lines=dict(type='list')
    )
    # A run either pushes a config or rolls back -- never both.
    mutually_exclusive = [('config', 'rollback')]
    required_one_of = [['config', 'rollback']]
    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=mutually_exclusive,
                           required_one_of=required_one_of,
                           supports_check_mode=True)
    result = {'changed': False}
    # Talk to the device over the persistent connection socket.
    connection = Connection(module._socket_path)
    capabilities = module.from_json(connection.get_capabilities())
    if capabilities:
        validate_args(module, capabilities)
    if module.params['defaults']:
        # Prefer the platform-specific flag for including default values
        # in the running config; fall back to the generic 'all' flag.
        if 'get_default_flag' in capabilities.get('rpc'):
            flags = connection.get_default_flag()
        else:
            flags = 'all'
    else:
        flags = []
    candidate = to_text(module.params['config'])
    running = connection.get_config(flags=flags)
    try:
        result.update(run(module, capabilities, connection, candidate, running))
    except Exception as exc:
        module.fail_json(msg=to_text(exc))
    module.exit_json(**result)
if __name__ == '__main__':
    main()
| mit |
linglaiyao1314/wrapt | tests/test_synchronized_lock.py | 5 | 7368 | from __future__ import print_function
import unittest
import wrapt
@wrapt.synchronized
def function():
    # Module-level fixture: a plain synchronized function. The tests
    # check that wrapt stores its lazily created lock on this function
    # object (see test_synchronized_function).
    print('function')
class C1(object):
    """Fixture with an instance, class and static method, each with
    @wrapt.synchronized applied as the OUTERMOST decorator (the order
    relative to @classmethod/@staticmethod is what the tests probe)."""

    @wrapt.synchronized
    def function1(self):
        print('function1')

    @wrapt.synchronized
    @classmethod
    def function2(cls):
        print('function2')

    @wrapt.synchronized
    @staticmethod
    def function3():
        print('function3')

# Shared instance used by the tests below.
c1 = C1()
@wrapt.synchronized
class C2(object):
    """New-style fixture class wrapped as a whole; the tests check the
    lock that appears on the class after instantiation."""
    pass
@wrapt.synchronized
class C3:
    """Old-style (Python 2) fixture class, same treatment as C2."""
    pass
class C4(object):
    """Fixture where @wrapt.synchronized is applied INNERMOST, i.e.
    underneath @classmethod/@staticmethod."""

    # XXX This yields undesirable results due to how class method is
    # implemented. The classmethod doesn't bind the method to the class
    # before calling. As a consequence, the decorator wrapper function
    # sees the instance as None with the class being explicitly passed
    # as the first argument. It isn't possible to detect and correct
    # this.
    @classmethod
    @wrapt.synchronized
    def function2(cls):
        print('function2')

    @staticmethod
    @wrapt.synchronized
    def function3():
        print('function3')

# Shared instance used by the tests below.
c4 = C4()
class TestSynchronized(unittest.TestCase):
    """Checks WHERE @wrapt.synchronized stores its lazily created lock
    (the '_synchronized_lock' attribute) for each kind of wrapped
    callable, and that the very same lock object is reused on every
    subsequent call."""

    def test_synchronized_function(self):
        """Plain function: the lock lives on the function object."""
        _lock0 = getattr(function, '_synchronized_lock', None)
        self.assertEqual(_lock0, None)
        function()
        _lock1 = getattr(function, '_synchronized_lock', None)
        self.assertNotEqual(_lock1, None)
        function()
        _lock2 = getattr(function, '_synchronized_lock', None)
        self.assertNotEqual(_lock2, None)
        self.assertEqual(_lock2, _lock1)
        function()
        _lock3 = getattr(function, '_synchronized_lock', None)
        self.assertNotEqual(_lock3, None)
        self.assertEqual(_lock3, _lock2)

    def test_synchronized_inner_staticmethod(self):
        """Static method, @synchronized outermost: lock on the method."""
        _lock0 = getattr(C1.function3, '_synchronized_lock', None)
        self.assertEqual(_lock0, None)
        c1.function3()
        _lock1 = getattr(C1.function3, '_synchronized_lock', None)
        self.assertNotEqual(_lock1, None)
        C1.function3()
        _lock2 = getattr(C1.function3, '_synchronized_lock', None)
        self.assertNotEqual(_lock2, None)
        self.assertEqual(_lock2, _lock1)
        C1.function3()
        _lock3 = getattr(C1.function3, '_synchronized_lock', None)
        self.assertNotEqual(_lock3, None)
        self.assertEqual(_lock3, _lock2)

    def test_synchronized_outer_staticmethod(self):
        """Static method, @synchronized innermost: lock on the method."""
        _lock0 = getattr(C4.function3, '_synchronized_lock', None)
        self.assertEqual(_lock0, None)
        c4.function3()
        _lock1 = getattr(C4.function3, '_synchronized_lock', None)
        self.assertNotEqual(_lock1, None)
        C4.function3()
        _lock2 = getattr(C4.function3, '_synchronized_lock', None)
        self.assertNotEqual(_lock2, None)
        self.assertEqual(_lock2, _lock1)
        C4.function3()
        _lock3 = getattr(C4.function3, '_synchronized_lock', None)
        self.assertNotEqual(_lock3, None)
        self.assertEqual(_lock3, _lock2)

    def test_synchronized_inner_classmethod(self):
        """Class method, @synchronized outermost: lock on the class."""
        if hasattr(C1, '_synchronized_lock'):
            del C1._synchronized_lock
        _lock0 = getattr(C1, '_synchronized_lock', None)
        self.assertEqual(_lock0, None)
        c1.function2()
        _lock1 = getattr(C1, '_synchronized_lock', None)
        self.assertNotEqual(_lock1, None)
        C1.function2()
        _lock2 = getattr(C1, '_synchronized_lock', None)
        self.assertNotEqual(_lock2, None)
        self.assertEqual(_lock2, _lock1)
        C1.function2()
        _lock3 = getattr(C1, '_synchronized_lock', None)
        self.assertNotEqual(_lock3, None)
        self.assertEqual(_lock3, _lock2)

    def test_synchronized_outer_classmethod(self):
        # XXX If all was good, this would be detected as a class
        # method call, but the classmethod decorator doesn't bind
        # the wrapped function to the class before calling and
        # just calls it direct, explicitly passing the class as
        # first argument. This screws things up. Would be nice if
        # Python were fixed, but that isn't likely to happen.
        # Because of that, the lock ends up on the method object,
        # not on the class, so the class-based lookups stay disabled.
        #_lock0 = getattr(C4, '_synchronized_lock', None)
        _lock0 = getattr(C4.function2, '_synchronized_lock', None)
        self.assertEqual(_lock0, None)
        c4.function2()
        #_lock1 = getattr(C4, '_synchronized_lock', None)
        _lock1 = getattr(C4.function2, '_synchronized_lock', None)
        self.assertNotEqual(_lock1, None)
        C4.function2()
        #_lock2 = getattr(C4, '_synchronized_lock', None)
        _lock2 = getattr(C4.function2, '_synchronized_lock', None)
        self.assertNotEqual(_lock2, None)
        self.assertEqual(_lock2, _lock1)
        C4.function2()
        #_lock3 = getattr(C4, '_synchronized_lock', None)
        _lock3 = getattr(C4.function2, '_synchronized_lock', None)
        self.assertNotEqual(_lock3, None)
        self.assertEqual(_lock3, _lock2)

    def test_synchronized_instancemethod(self):
        """Instance method: lock on the instance, distinct from the
        class-level lock created by class methods."""
        if hasattr(C1, '_synchronized_lock'):
            del C1._synchronized_lock
        _lock0 = getattr(c1, '_synchronized_lock', None)
        self.assertEqual(_lock0, None)
        C1.function1(c1)
        _lock1 = getattr(c1, '_synchronized_lock', None)
        self.assertNotEqual(_lock1, None)
        c1.function1()
        _lock2 = getattr(c1, '_synchronized_lock', None)
        self.assertNotEqual(_lock2, None)
        self.assertEqual(_lock2, _lock1)
        c1.function1()
        _lock3 = getattr(c1, '_synchronized_lock', None)
        self.assertNotEqual(_lock3, None)
        self.assertEqual(_lock3, _lock2)
        del c1._synchronized_lock
        C1.function2()
        _lock4 = getattr(C1, '_synchronized_lock', None)
        self.assertNotEqual(_lock4, None)
        c1.function1()
        _lock5 = getattr(c1, '_synchronized_lock', None)
        self.assertNotEqual(_lock5, None)
        # The recreated instance lock must not be the class lock.
        self.assertNotEqual(_lock5, _lock4)

    def test_synchronized_type_new_style(self):
        """Synchronized new-style class: lock appears on instantiation."""
        if hasattr(C2, '_synchronized_lock'):
            del C2._synchronized_lock
        _lock0 = getattr(C2, '_synchronized_lock', None)
        self.assertEqual(_lock0, None)
        c2 = C2()
        _lock1 = getattr(C2, '_synchronized_lock', None)
        self.assertNotEqual(_lock1, None)
        c2 = C2()
        _lock2 = getattr(C2, '_synchronized_lock', None)
        self.assertNotEqual(_lock2, None)
        self.assertEqual(_lock2, _lock1)
        c2 = C2()
        _lock3 = getattr(C2, '_synchronized_lock', None)
        self.assertNotEqual(_lock3, None)
        self.assertEqual(_lock3, _lock2)

    def test_synchronized_type_old_style(self):
        """Synchronized old-style class: same behaviour as new-style."""
        if hasattr(C3, '_synchronized_lock'):
            del C3._synchronized_lock
        _lock0 = getattr(C3, '_synchronized_lock', None)
        self.assertEqual(_lock0, None)
        c2 = C3()
        _lock1 = getattr(C3, '_synchronized_lock', None)
        self.assertNotEqual(_lock1, None)
        c2 = C3()
        _lock2 = getattr(C3, '_synchronized_lock', None)
        self.assertNotEqual(_lock2, None)
        self.assertEqual(_lock2, _lock1)
        c2 = C3()
        _lock3 = getattr(C3, '_synchronized_lock', None)
        self.assertNotEqual(_lock3, None)
        self.assertEqual(_lock3, _lock2)

if __name__ == '__main__':
    unittest.main()
| bsd-2-clause |
sabi0/intellij-community | python/helpers/pydev/pydevd_attach_to_process/linux/lldb_prepare.py | 102 | 1691 | # This file is meant to be run inside lldb
# It registers command to load library and invoke attach function
# Also it marks process threads to to distinguish them from debugger
# threads later while settings trace in threads
def load_lib_and_attach(debugger, command, result, internal_dict):
    """dlopen() the attach library inside the target and call DoAttach.

    ``command`` carries four whitespace-separated arguments: the library
    path, the is-debug flag, the python code to execute on attach and
    the show-debug-info flag.
    """
    import shlex
    args = shlex.split(command)
    dll = args[0]
    is_debug = args[1]
    python_code = args[2]
    show_debug_info = args[3]

    import lldb
    options = lldb.SBExpressionOptions()
    options.SetFetchDynamicValue()
    options.SetTryAllThreads(run_others=False)
    options.SetTimeoutInMicroSeconds(timeout=10000000)

    print(dll)
    target = debugger.GetSelectedTarget()

    def run_expression(expression):
        # Evaluate in the target and report any error to the console.
        value = target.EvaluateExpression(expression, options)
        err = value.GetError()
        if err:
            print(err)

    run_expression("(void*)dlopen(\"%s\", 2);" % (dll))
    print(python_code)
    # DoAttach receives the python code with double quotes downgraded
    # to single quotes so it can be embedded in the expression string.
    run_expression("(int)DoAttach(%s, \"%s\", %s);" % (
        is_debug, python_code.replace('"', "'"), show_debug_info))
def __lldb_init_module(debugger, internal_dict):
    # Called automatically by lldb when this script is imported:
    # register the attach command and remember the ids of the threads
    # that already exist, so the debuggee's own threads can later be
    # told apart from debugger-created ones when setting up tracing.
    import lldb
    debugger.HandleCommand('command script add -f lldb_prepare.load_lib_and_attach load_lib_and_attach')
    try:
        target = debugger.GetSelectedTarget()
        if target:
            process = target.GetProcess()
            if process:
                for thread in process:
                    # print('Marking process thread %d'%thread.GetThreadID())
                    internal_dict['_thread_%d' % thread.GetThreadID()] = True
                    # thread.Suspend()
    except:
        # Best-effort: a failure here must never break command
        # registration, so any error is only printed.
        import traceback;traceback.print_exc()
| apache-2.0 |
GeyerA/android_external_chromium_org | tools/python/google/platform_utils_mac.py | 183 | 5676 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Platform-specific utility methods shared by several scripts."""
import os
import subprocess
import google.path_utils
class PlatformUtility(object):
  """Mac implementation of the platform utilities: path helpers plus
  commands to start/stop an Apache httpd server for layout tests."""

  def __init__(self, base_dir):
    """Args:
      base_dir: the base dir for running tests.
    """
    self._base_dir = base_dir
    self._httpd_cmd_string = None  # used for starting/stopping httpd
    self._bash = "/bin/bash"

  def _UnixRoot(self):
    """Returns the path to root."""
    return "/"

  def GetFilesystemRoot(self):
    """Returns the root directory of the file system."""
    return self._UnixRoot()

  def GetTempDirectory(self):
    """Returns the file system temp directory

    Note that this does not use a random subdirectory, so it's not
    intrinsically secure.  If you need a secure subdir, use the tempfile
    package.
    """
    return os.getenv("TMPDIR", "/tmp")

  def FilenameToUri(self, path, use_http=False, use_ssl=False, port=8000):
    """Convert a filesystem path to a URI.

    Args:
      path: For an http URI, the path relative to the httpd server's
          DocumentRoot; for a file URI, the full path to the file.
      use_http: if True, returns a URI of the form http://127.0.0.1:8000/.
          If False, returns a file:/// URI.
      use_ssl: if True, returns HTTPS URL (https://127.0.0.1:8000/).
          This parameter is ignored if use_http=False.
      port: The port number to append when returning an HTTP URI
    """
    if use_http:
      protocol = 'http'
      if use_ssl:
        protocol = 'https'
      return "%s://127.0.0.1:%d/%s" % (protocol, port, path)
    return "file://" + path

  def GetStartHttpdCommand(self, output_dir,
                           httpd_conf_path, mime_types_path,
                           document_root=None, apache2=False):
    """Prepares the config file and output directory to start an httpd server.
    Returns a list of strings containing the server's command line+args.

    Args:
      output_dir: the path to the server's output directory, for log files.
        It will be created if necessary.
      httpd_conf_path: full path to the httpd.conf file to be used.
      mime_types_path: full path to the mime.types file to be used.
      document_root: full path to the DocumentRoot.  If None, the DocumentRoot
        from the httpd.conf file will be used.  Note that the httpd.conf
        file alongside this script does not specify any DocumentRoot, so if
        you're using that one, be sure to specify a document_root here.
      apache2: boolean if true will cause this function to return start
        command for Apache 2.x as opposed to Apache 1.3.x. This flag
        is ignored on Mac (but preserved here for compatibility in
        function signature with win), where httpd2 is used always
    """
    exe_name = "httpd"
    cert_file = google.path_utils.FindUpward(self._base_dir, 'tools',
                                             'python', 'google',
                                             'httpd_config', 'httpd2.pem')
    # SSL directives are only valid when the system Apache has the SSL
    # module enabled, so detect that before adding them below.
    ssl_enabled = os.path.exists('/etc/apache2/mods-enabled/ssl.conf')

    httpd_vars = {
      "httpd_executable_path":
          os.path.join(self._UnixRoot(), "usr", "sbin", exe_name),
      "httpd_conf_path": httpd_conf_path,
      "ssl_certificate_file": cert_file,
      "document_root" : document_root,
      "server_root": os.path.join(self._UnixRoot(), "usr"),
      "mime_types_path": mime_types_path,
      "output_dir": output_dir,
      "ssl_mutex": "file:"+os.path.join(output_dir, "ssl_mutex"),
      "user": os.environ.get("USER", "#%d" % os.geteuid()),
      "lock_file": os.path.join(output_dir, "accept.lock"),
    }

    google.path_utils.MaybeMakeDirectory(output_dir)

    # We have to wrap the command in bash
    # -C: process directive before reading config files
    # -c: process directive after reading config files
    # Apache wouldn't run CGIs with permissions==700 unless we add
    # -c User "<username>"
    httpd_cmd_string = (
      '%(httpd_executable_path)s'
      ' -f %(httpd_conf_path)s'
      ' -c \'TypesConfig "%(mime_types_path)s"\''
      ' -c \'CustomLog "%(output_dir)s/access_log.txt" common\''
      ' -c \'ErrorLog "%(output_dir)s/error_log.txt"\''
      ' -c \'PidFile "%(output_dir)s/httpd.pid"\''
      ' -C \'User "%(user)s"\''
      ' -C \'ServerRoot "%(server_root)s"\''
      ' -c \'LockFile "%(lock_file)s"\''
    )

    if document_root:
      httpd_cmd_string += ' -C \'DocumentRoot "%(document_root)s"\''

    if ssl_enabled:
      httpd_cmd_string += (
        ' -c \'SSLCertificateFile "%(ssl_certificate_file)s"\''
        ' -c \'SSLMutex "%(ssl_mutex)s"\''
      )

    # Save a copy of httpd_cmd_string to use for stopping httpd
    self._httpd_cmd_string = httpd_cmd_string % httpd_vars

    httpd_cmd = [self._bash, "-c", self._httpd_cmd_string]
    return httpd_cmd

  def GetStopHttpdCommand(self):
    """Returns a list of strings that contains the command line+args needed to
    stop the http server used in the http tests.

    This tries to fetch the pid of httpd (if available) and returns the
    command to kill it. If pid is not available, kill all httpd processes
    """
    if not self._httpd_cmd_string:
      return ["true"]   # Haven't been asked for the start cmd yet. Just pass.
    # Add a sleep after the shutdown because sometimes it takes some time for
    # the port to be available again.
    return [self._bash, "-c", self._httpd_cmd_string + ' -k stop && sleep 5']
| bsd-3-clause |
yongshengwang/hue | build/env/lib/python2.7/site-packages/boto-2.38.0-py2.7.egg/boto/requestlog.py | 150 | 1486 | import sys
from datetime import datetime
from threading import Thread
import Queue
from boto.utils import RequestHook
from boto.compat import long_type
class RequestLogger(RequestHook):
    """
    This class implements a request logger that uses a single thread to
    write to a log file.

    One CSV row is recorded per request: timestamp, HTTP status,
    duration in seconds, response size and the request action. Rows are
    queued and written by a background thread so callers never block on
    disk I/O (beyond a bounded queue put).
    """

    def __init__(self, filename='/tmp/request_log.csv'):
        """Open the log file (truncating it) and start the writer thread."""
        self.request_log_file = open(filename, 'w')
        # Bounded queue so a stuck writer cannot grow memory without limit.
        self.request_log_queue = Queue.Queue(100)
        # The worker loops forever, so it must be a daemon thread:
        # otherwise it keeps the interpreter alive and the process can
        # never exit cleanly.
        worker = Thread(target=self._request_log_worker)
        worker.daemon = True
        worker.start()

    def handle_request_data(self, request, response, error=False):
        """Queue one CSV row describing the finished request."""
        # Response size is unknown (0) when the request errored out.
        # (Renamed from 'len', which shadowed the builtin.)
        size = 0 if error else response.getheader('Content-Length')
        now = datetime.now()
        time = now.strftime('%Y-%m-%d %H:%M:%S')
        td = (now - request.start_time)
        duration = (td.microseconds + long_type(td.seconds + td.days * 24 * 3600) * 1e6) / 1e6

        # write output including timestamp, status code, response time, response size, request action
        self.request_log_queue.put("'%s', '%s', '%s', '%s', '%s'\n" % (
            time, response.status, duration, size, request.params['Action']))

    def _request_log_worker(self):
        """Drain the queue forever, flushing each row to the log file."""
        while True:
            try:
                item = self.request_log_queue.get(True)
                self.request_log_file.write(item)
                self.request_log_file.flush()
                self.request_log_queue.task_done()
            except Exception:
                # Best-effort logging: never let a write failure kill
                # the worker; report it and keep draining.
                import traceback
                traceback.print_exc(file=sys.stdout)
| apache-2.0 |
shubhdev/edxOnBaadal | cms/envs/devstack.py | 15 | 4013 | """
Specific overrides to the base prod settings to make development easier.
"""
from .aws import * # pylint: disable=wildcard-import, unused-wildcard-import
# Don't use S3 in devstack, fall back to filesystem
del DEFAULT_FILE_STORAGE
MEDIA_ROOT = "/edx/var/edxapp/uploads"
DEBUG = True
USE_I18N = True
TEMPLATE_DEBUG = DEBUG
################################ LOGGERS ######################################
import logging
# Disable noisy loggers
for pkg_name in ['track.contexts', 'track.middleware', 'dd.dogapi']:
    logging.getLogger(pkg_name).setLevel(logging.CRITICAL)
################################ EMAIL ########################################
# Print outgoing email to the console instead of sending real messages.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
################################# LMS INTEGRATION #############################
LMS_BASE = "localhost:8000"
FEATURES['PREVIEW_LMS_BASE'] = "preview." + LMS_BASE
########################### PIPELINE #################################
# Skip RequireJS optimizer in development
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
############################# ADVANCED COMPONENTS #############################
# Make it easier to test advanced components in local dev
FEATURES['ALLOW_ALL_ADVANCED_COMPONENTS'] = True
################################# CELERY ######################################
# By default don't use a worker, execute tasks as if they were local functions
CELERY_ALWAYS_EAGER = True
################################ DEBUG TOOLBAR ################################
# Devstack-only debugging apps and middleware.
INSTALLED_APPS += ('debug_toolbar', 'debug_toolbar_mongo')
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (
    'debug_toolbar.panels.versions.VersionsPanel',
    'debug_toolbar.panels.timer.TimerPanel',
    'debug_toolbar.panels.settings.SettingsPanel',
    'debug_toolbar.panels.headers.HeadersPanel',
    'debug_toolbar.panels.request.RequestPanel',
    'debug_toolbar.panels.sql.SQLPanel',
    'debug_toolbar.panels.signals.SignalsPanel',
    'debug_toolbar.panels.logging.LoggingPanel',
    'debug_toolbar.panels.profiling.ProfilingPanel',
)
DEBUG_TOOLBAR_CONFIG = {
    'SHOW_TOOLBAR_CALLBACK': 'cms.envs.devstack.should_show_debug_toolbar'
}
def should_show_debug_toolbar(_):
    """SHOW_TOOLBAR_CALLBACK hook: always enable the toolbar on devstack.

    The request argument is deliberately ignored -- we always want the
    toolbar on devstack regardless of IP, auth, etc.
    """
    return True
# To see stacktraces for MongoDB queries, set this to True.
# Stacktraces slow down page loads drastically (for pages with lots of queries).
DEBUG_TOOLBAR_MONGO_STACKTRACES = False
################################ MILESTONES ################################
FEATURES['MILESTONES_APP'] = True
################################ ENTRANCE EXAMS ################################
FEATURES['ENTRANCE_EXAMS'] = True
################################ COURSE LICENSES ################################
FEATURES['LICENSING'] = True
# Needed to enable licensing on video modules
XBLOCK_SETTINGS = {
    "VideoDescriptor": {
        "licensing_enabled": True
    }
}
################################ SEARCH INDEX ################################
FEATURES['ENABLE_COURSEWARE_INDEX'] = True
FEATURES['ENABLE_LIBRARY_INDEX'] = True
SEARCH_ENGINE = "search.elastic.ElasticSearchEngine"
########################## Certificates Web/HTML View #######################
FEATURES['CERTIFICATES_HTML_VIEW'] = True
################################# DJANGO-REQUIRE ###############################
# Whether to run django-require in debug mode.
REQUIRE_DEBUG = DEBUG
###############################################################################
# See if the developer has any local overrides.
try:
    from .private import *  # pylint: disable=import-error
except ImportError:
    pass
#####################################################################
# Lastly, run any migrations, if needed.
MODULESTORE = convert_module_store_setting_if_needed(MODULESTORE)
# Dummy secret key for dev
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
| agpl-3.0 |
davidkassa/p2pool | nattraverso/pynupnp/__init__.py | 288 | 1088 | """
This package offers ways to retreive ip addresses of the machine, and map ports
through UPnP devices.
@author: Raphael Slinckx
@copyright: Copyright 2005
@license: LGPL
@contact: U{raphael@slinckx.net<mailto:raphael@slinckx.net>}
@version: 0.1.0
"""
__revision__ = "$id"
from nattraverso.pynupnp.upnp import search_upnp_device, UPnPMapper
def get_external_ip():
    """
    Discover a UPnP device and resolve its WAN (external) IP address.
    The address is delivered as a string of the form "x.x.x.x".

    @return: A deferred called with the external ip address of this host
    @rtype: L{twisted.internet.defer.Deferred}
    """
    def _query_ip(upnp_device):
        # Chain the device's own external-ip lookup onto discovery.
        return upnp_device.get_external_ip()
    return search_upnp_device().addCallback(_query_ip)
def get_port_mapper():
    """
    Discover a UPnP device and wrap it in a L{UPnPMapper}, which is a
    L{nattraverso.portmapper.NATMapper} implementation.

    @return: A deferred called with the L{UPnPMapper} instance.
    @rtype: L{twisted.internet.defer.Deferred}
    """
    def _make_mapper(upnp_device):
        return UPnPMapper(upnp_device)
    return search_upnp_device().addCallback(_make_mapper)
| gpl-3.0 |
maks-us/cabot | cabot/cabotapp/admin.py | 2 | 1148 | from django.contrib import admin
from polymorphic.admin import (PolymorphicChildModelAdmin,
PolymorphicParentModelAdmin)
from .alert import AlertPlugin, AlertPluginUserData
from .models import (AlertAcknowledgement, Instance, JenkinsConfig, Service,
ServiceStatusSnapshot, Shift, StatusCheck,
StatusCheckResult, UserProfile)
class StatusCheckAdmin(PolymorphicParentModelAdmin):
    """Parent admin for the polymorphic StatusCheck hierarchy; every
    concrete StatusCheck subclass is registered as a child model."""
    base_model = StatusCheck
    child_models = StatusCheck.__subclasses__()
class ChildStatusCheckAdmin(PolymorphicChildModelAdmin):
    """Shared child admin used for each concrete StatusCheck subclass."""
    base_model = StatusCheck
# Register every concrete StatusCheck subclass under the shared child
# admin, then register the remaining models with the default admin.
for child_status_check in StatusCheck.__subclasses__():
    admin.site.register(child_status_check, ChildStatusCheckAdmin)
admin.site.register(UserProfile)
admin.site.register(Shift)
admin.site.register(Service)
admin.site.register(ServiceStatusSnapshot)
admin.site.register(StatusCheck, StatusCheckAdmin)
admin.site.register(StatusCheckResult)
admin.site.register(Instance)
admin.site.register(AlertPlugin)
admin.site.register(AlertPluginUserData)
admin.site.register(AlertAcknowledgement)
admin.site.register(JenkinsConfig)
| mit |
Nexedi/neoppod | neo/tests/storage/testTransactions.py | 1 | 4348 | #
# Copyright (C) 2010-2019 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from ..mock import Mock
from .. import NeoUnitTestBase
from neo.lib.util import p64
from neo.storage.transactions import TransactionManager
class TransactionManagerTests(NeoUnitTestBase):
    """Unit tests for the storage TransactionManager, driven through a
    mocked application (no real database or partition table)."""

    def setUp(self):
        NeoUnitTestBase.setUp(self)
        self.app = Mock()
        # no history
        self.app.dm = Mock({'getObjectHistory': []})
        self.app.pt = Mock({'isAssigned': True, 'getPartitions': 2})
        self.app.em = Mock({'setTimeout': None})
        self.manager = TransactionManager(self.app)

    def register(self, uuid, ttid):
        # Register a fake client connection for the given transaction id.
        self.manager.register(Mock({'getUUID': uuid}), ttid)

    def test_updateObjectDataForPack(self):
        """updateObjectDataForPack must only rewrite an object entry
        when the stored object points at the packed (orig) serial."""
        ram_serial = self.getNextTID()
        oid = p64(1)
        orig_serial = self.getNextTID()
        uuid = self.getClientUUID()
        locking_serial = self.getNextTID()
        other_serial = self.getNextTID()
        new_serial = self.getNextTID()
        checksum = "2" * 20
        self.register(uuid, locking_serial)
        # Object not known, nothing happens
        self.assertEqual(self.manager.getObjectFromTransaction(locking_serial,
            oid), None)
        self.manager.updateObjectDataForPack(oid, orig_serial, None, checksum)
        self.assertEqual(self.manager.getObjectFromTransaction(locking_serial,
            oid), None)
        self.manager.abort(locking_serial, even_if_locked=True)
        # Object known, but doesn't point at orig_serial, it is not updated
        self.register(uuid, locking_serial)
        self.manager.storeObject(locking_serial, ram_serial, oid, 0, "3" * 20,
            'bar', None)
        holdData = self.app.dm.mockGetNamedCalls('holdData')
        self.assertEqual(holdData.pop(0).params, ("3" * 20, oid, 'bar', 0))
        orig_object = self.manager.getObjectFromTransaction(locking_serial,
            oid)
        self.manager.updateObjectDataForPack(oid, orig_serial, None, checksum)
        self.assertEqual(self.manager.getObjectFromTransaction(locking_serial,
            oid), orig_object)
        self.manager.abort(locking_serial, even_if_locked=True)
        # Same, but stored as an undo pointing at a different serial.
        self.register(uuid, locking_serial)
        self.manager.storeObject(locking_serial, ram_serial, oid, None, None,
            None, other_serial)
        orig_object = self.manager.getObjectFromTransaction(locking_serial,
            oid)
        self.manager.updateObjectDataForPack(oid, orig_serial, None, checksum)
        self.assertEqual(self.manager.getObjectFromTransaction(locking_serial,
            oid), orig_object)
        self.manager.abort(locking_serial, even_if_locked=True)
        # Object known and points at undone data it gets updated
        self.register(uuid, locking_serial)
        self.manager.storeObject(locking_serial, ram_serial, oid, None, None,
            None, orig_serial)
        self.manager.updateObjectDataForPack(oid, orig_serial, new_serial,
            checksum)
        self.assertEqual(self.manager.getObjectFromTransaction(locking_serial,
            oid), (oid, None, new_serial))
        self.manager.abort(locking_serial, even_if_locked=True)
        # With no new serial, the packed data is re-held under checksum.
        self.register(uuid, locking_serial)
        self.manager.storeObject(locking_serial, ram_serial, oid, None, None,
            None, orig_serial)
        self.manager.updateObjectDataForPack(oid, orig_serial, None, checksum)
        self.assertEqual(holdData.pop(0).params, (checksum,))
        self.assertEqual(self.manager.getObjectFromTransaction(locking_serial,
            oid), (oid, checksum, None))
        self.manager.abort(locking_serial, even_if_locked=True)
        self.assertFalse(holdData)

if __name__ == "__main__":
    unittest.main()
| gpl-2.0 |
openstack-hyper-v-python/numpy | numpy/lib/tests/test_polynomial.py | 8 | 4631 | from __future__ import division, absolute_import, print_function
'''
>>> p = np.poly1d([1.,2,3])
>>> p
poly1d([ 1., 2., 3.])
>>> print(p)
2
1 x + 2 x + 3
>>> q = np.poly1d([3.,2,1])
>>> q
poly1d([ 3., 2., 1.])
>>> print(q)
2
3 x + 2 x + 1
>>> print(np.poly1d([1.89999+2j, -3j, -5.12345678, 2+1j]))
3 2
(1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j)
>>> print(np.poly1d([-3, -2, -1]))
2
-3 x - 2 x - 1
>>> p(0)
3.0
>>> p(5)
38.0
>>> q(0)
1.0
>>> q(5)
86.0
>>> p * q
poly1d([ 3., 8., 14., 8., 3.])
>>> p / q
(poly1d([ 0.33333333]), poly1d([ 1.33333333, 2.66666667]))
>>> p + q
poly1d([ 4., 4., 4.])
>>> p - q
poly1d([-2., 0., 2.])
>>> p ** 4
poly1d([ 1., 8., 36., 104., 214., 312., 324., 216., 81.])
>>> p(q)
poly1d([ 9., 12., 16., 8., 6.])
>>> q(p)
poly1d([ 3., 12., 32., 40., 34.])
>>> np.asarray(p)
array([ 1., 2., 3.])
>>> len(p)
2
>>> p[0], p[1], p[2], p[3]
(3.0, 2.0, 1.0, 0)
>>> p.integ()
poly1d([ 0.33333333, 1. , 3. , 0. ])
>>> p.integ(1)
poly1d([ 0.33333333, 1. , 3. , 0. ])
>>> p.integ(5)
poly1d([ 0.00039683, 0.00277778, 0.025 , 0. , 0. ,
0. , 0. , 0. ])
>>> p.deriv()
poly1d([ 2., 2.])
>>> p.deriv(2)
poly1d([ 2.])
>>> q = np.poly1d([1.,2,3], variable='y')
>>> print(q)
2
1 y + 2 y + 3
>>> q = np.poly1d([1.,2,3], variable='lambda')
>>> print(q)
2
1 lambda + 2 lambda + 3
>>> np.polydiv(np.poly1d([1,0,-1]), np.poly1d([1,1]))
(poly1d([ 1., -1.]), poly1d([ 0.]))
'''
from numpy.testing import *
import numpy as np
class TestDocs(TestCase):
    def test_doctests(self):
        """Run the doctest examples from this module's leading string."""
        return rundocs()
    def test_roots(self):
        """A double root at zero is reported twice by np.roots."""
        assert_array_equal(np.roots([1, 0, 0]), [0, 0])
    def test_str_leading_zeros(self):
        """Coefficients zeroed via item assignment are dropped by str()."""
        p = np.poly1d([4, 3, 2, 1])
        p[3] = 0
        assert_equal(str(p),
                     " 2\n"
                     "3 x + 2 x + 1")
        p = np.poly1d([1, 2])
        p[0] = 0
        p[1] = 0
        # All coefficients zeroed: printed as a bare 0.
        assert_equal(str(p), " \n0")
def test_polyfit(self) :
c = np.array([3., 2., 1.])
x = np.linspace(0, 2, 7)
y = np.polyval(c, x)
err = [1, -1, 1, -1, 1, -1, 1]
weights = np.arange(8, 1, -1)**2/7.0
# check 1D case
m, cov = np.polyfit(x, y+err, 2, cov=True)
est = [3.8571, 0.2857, 1.619]
assert_almost_equal(est, m, decimal=4)
val0 = [[2.9388, -5.8776, 1.6327],
[-5.8776, 12.7347, -4.2449],
[1.6327, -4.2449, 2.3220]]
assert_almost_equal(val0, cov, decimal=4)
m2, cov2 = np.polyfit(x, y+err, 2, w=weights, cov=True)
assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4)
val = [[ 8.7929, -10.0103, 0.9756],
[-10.0103, 13.6134, -1.8178],
[ 0.9756, -1.8178, 0.6674]]
assert_almost_equal(val, cov2, decimal=4)
# check 2D (n,1) case
y = y[:, np.newaxis]
c = c[:, np.newaxis]
assert_almost_equal(c, np.polyfit(x, y, 2))
# check 2D (n,2) case
yy = np.concatenate((y, y), axis=1)
cc = np.concatenate((c, c), axis=1)
assert_almost_equal(cc, np.polyfit(x, yy, 2))
m, cov = np.polyfit(x, yy + np.array(err)[:, np.newaxis], 2, cov=True)
assert_almost_equal(est, m[:, 0], decimal=4)
assert_almost_equal(est, m[:, 1], decimal=4)
assert_almost_equal(val0, cov[:,:, 0], decimal=4)
assert_almost_equal(val0, cov[:,:, 1], decimal=4)
def test_objects(self):
from decimal import Decimal
p = np.poly1d([Decimal('4.0'), Decimal('3.0'), Decimal('2.0')])
p2 = p * Decimal('1.333333333333333')
assert_(p2[1] == Decimal("3.9999999999999990"))
p2 = p.deriv()
assert_(p2[1] == Decimal('8.0'))
p2 = p.integ()
assert_(p2[3] == Decimal("1.333333333333333333333333333"))
assert_(p2[2] == Decimal('1.5'))
assert_(np.issubdtype(p2.coeffs.dtype, np.object_))
def test_complex(self):
p = np.poly1d([3j, 2j, 1j])
p2 = p.integ()
assert_((p2.coeffs == [1j, 1j, 1j, 0]).all())
p2 = p.deriv()
assert_((p2.coeffs == [6j, 2j]).all())
def test_integ_coeffs(self):
p = np.poly1d([3, 2, 1])
p2 = p.integ(3, k=[9, 7, 6])
assert_((p2.coeffs == [1/4./5., 1/3./4., 1/2./3., 9/1./2., 7, 6]).all())
def test_zero_dims(self):
try:
np.poly(np.zeros((0, 0)))
except ValueError:
pass
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
ojengwa/oh-mainline | vendor/packages/Django/django/forms/models.py | 98 | 44597 | """
Helper functions for creating Form classes from Django models
and database field objects.
"""
from __future__ import absolute_import, unicode_literals
from django.core.exceptions import ValidationError, NON_FIELD_ERRORS, FieldError
from django.core.validators import EMPTY_VALUES
from django.forms.fields import Field, ChoiceField
from django.forms.forms import BaseForm, get_declared_fields
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.util import ErrorList
from django.forms.widgets import (SelectMultiple, HiddenInput,
MultipleHiddenInput, media_property)
from django.utils.encoding import smart_text, force_text
from django.utils.datastructures import SortedDict
from django.utils import six
from django.utils.text import get_text_list, capfirst
from django.utils.translation import ugettext_lazy as _, ugettext
__all__ = (
'ModelForm', 'BaseModelForm', 'model_to_dict', 'fields_for_model',
'save_instance', 'ModelChoiceField', 'ModelMultipleChoiceField',
)
def construct_instance(form, instance, fields=None, exclude=None):
    """
    Constructs and returns a model instance from the bound ``form``'s
    ``cleaned_data``, but does not save the returned instance to the
    database.
    """
    from django.db import models

    opts = instance._meta
    cleaned_data = form.cleaned_data
    deferred_file_fields = []
    for field in opts.fields:
        # Only editable, non-auto fields that actually appear in the
        # cleaned data may be written onto the instance.
        if (not field.editable or isinstance(field, models.AutoField)
                or field.name not in cleaned_data):
            continue
        # Honour the caller's field whitelist / blacklist.
        if fields is not None and field.name not in fields:
            continue
        if exclude and field.name in exclude:
            continue
        if isinstance(field, models.FileField):
            # File fields are written last so a callable upload_to can
            # read the values already set by the other fields.
            deferred_file_fields.append(field)
        else:
            field.save_form_data(instance, cleaned_data[field.name])

    for field in deferred_file_fields:
        field.save_form_data(instance, cleaned_data[field.name])

    return instance
def save_instance(form, instance, fields=None, fail_message='saved',
                  commit=True, exclude=None, construct=True):
    """
    Saves bound Form ``form``'s cleaned_data into model instance ``instance``.

    If commit=True, then the changes to ``instance`` will be saved to the
    database. Returns ``instance``.

    If construct=False, assume ``instance`` has already been constructed and
    just needs to be saved.

    Raises ValueError if the form has validation errors; ``fail_message``
    is interpolated into that error ("created"/"changed"/"saved").
    """
    if construct:
        instance = construct_instance(form, instance, fields, exclude)
    opts = instance._meta
    if form.errors:
        raise ValueError("The %s could not be %s because the data didn't"
                         " validate." % (opts.object_name, fail_message))

    # Wrap up the saving of m2m data as a function.  M2M values can only be
    # assigned once the instance has a primary key, hence the deferral.
    def save_m2m():
        cleaned_data = form.cleaned_data
        for f in opts.many_to_many:
            if fields and f.name not in fields:
                continue
            if f.name in cleaned_data:
                f.save_form_data(instance, cleaned_data[f.name])
    if commit:
        # If we are committing, save the instance and the m2m data immediately.
        instance.save()
        save_m2m()
    else:
        # We're not committing. Add a method to the form to allow deferred
        # saving of m2m data; the caller must invoke form.save_m2m() after
        # saving the instance itself.
        form.save_m2m = save_m2m
    return instance
# ModelForms #################################################################
def model_to_dict(instance, fields=None, exclude=None):
    """
    Returns a dict containing the data in ``instance`` suitable for passing as
    a Form's ``initial`` keyword argument.

    ``fields`` is an optional list of field names. If provided, only the named
    fields will be included in the returned dict.

    ``exclude`` is an optional list of field names. If provided, the named
    fields will be excluded from the returned dict, even if they are listed in
    the ``fields`` argument.
    """
    # avoid a circular import
    from django.db.models.fields.related import ManyToManyField
    opts = instance._meta
    data = {}
    for f in opts.fields + opts.many_to_many:
        if not f.editable:
            continue
        # NOTE(review): truthiness test means fields=[] behaves like
        # fields=None (all fields included), unlike fields_for_model which
        # tests `fields is not None` — presumably intentional here; confirm
        # before "fixing".
        if fields and not f.name in fields:
            continue
        if exclude and f.name in exclude:
            continue
        if isinstance(f, ManyToManyField):
            # If the object doesn't have a primary key yet, just use an empty
            # list for its m2m fields. Calling f.value_from_object will raise
            # an exception.
            if instance.pk is None:
                data[f.name] = []
            else:
                # MultipleChoiceWidget needs a list of pks, not object instances.
                data[f.name] = list(f.value_from_object(instance).values_list('pk', flat=True))
        else:
            data[f.name] = f.value_from_object(instance)
    return data
def fields_for_model(model, fields=None, exclude=None, widgets=None, formfield_callback=None):
    """
    Returns a ``SortedDict`` containing form fields for the given model.

    ``fields`` is an optional list of field names. If provided, only the named
    fields will be included in the returned fields.

    ``exclude`` is an optional list of field names. If provided, the named
    fields will be excluded from the returned fields, even if they are listed
    in the ``fields`` argument.

    ``widgets`` is a dictionary of model field names mapped to a widget.

    ``formfield_callback`` is a callable that takes a model field and returns
    a form field.
    """
    field_list = []
    # Model fields for which formfield() returned None (e.g. non-form
    # fields); tracked so the reordering pass below can skip them.
    ignored = []
    opts = model._meta
    for f in sorted(opts.fields + opts.many_to_many):
        if not f.editable:
            continue
        if fields is not None and not f.name in fields:
            continue
        if exclude and f.name in exclude:
            continue
        if widgets and f.name in widgets:
            kwargs = {'widget': widgets[f.name]}
        else:
            kwargs = {}
        if formfield_callback is None:
            formfield = f.formfield(**kwargs)
        elif not callable(formfield_callback):
            raise TypeError('formfield_callback must be a function or callable')
        else:
            formfield = formfield_callback(f, **kwargs)
        if formfield:
            field_list.append((f.name, formfield))
        else:
            ignored.append(f.name)
    field_dict = SortedDict(field_list)
    if fields:
        # Re-order the result to follow the order given in ``fields``;
        # unknown names map to None (callers detect these as errors).
        field_dict = SortedDict(
            [(f, field_dict.get(f)) for f in fields
                if ((not exclude) or (exclude and f not in exclude)) and (f not in ignored)]
        )
    return field_dict
class ModelFormOptions(object):
    """Holds the options declared on a ModelForm's inner ``Meta`` class.

    Each recognised attribute defaults to None when absent (or when no
    Meta class is supplied at all).
    """

    def __init__(self, options=None):
        for name in ('model', 'fields', 'exclude', 'widgets'):
            setattr(self, name, getattr(options, name, None))
class ModelFormMetaclass(type):
    """Metaclass for ModelForm: merges fields derived from ``Meta.model``
    with explicitly declared form fields into ``base_fields``."""

    def __new__(cls, name, bases, attrs):
        formfield_callback = attrs.pop('formfield_callback', None)
        try:
            parents = [b for b in bases if issubclass(b, ModelForm)]
        except NameError:
            # We are defining ModelForm itself.
            parents = None
        declared_fields = get_declared_fields(bases, attrs, False)
        new_class = super(ModelFormMetaclass, cls).__new__(cls, name, bases,
                attrs)
        if not parents:
            return new_class

        if 'media' not in attrs:
            new_class.media = media_property(new_class)
        opts = new_class._meta = ModelFormOptions(getattr(new_class, 'Meta', None))
        if opts.model:
            # If a model is defined, extract form fields from it.
            fields = fields_for_model(opts.model, opts.fields,
                                      opts.exclude, opts.widgets, formfield_callback)
            # make sure opts.fields doesn't specify an invalid field:
            # a None value means fields_for_model didn't recognise the name,
            # which is only acceptable if the field was declared on the form.
            none_model_fields = [k for k, v in six.iteritems(fields) if not v]
            missing_fields = set(none_model_fields) - \
                             set(declared_fields.keys())
            if missing_fields:
                message = 'Unknown field(s) (%s) specified for %s'
                message = message % (', '.join(missing_fields),
                                     opts.model.__name__)
                raise FieldError(message)
            # Override default model fields with any custom declared ones
            # (plus, include all the other declared fields).
            fields.update(declared_fields)
        else:
            fields = declared_fields
        new_class.declared_fields = declared_fields
        new_class.base_fields = fields
        return new_class
class BaseModelForm(BaseForm):
    """A Form bound to a model instance: initial data comes from the
    instance, and save() writes cleaned data back to it."""

    def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
                 initial=None, error_class=ErrorList, label_suffix=':',
                 empty_permitted=False, instance=None):
        opts = self._meta
        if instance is None:
            if opts.model is None:
                raise ValueError('ModelForm has no model class specified.')
            # if we didn't get an instance, instantiate a new one
            self.instance = opts.model()
            object_data = {}
        else:
            self.instance = instance
            object_data = model_to_dict(instance, opts.fields, opts.exclude)
        # if initial was provided, it should override the values from instance
        if initial is not None:
            object_data.update(initial)
        # self._validate_unique will be set to True by BaseModelForm.clean().
        # It is False by default so overriding self.clean() and failing to call
        # super will stop validate_unique from being called.
        self._validate_unique = False
        super(BaseModelForm, self).__init__(data, files, auto_id, prefix, object_data,
                                            error_class, label_suffix, empty_permitted)

    def _update_errors(self, message_dict):
        # Merge a model-validation error dict into the form's error dict,
        # dropping the offending values from cleaned_data.
        for k, v in message_dict.items():
            if k != NON_FIELD_ERRORS:
                self._errors.setdefault(k, self.error_class()).extend(v)
                # Remove the data from the cleaned_data dict since it was invalid
                if k in self.cleaned_data:
                    del self.cleaned_data[k]
        if NON_FIELD_ERRORS in message_dict:
            messages = message_dict[NON_FIELD_ERRORS]
            self._errors.setdefault(NON_FIELD_ERRORS, self.error_class()).extend(messages)

    def _get_validation_exclusions(self):
        """
        For backwards-compatibility, several types of fields need to be
        excluded from model validation. See the following tickets for
        details: #12507, #12521, #12553
        """
        exclude = []
        # Build up a list of fields that should be excluded from model field
        # validation and unique checks.
        for f in self.instance._meta.fields:
            field = f.name
            # Exclude fields that aren't on the form. The developer may be
            # adding these values to the model after form validation.
            if field not in self.fields:
                exclude.append(f.name)

            # Don't perform model validation on fields that were defined
            # manually on the form and excluded via the ModelForm's Meta
            # class. See #12901.
            elif self._meta.fields and field not in self._meta.fields:
                exclude.append(f.name)
            elif self._meta.exclude and field in self._meta.exclude:
                exclude.append(f.name)

            # Exclude fields that failed form validation. There's no need for
            # the model fields to validate them as well.
            elif field in self._errors.keys():
                exclude.append(f.name)

            # Exclude empty fields that are not required by the form, if the
            # underlying model field is required. This keeps the model field
            # from raising a required error. Note: don't exclude the field from
            # validation if the model field allows blanks. If it does, the blank
            # value may be included in a unique check, so cannot be excluded
            # from validation.
            else:
                form_field = self.fields[field]
                field_value = self.cleaned_data.get(field, None)
                if not f.blank and not form_field.required and field_value in EMPTY_VALUES:
                    exclude.append(f.name)
        return exclude

    def clean(self):
        # Flag that validate_unique() should run in _post_clean(); see the
        # note in __init__ about subclasses that skip super().clean().
        self._validate_unique = True
        return self.cleaned_data

    def _post_clean(self):
        opts = self._meta
        # Update the model instance with self.cleaned_data.
        self.instance = construct_instance(self, self.instance, opts.fields, opts.exclude)

        exclude = self._get_validation_exclusions()

        # Foreign Keys being used to represent inline relationships
        # are excluded from basic field value validation. This is for two
        # reasons: firstly, the value may not be supplied (#12507; the
        # case of providing new values to the admin); secondly the
        # object being referred to may not yet fully exist (#12749).
        # However, these fields *must* be included in uniqueness checks,
        # so this can't be part of _get_validation_exclusions().
        for f_name, field in self.fields.items():
            if isinstance(field, InlineForeignKeyField):
                exclude.append(f_name)

        # Clean the model instance's fields.
        try:
            self.instance.clean_fields(exclude=exclude)
        except ValidationError as e:
            self._update_errors(e.message_dict)

        # Call the model instance's clean method.
        try:
            self.instance.clean()
        except ValidationError as e:
            self._update_errors({NON_FIELD_ERRORS: e.messages})

        # Validate uniqueness if needed.
        if self._validate_unique:
            self.validate_unique()

    def validate_unique(self):
        """
        Calls the instance's validate_unique() method and updates the form's
        validation errors if any were raised.
        """
        exclude = self._get_validation_exclusions()
        try:
            self.instance.validate_unique(exclude=exclude)
        except ValidationError as e:
            self._update_errors(e.message_dict)

    def save(self, commit=True):
        """
        Saves this ``form``'s cleaned_data into model instance
        ``self.instance``.

        If commit=True, then the changes to ``instance`` will be saved to the
        database. Returns ``instance``.
        """
        # Choose the message used if saving fails because of form errors.
        if self.instance.pk is None:
            fail_message = 'created'
        else:
            fail_message = 'changed'
        # construct=False: the instance was already populated in _post_clean().
        return save_instance(self, self.instance, self._meta.fields,
                             fail_message, commit, construct=False)

    save.alters_data = True
class ModelForm(six.with_metaclass(ModelFormMetaclass, BaseModelForm)):
    """Public ModelForm base class.  All behaviour lives in
    ModelFormMetaclass (field collection) and BaseModelForm
    (validation and saving)."""
    pass
def modelform_factory(model, form=ModelForm, fields=None, exclude=None,
                      formfield_callback=None, widgets=None):
    """
    Returns a ModelForm containing form fields for the given model.

    ``fields`` is an optional list of field names. If provided, only the named
    fields will be included in the returned fields.

    ``exclude`` is an optional list of field names. If provided, the named
    fields will be excluded from the returned fields, even if they are listed
    in the ``fields`` argument.

    ``widgets`` is a dictionary of model field names mapped to a widget.

    ``formfield_callback`` is a callable that takes a model field and returns
    a form field.
    """
    # Create the inner Meta class. FIXME: ideally, we should be able to
    # construct a ModelForm without creating and passing in a temporary
    # inner class.

    # Build up a list of attributes that the Meta object will have.
    attrs = {'model': model}
    if fields is not None:
        attrs['fields'] = fields
    if exclude is not None:
        attrs['exclude'] = exclude
    if widgets is not None:
        attrs['widgets'] = widgets

    # If parent form class already has an inner Meta, the Meta we're
    # creating needs to inherit from the parent's inner meta.
    parent = (object,)
    if hasattr(form, 'Meta'):
        parent = (form.Meta, object)
    Meta = type(str('Meta'), parent, attrs)

    # Give this new form class a reasonable name.
    class_name = model.__name__ + str('Form')

    # Class attributes for the new form class.
    form_class_attrs = {
        'Meta': Meta,
        'formfield_callback': formfield_callback
    }

    # Instantiate type(form) in order to use the same metaclass as form.
    return type(form)(class_name, (form,), form_class_attrs)
# ModelFormSets ##############################################################
class BaseModelFormSet(BaseFormSet):
    """
    A ``FormSet`` for editing a queryset and/or adding new objects to it.
    """
    # Set by modelformset_factory() on the generated subclass.
    model = None

    def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
                 queryset=None, **kwargs):
        self.queryset = queryset
        # Extra initial data applies only to the extra (new-object) forms.
        self.initial_extra = kwargs.pop('initial', None)
        defaults = {'data': data, 'files': files, 'auto_id': auto_id, 'prefix': prefix}
        defaults.update(kwargs)
        super(BaseModelFormSet, self).__init__(**defaults)

    def initial_form_count(self):
        """Returns the number of forms that are required in this FormSet."""
        if not (self.data or self.files):
            # Unbound: one initial form per object in the queryset.
            return len(self.get_queryset())
        return super(BaseModelFormSet, self).initial_form_count()

    def _existing_object(self, pk):
        # Lazily build a pk -> instance map over the queryset so repeated
        # lookups don't hit the database again.
        if not hasattr(self, '_object_dict'):
            self._object_dict = dict([(o.pk, o) for o in self.get_queryset()])
        return self._object_dict.get(pk)

    def _construct_form(self, i, **kwargs):
        if self.is_bound and i < self.initial_form_count():
            # Import goes here instead of module-level because importing
            # django.db has side effects.
            from django.db import connections
            pk_key = "%s-%s" % (self.add_prefix(i), self.model._meta.pk.name)
            pk = self.data[pk_key]
            pk_field = self.model._meta.pk
            pk = pk_field.get_db_prep_lookup('exact', pk,
                connection=connections[self.get_queryset().db])
            if isinstance(pk, list):
                pk = pk[0]
            kwargs['instance'] = self._existing_object(pk)
        if i < self.initial_form_count() and not kwargs.get('instance'):
            kwargs['instance'] = self.get_queryset()[i]
        if i >= self.initial_form_count() and self.initial_extra:
            # Set initial values for extra forms
            try:
                kwargs['initial'] = self.initial_extra[i-self.initial_form_count()]
            except IndexError:
                pass
        return super(BaseModelFormSet, self)._construct_form(i, **kwargs)

    def get_queryset(self):
        if not hasattr(self, '_queryset'):
            if self.queryset is not None:
                qs = self.queryset
            else:
                qs = self.model._default_manager.get_query_set()

            # If the queryset isn't already ordered we need to add an
            # artificial ordering here to make sure that all formsets
            # constructed from this queryset have the same form order.
            if not qs.ordered:
                qs = qs.order_by(self.model._meta.pk.name)

            # Removed queryset limiting here. As per discussion re: #13023
            # on django-dev, max_num should not prevent existing
            # related objects/inlines from being displayed.
            self._queryset = qs
        return self._queryset

    def save_new(self, form, commit=True):
        """Saves and returns a new model instance for the given form."""
        return form.save(commit=commit)

    def save_existing(self, form, instance, commit=True):
        """Saves and returns an existing model instance for the given form."""
        return form.save(commit=commit)

    def save(self, commit=True):
        """Saves model instances for every form, adding and changing instances
        as necessary, and returns the list of instances.
        """
        if not commit:
            # Defer m2m saving: collect the forms as they are "saved" so
            # the caller can invoke formset.save_m2m() later.
            self.saved_forms = []
            def save_m2m():
                for form in self.saved_forms:
                    form.save_m2m()
            self.save_m2m = save_m2m
        return self.save_existing_objects(commit) + self.save_new_objects(commit)

    def clean(self):
        self.validate_unique()

    def validate_unique(self):
        """Check cross-form uniqueness (unique / unique_together /
        unique_for_date) over all valid forms in the formset."""
        # Collect unique_checks and date_checks to run from all the forms.
        all_unique_checks = set()
        all_date_checks = set()
        for form in self.forms:
            if not form.is_valid():
                continue
            exclude = form._get_validation_exclusions()
            unique_checks, date_checks = form.instance._get_unique_checks(exclude=exclude)
            all_unique_checks = all_unique_checks.union(set(unique_checks))
            all_date_checks = all_date_checks.union(set(date_checks))

        errors = []
        # Do each of the unique checks (unique and unique_together)
        for uclass, unique_check in all_unique_checks:
            seen_data = set()
            for form in self.forms:
                if not form.is_valid():
                    continue
                # get data for each field of each of unique_check
                row_data = tuple([form.cleaned_data[field] for field in unique_check if field in form.cleaned_data])
                if row_data and not None in row_data:
                    # if we've already seen it then we have a uniqueness failure
                    if row_data in seen_data:
                        # poke error messages into the right places and mark
                        # the form as invalid
                        errors.append(self.get_unique_error_message(unique_check))
                        form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
                        # remove the data from the cleaned_data dict since it was invalid
                        for field in unique_check:
                            if field in form.cleaned_data:
                                del form.cleaned_data[field]
                    # mark the data as seen
                    seen_data.add(row_data)
        # iterate over each of the date checks now
        for date_check in all_date_checks:
            seen_data = set()
            uclass, lookup, field, unique_for = date_check
            for form in self.forms:
                if not form.is_valid():
                    continue
                # see if we have data for both fields
                if (form.cleaned_data and form.cleaned_data[field] is not None
                        and form.cleaned_data[unique_for] is not None):
                    # if it's a date lookup we need to get the data for all the fields
                    if lookup == 'date':
                        date = form.cleaned_data[unique_for]
                        date_data = (date.year, date.month, date.day)
                    # otherwise it's just the attribute on the date/datetime
                    # object
                    else:
                        date_data = (getattr(form.cleaned_data[unique_for], lookup),)
                    data = (form.cleaned_data[field],) + date_data
                    # if we've already seen it then we have a uniqueness failure
                    if data in seen_data:
                        # poke error messages into the right places and mark
                        # the form as invalid
                        errors.append(self.get_date_error_message(date_check))
                        form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
                        # remove the data from the cleaned_data dict since it was invalid
                        del form.cleaned_data[field]
                    # mark the data as seen
                    seen_data.add(data)
        if errors:
            raise ValidationError(errors)

    def get_unique_error_message(self, unique_check):
        # ``unique_check`` is a tuple of field names that must be unique
        # together; a single name gets the shorter message.
        if len(unique_check) == 1:
            return ugettext("Please correct the duplicate data for %(field)s.") % {
                "field": unique_check[0],
            }
        else:
            return ugettext("Please correct the duplicate data for %(field)s, "
                "which must be unique.") % {
                    "field": get_text_list(unique_check, six.text_type(_("and"))),
                }

    def get_date_error_message(self, date_check):
        return ugettext("Please correct the duplicate data for %(field_name)s "
            "which must be unique for the %(lookup)s in %(date_field)s.") % {
            'field_name': date_check[2],
            'date_field': date_check[3],
            'lookup': six.text_type(date_check[1]),
        }

    def get_form_error(self):
        return ugettext("Please correct the duplicate values below.")

    def save_existing_objects(self, commit=True):
        self.changed_objects = []
        self.deleted_objects = []
        if not self.initial_forms:
            return []

        saved_instances = []
        try:
            forms_to_delete = self.deleted_forms
        except AttributeError:
            # No deletion checkbox on this formset (can_delete=False).
            forms_to_delete = []
        for form in self.initial_forms:
            pk_name = self._pk_field.name
            raw_pk_value = form._raw_value(pk_name)

            # clean() for different types of PK fields can sometimes return
            # the model instance, and sometimes the PK. Handle either.
            pk_value = form.fields[pk_name].clean(raw_pk_value)
            pk_value = getattr(pk_value, 'pk', pk_value)

            obj = self._existing_object(pk_value)
            if form in forms_to_delete:
                self.deleted_objects.append(obj)
                obj.delete()
                continue
            if form.has_changed():
                self.changed_objects.append((obj, form.changed_data))
                saved_instances.append(self.save_existing(form, obj, commit=commit))
                if not commit:
                    self.saved_forms.append(form)
        return saved_instances

    def save_new_objects(self, commit=True):
        self.new_objects = []
        for form in self.extra_forms:
            if not form.has_changed():
                continue
            # If someone has marked an add form for deletion, don't save the
            # object.
            if self.can_delete and self._should_delete_form(form):
                continue
            self.new_objects.append(self.save_new(form, commit=commit))
            if not commit:
                self.saved_forms.append(form)
        return self.new_objects

    def add_fields(self, form, index):
        """Add a hidden field for the object's primary key."""
        from django.db.models import AutoField, OneToOneField, ForeignKey
        self._pk_field = pk = self.model._meta.pk
        # If a pk isn't editable, then it won't be on the form, so we need to
        # add it here so we can tell which object is which when we get the
        # data back. Generally, pk.editable should be false, but for some
        # reason, auto_created pk fields and AutoField's editable attribute is
        # True, so check for that as well.
        def pk_is_not_editable(pk):
            return ((not pk.editable) or (pk.auto_created or isinstance(pk, AutoField))
                or (pk.rel and pk.rel.parent_link and pk_is_not_editable(pk.rel.to._meta.pk)))
        if pk_is_not_editable(pk) or pk.name not in form.fields:
            if form.is_bound:
                pk_value = form.instance.pk
            else:
                try:
                    if index is not None:
                        pk_value = self.get_queryset()[index].pk
                    else:
                        pk_value = None
                except IndexError:
                    pk_value = None
            if isinstance(pk, OneToOneField) or isinstance(pk, ForeignKey):
                # The pk is itself a relation: the hidden field's choices
                # come from the related model's manager.
                qs = pk.rel.to._default_manager.get_query_set()
            else:
                qs = self.model._default_manager.get_query_set()
            qs = qs.using(form.instance._state.db)
            form.fields[self._pk_field.name] = ModelChoiceField(qs, initial=pk_value, required=False, widget=HiddenInput)
        super(BaseModelFormSet, self).add_fields(form, index)
def modelformset_factory(model, form=ModelForm, formfield_callback=None,
                         formset=BaseModelFormSet,
                         extra=1, can_delete=False, can_order=False,
                         max_num=None, fields=None, exclude=None):
    """
    Returns a FormSet class for the given Django model class.

    A ModelForm subclass is generated first, then wrapped in a formset
    class whose ``model`` attribute is set to ``model``.
    """
    model_form = modelform_factory(model, form=form, fields=fields,
                                   exclude=exclude,
                                   formfield_callback=formfield_callback)
    formset_class = formset_factory(model_form, formset, extra=extra,
                                    max_num=max_num, can_order=can_order,
                                    can_delete=can_delete)
    formset_class.model = model
    return formset_class
# InlineFormSets #############################################################
class BaseInlineFormSet(BaseModelFormSet):
    """A formset for child objects related to a parent."""

    def __init__(self, data=None, files=None, instance=None,
                 save_as_new=False, prefix=None, queryset=None, **kwargs):
        from django.db.models.fields.related import RelatedObject
        if instance is None:
            # No parent yet: use an unsaved instance of the parent model.
            self.instance = self.fk.rel.to()
        else:
            self.instance = instance
        self.save_as_new = save_as_new
        # is there a better way to get the object descriptor?
        self.rel_name = RelatedObject(self.fk.rel.to, self.model, self.fk).get_accessor_name()
        if queryset is None:
            queryset = self.model._default_manager
        if self.instance.pk:
            qs = queryset.filter(**{self.fk.name: self.instance})
        else:
            # Unsaved parent has no children; use a deliberately empty qs.
            qs = queryset.filter(pk__in=[])
        super(BaseInlineFormSet, self).__init__(data, files, prefix=prefix,
                                                queryset=qs, **kwargs)

    def initial_form_count(self):
        if self.save_as_new:
            # All forms create new objects, so there are no "initial" forms.
            return 0
        return super(BaseInlineFormSet, self).initial_form_count()

    def _construct_form(self, i, **kwargs):
        form = super(BaseInlineFormSet, self)._construct_form(i, **kwargs)
        if self.save_as_new:
            # Remove the primary key from the form's data, we are only
            # creating new instances
            form.data[form.add_prefix(self._pk_field.name)] = None

            # Remove the foreign key from the form's data
            form.data[form.add_prefix(self.fk.name)] = None

        # Set the fk value here so that the form can do its validation.
        setattr(form.instance, self.fk.get_attname(), self.instance.pk)
        return form

    @classmethod
    def get_default_prefix(cls):
        from django.db.models.fields.related import RelatedObject
        return RelatedObject(cls.fk.rel.to, cls.model, cls.fk).get_accessor_name().replace('+','')

    def save_new(self, form, commit=True):
        # Use commit=False so we can assign the parent key afterwards, then
        # save the object.
        obj = form.save(commit=False)
        pk_value = getattr(self.instance, self.fk.rel.field_name)
        setattr(obj, self.fk.get_attname(), getattr(pk_value, 'pk', pk_value))
        if commit:
            obj.save()
        # form.save_m2m() can be called via the formset later on if commit=False
        if commit and hasattr(form, 'save_m2m'):
            form.save_m2m()
        return obj

    def add_fields(self, form, index):
        super(BaseInlineFormSet, self).add_fields(form, index)
        if self._pk_field == self.fk:
            # The FK *is* the pk (e.g. parent-link OneToOne).
            name = self._pk_field.name
            kwargs = {'pk_field': True}
        else:
            # The foreign key field might not be on the form, so we poke at the
            # Model field to get the label, since we need that for error messages.
            name = self.fk.name
            kwargs = {
                'label': getattr(form.fields.get(name), 'label', capfirst(self.fk.verbose_name))
            }
            if self.fk.rel.field_name != self.fk.rel.to._meta.pk.name:
                kwargs['to_field'] = self.fk.rel.field_name

        form.fields[name] = InlineForeignKeyField(self.instance, **kwargs)

        # Add the generated field to form._meta.fields if it's defined to make
        # sure validation isn't skipped on that field.
        if form._meta.fields:
            if isinstance(form._meta.fields, tuple):
                form._meta.fields = list(form._meta.fields)
            form._meta.fields.append(self.fk.name)

    def get_unique_error_message(self, unique_check):
        # The FK to the parent is implied, so drop it from the message.
        unique_check = [field for field in unique_check if field != self.fk.name]
        return super(BaseInlineFormSet, self).get_unique_error_message(unique_check)
def _get_foreign_key(parent_model, model, fk_name=None, can_fail=False):
    """
    Finds and returns the ForeignKey from model to parent if there is one
    (returns None if can_fail is True and no such field exists). If fk_name is
    provided, assume it is the name of the ForeignKey field. Unless can_fail is
    True, an exception is raised if there is no ForeignKey from model to
    parent_model.
    """
    # avoid circular import
    from django.db.models import ForeignKey
    opts = model._meta
    if fk_name:
        # Explicit name given: validate it is an FK pointing at parent_model
        # (or one of parent_model's ancestors).
        fks_to_parent = [f for f in opts.fields if f.name == fk_name]
        if len(fks_to_parent) == 1:
            fk = fks_to_parent[0]
            if not isinstance(fk, ForeignKey) or \
                    (fk.rel.to != parent_model and
                     fk.rel.to not in parent_model._meta.get_parent_list()):
                raise Exception("fk_name '%s' is not a ForeignKey to %s" % (fk_name, parent_model))
        elif len(fks_to_parent) == 0:
            raise Exception("%s has no field named '%s'" % (model, fk_name))
    else:
        # Try to discover what the ForeignKey from model to parent_model is
        fks_to_parent = [
            f for f in opts.fields
            if isinstance(f, ForeignKey)
            and (f.rel.to == parent_model
                or f.rel.to in parent_model._meta.get_parent_list())
        ]
        if len(fks_to_parent) == 1:
            fk = fks_to_parent[0]
        elif len(fks_to_parent) == 0:
            if can_fail:
                return
            raise Exception("%s has no ForeignKey to %s" % (model, parent_model))
        else:
            # Ambiguous: the caller must disambiguate with fk_name.
            raise Exception("%s has more than 1 ForeignKey to %s" % (model, parent_model))
    return fk
def inlineformset_factory(parent_model, model, form=ModelForm,
                          formset=BaseInlineFormSet, fk_name=None,
                          fields=None, exclude=None,
                          extra=3, can_order=False, can_delete=True, max_num=None,
                          formfield_callback=None):
    """
    Returns an ``InlineFormSet`` for the given kwargs.

    You must provide ``fk_name`` if ``model`` has more than one ``ForeignKey``
    to ``parent_model``.
    """
    fk = _get_foreign_key(parent_model, model, fk_name=fk_name)
    # A unique FK means at most one child can exist for a given parent, so
    # cap the formset at a single form regardless of the requested max_num.
    if fk.unique:
        max_num = 1
    FormSet = modelformset_factory(
        model,
        form=form,
        formfield_callback=formfield_callback,
        formset=formset,
        extra=extra,
        can_delete=can_delete,
        can_order=can_order,
        fields=fields,
        exclude=exclude,
        max_num=max_num,
    )
    FormSet.fk = fk
    return FormSet
# Fields #####################################################################
class InlineForeignKeyHiddenInput(HiddenInput):
    # The inline FK value is fixed by the parent instance, so it can never
    # count as "changed" for has_changed() purposes.
    def _has_changed(self, initial, data):
        return False
class InlineForeignKeyField(Field):
    """
    A basic integer field that deals with validating the given value to a
    given parent instance in an inline.
    """
    default_error_messages = {
        'invalid_choice': _('The inline foreign key did not match the parent instance primary key.'),
    }

    def __init__(self, parent_instance, *args, **kwargs):
        self.parent_instance = parent_instance
        # pk_field: True when this field is also the child's primary key.
        self.pk_field = kwargs.pop("pk_field", False)
        # to_field: the parent attribute the FK targets when it is not the pk.
        self.to_field = kwargs.pop("to_field", None)
        if self.parent_instance is not None:
            if self.to_field:
                kwargs["initial"] = getattr(self.parent_instance, self.to_field)
            else:
                kwargs["initial"] = self.parent_instance.pk
        kwargs["required"] = False
        kwargs["widget"] = InlineForeignKeyHiddenInput
        super(InlineForeignKeyField, self).__init__(*args, **kwargs)

    def clean(self, value):
        if value in EMPTY_VALUES:
            if self.pk_field:
                return None
            # if there is no value act as we did before.
            return self.parent_instance
        # ensure the we compare the values as equal types.
        if self.to_field:
            orig = getattr(self.parent_instance, self.to_field)
        else:
            orig = self.parent_instance.pk
        # Compare as text so int pk vs. submitted string pk still match.
        if force_text(value) != force_text(orig):
            raise ValidationError(self.error_messages['invalid_choice'])
        return self.parent_instance
class ModelChoiceIterator(object):
    """Lazily yields ``(value, label)`` choice pairs for a ModelChoiceField."""

    def __init__(self, field):
        self.field = field
        self.queryset = field.queryset

    def __iter__(self):
        field = self.field
        if field.empty_label is not None:
            yield ("", field.empty_label)
        if field.cache_choices:
            # Build the cache on first iteration, then replay it thereafter.
            if field.choice_cache is None:
                field.choice_cache = [self.choice(obj)
                                      for obj in self.queryset.all()]
            for item in field.choice_cache:
                yield item
        else:
            for obj in self.queryset.all():
                yield self.choice(obj)

    def __len__(self):
        return len(self.queryset)

    def choice(self, obj):
        return (self.field.prepare_value(obj),
                self.field.label_from_instance(obj))
class ModelChoiceField(ChoiceField):
    """A ChoiceField whose choices are a model QuerySet."""
    # This class is a subclass of ChoiceField for purity, but it doesn't
    # actually use any of ChoiceField's implementation.
    default_error_messages = {
        'invalid_choice': _('Select a valid choice. That choice is not one of'
                            ' the available choices.'),
    }
    def __init__(self, queryset, empty_label="---------", cache_choices=False,
                 required=True, widget=None, label=None, initial=None,
                 help_text=None, to_field_name=None, *args, **kwargs):
        # A required field with an initial value never needs the empty
        # choice: the user can only switch between real objects.
        if required and (initial is not None):
            self.empty_label = None
        else:
            self.empty_label = empty_label
        self.cache_choices = cache_choices
        # Call Field instead of ChoiceField __init__() because we don't need
        # ChoiceField.__init__().
        Field.__init__(self, required, widget, label, initial, help_text,
                       *args, **kwargs)
        self.queryset = queryset
        self.choice_cache = None
        self.to_field_name = to_field_name
    def __deepcopy__(self, memo):
        # Deliberately skip ChoiceField.__deepcopy__ (it copies _choices,
        # which this class does not use) and go straight to Field's.
        result = super(ChoiceField, self).__deepcopy__(memo)
        # Need to force a new ModelChoiceIterator to be created, bug #11183
        result.queryset = result.queryset
        return result
    def _get_queryset(self):
        return self._queryset
    def _set_queryset(self, queryset):
        self._queryset = queryset
        # Reassigning choices hands the widget a fresh iterator over the
        # new queryset.
        self.widget.choices = self.choices
    queryset = property(_get_queryset, _set_queryset)
    # this method will be used to create object labels by the QuerySetIterator.
    # Override it to customize the label.
    def label_from_instance(self, obj):
        """
        This method is used to convert objects into strings; it's used to
        generate the labels for the choices presented by this object. Subclasses
        can override this method to customize the display of the choices.
        """
        return smart_text(obj)
    def _get_choices(self):
        # If self._choices is set, then somebody must have manually set
        # the property self.choices. In this case, just return self._choices.
        if hasattr(self, '_choices'):
            return self._choices
        # Otherwise, execute the QuerySet in self.queryset to determine the
        # choices dynamically. Return a fresh ModelChoiceIterator that has not been
        # consumed. Note that we're instantiating a new ModelChoiceIterator *each*
        # time _get_choices() is called (and, thus, each time self.choices is
        # accessed) so that we can ensure the QuerySet has not been consumed. This
        # construct might look complicated but it allows for lazy evaluation of
        # the queryset.
        return ModelChoiceIterator(self)
    choices = property(_get_choices, ChoiceField._set_choices)
    def prepare_value(self, value):
        # Model instances are reduced to their pk (or the configured
        # to_field); everything else is handled by the parent class.
        if hasattr(value, '_meta'):
            if self.to_field_name:
                return value.serializable_value(self.to_field_name)
            else:
                return value.pk
        return super(ModelChoiceField, self).prepare_value(value)
    def to_python(self, value):
        # Convert a submitted key back into a model instance via a queryset
        # lookup; raise 'invalid_choice' when nothing matches.
        if value in EMPTY_VALUES:
            return None
        try:
            key = self.to_field_name or 'pk'
            value = self.queryset.get(**{key: value})
        except (ValueError, self.queryset.model.DoesNotExist):
            raise ValidationError(self.error_messages['invalid_choice'])
        return value
    def validate(self, value):
        # Skip ChoiceField.validate(): the membership check already
        # happened in to_python() via the queryset lookup.
        return Field.validate(self, value)
class ModelMultipleChoiceField(ModelChoiceField):
    """A MultipleChoiceField whose choices are a model QuerySet."""
    widget = SelectMultiple
    hidden_widget = MultipleHiddenInput
    default_error_messages = {
        'list': _('Enter a list of values.'),
        'invalid_choice': _('Select a valid choice. %s is not one of the'
                            ' available choices.'),
        'invalid_pk_value': _('"%s" is not a valid value for a primary key.')
    }
    def __init__(self, queryset, cache_choices=False, required=True,
                 widget=None, label=None, initial=None,
                 help_text=None, *args, **kwargs):
        # empty_label is forced to None: a multi-select has no empty choice.
        super(ModelMultipleChoiceField, self).__init__(queryset, None,
            cache_choices, required, widget, label, initial, help_text,
            *args, **kwargs)
    def clean(self, value):
        # Validate a list of submitted keys and return the matching queryset.
        if self.required and not value:
            raise ValidationError(self.error_messages['required'])
        elif not self.required and not value:
            return self.queryset.none()
        if not isinstance(value, (list, tuple)):
            raise ValidationError(self.error_messages['list'])
        key = self.to_field_name or 'pk'
        # First pass: surface values that cannot even be coerced to the
        # key's type (e.g. a non-integer pk) with a dedicated message.
        for pk in value:
            try:
                self.queryset.filter(**{key: pk})
            except ValueError:
                raise ValidationError(self.error_messages['invalid_pk_value'] % pk)
        qs = self.queryset.filter(**{'%s__in' % key: value})
        pks = set([force_text(getattr(o, key)) for o in qs])
        # Second pass: any submitted value missing from the fetched set is
        # an invalid choice (compare as text to tolerate int/str mixes).
        for val in value:
            if force_text(val) not in pks:
                raise ValidationError(self.error_messages['invalid_choice'] % val)
        # Since this overrides the inherited ModelChoiceField.clean
        # we run custom validators here
        self.run_validators(value)
        return qs
    def prepare_value(self, value):
        # Iterables of values are prepared element-wise; strings and model
        # instances fall through to the single-value implementation.
        if (hasattr(value, '__iter__') and
                not isinstance(value, six.text_type) and
                not hasattr(value, '_meta')):
            return [super(ModelMultipleChoiceField, self).prepare_value(v) for v in value]
        return super(ModelMultipleChoiceField, self).prepare_value(value)
| agpl-3.0 |
Philippe12/external_chromium_org | third_party/closure_linter/closure_linter/common/position.py | 285 | 3324 | #!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to represent positions within strings."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
class Position(object):
    """Object representing a segment of a string.

    Attributes:
      start: The index in to the string where the segment starts.
      length: The length of the string segment.
    """

    def __init__(self, start, length):
        """Initialize the position object.

        Args:
          start: The start index.
          length: The number of characters to include.
        """
        self.start = start
        self.length = length

    def Get(self, string):
        """Returns this range of the given string.

        Args:
          string: The string to slice.

        Returns:
          The string within the range specified by this object.
        """
        end = self.start + self.length
        return string[self.start:end]

    def Set(self, target, source):
        """Sets this range within the target string to the source string.

        Args:
          target: The target string.
          source: The source string.

        Returns:
          The resulting string
        """
        prefix = target[:self.start]
        suffix = target[self.start + self.length:]
        return prefix + source + suffix

    def IsAtEnd(self, string):
        """Returns whether this position is at the end of the given string.

        Args:
          string: The string to test for the end of.

        Returns:
          Whether this position is at the end of the given string.
        """
        return self.length == 0 and self.start == len(string)

    def IsAtBeginning(self):
        """Returns whether this position is at the beginning of any string.

        Returns:
          Whether this position is at the beginning of any string.
        """
        return self.length == 0 and self.start == 0

    @staticmethod
    def AtEnd(string):
        """Create a Position representing the end of the given string.

        Args:
          string: The string to represent the end of.

        Returns:
          The created Position object.
        """
        return Position(len(string), 0)

    @staticmethod
    def AtBeginning():
        """Create a Position representing the beginning of any string.

        Returns:
          The created Position object.
        """
        return Position(0, 0)

    @staticmethod
    def All(string):
        """Create a Position representing the entire string.

        Args:
          string: The string to represent the entirety of.

        Returns:
          The created Position object.
        """
        return Position(0, len(string))

    @staticmethod
    def Index(index):
        """Returns a Position object for the specified index, inclusively.

        Args:
          index: The index to select.

        Returns:
          The created Position object.
        """
        return Position(index, 1)
| bsd-3-clause |
JimCircadian/ansible | lib/ansible/modules/storage/netapp/netapp_e_amg_role.py | 22 | 7838 | #!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: netapp_e_amg_role
short_description: Update the role of a storage array within an Asynchronous Mirror Group (AMG).
description:
- Update a storage array to become the primary or secondary instance in an asynchronous mirror group
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
ssid:
description:
- The ID of the primary storage array for the async mirror action
required: yes
role:
description:
- Whether the array should be the primary or secondary array for the AMG
required: yes
choices: ['primary', 'secondary']
noSync:
description:
- Whether to avoid synchronization prior to role reversal
required: no
default: no
type: bool
force:
description:
- Whether to force the role reversal regardless of the online-state of the primary
required: no
default: no
"""
EXAMPLES = """
- name: Update the role of a storage array
netapp_e_amg_role:
name: updating amg role
role: primary
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
"""
RETURN = """
msg:
description: Failure message
returned: failure
type: string
sample: "No Async Mirror Group with the name."
"""
import json
import traceback
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    """Issue an HTTP(S) request against the SANtricity API and decode the JSON body.

    Returns a ``(status_code, data)`` tuple where ``data`` is the parsed JSON
    response body.

    Raises:
        Exception: when the body cannot be parsed or the status is >= 400,
            unless ``ignore_errors`` is set.
    """
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError as e:
        # The error response still carries a (possibly JSON) body we want.
        r = e.fp
    # Pre-initialize so a failing r.read() cannot leave raw_data unbound
    # (the original code raised NameError in that case).
    raw_data = None
    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            raw_data = None
    except Exception:
        # Catch Exception rather than a bare except so SystemExit and
        # KeyboardInterrupt still propagate.
        if ignore_errors:
            pass
        else:
            raise Exception(raw_data)
    resp_code = r.getcode()
    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    else:
        return resp_code, data
def has_match(module, ssid, api_url, api_pwd, api_usr, body, name):
    """Look up the AMG labelled *name* on storage array *ssid*.

    Returns a ``(amg_exists, has_desired_role, amg_id, amg_data)`` tuple;
    fails the module (via ``module.fail_json``) when the AMG list cannot be
    fetched.
    """
    amg_exists = False
    has_desired_role = False
    amg_id = None
    amg_data = None
    get_amgs = 'storage-systems/%s/async-mirrors' % ssid
    url = api_url + get_amgs
    try:
        amg_rc, amgs = request(url, url_username=api_usr, url_password=api_pwd,
                               headers=HEADERS)
    # except Exception (not a bare except) so SystemExit/KeyboardInterrupt
    # are not swallowed and reported as an API failure.
    except Exception:
        module.fail_json(msg="Failed to find AMGs on storage array. Id [%s]" % (ssid))
    for amg in amgs:
        if amg['label'] == name:
            amg_exists = True
            amg_id = amg['id']
            amg_data = amg
            if amg['localRole'] == body.get('role'):
                has_desired_role = True
    return amg_exists, has_desired_role, amg_id, amg_data
def update_amg(module, ssid, api_url, api_usr, api_pwd, body, amg_id):
    """POST the desired role to the AMG, then poll until the reversal completes.

    Fails the module (via ``module.fail_json``) on any API error; otherwise
    returns the final AMG status document.
    """
    endpoint = 'storage-systems/%s/async-mirrors/%s/role' % (ssid, amg_id)
    url = api_url + endpoint
    post_data = json.dumps(body)
    try:
        request(url, data=post_data, method='POST', url_username=api_usr,
                url_password=api_pwd, headers=HEADERS)
    except Exception as e:
        module.fail_json(
            msg="Failed to change role of AMG. Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)),
            exception=traceback.format_exc())
    # Fetch the AMG status once so we can inspect the reversal progress.
    status_endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, amg_id)
    status_url = api_url + status_endpoint
    try:
        rc, status = request(status_url, method='GET', url_username=api_usr,
                             url_password=api_pwd, headers=HEADERS)
    except Exception as e:
        module.fail_json(
            msg="Failed to check status of AMG after role reversal. "
                "Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)),
            exception=traceback.format_exc())
    # Here we wait for the role reversal to complete
    # NOTE(review): this polls in a tight loop with no delay between
    # requests; consider a short sleep if the API becomes chatty.
    if 'roleChangeProgress' in status:
        while status['roleChangeProgress'] != "none":
            try:
                rc, status = request(status_url, method='GET',
                                     url_username=api_usr, url_password=api_pwd, headers=HEADERS)
            except Exception as e:
                module.fail_json(
                    msg="Failed to check status of AMG after role reversal. "
                        "Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)),
                    exception=traceback.format_exc())
    return status
def main():
    """Entry point: change (or confirm) the role of the named AMG."""
    argument_spec = basic_auth_argument_spec()
    argument_spec.update(
        name=dict(required=True, type='str'),
        role=dict(required=True, choices=['primary', 'secondary']),
        noSync=dict(required=False, type='bool', default=False),
        force=dict(required=False, type='bool', default=False),
        ssid=dict(required=True, type='str'),
        api_url=dict(required=True),
        api_username=dict(required=False),
        api_password=dict(required=False, no_log=True),
    )
    module = AnsibleModule(argument_spec=argument_spec)
    params = module.params
    # Pop connection details out so the remainder of ``params`` is exactly
    # the request body for the role-change call.
    ssid = params.pop('ssid')
    api_url = params.pop('api_url')
    user = params.pop('api_username')
    pwd = params.pop('api_password')
    name = params.pop('name')
    if not api_url.endswith('/'):
        api_url = api_url + '/'
    amg_exists, role_matches, async_id, amg_data = has_match(
        module, ssid, api_url, pwd, user, params, name)
    if not amg_exists:
        module.fail_json(msg="No Async Mirror Group with the name: '%s' was found" % name)
    elif role_matches:
        # Already in the requested role; nothing to change.
        module.exit_json(changed=False, **amg_data)
    else:
        amg_data = update_amg(module, ssid, api_url, user, pwd, params, async_id)
        if amg_data:
            module.exit_json(changed=True, **amg_data)
        else:
            module.exit_json(changed=True, msg="AMG role changed.")
if __name__ == '__main__':
main()
| gpl-3.0 |
ppasq/geonode | geonode/monitoring/probes.py | 1 | 4337 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2017 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import os
import sys
import time
import socket
import psutil
class BaseProbe(object):
    """Collects host-level metrics (load, cpu, memory, disk, network) via
    the stdlib and psutil."""

    @staticmethod
    def get_loadavg():
        """Return the (1, 5, 15)-minute load averages, or [] where unsupported."""
        try:
            return os.getloadavg()
        except (AttributeError, OSError,):
            # os.getloadavg() is unavailable on some platforms (e.g. Windows).
            return []

    @staticmethod
    def get_uname():
        """
        returns list similar to https://docs.python.org/2/library/os.html#os.uname
        """
        try:
            return os.uname()
        except Exception:
            # os.uname() is POSIX-only; fall back to a rough 5-item equivalent.
            return [sys.platform, socket.gethostbyaddr(socket.gethostname()), None, None, None]

    @staticmethod
    def get_uptime():
        """
        Get uptime in seconds
        """
        return time.time() - psutil.boot_time()

    @staticmethod
    def get_mem():
        """
        Returns dictionary with memory information (in bytes) with keys:
            all
            used
            free
            usage
            usage.percent
        """
        vm = psutil.virtual_memory()
        # Values are raw bytes as reported by psutil; the previous identity
        # wrapper m() performed no conversion and has been removed.
        return {'all': vm.total,
                'used': vm.used,
                'free': vm.available,
                'usage': vm.used,
                'usage.percent': ((vm.used * 100.0) / vm.total),
                }

    @staticmethod
    def get_cpu():
        """Return cumulative CPU time (user + system) in seconds."""
        cpu = psutil.cpu_times()
        return {
            'usage': cpu.user + cpu.system,
        }

    @staticmethod
    def get_disk():
        """
        Returns list of drives with capacity and utilization
        list item contains:
            block device (/dev/sXX)
            total capacity (in bytes)
            used space
            free space
            utilization (as a percent)
            mount point
        """
        partitions = psutil.disk_partitions()
        out = []
        usage = psutil.disk_io_counters(True)
        for p in partitions:
            dev = p.device
            # disk_io_counters keys on the bare device name (e.g. "sda1").
            dev_name = dev.split('/')[-1]
            part = p.mountpoint
            du = psutil.disk_usage(part)
            _dusage = usage.get(dev_name)
            dusage = {'write': 0,
                      'read': 0}
            if _dusage:
                dusage['write'] = _dusage.write_bytes
                dusage['read'] = _dusage.read_bytes
            out.append({'device': dev,
                        'total': du.total,
                        'used': du.used,
                        'free': du.free,
                        'percent': du.percent,
                        'usage': dusage,
                        'mountpoint': part})
        return out

    @staticmethod
    def get_network():
        """
        returns dictionary with ip information:
            {ifname: {'mac': mac,
                      'ip': ip,
                      'traffic': {'in': txin,
                                  'out': txout}
                      }
            }
        """
        out = {}
        iostats = psutil.net_io_counters(True)
        # .items() instead of the Python 2-only .iteritems() so this also
        # runs under Python 3 (identical behavior on Python 2).
        for ifname, ifdata in psutil.net_if_addrs().items():
            ifstats = iostats.get(ifname)
            if ifstats is None:
                # Interface with addresses but no traffic counters: skip.
                continue
            mac = None
            if len(ifdata) == 2:
                # Second entry, when present, is assumed to be the link-layer
                # (MAC) address -- TODO confirm ordering across platforms.
                mac = ifdata[1].address
            ip = ifdata[0].address
            out[ifname] = {'ip': ip,
                           'mac': mac,
                           'traffic': {'in': ifstats.bytes_recv,
                                       'out': ifstats.bytes_sent}}
        return out
def get_probe():
    """Factory: return a new :class:`BaseProbe` instance for this host."""
    probe = BaseProbe()
    return probe
| gpl-3.0 |
akrherz/pyWWA | parsers/pywwa/workflows/spammer.py | 1 | 1682 | """
Handle things that need emailed to me for my situational awareness.
"""
# stdlib
from email.mime.text import MIMEText
# 3rd Party
from twisted.internet import reactor
from twisted.mail import smtp
from pyiem.util import LOG
from pyiem.nws import product
# Local
from pywwa import common
from pywwa.ldm import bridge
IOWA_WFOS = ["KDMX", "KDVN", "KARX", "KFSD", "KOAX"]
def process_data(data):
    """Ingest one raw product, emailing any processing failure to admins."""
    try:
        real_process(data)
    except Exception as err:
        # Never let a malformed product kill the bridge; report it instead.
        common.email_error(err, data)
def real_process(data):
    """Parse a raw text product and email it out for situational awareness."""
    prod = product.TextProduct(data)
    # NWS Employee Notices are noise for our purposes.
    if prod.afos == "ADMNES":
        LOG.info("Dumping %s on the floor", prod.get_product_id())
        return
    # Strip off stuff at the top
    msg = MIMEText(prod.unixtext[2:], "plain", "utf-8")
    # some products have no AWIPS ID, sigh
    if prod.afos is None:
        subject = prod.wmo
    elif prod.afos[:3] == "ADM":
        subject = "ADMIN NOTICE %s" % (prod.afos[3:],)
    elif prod.afos[:3] == "RER":
        subject = "[RER] %s %s" % (prod.source, prod.afos[3:])
        if prod.source in IOWA_WFOS:
            msg["Cc"] = "Justin.Glisan@iowaagriculture.gov"
    else:
        subject = prod.afos
    msg["subject"] = subject
    msg["From"] = common.SETTINGS.get("pywwa_errors_from", "ldm@localhost")
    msg["To"] = "akrherz@iastate.edu"
    df = smtp.sendmail(
        common.SETTINGS.get("pywwa_smtp", "smtp"), msg["From"], msg["To"], msg
    )
    df.addErrback(LOG.error)
def main():
    """Go Main Go."""
    # No jabber connectivity is needed; this service only sends email.
    common.main(with_jabber=False)
    # Feed products arriving from LDM on stdin into our processor.
    bridge(process_data)
    reactor.run()
if __name__ == "__main__":
main()
| mit |
Mushirahmed/gnuradio | gnuradio-core/src/python/gnuradio/blks2impl/pfb_channelizer.py | 17 | 2869 | #!/usr/bin/env python
#
# Copyright 2009,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, optfir
class pfb_channelizer_ccf(gr.hier_block2):
    '''
    Make a Polyphase Filter channelizer (complex in, complex out, floating-point taps)

    This simplifies the interface by allowing a single input stream to connect to this block.
    It will then output a stream for each channel.
    '''
    def __init__(self, numchans, taps=None, oversample_rate=1, atten=100):
        gr.hier_block2.__init__(self, "pfb_channelizer_ccf",
                                gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
                                gr.io_signature(numchans, numchans, gr.sizeof_gr_complex)) # Output signature

        self._nchans = numchans
        self._oversample_rate = oversample_rate

        if taps is not None:
            self._taps = taps
        else:
            # Create a filter that covers the full bandwidth of the input signal
            bw = 0.4
            tb = 0.2
            ripple = 0.1
            made = False
            # optfir may fail to meet a tight spec; relax the passband
            # ripple in 0.01 dB steps until a design succeeds.
            while not made:
                try:
                    self._taps = optfir.low_pass(1, self._nchans, bw, bw+tb, ripple, atten)
                    made = True
                except RuntimeError:
                    ripple += 0.01
                    made = False
                    print("Warning: set ripple to %.4f dB. If this is a problem, adjust the attenuation or create your own filter taps." % (ripple))

                    # Build in an exit strategy; if we've come this far, it ain't working.
                    if(ripple >= 1.0):
                        raise RuntimeError("optfir could not generate an appropriate filter.")

        self.s2ss = gr.stream_to_streams(gr.sizeof_gr_complex, self._nchans)
        self.pfb = gr.pfb_channelizer_ccf(self._nchans, self._taps,
                                          self._oversample_rate)
        self.connect(self, self.s2ss)

        # range() instead of the Python 2-only xrange() so the block also
        # works under Python 3; nchans is small, so list cost is negligible.
        for i in range(self._nchans):
            self.connect((self.s2ss, i), (self.pfb, i))
            self.connect((self.pfb, i), (self, i))

    def set_channel_map(self, newmap):
        # Delegate channel reordering to the underlying C++ channelizer.
        self.pfb.set_channel_map(newmap)
| gpl-3.0 |
40223136/-2015cd_midterm | static/Brython3.1.0-20150301-090019/Lib/unittest/test/_test_warnings.py | 858 | 2304 | # helper module for test_runner.Test_TextTestRunner.test_warnings
"""
This module has a number of tests that raise different kinds of warnings.
When the tests are run, the warnings are caught and their messages are printed
to stdout. This module also accepts an arg that is then passed to
unittest.main to affect the behavior of warnings.
Test_TextTestRunner.test_warnings executes this script with different
combinations of warnings args and -W flags and check that the output is correct.
See #10535.
"""
import sys
import unittest
import warnings
def warnfun():
    """Emit a RuntimeWarning from a single shared call site (see test_function)."""
    warnings.warn('rw', category=RuntimeWarning)
class TestWarnings(unittest.TestCase):
    # unittest warnings will be printed at most once per type (max one message
    # for the fail* methods, and one for the assert* methods)
    #
    # NOTE: the deprecated aliases below (assertEquals, failUnless) are used
    # *on purpose* -- this module exists to generate warnings for
    # Test_TextTestRunner.test_warnings; do not "modernize" them.
    def test_assert(self):
        # assertEquals is the deprecated alias of assertEqual and raises a
        # DeprecationWarning on each call.
        self.assertEquals(2+2, 4)
        self.assertEquals(2*2, 4)
        self.assertEquals(2**2, 4)
    def test_fail(self):
        # failUnless is the deprecated alias of assertTrue.
        self.failUnless(1)
        self.failUnless(True)
    def test_other_unittest(self):
        # Non-deprecated assert* methods: should emit no warnings.
        self.assertAlmostEqual(2+2, 4)
        self.assertNotAlmostEqual(4+4, 2)
    # these warnings are normally silenced, but they are printed in unittest
    def test_deprecation(self):
        warnings.warn('dw', DeprecationWarning)
        warnings.warn('dw', DeprecationWarning)
        warnings.warn('dw', DeprecationWarning)
    def test_import(self):
        warnings.warn('iw', ImportWarning)
        warnings.warn('iw', ImportWarning)
        warnings.warn('iw', ImportWarning)
    # user warnings should always be printed
    def test_warning(self):
        warnings.warn('uw')
        warnings.warn('uw')
        warnings.warn('uw')
    # these warnings come from the same place; they will be printed
    # only once by default or three times if the 'always' filter is used
    def test_function(self):
        warnfun()
        warnfun()
        warnfun()
if __name__ == '__main__':
    # Collect every warning raised while the test suite runs so they can be
    # echoed to stdout afterwards (the caller parses this output).
    with warnings.catch_warnings(record=True) as ws:
        # if an arg is provided pass it to unittest.main as 'warnings'
        if len(sys.argv) == 2:
            unittest.main(exit=False, warnings=sys.argv.pop())
        else:
            unittest.main(exit=False)
    # print all the warning messages collected
    for w in ws:
        print(w.message)
| gpl-3.0 |
bendykst/deluge | deluge/plugins/Extractor/deluge/plugins/extractor/__init__.py | 4 | 1158 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2009 Andrew Resch <andrewresch@gmail.com>
#
# Basic plugin template created by:
# Copyright (C) 2008 Martijn Voncken <mvoncken@gmail.com>
# Copyright (C) 2007-2009 Andrew Resch <andrewresch@gmail.com>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
from deluge.plugins.init import PluginInitBase
class CorePlugin(PluginInitBase):
    """Daemon-side (core) entry point for the Extractor plugin."""

    def __init__(self, plugin_name):
        # Import lazily so the core component is only loaded when enabled.
        from .core import Core
        self._plugin_cls = Core
        super(CorePlugin, self).__init__(plugin_name)
class GtkUIPlugin(PluginInitBase):
    """GTK user-interface entry point for the Extractor plugin."""

    def __init__(self, plugin_name):
        # Import lazily so GTK is only pulled in when this UI is used.
        from .gtkui import GtkUI
        self._plugin_cls = GtkUI
        super(GtkUIPlugin, self).__init__(plugin_name)
class WebUIPlugin(PluginInitBase):
    """Web user-interface entry point for the Extractor plugin."""

    def __init__(self, plugin_name):
        # Import lazily so the web component is only loaded when used.
        from .webui import WebUI
        self._plugin_cls = WebUI
        super(WebUIPlugin, self).__init__(plugin_name)
| gpl-3.0 |
samabhi/pstHealth | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/big5prober.py | 2931 | 1684 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import Big5DistributionAnalysis
from .mbcssm import Big5SMModel
class Big5Prober(MultiByteCharSetProber):
    """Probes byte streams for the Big5 (Traditional Chinese) encoding by
    pairing the Big5 coding state machine with its character-distribution
    analyser."""
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(Big5SMModel)
        self._mDistributionAnalyzer = Big5DistributionAnalysis()
        self.reset()
    def get_charset_name(self):
        # Name reported to callers when this prober wins the detection.
        return "Big5"
| mit |
chugunovyar/factoryForBuild | env/lib/python2.7/site-packages/celery/contrib/rdb.py | 2 | 5107 | # -*- coding: utf-8 -*-
"""Remote Debugger.
Introduction
============
This is a remote debugger for Celery tasks running in multiprocessing
pool workers. Inspired by http://snippets.dzone.com/posts/show/7248
Usage
-----
.. code-block:: python
from celery.contrib import rdb
from celery import task
@task()
def add(x, y):
result = x + y
rdb.set_trace()
return result
Environment Variables
=====================
.. envvar:: CELERY_RDB_HOST
``CELERY_RDB_HOST``
-------------------
Hostname to bind to. Default is '127.0.01' (only accessable from
localhost).
.. envvar:: CELERY_RDB_PORT
``CELERY_RDB_PORT``
-------------------
Base port to bind to. Default is 6899.
The debugger will try to find an available port starting from the
base port. The selected port will be logged by the worker.
"""
from __future__ import absolute_import, print_function, unicode_literals
import errno
import os
import socket
import sys
from pdb import Pdb
from billiard.process import current_process
from celery.five import range
__all__ = [
'CELERY_RDB_HOST', 'CELERY_RDB_PORT', 'DEFAULT_PORT',
'Rdb', 'debugger', 'set_trace',
]
DEFAULT_PORT = 6899
CELERY_RDB_HOST = os.environ.get('CELERY_RDB_HOST') or '127.0.0.1'
CELERY_RDB_PORT = int(os.environ.get('CELERY_RDB_PORT') or DEFAULT_PORT)
#: Holds the currently active debugger.
_current = [None]
_frame = getattr(sys, '_getframe')
NO_AVAILABLE_PORT = """\
{self.ident}: Couldn't find an available port.
Please specify one using the CELERY_RDB_PORT environment variable.
"""
BANNER = """\
{self.ident}: Ready to connect: telnet {self.host} {self.port}
Type `exit` in session to continue.
{self.ident}: Waiting for client...
"""
SESSION_STARTED = '{self.ident}: Now in session with {self.remote_addr}.'
SESSION_ENDED = '{self.ident}: Session with {self.remote_addr} ended.'
class Rdb(Pdb):
    """Remote debugger: a Pdb whose stdin/stdout are a telnet client socket."""

    me = 'Remote Debugger'
    _prev_outs = None
    _sock = None
    def __init__(self, host=CELERY_RDB_HOST, port=CELERY_RDB_PORT,
                 port_search_limit=100, port_skew=+0, out=sys.stdout):
        # Bind a listening socket, block until a client connects, then wire
        # Pdb's stdin/stdout to the accepted connection.
        self.active = True
        self.out = out
        # Remember the real stdin/stdout so _close_session() can restore them.
        self._prev_handles = sys.stdin, sys.stdout
        self._sock, this_port = self.get_avail_port(
            host, port, port_search_limit, port_skew,
        )
        self._sock.setblocking(1)
        self._sock.listen(1)
        self.ident = '{0}:{1}'.format(self.me, this_port)
        self.host = host
        self.port = this_port
        self.say(BANNER.format(self=self))
        # Blocks here until a client connects (e.g. ``telnet host port``).
        self._client, address = self._sock.accept()
        self._client.setblocking(1)
        self.remote_addr = ':'.join(str(v) for v in address)
        self.say(SESSION_STARTED.format(self=self))
        # Redirect the *process-wide* stdio into the socket so the Pdb
        # session is fully interactive over the network.
        self._handle = sys.stdin = sys.stdout = self._client.makefile('rw')
        Pdb.__init__(self, completekey='tab',
                     stdin=self._handle, stdout=self._handle)
    def get_avail_port(self, host, port, search_limit=100, skew=+0):
        # Find and bind the first free port at ``port + skew + i``; returns
        # ``(bound_socket, port)`` or raises after ``search_limit`` tries.
        try:
            # Pool worker names look like "PoolWorker-<n>"; use <n> as the
            # skew so concurrent workers pick distinct ports.
            _, skew = current_process().name.split('-')
            skew = int(skew)
        except ValueError:
            pass
        this_port = None
        for i in range(search_limit):
            _sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            this_port = port + skew + i
            try:
                _sock.bind((host, this_port))
            except socket.error as exc:
                # Port already taken (or invalid): try the next candidate.
                if exc.errno in [errno.EADDRINUSE, errno.EINVAL]:
                    continue
                raise
            else:
                return _sock, this_port
        else:
            raise Exception(NO_AVAILABLE_PORT.format(self=self))
    def say(self, m):
        # Status messages go to the *original* stream, never the socket.
        print(m, file=self.out)
    def __enter__(self):
        return self
    def __exit__(self, *exc_info):
        self._close_session()
    def _close_session(self):
        # Restore process stdio and tear down the socket pair exactly once
        # (guarded by ``self.active``).
        self.stdin, self.stdout = sys.stdin, sys.stdout = self._prev_handles
        if self.active:
            if self._handle is not None:
                self._handle.close()
            if self._client is not None:
                self._client.close()
            if self._sock is not None:
                self._sock.close()
            self.active = False
            self.say(SESSION_ENDED.format(self=self))
    def do_continue(self, arg):
        # ``c``/``cont``/``continue``: end the remote session, then resume.
        self._close_session()
        self.set_continue()
        return 1
    do_c = do_cont = do_continue
    def do_quit(self, arg):
        # ``q``/``exit``/``quit``: end the session and stop debugging.
        self._close_session()
        self.set_quit()
        return 1
    do_q = do_exit = do_quit
    def set_quit(self):
        # this raises a BdbQuit exception that we're unable to catch.
        sys.settrace(None)
def debugger():
    """Return the active debugger instance, creating one when needed."""
    existing = _current[0]
    if existing is not None and existing.active:
        return existing
    # No live session: start a new remote debugger and remember it.
    _current[0] = Rdb()
    return _current[0]
def set_trace(frame=None):
    """Set a break-point at *frame*, defaulting to the caller's frame."""
    target = _frame().f_back if frame is None else frame
    return debugger().set_trace(target)
| gpl-3.0 |
CanalTP/navitia | source/jormungandr/jormungandr/parking_space_availability/car/divia.py | 3 | 3340 | # encoding: utf-8
# Copyright (c) 2001-2018, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
import jmespath
from collections import namedtuple
from jormungandr.parking_space_availability.car.common_car_park_provider import CommonCarParkProvider
from jormungandr.parking_space_availability.car.parking_places import ParkingPlaces
# No feed-publisher metadata is attached to Divia responses by default.
DEFAULT_DIVIA_FEED_PUBLISHER = None
# Names of the opendata record fields that hold a car park's identifier,
# its free-place count and its total capacity (they vary per dataset).
SearchPattern = namedtuple('SearchPattern', ['id_park', 'available', 'total'])
def divia_maker(search_patterns):
    """Build a Divia car-park provider class bound to *search_patterns*.

    *search_patterns* is a :class:`SearchPattern` naming the opendata
    record fields that hold the park identifier, the free-place count and
    the total capacity (these differ between Divia's datasets).
    """
    class _DiviaProvider(CommonCarParkProvider):
        # Dataset-specific field names; bound from search_patterns below.
        id_park = None
        available = None
        total = None

        def __init__(
            self, url, operators, dataset, timeout=1, feed_publisher=DEFAULT_DIVIA_FEED_PUBLISHER, **kwargs
        ):
            self.provider_name = 'DIVIA'
            super(_DiviaProvider, self).__init__(url, operators, dataset, timeout, feed_publisher, **kwargs)

        def process_data(self, data, poi):
            """Extract a ParkingPlaces for *poi* from a raw opendata payload."""
            record_filter = 'records[?to_number(fields.{})==`{}`]|[0]'.format(
                self.id_park, poi['properties']['ref']
            )
            park = jmespath.search(record_filter, data)
            if not park:
                return None
            free = jmespath.search('fields.{}'.format(self.available), park)
            capacity = jmespath.search('fields.{}'.format(self.total), park)
            if free is None or capacity is None or capacity < free:
                occupied = None
            else:
                occupied = capacity - free
            return ParkingPlaces(free, occupied, None, None)

    for field in ('id_park', 'available', 'total'):
        setattr(_DiviaProvider, field, getattr(search_patterns, field))
    return _DiviaProvider
# Regular Divia car parks (public parking dataset field names).
DiviaProvider = divia_maker(
    SearchPattern(id_park='numero_parking', available='nombre_places_libres', total='nombre_places')
)
# Divia park-and-ride (P+R) parks expose slightly different field names.
DiviaPRParkProvider = divia_maker(
    SearchPattern(id_park='numero_parc', available='nb_places_libres', total='nombre_places')
)
| agpl-3.0 |
justinpaulthekkan/oracle-blog-examples | RIDCJythonScripts/createshortcuts.py | 1 | 2084 | ################################################################################
# Created By: Justin Paul
# Source: https://blogs.oracle.com/OracleWebCenterSuite
#
# NOTE: Please note that these code snippets should be used for development and
# testing purposes only, as such it is unsupported and should not be used
# on production environments.
################################################################################
from oracle.stellent.ridc import IdcClientManager
from oracle.stellent.ridc import IdcContext
# Connect to the content server over the RIDC socket protocol (idc://).
manager = IdcClientManager ()
client = manager.createClient ("idc://127.0.0.1:4444")
userContext = IdcContext ("weblogic")
# Alternative: connect over HTTP with explicit credentials instead.
# client = manager.createClient ("http://127.0.0.1:16200/cs/idcplg")
# userContext = IdcContext ("<user>", "<password>")

# Create shortcut for a folder
# The user must have logged in at least once to have the profile folder created
username="weblogic"
binder = client.createBinder ()
binder.putLocal("IdcService", "FLD_CREATE_FOLDER")
binder.putLocal("fParentGUID", "5517F2B282C5KJB40E5574E4D5AU7Y9A") # This is the GUID of the parent folder
binder.putLocal("fTargetGUID", "5517F2B282C574B40E5574E4D5AF1C9A") # This is the GUID of original folder
binder.putLocal("fFolderName", "My Shortcut to Folder1")
# "soft" makes the new entry a shortcut (link) rather than a real folder.
binder.putLocal("fFolderType", "soft")
userContext = IdcContext (username)
# get the response
response = client.sendRequest (userContext, binder)
responseBinder = response.getResponseAsBinder ()

# Create shortcut for a file
# The user must have logged in at least once to have the profile folder created
username="weblogic"
binder = client.createBinder ()
binder.putLocal("IdcService", "FLD_CREATE_FILE")
binder.putLocal("fParentGUID", "5517F2B282C5KJB40E5574E4D5AU7Y9A") # This is the GUID of the parent folder
binder.putLocal("fFileName", "My Shortcut to dDocName - 000421")
binder.putLocal("dDocName", "000421")
# "soft" creates a shortcut pointing at the existing document 000421.
binder.putLocal("fFileType", "soft")
userContext = IdcContext (username)
# get the response
response = client.sendRequest (userContext, binder)
responseBinder = response.getResponseAsBinder ()
| apache-2.0 |
roopali8/tempest | tempest/services/image/v1/json/image_client.py | 6 | 11995 | # Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import errno
import json
import os
import time
from oslo_log import log as logging
import six
from six.moves.urllib import parse as urllib
from tempest_lib.common.utils import misc as misc_utils
from tempest_lib import exceptions as lib_exc
from tempest.common import glance_http
from tempest.common import service_client
from tempest import exceptions
LOG = logging.getLogger(__name__)
class ImageClient(service_client.ServiceClient):
    """REST client for the OpenStack Image (glance) v1 API."""

    def __init__(self, auth_provider, catalog_type, region, endpoint_type=None,
                 build_interval=None, build_timeout=None,
                 disable_ssl_certificate_validation=None,
                 ca_certs=None, trace_requests=None):
        super(ImageClient, self).__init__(
            auth_provider,
            catalog_type,
            region,
            endpoint_type=endpoint_type,
            build_interval=build_interval,
            build_timeout=build_timeout,
            disable_ssl_certificate_validation=(
                disable_ssl_certificate_validation),
            ca_certs=ca_certs,
            trace_requests=trace_requests)
        # The glance-specific HTTP client is built lazily (see ``http``)
        # because it is only needed for raw image-data requests.
        self._http = None
        self.dscv = disable_ssl_certificate_validation
        self.ca_certs = ca_certs

    def _image_meta_from_headers(self, headers):
        """Convert ``x-image-meta-*`` response headers to a metadata dict."""
        meta = {'properties': {}}
        for key, value in six.iteritems(headers):
            if key.startswith('x-image-meta-property-'):
                _key = key[22:]
                meta['properties'][_key] = value
            elif key.startswith('x-image-meta-'):
                _key = key[13:]
                meta[_key] = value

        # Header values arrive as strings; coerce the known boolean and
        # integer fields back to their natural types.
        for key in ['is_public', 'protected', 'deleted']:
            if key in meta:
                meta[key] = meta[key].strip().lower() in ('t', 'true', 'yes',
                                                          '1')
        for key in ['size', 'min_ram', 'min_disk']:
            if key in meta:
                try:
                    meta[key] = int(meta[key])
                except ValueError:
                    pass
        return meta

    def _image_meta_to_headers(self, fields):
        """Convert a metadata dict to ``x-image-meta-*`` request headers."""
        headers = {}
        fields_copy = copy.deepcopy(fields)
        copy_from = fields_copy.pop('copy_from', None)
        if copy_from is not None:
            headers['x-glance-api-copy-from'] = copy_from
        for key, value in six.iteritems(fields_copy.pop('properties', {})):
            headers['x-image-meta-property-%s' % key] = str(value)
        for key, value in six.iteritems(fields_copy.pop('api', {})):
            headers['x-glance-api-property-%s' % key] = str(value)
        for key, value in six.iteritems(fields_copy):
            headers['x-image-meta-%s' % key] = str(value)
        return headers

    def _get_file_size(self, obj):
        """Analyze file-like object and attempt to determine its size.

        :param obj: file-like object, typically redirected from stdin.
        :retval The file's size or None if it cannot be determined.
        """
        # For large images, we need to supply the size of the
        # image file. See LP Bugs #827660 and #845788.
        if hasattr(obj, 'seek') and hasattr(obj, 'tell'):
            try:
                obj.seek(0, os.SEEK_END)
                obj_size = obj.tell()
                obj.seek(0)
                return obj_size
            except IOError as e:
                if e.errno == errno.ESPIPE:
                    # Illegal seek. This means the user is trying
                    # to pipe image data to the client, e.g.
                    # echo testdata | bin/glance add blah..., or
                    # that stdin is empty, or that a file-like
                    # object which doesn't support 'seek/tell' has
                    # been supplied.
                    return None
                else:
                    raise
        else:
            # Cannot determine size of input image
            return None

    def _get_http(self):
        """Build the glance HTTP client used for raw image-data requests."""
        return glance_http.HTTPClient(auth_provider=self.auth_provider,
                                      filters=self.filters,
                                      insecure=self.dscv,
                                      ca_certs=self.ca_certs)

    def _create_with_data(self, headers, data):
        """POST a new image together with its raw data payload."""
        resp, body_iter = self.http.raw_request('POST', '/v1/images',
                                                headers=headers, body=data)
        self._error_checker('POST', '/v1/images', headers, data, resp,
                            body_iter)
        body = json.loads(''.join(body_iter))
        return service_client.ResponseBody(resp, body['image'])

    def _update_with_data(self, image_id, headers, data):
        """PUT new raw data (and metadata headers) for an existing image."""
        url = '/v1/images/%s' % image_id
        resp, body_iter = self.http.raw_request('PUT', url, headers=headers,
                                                body=data)
        self._error_checker('PUT', url, headers, data,
                            resp, body_iter)
        body = json.loads(''.join(body_iter))
        return service_client.ResponseBody(resp, body['image'])

    @property
    def http(self):
        # Lazily create the glance HTTP client on first use.
        if self._http is None:
            self._http = self._get_http()
        return self._http

    def create_image(self, name, container_format, disk_format, **kwargs):
        """Create an image; also uploads raw data when ``data`` is given."""
        params = {
            "name": name,
            "container_format": container_format,
            "disk_format": disk_format,
        }

        headers = {}

        for option in ['is_public', 'location', 'properties',
                       'copy_from', 'min_ram']:
            if option in kwargs:
                params[option] = kwargs.get(option)

        headers.update(self._image_meta_to_headers(params))

        if 'data' in kwargs:
            return self._create_with_data(headers, kwargs.get('data'))

        resp, body = self.post('v1/images', None, headers)
        self.expected_success(201, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body['image'])

    def update_image(self, image_id, name=None, container_format=None,
                     data=None, properties=None):
        """Update an image's metadata and, optionally, its raw data."""
        params = {}
        headers = {}
        if name is not None:
            params['name'] = name

        if container_format is not None:
            params['container_format'] = container_format

        if properties is not None:
            params['properties'] = properties

        headers.update(self._image_meta_to_headers(params))

        if data is not None:
            return self._update_with_data(image_id, headers, data)

        url = 'v1/images/%s' % image_id
        resp, body = self.put(url, data, headers)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body['image'])

    def delete_image(self, image_id):
        """Delete the given image."""
        url = 'v1/images/%s' % image_id
        resp, body = self.delete(url)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, body)

    def list_images(self, detail=False, properties=None,
                    changes_since=None, **kwargs):
        """List images, optionally filtered by property values.

        ``properties`` now defaults to ``None`` (treated as empty) instead
        of a shared mutable ``dict()`` default.  Passing a dict, as before,
        still works unchanged.
        """
        url = 'v1/images'

        if detail:
            url += '/detail'

        params = {}
        for key, value in (properties or {}).items():
            params['property-%s' % key] = value

        kwargs.update(params)

        if changes_since is not None:
            kwargs['changes-since'] = changes_since

        if len(kwargs) > 0:
            url += '?%s' % urllib.urlencode(kwargs)

        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBodyList(resp, body['images'])

    def get_image_meta(self, image_id):
        """Return image metadata parsed from a HEAD request's headers."""
        url = 'v1/images/%s' % image_id
        resp, __ = self.head(url)
        self.expected_success(200, resp.status)
        body = self._image_meta_from_headers(resp)
        return service_client.ResponseBody(resp, body)

    def show_image(self, image_id):
        """Return the image's raw data."""
        url = 'v1/images/%s' % image_id
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        return service_client.ResponseBodyData(resp, body)

    def is_resource_deleted(self, id):
        """Return True once the image no longer exists (used by cleanup)."""
        try:
            self.get_image_meta(id)
        except lib_exc.NotFound:
            return True
        return False

    @property
    def resource_type(self):
        """Returns the primary type of resource this client works with."""
        return 'image_meta'

    def list_image_members(self, image_id):
        """List the members the given image is shared with."""
        url = 'v1/images/%s/members' % image_id
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def list_shared_images(self, tenant_id):
        """List shared images with the specified tenant"""
        url = 'v1/shared-images/%s' % tenant_id
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def add_member(self, member_id, image_id, can_share=False):
        """Share the image with a member, optionally with re-share rights."""
        url = 'v1/images/%s/members/%s' % (image_id, member_id)
        body = None
        if can_share:
            body = json.dumps({'member': {'can_share': True}})
        resp, __ = self.put(url, body)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp)

    def delete_member(self, member_id, image_id):
        """Stop sharing the image with the given member."""
        url = 'v1/images/%s/members/%s' % (image_id, member_id)
        resp, __ = self.delete(url)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp)

    # NOTE(afazekas): just for the wait function
    def _get_image_status(self, image_id):
        meta = self.get_image_meta(image_id)
        status = meta['status']
        return status

    # NOTE(afazekas): Wait reinvented again. It is not in the correct layer
    def wait_for_image_status(self, image_id, status):
        """Waits for a Image to reach a given status."""
        start_time = time.time()
        old_value = value = self._get_image_status(image_id)
        while True:
            dtime = time.time() - start_time
            if value != old_value:
                # Fixed: the two implicitly-concatenated literals used to
                # produce '"%s"in %d second(s)' with no separating space.
                LOG.info('Value transition from "%s" to "%s" '
                         'in %d second(s).', old_value,
                         value, dtime)
            if value == status:
                return value

            if value == 'killed':
                raise exceptions.ImageKilledException(image_id=image_id,
                                                      status=status)
            if dtime > self.build_timeout:
                # Fixed missing spaces in the concatenated message parts.
                message = ('Time Limit Exceeded! (%ds) '
                           'while waiting for %s, '
                           'but we got %s.' %
                           (self.build_timeout, status, value))
                caller = misc_utils.find_test_caller()
                if caller:
                    message = '(%s) %s' % (caller, message)
                raise exceptions.TimeoutException(message)
            # Sleep exactly once per iteration; the original slept both at
            # the top and the bottom of the loop, doubling the poll period.
            time.sleep(self.build_interval)
            old_value = value
            value = self._get_image_status(image_id)
| apache-2.0 |
ncoghlan/pip | pip/_vendor/requests/packages/urllib3/contrib/pyopenssl.py | 488 | 9326 | '''SSL with SNI_-support for Python 2. Follow these instructions if you would
like to verify SSL certificates in Python 2. Note, the default libraries do
*not* do certificate checking; you need to do additional work to validate
certificates yourself.
This needs the following packages installed:
* pyOpenSSL (tested with 0.13)
* ndg-httpsclient (tested with 0.3.2)
* pyasn1 (tested with 0.1.6)
You can install them with the following command:
pip install pyopenssl ndg-httpsclient pyasn1
To activate certificate checking, call
:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
before you begin making HTTP requests. This can be done in a ``sitecustomize``
module, or at any other time before your application begins using ``urllib3``,
like this::
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
Now you can use :mod:`urllib3` as you normally would, and it will support SNI
when the required modules are installed.
Activating this module also has the positive side effect of disabling SSL/TLS
compression in Python 2 (see `CRIME attack`_).
If you want to configure the default list of supported cipher suites, you can
set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
Module Variables
----------------
:var DEFAULT_SSL_CIPHER_LIST: The list of supported SSL/TLS cipher suites.
.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
'''
try:
from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT
from ndg.httpsclient.subj_alt_name import SubjectAltName as BaseSubjectAltName
except SyntaxError as e:
raise ImportError(e)
import OpenSSL.SSL
from pyasn1.codec.der import decoder as der_decoder
from pyasn1.type import univ, constraint
from socket import _fileobject, timeout
import ssl
import select
from .. import connection
from .. import util
__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
# SNI only *really* works if we can read the subjectAltName of certificates.
HAS_SNI = SUBJ_ALT_NAME_SUPPORT

# Map from urllib3 to PyOpenSSL compatible parameter-values.
_openssl_versions = {
    ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
    ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}

try:
    # ssl.PROTOCOL_SSLv3 is absent when the local OpenSSL build has
    # SSLv3 disabled; in that case simply skip the mapping.
    _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})
except AttributeError:
    pass

_openssl_verify = {
    ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
    ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
    ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER
    + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}

DEFAULT_SSL_CIPHER_LIST = util.ssl_.DEFAULT_CIPHERS

# Originals saved so extract_from_urllib3() can undo the monkey-patching.
orig_util_HAS_SNI = util.HAS_SNI
orig_connection_ssl_wrap_socket = connection.ssl_wrap_socket
def inject_into_urllib3():
    """Monkey-patch urllib3 so it uses this module's PyOpenSSL backend."""
    util.HAS_SNI = HAS_SNI
    connection.ssl_wrap_socket = ssl_wrap_socket
def extract_from_urllib3():
    """Undo the monkey-patching performed by :func:`inject_into_urllib3`."""
    util.HAS_SNI = orig_util_HAS_SNI
    connection.ssl_wrap_socket = orig_connection_ssl_wrap_socket
### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
class SubjectAltName(BaseSubjectAltName):
    '''ASN.1 implementation for subjectAltNames support'''

    # There is no limit to how many SAN certificates a certificate may have,
    # however this needs to have some limit so we'll set an arbitrarily high
    # limit.
    sizeSpec = univ.SequenceOf.sizeSpec + \
        constraint.ValueSizeConstraint(1, 1024)
### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
def get_subj_alt_name(peer_cert):
    """Return the dNSName entries of *peer_cert*'s subjectAltName extension.

    Returns an empty list when SAN parsing support is unavailable or the
    certificate carries no subjectAltName extension.
    """
    # Search through extensions
    dns_name = []
    if not SUBJ_ALT_NAME_SUPPORT:
        return dns_name

    general_names = SubjectAltName()
    for i in range(peer_cert.get_extension_count()):
        ext = peer_cert.get_extension(i)
        ext_name = ext.get_short_name()
        if ext_name != 'subjectAltName':
            continue

        # PyOpenSSL returns extension data in ASN.1 encoded form
        ext_dat = ext.get_data()
        decoded_dat = der_decoder.decode(ext_dat,
                                         asn1Spec=general_names)

        # der_decoder.decode returns a (decoded, remainder) tuple; the
        # isinstance check below skips the non-SubjectAltName remainder.
        for name in decoded_dat:
            if not isinstance(name, SubjectAltName):
                continue
            for entry in range(len(name)):
                component = name.getComponentByPosition(entry)
                if component.getName() != 'dNSName':
                    continue
                dns_name.append(str(component.getComponent()))
    return dns_name
class WrappedSocket(object):
    '''API-compatibility wrapper for Python OpenSSL's Connection-class.

    Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
    collector of pypy.
    '''

    def __init__(self, connection, socket, suppress_ragged_eofs=True):
        self.connection = connection
        self.socket = socket
        self.suppress_ragged_eofs = suppress_ragged_eofs
        # Count of outstanding makefile() handles; close() only really
        # shuts the connection down once this drops below one.
        self._makefile_refs = 0

    def fileno(self):
        return self.socket.fileno()

    def makefile(self, mode, bufsize=-1):
        self._makefile_refs += 1
        return _fileobject(self, mode, bufsize, close=True)

    def recv(self, *args, **kwargs):
        """Receive data, translating PyOpenSSL errors to ssl-module behavior."""
        try:
            data = self.connection.recv(*args, **kwargs)
        except OpenSSL.SSL.SysCallError as e:
            # Treat an unexpected EOF as a clean close when
            # suppress_ragged_eofs is set (mirrors the stdlib ssl module).
            if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
                return b''
            else:
                raise
        except OpenSSL.SSL.ZeroReturnError as e:
            if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
                return b''
            else:
                raise
        except OpenSSL.SSL.WantReadError:
            # No full TLS record available yet: wait on the underlying
            # socket (respecting its timeout) and retry.
            rd, wd, ed = select.select(
                [self.socket], [], [], self.socket.gettimeout())
            if not rd:
                raise timeout('The read operation timed out')
            else:
                return self.recv(*args, **kwargs)
        else:
            return data

    def settimeout(self, timeout):
        return self.socket.settimeout(timeout)

    def _send_until_done(self, data):
        # Retry the send whenever OpenSSL reports the transport is not
        # yet writable, waiting on the socket in between.
        while True:
            try:
                return self.connection.send(data)
            except OpenSSL.SSL.WantWriteError:
                _, wlist, _ = select.select([], [self.socket], [],
                                            self.socket.gettimeout())
                if not wlist:
                    raise timeout()
                continue

    def sendall(self, data):
        # connection.send may accept only part of the buffer; loop until
        # every byte has been handed to OpenSSL.
        while len(data):
            sent = self._send_until_done(data)
            data = data[sent:]

    def close(self):
        if self._makefile_refs < 1:
            return self.connection.shutdown()
        else:
            self._makefile_refs -= 1

    def getpeercert(self, binary_form=False):
        """Return the peer certificate, shaped like ssl.SSLSocket.getpeercert."""
        x509 = self.connection.get_peer_certificate()

        if not x509:
            return x509

        if binary_form:
            return OpenSSL.crypto.dump_certificate(
                OpenSSL.crypto.FILETYPE_ASN1,
                x509)

        # Minimal dict: only commonName and DNS subjectAltNames, which is
        # what urllib3's hostname matching consumes.
        return {
            'subject': (
                (('commonName', x509.get_subject().CN),),
            ),
            'subjectAltName': [
                ('DNS', value)
                for value in get_subj_alt_name(x509)
            ]
        }

    def _reuse(self):
        self._makefile_refs += 1

    def _drop(self):
        if self._makefile_refs < 1:
            self.close()
        else:
            self._makefile_refs -= 1
def _verify_callback(cnx, x509, err_no, err_depth, return_code):
return err_no == 0
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
                    ca_certs=None, server_hostname=None,
                    ssl_version=None):
    """Drop-in replacement for ``ssl.wrap_socket`` backed by PyOpenSSL.

    Builds an OpenSSL context from the given key/cert/CA settings, performs
    the client-side TLS handshake (with SNI via *server_hostname*) and
    returns a :class:`WrappedSocket` around the established connection.

    :raises ssl.SSLError: on bad CA bundle or handshake failure.
    """
    ctx = OpenSSL.SSL.Context(_openssl_versions[ssl_version])
    if certfile:
        keyfile = keyfile or certfile  # Match behaviour of the normal python ssl library
        ctx.use_certificate_file(certfile)
    if keyfile:
        ctx.use_privatekey_file(keyfile)
    if cert_reqs != ssl.CERT_NONE:
        ctx.set_verify(_openssl_verify[cert_reqs], _verify_callback)
    if ca_certs:
        try:
            ctx.load_verify_locations(ca_certs, None)
        except OpenSSL.SSL.Error as e:
            raise ssl.SSLError('bad ca_certs: %r' % ca_certs, e)
    else:
        ctx.set_default_verify_paths()

    # Disable TLS compression to mitigate CRIME attack (issue #309)
    OP_NO_COMPRESSION = 0x20000
    ctx.set_options(OP_NO_COMPRESSION)

    # Set list of supported ciphersuites.
    ctx.set_cipher_list(DEFAULT_SSL_CIPHER_LIST)

    cnx = OpenSSL.SSL.Connection(ctx, sock)
    cnx.set_tlsext_host_name(server_hostname)
    cnx.set_connect_state()
    while True:
        try:
            cnx.do_handshake()
        except OpenSSL.SSL.WantReadError:
            # Socket not readable yet (non-blocking handshake): wait up to
            # the socket timeout and retry.
            rd, _, _ = select.select([sock], [], [], sock.gettimeout())
            if not rd:
                raise timeout('select timed out')
            continue
        except OpenSSL.SSL.Error as e:
            raise ssl.SSLError('bad handshake', e)
        break

    return WrappedSocket(cnx, sock)
| mit |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/frontend/qt/console/qtconsoleapp.py | 3 | 12925 | """ A minimal application using the Qt console-style IPython frontend.
This is not a complete console app, as subprocess will not be able to receive
input, there is no real readline support, among other limitations.
Authors:
* Evan Patterson
* Min RK
* Erik Tollerud
* Fernando Perez
* Bussonnier Matthias
* Thomas Kluyver
* Paul Ivanov
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# stdlib imports
import json
import os
import signal
import sys
import uuid
# If run on Windows, install an exception hook which pops up a
# message box. Pythonw.exe hides the console, so without this
# the application silently fails to load.
#
# We always install this handler, because the expectation is for
# qtconsole to bring up a GUI even if called from the console.
# The old handler is called, so the exception is printed as well.
# If desired, check for pythonw with an additional condition
# (sys.executable.lower().find('pythonw.exe') >= 0).
if os.name == 'nt':
    old_excepthook = sys.excepthook

    def gui_excepthook(exctype, value, tb):
        """Show startup errors in a Win32 message box, then chain to the
        previously installed excepthook."""
        try:
            import ctypes, traceback
            MB_ICONERROR = 0x00000010L
            title = u'Error starting IPython QtConsole'
            msg = u''.join(traceback.format_exception(exctype, value, tb))
            ctypes.windll.user32.MessageBoxW(0, msg, title, MB_ICONERROR)
        finally:
            # Also call the old exception hook to let it do
            # its thing too.
            old_excepthook(exctype, value, tb)

    sys.excepthook = gui_excepthook
# System library imports
from IPython.external.qt import QtCore, QtGui
# Local imports
from IPython.config.application import boolean_flag, catch_config_error
from IPython.core.application import BaseIPythonApplication
from IPython.core.profiledir import ProfileDir
from IPython.lib.kernel import tunnel_to_kernel, find_connection_file
from IPython.frontend.qt.console.frontend_widget import FrontendWidget
from IPython.frontend.qt.console.ipython_widget import IPythonWidget
from IPython.frontend.qt.console.rich_ipython_widget import RichIPythonWidget
from IPython.frontend.qt.console import styles
from IPython.frontend.qt.console.mainwindow import MainWindow
from IPython.frontend.qt.kernelmanager import QtKernelManager
from IPython.utils.path import filefind
from IPython.utils.py3compat import str_to_bytes
from IPython.utils.traitlets import (
Dict, List, Unicode, Integer, CaselessStrEnum, CBool, Any
)
from IPython.zmq.ipkernel import IPKernelApp
from IPython.zmq.session import Session, default_secure
from IPython.zmq.zmqshell import ZMQInteractiveShell
from IPython.frontend.consoleapp import (
IPythonConsoleApp, app_aliases, app_flags, flags, aliases
)
#-----------------------------------------------------------------------------
# Network Constants
#-----------------------------------------------------------------------------
from IPython.utils.localinterfaces import LOCALHOST, LOCAL_IPS
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
_examples = """
ipython qtconsole # start the qtconsole
ipython qtconsole --pylab=inline # start with pylab in inline plotting mode
"""
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
# start with copy of flags
flags = dict(flags)
qt_flags = {
'plain' : ({'IPythonQtConsoleApp' : {'plain' : True}},
"Disable rich text support."),
}
# and app_flags from the Console Mixin
qt_flags.update(app_flags)
# add frontend flags to the full set
flags.update(qt_flags)
# start with copy of front&backend aliases list
aliases = dict(aliases)
qt_aliases = dict(
style = 'IPythonWidget.syntax_style',
stylesheet = 'IPythonQtConsoleApp.stylesheet',
colors = 'ZMQInteractiveShell.colors',
editor = 'IPythonWidget.editor',
paging = 'ConsoleWidget.paging',
)
# and app_aliases from the Console Mixin
qt_aliases.update(app_aliases)
qt_aliases.update({'gui-completion':'ConsoleWidget.gui_completion'})
# add frontend aliases to the full set
aliases.update(qt_aliases)
# get flags&aliases into sets, and remove a couple that
# shouldn't be scrubbed from backend flags:
qt_aliases = set(qt_aliases.keys())
qt_aliases.remove('colors')
qt_flags = set(qt_flags.keys())
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# IPythonQtConsole
#-----------------------------------------------------------------------------
class IPythonQtConsoleApp(BaseIPythonApplication, IPythonConsoleApp):
    """Application object wiring a Qt console frontend to IPython kernels."""
    name = 'ipython-qtconsole'

    description = """
        The IPython QtConsole.

        This launches a Console-style application using Qt. It is not a full
        console, in that launched terminal subprocesses will not be able to accept
        input.

        The QtConsole supports various extra features beyond the Terminal IPython
        shell, such as inline plotting with matplotlib, via:

            ipython qtconsole --pylab=inline

        as well as saving your session as HTML, and printing the output.
    """
    examples = _examples

    classes = [IPythonWidget] + IPythonConsoleApp.classes
    flags = Dict(flags)
    aliases = Dict(aliases)
    frontend_flags = Any(qt_flags)
    frontend_aliases = Any(qt_aliases)
    kernel_manager_class = QtKernelManager

    stylesheet = Unicode('', config=True,
        help="path to a custom CSS stylesheet")

    plain = CBool(False, config=True,
        help="Use a plaintext widget instead of rich text (plain can't print/save).")

    def _plain_changed(self, name, old, new):
        """Traitlets hook: switch widget kind/factory when `plain` changes."""
        kind = 'plain' if new else 'rich'
        self.config.ConsoleWidget.kind = kind
        if new:
            self.widget_factory = IPythonWidget
        else:
            self.widget_factory = RichIPythonWidget

    # the factory for creating a widget
    widget_factory = Any(RichIPythonWidget)

    def parse_command_line(self, argv=None):
        super(IPythonQtConsoleApp, self).parse_command_line(argv)
        # Collect remaining args destined for the kernel subprocess.
        self.build_kernel_argv(argv)

    def new_frontend_master(self):
        """ Create and return new frontend attached to new kernel, launched on localhost.
        """
        ip = self.ip if self.ip in LOCAL_IPS else LOCALHOST
        kernel_manager = self.kernel_manager_class(
                                ip=ip,
                                connection_file=self._new_connection_file(),
                                config=self.config,
        )
        # start the kernel
        kwargs = dict()
        kwargs['extra_arguments'] = self.kernel_argv
        kernel_manager.start_kernel(**kwargs)
        kernel_manager.start_channels()
        widget = self.widget_factory(config=self.config,
                                   local_kernel=True)
        self.init_colors(widget)
        widget.kernel_manager = kernel_manager
        # A master frontend owns its kernel: it may close it on exit.
        widget._existing = False
        widget._may_close = True
        widget._confirm_exit = self.confirm_exit
        return widget

    def new_frontend_slave(self, current_widget):
        """Create and return a new frontend attached to an existing kernel.

        Parameters
        ----------
        current_widget : IPythonWidget
            The IPythonWidget whose kernel this frontend is to share
        """
        kernel_manager = self.kernel_manager_class(
                                connection_file=current_widget.kernel_manager.connection_file,
                                config = self.config,
        )
        kernel_manager.load_connection_file()
        kernel_manager.start_channels()
        widget = self.widget_factory(config=self.config,
                                local_kernel=False)
        self.init_colors(widget)
        # A slave frontend shares someone else's kernel: never close it.
        widget._existing = True
        widget._may_close = False
        widget._confirm_exit = False
        widget.kernel_manager = kernel_manager
        return widget

    def init_qt_elements(self):
        """Build the QApplication, the first frontend widget and the window."""
        # Create the widget.
        self.app = QtGui.QApplication([])

        base_path = os.path.abspath(os.path.dirname(__file__))
        icon_path = os.path.join(base_path, 'resources', 'icon', 'IPythonConsole.svg')
        self.app.icon = QtGui.QIcon(icon_path)
        QtGui.QApplication.setWindowIcon(self.app.icon)

        local_kernel = (not self.existing) or self.ip in LOCAL_IPS
        self.widget = self.widget_factory(config=self.config,
                                        local_kernel=local_kernel)
        self.init_colors(self.widget)
        self.widget._existing = self.existing
        self.widget._may_close = not self.existing
        self.widget._confirm_exit = self.confirm_exit

        self.widget.kernel_manager = self.kernel_manager
        self.window = MainWindow(self.app,
                                confirm_exit=self.confirm_exit,
                                new_frontend_factory=self.new_frontend_master,
                                slave_frontend_factory=self.new_frontend_slave,
                            )
        self.window.log = self.log
        self.window.add_tab_with_frontend(self.widget)
        self.window.init_menu_bar()

        self.window.setWindowTitle('IPython')

    def init_colors(self, widget):
        """Configure the coloring of the widget"""
        # Note: This will be dramatically simplified when colors
        # are removed from the backend.

        # parse the colors arg down to current known labels
        try:
            colors = self.config.ZMQInteractiveShell.colors
        except AttributeError:
            colors = None
        try:
            style = self.config.IPythonWidget.syntax_style
        except AttributeError:
            style = None
        try:
            sheet = self.config.IPythonWidget.style_sheet
        except AttributeError:
            sheet = None

        # find the value for colors:
        if colors:
            colors=colors.lower()
            if colors in ('lightbg', 'light'):
                colors='lightbg'
            elif colors in ('dark', 'linux'):
                colors='linux'
            else:
                colors='nocolor'
        elif style:
            if style=='bw':
                colors='nocolor'
            elif styles.dark_style(style):
                colors='linux'
            else:
                colors='lightbg'
        else:
            colors=None

        # Configure the style
        if style:
            widget.style_sheet = styles.sheet_from_template(style, colors)
            widget.syntax_style = style
            widget._syntax_style_changed()
            widget._style_sheet_changed()
        elif colors:
            # use a default dark/light/bw style
            widget.set_default_style(colors=colors)

        if self.stylesheet:
            # we got an explicit stylesheet
            if os.path.isfile(self.stylesheet):
                with open(self.stylesheet) as f:
                    sheet = f.read()
            else:
                raise IOError("Stylesheet %r not found." % self.stylesheet)
        if sheet:
            widget.style_sheet = sheet
            widget._style_sheet_changed()

    def init_signal(self):
        """allow clean shutdown on sigint"""
        signal.signal(signal.SIGINT, lambda sig, frame: self.exit(-2))
        # need a timer, so that QApplication doesn't block until a real
        # Qt event fires (can require mouse movement)
        # timer trick from http://stackoverflow.com/q/4938723/938949
        timer = QtCore.QTimer()
        # Let the interpreter run each 200 ms:
        timer.timeout.connect(lambda: None)
        timer.start(200)
        # hold onto ref, so the timer doesn't get cleaned up
        self._sigint_timer = timer

    @catch_config_error
    def initialize(self, argv=None):
        super(IPythonQtConsoleApp, self).initialize(argv)
        IPythonConsoleApp.initialize(self,argv)
        self.init_qt_elements()
        self.init_signal()

    def start(self):
        # draw the window
        self.window.show()
        self.window.raise_()

        # Start the application main loop.
        self.app.exec_()
#-----------------------------------------------------------------------------
# Main entry point
#-----------------------------------------------------------------------------
def main():
app = IPythonQtConsoleApp()
app.initialize()
app.start()
if __name__ == '__main__':
main()
| lgpl-3.0 |
SlimRoms/kernel_sony_msm8x27 | tools/perf/python/twatch.py | 7370 | 1334 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
| gpl-2.0 |
jessekl/flixr | venv/lib/python2.7/site-packages/twilio/rest/client.py | 12 | 4436 | from twilio.rest.base import TwilioClient
from twilio.rest.resources import (
UNSET_TIMEOUT,
Accounts,
Applications,
AuthorizedConnectApps,
CallFeedback,
CallFeedbackFactory,
CallerIds,
Calls,
Conferences,
ConnectApps,
DependentPhoneNumbers,
MediaList,
Members,
Messages,
Notifications,
Participants,
PhoneNumbers,
Queues,
Recordings,
Sandboxes,
Sip,
Sms,
Tokens,
Transcriptions,
Usage,
)
class TwilioRestClient(TwilioClient):
"""
A client for accessing the Twilio REST API
:param str account: Your Account SID from `your dashboard
<https://twilio.com/user/account>`_
:param str token: Your Auth Token from `your dashboard
<https://twilio.com/user/account>`_
:param float timeout: The socket and read timeout for requests to Twilio
"""
def __init__(self, account=None, token=None, base="https://api.twilio.com",
version="2010-04-01", timeout=UNSET_TIMEOUT):
"""
Create a Twilio REST API client.
"""
super(TwilioRestClient, self).__init__(account, token, base, version,
timeout)
version_uri = "%s/%s" % (base, version)
self.accounts = Accounts(version_uri, self.auth, timeout)
self.applications = Applications(self.account_uri, self.auth, timeout)
self.authorized_connect_apps = AuthorizedConnectApps(
self.account_uri,
self.auth,
timeout
)
self.calls = Calls(self.account_uri, self.auth, timeout)
self.caller_ids = CallerIds(self.account_uri, self.auth, timeout)
self.connect_apps = ConnectApps(self.account_uri, self.auth, timeout)
self.notifications = Notifications(self.account_uri, self.auth,
timeout)
self.recordings = Recordings(self.account_uri, self.auth, timeout)
self.transcriptions = Transcriptions(self.account_uri, self.auth,
timeout)
self.sms = Sms(self.account_uri, self.auth, timeout)
self.phone_numbers = PhoneNumbers(self.account_uri, self.auth, timeout)
self.conferences = Conferences(self.account_uri, self.auth, timeout)
self.queues = Queues(self.account_uri, self.auth, timeout)
self.sandboxes = Sandboxes(self.account_uri, self.auth, timeout)
self.usage = Usage(self.account_uri, self.auth, timeout)
self.messages = Messages(self.account_uri, self.auth, timeout)
self.media = MediaList(self.account_uri, self.auth, timeout)
self.sip = Sip(self.account_uri, self.auth, timeout)
self.tokens = Tokens(self.account_uri, self.auth, timeout)
def participants(self, conference_sid):
"""
Return a :class:`~twilio.rest.resources.Participants` instance for the
:class:`~twilio.rest.resources.Conference` with given conference_sid
"""
base_uri = "%s/Conferences/%s" % (self.account_uri, conference_sid)
return Participants(base_uri, self.auth, self.timeout)
def members(self, queue_sid):
"""
Return a :class:`Members <twilio.rest.resources.Members>` instance for
the :class:`Queue <twilio.rest.resources.Queue>` with the
given queue_sid
"""
base_uri = "%s/Queues/%s" % (self.account_uri, queue_sid)
return Members(base_uri, self.auth, self.timeout)
def feedback(self, call_sid):
"""
Return a :class:`CallFeedback <twilio.rest.resources.CallFeedback>`
instance for the :class:`Call <twilio.rest.resources.calls.Call>`
with the given call_sid
"""
base_uri = "%s/Calls/%s/Feedback" % (self.account_uri, call_sid)
call_feedback_list = CallFeedbackFactory(
base_uri,
self.auth,
self.timeout
)
return CallFeedback(call_feedback_list)
def dependent_phone_numbers(self, address_sid):
"""
Return a :class:`DependentPhoneNumbers
<twilio.rest.resources.DependentPhoneNumbers>` instance for the
:class:`Address <twilio.rest.resources.Address>` with the given
address_sid
"""
base_uri = "%s/Addresses/%s" % (self.account_uri, address_sid)
return DependentPhoneNumbers(base_uri, self.auth, self.timeout)
| mit |
ZhangXinNan/tensorflow | tensorflow/python/compat/compat.py | 1 | 4728 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See
@{$guide/version_compat#backward_and_partial_forward_compatibility}
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2018, 8, 16)
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See @{$guide/version_compat#backward_and_partial_forward_compatibility}.
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
# To maintain forward compatibiltiy, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018).
month: A month (1 <= month <= 12) in year.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_HORIZON > datetime.date(year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See @{$guide/version_compat#backward_and_partial_forward_compatibility}.
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=08, date=01):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 08, 02):
# Test that generate_graph_with_new_features() has an effect
```
Args :
year: A year (e.g. 2018).
month: A month (1 <= month <= 12) in year.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.
Yields:
Nothing.
"""
global _FORWARD_COMPATIBILITY_HORIZON
try:
old_compat_date = _FORWARD_COMPATIBILITY_HORIZON
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(year, month, day)
yield
finally:
_FORWARD_COMPATIBILITY_HORIZON = old_compat_date
| apache-2.0 |
theheros/kbengine | kbe/src/lib/python/Lib/gzip.py | 47 | 21968 | """Functions that read and write gzipped files.
The user of the file doesn't have to worry about the compression,
but random access is not allowed."""
# based on Andrew Kuchling's minigzip.py distributed with the zlib module
import struct, sys, time, os
import zlib
import builtins
import io
__all__ = ["GzipFile", "open", "compress", "decompress"]
FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16
READ, WRITE = 1, 2
def U32(i):
"""Return i as an unsigned integer, assuming it fits in 32 bits.
If it's >= 2GB when viewed as a 32-bit unsigned int, return a long.
"""
if i < 0:
i += 1 << 32
return i
def LOWU32(i):
"""Return the low-order 32 bits, as a non-negative int"""
return i & 0xFFFFFFFF
def write32u(output, value):
# The L format writes the bit pattern correctly whether signed
# or unsigned.
output.write(struct.pack("<L", value))
def read32(input):
return struct.unpack("<I", input.read(4))[0]
def open(filename, mode="rb", compresslevel=9):
"""Shorthand for GzipFile(filename, mode, compresslevel).
The filename argument is required; mode defaults to 'rb'
and compresslevel defaults to 9.
"""
return GzipFile(filename, mode, compresslevel)
class _PaddedFile:
"""Minimal read-only file object that prepends a string to the contents
of an actual file. Shouldn't be used outside of gzip.py, as it lacks
essential functionality."""
def __init__(self, f, prepend=b''):
self._buffer = prepend
self._length = len(prepend)
self.file = f
self._read = 0
def read(self, size):
if self._read is None:
return self.file.read(size)
if self._read + size <= self._length:
read = self._read
self._read += size
return self._buffer[read:self._read]
else:
read = self._read
self._read = None
return self._buffer[read:] + \
self.file.read(size-self._length+read)
def prepend(self, prepend=b'', readprevious=False):
if self._read is None:
self._buffer = prepend
elif readprevious and len(prepend) <= self._read:
self._read -= len(prepend)
return
else:
self._buffer = self._buffer[read:] + prepend
self._length = len(self._buffer)
self._read = 0
def unused(self):
if self._read is None:
return b''
return self._buffer[self._read:]
def seek(self, offset, whence=0):
# This is only ever called with offset=whence=0
if whence == 1 and self._read is not None:
if 0 <= offset + self._read <= self._length:
self._read += offset
return
else:
offset += self._length - self._read
self._read = None
self._buffer = None
return self.file.seek(offset, whence)
def __getattr__(self, name):
return getattr(self.file, name)
class GzipFile(io.BufferedIOBase):
"""The GzipFile class simulates most of the methods of a file object with
the exception of the readinto() and truncate() methods.
"""
myfileobj = None
max_read_chunk = 10 * 1024 * 1024 # 10Mb
def __init__(self, filename=None, mode=None,
compresslevel=9, fileobj=None, mtime=None):
"""Constructor for the GzipFile class.
At least one of fileobj and filename must be given a
non-trivial value.
The new class instance is based on fileobj, which can be a regular
file, a StringIO object, or any other object which simulates a file.
It defaults to None, in which case filename is opened to provide
a file object.
When fileobj is not None, the filename argument is only used to be
included in the gzip file header, which may includes the original
filename of the uncompressed file. It defaults to the filename of
fileobj, if discernible; otherwise, it defaults to the empty string,
and in this case the original filename is not included in the header.
The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', or 'wb',
depending on whether the file will be read or written. The default
is the mode of fileobj if discernible; otherwise, the default is 'rb'.
Be aware that only the 'rb', 'ab', and 'wb' values should be used
for cross-platform portability.
The compresslevel argument is an integer from 1 to 9 controlling the
level of compression; 1 is fastest and produces the least compression,
and 9 is slowest and produces the most compression. The default is 9.
The mtime argument is an optional numeric timestamp to be written
to the stream when compressing. All gzip compressed streams
are required to contain a timestamp. If omitted or None, the
current time is used. This module ignores the timestamp when
decompressing; however, some programs, such as gunzip, make use
of it. The format of the timestamp is the same as that of the
return value of time.time() and of the st_mtime member of the
object returned by os.stat().
"""
# guarantee the file is opened in binary mode on platforms
# that care about that sort of thing
if mode and 'b' not in mode:
mode += 'b'
if fileobj is None:
fileobj = self.myfileobj = builtins.open(filename, mode or 'rb')
if filename is None:
if hasattr(fileobj, 'name'): filename = fileobj.name
else: filename = ''
if mode is None:
if hasattr(fileobj, 'mode'): mode = fileobj.mode
else: mode = 'rb'
if mode[0:1] == 'r':
self.mode = READ
# Set flag indicating start of a new member
self._new_member = True
# Buffer data read from gzip file. extrastart is offset in
# stream where buffer starts. extrasize is number of
# bytes remaining in buffer from current stream position.
self.extrabuf = b""
self.extrasize = 0
self.extrastart = 0
self.name = filename
# Starts small, scales exponentially
self.min_readsize = 100
fileobj = _PaddedFile(fileobj)
elif mode[0:1] == 'w' or mode[0:1] == 'a':
self.mode = WRITE
self._init_write(filename)
self.compress = zlib.compressobj(compresslevel,
zlib.DEFLATED,
-zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL,
0)
else:
raise IOError("Mode " + mode + " not supported")
self.fileobj = fileobj
self.offset = 0
self.mtime = mtime
if self.mode == WRITE:
self._write_gzip_header()
@property
def filename(self):
import warnings
warnings.warn("use the name attribute", DeprecationWarning, 2)
if self.mode == WRITE and self.name[-3:] != ".gz":
return self.name + ".gz"
return self.name
def __repr__(self):
fileobj = self.fileobj
if isinstance(fileobj, _PaddedFile):
fileobj = fileobj.file
s = repr(fileobj)
return '<gzip ' + s[1:-1] + ' ' + hex(id(self)) + '>'
def _check_closed(self):
"""Raises a ValueError if the underlying file object has been closed.
"""
if self.closed:
raise ValueError('I/O operation on closed file.')
def _init_write(self, filename):
self.name = filename
self.crc = zlib.crc32(b"") & 0xffffffff
self.size = 0
self.writebuf = []
self.bufsize = 0
def _write_gzip_header(self):
self.fileobj.write(b'\037\213') # magic header
self.fileobj.write(b'\010') # compression method
try:
# RFC 1952 requires the FNAME field to be Latin-1. Do not
# include filenames that cannot be represented that way.
fname = os.path.basename(self.name)
fname = fname.encode('latin-1')
if fname.endswith(b'.gz'):
fname = fname[:-3]
except UnicodeEncodeError:
fname = b''
flags = 0
if fname:
flags = FNAME
self.fileobj.write(chr(flags).encode('latin-1'))
mtime = self.mtime
if mtime is None:
mtime = time.time()
write32u(self.fileobj, int(mtime))
self.fileobj.write(b'\002')
self.fileobj.write(b'\377')
if fname:
self.fileobj.write(fname + b'\000')
def _init_read(self):
self.crc = zlib.crc32(b"") & 0xffffffff
self.size = 0
def _read_gzip_header(self):
magic = self.fileobj.read(2)
if magic == b'':
raise EOFError("Reached EOF")
if magic != b'\037\213':
raise IOError('Not a gzipped file')
method = ord( self.fileobj.read(1) )
if method != 8:
raise IOError('Unknown compression method')
flag = ord( self.fileobj.read(1) )
self.mtime = read32(self.fileobj)
# extraflag = self.fileobj.read(1)
# os = self.fileobj.read(1)
self.fileobj.read(2)
if flag & FEXTRA:
# Read & discard the extra field, if present
xlen = ord(self.fileobj.read(1))
xlen = xlen + 256*ord(self.fileobj.read(1))
self.fileobj.read(xlen)
if flag & FNAME:
# Read and discard a null-terminated string containing the filename
while True:
s = self.fileobj.read(1)
if not s or s==b'\000':
break
if flag & FCOMMENT:
# Read and discard a null-terminated string containing a comment
while True:
s = self.fileobj.read(1)
if not s or s==b'\000':
break
if flag & FHCRC:
self.fileobj.read(2) # Read & discard the 16-bit header CRC
unused = self.fileobj.unused()
if unused:
uncompress = self.decompress.decompress(unused)
self._add_read_data(uncompress)
def write(self,data):
self._check_closed()
if self.mode != WRITE:
import errno
raise IOError(errno.EBADF, "write() on read-only GzipFile object")
if self.fileobj is None:
raise ValueError("write() on closed GzipFile object")
# Convert data type if called by io.BufferedWriter.
if isinstance(data, memoryview):
data = data.tobytes()
if len(data) > 0:
self.size = self.size + len(data)
self.crc = zlib.crc32(data, self.crc) & 0xffffffff
self.fileobj.write( self.compress.compress(data) )
self.offset += len(data)
return len(data)
def read(self, size=-1):
self._check_closed()
if self.mode != READ:
import errno
raise IOError(errno.EBADF, "read() on write-only GzipFile object")
if self.extrasize <= 0 and self.fileobj is None:
return b''
readsize = 1024
if size < 0: # get the whole thing
try:
while True:
self._read(readsize)
readsize = min(self.max_read_chunk, readsize * 2)
except EOFError:
size = self.extrasize
else: # just get some more of it
try:
while size > self.extrasize:
self._read(readsize)
readsize = min(self.max_read_chunk, readsize * 2)
except EOFError:
if size > self.extrasize:
size = self.extrasize
offset = self.offset - self.extrastart
chunk = self.extrabuf[offset: offset + size]
self.extrasize = self.extrasize - size
self.offset += size
return chunk
def peek(self, n):
if self.mode != READ:
import errno
raise IOError(errno.EBADF, "peek() on write-only GzipFile object")
# Do not return ridiculously small buffers, for one common idiom
# is to call peek(1) and expect more bytes in return.
if n < 100:
n = 100
if self.extrasize == 0:
if self.fileobj is None:
return b''
try:
# 1024 is the same buffering heuristic used in read()
self._read(max(n, 1024))
except EOFError:
pass
offset = self.offset - self.extrastart
remaining = self.extrasize
assert remaining == len(self.extrabuf) - offset
return self.extrabuf[offset:offset + n]
def _unread(self, buf):
self.extrasize = len(buf) + self.extrasize
self.offset -= len(buf)
def _read(self, size=1024):
if self.fileobj is None:
raise EOFError("Reached EOF")
if self._new_member:
# If the _new_member flag is set, we have to
# jump to the next member, if there is one.
self._init_read()
self._read_gzip_header()
self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
self._new_member = False
# Read a chunk of data from the file
buf = self.fileobj.read(size)
# If the EOF has been reached, flush the decompression object
# and mark this object as finished.
if buf == b"":
uncompress = self.decompress.flush()
# Prepend the already read bytes to the fileobj to they can be
# seen by _read_eof()
self.fileobj.prepend(self.decompress.unused_data, True)
self._read_eof()
self._add_read_data( uncompress )
raise EOFError('Reached EOF')
uncompress = self.decompress.decompress(buf)
self._add_read_data( uncompress )
if self.decompress.unused_data != b"":
# Ending case: we've come to the end of a member in the file,
# so seek back to the start of the unused data, finish up
# this member, and read a new gzip header.
# Prepend the already read bytes to the fileobj to they can be
# seen by _read_eof() and _read_gzip_header()
self.fileobj.prepend(self.decompress.unused_data, True)
# Check the CRC and file size, and set the flag so we read
# a new member on the next call
self._read_eof()
self._new_member = True
def _add_read_data(self, data):
self.crc = zlib.crc32(data, self.crc) & 0xffffffff
offset = self.offset - self.extrastart
self.extrabuf = self.extrabuf[offset:] + data
self.extrasize = self.extrasize + len(data)
self.extrastart = self.offset
self.size = self.size + len(data)
def _read_eof(self):
# We've read to the end of the file
# We check the that the computed CRC and size of the
# uncompressed data matches the stored values. Note that the size
# stored is the true file size mod 2**32.
crc32 = read32(self.fileobj)
isize = read32(self.fileobj) # may exceed 2GB
if crc32 != self.crc:
raise IOError("CRC check failed %s != %s" % (hex(crc32),
hex(self.crc)))
elif isize != (self.size & 0xffffffff):
raise IOError("Incorrect length of data produced")
# Gzip files can be padded with zeroes and still have archives.
# Consume all zero bytes and set the file position to the first
# non-zero byte. See http://www.gzip.org/#faq8
c = b"\x00"
while c == b"\x00":
c = self.fileobj.read(1)
if c:
self.fileobj.prepend(c, True)
@property
def closed(self):
return self.fileobj is None
def close(self):
if self.fileobj is None:
return
if self.mode == WRITE:
self.fileobj.write(self.compress.flush())
write32u(self.fileobj, self.crc)
# self.size may exceed 2GB, or even 4GB
write32u(self.fileobj, self.size & 0xffffffff)
self.fileobj = None
elif self.mode == READ:
self.fileobj = None
if self.myfileobj:
self.myfileobj.close()
self.myfileobj = None
def flush(self,zlib_mode=zlib.Z_SYNC_FLUSH):
self._check_closed()
if self.mode == WRITE:
# Ensure the compressor's buffer is flushed
self.fileobj.write(self.compress.flush(zlib_mode))
self.fileobj.flush()
def fileno(self):
"""Invoke the underlying file object's fileno() method.
This will raise AttributeError if the underlying file object
doesn't support fileno().
"""
return self.fileobj.fileno()
def rewind(self):
'''Return the uncompressed stream file position indicator to the
beginning of the file'''
if self.mode != READ:
raise IOError("Can't rewind in write mode")
self.fileobj.seek(0)
self._new_member = True
self.extrabuf = b""
self.extrasize = 0
self.extrastart = 0
self.offset = 0
def readable(self):
return self.mode == READ
def writable(self):
return self.mode == WRITE
def seekable(self):
return True
def seek(self, offset, whence=0):
if whence:
if whence == 1:
offset = self.offset + offset
else:
raise ValueError('Seek from end not supported')
if self.mode == WRITE:
if offset < self.offset:
raise IOError('Negative seek in write mode')
count = offset - self.offset
chunk = bytes(1024)
for i in range(count // 1024):
self.write(chunk)
self.write(bytes(count % 1024))
elif self.mode == READ:
if offset < self.offset:
# for negative seek, rewind and do positive seek
self.rewind()
count = offset - self.offset
for i in range(count // 1024):
self.read(1024)
self.read(count % 1024)
return self.offset
def readline(self, size=-1):
if size < 0:
# Shortcut common case - newline found in buffer.
offset = self.offset - self.extrastart
i = self.extrabuf.find(b'\n', offset) + 1
if i > 0:
self.extrasize -= i - offset
self.offset += i - offset
return self.extrabuf[offset: i]
size = sys.maxsize
readsize = self.min_readsize
else:
readsize = size
bufs = []
while size != 0:
c = self.read(readsize)
i = c.find(b'\n')
# We set i=size to break out of the loop under two
# conditions: 1) there's no newline, and the chunk is
# larger than size, or 2) there is a newline, but the
# resulting line would be longer than 'size'.
if (size <= i) or (i == -1 and len(c) > size):
i = size - 1
if i >= 0 or c == b'':
bufs.append(c[:i + 1]) # Add portion of last chunk
self._unread(c[i + 1:]) # Push back rest of chunk
break
# Append chunk to list, decrease 'size',
bufs.append(c)
size = size - len(c)
readsize = min(size, readsize * 2)
if readsize > self.min_readsize:
self.min_readsize = min(readsize, self.min_readsize * 2, 512)
return b''.join(bufs) # Return resulting line
def compress(data, compresslevel=9):
"""Compress data in one shot and return the compressed string.
Optional argument is the compression level, in range of 1-9.
"""
buf = io.BytesIO()
with GzipFile(fileobj=buf, mode='wb', compresslevel=compresslevel) as f:
f.write(data)
return buf.getvalue()
def decompress(data):
"""Decompress a gzip compressed string in one shot.
Return the decompressed string.
"""
with GzipFile(fileobj=io.BytesIO(data)) as f:
return f.read()
def _test():
# Act like gzip; with -d, act like gunzip.
# The input file is not deleted, however, nor are any other gzip
# options or features supported.
args = sys.argv[1:]
decompress = args and args[0] == "-d"
if decompress:
args = args[1:]
if not args:
args = ["-"]
for arg in args:
if decompress:
if arg == "-":
f = GzipFile(filename="", mode="rb", fileobj=sys.stdin.buffer)
g = sys.stdout.buffer
else:
if arg[-3:] != ".gz":
print("filename doesn't end in .gz:", repr(arg))
continue
f = open(arg, "rb")
g = builtins.open(arg[:-3], "wb")
else:
if arg == "-":
f = sys.stdin.buffer
g = GzipFile(filename="", mode="wb", fileobj=sys.stdout.buffer)
else:
f = builtins.open(arg, "rb")
g = open(arg + ".gz", "wb")
while True:
chunk = f.read(1024)
if not chunk:
break
g.write(chunk)
if g is not sys.stdout:
g.close()
if f is not sys.stdin:
f.close()
if __name__ == '__main__':
_test()
| lgpl-3.0 |
density215/d215-miniblog | unidecode/x0d6.py | 253 | 4765 | data = (
'hyeo', # 0x00
'hyeog', # 0x01
'hyeogg', # 0x02
'hyeogs', # 0x03
'hyeon', # 0x04
'hyeonj', # 0x05
'hyeonh', # 0x06
'hyeod', # 0x07
'hyeol', # 0x08
'hyeolg', # 0x09
'hyeolm', # 0x0a
'hyeolb', # 0x0b
'hyeols', # 0x0c
'hyeolt', # 0x0d
'hyeolp', # 0x0e
'hyeolh', # 0x0f
'hyeom', # 0x10
'hyeob', # 0x11
'hyeobs', # 0x12
'hyeos', # 0x13
'hyeoss', # 0x14
'hyeong', # 0x15
'hyeoj', # 0x16
'hyeoc', # 0x17
'hyeok', # 0x18
'hyeot', # 0x19
'hyeop', # 0x1a
'hyeoh', # 0x1b
'hye', # 0x1c
'hyeg', # 0x1d
'hyegg', # 0x1e
'hyegs', # 0x1f
'hyen', # 0x20
'hyenj', # 0x21
'hyenh', # 0x22
'hyed', # 0x23
'hyel', # 0x24
'hyelg', # 0x25
'hyelm', # 0x26
'hyelb', # 0x27
'hyels', # 0x28
'hyelt', # 0x29
'hyelp', # 0x2a
'hyelh', # 0x2b
'hyem', # 0x2c
'hyeb', # 0x2d
'hyebs', # 0x2e
'hyes', # 0x2f
'hyess', # 0x30
'hyeng', # 0x31
'hyej', # 0x32
'hyec', # 0x33
'hyek', # 0x34
'hyet', # 0x35
'hyep', # 0x36
'hyeh', # 0x37
'ho', # 0x38
'hog', # 0x39
'hogg', # 0x3a
'hogs', # 0x3b
'hon', # 0x3c
'honj', # 0x3d
'honh', # 0x3e
'hod', # 0x3f
'hol', # 0x40
'holg', # 0x41
'holm', # 0x42
'holb', # 0x43
'hols', # 0x44
'holt', # 0x45
'holp', # 0x46
'holh', # 0x47
'hom', # 0x48
'hob', # 0x49
'hobs', # 0x4a
'hos', # 0x4b
'hoss', # 0x4c
'hong', # 0x4d
'hoj', # 0x4e
'hoc', # 0x4f
'hok', # 0x50
'hot', # 0x51
'hop', # 0x52
'hoh', # 0x53
'hwa', # 0x54
'hwag', # 0x55
'hwagg', # 0x56
'hwags', # 0x57
'hwan', # 0x58
'hwanj', # 0x59
'hwanh', # 0x5a
'hwad', # 0x5b
'hwal', # 0x5c
'hwalg', # 0x5d
'hwalm', # 0x5e
'hwalb', # 0x5f
'hwals', # 0x60
'hwalt', # 0x61
'hwalp', # 0x62
'hwalh', # 0x63
'hwam', # 0x64
'hwab', # 0x65
'hwabs', # 0x66
'hwas', # 0x67
'hwass', # 0x68
'hwang', # 0x69
'hwaj', # 0x6a
'hwac', # 0x6b
'hwak', # 0x6c
'hwat', # 0x6d
'hwap', # 0x6e
'hwah', # 0x6f
'hwae', # 0x70
'hwaeg', # 0x71
'hwaegg', # 0x72
'hwaegs', # 0x73
'hwaen', # 0x74
'hwaenj', # 0x75
'hwaenh', # 0x76
'hwaed', # 0x77
'hwael', # 0x78
'hwaelg', # 0x79
'hwaelm', # 0x7a
'hwaelb', # 0x7b
'hwaels', # 0x7c
'hwaelt', # 0x7d
'hwaelp', # 0x7e
'hwaelh', # 0x7f
'hwaem', # 0x80
'hwaeb', # 0x81
'hwaebs', # 0x82
'hwaes', # 0x83
'hwaess', # 0x84
'hwaeng', # 0x85
'hwaej', # 0x86
'hwaec', # 0x87
'hwaek', # 0x88
'hwaet', # 0x89
'hwaep', # 0x8a
'hwaeh', # 0x8b
'hoe', # 0x8c
'hoeg', # 0x8d
'hoegg', # 0x8e
'hoegs', # 0x8f
'hoen', # 0x90
'hoenj', # 0x91
'hoenh', # 0x92
'hoed', # 0x93
'hoel', # 0x94
'hoelg', # 0x95
'hoelm', # 0x96
'hoelb', # 0x97
'hoels', # 0x98
'hoelt', # 0x99
'hoelp', # 0x9a
'hoelh', # 0x9b
'hoem', # 0x9c
'hoeb', # 0x9d
'hoebs', # 0x9e
'hoes', # 0x9f
'hoess', # 0xa0
'hoeng', # 0xa1
'hoej', # 0xa2
'hoec', # 0xa3
'hoek', # 0xa4
'hoet', # 0xa5
'hoep', # 0xa6
'hoeh', # 0xa7
'hyo', # 0xa8
'hyog', # 0xa9
'hyogg', # 0xaa
'hyogs', # 0xab
'hyon', # 0xac
'hyonj', # 0xad
'hyonh', # 0xae
'hyod', # 0xaf
'hyol', # 0xb0
'hyolg', # 0xb1
'hyolm', # 0xb2
'hyolb', # 0xb3
'hyols', # 0xb4
'hyolt', # 0xb5
'hyolp', # 0xb6
'hyolh', # 0xb7
'hyom', # 0xb8
'hyob', # 0xb9
'hyobs', # 0xba
'hyos', # 0xbb
'hyoss', # 0xbc
'hyong', # 0xbd
'hyoj', # 0xbe
'hyoc', # 0xbf
'hyok', # 0xc0
'hyot', # 0xc1
'hyop', # 0xc2
'hyoh', # 0xc3
'hu', # 0xc4
'hug', # 0xc5
'hugg', # 0xc6
'hugs', # 0xc7
'hun', # 0xc8
'hunj', # 0xc9
'hunh', # 0xca
'hud', # 0xcb
'hul', # 0xcc
'hulg', # 0xcd
'hulm', # 0xce
'hulb', # 0xcf
'huls', # 0xd0
'hult', # 0xd1
'hulp', # 0xd2
'hulh', # 0xd3
'hum', # 0xd4
'hub', # 0xd5
'hubs', # 0xd6
'hus', # 0xd7
'huss', # 0xd8
'hung', # 0xd9
'huj', # 0xda
'huc', # 0xdb
'huk', # 0xdc
'hut', # 0xdd
'hup', # 0xde
'huh', # 0xdf
'hweo', # 0xe0
'hweog', # 0xe1
'hweogg', # 0xe2
'hweogs', # 0xe3
'hweon', # 0xe4
'hweonj', # 0xe5
'hweonh', # 0xe6
'hweod', # 0xe7
'hweol', # 0xe8
'hweolg', # 0xe9
'hweolm', # 0xea
'hweolb', # 0xeb
'hweols', # 0xec
'hweolt', # 0xed
'hweolp', # 0xee
'hweolh', # 0xef
'hweom', # 0xf0
'hweob', # 0xf1
'hweobs', # 0xf2
'hweos', # 0xf3
'hweoss', # 0xf4
'hweong', # 0xf5
'hweoj', # 0xf6
'hweoc', # 0xf7
'hweok', # 0xf8
'hweot', # 0xf9
'hweop', # 0xfa
'hweoh', # 0xfb
'hwe', # 0xfc
'hweg', # 0xfd
'hwegg', # 0xfe
'hwegs', # 0xff
)
| bsd-3-clause |
Danisan/odoo-1 | addons/marketing_campaign/__init__.py | 380 | 1087 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import marketing_campaign
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rubyAce71697/cricket-score-applet | cricket_score_indicator/cric_indicator.py | 1 | 24906 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import gi.repository
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk, GObject
gi.require_version('Notify', '0.7')
from gi.repository import Notify
gi.require_version('AppIndicator3', '0.1')
from gi.repository import AppIndicator3 as appindicator
import threading
import time
import signal
import webbrowser
import sys
from cricket_score_indicator.espn_scrap import get_matches_summary, get_match_data, DEFAULT_ICON, MATCH_URL_HTML
# the timeout between each fetch
REFRESH_INTERVAL = 10 # second(s)
ICON_PREFIX= "cricscore_indicator-"
ICON_SUFFIX = ""
# DEBUG=1
# from os import path
# ICON_PATH = path.join(path.abspath(path.dirname(__file__)), "..", "icons")
# DARK_ICONS = path.join(ICON_PATH, "dark")
# LIGHT_ICONS = path.join(ICON_PATH, "light")
class CricInd:
    """AppIndicator showing live cricket scores in the panel.

    A daemon thread polls ESPN (via espn_scrap helpers) every
    REFRESH_INTERVAL seconds and pushes all widget changes onto the GTK
    idle queue.  Per-match state lives in dicts built by
    create_match_item(), kept in self.intl_menu / self.dom_menu.
    """
    def __init__(self):
        # libnotify is used for ball-event notifications (4 / 6 / wicket)
        Notify.init("cricket score indicator")
        self.notification = Notify.Notification.new("")
        self.notification.set_app_name("Cricket Score")
        """
        Initialize appindicator and other menus
        """
        self.indicator = appindicator.Indicator.new("cricket-indicator",
                                                    ICON_PREFIX + DEFAULT_ICON+ ICON_SUFFIX ,
                                                    appindicator.IndicatorCategory.APPLICATION_STATUS)
        # if DEBUG:
        # self.indicator.set_icon_theme_path(DARK_ICONS)
        self.indicator.set_status(appindicator.IndicatorStatus.ACTIVE)
        self.indicator.set_label("Loading", "")
        self.indicator.connect("scroll-event", self.scroll_event_cb)
        self.menu_setup()
        # the 'id' of match selected for display as indicator label
        self.label_match_id = None
        # ids of matches whose scorecard submenu is currently expanded
        self.open_scorecard = set()
        # lists of per-match state dicts (see create_match_item)
        self.intl_menu = []
        self.dom_menu = []

    def main(self):
        """
        Main entry point of execution
        """
        # handle 'C-c'
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        # fetch loop runs off the UI thread; daemon=True so quitting the
        # Gtk main loop does not need to join it
        self.thread = threading.Thread(target=self.main_update_data)
        self.thread.daemon = True
        self.thread.start()
        Gtk.main()

    def menu_setup(self):
        """
        Setup the Gtk self.menu of the indicator
        """
        self.flag = 1  # keeps the background update loop running
        self.middleClickID = 0
        self.menu = Gtk.Menu.new()
        # insensitive header rows act as section titles
        intl_header = Gtk.MenuItem.new_with_label("INTERNATIONAL")
        intl_header.set_sensitive(False)
        intl_header.show()
        self.menu.append(intl_header)
        intl_sep = Gtk.SeparatorMenuItem.new()
        intl_sep.show()
        self.menu.append(intl_sep)
        dom_header = Gtk.MenuItem.new_with_label("DOMESTIC")
        dom_header.set_sensitive(False)
        dom_header.show()
        self.menu.append(dom_header)
        dom_sep = Gtk.SeparatorMenuItem.new()
        dom_sep.show()
        self.menu.append(dom_sep)
        # separate out matches from "About" and "Quit"
        sep_item = Gtk.SeparatorMenuItem.new()
        sep_item.show()
        # hide our middle-click callback inside the separator
        sep_item.connect("activate", self.middle_click_cb)
        self.menu.append(sep_item)
        # some self promotion
        about_item = Gtk.MenuItem("About")
        about_item.connect("activate", about)
        about_item.show()
        self.menu.append(about_item)
        # if DEBUG:
        # theme_item = Gtk.MenuItem("Change theme")
        # theme_item.connect("activate", self.change_icon_theme)
        # theme_item.show()
        # self.menu.append(theme_item)
        #we need a way to quit if the indicator is irritating ;-)
        quit_item = Gtk.MenuItem("Quit")
        quit_item.connect("activate", quit_indicator)
        quit_item.show()
        self.menu.append(quit_item)
        self.indicator.set_menu(self.menu)
        #create menu for middle click
        self.create_scorecard_menu()
        # use the separator's cb for toggling scoreboard
        self.indicator.set_secondary_activate_target(sep_item)

    def create_scorecard_menu(self):
        # Menu popped up on middle-click: description / scorecard /
        # commentary rows for the match shown as the indicator label.
        # Children are addressed later by fixed index (0, 2, 4), so the
        # append order below matters.
        self.scoreboardMenu = Gtk.Menu.new()
        descriptionItem = Gtk.MenuItem("This is desctiption item")
        scorecardItem = Gtk.MenuItem("This is sorecard")
        commentaryItem = Gtk.MenuItem("Commentary is Loading")
        quitItem = Gtk.MenuItem("Quit")
        aboutItem = Gtk.MenuItem("About")
        toogleItem = Gtk.MenuItem("Back to List")
        quitItem.connect("activate",quit_indicator)
        aboutItem.connect("activate",about)
        self.scoreboardMenu.append(descriptionItem)
        self.scoreboardMenu.append(Gtk.SeparatorMenuItem())
        self.scoreboardMenu.append(scorecardItem)
        self.scoreboardMenu.append(Gtk.SeparatorMenuItem())
        self.scoreboardMenu.append(commentaryItem)
        self.scoreboardMenu.append(Gtk.SeparatorMenuItem())
        self.scoreboardMenu.append(toogleItem)
        self.scoreboardMenu.append(aboutItem)
        self.scoreboardMenu.append(quitItem)
        descriptionItem.show()
        scorecardItem.show()
        commentaryItem.show()
        quitItem.show()
        #toogleItem.show()
        aboutItem.show()
        # show the separators between the three info rows
        self.scoreboardMenu.get_children()[1].show()
        self.scoreboardMenu.get_children()[3].show()
        self.scoreboardMenu.get_children()[5].show()

    def middle_click_cb(self, widget):
        """Pop up the scoreboard menu for the match currently set as label."""
        if self.label_match_id is None:
            return
        match_item = None
        # linear search FTW
        for i, v in enumerate(self.intl_menu):
            if v['id'] == self.label_match_id:
                match_item = v
                break
        else:
            for i, v in enumerate(self.dom_menu):
                if v['id'] == self.label_match_id:
                    match_item = v
                    break
        if match_item is None:
            return
        # simulate the button-click
        match_item['gtk_check'].set_active(True)
        # indices 0/2/4 = description/scorecard/commentary rows
        self.scoreboardMenu.get_children()[0].set_label(match_item['gtk_menu'].get_label())
        self.scoreboardMenu.get_children()[2].set_label(match_item['gtk_scorecard'].get_label())
        self.scoreboardMenu.get_children()[4].set_label(match_item['gtk_commentary'].get_label())
        # self.indicator.set_menu(self.scoreboardMenu)
        self.scoreboardMenu.popup(None,self.menu,None,None,0,0)

    # def change_icon_theme(self, widget):
    # if DEBUG:
    # if self.indicator.get_icon_theme_path() == DARK_ICONS:
    # self.indicator.set_icon_theme_path(LIGHT_ICONS)
    # else:
    # self.indicator.set_icon_theme_path(DARK_ICONS)

    def show_scorecard_cb(self, widget, match_item):
        """Toggled-handler of a match's "Scorecard" check item."""
        if widget.get_active(): # ON state
            # remember the 'id' of the match;
            # needed when upstream list is updated
            self.open_scorecard.add(match_item['id'])
            self.expand_submenu(match_item)
        else: # OFF state
            if match_item['id'] in self.open_scorecard:
                self.open_scorecard.remove(match_item['id'])
            self.contract_submenu(match_item)

    def contract_submenu(self, match_item):
        """Hide a match's expanded submenu rows and reset its score state."""
        match_item['gtk_check'].set_active(False)
        match_item['gtk_description'].hide()
        match_item['gtk_separator_2'].hide()
        match_item['gtk_scorecard'].hide()
        match_item['gtk_separator_3'].hide()
        match_item['gtk_commentary'].hide()
        match_item['gtk_separator_4'].hide()
        match_item['last_ball'] = DEFAULT_ICON # set to default
        match_item['status'] = ""
        match_item['label_scoreline'] = ""
        # force update in current cycle
        self.update_menu_icon(match_item)
        #GObject.idle_add(match_item['gtk_menu'].set_image,Gtk.Image.new_from_icon_name(ICON_PREFIX + match_item['last_ball'], Gtk.IconSize.BUTTON))
        if match_item['id'] == self.label_match_id:
            self.set_indicator_icon(match_item['last_ball'])
            self.set_indicator_label(match_item['gtk_menu'].get_label())

    def expand_submenu(self, match_item):
        """Show a match's submenu rows (commentary only when non-empty)."""
        match_item['gtk_check'].set_active(True)
        match_item['gtk_description'].show()
        match_item['gtk_separator_2'].show()
        match_item['gtk_scorecard'].show()
        match_item['gtk_separator_3'].show()
        if match_item['gtk_commentary'].get_label() != "":
            match_item['gtk_commentary'].show()
            match_item['gtk_separator_4'].show()

    def create_match_item(self, match_info):
        """Build the menu entry + submenu widgets for one match.

        Returns a dict bundling the Gtk widgets together with the match's
        id, url and score state so later updates can address them as one.
        """
        match_item = {
            # GTK stuff
            'gtk_menu': Gtk.ImageMenuItem.new_with_label(match_info['scoreline']),
            # NOTE: Gtk.ImageMenuItem has been deprecated in GTK 3.10
            'gtk_submenu': Gtk.Menu.new(),
            'gtk_set_as_label': Gtk.MenuItem.new_with_label("Set as label"),
            'gtk_description': Gtk.MenuItem.new_with_label(match_info['description']),
            'gtk_scorecard': Gtk.MenuItem.new_with_label(match_info['scorecard']),
            'gtk_commentary': Gtk.MenuItem.new_with_label(match_info['comms']),
            'gtk_check': Gtk.CheckMenuItem.new_with_label("Scorecard"),
            'gtk_open_in_browser': Gtk.MenuItem.new_with_label('Open in browser'),
            'gtk_separator_1': Gtk.SeparatorMenuItem().new(),
            'gtk_separator_2': Gtk.SeparatorMenuItem().new(),
            'gtk_separator_3': Gtk.SeparatorMenuItem().new(),
            'gtk_separator_4': Gtk.SeparatorMenuItem().new(),
            # our stuff
            'id': match_info['id'],
            'url': match_info['url'],
            "last_ball": match_info['last_ball'],
            "status" : match_info['status'],
            #added as part of shortlabel branch
            "label_scoreline": match_info['label_scoreline']
        }
        match_item['gtk_menu'].set_image(Gtk.Image.new_from_icon_name(ICON_PREFIX + match_info['last_ball'] + ICON_SUFFIX, Gtk.IconSize.BUTTON))
        match_item['gtk_menu'].set_always_show_image(True)
        match_item['gtk_set_as_label'].connect("activate", self.set_as_label_cb, match_item)
        # info rows are display-only
        match_item['gtk_description'].set_sensitive(False)
        match_item['gtk_scorecard'].set_sensitive(False)
        match_item['gtk_commentary'].set_sensitive(False)
        match_item['gtk_check'].set_active(False)
        match_item['gtk_check'].connect("toggled", self.show_scorecard_cb, match_item)
        match_item['gtk_open_in_browser'].connect("activate", self.open_in_browser_cb, match_item)
        match_item['gtk_submenu'].append(match_item['gtk_set_as_label'])
        match_item['gtk_submenu'].append(match_item['gtk_separator_1'])
        match_item['gtk_submenu'].append(match_item['gtk_description'])
        match_item['gtk_submenu'].append(match_item['gtk_separator_2'])
        match_item['gtk_submenu'].append(match_item['gtk_scorecard'])
        match_item['gtk_submenu'].append(match_item['gtk_separator_3'])
        match_item['gtk_submenu'].append(match_item['gtk_commentary'])
        match_item['gtk_submenu'].append(match_item['gtk_separator_4'])
        match_item['gtk_submenu'].append(match_item['gtk_check'])
        match_item['gtk_submenu'].append(match_item['gtk_open_in_browser'])
        match_item['gtk_menu'].set_submenu(match_item['gtk_submenu'])
        # everything is "hidden" by default, so we call "show"
        match_item['gtk_menu'].show()
        match_item['gtk_set_as_label'].show()
        match_item['gtk_separator_1'].show()
        match_item['gtk_check'].show()
        match_item['gtk_open_in_browser'].show()
        return match_item

    def set_as_label_cb(self, widget, match_item):
        """
        Callback for 'set as label' menuitem
        """
        # the user has selected this 'm_id' as current label, so we remember it
        self.label_match_id = match_item['id']
        # removed as part of shortlabel branch
        if match_item['label_scoreline']:
            label = match_item['label_scoreline']
        else:
            label = match_item['gtk_menu'].get_label()
        self.set_indicator_label(label )
        self.set_indicator_icon(match_item['last_ball'])

    def scroll_event_cb(self, obj, delta, direction):
        """
        Process scroll-event(s)
        Change indicator label to next/prev in list depending on direction
        """
        if self.label_match_id is None:
            return
        index = -1
        # linear search FTW
        for i, v in enumerate(self.intl_menu):
            if v['id'] == self.label_match_id:
                index = i
                break
        else:
            for i, v in enumerate(self.dom_menu):
                if v['id'] == self.label_match_id:
                    index = len(self.intl_menu) + i
                    break
        if index == -1:
            return
        # wrap around the combined intl+dom list in either direction
        if direction == Gdk.ScrollDirection.DOWN:
            # activate the button
            (self.intl_menu + self.dom_menu)[(index-1)%len(self.intl_menu+self.dom_menu)]['gtk_set_as_label'].activate()
        else:
            (self.intl_menu + self.dom_menu)[(index+1)%len(self.intl_menu+self.dom_menu)]['gtk_set_as_label'].activate()

    def open_in_browser_cb(self, widget, match_item):
        # open the full HTML scorecard page for this match
        webbrowser.open(MATCH_URL_HTML(match_item['url']))

    def main_update_data(self):
        # Background-thread body: poll and refresh, then sleep out the
        # remainder of REFRESH_INTERVAL.
        while self.flag:
            start = time.time() # get UNIX time
            self.update_labels()
            self.update_sublabels()
            #print self.indicator.get_icon_theme_path()
            duration = time.time() - start # resolution of 1 second is guaranteed
            if duration < REFRESH_INTERVAL:
                # sleep if we still have some time left before website update
                time.sleep(REFRESH_INTERVAL-duration)

    def update_labels(self):
        """
        Fetch the current matches' summary and update the menuitems
        Maybe add or remove menuitems as per the fetched data
        """
        intl_matches, dom_matches = get_matches_summary()
        print intl_matches, dom_matches
        if intl_matches == None: # request failed! we've got nothing new
            return
        # remove items
        while len(self.intl_menu) > 0 and len(self.intl_menu) > len(intl_matches):
            # GTK updates shouldn't be done in a separate thread,
            # so we add our update to idle queue
            GObject.idle_add(self.remove_menu, (self.intl_menu.pop())['gtk_menu'])
        while len(self.dom_menu) > 0 and len(self.dom_menu) > len(dom_matches):
            GObject.idle_add(self.remove_menu, (self.dom_menu.pop())['gtk_menu'])
        # add items
        while len(self.intl_menu) < len(intl_matches):
            match_item = self.create_match_item(intl_matches[0])
            GObject.idle_add(self.add_menu, match_item['gtk_menu'], 2) # <-- append after "INTERNATIONAL" header + separator
            self.intl_menu.append(match_item)
        while len(self.dom_menu) < len(dom_matches):
            match_item = self.create_match_item(dom_matches[0])
            GObject.idle_add(self.add_menu, match_item['gtk_menu'], len(self.intl_menu) + 4) # <-- append after "DOMESTIC" header + separator
            self.dom_menu.append(match_item)
        # walk the fetched matches in parallel with our menu item lists
        intl_iter, dom_iter = iter(self.intl_menu), iter(self.dom_menu)
        m_id_set = False
        all_m_id = set()
        for match_info in intl_matches + dom_matches:
            if match_info['intl']:
                intl_item = next(intl_iter)
                intl_item['id'] = match_info['id']
                intl_item['url'] = match_info['url']
                GObject.idle_add(intl_item['gtk_menu'].set_label, match_info['scoreline'])
                curr_item = intl_item
            else:
                dom_item = next(dom_iter)
                dom_item['id'] = match_info['id']
                dom_item['url'] = match_info['url']
                GObject.idle_add(dom_item['gtk_menu'].set_label, match_info['scoreline'])
                curr_item = dom_item
            if match_info['id'] in self.open_scorecard:
                GObject.idle_add(self.expand_submenu, curr_item)
            else:
                GObject.idle_add(self.contract_submenu, curr_item)
            # if current id is set as label
            if not m_id_set and (self.label_match_id is None or match_info['id'] == self.label_match_id):
                self.label_match_id = match_info['id']
                # added in branch shortlabel: Requirement: DD - 170/6 Over(20.0)
                #Logic: if current is set_as_label and open scorecard --> no update will be done since it is updated during updating submenu
                print not match_info['id'] in self.open_scorecard
                print not match_info['label_scoreline']
                """
                {u'team_id': u'3', u'remaining_wickets': u'5', u'event': u'0', u'live_current_name': u'current innings', u'over_limit': u'0.0', u'lead': u'86', u'batted': u'1', u'bowling_team_id': u'2', u'live_current': u'1', u'event_name': None, u'wickets': u'5', u'over_split_limit': u'0.0', u'overs': u'55.0', u'over_limit_run_rate': None, u'runs': u'171', u'balls': u'330', u'remaining_balls': u'0', u'target': u'0', u'remaining_overs': u'0.0', u'innings_number': u'2', u'bpo': u'6', u'required_run_rate': None, u'ball_limit': u'0', u'batting_team_id': u'3', u'run_rate': u'3.10'}
                """
                if not match_info['id'] in self.open_scorecard :
                    label = match_info['scoreline']
                    print "label while updating: " + label
                    GObject.idle_add(self.set_indicator_label,label)
                """
                :: removed in shortlabel branch
                print len(self.get_indicator_label().split(" "))
                label += " " if len(self.get_indicator_label().split(" "))>1 and match_info['id'] in self.open_scorecard else ""
                #label += self.get_indicator_label().split(" -- ")[1] if len(self.get_indicator_label().split(" -- ")) else ""
                print len(self.get_indicator_label().split(" ") and match_info['status'])
                if len(self.get_indicator_label().split(" "))>1 and match_info['id'] in self.open_scorecard:
                    label += self.get_indicator_label().split(" ")[1]
                """
                """
                label in updation:
                """
                m_id_set = True
            all_m_id.add(match_info['id'])
        # don't keep stale m_id's
        self.open_scorecard.intersection_update(all_m_id)
        # we don't want the indicator label to point at old stuff
        if not m_id_set:
            # try setting with intl first
            if len(self.intl_menu) > 0:
                self.label_match_id = self.intl_menu[0]['id']
                GObject.idle_add(self.set_indicator_label, self.intl_menu[0]['gtk_menu'].get_label())
            elif len(self.dom_menu) > 0:
                self.label_match_id = self.dom_menu[0]['id']
                GObject.idle_add(self.set_indicator_label, self.dom_menu[0]['gtk_menu'].get_label())
            # set to default
            else:
                self.label_match_id = None
                GObject.idle_add(self.set_indicator_label, 'Nothings')
                # set the default icon
                GObject.idle_add(self.set_indicator_icon, DEFAULT_ICON)

    def update_sublabels(self):
        """
        update the scorecard, commentary text for each match
        """
        # one fetch thread per expanded match; join them all before return
        threads = []
        for m in self.intl_menu + self.dom_menu:
            if m['gtk_check'].get_active():
                threads.append(threading.Thread(
                    target=self.update_submenu_data, args=(m,)))
                threads[-1].start()
        for thread in threads:
            thread.join()

    def update_submenu_data(self, match_item):
        # Fetch detailed data for one match and refresh its submenu,
        # indicator label/icon and (on 4/6/W) fire a notification.
        match_info = get_match_data(match_item['url'])
        if match_info is None:
            return
        # we've been away for a while, some things may have changed
        if match_item['gtk_check'].get_active():
            match_item['status'] = match_info['status']
            print "match_item status: " + match_item['status']
            match_item['last_ball'] = match_info['last_ball']
            GObject.idle_add(self.update_menu_icon, match_item)
            GObject.idle_add(self.set_submenu_items, match_item, match_info)
            match_item['label_scoreline'] = match_info['label_scoreline']
            if match_item['id'] == self.label_match_id:
                """
                :: Removed in shortlabel branch
                print len(self.get_indicator_label().split(" "))
                label = self.get_indicator_label().split(" ")[0]
                label += " " if match_item['status'].strip() else ""
                label += match_item['status'].strip() if match_item['status'].strip() else ""
                """
                #added in shotlabel branch
                if match_item['label_scoreline']:
                    label = match_info['label_scoreline']
                    print "label in updation: " + label
                    GObject.idle_add(self.set_indicator_label,label)
                else:
                    label = match_item['gtk_menu'].get_label()
                    GObject.idle_add(self.set_indicator_label,label)
                GObject.idle_add(self.set_indicator_icon, match_info['last_ball'])
                GObject.idle_add(self.setScoreBoardMenu,match_info)
            if match_item['last_ball'] in ['4','6','W']:
                # boundary or wicket: pop a desktop notification with the
                # scorecard line plus the first two commentary lines
                self.notification.update(
                    match_item['gtk_menu'].get_label(),
                    match_item['gtk_scorecard'].get_label() + "\n" + ("\n").join(match_item['gtk_commentary'].get_label().split("\n")[:2]),
                    ICON_PREFIX + match_info['last_ball'] + ICON_SUFFIX
                )
                print "for notification : " + ICON_PREFIX + match_info['last_ball'] + ICON_SUFFIX
                self.notification.show()
        else:
            match_item['status'] = ""

    ### Helpers
    def set_indicator_label(self, label):
        # second argument is the label-guide (width hint) for the panel
        print "label receivied: " + label
        self.indicator.set_label(label, "Cricket Score Indicator")

    def get_indicator_label(self):
        return self.indicator.get_label()

    def set_indicator_icon(self, icon):
        self.indicator.set_icon(ICON_PREFIX + icon+ ICON_SUFFIX)

    def add_menu(self, widget, pos):
        # insert a match row at a fixed position below its section header
        self.indicator.get_menu().insert(widget, pos)

    def remove_menu(self, widget):
        self.indicator.get_menu().remove(widget)

    def setScoreBoardMenu(self,match_info):
        # indices 0/2/4 = description/scorecard/commentary rows
        self.scoreboardMenu.get_children()[0].set_label(match_info['description'])
        self.scoreboardMenu.get_children()[2].set_label(match_info['scorecard'])
        self.scoreboardMenu.get_children()[4].set_label(match_info['comms'])

    def set_submenu_items(self, match_item, match_info):
        match_item['gtk_scorecard'].set_label(match_info['scorecard'])
        match_item['gtk_description'].set_label(match_info['description'])
        match_item['gtk_commentary'].set_label(match_info['comms'])

    def update_menu_icon(self, match_item):
        print ICON_PREFIX + match_item['last_ball'] + ICON_SUFFIX
        match_item['gtk_menu'].set_image(Gtk.Image.new_from_icon_name(ICON_PREFIX + match_item['last_ball'] + ICON_SUFFIX, Gtk.IconSize.BUTTON))
def run():
    """Console entry point: build the indicator and enter the Gtk loop."""
    CricInd().main()
def quit_indicator(widget):
    # "Quit" menu callback: stop the Gtk main loop (daemon fetch thread
    # dies with the process)
    Gtk.main_quit()
def about(widget):
    """Display the "About" dialog for the applet and block until closed."""
    dlg = Gtk.AboutDialog.new()
    # fixes the "mapped without transient parent" warning
    dlg.set_transient_for(widget.get_parent().get_parent())
    dlg.set_program_name("Cricket Score Indicator")
    dlg.set_comments("Displays live scores from ESPN website in your indicator panel")
    dlg.set_logo_icon_name("cricscore_indicator")
    dlg.set_license_type(Gtk.License.GPL_3_0)
    dlg.set_website("https://github.com/rubyAce71697/cricket-score-applet")
    dlg.set_website_label("Github page")
    dlg.add_credit_section("Authors:", ['Nishant Kukreja (github.com/rubyace71697)', 'Abhishek (github.com/rawcoder)'])
    dlg.run()
    dlg.destroy()
if __name__ == "__main__":
    # Running the module directly: warn about the preferred entry point,
    # then start the applet anyway.
    print ("Use 'cricscore_indicator' to run the applet")
    CricInd().main()
| gpl-3.0 |
charbeljc/account-financial-tools | account_constraints/tests/__init__.py | 39 | 1269 | # -*- coding: utf-8 -*-
#
#
# Authors: Adrien Peiffer
# Copyright (c) 2014 Acsone SA/NV (http://www.acsone.eu)
# All Rights Reserved
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contact a Free Software
# Service Company.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from . import test_account_constraints
| agpl-3.0 |
lukauskas/scipy | scipy/sparse/csgraph/_validation.py | 137 | 2405 | from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.sparse import csr_matrix, isspmatrix, isspmatrix_csc
from ._tools import csgraph_to_dense, csgraph_from_dense,\
csgraph_masked_from_dense, csgraph_from_masked
DTYPE = np.float64
def validate_graph(csgraph, directed, dtype=DTYPE,
                   csr_output=True, dense_output=True,
                   copy_if_dense=False, copy_if_sparse=False,
                   null_value_in=0, null_value_out=np.inf,
                   infinity_null=True, nan_null=True):
    """Routine for validation and conversion of csgraph inputs

    Accepts a scipy sparse matrix, a numpy masked array, or a dense
    array-like, and returns a square float64 graph in either CSR or dense
    form.  In the dense form, "null" (absent) edges are stored as
    ``null_value_out``; on input, ``null_value_in`` (and optionally
    inf/nan) mark absent edges.

    NOTE(review): the ``dtype`` parameter is accepted but never used —
    the module-level DTYPE (float64) is applied throughout; confirm
    before relying on it.

    Raises
    ------
    ValueError
        If neither output form is requested, or the result is not a
        two-dimensional square matrix.
    """
    if not (csr_output or dense_output):
        raise ValueError("Internal: dense or csr output must be true")
    # if undirected and csc storage, then transposing in-place
    # is quicker than later converting to csr.
    if (not directed) and isspmatrix_csc(csgraph):
        csgraph = csgraph.T
    if isspmatrix(csgraph):
        if csr_output:
            csgraph = csr_matrix(csgraph, dtype=DTYPE, copy=copy_if_sparse)
        else:
            csgraph = csgraph_to_dense(csgraph, null_value=null_value_out)
    elif np.ma.isMaskedArray(csgraph):
        if dense_output:
            # masked entries become null edges in the dense output
            mask = csgraph.mask
            csgraph = np.array(csgraph.data, dtype=DTYPE, copy=copy_if_dense)
            csgraph[mask] = null_value_out
        else:
            csgraph = csgraph_from_masked(csgraph)
    else:
        if dense_output:
            # round-trip through a masked array so null_value_in / nan /
            # inf entries are uniformly rewritten to null_value_out
            csgraph = csgraph_masked_from_dense(csgraph,
                                                copy=copy_if_dense,
                                                null_value=null_value_in,
                                                nan_null=nan_null,
                                                infinity_null=infinity_null)
            mask = csgraph.mask
            csgraph = np.asarray(csgraph.data, dtype=DTYPE)
            csgraph[mask] = null_value_out
        else:
            csgraph = csgraph_from_dense(csgraph, null_value=null_value_in,
                                         infinity_null=infinity_null,
                                         nan_null=nan_null)
    if csgraph.ndim != 2:
        raise ValueError("compressed-sparse graph must be two dimensional")
    if csgraph.shape[0] != csgraph.shape[1]:
        raise ValueError("compressed-sparse graph must be shape (N, N)")
    return csgraph
| bsd-3-clause |
vitmod/enigma2-test | lib/python/Screens/SkinSelector.py | 6 | 5315 | # -*- coding: utf-8 -*-
from Screens.Screen import Screen
from Screens.Standby import TryQuitMainloop
from Screens.MessageBox import MessageBox
from Components.ActionMap import NumberActionMap
from Components.Pixmap import Pixmap
from Components.Sources.StaticText import StaticText
from Components.MenuList import MenuList
from Components.config import config, configfile
from Tools.Directories import resolveFilename, SCOPE_ACTIVE_SKIN
from enigma import eEnv, ePicLoad
import os
class SkinSelectorBase:
    """Shared behaviour for the GUI and LCD skin chooser screens.

    Mixed into a Screen subclass that provides SKINXML / PICONSKINXML /
    DEFAULTSKIN / PICONDEFAULTSKIN / root class attributes and sets
    self.config to the ConfigText holding the active skin path.
    """
    def __init__(self, session, args = None):
        self.setTitle(_("Skin Selector"))
        self.skinlist = []
        self.previewPath = ""
        # the two pseudo entries stand for the built-in default skins
        if self.SKINXML and os.path.exists(os.path.join(self.root, self.SKINXML)):
            self.skinlist.append(self.DEFAULTSKIN)
        if self.PICONSKINXML and os.path.exists(os.path.join(self.root, self.PICONSKINXML)):
            self.skinlist.append(self.PICONDEFAULTSKIN)
        # collect every subdirectory of root that carries a skin XML
        for root, dirs, files in os.walk(self.root, followlinks=True):
            for subdir in dirs:
                dir = os.path.join(root,subdir)
                if os.path.exists(os.path.join(dir,self.SKINXML)):
                    self.skinlist.append(subdir)
            # NOTE(review): rebinding 'dirs' does not prune os.walk's
            # recursion; 'dirs[:] = []' would — TODO confirm intent
            dirs = []
        self["key_red"] = StaticText(_("Close"))
        self["key_green"] = StaticText(_("Save"))
        self["introduction"] = StaticText(_("Press OK to activate the selected skin."))
        self["SkinList"] = MenuList(self.skinlist)
        self["Preview"] = Pixmap()
        self.skinlist.sort()
        self["actions"] = NumberActionMap(["SetupActions", "DirectionActions", "TimerEditActions", "ColorActions"],
        {
            "ok": self.ok,
            "cancel": self.close,
            "red": self.close,
            "green": self.ok,
            "up": self.up,
            "down": self.down,
            "left": self.left,
            "right": self.right,
            "log": self.info,
        }, -1)
        # async picture decoder for the skin preview image
        self.picload = ePicLoad()
        self.picload.PictureData.get().append(self.showPic)
        self.onLayoutFinish.append(self.layoutFinished)

    def showPic(self, picInfo=""):
        # decode finished: push the pixmap into the preview widget
        ptr = self.picload.getData()
        if ptr is not None:
            self["Preview"].instance.setPixmap(ptr.__deref__())
            self["Preview"].show()

    def layoutFinished(self):
        # configure the decoder to the preview widget's final size
        self.picload.setPara((self["Preview"].instance.size().width(), self["Preview"].instance.size().height(), 0, 0, 1, 1, "#00000000"))
        # preselect the currently configured skin in the list
        tmp = self.config.value.find("/"+self.SKINXML)
        if tmp != -1:
            tmp = self.config.value[:tmp]
            idx = 0
            for skin in self.skinlist:
                if skin == tmp:
                    break
                idx += 1
            if idx < len(self.skinlist):
                self["SkinList"].moveToIndex(idx)
        self.loadPreview()

    def ok(self):
        """Persist the selected skin and offer a GUI restart."""
        if self["SkinList"].getCurrent() == self.DEFAULTSKIN:
            # default entries map to the bare XML name relative to root
            skinfile = ""
            skinfile = os.path.join(skinfile, self.SKINXML)
        elif self["SkinList"].getCurrent() == self.PICONDEFAULTSKIN:
            skinfile = ""
            skinfile = os.path.join(skinfile, self.PICONSKINXML)
        else:
            skinfile = self["SkinList"].getCurrent()
            skinfile = os.path.join(skinfile, self.SKINXML)
        print "Skinselector: Selected Skin: "+self.root+skinfile
        self.config.value = skinfile
        self.config.save()
        configfile.save()
        restartbox = self.session.openWithCallback(self.restartGUI,MessageBox,_("GUI needs a restart to apply a new skin\nDo you want to restart the GUI now?"), MessageBox.TYPE_YESNO)
        restartbox.setTitle(_("Restart GUI now?"))

    def up(self):
        self["SkinList"].up()
        self.loadPreview()

    def down(self):
        self["SkinList"].down()
        self.loadPreview()

    def left(self):
        self["SkinList"].pageUp()
        self.loadPreview()

    def right(self):
        self["SkinList"].pageDown()
        self.loadPreview()

    def info(self):
        aboutbox = self.session.open(MessageBox,_("Enigma2 skin selector"), MessageBox.TYPE_INFO)
        aboutbox.setTitle(_("About..."))

    def loadPreview(self):
        """Start decoding the preview image of the highlighted skin."""
        if self["SkinList"].getCurrent() == self.DEFAULTSKIN:
            pngpath = "."
            pngpath = os.path.join(os.path.join(self.root, pngpath), "prev.png")
        elif self["SkinList"].getCurrent() == self.PICONDEFAULTSKIN:
            pngpath = "."
            pngpath = os.path.join(os.path.join(self.root, pngpath), "piconprev.png")
        else:
            pngpath = self["SkinList"].getCurrent()
            try:
                pngpath = os.path.join(os.path.join(self.root, pngpath), "prev.png")
            except:
                pass
        # fall back to the active skin's placeholder image
        if not os.path.exists(pngpath):
            pngpath = resolveFilename(SCOPE_ACTIVE_SKIN, "noprev.png")
        # only redecode when the path actually changed
        if self.previewPath != pngpath:
            self.previewPath = pngpath
            self.picload.startDecode(self.previewPath)

    def restartGUI(self, answer):
        # MessageBox callback: 3 == restart-enigma2 mode
        if answer is True:
            self.session.open(TryQuitMainloop, 3)
class SkinSelector(Screen, SkinSelectorBase):
    """Chooser for the primary GUI skin (scans ${datadir}/enigma2)."""
    SKINXML = "skin.xml"            # file that marks a directory as a skin
    DEFAULTSKIN = "< Default >"     # pseudo list entry for the built-in skin
    PICONSKINXML = None             # no picon variant for GUI skins
    PICONDEFAULTSKIN = None
    skinlist = []
    root = os.path.join(eEnv.resolve("${datadir}"),"enigma2")

    def __init__(self, session, args = None):
        Screen.__init__(self, session)
        # NOTE(review): base __init__ is (self, session, args=None), so
        # 'args' lands in its 'session' slot here — TODO confirm intended
        SkinSelectorBase.__init__(self, args)
        Screen.setTitle(self, _("Skin setup"))
        self.skinName = "SkinSelector"
        self.config = config.skin.primary_skin
class LcdSkinSelector(Screen, SkinSelectorBase):
    """Chooser for the front-display (LCD) skin, with a picon variant."""
    SKINXML = "skin_display.xml"
    DEFAULTSKIN = "< Default >"
    PICONSKINXML = "skin_display_picon.xml"
    PICONDEFAULTSKIN = "< Default with Picon >"
    skinlist = []
    root = os.path.join(eEnv.resolve("${datadir}"),"enigma2/display/")

    def __init__(self, session, args = None):
        Screen.__init__(self, session)
        # NOTE(review): base __init__ is (self, session, args=None), so
        # 'args' lands in its 'session' slot here — TODO confirm intended
        SkinSelectorBase.__init__(self, args)
        Screen.setTitle(self, _("Skin setup"))
        self.skinName = "SkinSelector"
        self.config = config.skin.display_skin
| gpl-2.0 |
jnewland/home-assistant | homeassistant/components/flux/switch.py | 7 | 12159 | """
Flux for Home-Assistant.
The idea was taken from https://github.com/KpaBap/hue-flux/
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/switch.flux/
"""
import datetime
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.light import (
is_on, ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_RGB_COLOR, ATTR_TRANSITION,
ATTR_WHITE_VALUE, ATTR_XY_COLOR, DOMAIN as LIGHT_DOMAIN, VALID_TRANSITION)
from homeassistant.components.switch import DOMAIN, SwitchDevice
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_NAME, CONF_PLATFORM, CONF_LIGHTS, CONF_MODE,
SERVICE_TURN_ON, SUN_EVENT_SUNRISE, SUN_EVENT_SUNSET)
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.sun import get_astral_event_date
from homeassistant.util import slugify
from homeassistant.util.color import (
color_temperature_to_rgb, color_RGB_to_xy_brightness,
color_temperature_kelvin_to_mired)
from homeassistant.util.dt import utcnow as dt_utcnow, as_local
_LOGGER = logging.getLogger(__name__)
CONF_START_TIME = 'start_time'
CONF_STOP_TIME = 'stop_time'
CONF_START_CT = 'start_colortemp'
CONF_SUNSET_CT = 'sunset_colortemp'
CONF_STOP_CT = 'stop_colortemp'
CONF_BRIGHTNESS = 'brightness'
CONF_DISABLE_BRIGHTNESS_ADJUST = 'disable_brightness_adjust'
CONF_INTERVAL = 'interval'
MODE_XY = 'xy'
MODE_MIRED = 'mired'
MODE_RGB = 'rgb'
DEFAULT_MODE = MODE_XY
PLATFORM_SCHEMA = vol.Schema({
vol.Required(CONF_PLATFORM): 'flux',
vol.Required(CONF_LIGHTS): cv.entity_ids,
vol.Optional(CONF_NAME, default="Flux"): cv.string,
vol.Optional(CONF_START_TIME): cv.time,
vol.Optional(CONF_STOP_TIME): cv.time,
vol.Optional(CONF_START_CT, default=4000):
vol.All(vol.Coerce(int), vol.Range(min=1000, max=40000)),
vol.Optional(CONF_SUNSET_CT, default=3000):
vol.All(vol.Coerce(int), vol.Range(min=1000, max=40000)),
vol.Optional(CONF_STOP_CT, default=1900):
vol.All(vol.Coerce(int), vol.Range(min=1000, max=40000)),
vol.Optional(CONF_BRIGHTNESS):
vol.All(vol.Coerce(int), vol.Range(min=0, max=255)),
vol.Optional(CONF_DISABLE_BRIGHTNESS_ADJUST): cv.boolean,
vol.Optional(CONF_MODE, default=DEFAULT_MODE):
vol.Any(MODE_XY, MODE_MIRED, MODE_RGB),
vol.Optional(CONF_INTERVAL, default=30): cv.positive_int,
vol.Optional(ATTR_TRANSITION, default=30): VALID_TRANSITION
})
async def async_set_lights_xy(hass, lights, x_val, y_val, brightness,
                              transition):
    """Set color of array of lights."""
    for entity_id in lights:
        # only adjust lights that are currently on
        if not is_on(hass, entity_id):
            continue
        data = {ATTR_ENTITY_ID: entity_id}
        if x_val is not None and y_val is not None:
            data[ATTR_XY_COLOR] = [x_val, y_val]
        if brightness is not None:
            # in xy mode the white channel follows brightness
            data[ATTR_BRIGHTNESS] = brightness
            data[ATTR_WHITE_VALUE] = brightness
        if transition is not None:
            data[ATTR_TRANSITION] = transition
        await hass.services.async_call(
            LIGHT_DOMAIN, SERVICE_TURN_ON, data)
async def async_set_lights_temp(hass, lights, mired, brightness, transition):
    """Set color of array of lights."""
    for entity_id in lights:
        # only adjust lights that are currently on
        if not is_on(hass, entity_id):
            continue
        data = {ATTR_ENTITY_ID: entity_id}
        if mired is not None:
            data[ATTR_COLOR_TEMP] = int(mired)
        if brightness is not None:
            data[ATTR_BRIGHTNESS] = brightness
        if transition is not None:
            data[ATTR_TRANSITION] = transition
        await hass.services.async_call(
            LIGHT_DOMAIN, SERVICE_TURN_ON, data)
async def async_set_lights_rgb(hass, lights, rgb, transition):
    """Set color of array of lights."""
    for entity_id in lights:
        # only adjust lights that are currently on
        if not is_on(hass, entity_id):
            continue
        data = {ATTR_ENTITY_ID: entity_id}
        if rgb is not None:
            data[ATTR_RGB_COLOR] = rgb
        if transition is not None:
            data[ATTR_TRANSITION] = transition
        await hass.services.async_call(
            LIGHT_DOMAIN, SERVICE_TURN_ON, data)
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Set up the Flux switches.

    Builds one FluxSwitch entity from the validated platform config and
    registers a manual-update service for it.
    """
    name = config.get(CONF_NAME)
    lights = config.get(CONF_LIGHTS)
    start_time = config.get(CONF_START_TIME)
    stop_time = config.get(CONF_STOP_TIME)
    start_colortemp = config.get(CONF_START_CT)
    sunset_colortemp = config.get(CONF_SUNSET_CT)
    stop_colortemp = config.get(CONF_STOP_CT)
    brightness = config.get(CONF_BRIGHTNESS)
    disable_brightness_adjust = config.get(CONF_DISABLE_BRIGHTNESS_ADJUST)
    mode = config.get(CONF_MODE)
    interval = config.get(CONF_INTERVAL)
    transition = config.get(ATTR_TRANSITION)
    flux = FluxSwitch(name, hass, lights, start_time, stop_time,
                      start_colortemp, sunset_colortemp, stop_colortemp,
                      brightness, disable_brightness_adjust, mode, interval,
                      transition)
    async_add_entities([flux])

    async def async_update(call=None):
        """Update lights."""
        await flux.async_flux_update()

    # Expose a "<name> update" service so the flux cycle can be triggered
    # manually, independent of the periodic interval tracker.
    service_name = slugify("{} {}".format(name, 'update'))
    hass.services.async_register(DOMAIN, service_name, async_update)
class FluxSwitch(SwitchDevice):
    """Representation of a Flux switch.

    While the switch is on, it periodically interpolates a color
    temperature between the configured start, sunset and stop values and
    pushes it to the configured lights in xy, RGB or mired mode.
    """

    def __init__(self, name, hass, lights, start_time, stop_time,
                 start_colortemp, sunset_colortemp, stop_colortemp,
                 brightness, disable_brightness_adjust, mode, interval,
                 transition):
        """Initialize the Flux switch."""
        self._name = name
        self.hass = hass
        self._lights = lights
        self._start_time = start_time
        self._stop_time = stop_time
        self._start_colortemp = start_colortemp
        self._sunset_colortemp = sunset_colortemp
        self._stop_colortemp = stop_colortemp
        self._brightness = brightness
        self._disable_brightness_adjust = disable_brightness_adjust
        self._mode = mode
        self._interval = interval
        self._transition = transition
        # Unsubscribe callback of the interval tracker while the switch is
        # on; None doubles as the "switch is off" marker (see is_on).
        self.unsub_tracker = None

    @property
    def name(self):
        """Return the name of the device if any."""
        return self._name

    @property
    def is_on(self):
        """Return true if switch is on."""
        return self.unsub_tracker is not None

    async def async_turn_on(self, **kwargs):
        """Turn on flux."""
        if self.is_on:
            return

        self.unsub_tracker = async_track_time_interval(
            self.hass,
            self.async_flux_update,
            datetime.timedelta(seconds=self._interval))

        # Make initial update
        await self.async_flux_update()

        self.async_schedule_update_ha_state()

    async def async_turn_off(self, **kwargs):
        """Turn off flux."""
        if self.is_on:
            self.unsub_tracker()
            self.unsub_tracker = None

        self.async_schedule_update_ha_state()

    async def async_flux_update(self, utcnow=None):
        """Update all the lights using flux.

        Computes how far the current local time is through the day
        (start -> sunset) or night (sunset -> stop) segment and linearly
        interpolates the color temperature accordingly.
        """
        if utcnow is None:
            utcnow = dt_utcnow()

        now = as_local(utcnow)
        sunset = get_astral_event_date(self.hass, SUN_EVENT_SUNSET, now.date())
        start_time = self.find_start_time(now)
        stop_time = self.find_stop_time(now)

        if stop_time <= start_time:
            # stop_time does not happen in the same day as start_time
            if start_time < now:
                # stop time is tomorrow
                stop_time += datetime.timedelta(days=1)
        elif now < start_time:
            # stop_time was yesterday since the new start_time is not reached
            stop_time -= datetime.timedelta(days=1)

        if start_time < now < sunset:
            # Daytime
            time_state = 'day'
            temp_range = abs(self._start_colortemp - self._sunset_colortemp)
            day_length = int(sunset.timestamp() - start_time.timestamp())
            seconds_from_start = int(now.timestamp() - start_time.timestamp())
            percentage_complete = seconds_from_start / day_length
            temp_offset = temp_range * percentage_complete
            # Walk from start toward sunset temperature in either direction.
            if self._start_colortemp > self._sunset_colortemp:
                temp = self._start_colortemp - temp_offset
            else:
                temp = self._start_colortemp + temp_offset
        else:
            # Night time
            time_state = 'night'

            if now < stop_time:
                if stop_time < start_time and stop_time.day == sunset.day:
                    # we need to use yesterday's sunset time
                    sunset_time = sunset - datetime.timedelta(days=1)
                else:
                    sunset_time = sunset

                night_length = int(stop_time.timestamp() -
                                   sunset_time.timestamp())
                seconds_from_sunset = int(now.timestamp() -
                                          sunset_time.timestamp())
                percentage_complete = seconds_from_sunset / night_length
            else:
                # Past stop time: hold the final (stop) color temperature.
                percentage_complete = 1

            temp_range = abs(self._sunset_colortemp - self._stop_colortemp)
            temp_offset = temp_range * percentage_complete
            if self._sunset_colortemp > self._stop_colortemp:
                temp = self._sunset_colortemp - temp_offset
            else:
                temp = self._sunset_colortemp + temp_offset
        rgb = color_temperature_to_rgb(temp)
        x_val, y_val, b_val = color_RGB_to_xy_brightness(*rgb)
        # Configured brightness wins; otherwise use the brightness derived
        # from the color conversion.  None disables adjustment entirely.
        brightness = self._brightness if self._brightness else b_val
        if self._disable_brightness_adjust:
            brightness = None
        if self._mode == MODE_XY:
            await async_set_lights_xy(self.hass, self._lights, x_val,
                                      y_val, brightness, self._transition)
            _LOGGER.info("Lights updated to x:%s y:%s brightness:%s, %s%% "
                         "of %s cycle complete at %s", x_val, y_val,
                         brightness, round(
                             percentage_complete * 100), time_state, now)
        elif self._mode == MODE_RGB:
            await async_set_lights_rgb(self.hass, self._lights, rgb,
                                       self._transition)
            _LOGGER.info("Lights updated to rgb:%s, %s%% "
                         "of %s cycle complete at %s", rgb,
                         round(percentage_complete * 100), time_state, now)
        else:
            # Convert to mired and clamp to allowed values
            mired = color_temperature_kelvin_to_mired(temp)
            await async_set_lights_temp(self.hass, self._lights, mired,
                                        brightness, self._transition)
            _LOGGER.info("Lights updated to mired:%s brightness:%s, %s%% "
                         "of %s cycle complete at %s", mired, brightness,
                         round(percentage_complete * 100), time_state, now)

    def find_start_time(self, now):
        """Return sunrise or start_time if given."""
        if self._start_time:
            sunrise = now.replace(
                hour=self._start_time.hour, minute=self._start_time.minute,
                second=0)
        else:
            sunrise = get_astral_event_date(self.hass, SUN_EVENT_SUNRISE,
                                            now.date())
        return sunrise

    def find_stop_time(self, now):
        """Return dusk or stop_time if given."""
        if self._stop_time:
            dusk = now.replace(
                hour=self._stop_time.hour, minute=self._stop_time.minute,
                second=0)
        else:
            dusk = get_astral_event_date(self.hass, 'dusk', now.date())
        return dusk
| apache-2.0 |
cgstudiomap/cgstudiomap | main/eggs/python_stdnum-1.2-py2.7.egg/stdnum/iso6346.py | 1 | 2906 | # iso6346.py - functions for handling ISO 6346
#
# Copyright (C) 2014 Openlabs Technologies & Consulting (P) Limited
# Copyright (C) 2014 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""ISO 6346 (International standard for container identification)
ISO 6346 is an international standard covering the coding, identification and
marking of intermodal (shipping) containers used within containerized
intermodal freight transport. The standard establishes a visual identification
system for every container that includes a unique serial number (with check
digit), the owner, a country code, a size, type and equipment category as well
as any operational marks. The standard is managed by the International
Container Bureau (BIC).
>>> validate('csqu3054383')
'CSQU3054383'
>>> validate('CSQU3054384')
Traceback (most recent call last):
...
InvalidChecksum: ...
"""
import re
from stdnum.exceptions import InvalidChecksum, InvalidFormat, InvalidLength, \
ValidationError
from stdnum.util import clean
# Owner code (3 characters) + equipment category (U, J, Z or R) +
# 6-digit serial + 1-digit check digit.  Note \w{3} is broader than the
# strict A-Z owner code defined by the standard.
_iso6346_re = re.compile(r'^\w{3}(U|J|Z|R)\d{7}$')
def compact(number):
    """Return the canonical form of the number: valid separators and
    surrounding whitespace removed, letters upper-cased."""
    cleaned = clean(number).strip()
    return cleaned.upper()
def calc_check_digit(number):
    """Return the check digit for the 10 character owner code plus
    serial number.

    Character values are position-weighted by powers of two and reduced
    modulo 11; the gaps (spaces) in the alphabet make letter values skip
    multiples of 11, as ISO 6346 requires.
    """
    alphabet = '0123456789A BCDEFGHIJK LMNOPQRSTU VWXYZ'
    total = 0
    for position, char in enumerate(compact(number)):
        total += alphabet.index(char) * 2 ** position
    return str(total % 11)
def validate(number):
    """Check the given number against ISO 6346 and return its compact
    form; raises a ValidationError subclass on failure."""
    number = compact(number)
    if len(number) != 11:
        raise InvalidLength()
    if _iso6346_re.match(number) is None:
        raise InvalidFormat()
    if number[-1] != calc_check_digit(number[:-1]):
        raise InvalidChecksum()
    return number
def is_valid(number):
    """Return True when the number conforms to ISO 6346.  Unlike
    validate(), no ValidationError is ever raised."""
    try:
        validate(number)
    except ValidationError:
        return False
    return True
benfinke/ns_python | build/lib/nssrc/com/citrix/netscaler/nitro/resource/config/ns/nslimitsessions_args.py | 3 | 1607 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class nslimitsessions_args :
    ur""" Provides additional arguments required for fetching the nslimitsessions resource.
    """
    def __init__(self) :
        # Name of the rate limit identifier whose sessions are fetched.
        self._limitidentifier = ""
        # Whether to include the individual hash values in the output.
        self._detail = False

    @property
    def limitidentifier(self) :
        ur"""Name of the rate limit identifier for which to display the sessions.<br/>Minimum length = 1.
        """
        try :
            return self._limitidentifier
        except Exception as e:
            raise e

    @limitidentifier.setter
    def limitidentifier(self, limitidentifier) :
        ur"""Name of the rate limit identifier for which to display the sessions.<br/>Minimum length = 1
        """
        try :
            # No client-side validation: the NITRO API enforces constraints.
            self._limitidentifier = limitidentifier
        except Exception as e:
            raise e

    @property
    def detail(self) :
        ur"""Show the individual hash values.
        """
        try :
            return self._detail
        except Exception as e:
            raise e

    @detail.setter
    def detail(self, detail) :
        ur"""Show the individual hash values.
        """
        try :
            self._detail = detail
        except Exception as e:
            raise e
| apache-2.0 |
scigghia/account-payment | __unported__/account_voucher_supplier_invoice_number/voucher.py | 13 | 2976 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013 Agile Business Group sagl
# (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
from tools.translate import _
class voucher_line(orm.Model):
    # Extends account.voucher.line with a read-only function field exposing
    # the supplier invoice number of the linked move line's invoice.
    _inherit = 'account.voucher.line'

    def get_suppl_inv_num(self, cr, uid, move_line_id, context=None):
        # Resolve the supplier invoice number via the move line's invoice;
        # returns '' when the move line has no invoice attached.
        move_line = self.pool.get('account.move.line').browse(cr, uid, move_line_id, context)
        return (move_line.invoice and move_line.invoice.supplier_invoice_number or '')

    def _get_supplier_invoice_number(self, cr, uid, ids, name, args, context=None):
        # Function-field getter: maps each voucher line id to its supplier
        # invoice number ('' when the line is not tied to a move line).
        res={}
        for line in self.browse(cr, uid, ids, context):
            res[line.id] = ''
            if line.move_line_id:
                res[line.id] = self.get_suppl_inv_num(cr, uid,
                    line.move_line_id.id, context=context)
        return res

    _columns = {
        'supplier_invoice_number': fields.function(_get_supplier_invoice_number,
            type='char', size=64, string="Supplier Invoice Number"),
    }
class voucher(orm.Model):
    _inherit = 'account.voucher'

    def recompute_voucher_lines(self, cr, uid, ids, partner_id, journal_id, price,
                                currency_id, ttype, date, context=None):
        # Extend the standard recomputation so every proposed credit and
        # debit line also carries the supplier invoice number of its
        # originating move line.
        res = super(voucher,self).recompute_voucher_lines(cr, uid, ids, partner_id,
                                                          journal_id, price,
                                                          currency_id, ttype, date, context=context)
        line_obj = self.pool.get('account.voucher.line')
        if res.get('value') and res['value'].get('line_cr_ids'):
            for vals in res['value']['line_cr_ids']:
                if vals.get('move_line_id'):
                    vals['supplier_invoice_number'] = line_obj.get_suppl_inv_num(
                        cr, uid, vals['move_line_id'], context=context)
        if res.get('value') and res['value'].get('line_dr_ids'):
            for vals in res['value']['line_dr_ids']:
                if vals.get('move_line_id'):
                    vals['supplier_invoice_number'] = line_obj.get_suppl_inv_num(
                        cr, uid, vals['move_line_id'], context=context)
        return res
| agpl-3.0 |
Shrhawk/edx-platform | common/lib/xmodule/xmodule/tests/test_lti_unit.py | 98 | 22100 | # -*- coding: utf-8 -*-
"""Test for LTI Xmodule functional logic."""
import datetime
from django.utils.timezone import UTC
from mock import Mock, patch, PropertyMock
import textwrap
from lxml import etree
from webob.request import Request
from copy import copy
import urllib
from xmodule.fields import Timedelta
from xmodule.lti_module import LTIDescriptor
from xmodule.lti_2_util import LTIError
from . import LogicTest
class LTIModuleTest(LogicTest):
    """Logic tests for LTI module.

    Exercises the grade-passback (Basic Outcomes) handler, OAuth body
    signature verification, and the LTI launch parameter builders using
    mocked runtime/system objects.
    """
    descriptor_class = LTIDescriptor

    def setUp(self):
        super(LTIModuleTest, self).setUp()
        self.environ = {'wsgi.url_scheme': 'http', 'REQUEST_METHOD': 'POST'}

        # Template of the IMS POX grade-replace request body posted by a
        # Tool Provider; placeholders are filled by get_request_body().
        self.request_body_xml_template = textwrap.dedent("""
            <?xml version = "1.0" encoding = "UTF-8"?>
            <imsx_POXEnvelopeRequest xmlns = "{namespace}">
              <imsx_POXHeader>
                <imsx_POXRequestHeaderInfo>
                  <imsx_version>V1.0</imsx_version>
                  <imsx_messageIdentifier>{messageIdentifier}</imsx_messageIdentifier>
                </imsx_POXRequestHeaderInfo>
              </imsx_POXHeader>
              <imsx_POXBody>
                <{action}>
                  <resultRecord>
                    <sourcedGUID>
                      <sourcedId>{sourcedId}</sourcedId>
                    </sourcedGUID>
                    <result>
                      <resultScore>
                        <language>en-us</language>
                        <textString>{grade}</textString>
                      </resultScore>
                    </result>
                  </resultRecord>
                </{action}>
              </imsx_POXBody>
            </imsx_POXEnvelopeRequest>
            """)
        self.system.get_real_user = Mock()
        self.system.publish = Mock()
        self.system.rebind_noauth_module_to_user = Mock()

        self.user_id = self.xmodule.runtime.anonymous_student_id
        self.lti_id = self.xmodule.lti_id
        self.unquoted_resource_link_id = u'{}-i4x-2-3-lti-31de800015cf4afb973356dbe81496df'.format(self.xmodule.runtime.hostname)

        # sourcedId format: <lti_id>:<resource_link_id>:<user_id>, each
        # part URL-quoted individually.
        sourced_id = u':'.join(urllib.quote(i) for i in (self.lti_id, self.unquoted_resource_link_id, self.user_id))

        self.defaults = {
            'namespace': "http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0",
            'sourcedId': sourced_id,
            'action': 'replaceResultRequest',
            'grade': 0.5,
            'messageIdentifier': '528243ba5241b',
        }

        self.xmodule.due = None
        self.xmodule.graceperiod = None

    def get_request_body(self, params=None):
        """Fetches the body of a request specified by params"""
        if params is None:
            params = {}
        data = copy(self.defaults)

        data.update(params)
        return self.request_body_xml_template.format(**data)

    def get_response_values(self, response):
        """Gets the values from the given response"""
        parser = etree.XMLParser(ns_clean=True, recover=True, encoding='utf-8')
        root = etree.fromstring(response.body.strip(), parser=parser)
        lti_spec_namespace = "http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0"
        namespaces = {'def': lti_spec_namespace}

        code_major = root.xpath("//def:imsx_codeMajor", namespaces=namespaces)[0].text
        description = root.xpath("//def:imsx_description", namespaces=namespaces)[0].text
        message_identifier = root.xpath("//def:imsx_messageIdentifier", namespaces=namespaces)[0].text
        imsx_pox_body = root.xpath("//def:imsx_POXBody", namespaces=namespaces)[0]

        try:
            # The action name is the tag of the first POXBody child with
            # the LTI namespace prefix stripped; missing on error replies.
            action = imsx_pox_body.getchildren()[0].tag.replace('{' + lti_spec_namespace + '}', '')
        except Exception:  # pylint: disable=broad-except
            action = None

        return {
            'code_major': code_major,
            'description': description,
            'messageIdentifier': message_identifier,
            'action': action
        }

    @patch('xmodule.lti_module.LTIModule.get_client_key_secret', return_value=('test_client_key', u'test_client_secret'))
    def test_authorization_header_not_present(self, _get_key_secret):
        """
        Request has no Authorization header.

        This is an unknown service request, i.e., it is not a part of the original service specification.
        """
        request = Request(self.environ)
        request.body = self.get_request_body()
        response = self.xmodule.grade_handler(request, '')
        real_response = self.get_response_values(response)
        expected_response = {
            'action': None,
            'code_major': 'failure',
            'description': 'OAuth verification error: Malformed authorization header',
            'messageIdentifier': self.defaults['messageIdentifier'],
        }

        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(expected_response, real_response)

    @patch('xmodule.lti_module.LTIModule.get_client_key_secret', return_value=('test_client_key', u'test_client_secret'))
    def test_authorization_header_empty(self, _get_key_secret):
        """
        Request Authorization header has no value.

        This is an unknown service request, i.e., it is not a part of the original service specification.
        """
        request = Request(self.environ)
        request.authorization = "bad authorization header"
        request.body = self.get_request_body()
        response = self.xmodule.grade_handler(request, '')
        real_response = self.get_response_values(response)
        expected_response = {
            'action': None,
            'code_major': 'failure',
            'description': 'OAuth verification error: Malformed authorization header',
            'messageIdentifier': self.defaults['messageIdentifier'],
        }
        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(expected_response, real_response)

    def test_real_user_is_none(self):
        """
        If we have no real user, we should send back failure response.
        """
        self.xmodule.verify_oauth_body_sign = Mock()
        self.xmodule.has_score = True
        self.system.get_real_user = Mock(return_value=None)
        request = Request(self.environ)
        request.body = self.get_request_body()
        response = self.xmodule.grade_handler(request, '')
        real_response = self.get_response_values(response)
        expected_response = {
            'action': None,
            'code_major': 'failure',
            'description': 'User not found.',
            'messageIdentifier': self.defaults['messageIdentifier'],
        }
        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(expected_response, real_response)

    def test_grade_past_due(self):
        """
        Should fail if we do not accept past due grades, and it is past due.
        """
        self.xmodule.accept_grades_past_due = False
        self.xmodule.due = datetime.datetime.now(UTC())
        self.xmodule.graceperiod = Timedelta().from_json("0 seconds")
        request = Request(self.environ)
        request.body = self.get_request_body()
        response = self.xmodule.grade_handler(request, '')
        real_response = self.get_response_values(response)
        expected_response = {
            'action': None,
            'code_major': 'failure',
            'description': 'Grade is past due',
            'messageIdentifier': 'unknown',
        }
        self.assertEqual(response.status_code, 200)
        self.assertEqual(expected_response, real_response)

    def test_grade_not_in_range(self):
        """
        Grade returned from Tool Provider is outside the range 0.0-1.0.
        """
        self.xmodule.verify_oauth_body_sign = Mock()
        request = Request(self.environ)
        request.body = self.get_request_body(params={'grade': '10'})
        response = self.xmodule.grade_handler(request, '')
        real_response = self.get_response_values(response)
        expected_response = {
            'action': None,
            'code_major': 'failure',
            'description': 'Request body XML parsing error: score value outside the permitted range of 0-1.',
            'messageIdentifier': 'unknown',
        }
        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(expected_response, real_response)

    def test_bad_grade_decimal(self):
        """
        Grade returned from Tool Provider doesn't use a period as the decimal point.
        """
        self.xmodule.verify_oauth_body_sign = Mock()
        request = Request(self.environ)
        request.body = self.get_request_body(params={'grade': '0,5'})
        response = self.xmodule.grade_handler(request, '')
        real_response = self.get_response_values(response)
        expected_response = {
            'action': None,
            'code_major': 'failure',
            'description': 'Request body XML parsing error: invalid literal for float(): 0,5',
            'messageIdentifier': 'unknown',
        }
        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(expected_response, real_response)

    def test_unsupported_action(self):
        """
        Action returned from Tool Provider isn't supported.

        `replaceResultRequest` is supported only.
        """
        self.xmodule.verify_oauth_body_sign = Mock()
        request = Request(self.environ)
        request.body = self.get_request_body({'action': 'wrongAction'})
        response = self.xmodule.grade_handler(request, '')
        real_response = self.get_response_values(response)
        expected_response = {
            'action': None,
            'code_major': 'unsupported',
            'description': 'Target does not support the requested operation.',
            'messageIdentifier': self.defaults['messageIdentifier'],
        }
        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(expected_response, real_response)

    def test_good_request(self):
        """
        Response from Tool Provider is correct.
        """
        self.xmodule.verify_oauth_body_sign = Mock()
        self.xmodule.has_score = True
        request = Request(self.environ)
        request.body = self.get_request_body()
        response = self.xmodule.grade_handler(request, '')
        description_expected = 'Score for {sourcedId} is now {score}'.format(
            sourcedId=self.defaults['sourcedId'],
            score=self.defaults['grade'],
        )
        real_response = self.get_response_values(response)
        expected_response = {
            'action': 'replaceResultResponse',
            'code_major': 'success',
            'description': description_expected,
            'messageIdentifier': self.defaults['messageIdentifier'],
        }

        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(expected_response, real_response)
        self.assertEqual(self.xmodule.module_score, float(self.defaults['grade']))

    def test_user_id(self):
        expected_user_id = unicode(urllib.quote(self.xmodule.runtime.anonymous_student_id))
        real_user_id = self.xmodule.get_user_id()
        self.assertEqual(real_user_id, expected_user_id)

    def test_outcome_service_url(self):
        mock_url_prefix = 'https://hostname/'
        test_service_name = "test_service"

        def mock_handler_url(block, handler_name, **kwargs):  # pylint: disable=unused-argument
            """Mock function for returning fully-qualified handler urls"""
            return mock_url_prefix + handler_name

        self.xmodule.runtime.handler_url = Mock(side_effect=mock_handler_url)
        real_outcome_service_url = self.xmodule.get_outcome_service_url(service_name=test_service_name)
        self.assertEqual(real_outcome_service_url, mock_url_prefix + test_service_name)

    def test_resource_link_id(self):
        with patch('xmodule.lti_module.LTIModule.location', new_callable=PropertyMock):
            self.xmodule.location.html_id = lambda: 'i4x-2-3-lti-31de800015cf4afb973356dbe81496df'
            expected_resource_link_id = unicode(urllib.quote(self.unquoted_resource_link_id))
            real_resource_link_id = self.xmodule.get_resource_link_id()
            self.assertEqual(real_resource_link_id, expected_resource_link_id)

    def test_lis_result_sourcedid(self):
        expected_sourced_id = u':'.join(urllib.quote(i) for i in (
            self.system.course_id.to_deprecated_string(),
            self.xmodule.get_resource_link_id(),
            self.user_id
        ))
        real_lis_result_sourcedid = self.xmodule.get_lis_result_sourcedid()
        self.assertEqual(real_lis_result_sourcedid, expected_sourced_id)

    def test_client_key_secret(self):
        """
        LTI module gets client key and secret provided.
        """
        # this adds lti passports to system
        mocked_course = Mock(lti_passports=['lti_id:test_client:test_secret'])
        modulestore = Mock()
        modulestore.get_course.return_value = mocked_course
        runtime = Mock(modulestore=modulestore)
        self.xmodule.descriptor.runtime = runtime
        self.xmodule.lti_id = "lti_id"
        key, secret = self.xmodule.get_client_key_secret()
        expected = ('test_client', 'test_secret')
        self.assertEqual(expected, (key, secret))

    def test_client_key_secret_not_provided(self):
        """
        LTI module attempts to get client key and secret provided in cms.

        There are key and secret but not for specific LTI.
        """
        # this adds lti passports to system
        mocked_course = Mock(lti_passports=['test_id:test_client:test_secret'])
        modulestore = Mock()
        modulestore.get_course.return_value = mocked_course
        runtime = Mock(modulestore=modulestore)
        self.xmodule.descriptor.runtime = runtime
        # set another lti_id
        self.xmodule.lti_id = "another_lti_id"
        key_secret = self.xmodule.get_client_key_secret()
        expected = ('', '')
        self.assertEqual(expected, key_secret)

    def test_bad_client_key_secret(self):
        """
        LTI module attempts to get client key and secret provided in cms.

        There are key and secret provided in wrong format.
        """
        # this adds lti passports to system
        mocked_course = Mock(lti_passports=['test_id_test_client_test_secret'])
        modulestore = Mock()
        modulestore.get_course.return_value = mocked_course
        runtime = Mock(modulestore=modulestore)
        self.xmodule.descriptor.runtime = runtime
        self.xmodule.lti_id = 'lti_id'
        with self.assertRaises(LTIError):
            self.xmodule.get_client_key_secret()

    @patch('xmodule.lti_module.signature.verify_hmac_sha1', Mock(return_value=True))
    @patch('xmodule.lti_module.LTIModule.get_client_key_secret', Mock(return_value=('test_client_key', u'test_client_secret')))
    def test_successful_verify_oauth_body_sign(self):
        """
        Test if OAuth signing was successful.
        """
        self.xmodule.verify_oauth_body_sign(self.get_signed_grade_mock_request())

    @patch('xmodule.lti_module.LTIModule.get_outcome_service_url', Mock(return_value=u'https://testurl/'))
    @patch('xmodule.lti_module.LTIModule.get_client_key_secret',
           Mock(return_value=(u'__consumer_key__', u'__lti_secret__')))
    def test_failed_verify_oauth_body_sign_proxy_mangle_url(self):
        """
        Oauth signing verify fail.
        """
        request = self.get_signed_grade_mock_request_with_correct_signature()
        self.xmodule.verify_oauth_body_sign(request)
        # we should verify against get_outcome_service_url not
        # request url proxy and load balancer along the way may
        # change url presented to the method
        request.url = 'http://testurl/'
        self.xmodule.verify_oauth_body_sign(request)

    def get_signed_grade_mock_request_with_correct_signature(self):
        """
        Generate a proper LTI request object
        """
        mock_request = Mock()
        mock_request.headers = {
            'X-Requested-With': 'XMLHttpRequest',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Authorization': (
                u'OAuth realm="https://testurl/", oauth_body_hash="wwzA3s8gScKD1VpJ7jMt9b%2BMj9Q%3D",'
                'oauth_nonce="18821463", oauth_timestamp="1409321145", '
                'oauth_consumer_key="__consumer_key__", oauth_signature_method="HMAC-SHA1", '
                'oauth_version="1.0", oauth_signature="fHsE1hhIz76/msUoMR3Lyb7Aou4%3D"'
            )
        }
        mock_request.url = u'https://testurl'
        mock_request.http_method = u'POST'
        mock_request.method = mock_request.http_method
        mock_request.body = (
            '<?xml version=\'1.0\' encoding=\'utf-8\'?>\n'
            '<imsx_POXEnvelopeRequest xmlns="http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0">'
            '<imsx_POXHeader><imsx_POXRequestHeaderInfo><imsx_version>V1.0</imsx_version>'
            '<imsx_messageIdentifier>edX_fix</imsx_messageIdentifier></imsx_POXRequestHeaderInfo>'
            '</imsx_POXHeader><imsx_POXBody><replaceResultRequest><resultRecord><sourcedGUID>'
            '<sourcedId>MITxLTI/MITxLTI/201x:localhost%3A8000-i4x-MITxLTI-MITxLTI-lti-3751833a214a4f66a0d18f63234207f2:363979ef768ca171b50f9d1bfb322131</sourcedId>'
            '</sourcedGUID><result><resultScore><language>en</language><textString>0.32</textString></resultScore>'
            '</result></resultRecord></replaceResultRequest></imsx_POXBody></imsx_POXEnvelopeRequest>'
        )
        return mock_request

    def test_wrong_xml_namespace(self):
        """
        Test wrong XML Namespace.

        Tests that tool provider returned grade back with wrong XML Namespace.
        """
        with self.assertRaises(IndexError):
            mocked_request = self.get_signed_grade_mock_request(namespace_lti_v1p1=False)
            self.xmodule.parse_grade_xml_body(mocked_request.body)

    def test_parse_grade_xml_body(self):
        """
        Test XML request body parsing.

        Tests that xml body was parsed successfully.
        """
        mocked_request = self.get_signed_grade_mock_request()
        message_identifier, sourced_id, grade, action = self.xmodule.parse_grade_xml_body(mocked_request.body)
        self.assertEqual(self.defaults['messageIdentifier'], message_identifier)
        self.assertEqual(self.defaults['sourcedId'], sourced_id)
        self.assertEqual(self.defaults['grade'], grade)
        self.assertEqual(self.defaults['action'], action)

    @patch('xmodule.lti_module.signature.verify_hmac_sha1', Mock(return_value=False))
    @patch('xmodule.lti_module.LTIModule.get_client_key_secret', Mock(return_value=('test_client_key', u'test_client_secret')))
    def test_failed_verify_oauth_body_sign(self):
        """
        Oauth signing verify fail.
        """
        with self.assertRaises(LTIError):
            req = self.get_signed_grade_mock_request()
            self.xmodule.verify_oauth_body_sign(req)

    def get_signed_grade_mock_request(self, namespace_lti_v1p1=True):
        """
        Example of signed request from LTI Provider.

        When `namespace_v1p0` is set to True then the default namespase from
        LTI 1.1 will be used. Otherwise fake namespace will be added to XML.
        """
        mock_request = Mock()
        mock_request.headers = {  # pylint: disable=no-space-before-operator
            'X-Requested-With': 'XMLHttpRequest',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Authorization': u'OAuth oauth_nonce="135685044251684026041377608307", \
                oauth_timestamp="1234567890", oauth_version="1.0", \
                oauth_signature_method="HMAC-SHA1", \
                oauth_consumer_key="test_client_key", \
                oauth_signature="my_signature%3D", \
                oauth_body_hash="JEpIArlNCeV4ceXxric8gJQCnBw="'
        }
        mock_request.url = u'http://testurl'
        mock_request.http_method = u'POST'
        params = {}
        if not namespace_lti_v1p1:
            params = {
                'namespace': "http://www.fakenamespace.com/fake"
            }
        mock_request.body = self.get_request_body(params)
        return mock_request

    def test_good_custom_params(self):
        """
        Custom parameters are presented in right format.
        """
        self.xmodule.custom_parameters = ['test_custom_params=test_custom_param_value']
        self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', 'test_client_secret'))
        self.xmodule.oauth_params = Mock()
        self.xmodule.get_input_fields()
        self.xmodule.oauth_params.assert_called_with(
            {u'custom_test_custom_params': u'test_custom_param_value'},
            'test_client_key', 'test_client_secret'
        )

    def test_bad_custom_params(self):
        """
        Custom parameters are presented in wrong format.
        """
        bad_custom_params = ['test_custom_params: test_custom_param_value']
        self.xmodule.custom_parameters = bad_custom_params
        self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', 'test_client_secret'))
        self.xmodule.oauth_params = Mock()
        with self.assertRaises(LTIError):
            self.xmodule.get_input_fields()

    def test_max_score(self):
        self.xmodule.weight = 100.0
        self.assertFalse(self.xmodule.has_score)
        self.assertEqual(self.xmodule.max_score(), None)
        self.xmodule.has_score = True
        self.assertEqual(self.xmodule.max_score(), 100.0)

    def test_context_id(self):
        """
        Tests that LTI parameter context_id is equal to course_id.
        """
        self.assertEqual(self.system.course_id.to_deprecated_string(), self.xmodule.context_id)
| agpl-3.0 |
phecy/Deep-Neural-Network-Ranking-Dropout | train-ranknet/ordereddict.py | 85 | 8822 | # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as for regular dictionaries.
    # The internal self.__map dictionary maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # Each link is stored as a list of length three: [PREV, NEXT, KEY].
    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary. Signature is the same as for
        regular dictionaries, but keyword arguments are not recommended
        because their insertion order is arbitrary.
        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            self.__root
        except AttributeError:
            # First initialization: create the circular sentinel and key map.
            self.__root = root = [] # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)
    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link which goes at the end of the linked
        # list, and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            root = self.__root
            last = root[0]
            last[1] = root[0] = self.__map[key] = [last, root, key]
        dict_setitem(self, key, value)
    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which is
        # then removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link_prev, link_next, key = self.__map.pop(key)
        link_prev[1] = link_next
        link_next[0] = link_prev
    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Walk forward from the sentinel, yielding each link's KEY slot.
        root = self.__root
        curr = root[1]
        while curr is not root:
            yield curr[2]
            curr = curr[1]
    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        root = self.__root
        curr = root[0]
        while curr is not root:
            yield curr[2]
            curr = curr[0]
    def clear(self):
        'od.clear() -> None. Remove all items from od.'
        try:
            # Break the link cycles so the garbage collector is not needed.
            for node in self.__map.itervalues():
                del node[:]
            root = self.__root
            root[:] = [root, root, None]
            self.__map.clear()
        except AttributeError:
            # Instance never went through __init__ (no __map) -- nothing to unlink.
            pass
        dict.clear(self)
    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.
        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            link = root[0]
            link_prev = link[0]
            link_prev[1] = root
            root[0] = link_prev
        else:
            link = root[1]
            link_next = link[1]
            root[1] = link_next
            link_next[0] = root
        key = link[2]
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value
    # -- the following methods do not depend on the internal structure --
    def keys(self):
        'od.keys() -> list of keys in od'
        return list(self)
    def values(self):
        'od.values() -> list of values in od'
        return [self[key] for key in self]
    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]
    def iterkeys(self):
        'od.iterkeys() -> an iterator over the keys in od'
        return iter(self)
    def itervalues(self):
        'od.itervalues -> an iterator over the values in od'
        for k in self:
            yield self[k]
    def iteritems(self):
        'od.iteritems -> an iterator over the (key, value) items in od'
        for k in self:
            yield (k, self[k])
    def update(*args, **kwds):
        '''od.update(E, **F) -> None. Update od from dict/iterable E and F.
        If E is a dict instance, does: for k in E: od[k] = E[k]
        If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
        Or if E is an iterable of items, does: for k, v in E: od[k] = v
        In either case, this is followed by: for k, v in F.items(): od[k] = v
        '''
        # "self" is taken from positional args so that a keyword argument
        # literally named "self" can still be stored as a dictionary key.
        if len(args) > 2:
            raise TypeError('update() takes at most 2 positional '
                            'arguments (%d given)' % (len(args),))
        elif not args:
            raise TypeError('update() takes at least 1 argument (0 given)')
        self = args[0]
        # Make progressively weaker assumptions about "other"
        other = ()
        if len(args) == 2:
            other = args[1]
        if isinstance(other, dict):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, 'keys'):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value
    __update = update # let subclasses override update without breaking __init__
    __marker = object()
    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.
        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default
    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default
    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # _repr_running guards against infinite recursion when an
        # OrderedDict (directly or indirectly) contains itself; keyed by
        # (object id, thread id) so threads do not interfere.
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]
    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)
    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)
    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
        and values equal to v (which defaults to None).
        '''
        d = cls()
        for key in iterable:
            d[key] = value
        return d
    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.
        '''
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and self.items() == other.items()
        return dict.__eq__(self, other)
    def __ne__(self, other):
        return not self == other
    # -- the following methods are only used in Python 2.7 --
    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)
    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)
    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
| gpl-3.0 |
apache/qpid-proton | python/proton/_io.py | 3 | 5423 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
import errno
import socket
import select
import time
PN_INVALID_SOCKET = -1
class IO(object):
    """Thin static wrappers over the stdlib socket/select primitives.

    Every socket created or accepted here is made non-blocking with
    Nagle's algorithm disabled (TCP_NODELAY) via _setupsocket().
    """
    @staticmethod
    def _setupsocket(s):
        # Common setup: disable Nagle and switch to non-blocking mode.
        s.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, True)
        s.setblocking(False)
    @staticmethod
    def close(s):
        s.close()
    @staticmethod
    def listen(host, port):
        # Create a listening TCP socket (backlog 10) with address reuse.
        s = socket.socket()
        IO._setupsocket(s)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
        s.bind((host, port))
        s.listen(10)
        return s
    @staticmethod
    def accept(s):
        # Returns the (conn, address) pair; the new conn is set up too.
        n = s.accept()
        IO._setupsocket(n[0])
        return n
    @staticmethod
    def connect(addr):
        # addr is a getaddrinfo()-style tuple:
        # (family, socktype, proto, canonname, sockaddr)
        s = socket.socket(addr[0], addr[1], addr[2])
        IO._setupsocket(s)
        try:
            s.connect(addr[4])
        except socket.error as e:
            # A non-blocking connect legitimately reports "in progress".
            if e.errno not in (errno.EINPROGRESS, errno.EWOULDBLOCK, errno.EAGAIN):
                raise
        return s
    @staticmethod
    def select(*args, **kwargs):
        return select.select(*args, **kwargs)
    @staticmethod
    def sleep(t):
        time.sleep(t)
        return
class Selector(object):
    """Tracks a set of selectables' read/write interest and earliest
    deadline, and multiplexes them through IO.select()."""
    def __init__(self):
        self._selectables = set()
        self._reading = set()
        self._writing = set()
        # Earliest deadline across all tracked selectables (or None).
        self._deadline = None
    def add(self, selectable):
        self._selectables.add(selectable)
        if selectable.reading:
            self._reading.add(selectable)
        if selectable.writing:
            self._writing.add(selectable)
        if selectable.deadline:
            if self._deadline is None:
                self._deadline = selectable.deadline
            else:
                self._deadline = min(selectable.deadline, self._deadline)
    def remove(self, selectable):
        self._selectables.discard(selectable)
        self._reading.discard(selectable)
        self._writing.discard(selectable)
        # NOTE(review): _deadline is not reset to None before recomputing,
        # so removing the selectable that held the earliest deadline can
        # leave a stale (too-early) deadline until the next select(),
        # which does reset it -- confirm this is intentional.
        self.update_deadline()
    @property
    def selectables(self):
        # Number of selectables currently tracked.
        return len(self._selectables)
    def update_deadline(self):
        # Fold the remaining selectables' deadlines into self._deadline.
        for sel in self._selectables:
            if sel.deadline:
                if self._deadline is None:
                    self._deadline = sel.deadline
                else:
                    self._deadline = min(sel.deadline, self._deadline)
    def update(self, selectable):
        # Re-derive the read/write interest of one selectable.
        self._reading.discard(selectable)
        self._writing.discard(selectable)
        if selectable.reading:
            self._reading.add(selectable)
        if selectable.writing:
            self._writing.add(selectable)
        self.update_deadline()
    def select(self, timeout):
        """Wait for readiness; return (readable, writable, timed_out)."""
        def select_inner(timeout):
            # This inner select adds the writing fds to the exception fd set
            # because Windows returns connected fds in the exception set not the
            # writable set
            r = self._reading
            w = self._writing
            now = time.time()
            # No timeout or deadline
            if timeout is None and self._deadline is None:
                return IO.select(r, w, w)
            if timeout is None:
                t = max(0, self._deadline - now)
                return IO.select(r, w, w, t)
            if self._deadline is None:
                return IO.select(r, w, w, timeout)
            t = max(0, min(timeout, self._deadline - now))
            if len(r) == 0 and len(w) == 0:
                if t > 0:
                    IO.sleep(t)
                return ([], [], [])
            return IO.select(r, w, w, t)
        # Need to allow for signals interrupting us on Python 2
        # In this case the signal handler could have messed up our internal state
        # so don't retry just return with no handles.
        try:
            r, w, ex = select_inner(timeout)
        except select.error as e:
            if e.errno != errno.EINTR:
                raise
            r, w, ex = ([], [], [])
        # For windows non blocking connect we get exception not writable so add exceptions to writable
        w += ex
        # Calculate timed out selectables
        now = time.time()
        t = [s for s in self._selectables if s.deadline and now > s.deadline]
        self._deadline = None
        self.update_deadline()
        return r, w, t
| apache-2.0 |
flavour/cert | modules/jsonrpclib.py | 45 | 10955 | # -*- coding: utf-8 -*-
# a port of xmlrpclib to json....
#
#
# The JSON-RPC client interface is based on the XML-RPC client
#
# Copyright (c) 1999-2002 by Secret Labs AB
# Copyright (c) 1999-2002 by Fredrik Lundh
# Copyright (c) 2006 by Matt Harrison
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# stdlib
import base64
import httplib
import types
import urllib
# third-party (bundled with web2py)
import gluon.contrib.simplejson
# Bind the short name actually used throughout this module
# (simplejson.dumps / simplejson.loads). The bare
# "import gluon.contrib.simplejson" above only binds "gluon",
# leaving "simplejson" undefined (NameError at first use).
from gluon.contrib import simplejson
__version__ = "0.0.1"
ID = 1
def _gen_id():
    """Return the next JSON-RPC request id from the module-global counter.

    NOTE(review): not thread-safe -- concurrent callers could observe
    duplicate ids; confirm whether that matters for this client.
    """
    global ID
    ID = ID + 1
    return ID
def getparser():
    """Return a (parser, unmarshaller) pair, mirroring xmlrpclib.getparser()."""
    un = Unmarshaller()
    par = Parser(un)
    return par, un
def dumps(params, methodname=None, methodresponse=None, encoding=None,
          allow_none=0):
    """Serialize a JSON-RPC request to a JSON string.

    Builds a {"method", "params", "id"} envelope when *methodname* is
    given. NOTE(review): when methodname is falsy the function falls
    through and implicitly returns None; methodresponse, encoding and
    allow_none are accepted for xmlrpclib signature compatibility but
    are never used -- confirm this is intended.
    """
    if methodname:
        request = {}
        request["method"] = methodname
        request["params"] = params
        request["id"] = _gen_id()
        # Relies on the module-level "simplejson" binding; verify the
        # import section binds that short name.
        return simplejson.dumps(request)
class Unmarshaller(object):
    """Accumulates response text via feed() and decodes it as JSON on close()."""
    def __init__(self):
        # None means "nothing received yet".
        self.data = None
    def feed(self, data):
        # Append the chunk to the internal buffer.
        if self.data is None:
            self.data = data
        else:
            self.data = self.data + data
    def close(self):
        #try to convert string to json
        # Raises if the accumulated data is not valid JSON (or is None).
        return simplejson.loads(self.data)
class Parser(object):
    """Buffers fed data and forwards it to the unmarshaller on close().

    Mirrors the xmlrpclib parser interface; no actual parsing happens
    here -- decoding is delegated to the target unmarshaller.
    """
    def __init__(self, unmarshaller):
        self._target = unmarshaller
        self.data = None
    def feed(self, data):
        if self.data is None:
            self.data = data
        else:
            self.data = self.data + data
    def close(self):
        # Hand the full buffer to the unmarshaller in one shot.
        self._target.feed(self.data)
class _Method(object):
# some magic to bind an JSON-RPC method to an RPC server.
# supports "nested" methods (e.g. examples.getStateName)
def __init__(self, send, name):
self.__send = send
self.__name = name
def __getattr__(self, name):
return _Method(self.__send, "%s.%s" % (self.__name, name))
def __call__(self, *args):
return self.__send(self.__name, args)
##
# Standard transport class for JSON-RPC over HTTP.
# <p>
# You can create custom transports by subclassing this method, and
# overriding selected methods.
class Transport:
"""Handles an HTTP transaction to an JSON-RPC server."""
# client identifier (may be overridden)
user_agent = "jsonlib.py/%s (by matt harrison)" % __version__
##
# Send a complete request, and parse the response.
#
# @param host Target host.
# @param handler Target PRC handler.
# @param request_body JSON-RPC request body.
# @param verbose Debugging flag.
# @return Parsed response.
def request(self, host, handler, request_body, verbose=0):
# issue JSON-RPC request
h = self.make_connection(host)
if verbose:
h.set_debuglevel(1)
self.send_request(h, handler, request_body)
self.send_host(h, host)
self.send_user_agent(h)
self.send_content(h, request_body)
errcode, errmsg, headers = h.getreply()
if errcode != 200:
raise ProtocolError(
host + handler,
errcode, errmsg,
headers
)
self.verbose = verbose
try:
sock = h._conn.sock
except AttributeError:
sock = None
return self._parse_response(h.getfile(), sock)
##
# Create parser.
#
# @return A 2-tuple containing a parser and a unmarshaller.
def getparser(self):
# get parser and unmarshaller
return getparser()
##
# Get authorization info from host parameter
# Host may be a string, or a (host, x509-dict) tuple; if a string,
# it is checked for a "user:pw@host" format, and a "Basic
# Authentication" header is added if appropriate.
#
# @param host Host descriptor (URL or (URL, x509 info) tuple).
# @return A 3-tuple containing (actual host, extra headers,
# x509 info). The header and x509 fields may be None.
def get_host_info(self, host):
x509 = {}
if isinstance(host, types.TupleType):
host, x509 = host
auth, host = urllib.splituser(host)
if auth:
auth = base64.encodestring(urllib.unquote(auth))
auth = string.join(string.split(auth), "") # get rid of whitespace
extra_headers = [
("Authorization", "Basic " + auth)
]
else:
extra_headers = None
return host, extra_headers, x509
##
# Connect to server.
#
# @param host Target host.
# @return A connection handle.
def make_connection(self, host):
# create a HTTP connection object from a host descriptor
host, extra_headers, x509 = self.get_host_info(host)
return httplib.HTTP(host)
##
# Send request header.
#
# @param connection Connection handle.
# @param handler Target RPC handler.
# @param request_body JSON-RPC body.
def send_request(self, connection, handler, request_body):
connection.putrequest("POST", handler)
##
# Send host name.
#
# @param connection Connection handle.
# @param host Host name.
def send_host(self, connection, host):
host, extra_headers, x509 = self.get_host_info(host)
connection.putheader("Host", host)
if extra_headers:
if isinstance(extra_headers, DictType):
extra_headers = extra_headers.items()
for key, value in extra_headers:
connection.putheader(key, value)
##
# Send user-agent identifier.
#
# @param connection Connection handle.
def send_user_agent(self, connection):
connection.putheader("User-Agent", self.user_agent)
##
# Send request body.
#
# @param connection Connection handle.
# @param request_body JSON-RPC request body.
def send_content(self, connection, request_body):
connection.putheader("Content-Type", "text/xml")
connection.putheader("Content-Length", str(len(request_body)))
connection.endheaders()
if request_body:
connection.send(request_body)
##
# Parse response.
#
# @param file Stream.
# @return Response tuple and target method.
def parse_response(self, file):
# compatibility interface
return self._parse_response(file, None)
##
# Parse response (alternate interface). This is similar to the
# parse_response method, but also provides direct access to the
# underlying socket object (where available).
#
# @param file Stream.
# @param sock Socket handle (or None, if the socket object
# could not be accessed).
# @return Response tuple and target method.
def _parse_response(self, file, sock):
# read response from input file/socket, and parse it
p, u = self.getparser()
while 1:
if sock:
response = sock.recv(1024)
else:
response = file.read(1024)
if not response:
break
if self.verbose:
print "body:", repr(response)
p.feed(response)
file.close()
p.close()
return u.close()
##
# Standard transport class for JSON-RPC over HTTPS.
class SafeTransport(Transport):
    """Handles an HTTPS transaction to an JSON-RPC server."""
    # FIXME: mostly untested
    def make_connection(self, host):
        # create a HTTPS connection object from a host descriptor
        # host may be a string, or a (host, x509-dict) tuple
        host, extra_headers, x509 = self.get_host_info(host)
        try:
            # httplib.HTTPS only exists when Python was built with SSL.
            HTTPS = httplib.HTTPS
        except AttributeError:
            raise NotImplementedError(
                "your version of httplib doesn't support HTTPS"
                )
        else:
            return HTTPS(host, None, **(x509 or {}))
class ServerProxy(object):
    """Proxy for a remote JSON-RPC server.

    Attribute access returns callable _Method dispatchers; calling one
    POSTs a JSON-RPC request to the configured endpoint via the
    transport (Transport for http, SafeTransport for https).
    """
    def __init__(self, uri, transport=None, encoding=None,
                 verbose=None, allow_none=0):
        utype, uri = urllib.splittype(uri)
        if utype not in ("http", "https"):
            raise IOError, "Unsupported JSONRPC protocol"
        self.__host, self.__handler = urllib.splithost(uri)
        if not self.__handler:
            # Default handler path, mirroring xmlrpclib's convention.
            self.__handler = "/RPC2"
        if transport is None:
            if utype == "https":
                transport = SafeTransport()
            else:
                transport = Transport()
        self.__transport = transport
        self.__encoding = encoding
        self.__verbose = verbose
        self.__allow_none = allow_none
    def __request(self, methodname, params):
        """call a method on the remote server
        """
        request = dumps(params, methodname, encoding=self.__encoding,
                        allow_none=self.__allow_none)
        response = self.__transport.request(
            self.__host,
            self.__handler,
            request,
            verbose=self.__verbose
            )
        # NOTE(review): response is the decoded JSON value; unwrapping
        # via response[0] assumes a one-element list, but a JSON-RPC
        # response envelope is typically an object (dict) -- confirm.
        if len(response) == 1:
            response = response[0]
        return response
    def __repr__(self):
        return ("<JSONProxy for %s%s>" %
                (self.__host, self.__handler)
                )
    __str__ = __repr__
    def __getattr__(self, name):
        #dispatch
        return _Method(self.__request, name)
# note: to call a remote object with an non-standard name, use
# result getattr(server, "strange-python-name")(args)
if __name__ == "__main__":
    # Ad-hoc manual smoke test against a local JSON-RPC endpoint
    # (Python 2 print statements; only runs when executed directly).
    s = ServerProxy("http://localhost:8080/foo/", verbose = 1)
    c = s.echo("foo bar")
    print c
    d = s.bad("other")
    print d
    e = s.echo("foo bar", "baz")
    print e
    f = s.echo(5)
    print f
| mit |
edlabh/SickRage | lib/feedparser/urls.py | 43 | 4592 | from __future__ import absolute_import, unicode_literals
import re
try:
import urllib.parse as urlparse
except ImportError:
import urlparse as urlparse
from .html import _BaseHTMLProcessor
# If you want feedparser to allow all URL schemes, set this to ()
# List culled from Python's urlparse documentation at:
# http://docs.python.org/library/urlparse.html
# as well as from "URI scheme" at Wikipedia:
# https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme
# Many more will likely need to be added!
ACCEPTABLE_URI_SCHEMES = (
'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'magnet',
'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu',
'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet',
'wais',
# Additional common-but-unofficial schemes
'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs',
'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg',
)
#ACCEPTABLE_URI_SCHEMES = ()
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
    """Join *uri* to *base*, first collapsing any extra slashes right
    after the scheme (e.g. "http:////x" -> "http://x" via _urifixer);
    returns '' if urljoin rejects the input."""
    uri = _urifixer.sub(r'\1\3', uri)
    try:
        uri = urlparse.urljoin(base, uri)
    except ValueError:
        uri = ''
    return uri
def _convert_to_idn(url):
"""Convert a URL to IDN notation"""
# this function should only be called with a unicode string
# strategy: if the host cannot be encoded in ascii, then
# it'll be necessary to encode it in idn form
parts = list(urlparse.urlsplit(url))
try:
parts[1].encode('ascii')
except UnicodeEncodeError:
# the url needs to be converted to idn notation
host = parts[1].rsplit(':', 1)
newhost = []
port = ''
if len(host) == 2:
port = host.pop()
for h in host[0].split('.'):
newhost.append(h.encode('idna').decode('utf-8'))
parts[1] = '.'.join(newhost)
if port:
parts[1] += ':' + port
return urlparse.urlunsplit(parts)
else:
return url
def _makeSafeAbsoluteURI(base, rel=None):
    """Absolutize *rel* against *base*, returning '' unless the result's
    scheme is listed in ACCEPTABLE_URI_SCHEMES (an empty allowlist
    disables the check entirely)."""
    # bail if ACCEPTABLE_URI_SCHEMES is empty
    if not ACCEPTABLE_URI_SCHEMES:
        return _urljoin(base, rel or '')
    if not base:
        return rel or ''
    if not rel:
        # No relative part: validate the base URI's scheme on its own.
        try:
            scheme = urlparse.urlparse(base)[0]
        except ValueError:
            return ''
        if not scheme or scheme in ACCEPTABLE_URI_SCHEMES:
            return base
        return ''
    uri = _urljoin(base, rel)
    if uri.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES:
        return ''
    return uri
class _RelativeURIResolver(_BaseHTMLProcessor):
    """SGML processor that rewrites relative URI attribute values
    (the (tag, attribute) pairs listed in relative_uris) into safe
    absolute URIs resolved against *baseuri*."""
    relative_uris = set([('a', 'href'),
                         ('applet', 'codebase'),
                         ('area', 'href'),
                         ('audio', 'src'),
                         ('blockquote', 'cite'),
                         ('body', 'background'),
                         ('del', 'cite'),
                         ('form', 'action'),
                         ('frame', 'longdesc'),
                         ('frame', 'src'),
                         ('iframe', 'longdesc'),
                         ('iframe', 'src'),
                         ('head', 'profile'),
                         ('img', 'longdesc'),
                         ('img', 'src'),
                         ('img', 'usemap'),
                         ('input', 'src'),
                         ('input', 'usemap'),
                         ('ins', 'cite'),
                         ('link', 'href'),
                         ('object', 'classid'),
                         ('object', 'codebase'),
                         ('object', 'data'),
                         ('object', 'usemap'),
                         ('q', 'cite'),
                         ('script', 'src'),
                         ('source', 'src'),
                         ('video', 'poster'),
                         ('video', 'src')])
    def __init__(self, baseuri, encoding, _type):
        _BaseHTMLProcessor.__init__(self, encoding, _type)
        self.baseuri = baseuri
    def resolveURI(self, uri):
        # Strip stray whitespace, then absolutize + scheme-check.
        return _makeSafeAbsoluteURI(self.baseuri, uri.strip())
    def unknown_starttag(self, tag, attrs):
        attrs = self.normalize_attrs(attrs)
        attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
        _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type):
    """Run *htmlSource* through a _RelativeURIResolver and return the
    markup with relative URI attributes made absolute."""
    # if not _SGML_AVAILABLE:
    #     return htmlSource
    p = _RelativeURIResolver(baseURI, encoding, _type)
    p.feed(htmlSource)
    return p.output()
| gpl-3.0 |
siosio/intellij-community | plugins/hg4idea/testData/bin/hgext/bookmarks.py | 87 | 11771 | # Mercurial extension to provide the 'hg bookmark' command
#
# Copyright 2008 David Soria Parra <dsp@php.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''track a line of development with movable markers
Bookmarks are local movable markers to changesets. Every bookmark
points to a changeset identified by its hash. If you commit a
changeset that is based on a changeset that has a bookmark on it, the
bookmark shifts to the new changeset.
It is possible to use bookmark names in every revision lookup (e.g. hg
merge, hg update).
By default, when several bookmarks point to the same changeset, they
will all move forward together. It is possible to obtain a more
git-like experience by adding the following configuration option to
your .hgrc::
[bookmarks]
track.current = True
This will cause Mercurial to track the bookmark that you are currently
using, and only update it. This is similar to git's approach to
branching.
'''
from mercurial.i18n import _
from mercurial.node import nullid, nullrev, hex, short
from mercurial import util, commands, repair, extensions
import os
def write(repo):
    '''Write bookmarks
    Write the given bookmark => hash dictionary to the .hg/bookmarks file
    in a format equal to those of localtags.
    We also store a backup of the previous state in undo.bookmarks that
    can be copied back on rollback.
    '''
    refs = repo._bookmarks
    # Back up the current file first so rollback() can restore it.
    if os.path.exists(repo.join('bookmarks')):
        util.copyfile(repo.join('bookmarks'), repo.join('undo.bookmarks'))
    if repo._bookmarkcurrent not in refs:
        setcurrent(repo, None)
    wlock = repo.wlock()
    try:
        # atomictemp writes to a temp file; rename() commits it atomically.
        file = repo.opener('bookmarks', 'w', atomictemp=True)
        for refspec, node in refs.iteritems():
            file.write("%s %s\n" % (hex(node), refspec))
        file.rename()
    finally:
        wlock.release()
def setcurrent(repo, mark):
    '''Set the name of the bookmark that we are currently on
    Set the name of the bookmark that we are on (hg update <bookmark>).
    The name is recorded in .hg/bookmarks.current
    '''
    current = repo._bookmarkcurrent
    if current == mark:
        return
    refs = repo._bookmarks
    # do not update if we do update to a rev equal to the current bookmark
    if (mark and mark not in refs and
        current and refs[current] == repo.changectx('.').node()):
        return
    if mark not in refs:
        # Unknown bookmark: record "no current bookmark" (empty string).
        mark = ''
    wlock = repo.wlock()
    try:
        file = repo.opener('bookmarks.current', 'w', atomictemp=True)
        file.write(mark)
        file.rename()
    finally:
        wlock.release()
    # Keep the cached property in sync with what was just written.
    repo._bookmarkcurrent = mark
def bookmark(ui, repo, mark=None, rev=None, force=False, delete=False, rename=None):
    '''track a line of development with movable markers
    Bookmarks are pointers to certain commits that move when
    committing. Bookmarks are local. They can be renamed, copied and
    deleted. It is possible to use bookmark names in 'hg merge' and
    'hg update' to merge and update respectively to a given bookmark.
    You can use 'hg bookmark NAME' to set a bookmark on the working
    directory's parent revision with the given name. If you specify
    a revision using -r REV (where REV may be an existing bookmark),
    the bookmark is assigned to that revision.
    '''
    hexfn = ui.debugflag and hex or short
    marks = repo._bookmarks
    cur = repo.changectx('.').node()
    # --rename: move an existing bookmark to a new name.
    if rename:
        if rename not in marks:
            raise util.Abort(_("a bookmark of this name does not exist"))
        if mark in marks and not force:
            raise util.Abort(_("a bookmark of the same name already exists"))
        if mark is None:
            raise util.Abort(_("new bookmark name required"))
        marks[mark] = marks[rename]
        del marks[rename]
        if repo._bookmarkcurrent == rename:
            setcurrent(repo, mark)
        write(repo)
        return
    # --delete: remove a bookmark (clearing "current" if it was it).
    if delete:
        if mark is None:
            raise util.Abort(_("bookmark name required"))
        if mark not in marks:
            raise util.Abort(_("a bookmark of this name does not exist"))
        if mark == repo._bookmarkcurrent:
            setcurrent(repo, None)
        del marks[mark]
        write(repo)
        return
    # NAME given: create or move the bookmark.
    if mark != None:
        if "\n" in mark:
            raise util.Abort(_("bookmark name cannot contain newlines"))
        mark = mark.strip()
        if mark in marks and not force:
            raise util.Abort(_("a bookmark of the same name already exists"))
        if ((mark in repo.branchtags() or mark == repo.dirstate.branch())
            and not force):
            raise util.Abort(
                _("a bookmark cannot have the name of an existing branch"))
        if rev:
            marks[mark] = repo.lookup(rev)
        else:
            marks[mark] = repo.changectx('.').node()
        setcurrent(repo, mark)
        write(repo)
        return
    # No NAME: list the existing bookmarks.
    if mark is None:
        if rev:
            raise util.Abort(_("bookmark name required"))
        if len(marks) == 0:
            ui.status(_("no bookmarks set\n"))
        else:
            for bmark, n in marks.iteritems():
                if ui.configbool('bookmarks', 'track.current'):
                    current = repo._bookmarkcurrent
                    prefix = (bmark == current and n == cur) and '*' or ' '
                else:
                    prefix = (n == cur) and '*' or ' '
                if ui.quiet:
                    ui.write("%s\n" % bmark)
                else:
                    ui.write(" %s %-25s %d:%s\n" % (
                        prefix, bmark, repo.changelog.rev(n), hexfn(n)))
        return
def _revstostrip(changelog, node):
    """Return the revision numbers that stripping *node* removes: the
    rev of node plus every descendant, minus merge co-parents above
    srev that must be preserved as heads."""
    srev = changelog.rev(node)
    tostrip = [srev]
    saveheads = []
    for r in xrange(srev, len(changelog)):
        parents = changelog.parentrevs(r)
        if parents[0] in tostrip or parents[1] in tostrip:
            tostrip.append(r)
            if parents[1] != nullrev:
                # Merge rev: keep any parent outside the strip set.
                for p in parents:
                    if p not in tostrip and p > srev:
                        saveheads.append(p)
    return [r for r in tostrip if r not in saveheads]
def strip(oldstrip, ui, repo, node, backup="all"):
    """Strip bookmarks if revisions are stripped using
    the mercurial.strip method. This usually happens during
    qpush and qpop"""
    revisions = _revstostrip(repo.changelog, node)
    marks = repo._bookmarks
    # Remember which bookmarks pointed into the stripped range...
    update = []
    for mark, n in marks.iteritems():
        if repo.changelog.rev(n) in revisions:
            update.append(mark)
    oldstrip(ui, repo, node, backup)
    # ...and re-point them at the new working directory parent.
    if len(update) > 0:
        for m in update:
            marks[m] = repo.changectx('.').node()
        write(repo)
def reposetup(ui, repo):
    """Dynamically subclass the local repository so that commits,
    changegroups, lookups and rollback keep bookmarks in sync."""
    if not repo.local():
        return
    class bookmark_repo(repo.__class__):
        @util.propertycache
        def _bookmarks(self):
            '''Parse .hg/bookmarks file and return a dictionary
            Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
            in the .hg/bookmarks file. They are read returned as a dictionary
            with name => hash values.
            '''
            try:
                bookmarks = {}
                for line in self.opener('bookmarks'):
                    sha, refspec = line.strip().split(' ', 1)
                    bookmarks[refspec] = super(bookmark_repo, self).lookup(sha)
            # NOTE(review): bare except silently swallows a missing or
            # corrupt bookmarks file -- and any other error; confirm.
            except:
                pass
            return bookmarks
        @util.propertycache
        def _bookmarkcurrent(self):
            '''Get the current bookmark
            If we use gittishsh branches we have a current bookmark that
            we are on. This function returns the name of the bookmark. It
            is stored in .hg/bookmarks.current
            '''
            mark = None
            if os.path.exists(self.join('bookmarks.current')):
                file = self.opener('bookmarks.current')
                # No readline() in posixfile_nt, reading everything is cheap
                mark = (file.readlines() or [''])[0]
                if mark == '':
                    mark = None
                file.close()
            return mark
        def rollback(self):
            # Restore the pre-transaction bookmarks before rolling back.
            if os.path.exists(self.join('undo.bookmarks')):
                util.rename(self.join('undo.bookmarks'), self.join('bookmarks'))
            return super(bookmark_repo, self).rollback()
        def lookup(self, key):
            # Bookmark names resolve to their node before normal lookup.
            if key in self._bookmarks:
                key = self._bookmarks[key]
            return super(bookmark_repo, self).lookup(key)
        def _bookmarksupdate(self, parents, node):
            # Advance bookmarks that point at any of *parents* to *node*.
            marks = self._bookmarks
            update = False
            if ui.configbool('bookmarks', 'track.current'):
                # git-like mode: only the current bookmark moves.
                mark = self._bookmarkcurrent
                if mark and marks[mark] in parents:
                    marks[mark] = node
                    update = True
            else:
                for mark, n in marks.items():
                    if n in parents:
                        marks[mark] = node
                        update = True
            if update:
                write(self)
        def commitctx(self, ctx, error=False):
            """Add a revision to the repository and
            move the bookmark"""
            wlock = self.wlock() # do both commit and bookmark with lock held
            try:
                node = super(bookmark_repo, self).commitctx(ctx, error)
                if node is None:
                    return None
                parents = self.changelog.parents(node)
                if parents[1] == nullid:
                    parents = (parents[0],)
                self._bookmarksupdate(parents, node)
                return node
            finally:
                wlock.release()
        def addchangegroup(self, source, srctype, url, emptyok=False):
            parents = self.dirstate.parents()
            result = super(bookmark_repo, self).addchangegroup(
                source, srctype, url, emptyok)
            if result > 1:
                # We have more heads than before
                return result
            node = self.changelog.tip()
            self._bookmarksupdate(parents, node)
            return result
        def _findtags(self):
            """Merge bookmarks with normal tags"""
            (tags, tagtypes) = super(bookmark_repo, self)._findtags()
            tags.update(self._bookmarks)
            return (tags, tagtypes)
        if hasattr(repo, 'invalidate'):
            def invalidate(self):
                super(bookmark_repo, self).invalidate()
                # Drop the cached properties so they are re-read from disk.
                # NOTE(review): deletes from the closed-over "repo", not
                # "self" -- same object in practice here, but confirm.
                for attr in ('_bookmarks', '_bookmarkcurrent'):
                    if attr in self.__dict__:
                        delattr(repo, attr)
    repo.__class__ = bookmark_repo
def uisetup(ui):
    """Hook bookmark handling into strip, and (optionally) into the
    update command when bookmarks.track.current is enabled."""
    extensions.wrapfunction(repair, "strip", strip)
    if ui.configbool('bookmarks', 'track.current'):
        extensions.wrapcommand(commands.table, 'update', updatecurbookmark)
def updatecurbookmark(orig, ui, repo, *args, **opts):
    '''Set the current bookmark
    If the user updates to a bookmark we update the .hg/bookmarks.current
    file.
    '''
    res = orig(ui, repo, *args, **opts)
    # The update target is either --rev or the first positional arg.
    rev = opts['rev']
    if not rev and len(args) > 0:
        rev = args[0]
    setcurrent(repo, rev)
    return res
# Command table registered with Mercurial: maps the "bookmarks" command
# to its implementation, its option list, and its synopsis string.
cmdtable = {
    "bookmarks":
        (bookmark,
         [('f', 'force', False, _('force')),
          ('r', 'rev', '', _('revision')),
          ('d', 'delete', False, _('delete a given bookmark')),
          ('m', 'rename', '', _('rename a given bookmark'))],
         _('hg bookmarks [-f] [-d] [-m NAME] [-r REV] [NAME]')),
}
| apache-2.0 |
jonmwalton/demobuilder | layers/rhel-server-7:gui:ose-3.0:offline/@target/install.py | 4 | 6610 | #!/usr/bin/python
import k8s
import os
import re
import socket
import subprocess
import sys
import tempfile
def get_service_endpoint(url):
    """Look up a Kubernetes service resource and return its endpoint as
    'ip:port' (portal IP plus the first declared service port)."""
    svc = api.get(url)
    return "%s:%u" % (svc.spec.portalIP, svc.spec.ports[0].port)
def resolve_values(t, x):
    """Expand ``${NAME}`` placeholders in *x* from template *t*'s parameters.

    Builds a name -> value map from ``t.parameters`` (parameters with no
    value map to None) and substitutes every ``${NAME}`` occurrence.
    NOTE(review): a placeholder whose parameter has value None would make
    re.sub raise TypeError; assumes every referenced parameter carries a
    value -- confirm against the templates in use.
    """
    parameters = {p.name: p.get("value", None) for p in t.parameters}
    # Raw string: '\$' in a non-raw literal is an invalid escape sequence
    # (SyntaxWarning on modern Python, slated to become an error).
    return re.sub(r"\${([^}]+)}", lambda m: parameters[m.group(1)], x)
def system(cmd, check=True, **kwargs):
    """Run *cmd* through the shell, echoing it to stderr first.

    With check=True (default) a non-zero exit raises CalledProcessError;
    with check=False the exit status is ignored (best-effort cleanup).
    """
    # Shell-style "+ cmd" trace of every command executed.
    print >>sys.stderr, "+ " + cmd
    if check:
        subprocess.check_call(cmd, shell=True, **kwargs)
    else:
        subprocess.call(cmd, shell=True, **kwargs)
def download_referenced_images():
    """Pre-pull every container image referenced directly (i.e. not via an
    imageChange trigger) by the templates in the 'openshift' namespace,
    pinning those containers to imagePullPolicy=IfNotPresent."""
    images = set()
    for t in oapi.get("/namespaces/openshift/templates")._items:
        for o in t.objects:
            if o.kind == "DeploymentConfig":
                # return container image names, unless triggered by imageChange
                for c in o.spec.template.spec.containers:
                    for tr in o.spec.triggers:
                        if "imageChangeParams" in tr and \
                           c.name in tr.imageChangeParams.containerNames:
                            break
                    else:
                        # No trigger owns this container: record its image
                        # for pulling and avoid a re-pull at deploy time.
                        images.add(resolve_values(t, c.image))
                        c.imagePullPolicy = "IfNotPresent"
        # Write the (possibly modified) template back to the server.
        t.kind = "Template"
        t.apiVersion = "v1"
        oapi.put("/namespaces/openshift/templates/" + t.metadata.name, t)
    for i in images:
        system("docker pull " + i)
def download_referenced_images_imagestreams(repo):
    """Localize every image referenced through an imagestream.

    Builds a map from 'imagestream:tag' to the upstream docker image,
    pushes each needed image into the local registry at *repo* (building
    from a local docker context directory when one exists), then rewrites
    the imagestreams to point at the local registry and re-creates them.
    """
    images = set()
    istrmap = {}
    # Map "name:tag" -> upstream "repository:srctag" for every tag of
    # every imagestream in the 'openshift' namespace.
    for istr in oapi.get("/namespaces/openshift/imagestreams")._items:
        for t in istr.spec.tags:
            srctag = t.name
            # A tag may alias another ImageStreamTag; follow the reference.
            if "_from" in t and t._from.kind == "ImageStreamTag":
                srctag = t._from.name
            istrmap[istr.metadata.name + ":" + t.name] = istr.spec.dockerImageRepository + ":" + srctag
    for t in oapi.get("/namespaces/openshift/templates")._items:
        for o in t.objects:
            if o.kind == "DeploymentConfig":
                # return container images triggered by imageChange, if in
                # openshift namespace
                for tr in o.spec.triggers:
                    if "imageChangeParams" in tr:
                        oo = tr.imageChangeParams._from
                        if oo.kind == "ImageStreamTag" and "namespace" in oo \
                           and oo.namespace == "openshift":
                            images.add(istrmap[resolve_values(t, oo.name)])
            elif o["kind"] == "BuildConfig":
                # return builder images in openshift namespace
                oo = o.spec.strategy.sourceStrategy._from
                if oo.kind == "ImageStreamTag" and oo.namespace == "openshift":
                    images.add(istrmap[resolve_values(t, oo.name)])
    for i in images:
        # Retarget the image at the local registry, keeping everything
        # after the original registry host.
        newi = repo + "/" + i.split("/", 1)[1]
        if i != newi:
            if os.path.exists(i.split("/", 1)[1].split(":")[0]):
                # A local docker build context directory exists for this
                # image: build it locally instead of pulling upstream.
                system("docker build -t " + newi + " " + i.split("/", 1)[1].split(":")[0])
                system("docker push " + newi)
                system("docker rmi " + newi)
                system("docker rmi " + i, False)
            else:
                system("docker pull " + i)
                system("docker tag " + i + " " + newi)
                system("docker push " + newi)
                system("docker rmi " + newi)
                system("docker rmi " + i, False)
    # Drop all cached image records so stale upstream references disappear.
    for im in oapi.get("/images")._items:
        oapi.delete("/images/" + im.metadata.name)
    # Re-create each imagestream pointing at the local registry, marked
    # insecure via the annotation.
    for istr in oapi.get("/namespaces/openshift/imagestreams")._items:
        istr.kind = "ImageStream"
        istr.apiVersion = "v1"
        istr.metadata = k8s.AttrDict({"name": istr.metadata.name,
            "annotations": k8s.AttrDict({"openshift.io/image.insecureRepository": "true"})})
        istr.spec.dockerImageRepository = repo + "/" + istr.spec.dockerImageRepository.split("/", 1)[1]
        del istr.status
        oapi.delete("/namespaces/openshift/imagestreams/" + istr.metadata.name)
        oapi.post("/namespaces/openshift/imagestreams", istr)
def download_git_repos():
    """Mirror every git repository referenced by a BuildConfig into
    /var/lib/git and rewrite the templates to use the local git://
    mirror, routing builds through the proxy on this host."""
    hostname = socket.gethostname()
    uris = {}
    # Collect upstream URIs and their local git:// replacement URLs.
    for t in oapi.get("/namespaces/openshift/templates")._items:
        for o in t.objects:
            if o.kind == "BuildConfig":
                uri = resolve_values(t, o.spec.source.git.uri)
                if uri and not uri.startswith("git://" + hostname):
                    uris[uri] = "git://%s/%s" % (hostname,
                                                 uri.split("://", 1)[1])
    for uri in uris:
        print uri
        root = "/var/lib/git/" + uri.split("://", 1)[1]
        if not os.path.exists(root):
            system("git clone --bare " + uri + " " + root)
    system("chown -R nobody:nobody /var/lib/git")
    for t in oapi.get("/namespaces/openshift/templates")._items:
        for o in t.objects:
            if o.kind == "BuildConfig":
                # The git URI must be a single ${PARAM} reference so the
                # parameter's default value can be rewritten in place.
                m = re.match("^\${([^}]+)}$", o.spec.source.git.uri)
                if not m:
                    raise Exception
                for p in t.parameters:
                    if p.name == m.group(1) and "value" in p and \
                       not p.value.startswith("git://" + hostname):
                        p.value = uris[p.value]
                if o.spec.strategy.type != "Source":
                    raise Exception
                # Replace any existing proxy settings with this host's
                # proxy on port 8080 for source builds.
                env = o.spec.strategy.sourceStrategy.get("env", [])
                env = [x for x in env if x.name not in ["http_proxy", "https_proxy"]]
                env.append(k8s.AttrDict({"name": "http_proxy",
                                         "value": "http://%s:8080/" % hostname}))
                env.append(k8s.AttrDict({"name": "https_proxy",
                                         "value": "http://%s:8080/" % hostname}))
                o.spec.strategy.sourceStrategy.env = env
        t.kind = "Template"
        t.apiVersion = "v1"
        oapi.put("/namespaces/openshift/templates/" + t.metadata.name, t)
def main():
    """Entry point: connect to the local OpenShift master and localize
    all template images, imagestreams and git repositories for offline
    use."""
    url = "https://openshift.example.com:8443"
    # Authenticate to the master with its client certificate/key pair.
    cert = ("/etc/openshift/master/openshift-master.crt",
            "/etc/openshift/master/openshift-master.key")
    global api, oapi
    api = k8s.API(url + "/api/v1", cert)
    oapi = k8s.API(url + "/oapi/v1", cert)
    # Internal docker registry endpoint used to re-home imagestream images.
    ep = get_service_endpoint("/namespaces/default/services/image-registry")
    download_referenced_images()
    download_referenced_images_imagestreams(ep)
    download_git_repos()
if __name__ == "__main__":
main()
| gpl-3.0 |
markr622/moose | framework/contrib/nsiqcppstyle/rules/RULE_5_3_A_provide_doxygen_function_comment_on_function_in_impl.py | 43 | 4295 | """
Provide the function doxygen comment in the impl file.
It checks whether there is a doxygen-style comment in front of each function definition.
It only checks non-static, non-private function definitions.
Unfortunately, this rule cannot determine whether a method is private
if the function definition is located in a cpp file.
Please put '// NS' on the right side of the private function signature to suppress the false alarms.
Example)
= a.cpp =
void KK::C() // NS
{
}
== Violation ==
= a.cpp =
void FunctionA() { <== Violation. No doxygen comment.
}
/* <== Violation. It's not the doxygen comment
*
*/
void FunctionB()
{
}
== Good ==
= a.cpp =
/** <== OK
* blar blar
*/
void FunctionA()
{
}
/**
* blar blar
*/
void FunctionB(); <== OK.
class A {
private :
void FunctionC() { <== Don't care. it's the private function.
}
}
static void FunctionD() <== Don't care. it's the c style private function.
{
}
= a.h =
void FunctionB(); <== Don't care. It's the declared in the header.
"""
import nsiqcppstyle_reporter
from nsiqcppstyle_rulemanager import *
def RunRule(lexer, fullName, decl, contextStack, context) :
    """Report a violation when a function *definition* in an impl file
    lacks a preceding doxygen comment.

    Declarations, header files, private class members and C-style static
    (file-local) functions are skipped.
    """
    ext = lexer.filename[lexer.filename.rfind("."):]
    if not decl and ext != ".h" and context != None:
        upperBlock = contextStack.SigPeek()
        # Skip private member functions (access specifier recorded by
        # RunTypeScopeRule on the enclosing class block).
        if upperBlock != None and upperBlock.type == "CLASS_BLOCK" and upperBlock.additional == "PRIVATE":
            return
        t1 = lexer.GetPrevTokenInType("STATIC", True)
        t2 = lexer.GetPrevTokenInTypeList(["SEMI", "RBRACE"], True)
        # A STATIC token after the last ';' or '}' means this function is
        # file-local (C private): skip it.
        if t1 != None and (t2 == None or t1.lexpos > t2.lexpos) :
            return
        t = lexer.GetCurToken()
        # Look for the nearest comment before the function name; the token
        # index is saved/restored so each search starts from the same spot.
        lexer.PushTokenIndex()
        t2 = lexer.GetPrevTokenInType("COMMENT")
        lexer.PopTokenIndex()
        lexer.PushTokenIndex()
        t3 = lexer.GetPrevTokenInTypeList(["SEMI", "PREPROCESSOR"], False, True)
        lexer.PopTokenIndex()
        if t2 != None and t2.additional == "DOXYGEN" :
            # Accept the doxygen comment unless another statement separates
            # it from this function.
            # NOTE(review): the exact adjacency semantics of the t/t3
            # lexpos comparison depend on GetPrevTokenInTypeList's skip
            # flags -- confirm against nsiqcppstyle's lexer API.
            if t3 == None or t.lexpos > t3.lexpos :
                return
        nsiqcppstyle_reporter.Error(t, __name__, "Doxygen Comment should be provided in front of function (%s) in impl file." % fullName)
ruleManager.AddFunctionNameRule(RunRule)
def RunTypeScopeRule(lexer, contextStack):
    """Record the current access specifier (PUBLIC/PRIVATE/PROTECTED) on
    the enclosing class/struct context block, for use by RunRule."""
    token = lexer.GetCurToken()
    if token.type not in ("PUBLIC", "PRIVATE", "PROTECTED"):
        return
    enclosing = contextStack.SigPeek()
    if enclosing.type in ("CLASS_BLOCK", "STRUCT_BLOCK"):
        enclosing.additional = token.type
ruleManager.AddTypeScopeRule(RunTypeScopeRule)
###########################################################################################
# Unit Test
###########################################################################################
from nsiqunittest.nsiqcppstyle_unittestbase import *
class testRule(nct):
    """Unit tests for the doxygen-function-comment rule: each case feeds
    a small C snippet through the analyzer and checks whether the rule
    reported a violation."""
    def setUpRule(self):
        # Register both the main rule and the access-specifier tracker.
        ruleManager.AddFunctionNameRule(RunRule)
        ruleManager.AddTypeScopeRule(RunTypeScopeRule)
    def test1(self):
        # Definition in an impl file with no comment at all -> violation.
        self.Analyze("thisfile.c",
"""
void FunctionA() {
}
""")
        assert CheckErrorContent(__name__)
    def test2(self):
        # A plain /* */ comment is not a doxygen comment -> violation.
        self.Analyze("thisfile.c",
"""
/*
*
*/
extern void FunctionB() {
}
""")
        assert CheckErrorContent(__name__)
    def test3(self):
        # Undocumented public member definition -> violation.
        self.Analyze("thisfile.c",
"""
class A {
public:
void ~A() {
}
}
""")
        assert CheckErrorContent(__name__)
    def test4(self):
        # Doxygen-commented members and a private member -> no violation.
        self.Analyze("thisfile.c",
"""
class J {
/** HELLO */
C() {
}
public :
/** HELLO */
A();
private :
B() {}
}
""")
        assert not CheckErrorContent(__name__)
    def test5(self):
        # static (file-local) function is exempt -> no violation.
        self.Analyze("thisfile.c",
"""
/*
*
*/
static void FunctionB() {
}
""")
        assert not CheckErrorContent(__name__)
    def test6(self):
        # Header files are exempt -> no violation.
        self.Analyze("thisfile.h",
"""
int a;
void FunctionB(){
}
""")
        assert not CheckErrorContent(__name__)
    def test7(self):
        # Same code in an impl file -> violation.
        self.Analyze("thisfile.c",
"""
int a;
void FunctionB(){
}
""")
        assert CheckErrorContent(__name__)
    def test8(self):
        # Undocumented class method with default access -> violation.
        self.Analyze("thisfile.c",
"""
class J {
C() {
}
}
""")
        assert CheckErrorContent(__name__)
| lgpl-2.1 |
googleapis/python-api-gateway | google/cloud/apigateway_v1/services/api_gateway_service/transports/grpc.py | 1 | 27310 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.apigateway_v1.types import apigateway
from google.longrunning import operations_pb2 # type: ignore
from .base import ApiGatewayServiceTransport, DEFAULT_CLIENT_INFO
class ApiGatewayServiceGrpcTransport(ApiGatewayServiceTransport):
"""gRPC backend transport for ApiGatewayService.
The API Gateway Service is the interface for managing API
Gateways.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "apigateway.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "apigateway.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
return self._operations_client
@property
def list_gateways(
self,
) -> Callable[[apigateway.ListGatewaysRequest], apigateway.ListGatewaysResponse]:
r"""Return a callable for the list gateways method over gRPC.
Lists Gateways in a given project and location.
Returns:
Callable[[~.ListGatewaysRequest],
~.ListGatewaysResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_gateways" not in self._stubs:
self._stubs["list_gateways"] = self.grpc_channel.unary_unary(
"/google.cloud.apigateway.v1.ApiGatewayService/ListGateways",
request_serializer=apigateway.ListGatewaysRequest.serialize,
response_deserializer=apigateway.ListGatewaysResponse.deserialize,
)
return self._stubs["list_gateways"]
@property
def get_gateway(
self,
) -> Callable[[apigateway.GetGatewayRequest], apigateway.Gateway]:
r"""Return a callable for the get gateway method over gRPC.
Gets details of a single Gateway.
Returns:
Callable[[~.GetGatewayRequest],
~.Gateway]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_gateway" not in self._stubs:
self._stubs["get_gateway"] = self.grpc_channel.unary_unary(
"/google.cloud.apigateway.v1.ApiGatewayService/GetGateway",
request_serializer=apigateway.GetGatewayRequest.serialize,
response_deserializer=apigateway.Gateway.deserialize,
)
return self._stubs["get_gateway"]
@property
def create_gateway(
self,
) -> Callable[[apigateway.CreateGatewayRequest], operations_pb2.Operation]:
r"""Return a callable for the create gateway method over gRPC.
Creates a new Gateway in a given project and
location.
Returns:
Callable[[~.CreateGatewayRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_gateway" not in self._stubs:
self._stubs["create_gateway"] = self.grpc_channel.unary_unary(
"/google.cloud.apigateway.v1.ApiGatewayService/CreateGateway",
request_serializer=apigateway.CreateGatewayRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_gateway"]
@property
def update_gateway(
self,
) -> Callable[[apigateway.UpdateGatewayRequest], operations_pb2.Operation]:
r"""Return a callable for the update gateway method over gRPC.
Updates the parameters of a single Gateway.
Returns:
Callable[[~.UpdateGatewayRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_gateway" not in self._stubs:
self._stubs["update_gateway"] = self.grpc_channel.unary_unary(
"/google.cloud.apigateway.v1.ApiGatewayService/UpdateGateway",
request_serializer=apigateway.UpdateGatewayRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_gateway"]
@property
def delete_gateway(
self,
) -> Callable[[apigateway.DeleteGatewayRequest], operations_pb2.Operation]:
r"""Return a callable for the delete gateway method over gRPC.
Deletes a single Gateway.
Returns:
Callable[[~.DeleteGatewayRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_gateway" not in self._stubs:
self._stubs["delete_gateway"] = self.grpc_channel.unary_unary(
"/google.cloud.apigateway.v1.ApiGatewayService/DeleteGateway",
request_serializer=apigateway.DeleteGatewayRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_gateway"]
@property
def list_apis(
self,
) -> Callable[[apigateway.ListApisRequest], apigateway.ListApisResponse]:
r"""Return a callable for the list apis method over gRPC.
Lists Apis in a given project and location.
Returns:
Callable[[~.ListApisRequest],
~.ListApisResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_apis" not in self._stubs:
self._stubs["list_apis"] = self.grpc_channel.unary_unary(
"/google.cloud.apigateway.v1.ApiGatewayService/ListApis",
request_serializer=apigateway.ListApisRequest.serialize,
response_deserializer=apigateway.ListApisResponse.deserialize,
)
return self._stubs["list_apis"]
@property
def get_api(self) -> Callable[[apigateway.GetApiRequest], apigateway.Api]:
r"""Return a callable for the get api method over gRPC.
Gets details of a single Api.
Returns:
Callable[[~.GetApiRequest],
~.Api]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_api" not in self._stubs:
self._stubs["get_api"] = self.grpc_channel.unary_unary(
"/google.cloud.apigateway.v1.ApiGatewayService/GetApi",
request_serializer=apigateway.GetApiRequest.serialize,
response_deserializer=apigateway.Api.deserialize,
)
return self._stubs["get_api"]
@property
def create_api(
self,
) -> Callable[[apigateway.CreateApiRequest], operations_pb2.Operation]:
r"""Return a callable for the create api method over gRPC.
Creates a new Api in a given project and location.
Returns:
Callable[[~.CreateApiRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_api" not in self._stubs:
self._stubs["create_api"] = self.grpc_channel.unary_unary(
"/google.cloud.apigateway.v1.ApiGatewayService/CreateApi",
request_serializer=apigateway.CreateApiRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_api"]
@property
def update_api(
self,
) -> Callable[[apigateway.UpdateApiRequest], operations_pb2.Operation]:
r"""Return a callable for the update api method over gRPC.
Updates the parameters of a single Api.
Returns:
Callable[[~.UpdateApiRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_api" not in self._stubs:
self._stubs["update_api"] = self.grpc_channel.unary_unary(
"/google.cloud.apigateway.v1.ApiGatewayService/UpdateApi",
request_serializer=apigateway.UpdateApiRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_api"]
@property
def delete_api(
self,
) -> Callable[[apigateway.DeleteApiRequest], operations_pb2.Operation]:
r"""Return a callable for the delete api method over gRPC.
Deletes a single Api.
Returns:
Callable[[~.DeleteApiRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_api" not in self._stubs:
self._stubs["delete_api"] = self.grpc_channel.unary_unary(
"/google.cloud.apigateway.v1.ApiGatewayService/DeleteApi",
request_serializer=apigateway.DeleteApiRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_api"]
@property
def list_api_configs(
self,
) -> Callable[
[apigateway.ListApiConfigsRequest], apigateway.ListApiConfigsResponse
]:
r"""Return a callable for the list api configs method over gRPC.
Lists ApiConfigs in a given project and location.
Returns:
Callable[[~.ListApiConfigsRequest],
~.ListApiConfigsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_api_configs" not in self._stubs:
self._stubs["list_api_configs"] = self.grpc_channel.unary_unary(
"/google.cloud.apigateway.v1.ApiGatewayService/ListApiConfigs",
request_serializer=apigateway.ListApiConfigsRequest.serialize,
response_deserializer=apigateway.ListApiConfigsResponse.deserialize,
)
return self._stubs["list_api_configs"]
@property
def get_api_config(
self,
) -> Callable[[apigateway.GetApiConfigRequest], apigateway.ApiConfig]:
r"""Return a callable for the get api config method over gRPC.
Gets details of a single ApiConfig.
Returns:
Callable[[~.GetApiConfigRequest],
~.ApiConfig]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_api_config" not in self._stubs:
self._stubs["get_api_config"] = self.grpc_channel.unary_unary(
"/google.cloud.apigateway.v1.ApiGatewayService/GetApiConfig",
request_serializer=apigateway.GetApiConfigRequest.serialize,
response_deserializer=apigateway.ApiConfig.deserialize,
)
return self._stubs["get_api_config"]
@property
def create_api_config(
self,
) -> Callable[[apigateway.CreateApiConfigRequest], operations_pb2.Operation]:
r"""Return a callable for the create api config method over gRPC.
Creates a new ApiConfig in a given project and
location.
Returns:
Callable[[~.CreateApiConfigRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_api_config" not in self._stubs:
self._stubs["create_api_config"] = self.grpc_channel.unary_unary(
"/google.cloud.apigateway.v1.ApiGatewayService/CreateApiConfig",
request_serializer=apigateway.CreateApiConfigRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_api_config"]
@property
def update_api_config(
self,
) -> Callable[[apigateway.UpdateApiConfigRequest], operations_pb2.Operation]:
r"""Return a callable for the update api config method over gRPC.
Updates the parameters of a single ApiConfig.
Returns:
Callable[[~.UpdateApiConfigRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_api_config" not in self._stubs:
self._stubs["update_api_config"] = self.grpc_channel.unary_unary(
"/google.cloud.apigateway.v1.ApiGatewayService/UpdateApiConfig",
request_serializer=apigateway.UpdateApiConfigRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_api_config"]
@property
def delete_api_config(
self,
) -> Callable[[apigateway.DeleteApiConfigRequest], operations_pb2.Operation]:
r"""Return a callable for the delete api config method over gRPC.
Deletes a single ApiConfig.
Returns:
Callable[[~.DeleteApiConfigRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_api_config" not in self._stubs:
self._stubs["delete_api_config"] = self.grpc_channel.unary_unary(
"/google.cloud.apigateway.v1.ApiGatewayService/DeleteApiConfig",
request_serializer=apigateway.DeleteApiConfigRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_api_config"]
__all__ = ("ApiGatewayServiceGrpcTransport",)
| apache-2.0 |
tmenjo/cinder-2015.1.0 | cinder/backup/drivers/ceph.py | 5 | 49735 | # Copyright 2013 Canonical Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Ceph Backup Service Implementation.
This driver supports backing up volumes of any type to a Ceph object store. It
is also capable of detecting whether the volume to be backed up is a Ceph RBD
volume and, if so, attempts to perform incremental/differential backups.
Support is also included for the following in the case of a source volume being
a Ceph RBD volume:
* backing up within the same Ceph pool (not recommended)
* backing up between different Ceph pools
* backing up between different Ceph clusters
At the time of writing, differential backup support in Ceph/librbd was quite
new so this driver accounts for this by first attempting differential backup
and falling back to full backup/copy if the former fails. It is recommended
that you upgrade to Ceph Dumpling (>= v0.67) or above to get the best results.
If incremental backups are used, multiple backups of the same volume are stored
as snapshots so that minimal space is consumed in the object store and
restoring the volume takes a far reduced amount of time compared to a full
copy.
Note that Cinder supports restoring to a new volume or the original volume the
backup was taken from. For the latter case, a full copy is enforced since this
was deemed the safest action to take. It is therefore recommended to always
restore to a new volume (default).
"""
import fcntl
import os
import re
import subprocess
import time
import eventlet
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import units
from cinder.backup import driver
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import utils
import cinder.volume.drivers.rbd as rbd_driver
# The librbd/librados python bindings are only available when the Ceph
# client packages are installed; import them lazily so this module can
# still be imported (and fail gracefully at runtime) without them.
try:
    import rados
    import rbd
except ImportError:
    rados = None
    rbd = None
LOG = logging.getLogger(__name__)
# Configuration options for the Ceph backup service, registered on CONF
# below.  Values are read once in CephBackupDriver.__init__.
service_opts = [
    cfg.StrOpt('backup_ceph_conf', default='/etc/ceph/ceph.conf',
               help='Ceph configuration file to use.'),
    cfg.StrOpt('backup_ceph_user', default='cinder',
               help='The Ceph user to connect with. Default here is to use '
                    'the same user as for Cinder volumes. If not using cephx '
                    'this should be set to None.'),
    cfg.IntOpt('backup_ceph_chunk_size', default=(units.Mi * 128),
               help='The chunk size, in bytes, that a backup is broken into '
                    'before transfer to the Ceph object store.'),
    cfg.StrOpt('backup_ceph_pool', default='backups',
               help='The Ceph pool where volume backups are stored.'),
    cfg.IntOpt('backup_ceph_stripe_unit', default=0,
               help='RBD stripe unit to use when creating a backup image.'),
    cfg.IntOpt('backup_ceph_stripe_count', default=0,
               help='RBD stripe count to use when creating a backup image.'),
    cfg.BoolOpt('restore_discard_excess_bytes', default=True,
                help='If True, always discard excess bytes when restoring '
                     'volumes i.e. pad with zeroes.')
]
CONF = cfg.CONF
CONF.register_opts(service_opts)
class VolumeMetadataBackup(object):
    """Store and retrieve volume metadata as a JSON rados object.
    Each backup owns at most one metadata object, named after the backup
    id.  The object lives in the pool of the supplied rados client.
    """
    def __init__(self, client, backup_id):
        # client: an open RADOS client wrapper exposing .ioctx;
        # backup_id: id of the backup that owns the metadata object.
        self._client = client
        self._backup_id = backup_id
    @property
    def name(self):
        # safe_encode ensures a non-unicode object name as required by the
        # rados bindings.
        return encodeutils.safe_encode("backup.%s.meta" % self._backup_id)
    @property
    def exists(self):
        # True if the metadata object is already present in the pool.
        meta_obj = rados.Object(self._client.ioctx, self.name)
        return self._exists(meta_obj)
    def _exists(self, obj):
        # stat() raises ObjectNotFound for a missing object; any other
        # outcome means it is present.
        try:
            obj.stat()
        except rados.ObjectNotFound:
            return False
        else:
            return True
    def set(self, json_meta):
        """Write JSON metadata to a new object.
        This should only be called once per backup. Raises
        VolumeMetadataBackupExists if the object already exists.
        """
        meta_obj = rados.Object(self._client.ioctx, self.name)
        if self._exists(meta_obj):
            msg = _("Metadata backup object '%s' already exists") % self.name
            raise exception.VolumeMetadataBackupExists(msg)
        meta_obj.write(json_meta)
    def get(self):
        """Get metadata backup object.
        Returns None if the object does not exist.
        """
        meta_obj = rados.Object(self._client.ioctx, self.name)
        if not self._exists(meta_obj):
            LOG.debug("Metadata backup object %s does not exist", self.name)
            return None
        return meta_obj.read()
    def remove_if_exists(self):
        """Delete the metadata object; a missing object is not an error."""
        meta_obj = rados.Object(self._client.ioctx, self.name)
        try:
            meta_obj.remove()
        except rados.ObjectNotFound:
            LOG.debug("Metadata backup object '%s' not found - ignoring",
                      self.name)
class CephBackupDriver(driver.BackupDriver):
"""Backup Cinder volumes to Ceph Object Store.
This class enables backing up Cinder volumes to a Ceph object store.
Backups may be stored in their own pool or even cluster. Store location is
defined by the Ceph conf file and service config options supplied.
If the source volume is itself an RBD volume, the backup will be performed
using incremental differential backups which *should* give a performance
gain.
"""
    def __init__(self, context, db_driver=None, execute=None):
        """Initialise the driver from the registered backup_ceph_* options.
        :param context: request context passed to the base driver
        :param db_driver: optional db driver override
        :param execute: optional command executor (defaults to utils.execute)
        """
        super(CephBackupDriver, self).__init__(context, db_driver)
        self.rbd = rbd
        self.rados = rados
        self.chunk_size = CONF.backup_ceph_chunk_size
        self._execute = execute or utils.execute
        # Only honour the configured stripe settings when the installed
        # librbd supports striping v2; otherwise use 0 so librbd applies
        # its own defaults.
        if self._supports_stripingv2:
            self.rbd_stripe_unit = CONF.backup_ceph_stripe_unit
            self.rbd_stripe_count = CONF.backup_ceph_stripe_count
        else:
            LOG.info(_LI("RBD striping not supported - ignoring configuration "
                         "settings for rbd striping"))
            self.rbd_stripe_count = 0
            self.rbd_stripe_unit = 0
        # Pre-encode config values: the rados/rbd bindings require
        # non-unicode strings.
        self._ceph_backup_user = encodeutils.safe_encode(CONF.backup_ceph_user)
        self._ceph_backup_pool = encodeutils.safe_encode(CONF.backup_ceph_pool)
        self._ceph_backup_conf = encodeutils.safe_encode(CONF.backup_ceph_conf)
def _validate_string_args(self, *args):
"""Ensure all args are non-None and non-empty."""
return all(args)
def _ceph_args(self, user, conf=None, pool=None):
"""Create default ceph args for executing rbd commands.
If no --conf is provided, rbd will look in the default locations e.g.
/etc/ceph/ceph.conf
"""
# Make sure user arg is valid since rbd command may not fail if
# invalid/no user provided, resulting in unexpected behaviour.
if not self._validate_string_args(user):
raise exception.BackupInvalidCephArgs(_("invalid user '%s'") %
user)
args = ['--id', user]
if conf:
args.extend(['--conf', conf])
if pool:
args.extend(['--pool', pool])
return args
@property
def _supports_layering(self):
"""Determine if copy-on-write is supported by our version of librbd."""
return hasattr(self.rbd, 'RBD_FEATURE_LAYERING')
@property
def _supports_stripingv2(self):
"""Determine if striping is supported by our version of librbd."""
return hasattr(self.rbd, 'RBD_FEATURE_STRIPINGV2')
def _get_rbd_support(self):
"""Determine RBD features supported by our version of librbd."""
old_format = True
features = 0
if self._supports_layering:
old_format = False
features |= self.rbd.RBD_FEATURE_LAYERING
if self._supports_stripingv2:
old_format = False
features |= self.rbd.RBD_FEATURE_STRIPINGV2
return (old_format, features)
    def _connect_to_rados(self, pool=None):
        """Establish connection to the backup Ceph cluster.
        Returns a (client, ioctx) pair; the caller owns both and must pass
        them to _disconnect_from_rados() when done.
        """
        client = self.rados.Rados(rados_id=self._ceph_backup_user,
                                  conffile=self._ceph_backup_conf)
        try:
            client.connect()
            pool_to_open = encodeutils.safe_encode(pool or
                                                   self._ceph_backup_pool)
            ioctx = client.open_ioctx(pool_to_open)
            return client, ioctx
        except self.rados.Error:
            # shutdown cannot raise an exception
            client.shutdown()
            raise
    def _disconnect_from_rados(self, client, ioctx):
        """Terminate connection with the backup Ceph cluster.
        Counterpart of _connect_to_rados(); safe to call unconditionally.
        """
        # closing an ioctx cannot raise an exception
        ioctx.close()
        client.shutdown()
def _get_backup_base_name(self, volume_id, backup_id=None,
diff_format=False):
"""Return name of base image used for backup.
Incremental backups use a new base name so we support old and new style
format.
"""
# Ensure no unicode
if diff_format:
return encodeutils.safe_encode("volume-%s.backup.base" % volume_id)
else:
if backup_id is None:
msg = _("Backup id required")
raise exception.InvalidParameterValue(msg)
return encodeutils.safe_encode("volume-%s.backup.%s" %
(volume_id, backup_id))
def _discard_bytes(self, volume, offset, length):
"""Trim length bytes from offset.
If the volume is an rbd do a discard() otherwise assume it is a file
and pad with zeroes.
"""
if length:
LOG.debug("Discarding %(length)s bytes from offset %(offset)s",
{'length': length, 'offset': offset})
if self._file_is_rbd(volume):
volume.rbd_image.discard(offset, length)
else:
zeroes = '\0' * length
chunks = int(length / self.chunk_size)
for chunk in xrange(0, chunks):
LOG.debug("Writing zeroes chunk %d", chunk)
volume.write(zeroes)
volume.flush()
# yield to any other pending backups
eventlet.sleep(0)
rem = int(length % self.chunk_size)
if rem:
zeroes = '\0' * rem
volume.write(zeroes)
volume.flush()
def _transfer_data(self, src, src_name, dest, dest_name, length):
"""Transfer data between files (Python IO objects)."""
LOG.debug("Transferring data between '%(src)s' and '%(dest)s'",
{'src': src_name, 'dest': dest_name})
chunks = int(length / self.chunk_size)
LOG.debug("%(chunks)s chunks of %(bytes)s bytes to be transferred",
{'chunks': chunks, 'bytes': self.chunk_size})
for chunk in xrange(0, chunks):
before = time.time()
data = src.read(self.chunk_size)
# If we have reach end of source, discard any extraneous bytes from
# destination volume if trim is enabled and stop writing.
if data == '':
if CONF.restore_discard_excess_bytes:
self._discard_bytes(dest, dest.tell(),
length - dest.tell())
return
dest.write(data)
dest.flush()
delta = (time.time() - before)
rate = (self.chunk_size / delta) / 1024
LOG.debug("Transferred chunk %(chunk)s of %(chunks)s "
"(%(rate)dK/s)",
{'chunk': chunk + 1,
'chunks': chunks,
'rate': rate})
# yield to any other pending backups
eventlet.sleep(0)
rem = int(length % self.chunk_size)
if rem:
LOG.debug("Transferring remaining %s bytes", rem)
data = src.read(rem)
if data == '':
if CONF.restore_discard_excess_bytes:
self._discard_bytes(dest, dest.tell(), rem)
else:
dest.write(data)
dest.flush()
# yield to any other pending backups
eventlet.sleep(0)
    def _create_base_image(self, name, size, rados_client):
        """Create a base backup image.
        This will be the base image used for storing differential exports.
        :param name: image name (already encoded)
        :param size: image size in bytes
        :param rados_client: open RADOS client for the backup pool
        """
        LOG.debug("Creating base image '%s'", name)
        # Use the best format/features the installed librbd offers plus the
        # stripe settings resolved in __init__.
        old_format, features = self._get_rbd_support()
        self.rbd.RBD().create(ioctx=rados_client.ioctx,
                              name=name,
                              size=size,
                              old_format=old_format,
                              features=features,
                              stripe_unit=self.rbd_stripe_unit,
                              stripe_count=self.rbd_stripe_count)
    def _delete_backup_snapshot(self, rados_client, base_name, backup_id):
        """Delete snapshot associated with this backup if one exists.
        A backup should have at most ONE associated snapshot.
        This is required before attempting to delete the base image. The
        snapshot on the original volume can be left as it will be purged when
        the volume is deleted.
        Returns tuple(deleted_snap_name, num_of_remaining_snaps).
        """
        remaining_snaps = 0
        base_rbd = self.rbd.Image(rados_client.ioctx, base_name)
        try:
            # NOTE: raises BackupOperationError if more than one snapshot
            # is associated with backup_id.
            snap_name = self._get_backup_snap_name(base_rbd, base_name,
                                                   backup_id)
            if snap_name:
                LOG.debug("Deleting backup snapshot='%s'", snap_name)
                base_rbd.remove_snap(snap_name)
            else:
                LOG.debug("No backup snapshot to delete")
            # Now check whether any snapshots remain on the base image
            backup_snaps = self.get_backup_snaps(base_rbd)
            if backup_snaps:
                remaining_snaps = len(backup_snaps)
        finally:
            base_rbd.close()
        return snap_name, remaining_snaps
    def _try_delete_base_image(self, backup_id, volume_id, base_name=None):
        """Try to delete backup RBD image.
        If the rbd image is a base image for incremental backups, it may have
        snapshots. Delete the snapshot associated with backup_id and if the
        image has no more snapshots, delete it. Otherwise return.
        If no base name is provided try normal (full) format then diff format
        image name.
        If a base name is provided but does not exist, ImageNotFound will be
        raised.
        If the image is busy, a number of retries will be performed if
        ImageBusy is received, after which the exception will be propagated to
        the caller.
        """
        retries = 3
        delay = 5
        try_diff_format = False
        if base_name is None:
            try_diff_format = True
            base_name = self._get_backup_base_name(volume_id, backup_id)
            LOG.debug("Trying diff format basename='%(basename)s' for "
                      "backup base image of volume %(volume)s.",
                      {'basename': base_name, 'volume': volume_id})
        with rbd_driver.RADOSClient(self) as client:
            rbd_exists, base_name = \
                self._rbd_image_exists(base_name, volume_id, client,
                                       try_diff_format=try_diff_format)
            if not rbd_exists:
                raise self.rbd.ImageNotFound(_("image %s not found") %
                                             base_name)
            while retries >= 0:
                # First delete associated snapshot from base image (if exists)
                snap, rem = self._delete_backup_snapshot(client, base_name,
                                                         backup_id)
                if rem:
                    # Other backups still reference the base image through
                    # their own snapshots, so it must be kept.
                    LOG.info(
                        _LI("Backup base image of volume %(volume)s still "
                            "has %(snapshots)s snapshots so skipping base "
                            "image delete."),
                        {'snapshots': rem, 'volume': volume_id})
                    return
                LOG.info(_LI("Deleting backup base image='%(basename)s' of "
                             "volume %(volume)s."),
                         {'basename': base_name, 'volume': volume_id})
                # Delete base if no more snapshots
                try:
                    self.rbd.RBD().remove(client.ioctx, base_name)
                except self.rbd.ImageBusy:
                    # Allow a retry if the image is busy
                    if retries > 0:
                        LOG.info(_LI("Backup image of volume %(volume)s is "
                                     "busy, retrying %(retries)s more time(s) "
                                     "in %(delay)ss."),
                                 {'retries': retries,
                                  'delay': delay,
                                  'volume': volume_id})
                        eventlet.sleep(delay)
                    else:
                        LOG.error(_LE("Max retries reached deleting backup "
                                      "%(basename)s image of volume "
                                      "%(volume)s."),
                                  {'volume': volume_id,
                                   'basename': base_name})
                        raise
                else:
                    LOG.debug("Base backup image='%(basename)s' of volume "
                              "%(volume)s deleted.",
                              {'basename': base_name, 'volume': volume_id})
                    # Success: force the while loop to terminate (the
                    # finally clause below decrements this to -1).
                    retries = 0
                finally:
                    retries -= 1
            # Since we have deleted the base image we can delete the source
            # volume backup snapshot.
            src_name = encodeutils.safe_encode(volume_id)
            if src_name in self.rbd.RBD().list(client.ioctx):
                LOG.debug("Deleting source volume snapshot '%(snapshot)s' "
                          "for backup %(basename)s.",
                          {'snapshot': snap, 'basename': base_name})
                src_rbd = self.rbd.Image(client.ioctx, src_name)
                try:
                    src_rbd.remove_snap(snap)
                finally:
                    src_rbd.close()
    def _piped_execute(self, cmd1, cmd2):
        """Pipe output of cmd1 into cmd2.
        Returns (returncode, stderr) of cmd2 once it has consumed all of
        cmd1's stdout.
        """
        LOG.debug("Piping cmd1='%s' into...", ' '.join(cmd1))
        LOG.debug("cmd2='%s'", ' '.join(cmd2))
        try:
            p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
        except OSError as e:
            LOG.error(_LE("Pipe1 failed - %s "), e)
            raise
        # NOTE(dosaboy): ensure that the pipe is blocking. This is to work
        # around the case where evenlet.green.subprocess is used which seems to
        # use a non-blocking pipe.
        flags = fcntl.fcntl(p1.stdout, fcntl.F_GETFL) & (~os.O_NONBLOCK)
        fcntl.fcntl(p1.stdout, fcntl.F_SETFL, flags)
        try:
            p2 = subprocess.Popen(cmd2, stdin=p1.stdout,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
        except OSError as e:
            LOG.error(_LE("Pipe2 failed - %s "), e)
            raise
        # Close our copy of p1's stdout so p1 receives SIGPIPE if p2 exits
        # early, instead of blocking forever.
        p1.stdout.close()
        stdout, stderr = p2.communicate()
        return p2.returncode, stderr
    def _rbd_diff_transfer(self, src_name, src_pool, dest_name, dest_pool,
                           src_user, src_conf, dest_user, dest_conf,
                           src_snap=None, from_snap=None):
        """Copy only extents changed between two points.
        If no snapshot is provided, the diff extents will be all those changed
        since the rbd volume/base was created, otherwise it will be those
        changed since the snapshot was created.
        Raises BackupRBDOperationFailed if the export-diff/import-diff
        pipeline exits non-zero (e.g. when the cluster does not support
        these operations).
        """
        LOG.debug("Performing differential transfer from '%(src)s' to "
                  "'%(dest)s'",
                  {'src': src_name, 'dest': dest_name})
        # NOTE(dosaboy): Need to be tolerant of clusters/clients that do
        # not support these operations since at the time of writing they
        # were very new.
        src_ceph_args = self._ceph_args(src_user, src_conf, pool=src_pool)
        dest_ceph_args = self._ceph_args(dest_user, dest_conf, pool=dest_pool)
        cmd1 = ['rbd', 'export-diff'] + src_ceph_args
        if from_snap is not None:
            cmd1.extend(['--from-snap', from_snap])
        if src_snap:
            path = encodeutils.safe_encode("%s/%s@%s" %
                                           (src_pool, src_name, src_snap))
        else:
            path = encodeutils.safe_encode("%s/%s" % (src_pool, src_name))
        # '-' streams the diff through the pipe rather than a file.
        cmd1.extend([path, '-'])
        cmd2 = ['rbd', 'import-diff'] + dest_ceph_args
        rbd_path = encodeutils.safe_encode("%s/%s" % (dest_pool, dest_name))
        cmd2.extend(['-', rbd_path])
        ret, stderr = self._piped_execute(cmd1, cmd2)
        if ret:
            msg = (_("RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)") %
                   {'ret': ret, 'stderr': stderr})
            LOG.info(msg)
            raise exception.BackupRBDOperationFailed(msg)
def _rbd_image_exists(self, name, volume_id, client,
try_diff_format=False):
"""Return tuple (exists, name)."""
rbds = self.rbd.RBD().list(client.ioctx)
if name not in rbds:
LOG.debug("Image '%s' not found - trying diff format name", name)
if try_diff_format:
name = self._get_backup_base_name(volume_id, diff_format=True)
if name not in rbds:
LOG.debug("Diff format image '%s' not found", name)
return False, name
else:
return False, name
return True, name
def _snap_exists(self, base_name, snap_name, client):
"""Return True if snapshot exists in base image."""
base_rbd = self.rbd.Image(client.ioctx, base_name, read_only=True)
try:
snaps = base_rbd.list_snaps()
finally:
base_rbd.close()
if snaps is None:
return False
for snap in snaps:
if snap['name'] == snap_name:
return True
return False
    def _backup_rbd(self, backup_id, volume_id, volume_file, volume_name,
                    length):
        """Create an incremental backup from an RBD image.
        Exports the extents changed since the previous backup snapshot (if
        any) into a diff-format base image in the backup pool.  Raises
        BackupRBDOperationFailed when a differential backup is impossible,
        so the caller can fall back to a full copy.
        :param volume_file: RBDImageIOWrapper around the source volume
        :param length: source volume size in bytes
        """
        rbd_user = volume_file.rbd_user
        rbd_pool = volume_file.rbd_pool
        rbd_conf = volume_file.rbd_conf
        source_rbd_image = volume_file.rbd_image
        # Identify our --from-snap point (if one exists)
        from_snap = self._get_most_recent_snap(source_rbd_image)
        LOG.debug("Using --from-snap '%(snap)s' for incremental backup of "
                  "volume %(volume)s.",
                  {'snap': from_snap, 'volume': volume_id})
        base_name = self._get_backup_base_name(volume_id, diff_format=True)
        image_created = False
        with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client:
            # If from_snap does not exist at the destination (and the
            # destination exists), this implies a previous backup has failed.
            # In this case we will force a full backup.
            #
            # TODO(dosaboy): find a way to repair the broken backup
            #
            if base_name not in self.rbd.RBD().list(ioctx=client.ioctx):
                # If a from_snap is defined but the base does not exist, we
                # ignore it since it is stale and waiting to be cleaned up.
                if from_snap:
                    LOG.debug("Source snapshot '%(snapshot)s' of volume "
                              "%(volume)s is stale so deleting.",
                              {'snapshot': from_snap, 'volume': volume_id})
                    source_rbd_image.remove_snap(from_snap)
                    from_snap = None
                # Create new base image
                self._create_base_image(base_name, length, client)
                image_created = True
            else:
                # If a from_snap is defined but does not exist in the back base
                # then we cannot proceed (see above)
                if not self._snap_exists(base_name, from_snap, client):
                    errmsg = (_("Snapshot='%(snap)s' does not exist in base "
                                "image='%(base)s' - aborting incremental "
                                "backup") %
                              {'snap': from_snap, 'base': base_name})
                    LOG.info(errmsg)
                    # Raise this exception so that caller can try another
                    # approach
                    raise exception.BackupRBDOperationFailed(errmsg)
        # Snapshot source volume so that we have a new point-in-time
        new_snap = self._get_new_snap_name(backup_id)
        LOG.debug("Creating backup snapshot='%s'", new_snap)
        source_rbd_image.create_snap(new_snap)
        # Attempt differential backup. If this fails, perhaps because librbd
        # or Ceph cluster version does not support it, do a full backup
        # instead.
        #
        # TODO(dosaboy): find a way to determine if the operation is supported
        # rather than brute force approach.
        try:
            before = time.time()
            self._rbd_diff_transfer(volume_name, rbd_pool, base_name,
                                    self._ceph_backup_pool,
                                    src_user=rbd_user,
                                    src_conf=rbd_conf,
                                    dest_user=self._ceph_backup_user,
                                    dest_conf=self._ceph_backup_conf,
                                    src_snap=new_snap,
                                    from_snap=from_snap)
            LOG.debug("Differential backup transfer completed in %.4fs",
                      (time.time() - before))
            # We don't need the previous snapshot (if there was one) anymore so
            # delete it.
            if from_snap:
                source_rbd_image.remove_snap(from_snap)
        except exception.BackupRBDOperationFailed:
            with excutils.save_and_reraise_exception():
                LOG.debug("Differential backup transfer failed")
                # Clean up if image was created as part of this operation
                if image_created:
                    self._try_delete_base_image(backup_id, volume_id,
                                                base_name=base_name)
                # Delete snapshot
                LOG.debug("Deleting diff backup snapshot='%(snapshot)s' of "
                          "source volume='%(volume)s'.",
                          {'snapshot': new_snap, 'volume': volume_id})
                source_rbd_image.remove_snap(new_snap)
def _file_is_rbd(self, volume_file):
"""Returns True if the volume_file is actually an RBD image."""
return hasattr(volume_file, 'rbd_image')
    def _full_backup(self, backup_id, volume_id, src_volume, src_name, length):
        """Perform a full backup of src volume.
        First creates a base backup image in our backup location then performs
        an chunked copy of all data from source volume to a new backup rbd
        image.
        :param src_volume: file-like object for the source volume
        :param length: source volume size in bytes
        """
        backup_name = self._get_backup_base_name(volume_id, backup_id)
        with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client:
            # First create base backup image
            old_format, features = self._get_rbd_support()
            LOG.debug("Creating backup base image='%(name)s' for volume "
                      "%(volume)s.",
                      {'name': backup_name, 'volume': volume_id})
            self.rbd.RBD().create(ioctx=client.ioctx,
                                  name=backup_name,
                                  size=length,
                                  old_format=old_format,
                                  features=features,
                                  stripe_unit=self.rbd_stripe_unit,
                                  stripe_count=self.rbd_stripe_count)
            LOG.debug("Copying data from volume %s.", volume_id)
            dest_rbd = self.rbd.Image(client.ioctx, backup_name)
            try:
                # Wrap the destination image in a file-like interface so the
                # generic chunked transfer can be used.
                rbd_meta = rbd_driver.RBDImageMetadata(dest_rbd,
                                                       self._ceph_backup_pool,
                                                       self._ceph_backup_user,
                                                       self._ceph_backup_conf)
                rbd_fd = rbd_driver.RBDImageIOWrapper(rbd_meta)
                self._transfer_data(src_volume, src_name, rbd_fd, backup_name,
                                    length)
            finally:
                dest_rbd.close()
@staticmethod
def backup_snapshot_name_pattern():
"""Returns the pattern used to match backup snapshots.
It is essential that snapshots created for purposes other than backups
do not have this name format.
"""
return r"^backup\.([a-z0-9\-]+?)\.snap\.(.+)$"
@classmethod
def get_backup_snaps(cls, rbd_image, sort=False):
"""Get all backup snapshots for the given rbd image.
NOTE: this call is made public since these snapshots must be deleted
before the base volume can be deleted.
"""
snaps = rbd_image.list_snaps()
backup_snaps = []
for snap in snaps:
search_key = cls.backup_snapshot_name_pattern()
result = re.search(search_key, snap['name'])
if result:
backup_snaps.append({'name': result.group(0),
'backup_id': result.group(1),
'timestamp': result.group(2)})
if sort:
# Sort into ascending order of timestamp
backup_snaps.sort(key=lambda x: x['timestamp'], reverse=True)
return backup_snaps
def _get_new_snap_name(self, backup_id):
return encodeutils.safe_encode("backup.%s.snap.%s" %
(backup_id, time.time()))
    def _get_backup_snap_name(self, rbd_image, name, backup_id):
        """Return the name of the snapshot associated with backup_id.
        The rbd image provided must be the base image used for an incremental
        backup.
        A backup is only allowed ONE associated snapshot. If more are found,
        exception.BackupOperationError is raised.
        Returns None when no snapshot for backup_id exists.
        """
        snaps = self.get_backup_snaps(rbd_image)
        LOG.debug("Looking for snapshot of backup base '%s'", name)
        if not snaps:
            LOG.debug("Backup base '%s' has no snapshots", name)
            return None
        # Narrow to the snapshot(s) belonging to this particular backup.
        snaps = [snap['name'] for snap in snaps
                 if snap['backup_id'] == backup_id]
        if not snaps:
            LOG.debug("Backup '%s' has no snapshot", backup_id)
            return None
        if len(snaps) > 1:
            msg = (_("Backup should only have one snapshot but instead has %s")
                   % len(snaps))
            LOG.error(msg)
            raise exception.BackupOperationError(msg)
        LOG.debug("Found snapshot '%s'", snaps[0])
        return snaps[0]
def _get_most_recent_snap(self, rbd_image):
"""Get the most recent backup snapshot of the provided image.
Returns name of most recent backup snapshot or None if there are no
backup snapshots.
"""
backup_snaps = self.get_backup_snaps(rbd_image, sort=True)
if not backup_snaps:
return None
return backup_snaps[0]['name']
def _get_volume_size_gb(self, volume):
"""Return the size in gigabytes of the given volume.
Raises exception.InvalidParameterValue if volume size is 0.
"""
if int(volume['size']) == 0:
errmsg = _("Need non-zero volume size")
raise exception.InvalidParameterValue(errmsg)
return int(volume['size']) * units.Gi
    def _backup_metadata(self, backup):
        """Backup volume metadata.
        NOTE(dosaboy): the metadata we are backing up is obtained from a
                       versioned api so we should not alter it in any way here.
                       We must also be sure that the service that will perform
                       the restore is compatible with version used.
        Raises BackupOperationError if a metadata object for this backup
        already exists.
        """
        json_meta = self.get_metadata(backup['volume_id'])
        if not json_meta:
            LOG.debug("No metadata to backup for volume %s.",
                      backup['volume_id'])
            return
        LOG.debug("Backing up metadata for volume %s.",
                  backup['volume_id'])
        try:
            with rbd_driver.RADOSClient(self) as client:
                vol_meta_backup = VolumeMetadataBackup(client, backup['id'])
                vol_meta_backup.set(json_meta)
        except exception.VolumeMetadataBackupExists as e:
            msg = (_("Failed to backup volume metadata - %s") % e)
            raise exception.BackupOperationError(msg)
    def backup(self, backup, volume_file, backup_metadata=True):
        """Backup volume and metadata (if available) to Ceph object store.
        If the source volume is an RBD we will attempt to do an
        incremental/differential backup, otherwise a full copy is performed.
        If this fails we will attempt to fall back to full copy.
        :param backup: backup db record
        :param volume_file: file-like object for the source volume
        :param backup_metadata: also store the volume's metadata when True
        """
        backup_id = backup['id']
        volume = self.db.volume_get(self.context, backup['volume_id'])
        volume_id = volume['id']
        volume_name = volume['name']
        LOG.debug("Starting backup of volume='%s'.", volume_id)
        # Ensure we are at the beginning of the volume
        volume_file.seek(0)
        length = self._get_volume_size_gb(volume)
        do_full_backup = False
        if self._file_is_rbd(volume_file):
            # If volume an RBD, attempt incremental backup.
            try:
                self._backup_rbd(backup_id, volume_id, volume_file,
                                 volume_name, length)
            except exception.BackupRBDOperationFailed:
                LOG.debug("Forcing full backup of volume %s.", volume_id)
                do_full_backup = True
        else:
            do_full_backup = True
        if do_full_backup:
            self._full_backup(backup_id, volume_id, volume_file,
                              volume_name, length)
        # Record which pool the backup lives in for later restore/delete.
        self.db.backup_update(self.context, backup_id,
                              {'container': self._ceph_backup_pool})
        if backup_metadata:
            try:
                self._backup_metadata(backup)
            except exception.BackupOperationError:
                with excutils.save_and_reraise_exception():
                    # Cleanup.
                    self.delete(backup)
        LOG.debug("Backup '%(backup_id)s' of volume %(volume_id)s finished.",
                  {'backup_id': backup_id, 'volume_id': volume_id})
    def _full_restore(self, backup_id, volume_id, dest_file, dest_name,
                      length, src_snap=None):
        """Restore volume using full copy i.e. all extents.
        This will result in all extents being copied from source to
        destination.
        :param src_snap: when set, restore from this snapshot of the
                         diff-format base image instead of the full-format
                         backup image
        """
        with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client:
            # If a source snapshot is provided we assume the base is diff
            # format.
            if src_snap:
                diff_format = True
            else:
                diff_format = False
            backup_name = self._get_backup_base_name(volume_id,
                                                     backup_id=backup_id,
                                                     diff_format=diff_format)
            # Retrieve backup volume
            src_rbd = self.rbd.Image(client.ioctx, backup_name,
                                     snapshot=src_snap, read_only=True)
            try:
                # Wrap the backup image in a file-like interface so the
                # generic chunked transfer can be used.
                rbd_meta = rbd_driver.RBDImageMetadata(src_rbd,
                                                       self._ceph_backup_pool,
                                                       self._ceph_backup_user,
                                                       self._ceph_backup_conf)
                rbd_fd = rbd_driver.RBDImageIOWrapper(rbd_meta)
                self._transfer_data(rbd_fd, backup_name, dest_file, dest_name,
                                    length)
            finally:
                src_rbd.close()
    def _check_restore_vol_size(self, backup_base, restore_vol, restore_length,
                                src_pool):
        """Ensure that the restore volume is the correct size.
        If the restore volume was bigger than the backup, the diff restore will
        shrink it to the size of the original backup so we need to
        post-process and resize it back to its expected size.
        :param restore_length: expected size of the restored volume in bytes
        """
        with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client:
            adjust_size = 0
            base_image = self.rbd.Image(client.ioctx,
                                        encodeutils.safe_encode(backup_base),
                                        read_only=True)
            try:
                if restore_length != base_image.size():
                    adjust_size = restore_length
            finally:
                base_image.close()
        if adjust_size:
            # Reopen against the pool containing the restored volume.
            with rbd_driver.RADOSClient(self, src_pool) as client:
                restore_vol_encode = encodeutils.safe_encode(restore_vol)
                dest_image = self.rbd.Image(client.ioctx, restore_vol_encode)
                try:
                    LOG.debug("Adjusting restore vol size")
                    dest_image.resize(adjust_size)
                finally:
                    dest_image.close()
    def _diff_restore_rbd(self, base_name, restore_file, restore_name,
                          restore_point, restore_length):
        """Attempt restore rbd volume from backup using diff transfer.
        Raises BackupRBDOperationFailed if the differential transfer fails,
        allowing the caller to fall back to a full restore.
        """
        rbd_user = restore_file.rbd_user
        rbd_pool = restore_file.rbd_pool
        rbd_conf = restore_file.rbd_conf
        LOG.debug("Attempting incremental restore from base='%(base)s' "
                  "snap='%(snap)s'",
                  {'base': base_name, 'snap': restore_point})
        before = time.time()
        try:
            self._rbd_diff_transfer(base_name, self._ceph_backup_pool,
                                    restore_name, rbd_pool,
                                    src_user=self._ceph_backup_user,
                                    src_conf=self._ceph_backup_conf,
                                    dest_user=rbd_user, dest_conf=rbd_conf,
                                    src_snap=restore_point)
        except exception.BackupRBDOperationFailed:
            LOG.exception(_LE("Differential restore failed, trying full "
                              "restore"))
            raise
        # If the volume we are restoring to is larger than the backup volume,
        # we will need to resize it after the diff import since import-diff
        # appears to shrink the target rbd volume to the size of the original
        # backup volume.
        self._check_restore_vol_size(base_name, restore_name, restore_length,
                                     rbd_pool)
        LOG.debug("Restore transfer completed in %.4fs",
                  (time.time() - before))
def _num_backup_snaps(self, backup_base_name):
"""Return the number of snapshots that exist on the base image."""
with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client:
base_rbd = self.rbd.Image(client.ioctx, backup_base_name,
read_only=True)
try:
snaps = self.get_backup_snaps(base_rbd)
finally:
base_rbd.close()
if snaps:
return len(snaps)
else:
return 0
def _get_restore_point(self, base_name, backup_id):
"""Get restore point snapshot name for incremental backup.
If the backup was not incremental (determined by the fact that the
base has no snapshots/restore points), None is returned. Otherwise, the
restore point associated with backup_id is returned.
"""
with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client:
base_rbd = self.rbd.Image(client.ioctx, base_name, read_only=True)
try:
restore_point = self._get_backup_snap_name(base_rbd, base_name,
backup_id)
finally:
base_rbd.close()
return restore_point
def _rbd_has_extents(self, rbd_volume):
"""Check whether the given rbd volume has extents.
Return True if has extents, otherwise False.
"""
extents = []
def iter_cb(offset, length, exists):
if exists:
extents.append(length)
rbd_volume.diff_iterate(0, rbd_volume.size(), None, iter_cb)
if extents:
LOG.debug("RBD has %s extents", sum(extents))
return True
return False
    def _diff_restore_allowed(self, base_name, backup, volume, volume_file,
                              rados_client):
        """Determine whether a differential restore is possible/allowed.
        In order for a differential restore to be performed we need:
        * destination volume must be RBD
        * destination volume must have zero extents
        * backup base image must exist
        * backup must have a restore point
        Returns (allowed, restore_point); restore_point may be set even
        when allowed is False so the caller can do a full copy from the
        snapshot.
        """
        not_allowed = (False, None)
        if self._file_is_rbd(volume_file):
            # NOTE(dosaboy): base_name here must be diff format.
            rbd_exists, base_name = self._rbd_image_exists(base_name,
                                                           backup['volume_id'],
                                                           rados_client)
            if not rbd_exists:
                return not_allowed
            # Get the restore point. If no restore point is found, we assume
            # that the backup was not performed using diff/incremental methods
            # so we enforce full copy.
            restore_point = self._get_restore_point(base_name, backup['id'])
            # If the volume we are restoring to is the volume the backup was
            # made from, force a full restore since a diff will not work in
            # this case.
            if volume['id'] == backup['volume_id']:
                LOG.debug("Destination volume is same as backup source volume "
                          "%s - forcing full copy.", volume['id'])
                return False, restore_point
            if restore_point:
                # If the destination volume has extents we cannot allow a diff
                # restore.
                if self._rbd_has_extents(volume_file.rbd_image):
                    # We return the restore point so that a full copy is done
                    # from snapshot.
                    LOG.debug("Destination has extents - forcing full copy")
                    return False, restore_point
                return True, restore_point
            else:
                LOG.info(_LI("No restore point found for "
                             "backup='%(backup)s' of "
                             "volume %(volume)s - forcing full copy."),
                         {'backup': backup['id'],
                          'volume': backup['volume_id']})
        return not_allowed
    def _restore_volume(self, backup, volume, volume_file):
        """Restore volume from backup using diff transfer if possible.
        Attempts a differential restore and reverts to full copy if diff fails.
        :param volume: target volume db record
        :param volume_file: file-like object for the target volume
        """
        volume_name = volume['name']
        backup_id = backup['id']
        backup_volume_id = backup['volume_id']
        length = int(volume['size']) * units.Gi
        base_name = self._get_backup_base_name(backup['volume_id'],
                                               diff_format=True)
        with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client:
            diff_allowed, restore_point = \
                self._diff_restore_allowed(base_name, backup, volume,
                                           volume_file, client)
        do_full_restore = True
        if diff_allowed:
            # Attempt diff
            try:
                self._diff_restore_rbd(base_name, volume_file, volume_name,
                                       restore_point, length)
                do_full_restore = False
            except exception.BackupRBDOperationFailed:
                LOG.debug("Forcing full restore to volume %s.",
                          volume['id'])
        if do_full_restore:
            # Otherwise full copy
            self._full_restore(backup_id, backup_volume_id, volume_file,
                               volume_name, length, src_snap=restore_point)
def _restore_metadata(self, backup, volume_id):
"""Restore volume metadata from backup.
If this backup has associated metadata, save it to the restore target
otherwise do nothing.
"""
try:
with rbd_driver.RADOSClient(self) as client:
meta_bak = VolumeMetadataBackup(client, backup['id'])
meta = meta_bak.get()
if meta is not None:
self.put_metadata(volume_id, meta)
else:
LOG.debug("Volume %s has no backed up metadata.",
backup['volume_id'])
except exception.BackupMetadataUnsupportedVersion:
msg = _("Metadata restore failed due to incompatible version")
LOG.error(msg)
raise exception.BackupOperationError(msg)
def restore(self, backup, volume_id, volume_file):
"""Restore volume from backup in Ceph object store.
If volume metadata is available this will also be restored.
"""
target_volume = self.db.volume_get(self.context, volume_id)
LOG.debug('Starting restore from Ceph backup=%(src)s to '
'volume=%(dest)s',
{'src': backup['id'], 'dest': target_volume['name']})
try:
self._restore_volume(backup, target_volume, volume_file)
# Be tolerant of IO implementations that do not support fileno()
try:
fileno = volume_file.fileno()
except IOError:
LOG.debug("Restore target I/O object does not support "
"fileno() - skipping call to fsync().")
else:
os.fsync(fileno)
self._restore_metadata(backup, volume_id)
LOG.debug('Restore to volume %s finished successfully.',
volume_id)
except exception.BackupOperationError as e:
LOG.error(_LE('Restore to volume %(volume)s finished with error - '
'%(error)s.'), {'error': e, 'volume': volume_id})
raise
def delete(self, backup):
"""Delete the given backup from Ceph object store."""
LOG.debug('Delete started for backup=%s', backup['id'])
delete_failed = False
try:
self._try_delete_base_image(backup['id'], backup['volume_id'])
except self.rbd.ImageNotFound:
LOG.warning(
_LW("RBD image for backup %(backup)s of volume %(volume)s "
"not found. Deleting backup metadata."),
{'backup': backup['id'], 'volume': backup['volume_id']})
delete_failed = True
with rbd_driver.RADOSClient(self) as client:
VolumeMetadataBackup(client, backup['id']).remove_if_exists()
if delete_failed:
LOG.info(_LI("Delete of backup '%(backup)s' "
"for volume '%(volume)s' "
"finished with warning."),
{'backup': backup['id'], 'volume': backup['volume_id']})
else:
LOG.debug("Delete of backup '%(backup)s' for volume "
"'%(volume)s' finished.",
{'backup': backup['id'], 'volume': backup['volume_id']})
def get_backup_driver(context):
    """Return a Ceph backup driver instance bound to *context*."""
    return CephBackupDriver(context)
| apache-2.0 |
andyzsf/django | tests/test_client/urls.py | 9 | 2123 | from django.conf.urls import url
from django.contrib.auth import views as auth_views
from django.views.generic import RedirectView
from . import views
urlpatterns = [
url(r'^get_view/$', views.get_view, name='get_view'),
url(r'^post_view/$', views.post_view),
url(r'^trace_view/$', views.trace_view),
url(r'^header_view/$', views.view_with_header),
url(r'^raw_post_view/$', views.raw_post_view),
url(r'^redirect_view/$', views.redirect_view),
url(r'^secure_view/$', views.view_with_secure),
url(r'^permanent_redirect_view/$', RedirectView.as_view(url='/get_view/', permanent=True)),
url(r'^temporary_redirect_view/$', RedirectView.as_view(url='/get_view/', permanent=False)),
url(r'^http_redirect_view/$', RedirectView.as_view(url='/secure_view/', permanent=True)),
url(r'^https_redirect_view/$', RedirectView.as_view(url='https://testserver/secure_view/', permanent=True)),
url(r'^double_redirect_view/$', views.double_redirect_view),
url(r'^bad_view/$', views.bad_view),
url(r'^form_view/$', views.form_view),
url(r'^form_view_with_template/$', views.form_view_with_template),
url(r'^formset_view/$', views.formset_view),
url(r'^login_protected_view/$', views.login_protected_view),
url(r'^login_protected_method_view/$', views.login_protected_method_view),
url(r'^login_protected_view_custom_redirect/$', views.login_protected_view_changed_redirect),
url(r'^permission_protected_view/$', views.permission_protected_view),
url(r'^permission_protected_view_exception/$', views.permission_protected_view_exception),
url(r'^permission_protected_method_view/$', views.permission_protected_method_view),
url(r'^session_view/$', views.session_view),
url(r'^broken_view/$', views.broken_view),
url(r'^mail_sending_view/$', views.mail_sending_view),
url(r'^mass_mail_sending_view/$', views.mass_mail_sending_view),
url(r'^django_project_redirect/$', views.django_project_redirect),
url(r'^accounts/login/$', auth_views.login, {'template_name': 'login.html'}),
url(r'^accounts/logout/$', auth_views.logout),
]
| bsd-3-clause |
reinout/django | tests/admin_changelist/models.py | 13 | 2892 | from django.db import models
class Event(models.Model):
# Oracle can have problems with a column named "date"
date = models.DateField(db_column="event_date")
class Parent(models.Model):
name = models.CharField(max_length=128)
class Child(models.Model):
parent = models.ForeignKey(Parent, models.SET_NULL, editable=False, null=True)
name = models.CharField(max_length=30, blank=True)
age = models.IntegerField(null=True, blank=True)
class Genre(models.Model):
name = models.CharField(max_length=20)
class Band(models.Model):
name = models.CharField(max_length=20)
nr_of_members = models.PositiveIntegerField()
genres = models.ManyToManyField(Genre)
class Musician(models.Model):
name = models.CharField(max_length=30)
age = models.IntegerField(null=True, blank=True)
def __str__(self):
return self.name
class Group(models.Model):
name = models.CharField(max_length=30)
members = models.ManyToManyField(Musician, through='Membership')
def __str__(self):
return self.name
class Concert(models.Model):
name = models.CharField(max_length=30)
group = models.ForeignKey(Group, models.CASCADE)
class Membership(models.Model):
music = models.ForeignKey(Musician, models.CASCADE)
group = models.ForeignKey(Group, models.CASCADE)
role = models.CharField(max_length=15)
class Quartet(Group):
pass
class ChordsMusician(Musician):
pass
class ChordsBand(models.Model):
name = models.CharField(max_length=30)
members = models.ManyToManyField(ChordsMusician, through='Invitation')
class Invitation(models.Model):
player = models.ForeignKey(ChordsMusician, models.CASCADE)
band = models.ForeignKey(ChordsBand, models.CASCADE)
instrument = models.CharField(max_length=15)
class Swallow(models.Model):
origin = models.CharField(max_length=255)
load = models.FloatField()
speed = models.FloatField()
class Meta:
ordering = ('speed', 'load')
class SwallowOneToOne(models.Model):
swallow = models.OneToOneField(Swallow, models.CASCADE)
class UnorderedObject(models.Model):
"""
Model without any defined `Meta.ordering`.
Refs #17198.
"""
bool = models.BooleanField(default=True)
class OrderedObjectManager(models.Manager):
    # Default manager that returns rows ordered by the `number` field
    # (stored in db column `number_val`).
    def get_queryset(self):
        return super().get_queryset().order_by('number')
class OrderedObject(models.Model):
"""
Model with Manager that defines a default order.
Refs #17198.
"""
name = models.CharField(max_length=255)
bool = models.BooleanField(default=True)
number = models.IntegerField(default=0, db_column='number_val')
objects = OrderedObjectManager()
class CustomIdUser(models.Model):
uuid = models.AutoField(primary_key=True)
class CharPK(models.Model):
char_pk = models.CharField(max_length=100, primary_key=True)
| bsd-3-clause |
h3llrais3r/SickRage | sickbeard/name_parser/regexes.py | 11 | 24869 | # coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
# all regexes are case insensitive
from __future__ import print_function, unicode_literals
normal_regexes = [
('standard_repeat',
# Show.Name.S01E02.S01E03.Source.Quality.Etc-Group
# Show Name - S01E02 - S01E03 - S01E04 - Ep Name
r'''
^(?P<series_name>.+?)[. _-]+ # Show_Name and separator
s(?P<season_num>\d+)[. _-]* # S01 and optional separator
e(?P<ep_num>\d+) # E02 and separator
([. _-]+s(?P=season_num)[. _-]* # S01 and optional separator
e(?P<extra_ep_num>\d+))+ # E03/etc and separator
[. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
'''),
('fov_repeat',
# Show.Name.1x02.1x03.Source.Quality.Etc-Group
# Show Name - 1x02 - 1x03 - 1x04 - Ep Name
r'''
^(?P<series_name>.+?)[. _-]+ # Show_Name and separator
(?P<season_num>\d+)x # 1x
(?P<ep_num>\d+) # 02 and separator
([. _-]+(?P=season_num)x # 1x
(?P<extra_ep_num>\d+))+ # 03/etc and separator
[. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
'''),
('standard',
# Show.Name.S01E02.Source.Quality.Etc-Group
# Show Name - S01E02 - My Ep Name
# Show.Name.S01.E03.My.Ep.Name
# Show.Name.S01E02E03.Source.Quality.Etc-Group
# Show Name - S01E02-03 - My Ep Name
# Show.Name.S01.E02.E03
r'''
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
\(?s(?P<season_num>\d+)[. _-]* # S01 and optional separator
e(?P<ep_num>\d+)\)? # E02 and separator
(([. _-]*e|-) # linking e/- char
(?P<extra_ep_num>(?!(1080|720|480)[pi])\d+)(\))?)* # additional E03/etc
([. _,-]+((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?)?$ # Group
'''),
('newpct',
# American Horror Story - Temporada 4 HDTV x264[Cap.408_409]SPANISH AUDIO -NEWPCT
# American Horror Story - Temporada 4 [HDTV][Cap.408][Espanol Castellano]
# American Horror Story - Temporada 4 HDTV x264[Cap.408]SPANISH AUDIO –NEWPCT)
r'''
(?P<series_name>.+?).-.+\d{1,2}[ ,.] # Show name: American Horror Story
(?P<extra_info>.+)\[Cap\. # Quality: HDTV x264, [HDTV], HDTV x264
(?P<season_num>\d{1,2}) # Season Number: 4
(?P<ep_num>\d{2}) # Episode Number: 08
((_\d{1,2}(?P<extra_ep_num>\d{2}))|.*\]) # Episode number2: 09
'''),
('fov',
# Show_Name.1x02.Source_Quality_Etc-Group
# Show Name - 1x02 - My Ep Name
# Show_Name.1x02x03x04.Source_Quality_Etc-Group
# Show Name - 1x02-03-04 - My Ep Name
r'''
^((?!\[.+?\])(?P<series_name>.+?)[\[. _-]+)? # Show_Name and separator if no brackets group
(?P<season_num>\d+)x # 1x
(?P<ep_num>\d+) # 02 and separator
(([. _-]*x|-) # linking x/- char
(?P<extra_ep_num>
(?!(1080|720|480)[pi])(?!(?<=x)264) # ignore obviously wrong multi-eps
\d+))* # additional x03/etc
[\]. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
'''),
('scene_date_format',
# Show.Name.2010.11.23.Source.Quality.Etc-Group
# Show Name - 2010-11-23 - Ep Name
r'''
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
(?P<air_date>(\d+[. _-]\d+[. _-]\d+)|(\d+\w+[. _-]\w+[. _-]\d+))
[. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
'''),
('scene_sports_format',
# Show.Name.100.Event.2010.11.23.Source.Quality.Etc-Group
# Show.Name.2010.11.23.Source.Quality.Etc-Group
# Show Name - 2010-11-23 - Ep Name
r'''
^(?P<series_name>.*?(UEFA|MLB|ESPN|WWE|MMA|UFC|TNA|EPL|NASCAR|NBA|NFL|NHL|NRL|PGA|SUPER LEAGUE|FORMULA|FIFA|NETBALL|MOTOGP).*?)[. _-]+
((?P<series_num>\d{1,3})[. _-]+)?
(?P<air_date>(\d+[. _-]\d+[. _-]\d+)|(\d+\w+[. _-]\w+[. _-]\d+))[. _-]+
((?P<extra_info>.+?)((?<![. _-])
(?<!WEB)-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$
'''),
('stupid_with_denotative',
# aaf-sns03e09
# flhd-supernaturals07e02-1080p
r'''
(?P<release_group>.+?)(?<!WEB)-(?P<series_name>\w*)(?<!\d)[\. ]? # aaf-sn
(?!264) # don't count x264
s(?P<season_num>\d{1,2}) # s03
e(?P<ep_num>\d{2})(?:(rp|-(1080p|720p)))?$ # e09
'''),
('stupid',
# tpz-abc102
r'''
(?P<release_group>.+?)(?<!WEB)-(?P<series_name>\w*)(?<!\d)[\. ]? # tpz-abc
(?!264) # don't count x264
(?P<season_num>\d{1,2}) # 1
(?P<ep_num>\d{2})$ # 02
'''),
('verbose',
# Show Name Season 1 Episode 2 Ep Name
r'''
^(?P<series_name>.+?)[. _-]+ # Show Name and separator
(season|series)[. _-]+ # season and separator
(?P<season_num>\d+)[. _-]+ # 1
episode[. _-]+ # episode and separator
(?P<ep_num>\d+)[. _-]+ # 02 and separator
(?P<extra_info>.+)$ # Source_Quality_Etc-
'''),
('season_only',
# Show.Name.S01.Source.Quality.Etc-Group
r'''
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
s(eason[. _-])? # S01/Season 01
(?P<season_num>\d+)[. _-]* # S01 and optional separator
[. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
'''),
('no_season_multi_ep',
# Show.Name.E02-03
# Show.Name.E02.2010
r'''
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
(e(p(isode)?)?|part|pt)[. _-]? # e, ep, episode, or part
(?P<ep_num>(\d+|(?<!e)[ivx]+)) # first ep num
((([. _-]+(and|&|to)[. _-]+)|-) # and/&/to joiner
(?P<extra_ep_num>(?!(1080|720|480)[pi])(\d+|(?<!e)[ivx]+))[. _-]) # second ep num
([. _-]*(?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
'''),
('no_season_general',
# Show.Name.E23.Test
# Show.Name.Part.3.Source.Quality.Etc-Group
# Show.Name.Part.1.and.Part.2.Blah-Group
r'''
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
(e(p(isode)?)?|part|pt)[. _-]? # e, ep, episode, or part
(?P<ep_num>(\d+|((?<!e)[ivx]+(?=[. _-])))) # first ep num
([. _-]+((and|&|to)[. _-]+)? # and/&/to joiner
((e(p(isode)?)?|part|pt)[. _-]?) # e, ep, episode, or part
(?P<extra_ep_num>(?!(1080|720|480)[pi])
(\d+|((?<!e)[ivx]+(?=[. _-]))))[. _-])* # second ep num
([. _-]*(?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
'''),
('bare',
# Show.Name.102.Source.Quality.Etc-Group
r'''
^(?P<series_name>.+?)[. _-]+ # Show_Name and separator
(?P<season_num>\d{1,2}) # 1
(e?) # Optional episode separator
(?P<ep_num>\d{2}) # 02 and separator
([. _-]+(?P<extra_info>(?!\d{3}[. _-]+)[^-]+) # Source_Quality_Etc-
(-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
'''),
('no_season',
# Show Name - 01 - Ep Name
# 01 - Ep Name
# 01 - Ep Name
r'''
^((?P<series_name>.+?)(?:[. _-]{2,}|[. _]))? # Show_Name and separator
(?P<ep_num>\d{1,3}) # 02
(?:-(?P<extra_ep_num>\d{1,3}))* # -03-04-05 etc
(\s*(?:of)?\s*\d{1,3})? # of joiner (with or without spaces) and series total ep
[. _-]+((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
'''),
]
anime_regexes = [
('anime_horriblesubs',
# [HorribleSubs] Maria the Virgin Witch - 01 [720p].mkv
r'''
^(?:\[(?P<release_group>HorribleSubs)\][\s\.])
(?:(?P<series_name>.+?)[\s\.]-[\s\.])
(?P<ep_ab_num>((?!(1080|720|480)[pi]))\d{1,3})
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))?
(?:v(?P<version>[0-9]))?
(?:[\w\.\s]*)
(?:(?:(?:[\[\(])(?P<extra_info>\d{3,4}[xp]?\d{0,4}[\.\w\s-]*)(?:[\]\)]))|(?:\d{3,4}[xp]))
.*?
'''),
('anime_ultimate',
r'''
^(?:\[(?P<release_group>.+?)\][ ._-]*)
(?P<series_name>.+?)[ ._-]+
(?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3})
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))?[ ._-]+?
(?:v(?P<version>[0-9]))?
(?:[\w\.]*)
(?:(?:(?:[\[\(])(?P<extra_info>\d{3,4}[xp]?\d{0,4}[\.\w\s-]*)(?:[\]\)]))|(?:\d{3,4}[xp]))
(?:[ ._]?\[(?P<crc>\w+)\])?
.*?
'''),
('anime_french_fansub',
# [Kaerizaki-Fansub]_One_Piece_727_[VOSTFR][HD_1280x720].mp4
# [Titania-Fansub]_Fairy_Tail_269_[VOSTFR]_[720p]_[1921E00C].mp4
# [ISLAND]One_Piece_726_[VOSTFR]_[V1]_[8bit]_[720p]_[2F7B3FA2].mp4
# Naruto Shippuden 445 VOSTFR par Fansub-Resistance (1280*720) - version MQ
# Dragon Ball Super 015 VOSTFR par Fansub-Resistance (1280x720) - HQ version
# [Mystic.Z-Team].Dragon.Ball.Super.-.épisode.36.VOSTFR.720p
# [Z-Team][DBSuper.pw] Dragon Ball Super - 028 (VOSTFR)(720p AAC)(MP4)
# [SnF] Shokugeki no Souma - 24 VOSTFR [720p][41761A60].mkv
# [Y-F] Ao no Kanata no Four Rhythm - 03 Vostfr HD 8bits
# Phantasy Star Online 2 - The Animation 04 vostfr FHD
# Detective Conan 804 vostfr HD
# Active Raid 04 vostfr [1080p]
# Sekko Boys 04 vostfr [720p]
r'''
^(\[(?P<release_group>.+?)\][ ._-]*)? # Release Group and separator (Optional)
((\[|\().+?(\]|\))[ ._-]*)? # Extra info (Optionnal)
(?P<series_name>.+?)[ ._-]+ # Show_Name and separator
((épisode|episode|Episode)[ ._-]+)? # Sentence for special fansub (Optionnal)
(?P<ep_ab_num>\d{1,3})[ ._-]+ # Episode number and separator
(((\[|\())?(VOSTFR|vostfr|Vostfr|VostFR|vostFR)((\]|\)))?([ ._-])*)+ # Subtitle Language and separator
(par Fansub-Resistance)? # Sentence for special fansub (Optionnal)
(\[((v|V)(?P<version>[0-9]))\]([ ._-])*)? # Version and separator (Optional)
((\[(8|10)(Bits|bits|Bit|bit)\])?([ ._-])*)? # Colour resolution and separator (Optional)
((\[|\()((FHD|HD|SD)*([ ._-])*((?P<extra_info>\d{3,4}[xp*]?\d{0,4}[\.\w\s-]*)))(\]|\)))? # Source_Quality_Etc-
([ ._-]*\[(?P<crc>\w{8})\])? # CRC (Optional)
.* # Separator and EOL
'''),
('anime_standard',
# [Group Name] Show Name.13-14
# [Group Name] Show Name - 13-14
# Show Name 13-14
# [Group Name] Show Name.13
# [Group Name] Show Name - 13
# Show Name 13
r'''
^(\[(?P<release_group>.+?)\][ ._-]*)? # Release Group and separator
(?P<series_name>.+?)[ ._-]+ # Show_Name and separator
(?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}) # E01
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # E02
(v(?P<version>[0-9]))? # version
[ ._-]+\[(?P<extra_info>\d{3,4}[xp]?\d{0,4}[\.\w\s-]*)\] # Source_Quality_Etc-
(\[(?P<crc>\w{8})\])? # CRC
.*? # Separator and EOL
'''),
('anime_standard_round',
# [Stratos-Subs]_Infinite_Stratos_-_12_(1280x720_H.264_AAC)_[379759DB]
# [ShinBunBu-Subs] Bleach - 02-03 (CX 1280x720 x264 AAC)
r'''
^(\[(?P<release_group>.+?)\][ ._-]*)? # Release Group and separator
(?P<series_name>.+?)[ ._-]+ # Show_Name and separator
(?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}) # E01
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # E02
(v(?P<version>[0-9]))? # version
[ ._-]+\((?P<extra_info>(CX[ ._-]?)?\d{3,4}[xp]?\d{0,4}[\.\w\s-]*)\) # Source_Quality_Etc-
(\[(?P<crc>\w{8})\])? # CRC
.*? # Separator and EOL
'''),
('anime_slash',
# [SGKK] Bleach 312v1 [720p/MKV]
r'''
^(\[(?P<release_group>.+?)\][ ._-]*)? # Release Group and separator
(?P<series_name>.+?)[ ._-]+ # Show_Name and separator
(?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}) # E01
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # E02
(v(?P<version>[0-9]))? # version
[ ._-]+\[(?P<extra_info>\d{3,4}p) # Source_Quality_Etc-
(\[(?P<crc>\w{8})\])? # CRC
.*? # Separator and EOL
'''),
('anime_standard_codec',
# [Ayako]_Infinite_Stratos_-_IS_-_07_[H264][720p][EB7838FC]
# [Ayako] Infinite Stratos - IS - 07v2 [H264][720p][44419534]
# [Ayako-Shikkaku] Oniichan no Koto Nanka Zenzen Suki Janain Dakara ne - 10 [LQ][h264][720p] [8853B21C]
r'''
^(\[(?P<release_group>.+?)\][ ._-]*)? # Release Group and separator
(?P<series_name>.+?)[ ._]* # Show_Name and separator
([ ._-]+-[ ._-]+[A-Z]+[ ._-]+)?[ ._-]+ # funny stuff, this is sooo nuts ! this will kick me in the butt one day
(?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}) # E01
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # E02
(v(?P<version>[0-9]))? # version
([ ._-](\[\w{1,2}\])?\[[a-z][.]?\w{2,4}\])? #codec
[ ._-]*\[(?P<extra_info>(\d{3,4}[xp]?\d{0,4})?[\.\w\s-]*)\] # Source_Quality_Etc-
(\[(?P<crc>\w{8})\])?
.*? # Separator and EOL
'''),
('anime_codec_crc',
r'''
^(?:\[(?P<release_group>.*?)\][ ._-]*)?
(?:(?P<series_name>.*?)[ ._-]*)?
(?:(?P<ep_ab_num>(((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))[ ._-]*).+?
(?:\[(?P<codec>.*?)\][ ._-]*)
(?:\[(?P<crc>\w{8})\])?
.*?
'''),
('anime SxEE',
# Show_Name.1x02.Source_Quality_Etc-Group
# Show Name - 1x02 - My Ep Name
# Show_Name.1x02x03x04.Source_Quality_Etc-Group
# Show Name - 1x02-03-04 - My Ep Name
r'''
^((?!\[.+?\])(?P<series_name>.+?)[\[. _-]+)? # Show_Name and separator if no brackets group
(?P<season_num>\d+)x # 1x
(?P<ep_num>\d+) # 02 and separator
(([. _-]*x|-) # linking x/- char
(?P<extra_ep_num>
(?!(1080|720|480)[pi])(?!(?<=x)264) # ignore obviously wrong multi-eps
\d+))* # additional x03/etc
[\]. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
'''),
('anime_SxxExx',
# Show.Name.S01E02.Source.Quality.Etc-Group
# Show Name - S01E02 - My Ep Name
# Show.Name.S01.E03.My.Ep.Name
# Show.Name.S01E02E03.Source.Quality.Etc-Group
# Show Name - S01E02-03 - My Ep Name
# Show.Name.S01.E02.E03
# Show Name - S01E02
# Show Name - S01E02-03
r'''
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
(\()?s(?P<season_num>\d+)[. _-]* # S01 and optional separator
e(?P<ep_num>\d+)(\))? # E02 and separator
(([. _-]*e|-) # linking e/- char
(?P<extra_ep_num>(?!(1080|720|480)[pi])\d+)(\))?)* # additional E03/etc
([. _-]+((?P<extra_info>.+?))? # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
'''),
('anime_and_normal',
# Bleach - s16e03-04 - 313-314
# Bleach.s16e03-04.313-314
# Bleach s16e03e04 313-314
r'''
^(?P<series_name>.+?)[ ._-]+ # start of string and series name and non optinal separator
[sS](?P<season_num>\d+)[. _-]* # S01 and optional separator
[eE](?P<ep_num>\d+) # epipisode E02
(([. _-]*e|-) # linking e/- char
(?P<extra_ep_num>\d+))* # additional E03/etc
([ ._-]{2,}|[ ._]+) # if "-" is used to separate at least something else has to be there(->{2,}) "s16e03-04-313-314" would make sens any way
((?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # absolute number
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # "-" as separator and anditional absolute number, all optinal
(v(?P<version>[0-9]))? # the version e.g. "v2"
.*?
'''),
('anime_and_normal_x',
# Bleach - s16e03-04 - 313-314
# Bleach.s16e03-04.313-314
# Bleach s16e03e04 313-314
r'''
^(?P<series_name>.+?)[ ._-]+ # start of string and series name and non optinal separator
(?P<season_num>\d+)[. _-]* # S01 and optional separator
[xX](?P<ep_num>\d+) # epipisode E02
(([. _-]*e|-) # linking e/- char
(?P<extra_ep_num>\d+))* # additional E03/etc
([ ._-]{2,}|[ ._]+) # if "-" is used to separate at least something else has to be there(->{2,}) "s16e03-04-313-314" would make sens any way
((?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # absolute number
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # "-" as separator and anditional absolute number, all optinal
(v(?P<version>[0-9]))? # the version e.g. "v2"
.*?
'''),
('anime_and_normal_reverse',
# Bleach - 313-314 - s16e03-04
r'''
^(?P<series_name>.+?)[ ._-]+ # start of string and series name and non optinal separator
(?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}) # absolute number
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # "-" as separator and anditional absolute number, all optinal
(v(?P<version>[0-9]))? # the version e.g. "v2"
([ ._-]{2,}|[ ._]+) # if "-" is used to separate at least something else has to be there(->{2,}) "s16e03-04-313-314" would make sens any way
[sS](?P<season_num>\d+)[. _-]* # S01 and optional separator
[eE](?P<ep_num>\d+) # epipisode E02
(([. _-]*e|-) # linking e/- char
(?P<extra_ep_num>\d+))* # additional E03/etc
.*?
'''),
('anime_and_normal_front',
# 165.Naruto Shippuuden.s08e014
r'''
^(?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}) # start of string and absolute number
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # "-" as separator and anditional absolute number, all optinal
(v(?P<version>[0-9]))?[ ._-]+ # the version e.g. "v2"
(?P<series_name>.+?)[ ._-]+
[sS](?P<season_num>\d+)[. _-]* # S01 and optional separator
[eE](?P<ep_num>\d+)
(([. _-]*e|-) # linking e/- char
(?P<extra_ep_num>\d+))* # additional E03/etc
.*?
'''),
('anime_ep_name',
r'''
^(?:\[(?P<release_group>.+?)\][ ._-]*)
(?P<series_name>.+?)[ ._-]+
(?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3})
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))?[ ._-]*?
(?:v(?P<version>[0-9])[ ._-]+?)?
(?:.+?[ ._-]+?)?
\[(?P<extra_info>\w+)\][ ._-]?
(?:\[(?P<crc>\w{8})\])?
.*?
'''),
('anime_WarB3asT',
# 003. Show Name - Ep Name.ext
# 003-004. Show Name - Ep Name.ext
r'''
^(?P<ep_ab_num>\d{3,4})(-(?P<extra_ab_ep_num>\d{3,4}))?\.\s+(?P<series_name>.+?)\s-\s.*
'''),
('anime_bare',
# One Piece - 102
# [ACX]_Wolf's_Spirit_001.mkv
r'''
^(\[(?P<release_group>.+?)\][ ._-]*)?
(?P<series_name>.+?)[ ._-]+ # Show_Name and separator
(?P<ep_ab_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}) # E01
(-(?P<extra_ab_ep_num>((?!(1080|720|480)[pi])|(?![hx].?264))\d{1,3}))? # E02
(v(?P<version>[0-9]))? # v2
.*? # Separator and EOL
'''),
]
| gpl-3.0 |
cantora/sefi | sefi/disassembler/sefi_llvm.py | 1 | 3855 | # Copyright 2013 anthony cantor
# This file is part of sefi.
#
# sefi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# sefi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with sefi. If not, see <http://www.gnu.org/licenses/>.
from sefi.disassembler import *
import sefi.arch
try:
import llvm
except Exception as e:
raise LibNotFound("error loading llvm: %r" % e)
import llvm.target
import llvm.mc
from llvm.target import TargetMachine
llvm.target.initialize_all()
class LLVMInstr(Instr):
	"""Base class for instructions produced by the LLVM disassembler."""

	def display(self):
		# x86-64 addresses print with 16 hex digits, all other
		# architectures with 8.
		wide = self.dasm.arch() == sefi.arch.x86_64
		addr_fmt = "%016x" if wide else "%08x"
		return self.internal_display(addr_fmt, str(self), "")
class BadLLVMInstr(LLVMInstr):
	"""An undecodable byte sequence.

	Every predicate answers False except bad(); __str__ renders as
	"(bad)" in listings.
	"""
	def __init__(self, addr, data, dasm):
		super(BadLLVMInstr, self).__init__(addr, data, dasm)

	def __str__(self):
		return "(bad)"

	def nop(self):
		return False

	def has_uncond_ctrl_flow(self):
		return False

	def has_cond_ctrl_flow(self):
		return False

	def bad(self):
		return True

	def ret(self):
		return False

	def jmp_reg_uncond(self):
		return False

	def call_reg(self):
		return False
class GoodLLVMInstr(LLVMInstr):
	"""An instruction that llvm decoded successfully."""

	def __init__(self, addr, data, llvminst, dasm):
		self.llvminst = llvminst
		super(GoodLLVMInstr, self).__init__(addr, data, dasm)

	def __str__(self):
		return str(self.llvminst).strip()

	def nop(self):
		'''
		llvm does not expose a direct "is this a nop?" query, so fall
		back to matching the rendered mnemonic. This may miss some
		nops, but mistaking a nop for a real instruction is harmless
		for gadget finding.
		'''
		return self.match_regexp('noo?p(?: |$)')

	def has_uncond_ctrl_flow(self):
		return (self.llvminst.is_uncond_branch()
				or self.ret()
				or self.llvminst.is_call())

	def has_cond_ctrl_flow(self):
		return self.llvminst.is_cond_branch()

	def bad(self):
		return isinstance(self.llvminst, llvm.mc.BadInstr)

	def ret(self):
		return self.llvminst.is_return()

	def jmp_reg_uncond(self):
		return (self.llvminst.is_uncond_branch()
				and self.llvminst.is_indirect_branch())

	def call_reg(self):
		return (self.llvminst.is_call()
				and self.llvminst.operands()[0].is_reg())
class LLVMDasm(Disassembler):
	"""Disassembler backed by llvm.mc for a single target architecture."""

	def __init__(self, llvmdasm, arch):
		self.llvmdasm = llvmdasm
		# BUG FIX: this previously read the bare expression `self.arch`
		# (a no-op attribute access) and never stored the architecture,
		# so arch() returned the bound method itself. Store the value
		# under a private name so it does not shadow the arch() method.
		self._arch = arch

	def decode(self, addr, data):
		"""Yield LLVMInstr objects decoded from `data` starting at `addr`.

		`data` must be a tuple of integer byte values; undecodable bytes
		yield BadLLVMInstr, successful decodes yield GoodLLVMInstr.
		"""
		if not isinstance(data, tuple):
			raise TypeError("expected tuple of integers for data, got %s" % type(data))

		str_data = "".join([chr(x) for x in data])
		for (addr, data, llvminst) in self.llvmdasm.decode(str_data, addr):
			if llvminst is None:
				yield BadLLVMInstr(addr, data, self)
			else:
				yield GoodLLVMInstr(addr, data, llvminst, self)

	def arch(self):
		"""Return the sefi architecture this disassembler targets."""
		return self._arch
def new(arch):
	"""Build an LLVMDasm for the requested sefi architecture.

	Raises ArchNotSupported when llvm has no target machine or no
	disassembler for `arch`.
	"""
	# Ordered lookup table replacing the if/elif chain; comparisons run
	# in the same order as before.
	builders = (
		(sefi.arch.x86, TargetMachine.x86),
		(sefi.arch.x86_64, TargetMachine.x86_64),
		(sefi.arch.arm, TargetMachine.arm),
		(sefi.arch.thumb1, TargetMachine.thumb),
	)

	tm = None
	for known_arch, builder in builders:
		if arch == known_arch:
			tm = builder()
			break
	else:
		tm = TargetMachine.lookup(arch)

	if not tm:
		raise ArchNotSupported("llvm does not recognize " + \
				"architecture %s" % arch)

	try:
		llvmdasm = llvm.mc.Disassembler(tm)
	except llvm.LLVMException:
		raise ArchNotSupported("llvm does not have a " + \
				"disassembler for %s" % arch)

	return LLVMDasm(llvmdasm, arch)
SlimRoms/android_external_chromium_org | tools/metrics/histograms/update_net_error_codes.py | 43 | 1352 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Updates NetErrorCodes enum in histograms.xml file with values read
from net_error_list.h.
If the file was pretty-printed, the updated version is pretty-printed too.
"""
import os.path
import re
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
from update_histogram_enum import UpdateHistogramFromDict
NET_ERROR_LIST_PATH = '../../../net/base/net_error_list.h'
def ReadNetErrorCodes(filename):
  """Reads values from net_error_list.h, returning a dictionary mapping
  error code (positive int) to error name.
  """
  # Matches e.g. "NET_ERROR(IO_PENDING, -1)"; group 1 is the name,
  # group 2 the (negated) numeric code.
  error_regex = re.compile(r'^NET_ERROR\(([\w]+), -([0-9]+)\)')

  errors = {}
  # Stream the header line by line instead of materializing it with
  # readlines(); only NET_ERROR(...) entries are relevant.
  with open(filename) as f:
    for line in f:
      m = error_regex.match(line)
      if m:
        errors[int(m.group(2))] = m.group(1)
  return errors
def main():
  # This tool takes no arguments; refuse to run if any were supplied.
  if len(sys.argv) > 1:
    print >>sys.stderr, 'No arguments expected!'
    sys.stderr.write(__doc__)
    sys.exit(1)
  # Regenerate the NetErrorCodes enum in histograms.xml from the codes
  # parsed out of net_error_list.h.
  UpdateHistogramFromDict(
      'NetErrorCodes', ReadNetErrorCodes(NET_ERROR_LIST_PATH),
      NET_ERROR_LIST_PATH)
if __name__ == '__main__':
main()
| bsd-3-clause |
mezklador/load_push_sblogs | dwn_sb_logs.py | 1 | 2353 | #!/home/me/Documents/Codes/python/sb_dwn_logs/env/bin/python3
from datetime import datetime
import logging
import logging.config
import os
import re
import sys
from time import time
import requests
from tqdm import tqdm
# Configure logging from the INI file, routing this script's records to
# the timeline log file.
logging.config.fileConfig(
    'log_config.ini',
    defaults={'logfilename': 'apilogs/downloads/timeline.log'})
logger = logging.getLogger(__name__)
# NOTE: a stale, commented-out manual logging configuration (kept as a
# no-op triple-quoted string) was removed here; the INI file above is
# the single source of truth.

# Remote log file published by the soda-bar.com site.
sb_logs_file = "http://soda-bar.com/hide/loading_page.log"
if __name__ == '__main__':
    start = time()
    directory_name = "logs"
    here = os.getcwd()
    logs_dir = os.path.join(here, directory_name)
    # Create the local download directory on first run.
    if not os.path.isdir(logs_dir):
        os.makedirs(logs_dir)
    try:
        r = requests.get(sb_logs_file, stream=True)
        # Check if status code is OK (200).
        if r.status_code != 200:
            r.raise_for_status()
        content_type = re.sub('[\s+]',
                              '',
                              r.headers['content-type']).split(';')[0]
        # Check that the response is plain text (guards against a wrong
        # URL returning e.g. an HTML error page).
        if content_type != 'text/plain':
            r.status_code = 500
            r.raise_for_status()
        total_size = int(r.headers.get('content-length', 0))
        now = datetime.now()
        filename_date = f"{now.year:}{now.month:02d}{now.day:02d}-{now.hour:02d}_{now.minute:02d}_{now.second:02d}.log"
        with open(os.path.join(logs_dir, filename_date), 'wb') as f:
            for data in tqdm(r.iter_content(32*1024),
                             total=total_size, unit='B',
                             unit_scale=True):
                f.write(data)
        end = time() - start
        logger.info(f"Downloaded {filename_date} to /logs/ in {end:.4f} sec.")
    # BUG FIX: the handlers are ordered most-specific first. Previously
    # RequestException (the base class of both Timeout and HTTPError)
    # was listed first, which made the Timeout and HTTPError handlers
    # unreachable.
    except requests.exceptions.Timeout as tm:
        logger.warning(f"Request Timeout: {tm}")
        sys.exit(1)
    except requests.exceptions.HTTPError as he:
        logger.warning(f"HTTP error: {he}")
        sys.exit(1)
    except requests.exceptions.RequestException as e:
        logger.warning(f"Bad request: {e}")
        sys.exit(1)
hn8841182/20150623-test02 | static/Brython3.1.1-20150328-091302/Lib/shutil.py | 720 | 39101 | """Utility functions for copying and archiving files and directory trees.
XXX The functions here don't copy the resource fork or other metadata on Mac.
"""
import os
import sys
import stat
from os.path import abspath
import fnmatch
import collections
import errno
import tarfile
try:
import bz2
del bz2
_BZ2_SUPPORTED = True
except ImportError:
_BZ2_SUPPORTED = False
try:
from pwd import getpwnam
except ImportError:
getpwnam = None
try:
from grp import getgrnam
except ImportError:
getgrnam = None
__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
"copytree", "move", "rmtree", "Error", "SpecialFileError",
"ExecError", "make_archive", "get_archive_formats",
"register_archive_format", "unregister_archive_format",
"get_unpack_formats", "register_unpack_format",
"unregister_unpack_format", "unpack_archive",
"ignore_patterns", "chown", "which"]
# disk_usage is added later, if available on the platform
class Error(EnvironmentError):
    """Base exception for errors raised while copying or moving trees."""
class SpecialFileError(EnvironmentError):
    """Raised when an operation (e.g. copying) is attempted on a special
    file, such as a named pipe, that does not support it."""
class ExecError(EnvironmentError):
    """Raised when an external command could not be executed."""
class ReadError(EnvironmentError):
    """Raised when an archive cannot be read or parsed."""
class RegistryError(Exception):
    """Raised when a registry operation on the archiving and unpacking
    format registries fails."""
# Compatibility shim: WindowsError only exists on Windows.  Define it as
# None elsewhere so later code can run "WindowsError is not None and
# isinstance(...)" checks without a NameError.
try:
    WindowsError
except NameError:
    WindowsError = None
def copyfileobj(fsrc, fdst, length=16*1024):
    """copy data from file-like object fsrc to file-like object fdst"""
    # Read fixed-size chunks until the source is exhausted.
    while True:
        chunk = fsrc.read(length)
        if not chunk:
            return
        fdst.write(chunk)
def _samefile(src, dst):
    """Return True if src and dst refer to the same file."""
    # Macintosh, Unix.
    if hasattr(os.path, 'samefile'):
        try:
            return os.path.samefile(src, dst)
        except OSError:
            # Either path does not exist: they cannot be "the same file".
            return False
    # All other platforms: check for same pathname.
    return (os.path.normcase(os.path.abspath(src)) ==
            os.path.normcase(os.path.abspath(dst)))
def copyfile(src, dst, *, follow_symlinks=True):
    """Copy data from src to dst.  Return dst.

    If follow_symlinks is not set and src is a symbolic link, a new
    symlink will be created instead of copying the file it points to.

    Raises Error if src and dst name the same file, and SpecialFileError
    if either endpoint is a named pipe.
    """
    if _samefile(src, dst):
        raise Error("`%s` and `%s` are the same file" % (src, dst))
    # Refuse to copy from/to named pipes: opening one would block.
    for fn in [src, dst]:
        try:
            st = os.stat(fn)
        except OSError:
            # File most likely does not exist
            pass
        else:
            # XXX What about other special files? (sockets, devices...)
            if stat.S_ISFIFO(st.st_mode):
                raise SpecialFileError("`%s` is a named pipe" % fn)
    if not follow_symlinks and os.path.islink(src):
        # Recreate the symlink itself rather than copying its target.
        os.symlink(os.readlink(src), dst)
    else:
        with open(src, 'rb') as fsrc:
            with open(dst, 'wb') as fdst:
                copyfileobj(fsrc, fdst)
    return dst
def copymode(src, dst, *, follow_symlinks=True):
    """Copy mode bits from src to dst.

    If follow_symlinks is not set, symlinks aren't followed if and only
    if both `src` and `dst` are symlinks.  If `lchmod` isn't available
    (e.g. Linux) this method does nothing.
    """
    # Operate on the links themselves only when BOTH endpoints are symlinks;
    # otherwise chmod-ing the link path would really chmod its target.
    if not follow_symlinks and os.path.islink(src) and os.path.islink(dst):
        if hasattr(os, 'lchmod'):
            stat_func, chmod_func = os.lstat, os.lchmod
        else:
            # This platform cannot change a symlink's own mode bits.
            return
    elif hasattr(os, 'chmod'):
        stat_func, chmod_func = os.stat, os.chmod
    else:
        return
    st = stat_func(src)
    chmod_func(dst, stat.S_IMODE(st.st_mode))
# os.listxattr only exists on platforms with extended-attribute support
# (e.g. Linux); elsewhere _copyxattr degrades to a no-op.
if hasattr(os, 'listxattr'):
    def _copyxattr(src, dst, *, follow_symlinks=True):
        """Copy extended filesystem attributes from `src` to `dst`.

        Overwrite existing attributes.

        If `follow_symlinks` is false, symlinks won't be followed.
        """
        try:
            names = os.listxattr(src, follow_symlinks=follow_symlinks)
        except OSError as e:
            if e.errno not in (errno.ENOTSUP, errno.ENODATA):
                raise
            # xattrs unsupported on this filesystem: nothing to copy.
            return
        for name in names:
            try:
                value = os.getxattr(src, name, follow_symlinks=follow_symlinks)
                os.setxattr(dst, name, value, follow_symlinks=follow_symlinks)
            except OSError as e:
                # Quietly skip attributes the destination refuses.
                if e.errno not in (errno.EPERM, errno.ENOTSUP, errno.ENODATA):
                    raise
else:
    def _copyxattr(*args, **kwargs):
        # No-op stand-in when the platform lacks extended attributes.
        pass
def copystat(src, dst, *, follow_symlinks=True):
    """Copy all stat info (mode bits, atime, mtime, flags) from src to dst.

    If the optional flag `follow_symlinks` is not set, symlinks aren't
    followed if and only if both `src` and `dst` are symlinks.
    """
    def _nop(*args, ns=None, follow_symlinks=None):
        # Placeholder used when the platform lacks a given os.* call.
        pass
    # follow symlinks (aka don't not follow symlinks)
    follow = follow_symlinks or not (os.path.islink(src) and os.path.islink(dst))
    if follow:
        # use the real function if it exists
        def lookup(name):
            return getattr(os, name, _nop)
    else:
        # use the real function only if it exists
        # *and* it supports follow_symlinks
        def lookup(name):
            fn = getattr(os, name, _nop)
            if fn in os.supports_follow_symlinks:
                return fn
            return _nop
    st = lookup("stat")(src, follow_symlinks=follow)
    mode = stat.S_IMODE(st.st_mode)
    lookup("utime")(dst, ns=(st.st_atime_ns, st.st_mtime_ns),
                    follow_symlinks=follow)
    try:
        lookup("chmod")(dst, mode, follow_symlinks=follow)
    except NotImplementedError:
        # if we got a NotImplementedError, it's because
        #   * follow_symlinks=False,
        #   * lchmod() is unavailable, and
        #   * either
        #       * fchmodat() is unavailable or
        #       * fchmodat() doesn't implement AT_SYMLINK_NOFOLLOW.
        #         (it returned ENOTSUP.)
        # therefore we're out of options--we simply cannot chmod the
        # symlink.  give up, suppress the error.
        # (which is what shutil always did in this circumstance.)
        pass
    if hasattr(st, 'st_flags'):
        try:
            lookup("chflags")(dst, st.st_flags, follow_symlinks=follow)
        except OSError as why:
            # Silently skip filesystems that don't support chflags.
            for err in 'EOPNOTSUPP', 'ENOTSUP':
                if hasattr(errno, err) and why.errno == getattr(errno, err):
                    break
            else:
                raise
    _copyxattr(src, dst, follow_symlinks=follow)
def copy(src, dst, *, follow_symlinks=True):
    """Copy data and mode bits ("cp src dst"). Return the file's destination.

    The destination may be a directory.

    If follow_symlinks is false, symlinks won't be followed. This
    resembles GNU's "cp -P src dst".
    """
    target = dst
    # Copying into a directory means copying under the source's basename.
    if os.path.isdir(target):
        target = os.path.join(target, os.path.basename(src))
    copyfile(src, target, follow_symlinks=follow_symlinks)
    copymode(src, target, follow_symlinks=follow_symlinks)
    return target
def copy2(src, dst, *, follow_symlinks=True):
    """Copy data and all stat info ("cp -p src dst"). Return the file's
    destination.

    The destination may be a directory.

    If follow_symlinks is false, symlinks won't be followed. This
    resembles GNU's "cp -P src dst".
    """
    target = dst
    # Copying into a directory means copying under the source's basename.
    if os.path.isdir(target):
        target = os.path.join(target, os.path.basename(src))
    copyfile(src, target, follow_symlinks=follow_symlinks)
    copystat(src, target, follow_symlinks=follow_symlinks)
    return target
def ignore_patterns(*patterns):
    """Function that can be used as copytree() ignore parameter.

    Patterns is a sequence of glob-style patterns
    that are used to exclude files"""
    def _ignore_patterns(path, names):
        # Union of all names matching any of the glob patterns.
        ignored = set()
        for pattern in patterns:
            ignored.update(fnmatch.filter(names, pattern))
        return ignored
    return _ignore_patterns
def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
             ignore_dangling_symlinks=False):
    """Recursively copy a directory tree.

    The destination directory must not already exist.
    If exception(s) occur, an Error is raised with a list of reasons.

    If the optional symlinks flag is true, symbolic links in the
    source tree result in symbolic links in the destination tree; if
    it is false, the contents of the files pointed to by symbolic
    links are copied. If the file pointed by the symlink doesn't
    exist, an exception will be added in the list of errors raised in
    an Error exception at the end of the copy process.

    You can set the optional ignore_dangling_symlinks flag to true if you
    want to silence this exception. Notice that this has no effect on
    platforms that don't support os.symlink.

    The optional ignore argument is a callable. If given, it
    is called with the `src` parameter, which is the directory
    being visited by copytree(), and `names` which is the list of
    `src` contents, as returned by os.listdir():

        callable(src, names) -> ignored_names

    Since copytree() is called recursively, the callable will be
    called once for each directory that is copied. It returns a
    list of names relative to the `src` directory that should
    not be copied.

    The optional copy_function argument is a callable that will be used
    to copy each file. It will be called with the source path and the
    destination path as arguments. By default, copy2() is used, but any
    function that supports the same signature (like copy()) can be used.
    """
    names = os.listdir(src)
    if ignore is not None:
        ignored_names = ignore(src, names)
    else:
        ignored_names = set()
    # The destination must not exist yet; an existing dst raises here.
    os.makedirs(dst)
    # Per-entry errors are collected and raised together at the end so a
    # single bad entry does not abort the whole tree copy.
    errors = []
    for name in names:
        if name in ignored_names:
            continue
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if os.path.islink(srcname):
                linkto = os.readlink(srcname)
                if symlinks:
                    # We can't just leave it to `copy_function` because legacy
                    # code with a custom `copy_function` may rely on copytree
                    # doing the right thing.
                    os.symlink(linkto, dstname)
                    copystat(srcname, dstname, follow_symlinks=not symlinks)
                else:
                    # ignore dangling symlink if the flag is on
                    if not os.path.exists(linkto) and ignore_dangling_symlinks:
                        continue
                    # otherwise let the copy occurs. copy2 will raise an error
                    copy_function(srcname, dstname)
            elif os.path.isdir(srcname):
                copytree(srcname, dstname, symlinks, ignore, copy_function)
            else:
                # Will raise a SpecialFileError for unsupported file types
                copy_function(srcname, dstname)
        # catch the Error from the recursive copytree so that we can
        # continue with other files
        except Error as err:
            errors.extend(err.args[0])
        except EnvironmentError as why:
            errors.append((srcname, dstname, str(why)))
    try:
        # Finally replicate the directory's own stat info.
        copystat(src, dst)
    except OSError as why:
        if WindowsError is not None and isinstance(why, WindowsError):
            # Copying file access times may fail on Windows
            pass
        else:
            errors.append((src, dst, str(why)))
    if errors:
        raise Error(errors)
    return dst
# version vulnerable to race conditions
def _rmtree_unsafe(path, onerror):
    """Path-based rmtree worker.

    Vulnerable to symlink races (an entry can be swapped for a symlink
    between the check and the removal).  All failures are routed through
    onerror(func, path, exc_info).
    """
    try:
        if os.path.islink(path):
            # symlinks to directories are forbidden, see bug #1669
            raise OSError("Cannot call rmtree on a symbolic link")
    except OSError:
        onerror(os.path.islink, path, sys.exc_info())
        # can't continue even if onerror hook returns
        return
    names = []
    try:
        names = os.listdir(path)
    except os.error:
        onerror(os.listdir, path, sys.exc_info())
    for name in names:
        fullname = os.path.join(path, name)
        try:
            mode = os.lstat(fullname).st_mode
        except os.error:
            # Un-stat-able entries are treated as plain files below.
            mode = 0
        if stat.S_ISDIR(mode):
            _rmtree_unsafe(fullname, onerror)
        else:
            try:
                os.unlink(fullname)
            except os.error:
                onerror(os.unlink, fullname, sys.exc_info())
    try:
        os.rmdir(path)
    except os.error:
        onerror(os.rmdir, path, sys.exc_info())
# Version using fd-based APIs to protect against races
def _rmtree_safe_fd(topfd, path, onerror):
    """fd-based rmtree worker: remove everything under the directory that
    is open at `topfd`.  `path` is only used for error reporting."""
    names = []
    try:
        names = os.listdir(topfd)
    except OSError as err:
        err.filename = path
        onerror(os.listdir, path, sys.exc_info())
    for name in names:
        fullname = os.path.join(path, name)
        try:
            orig_st = os.stat(name, dir_fd=topfd, follow_symlinks=False)
            mode = orig_st.st_mode
        except OSError:
            # Un-stat-able entries are treated as plain files below.
            mode = 0
        if stat.S_ISDIR(mode):
            try:
                dirfd = os.open(name, os.O_RDONLY, dir_fd=topfd)
            except OSError:
                onerror(os.open, fullname, sys.exc_info())
            else:
                try:
                    # lstat/open/fstat comparison guards against the entry
                    # being swapped for a symlink between stat and open.
                    if os.path.samestat(orig_st, os.fstat(dirfd)):
                        _rmtree_safe_fd(dirfd, fullname, onerror)
                        try:
                            os.rmdir(name, dir_fd=topfd)
                        except OSError:
                            onerror(os.rmdir, fullname, sys.exc_info())
                    else:
                        try:
                            # This can only happen if someone replaces
                            # a directory with a symlink after the call to
                            # stat.S_ISDIR above.
                            raise OSError("Cannot call rmtree on a symbolic "
                                          "link")
                        except OSError:
                            onerror(os.path.islink, fullname, sys.exc_info())
                finally:
                    os.close(dirfd)
        else:
            try:
                os.unlink(name, dir_fd=topfd)
            except OSError:
                onerror(os.unlink, fullname, sys.exc_info())
# True when the platform exposes all of the dir_fd/fd-based APIs needed by
# the symlink-race-safe rmtree implementation above.
_use_fd_functions = ({os.open, os.stat, os.unlink, os.rmdir} <=
                     os.supports_dir_fd and
                     os.listdir in os.supports_fd and
                     os.stat in os.supports_follow_symlinks)
def rmtree(path, ignore_errors=False, onerror=None):
    """Recursively delete a directory tree.

    If ignore_errors is set, errors are ignored; otherwise, if onerror
    is set, it is called to handle the error with arguments (func,
    path, exc_info) where func is platform and implementation dependent;
    path is the argument to that function that caused it to fail; and
    exc_info is a tuple returned by sys.exc_info().  If ignore_errors
    is false and onerror is None, an exception is raised.
    """
    if ignore_errors:
        def onerror(*args):
            pass
    elif onerror is None:
        def onerror(*args):
            raise
    if _use_fd_functions:
        # While the unsafe rmtree works fine on bytes, the fd based does not.
        if isinstance(path, bytes):
            path = os.fsdecode(path)
        # Note: To guard against symlink races, we use the standard
        # lstat()/open()/fstat() trick.
        try:
            orig_st = os.lstat(path)
        except Exception:
            onerror(os.lstat, path, sys.exc_info())
            return
        try:
            fd = os.open(path, os.O_RDONLY)
        except Exception:
            # Bug fix: report os.open -- the call that actually failed --
            # to the onerror handler (the original passed os.lstat here,
            # misleading error handlers about which call raised).
            onerror(os.open, path, sys.exc_info())
            return
        try:
            if os.path.samestat(orig_st, os.fstat(fd)):
                _rmtree_safe_fd(fd, path, onerror)
                try:
                    os.rmdir(path)
                except os.error:
                    onerror(os.rmdir, path, sys.exc_info())
            else:
                try:
                    # symlinks to directories are forbidden, see bug #1669
                    raise OSError("Cannot call rmtree on a symbolic link")
                except OSError:
                    onerror(os.path.islink, path, sys.exc_info())
        finally:
            os.close(fd)
    else:
        return _rmtree_unsafe(path, onerror)

# Allow introspection of whether or not the hardening against symlink
# attacks is supported on the current platform
rmtree.avoids_symlink_attacks = _use_fd_functions
def _basename(path):
    """basename() variant that ignores a trailing slash, so the last real
    path component is returned even for directory paths."""
    stripped = path.rstrip(os.path.sep)
    return os.path.basename(stripped)
def move(src, dst):
    """Recursively move a file or directory to another location. This is
    similar to the Unix "mv" command. Return the file or directory's
    destination.

    If the destination is a directory or a symlink to a directory, the source
    is moved inside the directory. The destination path must not already
    exist.

    If the destination already exists but is not a directory, it may be
    overwritten depending on os.rename() semantics.

    If the destination is on our current filesystem, then rename() is used.
    Otherwise, src is copied to the destination and then removed. Symlinks are
    recreated under the new name if os.rename() fails because of cross
    filesystem renames.

    A lot more could be done here...  A look at a mv.c shows a lot of
    the issues this implementation glosses over.
    """
    real_dst = dst
    if os.path.isdir(dst):
        if _samefile(src, dst):
            # We might be on a case insensitive filesystem,
            # perform the rename anyway.
            os.rename(src, dst)
            return
        real_dst = os.path.join(dst, _basename(src))
        if os.path.exists(real_dst):
            raise Error("Destination path '%s' already exists" % real_dst)
    try:
        os.rename(src, real_dst)
    except OSError:
        # rename() failed (typically a cross-filesystem move): fall back
        # to copy-then-delete.
        if os.path.islink(src):
            linkto = os.readlink(src)
            os.symlink(linkto, real_dst)
            os.unlink(src)
        elif os.path.isdir(src):
            if _destinsrc(src, dst):
                raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst))
            copytree(src, real_dst, symlinks=True)
            rmtree(src)
        else:
            copy2(src, real_dst)
            os.unlink(src)
    return real_dst
def _destinsrc(src, dst):
    """Return True if dst lies inside the directory src."""
    src_abs = abspath(src)
    dst_abs = abspath(dst)
    # Normalize both to a trailing separator so prefix testing compares
    # whole path components, not partial names.
    if not src_abs.endswith(os.path.sep):
        src_abs += os.path.sep
    if not dst_abs.endswith(os.path.sep):
        dst_abs += os.path.sep
    return dst_abs.startswith(src_abs)
def _get_gid(name):
    """Returns a gid, given a group name (None if it cannot be resolved)."""
    if getgrnam is None or name is None:
        return None
    try:
        entry = getgrnam(name)
    except KeyError:
        return None
    # Index 2 of a grp struct is gr_gid.
    return entry[2] if entry is not None else None
def _get_uid(name):
    """Returns an uid, given a user name (None if it cannot be resolved)."""
    if getpwnam is None or name is None:
        return None
    try:
        entry = getpwnam(name)
    except KeyError:
        return None
    # Index 2 of a pwd struct is pw_uid.
    return entry[2] if entry is not None else None
def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
                  owner=None, group=None, logger=None):
    """Create a (possibly compressed) tar file from all the files under
    'base_dir'.

    'compress' must be "gzip" (the default), "bzip2", or None.

    'owner' and 'group' can be used to define an owner and a group for the
    archive that is being built. If not provided, the current owner and group
    will be used.

    The output tar file will be named 'base_name' + ".tar", possibly plus
    the appropriate compression extension (".gz", or ".bz2").

    Returns the output filename.
    """
    tar_compression = {'gzip': 'gz', None: ''}
    compress_ext = {'gzip': '.gz'}
    if _BZ2_SUPPORTED:
        tar_compression['bzip2'] = 'bz2'
        compress_ext['bzip2'] = '.bz2'
    # flags for compression program, each element of list will be an argument
    if compress is not None and compress not in compress_ext:
        raise ValueError("bad value for 'compress', or compression format not "
                         "supported : {0}".format(compress))
    archive_name = base_name + '.tar' + compress_ext.get(compress, '')
    archive_dir = os.path.dirname(archive_name)
    if not os.path.exists(archive_dir):
        if logger is not None:
            logger.info("creating %s", archive_dir)
        if not dry_run:
            os.makedirs(archive_dir)
    # creating the tarball
    if logger is not None:
        logger.info('Creating tar archive')
    uid = _get_uid(owner)
    gid = _get_gid(group)
    def _set_uid_gid(tarinfo):
        # tarfile filter: stamp the requested owner/group onto each member.
        if gid is not None:
            tarinfo.gid = gid
            tarinfo.gname = group
        if uid is not None:
            tarinfo.uid = uid
            tarinfo.uname = owner
        return tarinfo
    if not dry_run:
        # 'w|<comp>' opens a streaming write with the chosen compression.
        tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
        try:
            tar.add(base_dir, filter=_set_uid_gid)
        finally:
            tar.close()
    return archive_name
def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False):
    """Create zip_filename by spawning an external 'zip' utility.

    Fallback used by _make_zipfile() when the zipfile module is missing.
    Raises ExecError if the external command cannot be run.
    """
    # XXX see if we want to keep an external call here
    if verbose:
        zipoptions = "-r"
    else:
        zipoptions = "-rq"
    from distutils.errors import DistutilsExecError
    from distutils.spawn import spawn
    try:
        spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run)
    except DistutilsExecError:
        # XXX really should distinguish between "couldn't find
        # external 'zip' command" and "zip failed".
        # Bug fix: the '%' interpolation must happen inside the ExecError
        # call.  The original applied it to the exception object itself
        # ("raise ExecError(...) % zip_filename"), which raised a TypeError
        # instead of the intended ExecError.
        raise ExecError("unable to create zip file '%s': "
                        "could neither import the 'zipfile' module nor "
                        "find a standalone zip utility" % zip_filename)
def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
    """Create a zip file from all the files under 'base_dir'.

    The output zip file will be named 'base_name' + ".zip".  Uses either the
    "zipfile" Python module (if available) or the InfoZIP "zip" utility
    (if installed and found on the default search path).  If neither tool is
    available, raises ExecError.  Returns the name of the output zip
    file.
    """
    zip_filename = base_name + ".zip"
    archive_dir = os.path.dirname(base_name)
    if not os.path.exists(archive_dir):
        if logger is not None:
            logger.info("creating %s", archive_dir)
        if not dry_run:
            os.makedirs(archive_dir)
    # If zipfile module is not available, try spawning an external 'zip'
    # command.
    try:
        import zipfile
    except ImportError:
        zipfile = None
    if zipfile is None:
        _call_external_zip(base_dir, zip_filename, verbose, dry_run)
    else:
        if logger is not None:
            logger.info("creating '%s' and adding '%s' to it",
                        zip_filename, base_dir)
        if not dry_run:
            zip = zipfile.ZipFile(zip_filename, "w",
                                  compression=zipfile.ZIP_DEFLATED)
            # Walk the tree and store each regular file under its own path.
            for dirpath, dirnames, filenames in os.walk(base_dir):
                for name in filenames:
                    path = os.path.normpath(os.path.join(dirpath, name))
                    if os.path.isfile(path):
                        zip.write(path, path)
                        if logger is not None:
                            logger.info("adding '%s'", path)
            zip.close()
    return zip_filename
# Registry of archive writers: name -> (function, extra_args, description).
_ARCHIVE_FORMATS = {
    'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
    'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"),
    'zip': (_make_zipfile, [], "ZIP file")
    }
# bztar only when the bz2 module imported successfully at module load.
if _BZ2_SUPPORTED:
    _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')],
                                 "bzip2'ed tar-file")
def get_archive_formats():
    """Returns a list of supported formats for archiving and unarchiving.

    Each element of the returned sequence is a tuple (name, description)
    """
    return sorted((name, info[2]) for name, info in _ARCHIVE_FORMATS.items())
def register_archive_format(name, function, extra_args=None, description=''):
    """Registers an archive format.

    name is the name of the format. function is the callable that will be
    used to create archives. If provided, extra_args is a sequence of
    (name, value) tuples that will be passed as arguments to the callable.
    description can be provided to describe the format, and will be returned
    by the get_archive_formats() function.
    """
    extra_args = [] if extra_args is None else extra_args
    if not callable(function):
        raise TypeError('The %s object is not callable' % function)
    if not isinstance(extra_args, (tuple, list)):
        raise TypeError('extra_args needs to be a sequence')
    # Each extra argument must be a (name, value) pair.
    for element in extra_args:
        if not isinstance(element, (tuple, list)) or len(element) != 2:
            raise TypeError('extra_args elements are : (arg_name, value)')
    _ARCHIVE_FORMATS[name] = (function, extra_args, description)
def unregister_archive_format(name):
    """Remove the named format from the archive-format registry."""
    # pop() raises the same KeyError as `del` for an unknown name.
    _ARCHIVE_FORMATS.pop(name)
def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
                 dry_run=0, owner=None, group=None, logger=None):
    """Create an archive file (eg. zip or tar).

    'base_name' is the name of the file to create, minus any format-specific
    extension; 'format' is the archive format: one of "zip", "tar", "bztar"
    or "gztar".

    'root_dir' is a directory that will be the root directory of the
    archive; ie. we typically chdir into 'root_dir' before creating the
    archive.  'base_dir' is the directory where we start archiving from;
    ie. 'base_dir' will be the common prefix of all files and
    directories in the archive.  'root_dir' and 'base_dir' both default
    to the current directory.  Returns the name of the archive file.

    'owner' and 'group' are used when creating a tar archive. By default,
    uses the current owner and group.
    """
    # Remember the cwd so the chdir into root_dir can be undone afterwards.
    save_cwd = os.getcwd()
    if root_dir is not None:
        if logger is not None:
            logger.debug("changing into '%s'", root_dir)
        base_name = os.path.abspath(base_name)
        if not dry_run:
            os.chdir(root_dir)
    if base_dir is None:
        base_dir = os.curdir
    kwargs = {'dry_run': dry_run, 'logger': logger}
    try:
        format_info = _ARCHIVE_FORMATS[format]
    except KeyError:
        raise ValueError("unknown archive format '%s'" % format)
    func = format_info[0]
    for arg, val in format_info[1]:
        kwargs[arg] = val
    # The zip writer does not accept owner/group arguments.
    if format != 'zip':
        kwargs['owner'] = owner
        kwargs['group'] = group
    try:
        filename = func(base_name, base_dir, **kwargs)
    finally:
        if root_dir is not None:
            if logger is not None:
                logger.debug("changing back to '%s'", save_cwd)
            os.chdir(save_cwd)
    return filename
def get_unpack_formats():
    """Returns a list of supported formats for unpacking.

    Each element of the returned sequence is a tuple
    (name, extensions, description)
    """
    return sorted((name, info[0], info[3])
                  for name, info in _UNPACK_FORMATS.items())
def _check_unpack_options(extensions, function, extra_args):
    """Checks what gets registered as an unpacker.

    Raises RegistryError if any extension is already claimed by another
    format, and TypeError if `function` is not callable.
    """
    # first make sure no other unpacker is registered for this extension
    existing_extensions = {}
    for name, info in _UNPACK_FORMATS.items():
        for ext in info[0]:
            existing_extensions[ext] = name
    for extension in extensions:
        if extension in existing_extensions:
            msg = '%s is already registered for "%s"'
            raise RegistryError(msg % (extension,
                                       existing_extensions[extension]))
    if not callable(function):
        raise TypeError('The registered function must be a callable')
def register_unpack_format(name, extensions, function, extra_args=None,
                           description=''):
    """Registers an unpack format.

    `name` is the name of the format. `extensions` is a list of extensions
    corresponding to the format.

    `function` is the callable that will be
    used to unpack archives. The callable will receive archives to unpack.
    If it's unable to handle an archive, it needs to raise a ReadError
    exception.

    If provided, `extra_args` is a sequence of
    (name, value) tuples that will be passed as arguments to the callable.
    description can be provided to describe the format, and will be returned
    by the get_unpack_formats() function.
    """
    if extra_args is None:
        extra_args = []
    # Validates extension uniqueness and callability before registering.
    _check_unpack_options(extensions, function, extra_args)
    _UNPACK_FORMATS[name] = extensions, function, extra_args, description
def unregister_unpack_format(name):
    """Remove the named format from the unpack-format registry."""
    # pop() raises the same KeyError as `del` for an unknown name.
    _UNPACK_FORMATS.pop(name)
def _ensure_directory(path):
    """Create the parent directory of `path` if it does not already exist."""
    parent = os.path.dirname(path)
    if not os.path.isdir(parent):
        os.makedirs(parent)
def _unpack_zipfile(filename, extract_dir):
    """Unpack zip `filename` to `extract_dir`
    """
    try:
        import zipfile
    except ImportError:
        raise ReadError('zlib not supported, cannot unpack this archive.')
    if not zipfile.is_zipfile(filename):
        raise ReadError("%s is not a zip file" % filename)
    zip = zipfile.ZipFile(filename)
    try:
        for info in zip.infolist():
            name = info.filename
            # don't extract absolute paths or ones with .. in them
            if name.startswith('/') or '..' in name:
                continue
            target = os.path.join(extract_dir, *name.split('/'))
            if not target:
                continue
            _ensure_directory(target)
            # Entries ending in '/' are directories; _ensure_directory above
            # already created the path, so only write regular files.
            if not name.endswith('/'):
                # file
                data = zip.read(info.filename)
                f = open(target, 'wb')
                try:
                    f.write(data)
                finally:
                    f.close()
                del data
    finally:
        zip.close()
def _unpack_tarfile(filename, extract_dir):
    """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
    """
    try:
        tarobj = tarfile.open(filename)
    except tarfile.TarError:
        raise ReadError(
            "%s is not a compressed or uncompressed tar file" % filename)
    # NOTE(review): unlike _unpack_zipfile, extractall() here does not
    # sanitize member names; members with absolute paths or '..' components
    # can escape extract_dir -- consider filtering for untrusted archives.
    try:
        tarobj.extractall(extract_dir)
    finally:
        tarobj.close()
# Registry of unpackers: name -> (extensions, function, extra_args,
# description).
_UNPACK_FORMATS = {
    'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"),
    'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"),
    'zip': (['.zip'], _unpack_zipfile, [], "ZIP file")
    }
# bztar only when the bz2 module imported successfully at module load.
if _BZ2_SUPPORTED:
    _UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [],
                                "bzip2'ed tar-file")
def _find_unpack_format(filename):
    """Return the registered unpack-format name whose extension matches
    `filename`, or None if no format matches."""
    for fmt_name, fmt_info in _UNPACK_FORMATS.items():
        if any(filename.endswith(ext) for ext in fmt_info[0]):
            return fmt_name
    return None
def unpack_archive(filename, extract_dir=None, format=None):
    """Unpack an archive.

    `filename` is the name of the archive.

    `extract_dir` is the name of the target directory, where the archive
    is unpacked. If not provided, the current working directory is used.

    `format` is the archive format: one of "zip", "tar", or "gztar". Or any
    other registered format. If not provided, unpack_archive will use the
    filename extension and see if an unpacker was registered for that
    extension.

    In case none is found, a ValueError is raised.
    """
    if extract_dir is None:
        extract_dir = os.getcwd()
    if format is not None:
        # Explicit format: an unknown name is a ValueError.
        try:
            format_info = _UNPACK_FORMATS[format]
        except KeyError:
            raise ValueError("Unknown unpack format '{0}'".format(format))
        func = format_info[1]
        func(filename, extract_dir, **dict(format_info[2]))
    else:
        # we need to look at the registered unpackers supported extensions
        format = _find_unpack_format(filename)
        if format is None:
            raise ReadError("Unknown archive format '{0}'".format(filename))
        func = _UNPACK_FORMATS[format][1]
        kwargs = dict(_UNPACK_FORMATS[format][2])
        func(filename, extract_dir, **kwargs)
# disk_usage is defined only where a backend exists: statvfs on POSIX,
# the private nt._getdiskusage on Windows; elsewhere it is absent.
if hasattr(os, 'statvfs'):
    __all__.append('disk_usage')
    _ntuple_diskusage = collections.namedtuple('usage', 'total used free')
    def disk_usage(path):
        """Return disk usage statistics about the given path.

        Returned value is a named tuple with attributes 'total', 'used' and
        'free', which are the amount of total, used and free space, in bytes.
        """
        st = os.statvfs(path)
        # f_frsize is the fundamental block size; multiply to get bytes.
        free = st.f_bavail * st.f_frsize
        total = st.f_blocks * st.f_frsize
        used = (st.f_blocks - st.f_bfree) * st.f_frsize
        return _ntuple_diskusage(total, used, free)
elif os.name == 'nt':
    import nt
    __all__.append('disk_usage')
    _ntuple_diskusage = collections.namedtuple('usage', 'total used free')
    def disk_usage(path):
        """Return disk usage statistics about the given path.

        Returned values is a named tuple with attributes 'total', 'used' and
        'free', which are the amount of total, used and free space, in bytes.
        """
        total, free = nt._getdiskusage(path)
        used = total - free
        return _ntuple_diskusage(total, used, free)
def chown(path, user=None, group=None):
    """Change owner user and group of the given path.

    user and group can be the uid/gid or the user/group names, and in that
    case, they are converted to their respective uid/gid.
    """
    if user is None and group is None:
        raise ValueError("user and/or group must be set")
    # -1 tells os.chown() to leave that id unchanged.
    uid = -1 if user is None else user
    # user can either be an int (the uid) or a string (the system username)
    if isinstance(user, str):
        uid = _get_uid(user)
        if uid is None:
            raise LookupError("no such user: {!r}".format(user))
    gid = -1 if group is None else group
    if group is not None and not isinstance(group, int):
        gid = _get_gid(group)
        if gid is None:
            raise LookupError("no such group: {!r}".format(group))
    os.chown(path, uid, gid)
def get_terminal_size(fallback=(80, 24)):
    """Get the size of the terminal window.

    For each of the two dimensions, the environment variable, COLUMNS
    and LINES respectively, is checked. If the variable is defined and
    the value is a positive integer, it is used.

    When COLUMNS or LINES is not defined, which is the common case,
    the terminal connected to sys.__stdout__ is queried
    by invoking os.get_terminal_size.

    If the terminal size cannot be successfully queried, either because
    the system doesn't support querying, or because we are not
    connected to a terminal, the value given in fallback parameter
    is used. Fallback defaults to (80, 24) which is the default
    size used by many terminal emulators.

    The value returned is a named tuple of type os.terminal_size.
    """
    def _from_env(var):
        # Missing or non-integer values count as "not set" (0).
        try:
            return int(os.environ[var])
        except (KeyError, ValueError):
            return 0
    columns = _from_env('COLUMNS')
    lines = _from_env('LINES')
    # Only query the terminal when the environment didn't settle both.
    if columns <= 0 or lines <= 0:
        try:
            size = os.get_terminal_size(sys.__stdout__.fileno())
        except (NameError, OSError):
            size = os.terminal_size(fallback)
        if columns <= 0:
            columns = size.columns
        if lines <= 0:
            lines = size.lines
    return os.terminal_size((columns, lines))
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
    """Given a command, mode, and a PATH string, return the path which
    conforms to the given mode on the PATH, or None if there is no such
    file.

    `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
    of os.environ.get("PATH"), or can be overridden with a custom search
    path.
    """
    def _access_check(fn, mode):
        # A candidate must exist, be accessible with `mode`, and not be a
        # directory (directories pass the os.access check on Windows).
        return (os.path.exists(fn) and os.access(fn, mode)
                and not os.path.isdir(fn))
    # A command with a directory part is checked directly instead of being
    # searched for on PATH; this covers relative invocations like ./script.
    if os.path.dirname(cmd):
        return cmd if _access_check(cmd, mode) else None
    if path is None:
        path = os.environ.get("PATH", os.defpath)
    if not path:
        return None
    search_dirs = path.split(os.pathsep)
    if sys.platform == "win32":
        # The current directory takes precedence on Windows.
        if os.curdir not in search_dirs:
            search_dirs.insert(0, os.curdir)
        # PATHEXT lists the executable suffixes to try on Windows.
        pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
        # If cmd already ends with one of the suffixes, test it as given;
        # otherwise try cmd with every suffix appended.
        if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
            candidates = [cmd]
        else:
            candidates = [cmd + ext for ext in pathext]
    else:
        # Elsewhere there is nothing like PATHEXT; try cmd as-is.
        candidates = [cmd]
    seen = set()
    for directory in search_dirs:
        normdir = os.path.normcase(directory)
        if normdir in seen:
            continue
        seen.add(normdir)
        for candidate in candidates:
            full = os.path.join(directory, candidate)
            if _access_check(full, mode):
                return full
    return None
| gpl-3.0 |
msmolens/VTK | ThirdParty/Twisted/twisted/internet/_posixstdio.py | 23 | 4683 | # -*- test-case-name: twisted.test.test_stdio -*-
"""Standard input/out/err support.
Future Plans::
support for stderr, perhaps
Rewrite to use the reactor instead of an ad-hoc mechanism for connecting
protocols to transport.
Maintainer: James Y Knight
"""
from zope.interface import implements
from twisted.internet import process, error, interfaces
from twisted.python import log, failure
class PipeAddress(object):
    """Opaque address for the stdio pipe transport.

    Returned by L{StandardIO.getPeer} and L{StandardIO.getHost}; a pipe has
    no meaningful network address, so this object carries no state.
    """
    implements(interfaces.IAddress)
class StandardIO(object):
    """An L{ITransport} connecting a protocol to this process's stdin/stdout.

    Wraps a C{ProcessReader} on C{stdin} and a C{ProcessWriter} on C{stdout}
    and forwards their events to C{proto}, supporting half-close of either
    direction independently.
    """
    implements(interfaces.ITransport, interfaces.IProducer,
               interfaces.IConsumer, interfaces.IHalfCloseableDescriptor)
    # The two halves of the transport; each becomes None once its side of the
    # connection has been lost.
    _reader = None
    _writer = None
    disconnected = False
    disconnecting = False
    def __init__(self, proto, stdin=0, stdout=1, reactor=None):
        """Wire C{proto} up to the given stdin/stdout file descriptors."""
        if reactor is None:
            from twisted.internet import reactor
        self.protocol = proto
        self._writer = process.ProcessWriter(reactor, self, 'write', stdout)
        self._reader = process.ProcessReader(reactor, self, 'read', stdin)
        self._reader.startReading()
        self.protocol.makeConnection(self)
    # ITransport
    # XXX Actually, see #3597.
    def loseWriteConnection(self):
        # Half-close: shut down only the write side of the transport.
        if self._writer is not None:
            self._writer.loseConnection()
    def write(self, data):
        if self._writer is not None:
            self._writer.write(data)
    def writeSequence(self, data):
        if self._writer is not None:
            self._writer.writeSequence(data)
    def loseConnection(self):
        # Full close: close the writer normally but only stop reading on the
        # reader side, so the peer is not hit with a SIGPIPE.
        self.disconnecting = True
        if self._writer is not None:
            self._writer.loseConnection()
        if self._reader is not None:
            # Don't loseConnection, because we don't want to SIGPIPE it.
            self._reader.stopReading()
    def getPeer(self):
        return PipeAddress()
    def getHost(self):
        return PipeAddress()
    # Callbacks from process.ProcessReader/ProcessWriter
    def childDataReceived(self, fd, data):
        self.protocol.dataReceived(data)
    def childConnectionLost(self, fd, reason):
        # One half went away.  A clean ConnectionDone is treated as a
        # half-close of that side; any other failure tears everything down.
        if self.disconnected:
            return
        if reason.value.__class__ == error.ConnectionDone:
            # Normal close
            if fd == 'read':
                self._readConnectionLost(reason)
            else:
                self._writeConnectionLost(reason)
        else:
            self.connectionLost(reason)
    def connectionLost(self, reason):
        """Tear down both halves and notify the protocol exactly once."""
        self.disconnected = True
        # Make sure to cleanup the other half
        _reader = self._reader
        _writer = self._writer
        protocol = self.protocol
        self._reader = self._writer = None
        self.protocol = None
        if _writer is not None and not _writer.disconnected:
            _writer.connectionLost(reason)
        if _reader is not None and not _reader.disconnected:
            _reader.connectionLost(reason)
        try:
            protocol.connectionLost(reason)
        except:
            log.err()
    def _writeConnectionLost(self, reason):
        # Write side closed cleanly; either finish disconnecting, or tell a
        # half-close-aware protocol about it.
        self._writer=None
        if self.disconnecting:
            self.connectionLost(reason)
            return
        p = interfaces.IHalfCloseableProtocol(self.protocol, None)
        if p:
            try:
                p.writeConnectionLost()
            except:
                log.err()
                self.connectionLost(failure.Failure())
    def _readConnectionLost(self, reason):
        # Read side closed cleanly; a protocol that cannot handle half-close
        # gets a full connectionLost instead.
        self._reader=None
        p = interfaces.IHalfCloseableProtocol(self.protocol, None)
        if p:
            try:
                p.readConnectionLost()
            except:
                log.err()
                self.connectionLost(failure.Failure())
        else:
            self.connectionLost(reason)
    # IConsumer
    def registerProducer(self, producer, streaming):
        # If the write side is already gone there is nothing to produce to.
        if self._writer is None:
            producer.stopProducing()
        else:
            self._writer.registerProducer(producer, streaming)
    def unregisterProducer(self):
        if self._writer is not None:
            self._writer.unregisterProducer()
    # IProducer
    def stopProducing(self):
        self.loseConnection()
    def pauseProducing(self):
        if self._reader is not None:
            self._reader.pauseProducing()
    def resumeProducing(self):
        if self._reader is not None:
            self._reader.resumeProducing()
    def stopReading(self):
        """Compatibility only, don't use. Call pauseProducing."""
        self.pauseProducing()
    def startReading(self):
        """Compatibility only, don't use. Call resumeProducing."""
        self.resumeProducing()
| bsd-3-clause |
Seynen/egfrd | samples/mapk/run_ecell_model4_all.py | 6 | 1268 | import os
import math
import numpy
MODEL_FILE0 = 'model4-0.eml'
MODEL_FILE1 = 'model4.eml'
ESS_FILE = 'run_ecell_model4.py'
N_K_total = 300
# Register jobs: one E-Cell session per (half-life ti, KPP fraction) pair.
jobs = {}
for ti_str in ['0', '1e-7', '3e-7',
               '1e-6', '3e-6',
               '1e-5', '3e-5',
               '1e-4', '3e-4',
               '1e-3', '3e-3',
               '1e-2', '3e-2',
               '1e-1', '3e-1',
               '1e-0']:
    ti = float(ti_str)
    if ti == 0:
        # ti == 0 means no rebinding inhibition: use the reduced model,
        # which takes no KI parameter.
        MODEL_FILE = MODEL_FILE0
    else:
        MODEL_FILE = MODEL_FILE1
    for kpp_ratio in [0.11, 0.66]:
        N_KPP = N_K_total * kpp_ratio
        N_K = N_K_total - N_KPP
        if ti != 0:
            # Convert the half-life ti into a first-order rate constant.
            # Guarded by ti != 0: the original computed KI unconditionally
            # and raised ZeroDivisionError on the very first ('0') entry.
            KI = math.log(2) / ti
            parameter_dict = { 'MODEL_FILE': MODEL_FILE,
                               'N_KPP': N_KPP, 'N_K': N_K, 'KI': KI }
        else:
            parameter_dict = { 'MODEL_FILE': MODEL_FILE,
                               'N_KPP': N_KPP, 'N_K': N_K }
        jobID = registerEcellSession(ESS_FILE, parameter_dict, [MODEL_FILE, ])
        jobs[jobID] = [ti_str, kpp_ratio]
run()
import sys
for jobID in jobs.keys():
    #print " --- job id = %s ---" % jobID
    sys.stdout.write('%s %s' % (jobs[jobID][0],
                                getStdout(jobID)))
| gpl-2.0 |
dracos/django | django/contrib/gis/db/backends/postgis/schema.py | 33 | 2647 | from django.db.backends.postgresql.schema import DatabaseSchemaEditor
class PostGISSchemaEditor(DatabaseSchemaEditor):
    """Schema editor with special handling for PostGIS geometry/raster columns."""
    geom_index_type = 'GIST'
    geom_index_ops_nd = 'GIST_GEOMETRY_OPS_ND'
    rast_index_wrapper = 'ST_ConvexHull(%s)'
    sql_alter_column_to_3d = "ALTER COLUMN %(column)s TYPE %(type)s USING ST_Force3D(%(column)s)::%(type)s"
    sql_alter_column_to_2d = "ALTER COLUMN %(column)s TYPE %(type)s USING ST_Force2D(%(column)s)::%(type)s"

    def geo_quote_name(self, name):
        """Quote *name* via the backend's geometry-aware quoting."""
        return self.connection.ops.geo_quote_name(name)

    def _field_should_be_indexed(self, model, field):
        # Spatial fields opt into indexing through their `spatial_index`
        # attribute; everything else defers to the base class.
        if not getattr(field, 'spatial_index', False):
            return super()._field_should_be_indexed(model, field)
        return True

    def _create_index_sql(self, model, fields, **kwargs):
        """Build a GiST index statement for a single spatial column."""
        # Only single-column geometry/raster indexes get special treatment.
        if len(fields) != 1 or not hasattr(fields[0], 'geodetic'):
            return super()._create_index_sql(model, fields, **kwargs)
        field = fields[0]
        index_column = self.quote_name(field.column)
        if field.geom_type == 'RASTER':
            # Raster indexes are built on the convex hull of the raster.
            index_column = self.rast_index_wrapper % index_column
        elif field.dim > 2 and not field.geography:
            # Multidimensional geometries use the fast "nd" operator class.
            index_column = '%s %s' % (index_column, self.geom_index_ops_nd)
        return self.sql_create_index % {
            'name': self.quote_name('%s_%s_id' % (model._meta.db_table, field.column)),
            'table': self.quote_name(model._meta.db_table),
            'using': 'USING %s' % self.geom_index_type,
            'columns': index_column,
            'extra': '',
        }

    def _alter_column_type_sql(self, table, old_field, new_field, new_type):
        """
        Special case when dimension changed.
        """
        # Non-geometry fields (no `dim` attribute) use the stock behavior.
        if not hasattr(old_field, 'dim') or not hasattr(new_field, 'dim'):
            return super()._alter_column_type_sql(table, old_field, new_field, new_type)
        dims = (old_field.dim, new_field.dim)
        if dims == (2, 3):
            sql_alter = self.sql_alter_column_to_3d
        elif dims == (3, 2):
            sql_alter = self.sql_alter_column_to_2d
        else:
            sql_alter = self.sql_alter_column_type
        fragment = (
            sql_alter % {
                'column': self.quote_name(new_field.column),
                'type': new_type,
            },
            [],
        )
        return (fragment, [])
| bsd-3-clause |
gsehub/edx-platform | lms/djangoapps/support/views/refund.py | 14 | 4967 | """
Views for manual refunds in the student support UI.
This interface is used by the support team to track refunds
entered manually in CyberSource (our payment gateway).
DEPRECATION WARNING:
We are currently in the process of replacing lms/djangoapps/shoppingcart
with an E-Commerce service that supports automatic refunds. Once that
transition is complete, we can remove this view.
"""
import logging
from django import forms
from django.contrib import messages
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.views.generic.edit import FormView
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from openedx.core.lib.courses import clean_course_id
from student.models import CourseEnrollment
from support.decorators import require_support_permission
log = logging.getLogger(__name__)
class RefundForm(forms.Form):
    """
    Form for manual refunds.

    Validation resolves the email/course pair to an enrollment and its
    purchased verified certificate item; the hidden ``confirmed`` field
    drives a two-step look-up-then-confirm flow (see ``is_valid``).
    """
    user = forms.EmailField(label=_("Email Address"), required=True)
    course_id = forms.CharField(label=_("Course ID"), required=True)
    # Hidden flag distinguishing the look-up step from the confirm step.
    confirmed = forms.CharField(widget=forms.HiddenInput, required=False)
    def clean_user(self):
        """
        Resolve the email address to a User, or raise a field error.
        """
        user_email = self.cleaned_data['user']
        try:
            user = User.objects.get(email=user_email)
        except User.DoesNotExist:
            raise forms.ValidationError(_("User not found"))
        return user
    def clean_course_id(self):
        """
        Validate the course id
        """
        return clean_course_id(self)
    def clean(self):
        """
        Cross-field validation: locate the enrollment and its purchased
        verified-mode certificate item.  Enrollments still inside the refund
        window are rejected — those should use the normal refund path.
        """
        user, course_id = self.cleaned_data.get('user'), self.cleaned_data.get('course_id')
        if user and course_id:
            self.cleaned_data['enrollment'] = enrollment = CourseEnrollment.get_or_create_enrollment(user, course_id)
            if enrollment.refundable():
                msg = _("Course {course_id} not past the refund window.").format(course_id=course_id)
                raise forms.ValidationError(msg)
            try:
                self.cleaned_data['cert'] = enrollment.certificateitem_set.filter(
                    mode='verified',
                    status='purchased'
                )[0]
            except IndexError:
                msg = _("No order found for {user} in course {course_id}").format(user=user, course_id=course_id)
                raise forms.ValidationError(msg)
        return self.cleaned_data
    def is_valid(self):
        """
        Returns whether form is valid, implementing the two-step flow:
        the first valid submission only flips ``confirmed`` and redisplays;
        only the second (confirmed) submission is reported valid.
        """
        is_valid = super(RefundForm, self).is_valid()
        if is_valid and self.cleaned_data.get('confirmed') != 'true':
            # this is a two-step form: first look up the data, then issue the refund.
            # first time through, set the hidden "confirmed" field to true and then redisplay the form
            # second time through, do the unenrollment/refund.
            data = dict(self.data.items())
            self.cleaned_data['confirmed'] = data['confirmed'] = 'true'
            self.data = data
            is_valid = False
        return is_valid
class RefundSupportView(FormView):
    """
    Displays and processes the manual-refund form for the support team.
    """
    template_name = 'support/refund.html'
    form_class = RefundForm
    success_url = '/support/'

    @method_decorator(require_support_permission)
    def dispatch(self, *args, **kwargs):
        # Every request is gated behind the support-team permission check.
        return super(RefundSupportView, self).dispatch(*args, **kwargs)

    def get_context_data(self, **kwargs):
        """
        Expose the looked-up cert/enrollment to the template once the first
        (look-up) step of the form has been confirmed.
        """
        context = super(RefundSupportView, self).get_context_data(**kwargs)
        cleaned = getattr(context['form'], 'cleaned_data', {})
        if cleaned.get('confirmed') == 'true':
            context['cert'] = cleaned.get('cert')
            context['enrollment'] = cleaned.get('enrollment')
        return context

    def form_valid(self, form):
        """
        Unenroll the student and record the manual refund.
        """
        cleaned = form.cleaned_data
        user = cleaned['user']
        course_id = cleaned['course_id']
        enrollment = cleaned['enrollment']
        cert = cleaned['cert']
        # Mark the enrollment as refundable-by-support before deactivating it.
        enrollment.can_refund = True
        enrollment.update_enrollment(is_active=False)
        log.info(u"%s manually refunded %s %s", self.request.user, user, course_id)
        messages.success(
            self.request,
            _("Unenrolled {user} from {course_id}").format(
                user=user,
                course_id=course_id
            )
        )
        messages.success(
            self.request,
            _("Refunded {cost} for order id {order_id}").format(
                cost=cert.unit_cost,
                order_id=cert.order.id
            )
        )
        return HttpResponseRedirect('/support/refund/')
| agpl-3.0 |
skyshaw/googletest | test/gtest_test_utils.py | 674 | 10826 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
  import subprocess
  _SUBPROCESS_MODULE_AVAILABLE = True
except:
  # Pre-2.4 Pythons have no subprocess module; fall back to popen2
  # (POSIX-only) and remember which one we got.
  import popen2
  _SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
# Environment variable telling Google Test binaries where to write XML output.
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'

# Platform detection, used for .exe suffixing and path handling below.
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]

# The environment variable for specifying the path to the premature-exit file.
PREMATURE_EXIT_FILE_ENV_VAR = 'TEST_PREMATURE_EXIT_FILE'
# Module-level copy of the environment; tests mutate this copy (via
# SetEnvVar) rather than the real os.environ.
environ = os.environ.copy()


def SetEnvVar(env_var, value):
  """Sets env_var to value in the module-level environment copy.

  Passing None for value removes env_var from the copy (a no-op if it is
  not present).
  """
  if value is None:
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase  # pylint: disable-msg=C6409

# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
             'build_dir': os.path.dirname(sys.argv[0])}
# Guards _ParseAndStripGTestFlags() so repeated calls are no-ops.
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
  """Parses and strips Google Test flags from argv. This is idempotent.

  For each flag in _flag_map, precedence is: command line > environment
  variable (flag name upper-cased) > default.  Matched command-line flags
  are removed from argv so unittest.main() never sees them.
  """
  # Suppresses the lint complaint about a global variable since we need it
  # here to maintain module-wide state.
  global _gtest_flags_are_parsed  # pylint: disable-msg=W0603
  if _gtest_flags_are_parsed:
    return
  _gtest_flags_are_parsed = True
  for flag in _flag_map:
    # The environment variable overrides the default value.
    if flag.upper() in os.environ:
      _flag_map[flag] = os.environ[flag.upper()]
    # The command line flag overrides the environment variable.
    i = 1  # Skips the program name.
    while i < len(argv):
      prefix = '--' + flag + '='
      if argv[i].startswith(prefix):
        # Record the value and strip the flag from argv in place.
        _flag_map[flag] = argv[i][len(prefix):]
        del argv[i]
        break
      else:
        # We don't increment i in case we just found a --gtest_* flag
        # and removed it from argv.
        i += 1
def GetFlag(flag):
  """Returns the value of the given flag.

  Raises KeyError if flag is not one of the names in _flag_map.
  """
  # In case GetFlag() is called before Main(), we always call
  # _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
  # are parsed (the call is idempotent).
  _ParseAndStripGTestFlags(sys.argv)
  return _flag_map[flag]
def GetSourceDir():
  """Returns the absolute path of the directory where the .py files are.

  Controlled by the --source_dir flag / SOURCE_DIR environment variable.
  """
  return os.path.abspath(GetFlag('source_dir'))
def GetBuildDir():
  """Returns the absolute path of the directory where the test binaries are.

  Controlled by the --build_dir flag / BUILD_DIR environment variable.
  """
  return os.path.abspath(GetFlag('build_dir'))
# Lazily-created shared temporary directory, removed at interpreter exit.
_temp_dir = None


def _RemoveTempDir():
  # atexit hook: best-effort cleanup of the shared temp directory.
  if _temp_dir:
    shutil.rmtree(_temp_dir, ignore_errors=True)

atexit.register(_RemoveTempDir)


def GetTempDir():
  """Returns a directory for temporary files, creating it on first use."""
  global _temp_dir
  if _temp_dir is None:
    _temp_dir = tempfile.mkdtemp()
  return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
  """Returns the absolute path of the test binary given its name.

  The function will print a message and abort the program if the resulting file
  doesn't exist.

  Args:
    executable_name: name of the test binary that the test script runs.
    build_dir:       directory where to look for executables, by default
                     the result of GetBuildDir().

  Returns:
    The absolute path of the test binary.
  """
  path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
                                      executable_name))
  # Windows/Cygwin binaries carry an .exe suffix that callers typically
  # omit; append it so the existence check below tests the real file name.
  if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
    path += '.exe'
  if not os.path.exists(path):
    message = (
        'Unable to find the test binary "%s". Please make sure to provide\n'
        'a path to the binary via the --build_dir flag or the BUILD_DIR\n'
        'environment variable.' % path)
    print >> sys.stderr, message
    sys.exit(1)
  return path
def GetExitStatus(exit_code):
  """Returns the argument to exit(), or -1 if exit() wasn't called.

  Args:
    exit_code: the result value of os.system(command).
  """
  if os.name == 'nt':
    # On Windows, os.system() returns the exit() argument directly and the
    # os.WEXITSTATUS() machinery does not apply.
    return exit_code
  # On Unix the result is a wait()-style status word that must be decoded.
  if os.WIFEXITED(exit_code):
    return os.WEXITSTATUS(exit_code)
  return -1
class Subprocess:
  """Runs a child process and records its outcome (exit status and output)."""
  def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
    """Changes into a specified directory, if provided, and executes a command.

    Restores the old directory afterwards.

    Args:
      command:        The command to run, in the form of sys.argv.
      working_dir:    The directory to change into.
      capture_stderr: Determines whether to capture stderr in the output member
                      or to discard it.
      env:            Dictionary with environment to pass to the subprocess.

    Returns:
      An object that represents outcome of the executed process. It has the
      following attributes:
        terminated_by_signal   True iff the child process has been terminated
                               by a signal.
        signal                 Sygnal that terminated the child process.
        exited                 True iff the child process exited normally.
        exit_code              The code with which the child process exited.
        output                 Child process's stdout and stderr output
                               combined in a string.
    """
    # The subprocess module is the preferrable way of running programs
    # since it is available and behaves consistently on all platforms,
    # including Windows. But it is only available starting in python 2.4.
    # In earlier python versions, we revert to the popen2 module, which is
    # available in python 2.0 and later but doesn't provide required
    # functionality (Popen4) under Windows. This allows us to support Mac
    # OS X 10.4 Tiger, which has python 2.3 installed.
    if _SUBPROCESS_MODULE_AVAILABLE:
      if capture_stderr:
        stderr = subprocess.STDOUT
      else:
        stderr = subprocess.PIPE
      p = subprocess.Popen(command,
                           stdout=subprocess.PIPE, stderr=stderr,
                           cwd=working_dir, universal_newlines=True, env=env)
      # communicate returns a tuple with the file obect for the child's
      # output.
      self.output = p.communicate()[0]
      self._return_code = p.returncode
    else:
      # Legacy popen2 path (Python < 2.4, POSIX only).
      old_dir = os.getcwd()

      def _ReplaceEnvDict(dest, src):
        # Changes made by os.environ.clear are not inheritable by child
        # processes until Python 2.6. To produce inheritable changes we have
        # to delete environment items with the del statement.
        for key in dest.keys():
          del dest[key]
        dest.update(src)

      # When 'env' is not None, backup the environment variables and replace
      # them with the passed 'env'. When 'env' is None, we simply use the
      # current 'os.environ' for compatibility with the subprocess.Popen
      # semantics used above.
      if env is not None:
        old_environ = os.environ.copy()
        _ReplaceEnvDict(os.environ, env)
      try:
        if working_dir is not None:
          os.chdir(working_dir)
        if capture_stderr:
          p = popen2.Popen4(command)
        else:
          p = popen2.Popen3(command)
        p.tochild.close()
        self.output = p.fromchild.read()
        ret_code = p.wait()
      finally:
        os.chdir(old_dir)
        # Restore the old environment variables
        # if they were replaced.
        if env is not None:
          _ReplaceEnvDict(os.environ, old_environ)
      # Converts ret_code to match the semantics of
      # subprocess.Popen.returncode.
      if os.WIFSIGNALED(ret_code):
        self._return_code = -os.WTERMSIG(ret_code)
      else:  # os.WIFEXITED(ret_code) should return True here.
        self._return_code = os.WEXITSTATUS(ret_code)
    # Translate the Popen-style return code (negative == killed by signal)
    # into the public terminated_by_signal/exited/exit_code attributes.
    if self._return_code < 0:
      self.terminated_by_signal = True
      self.exited = False
      self.signal = -self._return_code
    else:
      self.terminated_by_signal = False
      self.exited = True
      self.exit_code = self._return_code
def Main():
  """Runs the unit test."""
  # _ParseAndStripGTestFlags() must run before unittest.main(), which would
  # otherwise be confused by the --gtest_* flags still present in sys.argv.
  _ParseAndStripGTestFlags(sys.argv)
  # The tested binaries should not be writing XML output files unless the
  # script explicitly instructs them to.
  # TODO(vladl@google.com): Move this into Subprocess when we implement
  # passing environment into it as a parameter.
  os.environ.pop(GTEST_OUTPUT_VAR_NAME, None)
  _test_module.main()
| bsd-3-clause |
flacjacket/sympy | sympy/core/tests/test_match.py | 2 | 13253 | from sympy import (abc, Add, cos, Derivative, diff, exp, Float, Function,
I, Integer, log, Mul, oo, Poly, Rational, S, sin, sqrt, Symbol, symbols,
var, Wild
)
from sympy.utilities.pytest import XFAIL
def test_symbol():
    """Matching against a bare Symbol and a Rational literal."""
    x = Symbol('x')
    a,b,c,p,q = map(Wild, 'abcpq')
    e = x
    assert e.match(x) == {}
    assert e.matches(x) == {}
    assert e.match(a) == {a: x}
    e = Rational(5)
    assert e.match(c) == {c: 5}
    assert e.match(e) == {}
    # A different concrete expression must not match at all.
    assert e.match(e+1) == None
def test_add():
    """Wild matching inside sums, including absorption of leftover terms."""
    x,y,a,b,c = map(Symbol, 'xyabc')
    p,q,r = map(Wild, 'pqr')
    e = a+b
    assert e.match(p+b) == {p: a}
    assert e.match(p+a) == {p: b}
    e = 1+b
    assert e.match(p+b) == {p: 1}
    e = a+b+c
    assert e.match(a+p+c) == {p: b}
    assert e.match(b+p+c) == {p: a}
    e = a+b+c+x
    assert e.match(a+p+x+c) == {p: b}
    assert e.match(b+p+c+x) == {p: a}
    assert e.match(b) == None
    # An unconstrained Wild absorbs all remaining terms of the sum.
    assert e.match(b+p) == {p: a+c+x}
    assert e.match(a+p+c) == {p: b+x}
    assert e.match(b+p+c) == {p: a+x}
    e = 4*x+5
    assert e.match(4*x+p) == {p: 5}
    assert e.match(3*x+p) == {p: x+5}
    assert e.match(p*x+5) == {p: 4}
def test_power():
    """Matching base and exponent Wilds against Pow expressions."""
    x,y,a,b,c = map(Symbol, 'xyabc')
    p,q,r = map(Wild, 'pqr')
    e = (x+y)**a
    assert e.match(p**q) == {p: x+y, q: a}
    assert e.match(p**p) == None
    e = (x+y)**(x+y)
    assert e.match(p**p) == {p: x+y}
    assert e.match(p**q) == {p: x+y, q: x+y}
    e = (2*x)**2
    # (2*x)**2 auto-evaluates to 4*x**2, hence the factored match below.
    assert e.match(p*q**r) == {p: 4, q: x, r: 2}
    e = Integer(1)
    assert e.match(x**p) == {p: 0}
def test_match_exclude():
    """Wilds with exclude lists force clean coefficient-style matches."""
    x = Symbol('x')
    y = Symbol('y')
    p = Wild("p", exclude=[x, y])
    q = Wild("q", exclude=[x, y])
    r = Wild("r", exclude=[x, y])
    e = 3/(4*x+5)
    assert e.match(3/(p*x+q)) == {p: 4, q: 5}
    e = 3/(4*x+5)
    assert e.match(p/(q*x+r)) == {p: 3, q: 4, r: 5}
    e = 2/(x+1)
    assert e.match(p/(q*x+r)) == {p: 2, q: 1, r: 1}
    e = 1/(x+1)
    assert e.match(p/(q*x+r)) == {p: 1, q: 1, r: 1}
    e = 4*x+5
    assert e.match(p*x+q) == {p: 4, q: 5}
    e = 4*x+5*y+6
    assert e.match(p*x+q*y+r) == {p: 4, q: 5, r: 6}
def test_mul():
    """Wild matching inside products, including rational absorption."""
    x,y,a,b,c = map(Symbol, 'xyabc')
    p,q = map(Wild, 'pq')
    e = 4*x
    assert e.match(p*x) == {p: 4}
    # An unconstrained coefficient Wild may absorb a quotient.
    assert e.match(p*y) == {p: 4*x/y}
    e = a*x*b*c
    assert e.match(p*x) == {p: a*b*c}
    assert e.match(c*p*x) == {p: a*b}
    e = (a+b)*(a+c)
    assert e.match((p+b)*(p+c)) == {p: a}
    e = x
    assert e.match(p*x) == {p: 1}
    e = exp(x)
    assert e.match(x**p*exp(x*q)) == {p: 0, q: 1}
    e = I*Poly(x, x)
    assert e.match(I*p) == {p: Poly(x, x)}
def test_mul_noncommutative():
    """Commutative vs noncommutative Wilds against noncommutative symbols.

    A commutative Wild (u, v) must never bind a noncommutative factor;
    a noncommutative Wild (w) collects the noncommutative tail while the
    commutative Wild takes the scalar coefficient.
    """
    x, y = symbols('x y')
    A, B = symbols('A B', commutative=False)
    u, v = symbols('u v', cls=Wild)
    w = Wild('w', commutative=False)
    assert (u*v).matches(x) in ({v: x, u: 1}, {u: x, v: 1})
    assert (u*v).matches(x*y) in ({v: y, u: x}, {u: y, v: x})
    assert (u*v).matches(A) == None
    assert (u*v).matches(A*B) == None
    assert (u*v).matches(x*A) == None
    assert (u*v).matches(x*y*A) == None
    assert (u*v).matches(x*A*B) == None
    assert (u*v).matches(x*y*A*B) == None
    assert (v*w).matches(x) == None
    assert (v*w).matches(x*y) == None
    assert (v*w).matches(A) == {w: A, v: 1}
    assert (v*w).matches(A*B) == {w: A*B, v: 1}
    assert (v*w).matches(x*A) == {w: A, v: x}
    assert (v*w).matches(x*y*A) == {w: A, v: x*y}
    assert (v*w).matches(x*A*B) == {w: A*B, v: x}
    assert (v*w).matches(x*y*A*B) == {w: A*B, v: x*y}
    assert (v*w).matches(-x) == None
    assert (v*w).matches(-x*y) == None
    assert (v*w).matches(-A) == {w: A, v: -1}
    assert (v*w).matches(-A*B) == {w: A*B, v: -1}
    assert (v*w).matches(-x*A) == {w: A, v: -x}
    assert (v*w).matches(-x*y*A) == {w: A, v: -x*y}
    assert (v*w).matches(-x*A*B) == {w: A*B, v: -x}
    assert (v*w).matches(-x*y*A*B) == {w: A*B, v: -x*y}
def test_complex():
    """Matching expressions involving I; exclude=[I] resolves ambiguity."""
    a,b,c = map(Symbol, 'abc')
    x,y = map(Wild, 'xy')
    assert (1+I).match(x+I) == {x : 1}
    assert (a+I).match(x+I) == {x : a}
    assert (2*I).match(x*I) == {x : 2}
    assert (a*I).match(x*I) == {x : a}
    assert (a*I).match(x*y) == {x : I, y : a}
    assert (2*I).match(x*y) == {x : 2, y : I}
    #Result is ambiguous, so we need to use Wild's exclude keyword
    x = Wild('x', exclude=[I])
    y = Wild('y', exclude=[I])
    assert (a+b*I).match(x+y*I) == {x : a, y : b}
def test_functions():
    """WildFunction matches an entire applied function as one unit."""
    from sympy.core.function import WildFunction
    x = Symbol('x')
    g = WildFunction('g')
    p = Wild('p')
    q = Wild('q')
    f = cos(5*x)
    notf = x
    assert f.match(p*cos(q*x)) == {p: 1, q: 5}
    # g binds the whole applied function cos(5*x), not the cos head.
    assert f.match(p*g) == {p: 1, g: cos(5*x)}
    assert notf.match(g) == None
@XFAIL
def test_functions_X1():
    """XFAIL: matching a WildFunction applied to a wild argument.

    The symbols are constructed locally: the previous version referenced
    f, p, g, q and x, which were locals of test_functions(), so it raised
    NameError instead of exercising the (still unsupported) match.
    """
    from sympy.core.function import WildFunction
    x = Symbol('x')
    g = WildFunction('g')
    p = Wild('p')
    q = Wild('q')
    f = cos(5*x)
    assert f.match(p*g(q*x)) == {p: 1, g: cos, q: 5}
def test_interface():
    """Basic match() round-trips for each arithmetic head (Add/Mul/Pow)."""
    x,y = map(Symbol, 'xy')
    p,q = map(Wild, 'pq')
    assert (x+1).match(p+1) == {p: x}
    assert (x*3).match(p*3) == {p: x}
    assert (x**3).match(p**3) == {p: x}
    assert (x*cos(y)).match(p*cos(q)) == {p: x, q: y}
    # Symmetric patterns may bind either way round.
    assert (x*y).match(p*q) in [{p:x, q:y}, {p:y, q:x}]
    assert (x+y).match(p+q) in [{p:x, q:y}, {p:y, q:x}]
    assert (x*y+1).match(p*q) in [{p:1, q:1+x*y}, {p:1+x*y, q:1}]
def test_derivative1():
    """Matching Derivative expressions with Wild coefficients."""
    x,y = map(Symbol, 'xy')
    p,q = map(Wild, 'pq')
    f = Function('f',nargs=1)
    fd = Derivative(f(x), x)
    assert fd.match(p) == {p: fd}
    assert (fd+1).match(p+1) == {p: fd}
    assert (fd).match(fd) == {}
    assert (3*fd).match(p*fd) is not None
    # exclude=[x] forces p and q to be pure numeric coefficients.
    p = Wild("p", exclude=[x])
    q = Wild("q", exclude=[x])
    assert (3*fd-1).match(p*fd + q) == {p: 3, q: -1}
def test_derivative_bug1():
    """Regression: matches() with pre-substituted bindings must fail cleanly."""
    f = Function("f")
    x = Symbol("x")
    a = Wild("a", exclude=[f, x])
    b = Wild("b", exclude=[f])
    pattern = a * Derivative(f(x), x, x) + b
    expr = Derivative(f(x), x)+x**2
    # With b already bound, the second-order pattern cannot match the
    # first-order derivative.
    d1 = {b: x**2}
    d2 = pattern.xreplace(d1).matches(expr, d1)
    assert d2 == None
def test_derivative2():
    """Derivative order must be matched exactly, never coerced."""
    f = Function("f")
    x = Symbol("x")
    a = Wild("a", exclude=[f, x])
    b = Wild("b", exclude=[f])
    e = Derivative(f(x), x)
    assert e.match(Derivative(f(x), x)) == {}
    assert e.match(Derivative(f(x), x, x)) == None
    e = Derivative(f(x), x, x)
    assert e.match(Derivative(f(x), x)) == None
    assert e.match(Derivative(f(x), x, x)) == {}
    e = Derivative(f(x), x)+x**2
    assert e.match(a*Derivative(f(x), x) + b) == {a: 1, b: x**2}
    assert e.match(a*Derivative(f(x), x, x) + b) == None
    e = Derivative(f(x), x, x)+x**2
    assert e.match(a*Derivative(f(x), x) + b) == None
    assert e.match(a*Derivative(f(x), x, x) + b) == {a: 1, b: x**2}
def test_match_deriv_bug1():
    """Regression: matching a second-derivative quotient pattern."""
    n = Function('n')
    l = Function('l')
    x = Symbol('x')
    p = Wild('p')
    e = diff(l(x), x)/x - diff(diff(n(x), x), x)/2 - \
        diff(n(x), x)**2/4 + diff(n(x), x)*diff(l(x), x)/4
    e = e.subs(n(x), -l(x)).doit()
    # e should be a pure multiple of (x*exp(-l(x)))'' / (x*exp(-l(x))).
    t = x*exp(-l(x))
    t2 = t.diff(x, x)/t
    assert e.match( (p*t2).expand() ) == {p: -Rational(1)/2}
def test_match_bug2():
    """The result of a multi-Wild match substitutes back to the original."""
    x,y = map(Symbol, 'xy')
    p,q,r = map(Wild, 'pqr')
    res = (x+y).match(p+q+r)
    assert (p+q+r).subs(res) == x+y
def test_match_bug3():
    """A pattern without a coefficient Wild must not absorb one."""
    x,a,b = map(Symbol, 'xab')
    p = Wild('p')
    assert (b*x*exp(a*x)).match(x*exp(p*x)) == None
def test_match_bug4():
    """Matching x against -p*x binds p to -1."""
    x = Symbol('x')
    p = Wild('p')
    e = x
    assert e.match(-p*x) == {p: -1}
def test_match_bug5():
    """Matching -x against -p*x binds p to 1 (signs cancel)."""
    x = Symbol('x')
    p = Wild('p')
    e = -x
    assert e.match(-p*x) == {p: 1}
def test_match_bug6():
    """A rational coefficient is derived to satisfy the pattern: 3*p == 1."""
    x = Symbol('x')
    p = Wild('p')
    e = x
    assert e.match(3*p*x) == {p: Rational(1)/3}
def test_behavior1():
    """exclude=[x] rejects an x-dependent binding; a plain Wild accepts it."""
    x = Symbol('x')
    p = Wild('p')
    e = 3*x**2
    a = Wild('a', exclude = [x])
    assert e.match(a*x) == None
    assert e.match(p*x) == {p: 3*x}
def test_behavior2():
    """An unconstrained Wild may take a non-polynomial value (3/x) that an
    exclude=[x] Wild must refuse."""
    x = Symbol('x')
    p = Wild('p')
    e = Rational(6)
    assert e.match(2*p) == {p: 3}
    e = 3*x + 3 + 6/x
    a = Wild('a', exclude = [x])
    assert e.expand().match(a*x**2 + a*x + 2*a) == None
    assert e.expand().match(p*x**2 + p*x + 2*p) == {p: 3/x}
def test_match_polynomial():
    """Extracting polynomial coefficients with excluded Wilds, including
    missing (zero) terms."""
    x = Symbol('x')
    a = Wild('a', exclude=[x])
    b = Wild('b', exclude=[x])
    c = Wild('c', exclude=[x])
    d = Wild('d', exclude=[x])
    eq = 4*x**3 + 3*x**2 + 2*x + 1
    pattern = a*x**3 + b*x**2 + c*x + d
    assert eq.match(pattern) == {a: 4, b: 3, c: 2, d: 1}
    assert (eq-3*x**2).match(pattern) == {a: 4, b: 0, c: 2, d: 1}
    assert (x + sqrt(2) + 3).match(a + b*x + c*x**2) == \
        {b: 1, a: sqrt(2) + 3, c: 0}
def test_exclude():
    """exclude lists veto any binding containing a forbidden subexpression,
    including function heads like sin."""
    x,y,a = map(Symbol, 'xya')
    p = Wild('p', exclude=[1,x])
    q = Wild('q', exclude=[x])
    r = Wild('r', exclude=[sin,y])
    assert sin(x).match(r) == None
    assert cos(y).match(r) == None
    e = 3*x**2 + y*x + a
    assert e.match(p*x**2 + q*x + r) == {p: 3, q: y, r: a}
    e = x+1
    assert e.match(x+p) == None
    assert e.match(p+1) == None
    assert e.match(x+1+p) == {p: 0}
    e = cos(x) + 5*sin(y)
    assert e.match(r) == None
    assert e.match(cos(y) + r) == None
    assert e.match(r + p*sin(q)) == {r: cos(x), p: 5, q: y}
def test_floats():
    """Float atoms survive matching unchanged (no rationalization)."""
    a,b = map(Wild, 'ab')
    # evaluate=False keeps the unevaluated cos(0.12345) for b to bind.
    e = cos(0.12345, evaluate=False)**2
    r = e.match(a*cos(b)**2)
    assert r == {a: 1, b: Float(0.12345)}
def test_Derivative_bug1():
    """Regression: f(x).diff(x) matches a*Derivative(f(x), x) + b."""
    f = Function("f")
    x = abc.x
    a = Wild("a", exclude=[f(x)])
    b = Wild("b", exclude=[f(x)])
    eq = f(x).diff(x)
    assert eq.match(a*Derivative(f(x), x) + b) == {a: 1, b: 0}
def test_match_wild_wild():
    """Wild-vs-Wild matching respects exclude constraints on the pattern."""
    p = Wild('p')
    q = Wild('q')
    r = Wild('r')
    assert p.match(q+r) in [ {q: p, r: 0} , {q: 0, r: p} ]
    assert p.match(q*r) in [ {q: p, r: 1} , {q: 1, r: p} ]
    # If q may not bind p, the whole of p must go into r.
    p = Wild('p')
    q = Wild('q', exclude=[p])
    r = Wild('r')
    assert p.match(q+r) == {q: 0, r: p}
    assert p.match(q*r) == {q: 1, r: p}
    # If neither side may bind p, no match is possible.
    p = Wild('p')
    q = Wild('q', exclude=[p])
    r = Wild('r', exclude=[p])
    assert p.match(q+r) == None
    assert p.match(q*r) == None
def test_combine_inverse():
    """_combine_inverse edge cases involving I and oo."""
    # NOTE: var() also injects x and y into this module's namespace as a
    # side effect, which other tests in this file may rely on.
    x, y = var("x y")
    assert Mul._combine_inverse(x*I*y, x*I) == y
    assert Mul._combine_inverse(x*I*y, y*I) == x
    assert Mul._combine_inverse(oo*I*y, y*I) == oo
    assert Mul._combine_inverse(oo*I*y, oo*I) == y
    assert Add._combine_inverse(oo, oo) == S(0)
    assert Add._combine_inverse(oo*I, oo*I) == S(0)
def test_issue_674():
    """Matching c*sin(N*phi)**l * r**A * log(r)**B style patterns.

    x is defined locally: the previous version used an undefined x and only
    worked when test_combine_inverse() had already run, because var("x y")
    there injects x into the module namespace — i.e. the test was
    order-dependent and raised NameError when run standalone.
    """
    x, z, phi, r = symbols('x z phi r')
    c, A, B, N = symbols('c A B N', cls=Wild)
    l = Wild('l', exclude=(0,))
    eq = z * sin(2*phi) * r**7
    matcher = c * sin(phi*N)**l * r**A * log(r)**B
    assert eq.match(matcher) == {c: z, l: 1, N: 2, A: 7, B: 0}
    assert (-eq).match(matcher) == {c: -z, l: 1, N: 2, A: 7, B: 0}
    assert (x*eq).match(matcher) == {c: x*z, l: 1, N: 2, A: 7, B: 0}
    assert (-7*x*eq).match(matcher) == {c: -7*x*z, l: 1, N: 2, A: 7, B: 0}
    matcher = c*sin(phi*N)**l * r**A
    assert eq.match(matcher) == {c: z, l: 1, N: 2, A: 7}
    assert (-eq).match(matcher) == {c: -z, l: 1, N: 2, A: 7}
    assert (x*eq).match(matcher) == {c: x*z, l: 1, N: 2, A: 7}
    assert (-7*x*eq).match(matcher) == {c: -7*x*z, l: 1, N: 2, A: 7}
def test_issue_784():
    """Collecting the gamma terms of a Gaussian log-density expression."""
    from sympy.abc import gamma, mu, pi, x
    f = (-gamma * (x - mu)**2 - log(gamma) + log(2*pi))/2
    a, b, c = symbols('a b c', cls=Wild, exclude=(gamma,))
    assert f.match(a * log(gamma) + b * gamma + c) == \
        {a: -S(1)/2, b: -(x - mu)**2/2, c: log(2*pi)/2}
    # After collect(gamma) the same structure must still be recovered.
    assert f.expand().collect(gamma).match(a * log(gamma) + b * gamma + c) == \
        {a: -S(1)/2, b: (-(x - mu)**2/2).expand(), c: (log(2*pi)/2).expand()}
def test_issue_1319():
    """A product-rule derivative matches with unit coefficients and zero rest."""
    x = symbols('x')
    a, b, c = symbols('a b c', cls=Wild, exclude=(x,))
    f, g = symbols('f g', cls=Function)
    eq = diff(g(x)*f(x).diff(x), x)
    assert eq.match(g(x).diff(x)*f(x).diff(x) + g(x)*f(x).diff(x, x) + c) == {c: 0}
    assert eq.match(a*g(x).diff(x)*f(x).diff(x) + b*g(x)*f(x).diff(x, x) + c) == {a: 1, b: 1, c: 0}
def test_issue_1601():
    """a*f(x) + b pattern: a is 0 or 1 and b collects everything else."""
    f = Function('f')
    x = symbols('x')
    a, b = symbols('a b', cls=Wild, exclude=(f(x),))
    p = a*f(x) + b
    eq1 = sin(x)
    eq2 = f(x) + sin(x)
    eq3 = f(x) + x + sin(x)
    eq4 = x + sin(x)
    assert eq1.match(p) == {a: 0, b: sin(x)}
    assert eq2.match(p) == {a: 1, b: sin(x)}
    assert eq3.match(p) == {a: 1, b: x + sin(x)}
    assert eq4.match(p) == {a: 0, b: x + sin(x)}
def test_issue_2069():
    """Match +/-x and +/-2*x against wild products with an f(x)**c factor."""
    a, b, c = symbols('a b c', cls=Wild)
    x = symbols('x')
    f = Function('f')
    # Each expression is coeff*x; the same four patterns are checked for all.
    for expr, coeff in ((x, 1), (-x, -1), (2*x, 2), (-2*x, -2)):
        assert expr.match(a) == {a: coeff*x}
        assert expr.match(a*f(x)**c) == {a: coeff*x, c: 0}
        assert expr.match(a*b) == {a: coeff, b: x}
        assert expr.match(a*b*f(x)**c) == {a: coeff, b: x, c: 0}
| bsd-3-clause |
zhimin711/nova | nova/virt/fake.py | 8 | 18644 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A fake (in-memory) hypervisor+api.
Allows nova testing w/o a hypervisor. This module also documents the
semantics of real hypervisor connections.
"""
import collections
import contextlib
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import versionutils
from nova.compute import arch
from nova.compute import hv_type
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova.console import type as ctype
from nova import exception
from nova.i18n import _LW
from nova.virt import diagnostics
from nova.virt import driver
from nova.virt import hardware
from nova.virt import virtapi
CONF = cfg.CONF
CONF.import_opt('host', 'nova.netconf')
LOG = logging.getLogger(__name__)
_FAKE_NODES = None
def set_nodes(nodes):
    """Override the module-wide list of fake hypervisor nodes.

    Affects FakeDriver.get_available_nodes() and
    FakeDriver.get_available_resource().  Call restore_nodes() to undo
    the override (usually from tearDown()).
    """
    global _FAKE_NODES
    _FAKE_NODES = nodes
def restore_nodes():
    """Reset the fake node list to the single configured host.

    Undoes a previous set_nodes(); usually called from tearDown().
    """
    global _FAKE_NODES
    _FAKE_NODES = [CONF.host]
class FakeInstance(object):
    """Minimal stand-in for a compute instance: name, power state, uuid.

    Supports dict-style access (``inst['name']``) because parts of the
    compute code index instance objects by key.
    """

    def __init__(self, name, state, uuid):
        self.name = name
        self.state = state
        self.uuid = uuid

    def __getitem__(self, key):
        # Mirror attribute access so inst['x'] == inst.x.
        return getattr(self, key)

    def __repr__(self):
        # Aid debugging of test failures that print fake instances.
        return '<FakeInstance name=%s state=%s uuid=%s>' % (
            self.name, self.state, self.uuid)
class Resources(object):
    """Track total and consumed vcpu/memory/disk for the fake host."""
    # Usage counters start at zero via class attributes; claim()/release()
    # create the per-instance counterparts on first use.
    vcpus = 0
    memory_mb = 0
    local_gb = 0
    vcpus_used = 0
    memory_mb_used = 0
    local_gb_used = 0

    def __init__(self, vcpus=8, memory_mb=8000, local_gb=500):
        self.vcpus = vcpus
        self.memory_mb = memory_mb
        self.local_gb = local_gb

    def claim(self, vcpus=0, mem=0, disk=0):
        """Consume the given amounts from the pool."""
        self.vcpus_used += vcpus
        self.memory_mb_used += mem
        self.local_gb_used += disk

    def release(self, vcpus=0, mem=0, disk=0):
        """Return the given amounts to the pool."""
        self.vcpus_used -= vcpus
        self.memory_mb_used -= mem
        self.local_gb_used -= disk

    def dump(self):
        """Return totals and usage as a plain dict (host_status keys)."""
        field_names = ('vcpus', 'memory_mb', 'local_gb',
                       'vcpus_used', 'memory_mb_used', 'local_gb_used')
        return {name: getattr(self, name) for name in field_names}
class FakeDriver(driver.ComputeDriver):
    # Feature flags this driver advertises to the compute manager.
    capabilities = {
        "has_imagecache": True,
        "supports_recreate": True,
        "supports_migrate_to_same_host": True
    }
    # Since we don't have a real hypervisor, pretend we have lots of
    # disk and ram so this driver can be used to test large instances.
    vcpus = 1000
    memory_mb = 800000
    local_gb = 600000
    """Fake hypervisor driver."""
    # Most methods below are deliberate no-ops: only enough state is kept
    # (instances, resources, mounts, interfaces) to satisfy the compute
    # manager and the driver interface.
    def __init__(self, virtapi, read_only=False):
        super(FakeDriver, self).__init__(virtapi)
        # uuid -> FakeInstance for every "running" instance on this host.
        self.instances = {}
        self.resources = Resources(
            vcpus=self.vcpus,
            memory_mb=self.memory_mb,
            local_gb=self.local_gb)
        # Static host facts; merged with resource usage in
        # get_available_resource().
        self.host_status_base = {
            'hypervisor_type': 'fake',
            'hypervisor_version': versionutils.convert_version_to_int('1.0'),
            'hypervisor_hostname': CONF.host,
            'cpu_info': {},
            'disk_available_least': 0,
            'supported_instances': [(arch.X86_64, hv_type.FAKE, vm_mode.HVM)],
            'numa_topology': None,
        }
        self._mounts = {}
        self._interfaces = {}
        if not _FAKE_NODES:
            set_nodes([CONF.host])
    def init_host(self, host):
        return
    def list_instances(self):
        return [self.instances[uuid].name for uuid in self.instances.keys()]
    def list_instance_uuids(self):
        return self.instances.keys()
    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks."""
        pass
    def unplug_vifs(self, instance, network_info):
        """Unplug VIFs from networks."""
        pass
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None):
        uuid = instance.uuid
        state = power_state.RUNNING
        flavor = instance.flavor
        # Account for the flavor's resources so usage reporting stays
        # consistent with the instances tracked in self.instances.
        self.resources.claim(
            vcpus=flavor.vcpus,
            mem=flavor.memory_mb,
            disk=flavor.root_gb)
        fake_instance = FakeInstance(instance.name, state, uuid)
        self.instances[uuid] = fake_instance
    def snapshot(self, context, instance, image_id, update_task_state):
        if instance.uuid not in self.instances:
            raise exception.InstanceNotRunning(instance_id=instance.uuid)
        update_task_state(task_state=task_states.IMAGE_UPLOADING)
    def reboot(self, context, instance, network_info, reboot_type,
               block_device_info=None, bad_volumes_callback=None):
        pass
    def get_host_ip_addr(self):
        return '192.168.0.1'
    def set_admin_password(self, instance, new_pass):
        pass
    def inject_file(self, instance, b64_path, b64_contents):
        pass
    def resume_state_on_host_boot(self, context, instance, network_info,
                                  block_device_info=None):
        pass
    def rescue(self, context, instance, network_info, image_meta,
               rescue_password):
        pass
    def unrescue(self, instance, network_info):
        pass
    def poll_rebooting_instances(self, timeout, instances):
        pass
    def migrate_disk_and_power_off(self, context, instance, dest,
                                   flavor, network_info,
                                   block_device_info=None,
                                   timeout=0, retry_interval=0):
        pass
    def finish_revert_migration(self, context, instance, network_info,
                                block_device_info=None, power_on=True):
        pass
    def post_live_migration_at_destination(self, context, instance,
                                           network_info,
                                           block_migration=False,
                                           block_device_info=None):
        pass
    def power_off(self, instance, timeout=0, retry_interval=0):
        pass
    def power_on(self, context, instance, network_info,
                 block_device_info=None):
        pass
    def trigger_crash_dump(self, instance):
        pass
    def soft_delete(self, instance):
        pass
    def restore(self, instance):
        pass
    def pause(self, instance):
        pass
    def unpause(self, instance):
        pass
    def suspend(self, context, instance):
        pass
    def resume(self, context, instance, network_info, block_device_info=None):
        pass
    def destroy(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None):
        key = instance.uuid
        if key in self.instances:
            # Give back what spawn() claimed, then forget the instance.
            flavor = instance.flavor
            self.resources.release(
                vcpus=flavor.vcpus,
                mem=flavor.memory_mb,
                disk=flavor.root_gb)
            del self.instances[key]
        else:
            LOG.warning(_LW("Key '%(key)s' not in instances '%(inst)s'"),
                        {'key': key,
                         'inst': self.instances}, instance=instance)
    def cleanup(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None, destroy_vifs=True):
        pass
    def attach_volume(self, context, connection_info, instance, mountpoint,
                      disk_bus=None, device_type=None, encryption=None):
        """Attach the disk to the instance at mountpoint using info."""
        instance_name = instance.name
        if instance_name not in self._mounts:
            self._mounts[instance_name] = {}
        self._mounts[instance_name][mountpoint] = connection_info
    def detach_volume(self, connection_info, instance, mountpoint,
                      encryption=None):
        """Detach the disk attached to the instance."""
        try:
            del self._mounts[instance.name][mountpoint]
        except KeyError:
            pass
    def swap_volume(self, old_connection_info, new_connection_info,
                    instance, mountpoint, resize_to):
        """Replace the disk attached to the instance."""
        instance_name = instance.name
        if instance_name not in self._mounts:
            self._mounts[instance_name] = {}
        self._mounts[instance_name][mountpoint] = new_connection_info
    def attach_interface(self, instance, image_meta, vif):
        if vif['id'] in self._interfaces:
            raise exception.InterfaceAttachFailed(
                instance_uuid=instance.uuid)
        self._interfaces[vif['id']] = vif
    def detach_interface(self, instance, vif):
        try:
            del self._interfaces[vif['id']]
        except KeyError:
            raise exception.InterfaceDetachFailed(
                instance_uuid=instance.uuid)
    def get_info(self, instance):
        if instance.uuid not in self.instances:
            raise exception.InstanceNotFound(instance_id=instance.uuid)
        i = self.instances[instance.uuid]
        return hardware.InstanceInfo(state=i.state,
                                     max_mem_kb=0,
                                     mem_kb=0,
                                     num_cpu=2,
                                     cpu_time_ns=0)
    def get_diagnostics(self, instance):
        # Canned counters; values are arbitrary but stable for tests.
        return {'cpu0_time': 17300000000,
                'memory': 524288,
                'vda_errors': -1,
                'vda_read': 262144,
                'vda_read_req': 112,
                'vda_write': 5778432,
                'vda_write_req': 488,
                'vnet1_rx': 2070139,
                'vnet1_rx_drop': 0,
                'vnet1_rx_errors': 0,
                'vnet1_rx_packets': 26701,
                'vnet1_tx': 140208,
                'vnet1_tx_drop': 0,
                'vnet1_tx_errors': 0,
                'vnet1_tx_packets': 662,
                }
    def get_instance_diagnostics(self, instance):
        # Same canned numbers as get_diagnostics(), in object form.
        diags = diagnostics.Diagnostics(state='running', driver='fake',
                hypervisor_os='fake-os', uptime=46664, config_drive=True)
        diags.add_cpu(time=17300000000)
        diags.add_nic(mac_address='01:23:45:67:89:ab',
                      rx_packets=26701,
                      rx_octets=2070139,
                      tx_octets=140208,
                      tx_packets = 662)
        diags.add_disk(id='fake-disk-id',
                       read_bytes=262144,
                       read_requests=112,
                       write_bytes=5778432,
                       write_requests=488)
        diags.memory_details.maximum = 524288
        return diags
    def get_all_bw_counters(self, instances):
        """Return bandwidth usage counters for each interface on each
           running VM.
        """
        bw = []
        return bw
    def get_all_volume_usage(self, context, compute_host_bdms):
        """Return usage info for volumes attached to vms on
           a given host.
        """
        volusage = []
        return volusage
    def get_host_cpu_stats(self):
        stats = {'kernel': 5664160000000,
                 'idle': 1592705190000000,
                 'user': 26728850000000,
                 'iowait': 6121490000000}
        stats['frequency'] = 800
        return stats
    def block_stats(self, instance, disk_id):
        return [0, 0, 0, 0, None]
    def get_console_output(self, context, instance):
        return 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE'
    # The console accessors below all return canned connection details.
    def get_vnc_console(self, context, instance):
        return ctype.ConsoleVNC(internal_access_path='FAKE',
                                host='fakevncconsole.com',
                                port=6969)
    def get_spice_console(self, context, instance):
        return ctype.ConsoleSpice(internal_access_path='FAKE',
                                  host='fakespiceconsole.com',
                                  port=6969,
                                  tlsPort=6970)
    def get_rdp_console(self, context, instance):
        return ctype.ConsoleRDP(internal_access_path='FAKE',
                                host='fakerdpconsole.com',
                                port=6969)
    def get_serial_console(self, context, instance):
        return ctype.ConsoleSerial(internal_access_path='FAKE',
                                   host='fakerdpconsole.com',
                                   port=6969)
    def get_mks_console(self, context, instance):
        return ctype.ConsoleMKS(internal_access_path='FAKE',
                                host='fakemksconsole.com',
                                port=6969)
    def get_console_pool_info(self, console_type):
        return {'address': '127.0.0.1',
                'username': 'fakeuser',
                'password': 'fakepassword'}
    def refresh_security_group_rules(self, security_group_id):
        return True
    def refresh_instance_security_rules(self, instance):
        return True
    def get_available_resource(self, nodename):
        """Updates compute manager resource info on ComputeNode table.
           Since we don't have a real hypervisor, pretend we have lots of
           disk and ram.
        """
        cpu_info = collections.OrderedDict([
            ('arch', 'x86_64'),
            ('model', 'Nehalem'),
            ('vendor', 'Intel'),
            ('features', ['pge', 'clflush']),
            ('topology', {
                'cores': 1,
                'threads': 1,
                'sockets': 4,
                }),
        ])
        if nodename not in _FAKE_NODES:
            return {}
        # Start from the static base and fold in current resource usage.
        host_status = self.host_status_base.copy()
        host_status.update(self.resources.dump())
        host_status['hypervisor_hostname'] = nodename
        host_status['host_hostname'] = nodename
        host_status['host_name_label'] = nodename
        host_status['cpu_info'] = jsonutils.dumps(cpu_info)
        return host_status
    def ensure_filtering_rules_for_instance(self, instance, network_info):
        return
    def get_instance_disk_info(self, instance, block_device_info=None):
        return
    def live_migration(self, context, instance, dest,
                       post_method, recover_method, block_migration=False,
                       migrate_data=None):
        # "Migration" completes instantly: just invoke the success callback.
        post_method(context, instance, dest, block_migration,
                    migrate_data)
        return
    def live_migration_force_complete(self, instance):
        return
    def live_migration_abort(self, instance):
        return
    def check_can_live_migrate_destination_cleanup(self, context,
                                                   dest_check_data):
        return
    def check_can_live_migrate_destination(self, context, instance,
                                           src_compute_info, dst_compute_info,
                                           block_migration=False,
                                           disk_over_commit=False):
        return {}
    def check_can_live_migrate_source(self, context, instance,
                                      dest_check_data, block_device_info=None):
        return
    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance,
                         block_device_info=None, power_on=True):
        return
    def confirm_migration(self, migration, instance, network_info):
        return
    def pre_live_migration(self, context, instance, block_device_info,
                           network_info, disk_info, migrate_data=None):
        return
    def unfilter_instance(self, instance, network_info):
        return
    def _test_remove_vm(self, instance_uuid):
        """Removes the named VM, as if it crashed. For testing."""
        self.instances.pop(instance_uuid)
    def host_power_action(self, action):
        """Reboots, shuts down or powers up the host."""
        return action
    def host_maintenance_mode(self, host, mode):
        """Start/Stop host maintenance window. On start, it triggers
           guest VMs evacuation.
        """
        if not mode:
            return 'off_maintenance'
        return 'on_maintenance'
    def set_host_enabled(self, enabled):
        """Sets the specified host's ability to accept new instances."""
        if enabled:
            return 'enabled'
        return 'disabled'
    def get_volume_connector(self, instance):
        return {'ip': CONF.my_block_storage_ip,
                'initiator': 'fake',
                'host': 'fakehost'}
    def get_available_nodes(self, refresh=False):
        return _FAKE_NODES
    def instance_on_disk(self, instance):
        return False
    def quiesce(self, context, instance, image_meta):
        pass
    def unquiesce(self, context, instance, image_meta):
        pass
class FakeVirtAPI(virtapi.VirtAPI):
    # VirtAPI stub whose event-wait context manager completes immediately,
    # so callers never block on instance events in tests.
    @contextlib.contextmanager
    def wait_for_instance_event(self, instance, event_names, deadline=300,
                                error_callback=None):
        # NOTE(danms): Don't actually wait for any events, just
        # fall through
        yield
class SmallFakeDriver(FakeDriver):
    # Resource-limited variant of FakeDriver used by the api sample tests.
    # The api samples expect specific cpu memory and disk sizes. In order to
    # allow the FakeVirt driver to be used outside of the unit tests, provide
    # a separate class that has the values expected by the api samples. So
    # instead of requiring new samples every time those
    # values are adjusted allow them to be overwritten here.
    vcpus = 1
    memory_mb = 8192
    local_gb = 1028
| apache-2.0 |
clstl/servo | components/script/dom/bindings/codegen/parser/tests/test_attr_sequence_type.py | 276 | 1626 | def WebIDLTest(parser, harness):
threw = False
try:
parser.parse("""
interface AttrSequenceType {
attribute sequence<object> foo;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Attribute type must not be a sequence type")
parser.reset()
threw = False
try:
parser.parse("""
interface AttrUnionWithSequenceType {
attribute (sequence<object> or DOMString) foo;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Attribute type must not be a union with a sequence member type")
parser.reset()
threw = False
try:
parser.parse("""
interface AttrNullableUnionWithSequenceType {
attribute (sequence<object>? or DOMString) foo;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Attribute type must not be a union with a nullable sequence "
"member type")
parser.reset()
threw = False
try:
parser.parse("""
interface AttrUnionWithUnionWithSequenceType {
attribute ((sequence<object> or DOMString) or AttrUnionWithUnionWithSequenceType) foo;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Attribute type must not be a union type with a union member "
"type that has a sequence member type")
| mpl-2.0 |
bravominski/PennApps2015-HeartMates | venv/lib/python2.7/site-packages/pip/_vendor/html5lib/trie/py.py | 1323 | 1775 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from bisect import bisect_left
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
    """Pure-Python Trie backed by a sorted key list and binary search.

    Prefix queries cache the (lo, hi) bisection window of the last prefix
    so successive queries on extensions of the same prefix only re-search
    a narrow slice of the key list.
    """
    def __init__(self, data):
        if not all(isinstance(x, text_type) for x in data.keys()):
            raise TypeError("All keys must be strings")
        self._data = data
        self._keys = sorted(data.keys())
        self._cachestr = ""
        self._cachepoints = (0, len(data))

    def __contains__(self, key):
        return key in self._data

    def __len__(self):
        return len(self._data)

    def __iter__(self):
        return iter(self._data)

    def __getitem__(self, key):
        return self._data[key]

    def keys(self, prefix=None):
        """Return the set of keys starting with *prefix* (all keys if None)."""
        if prefix is None or prefix == "" or not self._keys:
            return set(self._keys)
        if prefix.startswith(self._cachestr):
            lo, hi = self._cachepoints
            start = i = bisect_left(self._keys, prefix, lo, hi)
        else:
            start = i = bisect_left(self._keys, prefix)
        keys = set()
        if start == len(self._keys):
            return keys
        # Bug fix: bound the scan. Without the ``i < len(self._keys)``
        # check, a prefix that matches through the final key ran past the
        # end of the list and raised IndexError.
        while i < len(self._keys) and self._keys[i].startswith(prefix):
            keys.add(self._keys[i])
            i += 1
        self._cachestr = prefix
        self._cachepoints = (start, i)
        return keys

    def has_keys_with_prefix(self, prefix):
        if prefix in self._data:
            return True
        if prefix.startswith(self._cachestr):
            lo, hi = self._cachepoints
            i = bisect_left(self._keys, prefix, lo, hi)
        else:
            i = bisect_left(self._keys, prefix)
        if i == len(self._keys):
            return False
        return self._keys[i].startswith(prefix)
| apache-2.0 |
chenbo-hhu/TMPA | tests/test_api.py | 33 | 10692 | import unittest
import json
import re
from base64 import b64encode
from flask import url_for
from app import create_app, db
from app.models import User, Role, Post, Comment
class APITestCase(unittest.TestCase):
    """Exercise the REST API: auth, posts, users and comments.

    Each test runs against a fresh in-memory database created in setUp()
    and dropped in tearDown().  Assertions use assertEqual (rather than
    assertTrue on a comparison) so failures report both values.
    """
    def setUp(self):
        self.app = create_app('testing')
        self.app_context = self.app.app_context()
        self.app_context.push()
        db.create_all()
        Role.insert_roles()
        self.client = self.app.test_client()
    def tearDown(self):
        db.session.remove()
        db.drop_all()
        self.app_context.pop()
    def get_api_headers(self, username, password):
        """Build HTTP basic-auth + JSON content headers for API requests."""
        return {
            'Authorization': 'Basic ' + b64encode(
                (username + ':' + password).encode('utf-8')).decode('utf-8'),
            'Accept': 'application/json',
            'Content-Type': 'application/json'
        }
    def test_404(self):
        """Unknown URLs return a JSON 404 body."""
        response = self.client.get(
            '/wrong/url',
            headers=self.get_api_headers('email', 'password'))
        self.assertEqual(response.status_code, 404)
        json_response = json.loads(response.data.decode('utf-8'))
        self.assertEqual(json_response['error'], 'not found')
    def test_no_auth(self):
        """Listing posts is allowed without credentials."""
        response = self.client.get(url_for('api.get_posts'),
                                   content_type='application/json')
        self.assertEqual(response.status_code, 200)
    def test_bad_auth(self):
        """A wrong password yields 401."""
        # add a user
        r = Role.query.filter_by(name='User').first()
        self.assertIsNotNone(r)
        u = User(email='john@example.com', password='cat', confirmed=True,
                 role=r)
        db.session.add(u)
        db.session.commit()
        # authenticate with bad password
        response = self.client.get(
            url_for('api.get_posts'),
            headers=self.get_api_headers('john@example.com', 'dog'))
        self.assertEqual(response.status_code, 401)
    def test_token_auth(self):
        """Token issuance and token-based authentication."""
        # add a user
        r = Role.query.filter_by(name='User').first()
        self.assertIsNotNone(r)
        u = User(email='john@example.com', password='cat', confirmed=True,
                 role=r)
        db.session.add(u)
        db.session.commit()
        # issue a request with a bad token
        response = self.client.get(
            url_for('api.get_posts'),
            headers=self.get_api_headers('bad-token', ''))
        self.assertEqual(response.status_code, 401)
        # get a token
        response = self.client.get(
            url_for('api.get_token'),
            headers=self.get_api_headers('john@example.com', 'cat'))
        self.assertEqual(response.status_code, 200)
        json_response = json.loads(response.data.decode('utf-8'))
        self.assertIsNotNone(json_response.get('token'))
        token = json_response['token']
        # issue a request with the token
        response = self.client.get(
            url_for('api.get_posts'),
            headers=self.get_api_headers(token, ''))
        self.assertEqual(response.status_code, 200)
    def test_anonymous(self):
        """Empty credentials still allow reading posts."""
        response = self.client.get(
            url_for('api.get_posts'),
            headers=self.get_api_headers('', ''))
        self.assertEqual(response.status_code, 200)
    def test_unconfirmed_account(self):
        """Unconfirmed accounts are rejected with 403."""
        # add an unconfirmed user
        r = Role.query.filter_by(name='User').first()
        self.assertIsNotNone(r)
        u = User(email='john@example.com', password='cat', confirmed=False,
                 role=r)
        db.session.add(u)
        db.session.commit()
        # get list of posts with the unconfirmed account
        response = self.client.get(
            url_for('api.get_posts'),
            headers=self.get_api_headers('john@example.com', 'cat'))
        self.assertEqual(response.status_code, 403)
    def test_posts(self):
        """Create, fetch and edit posts through the API."""
        # add a user
        r = Role.query.filter_by(name='User').first()
        self.assertIsNotNone(r)
        u = User(email='john@example.com', password='cat', confirmed=True,
                 role=r)
        db.session.add(u)
        db.session.commit()
        # write an empty post
        response = self.client.post(
            url_for('api.new_post'),
            headers=self.get_api_headers('john@example.com', 'cat'),
            data=json.dumps({'body': ''}))
        self.assertEqual(response.status_code, 400)
        # write a post
        response = self.client.post(
            url_for('api.new_post'),
            headers=self.get_api_headers('john@example.com', 'cat'),
            data=json.dumps({'body': 'body of the *blog* post'}))
        self.assertEqual(response.status_code, 201)
        url = response.headers.get('Location')
        self.assertIsNotNone(url)
        # get the new post
        response = self.client.get(
            url,
            headers=self.get_api_headers('john@example.com', 'cat'))
        self.assertEqual(response.status_code, 200)
        json_response = json.loads(response.data.decode('utf-8'))
        self.assertEqual(json_response['url'], url)
        self.assertEqual(json_response['body'], 'body of the *blog* post')
        self.assertEqual(json_response['body_html'],
                         '<p>body of the <em>blog</em> post</p>')
        json_post = json_response
        # get the post from the user
        response = self.client.get(
            url_for('api.get_user_posts', id=u.id),
            headers=self.get_api_headers('john@example.com', 'cat'))
        self.assertEqual(response.status_code, 200)
        json_response = json.loads(response.data.decode('utf-8'))
        self.assertIsNotNone(json_response.get('posts'))
        self.assertEqual(json_response.get('count', 0), 1)
        self.assertEqual(json_response['posts'][0], json_post)
        # get the post from the user as a follower
        response = self.client.get(
            url_for('api.get_user_followed_posts', id=u.id),
            headers=self.get_api_headers('john@example.com', 'cat'))
        self.assertEqual(response.status_code, 200)
        json_response = json.loads(response.data.decode('utf-8'))
        self.assertIsNotNone(json_response.get('posts'))
        self.assertEqual(json_response.get('count', 0), 1)
        self.assertEqual(json_response['posts'][0], json_post)
        # edit post
        response = self.client.put(
            url,
            headers=self.get_api_headers('john@example.com', 'cat'),
            data=json.dumps({'body': 'updated body'}))
        self.assertEqual(response.status_code, 200)
        json_response = json.loads(response.data.decode('utf-8'))
        self.assertEqual(json_response['url'], url)
        self.assertEqual(json_response['body'], 'updated body')
        self.assertEqual(json_response['body_html'], '<p>updated body</p>')
    def test_users(self):
        """Fetch user resources by id."""
        # add two users
        r = Role.query.filter_by(name='User').first()
        self.assertIsNotNone(r)
        u1 = User(email='john@example.com', username='john',
                  password='cat', confirmed=True, role=r)
        u2 = User(email='susan@example.com', username='susan',
                  password='dog', confirmed=True, role=r)
        db.session.add_all([u1, u2])
        db.session.commit()
        # get users
        response = self.client.get(
            url_for('api.get_user', id=u1.id),
            headers=self.get_api_headers('susan@example.com', 'dog'))
        self.assertEqual(response.status_code, 200)
        json_response = json.loads(response.data.decode('utf-8'))
        self.assertEqual(json_response['username'], 'john')
        response = self.client.get(
            url_for('api.get_user', id=u2.id),
            headers=self.get_api_headers('susan@example.com', 'dog'))
        self.assertEqual(response.status_code, 200)
        json_response = json.loads(response.data.decode('utf-8'))
        self.assertEqual(json_response['username'], 'susan')
    def test_comments(self):
        """Create and list comments on a post."""
        # add two users
        r = Role.query.filter_by(name='User').first()
        self.assertIsNotNone(r)
        u1 = User(email='john@example.com', username='john',
                  password='cat', confirmed=True, role=r)
        u2 = User(email='susan@example.com', username='susan',
                  password='dog', confirmed=True, role=r)
        db.session.add_all([u1, u2])
        db.session.commit()
        # add a post
        post = Post(body='body of the post', author=u1)
        db.session.add(post)
        db.session.commit()
        # write a comment
        response = self.client.post(
            url_for('api.new_post_comment', id=post.id),
            headers=self.get_api_headers('susan@example.com', 'dog'),
            data=json.dumps({'body': 'Good [post](http://example.com)!'}))
        self.assertEqual(response.status_code, 201)
        json_response = json.loads(response.data.decode('utf-8'))
        url = response.headers.get('Location')
        self.assertIsNotNone(url)
        self.assertEqual(json_response['body'],
                         'Good [post](http://example.com)!')
        self.assertEqual(
            re.sub('<.*?>', '', json_response['body_html']), 'Good post!')
        # get the new comment
        response = self.client.get(
            url,
            headers=self.get_api_headers('john@example.com', 'cat'))
        self.assertEqual(response.status_code, 200)
        json_response = json.loads(response.data.decode('utf-8'))
        self.assertEqual(json_response['url'], url)
        self.assertEqual(json_response['body'],
                         'Good [post](http://example.com)!')
        # add another comment
        comment = Comment(body='Thank you!', author=u1, post=post)
        db.session.add(comment)
        db.session.commit()
        # get the two comments from the post
        response = self.client.get(
            url_for('api.get_post_comments', id=post.id),
            headers=self.get_api_headers('susan@example.com', 'dog'))
        self.assertEqual(response.status_code, 200)
        json_response = json.loads(response.data.decode('utf-8'))
        self.assertIsNotNone(json_response.get('comments'))
        self.assertEqual(json_response.get('count', 0), 2)
        # get all the comments
        response = self.client.get(
            url_for('api.get_comments', id=post.id),
            headers=self.get_api_headers('susan@example.com', 'dog'))
        self.assertEqual(response.status_code, 200)
        json_response = json.loads(response.data.decode('utf-8'))
        self.assertIsNotNone(json_response.get('comments'))
        self.assertEqual(json_response.get('count', 0), 2)
| mit |
zhanghenry/stocks | tests/urlpatterns_reverse/views.py | 34 | 1528 | from functools import partial, update_wrapper
from django.contrib.auth.decorators import user_passes_test
from django.core.urlresolvers import reverse_lazy
from django.http import HttpResponse
from django.views.generic import RedirectView
def empty_view(request, *args, **kwargs):
    # Simplest possible view target; returns an empty response.
    return HttpResponse('')
def kwargs_view(request, arg1=1, arg2=2):
    # View with defaulted keyword arguments; the response body is unused.
    return HttpResponse('')
def absolute_kwargs_view(request, arg1=1, arg2=2):
    # Same shape as kwargs_view; presumably referenced via an absolute
    # dotted path in a test URLconf — confirm against the URLconfs.
    return HttpResponse('')
def defaults_view(request, arg1, arg2):
    # Intentionally returns nothing; only URL resolution is exercised.
    pass
def nested_view(request):
    # Intentionally returns nothing; only URL resolution is exercised.
    pass
def erroneous_view(request):
    # Imports a nonexistent module on purpose: calling this view must raise.
    import non_existent  # NOQA
def pass_resolver_match_view(request, *args, **kwargs):
    # Echo the request's ResolverMatch back on the response so callers
    # can inspect how the URL was resolved.
    resp = HttpResponse('')
    resp.resolver_match = request.resolver_match
    return resp
uncallable = "Can I be a view? Pleeeease?"
class ViewClass(object):
    # Callable class whose instance (view_class_instance, below) is used
    # directly as a view.
    def __call__(self, request, *args, **kwargs):
        return HttpResponse('')
view_class_instance = ViewClass()
class LazyRedirectView(RedirectView):
    # reverse_lazy defers URL resolution until the redirect is served.
    url = reverse_lazy('named-lazy-url-redirected-to')
@user_passes_test(lambda u: u.is_authenticated(), login_url=reverse_lazy('some-login-page'))
def login_required_view(request):
    # Guarded view: unauthenticated users are sent to the lazy login URL.
    return HttpResponse('Hello you')
def bad_view(request, *args, **kwargs):
    # Always raises; used to test error propagation through resolution.
    raise ValueError("I don't think I'm getting good value for this view")
empty_view_partial = partial(empty_view, template_name="template.html")
empty_view_wrapped = update_wrapper(
partial(empty_view, template_name="template.html"), empty_view,
)
| bsd-3-clause |
MrTheodor/espressopp | src/esutil/Grid.py | 7 | 1408 | # Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
**********************
espressopp.esutil.Grid
**********************
"""
from espressopp import pmi
from _espressopp import esutil_Grid
class GridLocal(esutil_Grid):
    # Worker-side class: inherits everything from the C++ esutil_Grid binding.
    pass
if pmi.isController:
    class Grid(object):
        # Controller-side PMI proxy: forwards calls to GridLocal on workers.
        __metaclass__ = pmi.Proxy
        'Grid class'
        # NOTE(review): the bare string above was probably intended as the
        # class docstring; placed after __metaclass__ it is a no-op statement.
        pmiproxydefs = dict(
            cls = 'espressopp.esutil.GridLocal',
            localcall = [ 'mapIndexToPosition' ]
            #localcall = [ '__call__', 'normal', 'gamma', 'uniformOnSphere' ],
            #pmicall = [ 'seed' ]
        )
| gpl-3.0 |
readevalprint/zipline | zipline/transforms/vwap.py | 2 | 3609 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from zipline.errors import WrongDataForTransform
from zipline.transforms.utils import EventWindow, TransformMeta
class MovingVWAP(object):
    """
    Maintains one VWAPEventWindow per sid and reports rolling VWAPs.
    """
    __metaclass__ = TransformMeta

    def __init__(self, market_aware=True, delta=None, window_length=None):
        self.market_aware = market_aware
        self.delta = delta
        self.window_length = window_length
        if self.market_aware:
            # Market-aware mode only works with full-day windows.
            assert self.window_length and not self.delta, \
                "Market-aware mode only works with full-day windows."
        else:
            # Non-market-aware mode requires a timedelta.
            assert self.delta and not self.window_length, \
                "Non-market-aware mode requires a timedelta."
        # defaultdict factories take no arguments, so window construction
        # is routed through the create_window bound method.
        self.sid_windows = defaultdict(self.create_window)

    def create_window(self):
        """Build a VWAPEventWindow configured like this transform."""
        return VWAPEventWindow(
            self.market_aware,
            window_length=self.window_length,
            delta=self.delta
        )

    def update(self, event):
        """
        Feed *event* into its sid's window (creating the window on first
        sight of the sid) and return that sid's current vwap.
        """
        window = self.sid_windows[event.sid]
        window.update(event)
        return window.get_vwap()
class VWAPEventWindow(EventWindow):
    """
    Iteratively maintains a vwap for a single sid over a given
    timedelta.
    """
    def __init__(self, market_aware=True, window_length=None, delta=None):
        EventWindow.__init__(self, market_aware, window_length, delta)
        # Running sums over the window: price*volume and total volume.
        self.flux = 0.0
        self.totalvolume = 0.0

    def handle_add(self, event):
        """Fold a newly-arrived event into the running sums."""
        self.assert_required_fields(event)
        self.flux += event.volume * event.price
        self.totalvolume += event.volume

    def handle_remove(self, event):
        """Back an expired event out of the running sums."""
        self.flux -= event.volume * event.price
        self.totalvolume -= event.volume

    def get_vwap(self):
        """
        Return the calculated vwap for this sid.
        By convention, vwap is None if we have no events.
        """
        if not self.ticks:
            return None
        return self.flux / self.totalvolume

    def assert_required_fields(self, event):
        """A vwap needs numerical price and volume on every event."""
        if 'price' not in event or 'volume' not in event:
            raise WrongDataForTransform(
                transform="VWAPEventWindow",
                fields=self.fields)
| apache-2.0 |
shssoichiro/servo | tests/wpt/web-platform-tests/tools/lint/lint.py | 23 | 27630 | from __future__ import print_function, unicode_literals
import abc
import argparse
import ast
import itertools
import json
import os
import re
import subprocess
import sys
from collections import defaultdict
from . import fnmatch
from ..localpaths import repo_root
from ..gitignore.gitignore import PathFilter
from manifest.sourcefile import SourceFile, js_meta_re, python_meta_re
from six import binary_type, iteritems, itervalues
from six.moves import range
from six.moves.urllib.parse import urlsplit, urljoin
import logging
logger = None
def setup_logging(prefix=False):
    """Initialise the module-level logger, optionally prefixing records.

    The logger and its stdout handler are created only once; later calls
    merely swap the formatter between logging.BASIC_FORMAT and the plain
    message-only style.
    """
    global logger
    if logger is None:
        logger = logging.getLogger(os.path.basename(os.path.splitext(__file__)[0]))
        handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(handler)
    fmt = logging.BASIC_FORMAT if prefix else "%(message)s"
    formatter = logging.Formatter(fmt)
    for existing_handler in logger.handlers:
        existing_handler.setFormatter(formatter)
    logger.setLevel(logging.DEBUG)
# Configure the module logger as soon as the module is imported.
setup_logging()
# Hint printed after a failing run; filled with (error type, path,
# error type, path) of the last reported error -- the final "%s:%s" is a
# ready-to-paste lint.whitelist line.
ERROR_MSG = """You must fix all errors; for details on how to fix them, see
https://github.com/w3c/web-platform-tests/blob/master/docs/lint-tool.md
However, instead of fixing a particular error, it's sometimes
OK to add a line to the lint.whitelist file in the root of the
web-platform-tests directory to make the lint tool ignore it.
For example, to make the lint tool ignore all '%s'
errors in the %s file,
you could add the following line to the lint.whitelist file.
%s:%s"""
def all_filesystem_paths(repo_root):
    """Yield repo-relative paths of every file not excluded by gitignore.

    The ".git/*" tree is always excluded via the PathFilter extras.
    """
    path_filter = PathFilter(repo_root, extras=[".git/*"])
    for dirpath, dirnames, filenames in os.walk(repo_root):
        for filename in filenames:
            path = os.path.relpath(os.path.join(dirpath, filename), repo_root)
            if path_filter(path):
                yield path
        # Prune ignored directories in place so os.walk never descends into
        # them (the trailing "/" marks the candidate as a directory for the
        # filter).
        dirnames[:] = [item for item in dirnames if
                       path_filter(os.path.relpath(os.path.join(dirpath, item) + "/",
                                                   repo_root))]
def _all_files_equal(paths):
    """
    Checks all the paths are files that are byte-for-byte identical

    :param paths: the list of paths to compare
    :returns: True if they are all identical
    """
    paths = list(paths)
    if len(paths) < 2:
        return True

    first = paths.pop()
    size = os.path.getsize(first)
    # A cheap size comparison short-circuits the expensive content read.
    if any(os.path.getsize(path) != size for path in paths):
        return False

    # Chunk this to avoid eating up memory and file descriptors
    bufsize = 4096*4  # 16KB, a "reasonable" number of disk sectors
    groupsize = 8  # Hypothesised to be large enough in the common case that everything fits in one group

    with open(first, "rb") as first_f:
        for start in range(0, len(paths), groupsize):
            path_group = paths[start:start+groupsize]
            first_f.seek(0)
            files = []
            try:
                # BUG FIX: open the files one at a time so that a failing
                # open() can no longer leave ``files`` unbound (NameError in
                # the ``finally`` clause) or leak the handles opened before
                # the failure.
                for x in path_group:
                    files.append(open(x, "rb"))
                for _ in range(0, size, bufsize):
                    a = first_f.read(bufsize)
                    for f in files:
                        b = f.read(bufsize)
                        if a != b:
                            return False
            finally:
                for f in files:
                    f.close()

    return True
def check_path_length(repo_root, path, css_mode):
    """Flag paths whose URL form ("/" + path) exceeds 150 characters."""
    url_length = len(path) + 1  # +1 accounts for the leading "/"
    if url_length <= 150:
        return []
    return [("PATH LENGTH",
             "/%s longer than maximum path length (%d > 150)" % (path, url_length),
             path,
             None)]
def check_worker_collision(repo_root, path, css_mode):
    """Flag paths that would clash with auto-generated worker test URLs."""
    # Suffix of a generated test mapped to the source suffix generating it.
    # Only the first matching suffix is reported.
    collisions = (
        (".any.html", ".any.js"),
        (".any.worker.html", ".any.js"),
        (".worker.html", ".worker.js"),
    )
    for path_ending, generated in collisions:
        if path.endswith(path_ending):
            return [("WORKER COLLISION",
                     "path ends with %s which collides with generated tests from %s files" % (path_ending, generated),
                     path,
                     None)]
    return []
# Spec-URL patterns used to group CSS tests by the shortname of the spec
# they link to (captured in group 1).
drafts_csswg_re = re.compile(r"https?\:\/\/drafts\.csswg\.org\/([^/?#]+)")
w3c_tr_re = re.compile(r"https?\:\/\/www\.w3c?\.org\/TR\/([^/?#]+)")
w3c_dev_re = re.compile(r"https?\:\/\/dev\.w3c?\.org\/[^/?#]+\/([^/?#]+)")
def check_css_globally_unique(repo_root, paths, css_mode):
    """
    Checks that CSS filenames are sufficiently unique

    This groups files by path classifying them as "test", "reference", or
    "support".

    "test" files must have a unique name across files that share links to the
    same spec.

    "reference" and "support" files, on the other hand, must have globally
    unique names.

    :param repo_root: the repository root
    :param paths: list of all paths
    :param css_mode: whether we're in CSS testsuite mode
    :returns: a list of errors found in ``paths``
    """
    # name -> set of paths sharing that name, bucketed by classification.
    test_files = defaultdict(set)
    ref_files = defaultdict(set)
    support_files = defaultdict(set)
    for path in paths:
        # Normalise Windows separators so name comparisons are uniform.
        if os.name == "nt":
            path = path.replace("\\", "/")
        if not css_mode:
            if not path.startswith("css/"):
                continue
        # we're within css or in css_mode after all that
        source_file = SourceFile(repo_root, path, "/")
        if source_file.name_is_non_test:
            # If we're name_is_non_test for a reason apart from support, ignore it.
            # We care about support because of the requirement all support files in css/ to be in
            # a support directory; see the start of check_parsed.
            offset = path.find("/support/")
            if offset == -1:
                continue
            parts = source_file.dir_path.split(os.path.sep)
            if (parts[0] in source_file.root_dir_non_test or
                any(item in source_file.dir_non_test - {"support"} for item in parts) or
                any(parts[:len(non_test_path)] == list(non_test_path) for non_test_path in source_file.dir_path_non_test)):
                continue
            # Key support files by their path below the support directory.
            name = path[offset+1:]
            support_files[name].add(path)
        elif source_file.name_is_reference:
            ref_files[source_file.name].add(path)
        else:
            test_files[source_file.name].add(path)
    errors = []
    for name, colliding in iteritems(test_files):
        if len(colliding) > 1:
            if not _all_files_equal([os.path.join(repo_root, x) for x in colliding]):
                # Only compute by_spec if there are prima-facie collisions because of cost
                by_spec = defaultdict(set)
                for path in colliding:
                    source_file = SourceFile(repo_root, path, "/")
                    for link in source_file.spec_links:
                        for r in (drafts_csswg_re, w3c_tr_re, w3c_dev_re):
                            m = r.match(link)
                            if m:
                                spec = m.group(1)
                                break
                        else:
                            # Link matched no known spec host; try next link.
                            continue
                        by_spec[spec].add(path)
                # A collision only counts when the same name is reused with
                # different content within a single spec's testsuite.
                for spec, paths in iteritems(by_spec):
                    if not _all_files_equal([os.path.join(repo_root, x) for x in paths]):
                        for x in paths:
                            errors.append(("CSS-COLLIDING-TEST-NAME",
                                           "The filename %s in the %s testsuite is shared by: %s"
                                           % (name,
                                              spec,
                                              ", ".join(sorted(paths))),
                                           x,
                                           None))
    # References and support files must be globally unique (unless the
    # same-named files are byte-for-byte identical).
    for error_name, d in [("CSS-COLLIDING-REF-NAME", ref_files),
                          ("CSS-COLLIDING-SUPPORT-NAME", support_files)]:
        for name, colliding in iteritems(d):
            if len(colliding) > 1:
                if not _all_files_equal([os.path.join(repo_root, x) for x in colliding]):
                    for x in colliding:
                        errors.append((error_name,
                                       "The filename %s is shared by: %s" % (name,
                                                                             ", ".join(sorted(colliding))),
                                       x,
                                       None))
    return errors
def parse_whitelist(f):
    """
    Parse the whitelist file given by `f`, and return the parsed structure.

    Returns (rules, ignored_files): ``rules`` maps error type -> file glob
    -> set of line numbers (``None`` meaning "any line"); ``ignored_files``
    is the set of globs whitelisted for every error type ("*").
    """
    ignored_files = set()
    data = defaultdict(lambda: defaultdict(set))

    for raw_line in f:
        stripped = raw_line.strip()
        # Skip blank lines and comments.
        if not stripped or stripped.startswith("#"):
            continue

        fields = [field.strip() for field in stripped.split(":")]
        if len(fields) == 2:
            # No line number given: the rule applies to every line.
            fields.append(None)
        else:
            fields[-1] = int(fields[-1])

        error_types, file_match, line_number = fields
        error_types = {item.strip() for item in error_types.split(",")}
        file_match = os.path.normcase(file_match)

        if "*" in error_types:
            ignored_files.add(file_match)
        else:
            for error_type in error_types:
                data[error_type][file_match].add(line_number)

    return data, ignored_files
def filter_whitelist_errors(data, errors):
    """
    Drop errors that match a whitelist entry in `data`.

    An error is whitelisted when its type has a file glob matching the
    error's (case-normalised) path and the rule either applies to every
    line (None) or to the error's exact line number.
    """
    if not errors:
        return []

    def is_whitelisted(error_type, path, line):
        if error_type not in data:
            return False
        normpath = os.path.normcase(path)
        for file_match, allowed_lines in data[error_type].items():
            if ((None in allowed_lines or line in allowed_lines) and
                    fnmatch.fnmatchcase(normpath, file_match)):
                return True
        return False

    return [error for error in errors
            if not is_whitelisted(error[0], error[2], error[3])]
class Regexp(object):
    """Base class for line-based regexp lints.

    Subclasses set ``pattern`` (a bytes regexp), ``error`` (the lint error
    code), ``description``, and optionally ``file_extensions`` to restrict
    which files the check runs against.
    """
    pattern = None
    file_extensions = None
    error = None
    _re = None

    def __init__(self):
        self._re = re.compile(self.pattern)

    def applies(self, path):
        """Return True when this check should run against ``path``."""
        return (self.file_extensions is None or
                os.path.splitext(path)[1] in self.file_extensions)

    def search(self, line):
        return self._re.search(line)

# BUG FIX: the patterns below were plain bytes literals containing invalid
# escape sequences such as "\s" and "\(", which raise DeprecationWarning
# and are a SyntaxError in future Python versions. ``br""`` raw bytes
# literals express the same regexps safely (valid on Python 2 and 3).
class TrailingWhitespaceRegexp(Regexp):
    pattern = br"[ \t\f\v]$"
    error = "TRAILING WHITESPACE"
    description = "Whitespace at EOL"

class TabsRegexp(Regexp):
    pattern = br"^\t"
    error = "INDENT TABS"
    description = "Tabs used for indentation"

class CRRegexp(Regexp):
    pattern = br"\r$"
    error = "CR AT EOL"
    description = "CR character in line separator"

class SetTimeoutRegexp(Regexp):
    pattern = br"setTimeout\s*\("
    error = "SET TIMEOUT"
    file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
    description = "setTimeout used; step_timeout should typically be used instead"

class W3CTestOrgRegexp(Regexp):
    pattern = br"w3c\-test\.org"
    error = "W3C-TEST.ORG"
    description = "External w3c-test.org domain used"

class Webidl2Regexp(Regexp):
    pattern = br"webidl2\.js"
    error = "WEBIDL2.JS"
    description = "Legacy webidl2.js script used"

class ConsoleRegexp(Regexp):
    pattern = br"console\.[a-zA-Z]+\s*\("
    error = "CONSOLE"
    file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
    description = "Console logging API used"

class PrintRegexp(Regexp):
    pattern = br"print(?:\s|\s*\()"
    error = "PRINT STATEMENT"
    file_extensions = [".py"]
    description = "Print function used"

# One instantiated checker per regexp class, in reporting order.
regexps = [item() for item in
           [TrailingWhitespaceRegexp,
            TabsRegexp,
            CRRegexp,
            SetTimeoutRegexp,
            W3CTestOrgRegexp,
            Webidl2Regexp,
            ConsoleRegexp,
            PrintRegexp]]
def check_regexp_line(repo_root, path, f, css_mode):
    """Run each regexp lint applicable to ``path`` over every line of ``f``."""
    applicable = [regexp for regexp in regexps if regexp.applies(path)]
    found = []
    for lineno, line in enumerate(f, 1):
        for regexp in applicable:
            if regexp.search(line):
                found.append((regexp.error, regexp.description, path, lineno))
    return found
def check_parsed(repo_root, path, f, css_mode):
    """Lints that require parsing the file as a wpt SourceFile.

    Covers CSS-directory layout rules, reftest reference sanity, timeout
    and variant metadata, and testharness(report).js usage/ordering.

    :param repo_root: the repository root
    :param path: the path of the file within the repository
    :param f: a file-like object with the file contents
    :param css_mode: whether we're in CSS testsuite mode
    :returns: a list of (error type, description, path, line) tuples
    """
    source_file = SourceFile(repo_root, path, "/", contents=f.read())
    errors = []
    if css_mode or path.startswith("css/"):
        # Support files in the CSS testsuite must live in a support directory.
        if (source_file.type == "support" and
            not source_file.name_is_non_test and
            not source_file.name_is_reference):
            return [("SUPPORT-WRONG-DIR", "Support file not in support directory", path, None)]
        # Every CSS testcase must link to the spec it is testing.
        if (source_file.type != "support" and
            not source_file.name_is_reference and
            not source_file.spec_links):
            return [("MISSING-LINK", "Testcase file must have a link to a spec", path, None)]
    if source_file.name_is_non_test or source_file.name_is_manual:
        return []
    if source_file.markup_type is None:
        return []
    if source_file.root is None:
        return [("PARSE-FAILED", "Unable to parse file", path, None)]
    if source_file.type == "manual" and not source_file.name_is_manual:
        return [("CONTENT-MANUAL", "Manual test whose filename doesn't end in '-manual'", path, None)]
    if source_file.type == "visual" and not source_file.name_is_visual:
        return [("CONTENT-VISUAL", "Visual test whose filename doesn't end in '-visual'", path, None)]
    # Validate each reference link of a reftest.
    for reftest_node in source_file.reftest_nodes:
        href = reftest_node.attrib.get("href", "")
        parts = urlsplit(href)
        if parts.scheme or parts.netloc:
            errors.append(("ABSOLUTE-URL-REF",
                           "Reference test with a reference file specified via an absolute URL: '%s'" % href, path, None))
            continue
        ref_url = urljoin(source_file.url, href)
        ref_parts = urlsplit(ref_url)
        if source_file.url == ref_url:
            errors.append(("SAME-FILE-REF",
                           "Reference test which points at itself as a reference",
                           path,
                           None))
            continue
        assert ref_parts.path != ""
        reference_file = os.path.join(repo_root, ref_parts.path[1:])
        reference_rel = reftest_node.attrib.get("rel", "")
        if not os.path.isfile(reference_file):
            errors.append(("NON-EXISTENT-REF",
                           "Reference test with a non-existent '%s' relationship reference: '%s'" % (reference_rel, href), path, None))
    if len(source_file.timeout_nodes) > 1:
        errors.append(("MULTIPLE-TIMEOUT", "More than one meta name='timeout'", path, None))
    for timeout_node in source_file.timeout_nodes:
        timeout_value = timeout_node.attrib.get("content", "").lower()
        if timeout_value != "long":
            errors.append(("INVALID-TIMEOUT", "Invalid timeout value %s" % timeout_value, path, None))
    if source_file.testharness_nodes:
        if len(source_file.testharness_nodes) > 1:
            errors.append(("MULTIPLE-TESTHARNESS",
                           "More than one <script src='/resources/testharness.js'>", path, None))
        testharnessreport_nodes = source_file.root.findall(".//{http://www.w3.org/1999/xhtml}script[@src='/resources/testharnessreport.js']")
        if not testharnessreport_nodes:
            errors.append(("MISSING-TESTHARNESSREPORT",
                           "Missing <script src='/resources/testharnessreport.js'>", path, None))
        else:
            if len(testharnessreport_nodes) > 1:
                errors.append(("MULTIPLE-TESTHARNESSREPORT",
                               "More than one <script src='/resources/testharnessreport.js'>", path, None))
        testharnesscss_nodes = source_file.root.findall(".//{http://www.w3.org/1999/xhtml}link[@href='/resources/testharness.css']")
        if testharnesscss_nodes:
            errors.append(("PRESENT-TESTHARNESSCSS",
                           "Explicit link to testharness.css present", path, None))
        for element in source_file.variant_nodes:
            if "content" not in element.attrib:
                errors.append(("VARIANT-MISSING",
                               "<meta name=variant> missing 'content' attribute", path, None))
            else:
                variant = element.attrib["content"]
                if variant != "" and variant[0] not in ("?", "#"):
                    # BUG FIX: this error was previously appended as a
                    # 3-tuple (the path element was missing), which broke
                    # every consumer expecting (type, description, path,
                    # line) tuples.
                    errors.append(("MALFORMED-VARIANT",
                                   "%s <meta name=variant> 'content' attribute must be the empty string or start with '?' or '#'" % path,
                                   path,
                                   None))
        seen_elements = {"timeout": False,
                         "testharness": False,
                         "testharnessreport": False}
        # Only wait for the elements this file actually contains.
        required_elements = [key for key, value in {"testharness": True,
                                                    "testharnessreport": len(testharnessreport_nodes) > 0,
                                                    "timeout": len(source_file.timeout_nodes) > 0}.items()
                             if value]
        # Walk the document once, checking relative ordering of the
        # timeout meta and the testharness(report).js scripts.
        for elem in source_file.root.iter():
            if source_file.timeout_nodes and elem == source_file.timeout_nodes[0]:
                seen_elements["timeout"] = True
                if seen_elements["testharness"]:
                    errors.append(("LATE-TIMEOUT",
                                   "<meta name=timeout> seen after testharness.js script", path, None))
            elif elem == source_file.testharness_nodes[0]:
                seen_elements["testharness"] = True
            elif testharnessreport_nodes and elem == testharnessreport_nodes[0]:
                seen_elements["testharnessreport"] = True
                if not seen_elements["testharness"]:
                    errors.append(("EARLY-TESTHARNESSREPORT",
                                   "testharnessreport.js script seen before testharness.js script", path, None))
            # Stop walking once everything required has been seen.
            if all(seen_elements[name] for name in required_elements):
                break
    # testharness(report).js must always be referenced by its canonical path.
    for element in source_file.root.findall(".//{http://www.w3.org/1999/xhtml}script[@src]"):
        src = element.attrib["src"]
        for name in ["testharness", "testharnessreport"]:
            if "%s.js" % name == src or ("/%s.js" % name in src and src != "/resources/%s.js" % name):
                errors.append(("%s-PATH" % name.upper(), "%s.js script seen with incorrect path" % name, path, None))
    return errors
class ASTCheck(object):
    """Base class for lints that walk a parsed Python AST."""
    # NOTE(review): ``__metaclass__`` is the Python 2 spelling; on Python 3
    # it is ignored, so the class is not actually abstract there -- confirm
    # which interpreters must be supported before changing it.
    __metaclass__ = abc.ABCMeta
    error = None
    description = None

    @abc.abstractmethod
    def check(self, root):
        pass

class OpenModeCheck(ASTCheck):
    error = "OPEN-NO-MODE"
    description = "File opened without providing an explicit mode (note: binary files must be read with 'b' in the mode flags)"

    def check(self, root):
        """Return line numbers of open()/file() calls that omit a mode."""
        flagged = []
        for node in ast.walk(root):
            if not isinstance(node, ast.Call):
                continue
            callee = getattr(node.func, "id", None)
            if callee not in ("open", "file"):
                continue
            # A mode is supplied either as a second positional argument or
            # via the ``mode=`` keyword.
            has_mode = (len(node.args) >= 2 or
                        any(item.arg == "mode" for item in node.keywords))
            if not has_mode:
                flagged.append(node.lineno)
        return flagged

# One instantiated checker per AST-check class.
ast_checkers = [item() for item in [OpenModeCheck]]

def check_python_ast(repo_root, path, f, css_mode):
    """Parse ``f`` as Python and run every registered AST checker over it."""
    if not path.endswith(".py"):
        return []
    try:
        tree = ast.parse(f.read())
    except SyntaxError as e:
        return [("PARSE-FAILED", "Unable to parse file", path, e.lineno)]
    results = []
    for checker in ast_checkers:
        results.extend((checker.error, checker.description, path, lineno)
                       for lineno in checker.check(tree))
    return results
# Metadata-ish comments that are not in the canonical "// META:" / "# META:"
# form. BUG FIX: these were plain bytes literals containing the invalid
# escape sequence "\s" (DeprecationWarning, future SyntaxError); ``br""``
# raw bytes literals express the same regexps safely.
broken_js_metadata = re.compile(br"//\s*META:")
broken_python_metadata = re.compile(br"#\s*META:")
def check_script_metadata(repo_root, path, f, css_mode):
    """Validate the "// META:" / "# META:" comment block of a script.

    Only .worker.js/.any.js and .py files are checked. Metadata must form
    a contiguous block at the very top of the file; once a non-metadata
    line has been seen, later metadata-looking lines are flagged.
    """
    # Pick the comment dialect from the file extension.
    if path.endswith((".worker.js", ".any.js")):
        meta_re = js_meta_re
        broken_metadata = broken_js_metadata
    elif path.endswith(".py"):
        meta_re = python_meta_re
        broken_metadata = broken_python_metadata
    else:
        return []
    done = False
    errors = []
    for idx, line in enumerate(f):
        # Lines must be bytes: the metadata regexps are bytes patterns.
        assert isinstance(line, binary_type), line
        m = meta_re.match(line)
        if m:
            key, value = m.groups()
            if key == b"timeout":
                if value != b"long":
                    errors.append(("UNKNOWN-TIMEOUT-METADATA", "Unexpected value for timeout metadata", path, idx + 1))
            elif key == b"script":
                pass
            else:
                errors.append(("UNKNOWN-METADATA", "Unexpected kind of metadata", path, idx + 1))
        else:
            # First non-metadata line: the metadata block has ended.
            done = True
        if done:
            # After the block ends, flag stray/indented/malformed metadata.
            if meta_re.match(line):
                errors.append(("STRAY-METADATA", "Metadata comments should start the file", path, idx + 1))
            elif meta_re.search(line):
                errors.append(("INDENTED-METADATA", "Metadata comments should start the line", path, idx + 1))
            elif broken_metadata.search(line):
                errors.append(("BROKEN-METADATA", "Metadata comment is not formatted correctly", path, idx + 1))
    return errors
def check_path(repo_root, path, css_mode):
    """
    Run every path-based lint over a single path.

    :param repo_root: the repository root
    :param path: the path of the file within the repository
    :param css_mode: whether we're in CSS testsuite mode
    :returns: a list of errors found in ``path``
    """
    results = []
    for lint_fn in path_lints:
        results.extend(lint_fn(repo_root, path, css_mode))
    return results

def check_all_paths(repo_root, paths, css_mode):
    """
    Run every whole-repository lint over the full list of paths.

    :param repo_root: the repository root
    :param paths: a list of all the paths within the repository
    :param css_mode: whether we're in CSS testsuite mode
    :returns: a list of errors found across ``paths``
    """
    results = []
    for lint_fn in all_paths_lints:
        results.extend(lint_fn(repo_root, paths, css_mode))
    return results

def check_file_contents(repo_root, path, f, css_mode):
    """
    Run every content-based lint over an open file.

    :param repo_root: the repository root
    :param path: the path of the file within the repository
    :param f: a file-like object with the file contents
    :param css_mode: whether we're in CSS testsuite mode
    :returns: a list of errors found in ``f``
    """
    results = []
    for lint_fn in file_lints:
        results.extend(lint_fn(repo_root, path, f, css_mode))
        # Rewind so the next lint sees the file from the beginning.
        f.seek(0)
    return results
def output_errors_text(errors):
    """Log one "path[:line]: description (TYPE)" line per error."""
    for error_type, description, path, line_number in errors:
        location = path if not line_number else "%s:%s" % (path, line_number)
        logger.error("%s: %s (%s)" % (location, description, error_type))

def output_errors_markdown(errors):
    """Log the errors as a GitHub-flavoured markdown table."""
    if not errors:
        return
    heading = """Got lint errors:
| Error Type | Position | Message |
|------------|----------|---------|"""
    for heading_line in heading.split("\n"):
        logger.error(heading_line)
    for error_type, description, path, line_number in errors:
        location = path if not line_number else "%s:%s" % (path, line_number)
        logger.error("%s | %s | %s |" % (error_type, location, description))
def output_errors_json(errors):
    """Emit one JSON object per error on stdout (machine-readable mode)."""
    for error_type, message, path, lineno in errors:
        record = {"path": path, "lineno": lineno,
                  "rule": error_type, "message": message}
        print(json.dumps(record))
def output_error_count(error_count):
    """Log a summary with the total and per-type error counts."""
    if not error_count:
        return
    per_type = " ".join("%s: %d" % item for item in error_count.items())
    total = sum(error_count.values())
    logger.info("")
    if total == 1:
        logger.info("There was 1 error (%s)" % (per_type,))
    else:
        logger.info("There were %d errors (%s)" % (total, per_type))
def parse_args():
    """Build the command-line parser for the lint tool and parse sys.argv."""
    parser = argparse.ArgumentParser()
    parser.add_argument("paths", nargs="*",
                        help="List of paths to lint")
    # Output-format flags are mutually exclusive in practice; main() rejects
    # the combination.
    parser.add_argument("--json", action="store_true",
                        help="Output machine-readable JSON format")
    parser.add_argument("--markdown", action="store_true",
                        help="Output markdown")
    parser.add_argument("--css-mode", action="store_true",
                        help="Run CSS testsuite specific lints")
    return parser.parse_args()
def main(**kwargs):
    """Entry point: resolve output options, then run lint() over the paths.

    Accepts the parsed CLI arguments as keyword arguments and returns the
    total error count from lint().
    """
    if kwargs.get("json") and kwargs.get("markdown"):
        logger.critical("Cannot specify --json and --markdown")
        sys.exit(2)

    if kwargs.get("json", False):
        output_format = "json"
    elif kwargs.get("markdown", False):
        output_format = "markdown"
    else:
        output_format = "normal"

    # No explicit paths means "lint the whole repository".
    paths = list(kwargs.get("paths") or all_filesystem_paths(repo_root))

    if output_format == "markdown":
        # Prefix log records so they render sensibly inside markdown.
        setup_logging(True)

    return lint(repo_root, paths, output_format, kwargs.get("css_mode", False))
def lint(repo_root, paths, output_format, css_mode):
    """Run all lints over ``paths`` and report via ``output_format``.

    Mutates ``paths`` in place to drop missing and whitelisted files, and
    returns the total number of (non-whitelisted) errors.
    """
    error_count = defaultdict(int)
    # Tracks (error type, path) of the most recently reported error, used
    # to build the ERROR_MSG whitelist hint at the end.
    last = None
    with open(os.path.join(repo_root, "lint.whitelist")) as f:
        whitelist, ignored_files = parse_whitelist(f)
    output_errors = {"json": output_errors_json,
                     "markdown": output_errors_markdown,
                     "normal": output_errors_text}[output_format]
    def process_errors(errors):
        """
        Filters and prints the errors, and updates the ``error_count`` object.

        :param errors: a list of error tuples (error type, message, path, line number)
        :returns: ``None`` if there were no errors, or
                  a tuple of the error type and the path otherwise
        """
        errors = filter_whitelist_errors(whitelist, errors)
        if not errors:
            return None
        output_errors(errors)
        for error_type, error, path, line in errors:
            error_count[error_type] += 1
        return (errors[-1][0], path)
    # Iterate over a copy since missing/ignored entries are removed from
    # ``paths`` while looping.
    for path in paths[:]:
        abs_path = os.path.join(repo_root, path)
        if not os.path.exists(abs_path):
            paths.remove(path)
            continue
        if any(fnmatch.fnmatch(path, file_match) for file_match in ignored_files):
            paths.remove(path)
            continue
        errors = check_path(repo_root, path, css_mode)
        last = process_errors(errors) or last
        if not os.path.isdir(abs_path):
            with open(abs_path, 'rb') as f:
                errors = check_file_contents(repo_root, path, f, css_mode)
                last = process_errors(errors) or last
    # Whole-repository checks run on the pruned path list.
    errors = check_all_paths(repo_root, paths, css_mode)
    last = process_errors(errors) or last
    if output_format in ("normal", "markdown"):
        output_error_count(error_count)
        if error_count:
            for line in (ERROR_MSG % (last[0], last[1], last[0], last[1])).split("\n"):
                logger.info(line)
    return sum(itervalues(error_count))
# Registries of lint checks, grouped by the kind of input they inspect.
path_lints = [check_path_length, check_worker_collision]
all_paths_lints = [check_css_globally_unique]
file_lints = [check_regexp_line, check_parsed, check_python_ast, check_script_metadata]
# Exit non-zero when any error was reported, so CI can fail the build.
if __name__ == "__main__":
    args = parse_args()
    error_count = main(**vars(args))
    if error_count > 0:
        sys.exit(1)
| mpl-2.0 |
andyfoundi/mongodb-tools | mongodbtools/collection_stats.py | 2 | 7006 | #!/usr/bin/env python
"""
This script prints some basic collection stats about the size of the
collections and their indexes.
"""
from prettytable import PrettyTable
import psutil
from socket import getfqdn
from pymongo import ReadPreference
from optparse import OptionParser
from distutils.version import StrictVersion
import pymongo
HAS_PYMONGO3 = bool(StrictVersion(pymongo.version) >= StrictVersion('3.0'))
if HAS_PYMONGO3:
from pymongo import MongoClient
else:
from pymongo import Connection as MongoClient # pylint: disable=E0611
def compute_signature(index):
    """Return a string identifying an index: its namespace followed by
    each key name and direction, e.g. ``db.coll`` -> ``db.colla_1b_-1``."""
    parts = [index["ns"]]
    parts.extend("%s_%s" % (key, index["key"][key]) for key in index["key"])
    return "".join(parts)
def get_collection_stats(database, collection):
    """Print progress and return the server's collstats document for
    ``collection``."""
    print("Checking DB: %s" % collection.full_name)
    stats = database.command("collstats", collection.name)
    return stats
def get_cli_options():
    """Build the option parser for the stats report and parse sys.argv."""
    parser = OptionParser(usage="usage: python %prog [options]",
                          description="""This script prints some basic collection stats about the size of the collections and their indexes.""")
    # (short flag, long flag, dest, default, metavar, help) per option; a
    # None short flag means the option only has a long form.
    option_specs = [
        ("-H", "--host", "host", "localhost", "HOST",
         "MongoDB host"),
        ("-p", "--port", "port", 27017, "PORT",
         "MongoDB port"),
        ("-d", "--database", "database", "", "DATABASE",
         "Target database to generate statistics. All if omitted."),
        ("-u", "--user", "user", "", "USER",
         "Admin username if authentication is enabled"),
        (None, "--password", "password", "", "PASSWORD",
         "Admin password if authentication is enabled"),
        (None, "--ssl-cert", "ssl_certfile", None, "CERTIFICATE",
         "SSL Certificate to use is SSL is enabled (only with pymongo >= 3)"),
        (None, "--ssl-ca-certs", "ssl_ca_certs", None, "CA",
         "SSL Certificate of CA for certificate validation if SSL is enabled (only with pymongo >= 3)"),
    ]
    for short_flag, long_flag, dest, default, metavar, help_text in option_specs:
        flags = [flag for flag in (short_flag, long_flag) if flag]
        parser.add_option(*flags, dest=dest, default=default,
                          metavar=metavar, help=help_text)
    (options, args) = parser.parse_args()
    return options
def get_connection(host, port, username, password, ssl_certfile=None, ssl_ca_certs=None):
    """Build a MongoDB client for ``host:port``.

    Credentials are embedded in the URI only when both username and
    password are supplied; reads are directed at secondaries. The SSL
    options are honoured only when pymongo >= 3 is installed.
    """
    credentials = ""
    if username and password:
        credentials = "%s:%s@" % (username, password)
    mongo_uri = "mongodb://%s%s:%s" % (credentials, host, port)
    conn_kwargs = {"host": mongo_uri,
                   "read_preference": ReadPreference.SECONDARY}
    if HAS_PYMONGO3:
        conn_kwargs["ssl_certfile"] = ssl_certfile
        conn_kwargs["ssl_ca_certs"] = ssl_ca_certs
    return MongoClient(**conn_kwargs)
# From http://www.5dollarwhitebox.org/drupal/node/84
def convert_bytes(bytes):
    """Render a byte count as a human-readable string (b/K/M/G/T).

    The unit is chosen from the magnitude, so negative values keep their
    sign (e.g. -2048 -> '-2.00K').
    """
    value = float(bytes)
    magnitude = abs(value)
    # (threshold, suffix) pairs from the largest unit down to kilobytes.
    units = ((1099511627776, 'T'),
             (1073741824, 'G'),
             (1048576, 'M'),
             (1024, 'K'))
    for threshold, suffix in units:
        if magnitude >= threshold:
            return '%.2f%s' % (value / threshold, suffix)
    return '%.2fb' % value
def main(options=None):
    """Collect collstats for every collection and print a summary table.

    :param options: parsed CLI options; parsed from sys.argv when omitted.
    """
    if options is None:
        options = get_cli_options()

    # Totals accumulated across every collection visited.
    summary_stats = {
        "count": 0,
        "size": 0,
        "indexSize": 0,
        "storageSize": 0
    }
    all_stats = []

    connection = get_connection(options.host, options.port, options.user, options.password,
                                options.ssl_certfile, options.ssl_ca_certs)

    all_db_stats = {}

    databases = []
    if options.database:
        databases.append(options.database)
    else:
        databases = connection.database_names()

    for db in databases:
        # FIXME: Add an option to include oplog stats.
        if db == "local":
            continue

        database = connection[db]
        all_db_stats[database.name] = []
        for collection_name in database.collection_names():
            stats = get_collection_stats(database, database[collection_name])
            all_stats.append(stats)
            all_db_stats[database.name].append(stats)

            summary_stats["count"] += stats["count"]
            summary_stats["size"] += stats["size"]
            summary_stats["indexSize"] += stats.get("totalIndexSize", 0)
            summary_stats["storageSize"] += stats.get("storageSize", 0)

    x = PrettyTable(["Collection", "Count", "% Size", "DB Size", "Avg Obj Size", "Indexes", "Index Size", "Storage Size"])
    x.align["Collection"] = "l"
    x.align["% Size"] = "r"
    x.align["Count"] = "r"
    x.align["DB Size"] = "r"
    x.align["Avg Obj Size"] = "r"
    x.align["Index Size"] = "r"
    x.align["Storage Size"] = "r"
    x.padding_width = 1

    # BUG FIX: a bare ``print`` expression is a no-op on Python 3 (it only
    # references the builtin); print an explicit blank line instead.
    print("")

    for db in all_db_stats:
        db_stats = all_db_stats[db]
        count = 0
        for stat in db_stats:
            count += stat["count"]
            x.add_row([stat["ns"], stat["count"],
                       "%0.1f%%" % ((stat["size"] / float(summary_stats["size"])) * 100),
                       convert_bytes(stat["size"]),
                       convert_bytes(stat.get("avgObjSize", 0)),
                       stat.get("nindexes", 0),
                       convert_bytes(stat.get("totalIndexSize", 0)),
                       convert_bytes(stat.get("storageSize", 0))
                       ])

    print("")
    print(x.get_string(sortby="% Size"))
    print("Total Documents: %s" % summary_stats["count"])
    print("Total Data Size: %s" % convert_bytes(summary_stats["size"]))
    print("Total Index Size: %s" % convert_bytes(summary_stats["indexSize"]))
    print("Total Storage Size: %s" % convert_bytes(summary_stats["storageSize"]))

    # These figures are only meaningful when the script runs on the same
    # machine as mongod.
    if options.host == "localhost" or options.host == getfqdn():
        # NOTE(review): psutil.phymem_usage() was removed in psutil 2.x;
        # modern psutil spells this psutil.virtual_memory() -- confirm the
        # pinned psutil version before upgrading.
        ram_headroom = psutil.phymem_usage()[0] - summary_stats["indexSize"]
        print("RAM Headroom: %s" % convert_bytes(ram_headroom))
        print("RAM Used: %s (%s%%)" % (convert_bytes(psutil.phymem_usage()[1]), psutil.phymem_usage()[3]))
        print("Available RAM Headroom: %s" % convert_bytes((100 - psutil.phymem_usage()[3]) / 100 * ram_headroom))
# CLI entry point: parse the options once and hand them to main().
if __name__ == "__main__":
    options = get_cli_options()
    main(options)
| mit |
adrienbrault/home-assistant | homeassistant/components/gc100/__init__.py | 6 | 1690 | """Support for controlling Global Cache gc100."""
import gc100
import voluptuous as vol
from homeassistant.const import CONF_HOST, CONF_PORT, EVENT_HOMEASSISTANT_STOP
import homeassistant.helpers.config_validation as cv
CONF_PORTS = "ports"
# Default TCP port of the Global Cache GC-100 network adapter.
DEFAULT_PORT = 4998
DOMAIN = "gc100"
# Key under which the shared GC100Device is stored in hass.data.
DATA_GC100 = "gc100"
# Component configuration: a required host plus an optional port.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_HOST): cv.string,
                vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
def setup(hass, base_config):
    """Set up the gc100 component."""
    config = base_config[DOMAIN]
    host = config[CONF_HOST]
    port = config[CONF_PORT]
    # Open the TCP connection to the GC-100 adapter.
    gc_device = gc100.GC100SocketClient(host, port)
    def cleanup_gc100(event):
        """Close the device connection before Home Assistant stops."""
        gc_device.quit()
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, cleanup_gc100)
    # Share the wrapped device with the gc100 switch/sensor platforms.
    hass.data[DATA_GC100] = GC100Device(hass, gc_device)
    return True
class GC100Device:
    """Thin wrapper exposing the gc100 socket client to the platforms."""

    def __init__(self, hass, gc_device):
        """Store the hass reference and the underlying socket client."""
        self.hass = hass
        self.gc_device = gc_device

    def read_sensor(self, port_addr, callback):
        """Read a value from a digital input."""
        self.gc_device.read_sensor(port_addr, callback)

    def write_switch(self, port_addr, state, callback):
        """Write a value to a relay."""
        self.gc_device.write_switch(port_addr, state, callback)

    def subscribe(self, port_addr, callback):
        """Add detection for RISING and FALLING events."""
        self.gc_device.subscribe_notify(port_addr, callback)
| mit |
kawamon/hue | desktop/core/ext-py/Django-1.11.29/tests/gis_tests/gdal_tests/test_raster.py | 5 | 21062 | """
gdalinfo tests/gis_tests/data/rasters/raster.tif:
Driver: GTiff/GeoTIFF
Files: tests/gis_tests/data/rasters/raster.tif
Size is 163, 174
Coordinate System is:
PROJCS["NAD83 / Florida GDL Albers",
GEOGCS["NAD83",
DATUM["North_American_Datum_1983",
SPHEROID["GRS 1980",6378137,298.2572221010002,
AUTHORITY["EPSG","7019"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6269"]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4269"]],
PROJECTION["Albers_Conic_Equal_Area"],
PARAMETER["standard_parallel_1",24],
PARAMETER["standard_parallel_2",31.5],
PARAMETER["latitude_of_center",24],
PARAMETER["longitude_of_center",-84],
PARAMETER["false_easting",400000],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]],
AUTHORITY["EPSG","3086"]]
Origin = (511700.468070655711927,435103.377123198588379)
Pixel Size = (100.000000000000000,-100.000000000000000)
Metadata:
AREA_OR_POINT=Area
Image Structure Metadata:
INTERLEAVE=BAND
Corner Coordinates:
Upper Left ( 511700.468, 435103.377) ( 82d51'46.16"W, 27d55' 1.53"N)
Lower Left ( 511700.468, 417703.377) ( 82d51'52.04"W, 27d45'37.50"N)
Upper Right ( 528000.468, 435103.377) ( 82d41'48.81"W, 27d54'56.30"N)
Lower Right ( 528000.468, 417703.377) ( 82d41'55.54"W, 27d45'32.28"N)
Center ( 519850.468, 426403.377) ( 82d46'50.64"W, 27d50'16.99"N)
Band 1 Block=163x50 Type=Byte, ColorInterp=Gray
NoData Value=15
"""
import os
import struct
import tempfile
from django.contrib.gis.gdal import GDAL_VERSION, GDALRaster
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.raster.band import GDALBand
from django.contrib.gis.shortcuts import numpy
from django.test import SimpleTestCase
from django.utils import six
from django.utils._os import upath
from ..data.rasters.textrasters import JSON_RASTER
class GDALRasterTests(SimpleTestCase):
"""
Test a GDALRaster instance created from a file (GeoTiff).
"""
    def setUp(self):
        # A small GeoTiff fixture in NAD83 / Florida GDL Albers (EPSG:3086);
        # its expected metrics come from the gdalinfo dump in the module
        # docstring.
        self.rs_path = os.path.join(os.path.dirname(upath(__file__)),
                                    '../data/rasters/raster.tif')
        self.rs = GDALRaster(self.rs_path)
    def test_rs_name_repr(self):
        self.assertEqual(self.rs_path, self.rs.name)
        self.assertRegex(repr(self.rs), r"<Raster object at 0x\w+>")
    def test_rs_driver(self):
        self.assertEqual(self.rs.driver.name, 'GTiff')
    def test_rs_size(self):
        # Width/height in pixels as reported by gdalinfo (163 x 174).
        self.assertEqual(self.rs.width, 163)
        self.assertEqual(self.rs.height, 174)
    def test_rs_srs(self):
        self.assertEqual(self.rs.srs.srid, 3086)
        self.assertEqual(self.rs.srs.units, (1.0, 'metre'))
    def test_rs_srid(self):
        rast = GDALRaster({
            'width': 16,
            'height': 16,
            'srid': 4326,
        })
        self.assertEqual(rast.srid, 4326)
        # The srid setter should update the raster in place.
        rast.srid = 3086
        self.assertEqual(rast.srid, 3086)
    def test_geotransform_and_friends(self):
        # Assert correct values for file based raster
        self.assertEqual(
            self.rs.geotransform,
            [511700.4680706557, 100.0, 0.0, 435103.3771231986, 0.0, -100.0]
        )
        self.assertEqual(self.rs.origin, [511700.4680706557, 435103.3771231986])
        self.assertEqual(self.rs.origin.x, 511700.4680706557)
        self.assertEqual(self.rs.origin.y, 435103.3771231986)
        self.assertEqual(self.rs.scale, [100.0, -100.0])
        self.assertEqual(self.rs.scale.x, 100.0)
        self.assertEqual(self.rs.scale.y, -100.0)
        self.assertEqual(self.rs.skew, [0, 0])
        self.assertEqual(self.rs.skew.x, 0)
        self.assertEqual(self.rs.skew.y, 0)
        # Create in-memory rasters and change gtvalues
        # (setting geotransform should be reflected in origin/scale/skew).
        rsmem = GDALRaster(JSON_RASTER)
        rsmem.geotransform = range(6)
        self.assertEqual(rsmem.geotransform, [float(x) for x in range(6)])
        self.assertEqual(rsmem.origin, [0, 3])
        self.assertEqual(rsmem.origin.x, 0)
        self.assertEqual(rsmem.origin.y, 3)
        self.assertEqual(rsmem.scale, [1, 5])
        self.assertEqual(rsmem.scale.x, 1)
        self.assertEqual(rsmem.scale.y, 5)
        self.assertEqual(rsmem.skew, [2, 4])
        self.assertEqual(rsmem.skew.x, 2)
        self.assertEqual(rsmem.skew.y, 4)
        self.assertEqual(rsmem.width, 5)
        self.assertEqual(rsmem.height, 5)
    def test_rs_extent(self):
        # (xmin, ymin, xmax, ymax) derived from origin, scale and size.
        self.assertEqual(
            self.rs.extent,
            (511700.4680706557, 417703.3771231986, 528000.4680706557, 435103.3771231986)
        )
    def test_rs_bands(self):
        self.assertEqual(len(self.rs.bands), 1)
        self.assertIsInstance(self.rs.bands[0], GDALBand)
def test_memory_based_raster_creation(self):
# Create uint8 raster with full pixel data range (0-255)
rast = GDALRaster({
'datatype': 1,
'width': 16,
'height': 16,
'srid': 4326,
'bands': [{
'data': range(256),
'nodata_value': 255,
}],
})
# Get array from raster
result = rast.bands[0].data()
if numpy:
result = result.flatten().tolist()
# Assert data is same as original input
self.assertEqual(result, list(range(256)))
def test_file_based_raster_creation(self):
# Prepare tempfile
rstfile = tempfile.NamedTemporaryFile(suffix='.tif')
# Create file-based raster from scratch
GDALRaster({
'datatype': self.rs.bands[0].datatype(),
'driver': 'tif',
'name': rstfile.name,
'width': 163,
'height': 174,
'nr_of_bands': 1,
'srid': self.rs.srs.wkt,
'origin': (self.rs.origin.x, self.rs.origin.y),
'scale': (self.rs.scale.x, self.rs.scale.y),
'skew': (self.rs.skew.x, self.rs.skew.y),
'bands': [{
'data': self.rs.bands[0].data(),
'nodata_value': self.rs.bands[0].nodata_value,
}],
})
# Reload newly created raster from file
restored_raster = GDALRaster(rstfile.name)
self.assertEqual(restored_raster.srs.wkt, self.rs.srs.wkt)
self.assertEqual(restored_raster.geotransform, self.rs.geotransform)
if numpy:
numpy.testing.assert_equal(
restored_raster.bands[0].data(),
self.rs.bands[0].data()
)
else:
self.assertEqual(restored_raster.bands[0].data(), self.rs.bands[0].data())
def test_offset_size_and_shape_on_raster_creation(self):
rast = GDALRaster({
'datatype': 1,
'width': 4,
'height': 4,
'srid': 4326,
'bands': [{
'data': (1,),
'offset': (1, 1),
'size': (2, 2),
'shape': (1, 1),
'nodata_value': 2,
}],
})
# Get array from raster.
result = rast.bands[0].data()
if numpy:
result = result.flatten().tolist()
# Band data is equal to nodata value except on input block of ones.
self.assertEqual(
result,
[2, 2, 2, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, 2, 2, 2]
)
def test_set_nodata_value_on_raster_creation(self):
# Create raster filled with nodata values.
rast = GDALRaster({
'datatype': 1,
'width': 2,
'height': 2,
'srid': 4326,
'bands': [{'nodata_value': 23}],
})
# Get array from raster.
result = rast.bands[0].data()
if numpy:
result = result.flatten().tolist()
# All band data is equal to nodata value.
self.assertEqual(result, [23, ] * 4)
def test_set_nodata_none_on_raster_creation(self):
if GDAL_VERSION < (2, 1):
self.skipTest("GDAL >= 2.1 is required for this test.")
# Create raster without data and without nodata value.
rast = GDALRaster({
'datatype': 1,
'width': 2,
'height': 2,
'srid': 4326,
'bands': [{'nodata_value': None}],
})
# Get array from raster.
result = rast.bands[0].data()
if numpy:
result = result.flatten().tolist()
# Band data is equal to zero becaues no nodata value has been specified.
self.assertEqual(result, [0] * 4)
def test_raster_warp(self):
# Create in memory raster
source = GDALRaster({
'datatype': 1,
'driver': 'MEM',
'name': 'sourceraster',
'width': 4,
'height': 4,
'nr_of_bands': 1,
'srid': 3086,
'origin': (500000, 400000),
'scale': (100, -100),
'skew': (0, 0),
'bands': [{
'data': range(16),
'nodata_value': 255,
}],
})
# Test altering the scale, width, and height of a raster
data = {
'scale': [200, -200],
'width': 2,
'height': 2,
}
target = source.warp(data)
self.assertEqual(target.width, data['width'])
self.assertEqual(target.height, data['height'])
self.assertEqual(target.scale, data['scale'])
self.assertEqual(target.bands[0].datatype(), source.bands[0].datatype())
self.assertEqual(target.name, 'sourceraster_copy.MEM')
result = target.bands[0].data()
if numpy:
result = result.flatten().tolist()
self.assertEqual(result, [5, 7, 13, 15])
# Test altering the name and datatype (to float)
data = {
'name': '/path/to/targetraster.tif',
'datatype': 6,
}
target = source.warp(data)
self.assertEqual(target.bands[0].datatype(), 6)
self.assertEqual(target.name, '/path/to/targetraster.tif')
self.assertEqual(target.driver.name, 'MEM')
result = target.bands[0].data()
if numpy:
result = result.flatten().tolist()
self.assertEqual(
result,
[0.0, 1.0, 2.0, 3.0,
4.0, 5.0, 6.0, 7.0,
8.0, 9.0, 10.0, 11.0,
12.0, 13.0, 14.0, 15.0]
)
def test_raster_warp_nodata_zone(self):
# Create in memory raster.
source = GDALRaster({
'datatype': 1,
'driver': 'MEM',
'width': 4,
'height': 4,
'srid': 3086,
'origin': (500000, 400000),
'scale': (100, -100),
'skew': (0, 0),
'bands': [{
'data': range(16),
'nodata_value': 23,
}],
})
# Warp raster onto a location that does not cover any pixels of the original.
result = source.warp({'origin': (200000, 200000)}).bands[0].data()
if numpy:
result = result.flatten().tolist()
# The result is an empty raster filled with the correct nodata value.
self.assertEqual(result, [23] * 16)
def test_raster_transform(self):
# Prepare tempfile and nodata value
rstfile = tempfile.NamedTemporaryFile(suffix='.tif')
ndv = 99
# Create in file based raster
source = GDALRaster({
'datatype': 1,
'driver': 'tif',
'name': rstfile.name,
'width': 5,
'height': 5,
'nr_of_bands': 1,
'srid': 4326,
'origin': (-5, 5),
'scale': (2, -2),
'skew': (0, 0),
'bands': [{
'data': range(25),
'nodata_value': ndv,
}],
})
# Transform raster into srid 4326.
target = source.transform(3086)
# Reload data from disk
target = GDALRaster(target.name)
self.assertEqual(target.srs.srid, 3086)
self.assertEqual(target.width, 7)
self.assertEqual(target.height, 7)
self.assertEqual(target.bands[0].datatype(), source.bands[0].datatype())
self.assertAlmostEqual(target.origin[0], 9124842.791079799)
self.assertAlmostEqual(target.origin[1], 1589911.6476407414)
self.assertAlmostEqual(target.scale[0], 223824.82664250192)
self.assertAlmostEqual(target.scale[1], -223824.82664250192)
self.assertEqual(target.skew, [0, 0])
result = target.bands[0].data()
if numpy:
result = result.flatten().tolist()
# The reprojection of a raster that spans over a large area
# skews the data matrix and might introduce nodata values.
self.assertEqual(
result,
[
ndv, ndv, ndv, ndv, 4, ndv, ndv,
ndv, ndv, 2, 3, 9, ndv, ndv,
ndv, 1, 2, 8, 13, 19, ndv,
0, 6, 6, 12, 18, 18, 24,
ndv, 10, 11, 16, 22, 23, ndv,
ndv, ndv, 15, 21, 22, ndv, ndv,
ndv, ndv, 20, ndv, ndv, ndv, ndv,
]
)
class GDALBandTests(SimpleTestCase):
    """
    Tests for GDALBand: pixel access, statistics, and nodata handling.
    """
    def setUp(self):
        # First (and only) band of the sample GeoTiff fixture.
        self.rs_path = os.path.join(os.path.dirname(upath(__file__)), '../data/rasters/raster.tif')
        rs = GDALRaster(self.rs_path)
        self.band = rs.bands[0]

    def test_band_data(self):
        # GDAL persists computed statistics in a PAM sidecar file (.aux.xml).
        pam_file = self.rs_path + '.aux.xml'
        self.assertEqual(self.band.width, 163)
        self.assertEqual(self.band.height, 174)
        self.assertEqual(self.band.description, '')
        self.assertEqual(self.band.datatype(), 1)
        self.assertEqual(self.band.datatype(as_string=True), 'GDT_Byte')
        self.assertEqual(self.band.nodata_value, 15)
        if numpy:
            data = self.band.data()
            # Reference pixel values stored as a plain-text numpy dump.
            assert_array = numpy.loadtxt(
                os.path.join(os.path.dirname(upath(__file__)), '../data/rasters/raster.numpy.txt')
            )
            numpy.testing.assert_equal(data, assert_array)
            self.assertEqual(data.shape, (self.band.height, self.band.width))
        try:
            # Approximate statistics may differ from exact ones.
            smin, smax, smean, sstd = self.band.statistics(approximate=True)
            self.assertEqual(smin, 0)
            self.assertEqual(smax, 9)
            self.assertAlmostEqual(smean, 2.842331288343558)
            self.assertAlmostEqual(sstd, 2.3965567248965356)
            smin, smax, smean, sstd = self.band.statistics(approximate=False, refresh=True)
            self.assertEqual(smin, 0)
            self.assertEqual(smax, 9)
            self.assertAlmostEqual(smean, 2.828326634228898)
            self.assertAlmostEqual(sstd, 2.4260526986669095)
            # The min/max/mean/std properties mirror the exact statistics.
            self.assertEqual(self.band.min, 0)
            self.assertEqual(self.band.max, 9)
            self.assertAlmostEqual(self.band.mean, 2.828326634228898)
            self.assertAlmostEqual(self.band.std, 2.4260526986669095)
            # Statistics are persisted into PAM file on band close
            self.band = None
            self.assertTrue(os.path.isfile(pam_file))
        finally:
            # Close band and remove file if created
            self.band = None
            if os.path.isfile(pam_file):
                os.remove(pam_file)

    def test_read_mode_error(self):
        # Open raster in read mode
        rs = GDALRaster(self.rs_path, write=False)
        band = rs.bands[0]
        # Setting attributes in write mode raises exception in the _flush method
        with self.assertRaises(GDALException):
            setattr(band, 'nodata_value', 10)

    def test_band_data_setters(self):
        # Create in-memory raster and get band
        rsmem = GDALRaster({
            'datatype': 1,
            'driver': 'MEM',
            'name': 'mem_rst',
            'width': 10,
            'height': 10,
            'nr_of_bands': 1,
            'srid': 4326,
        })
        bandmem = rsmem.bands[0]
        # Set nodata value
        bandmem.nodata_value = 99
        self.assertEqual(bandmem.nodata_value, 99)
        # Set data for entire dataset
        bandmem.data(range(100))
        if numpy:
            numpy.testing.assert_equal(bandmem.data(), numpy.arange(100).reshape(10, 10))
        else:
            self.assertEqual(bandmem.data(), list(range(100)))
        # Prepare data for setting values in subsequent tests.
        # NOTE: whitespace in struct format strings is ignored, so
        # '<' + 'B B B B' is four unsigned bytes, little-endian.
        block = list(range(100, 104))
        packed_block = struct.pack('<' + 'B B B B', *block)
        # Set data from list
        bandmem.data(block, (1, 1), (2, 2))
        result = bandmem.data(offset=(1, 1), size=(2, 2))
        if numpy:
            numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
        else:
            self.assertEqual(result, block)
        # Set data from packed block
        bandmem.data(packed_block, (1, 1), (2, 2))
        result = bandmem.data(offset=(1, 1), size=(2, 2))
        if numpy:
            numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
        else:
            self.assertEqual(result, block)
        # Set data from bytes
        bandmem.data(bytes(packed_block), (1, 1), (2, 2))
        result = bandmem.data(offset=(1, 1), size=(2, 2))
        if numpy:
            numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
        else:
            self.assertEqual(result, block)
        # Set data from bytearray
        bandmem.data(bytearray(packed_block), (1, 1), (2, 2))
        result = bandmem.data(offset=(1, 1), size=(2, 2))
        if numpy:
            numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
        else:
            self.assertEqual(result, block)
        # Set data from memoryview
        bandmem.data(six.memoryview(packed_block), (1, 1), (2, 2))
        result = bandmem.data(offset=(1, 1), size=(2, 2))
        if numpy:
            numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
        else:
            self.assertEqual(result, block)
        # Set data from numpy array
        if numpy:
            bandmem.data(numpy.array(block, dtype='int8').reshape(2, 2), (1, 1), (2, 2))
            numpy.testing.assert_equal(
                bandmem.data(offset=(1, 1), size=(2, 2)),
                numpy.array(block).reshape(2, 2)
            )
        # Test json input data
        rsmemjson = GDALRaster(JSON_RASTER)
        bandmemjson = rsmemjson.bands[0]
        if numpy:
            numpy.testing.assert_equal(
                bandmemjson.data(),
                numpy.array(range(25)).reshape(5, 5)
            )
        else:
            self.assertEqual(bandmemjson.data(), list(range(25)))

    def test_band_statistics_automatic_refresh(self):
        rsmem = GDALRaster({
            'srid': 4326,
            'width': 2,
            'height': 2,
            'bands': [{'data': [0] * 4, 'nodata_value': 99}],
        })
        band = rsmem.bands[0]
        # Populate statistics cache
        self.assertEqual(band.statistics(), (0, 0, 0, 0))
        # Change data
        band.data([1, 1, 0, 0])
        # Statistics are properly updated
        self.assertEqual(band.statistics(), (0.0, 1.0, 0.5, 0.5))
        # Change nodata_value; zeros are now excluded from the statistics.
        band.nodata_value = 0
        # Statistics are properly updated
        self.assertEqual(band.statistics(), (1.0, 1.0, 1.0, 0.0))

    def test_band_statistics_empty_band(self):
        # A band where every pixel is nodata has no statistics.
        rsmem = GDALRaster({
            'srid': 4326,
            'width': 1,
            'height': 1,
            'bands': [{'data': [0], 'nodata_value': 0}],
        })
        self.assertEqual(rsmem.bands[0].statistics(), (None, None, None, None))

    def test_band_delete_nodata(self):
        rsmem = GDALRaster({
            'srid': 4326,
            'width': 1,
            'height': 1,
            'bands': [{'data': [0], 'nodata_value': 1}],
        })
        if GDAL_VERSION < (2, 1):
            # Deleting a nodata value requires GDAL >= 2.1.
            msg = 'GDAL >= 2.1 required to delete nodata values.'
            with self.assertRaisesMessage(ValueError, msg):
                rsmem.bands[0].nodata_value = None
        else:
            rsmem.bands[0].nodata_value = None
            self.assertIsNone(rsmem.bands[0].nodata_value)

    def test_band_data_replication(self):
        band = GDALRaster({
            'srid': 4326,
            'width': 3,
            'height': 3,
            'bands': [{'data': range(10, 19), 'nodata_value': 0}],
        }).bands[0]
        # Variations for input (data, shape, expected result).
        combos = (
            ([1], (1, 1), [1] * 9),
            (range(3), (1, 3), [0, 0, 0, 1, 1, 1, 2, 2, 2]),
            (range(3), (3, 1), [0, 1, 2, 0, 1, 2, 0, 1, 2]),
        )
        for combo in combos:
            band.data(combo[0], shape=combo[1])
            if numpy:
                numpy.testing.assert_equal(band.data(), numpy.array(combo[2]).reshape(3, 3))
            else:
                self.assertEqual(band.data(), list(combo[2]))
| apache-2.0 |
wlonk/warehouse | tests/unit/cache/origin/test_init.py | 3 | 8664 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pretend
import pytest
from warehouse.cache import origin
from warehouse.cache.origin.interfaces import IOriginCache
def test_store_purge_keys():
    """store_purge_keys collects purge keys for new/dirty/deleted model types.

    Types without a registered key maker (ModelD here) are silently skipped.
    """
    class ModelA:
        pass

    class ModelB:
        pass

    class ModelC:
        pass

    class ModelD:
        pass

    key_makers = {
        ModelA: lambda o: origin.CacheKeys(cache=[], purge=["type_1"]),
        ModelB: lambda o: origin.CacheKeys(
            cache=[],
            purge=["type_2", "foo"],
        ),
        ModelC: lambda o: origin.CacheKeys(
            cache=[],
            purge=["type_3", "foo"],
        ),
    }
    config = pretend.stub(registry={"cache_keys": key_makers})
    session = pretend.stub(
        info={},
        new={ModelA()},
        dirty={ModelB()},
        deleted={ModelC(), ModelD()},
    )

    origin.store_purge_keys(config, session, pretend.stub())

    # Keys from all three registered types are merged into one set.
    assert session.info["warehouse.cache.origin.purges"] == {
        "type_1", "type_2", "type_3", "foo",
    }
def test_execute_purge_success(app_config):
    """execute_purge hands the accumulated keys to the IOriginCache service."""
    cacher = pretend.stub(purge=pretend.call_recorder(lambda purges: None))
    factory = pretend.call_recorder(lambda ctx, config: cacher)
    app_config.register_service_factory(factory, IOriginCache)
    app_config.commit()

    pending = {"type_1", "type_2", "foobar"}
    session = pretend.stub(
        info={"warehouse.cache.origin.purges": set(pending)},
    )

    origin.execute_purge(app_config, session)

    # The service was built once, purged once, and the pending set was cleared.
    assert factory.calls == [pretend.call(None, app_config)]
    assert cacher.purge.calls == [pretend.call(pending)]
    assert "warehouse.cache.origin.purges" not in session.info
def test_execute_purge_no_backend():
    """Pending keys are still discarded when no cache backend is registered."""
    @pretend.call_recorder
    def find_service_factory(interface):
        # Simulates a registry with no IOriginCache factory.
        raise ValueError

    config = pretend.stub(find_service_factory=find_service_factory)
    session = pretend.stub(
        info={"warehouse.cache.origin.purges": {"type_1", "type_2", "foobar"}},
    )

    origin.execute_purge(config, session)

    # The lookup was attempted and the session marker was removed anyway.
    assert find_service_factory.calls == [pretend.call(origin.IOriginCache)]
    assert "warehouse.cache.origin.purges" not in session.info
class TestOriginCache:
    """Tests for the origin_cache view decorator."""

    def test_no_cache_key(self):
        # With no key maker registered for the context type, the view is
        # called straight through and no service lookup result is needed.
        response = pretend.stub()

        @origin.origin_cache(1)
        def view(context, request):
            return response

        def raiser(iface):
            # Would fail if the decorator tried to find the cache service.
            raise ValueError

        context = pretend.stub()
        request = pretend.stub(
            registry={"cache_keys": {}},
            find_service=raiser,
        )
        assert view(context, request) is response

    def test_no_origin_cache(self):
        # A key maker exists, but no IOriginCache service is registered:
        # the decorator swallows the lookup failure and the view still runs.
        class Fake:
            pass

        response = pretend.stub()

        @origin.origin_cache(1)
        def view(context, request):
            return response

        @pretend.call_recorder
        def raiser(iface):
            raise ValueError

        context = Fake()
        request = pretend.stub(
            registry={
                "cache_keys": {
                    Fake: lambda X: origin.CacheKeys(cache=[], purge=[]),
                },
            },
            find_service=raiser,
        )
        assert view(context, request) is response
        # The decorator did attempt the service lookup exactly once.
        assert raiser.calls == [pretend.call(IOriginCache)]

    @pytest.mark.parametrize(
        ("seconds", "keys"),
        [
            (745, None),
            (823, ["nope", "yup"]),
        ],
    )
    def test_response_hook(self, seconds, keys):
        # Full happy path: key maker + cache service present. The decorator
        # registers a response callback that caches under the sorted union
        # of per-object keys and any extra keys passed to origin_cache().
        class Fake:
            pass

        class Cache:
            @staticmethod
            @pretend.call_recorder
            def cache(keys, request, response, seconds, stale_while_revalidate,
                      stale_if_error):
                pass

        response = pretend.stub()
        deco = origin.origin_cache(seconds, keys=keys)

        @deco
        def view(context, request):
            return response

        key_maker = pretend.call_recorder(
            lambda obj: origin.CacheKeys(cache=["one", "two"], purge=[])
        )
        cacher = Cache()
        context = Fake()
        callbacks = []
        request = pretend.stub(
            registry={"cache_keys": {Fake: key_maker}},
            find_service=lambda iface: cacher,
            add_response_callback=callbacks.append,
        )

        assert view(context, request) is response
        assert key_maker.calls == [pretend.call(context)]
        assert len(callbacks) == 1
        # Fire the registered callback by hand to observe the cache call.
        callbacks[0](request, response)
        assert cacher.cache.calls == [
            pretend.call(
                sorted(["one", "two"] + ([] if keys is None else keys)),
                request,
                response,
                seconds=seconds,
                stale_while_revalidate=None,
                stale_if_error=None,
            ),
        ]
class TestKeyMaker:
    """key_maker_factory renders cache/purge key templates against an object."""

    def test_both_cache_and_purge(self):
        maker = origin.key_maker_factory(
            cache_keys=["foo", "foo/{obj.attr}"],
            purge_keys=["bar", "bar/{obj.attr}"],
        )
        obj = pretend.stub(attr="bar")
        assert maker(obj) == origin.CacheKeys(
            cache=["foo", "foo/bar"],
            purge=["bar", "bar/bar"],
        )

    def test_only_cache(self):
        # Missing purge templates produce an empty purge list.
        maker = origin.key_maker_factory(
            cache_keys=["foo", "foo/{obj.attr}"],
            purge_keys=None,
        )
        obj = pretend.stub(attr="bar")
        assert maker(obj) == origin.CacheKeys(
            cache=["foo", "foo/bar"],
            purge=[],
        )

    def test_only_purge(self):
        # Missing cache templates produce an empty cache list.
        maker = origin.key_maker_factory(
            cache_keys=None,
            purge_keys=["bar", "bar/{obj.attr}"],
        )
        obj = pretend.stub(attr="bar")
        assert maker(obj) == origin.CacheKeys(
            cache=[],
            purge=["bar", "bar/bar"],
        )
def test_register_origin_keys(monkeypatch):
    """register_origin_cache_keys builds a key maker per type and stores it."""
    class Fake1:
        pass

    class Fake2:
        pass

    key_maker = pretend.stub()
    factory = pretend.call_recorder(lambda **kw: key_maker)
    monkeypatch.setattr(origin, "key_maker_factory", factory)

    config = pretend.stub(registry={})
    origin.register_origin_cache_keys(
        config, Fake1, cache_keys=["one", "two/{obj.attr}"])
    origin.register_origin_cache_keys(
        config, Fake2, cache_keys=["three"], purge_keys=["lol"],
    )

    # The factory saw exactly the keyword arguments passed in (purge_keys
    # defaulting to None), and both types map to the produced key maker.
    assert factory.calls == [
        pretend.call(cache_keys=["one", "two/{obj.attr}"], purge_keys=None),
        pretend.call(cache_keys=["three"], purge_keys=["lol"]),
    ]
    assert config.registry == {
        "cache_keys": {
            Fake1: key_maker,
            Fake2: key_maker,
        },
    }
def test_includeme_no_origin_cache():
    """includeme always registers the directive, even without a backend setting."""
    config = pretend.stub(
        add_directive=pretend.call_recorder(lambda name, func: None),
        registry=pretend.stub(settings={}),
    )

    origin.includeme(config)

    assert config.add_directive.calls == [
        pretend.call(
            "register_origin_cache_keys",
            origin.register_origin_cache_keys,
        ),
    ]
def test_includeme_with_origin_cache():
    """includeme resolves the configured backend and registers its service factory."""
    cache_class = pretend.stub(create_service=pretend.stub())
    settings = {
        "origin_cache.backend": "warehouse.cache.origin.fastly.FastlyCache",
    }
    config = pretend.stub(
        add_directive=pretend.call_recorder(lambda name, func: None),
        registry=pretend.stub(settings=settings),
        maybe_dotted=pretend.call_recorder(lambda n: cache_class),
        register_service_factory=pretend.call_recorder(lambda f, iface: None)
    )

    origin.includeme(config)

    # Directive registered, dotted path resolved, service factory wired up.
    assert config.add_directive.calls == [
        pretend.call(
            "register_origin_cache_keys",
            origin.register_origin_cache_keys,
        ),
    ]
    assert config.maybe_dotted.calls == [
        pretend.call("warehouse.cache.origin.fastly.FastlyCache"),
    ]
    assert config.register_service_factory.calls == [
        pretend.call(cache_class.create_service, IOriginCache),
    ]
| apache-2.0 |
3dfxsoftware/cbss-addons | mrp_repair/__init__.py | 380 | 1087 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import mrp_repair
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| gpl-2.0 |
Slezhuk/ansible | lib/ansible/modules/packaging/language/maven_artifact.py | 63 | 16002 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Chris Schmidt <chris.schmidt () contrastsecurity.com>
#
# Built using https://github.com/hamnis/useful-scripts/blob/master/python/download-maven-artifact
# as a reference and starting point.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: maven_artifact
short_description: Downloads an Artifact from a Maven Repository
version_added: "2.0"
description:
- Downloads an artifact from a maven repository given the maven coordinates provided to the module. Can retrieve
- snapshots or release versions of the artifact and will resolve the latest available version if one is not
- available.
author: "Chris Schmidt (@chrisisbeef)"
requirements:
- "python >= 2.6"
- lxml
- boto if using a S3 repository (s3://...)
options:
group_id:
description:
- The Maven groupId coordinate
required: true
artifact_id:
description:
- The maven artifactId coordinate
required: true
version:
description:
- The maven version coordinate
required: false
default: latest
classifier:
description:
- The maven classifier coordinate
required: false
default: null
extension:
description:
- The maven type/extension coordinate
required: false
default: jar
repository_url:
description:
- The URL of the Maven Repository to download from.
- Use s3://... if the repository is hosted on Amazon S3, added in version 2.2.
required: false
default: http://repo1.maven.org/maven2
username:
description:
        - The username to authenticate as to the Maven Repository. Use the AWS access key if the repository is hosted on S3
required: false
default: null
aliases: [ "aws_secret_key" ]
password:
description:
        - The password to authenticate with to the Maven Repository. Use the AWS secret access key if the repository is hosted on S3
required: false
default: null
aliases: [ "aws_secret_access_key" ]
dest:
description:
- The path where the artifact should be written to
required: true
default: false
state:
description:
- The desired state of the artifact
required: true
default: present
choices: [present,absent]
timeout:
description:
- Specifies a timeout in seconds for the connection attempt
required: false
default: 10
version_added: "2.3"
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be set to C(no) when no other option exists.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: "1.9.3"
'''
EXAMPLES = '''
# Download the latest version of the JUnit framework artifact from Maven Central
- maven_artifact:
group_id: junit
artifact_id: junit
dest: /tmp/junit-latest.jar
# Download JUnit 4.11 from Maven Central
- maven_artifact:
group_id: junit
artifact_id: junit
version: 4.11
dest: /tmp/junit-4.11.jar
# Download an artifact from a private repository requiring authentication
- maven_artifact:
group_id: com.company
artifact_id: library-name
repository_url: 'https://repo.company.com/maven'
username: user
password: pass
dest: /tmp/library-name-latest.jar
# Download a WAR File to the Tomcat webapps directory to be deployed
- maven_artifact:
group_id: com.company
artifact_id: web-app
extension: war
repository_url: 'https://repo.company.com/maven'
dest: /var/lib/tomcat7/webapps/web-app.war
'''
from lxml import etree
import os
import hashlib
import sys
import posixpath
import urlparse
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
try:
import boto3
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
class Artifact(object):
    """A set of Maven coordinates (group, artifact, version, classifier, extension).

    Provides helpers to map the coordinates onto repository paths and
    local filenames.
    """

    def __init__(self, group_id, artifact_id, version, classifier=None, extension='jar'):
        # group_id and artifact_id are mandatory; version may be falsy
        # (it is resolved later for "latest"/snapshot lookups).
        for label, value in (("group_id", group_id), ("artifact_id", artifact_id)):
            if not value:
                raise ValueError("%s must be set" % label)

        self.group_id = group_id
        self.artifact_id = artifact_id
        self.version = version
        self.classifier = classifier
        # A falsy extension falls back to the Maven default packaging.
        self.extension = extension or "jar"

    def is_snapshot(self):
        """Return a truthy value when the version is a -SNAPSHOT version."""
        if not self.version:
            # Preserve the falsy version itself (None / empty string).
            return self.version
        return self.version.endswith("SNAPSHOT")

    def path(self, with_version=True):
        """Return the repository-relative path for this artifact."""
        segments = [self.group_id.replace(".", "/"), self.artifact_id]
        if with_version and self.version:
            segments.append(self.version)
        return posixpath.join(*segments)

    def _generate_filename(self):
        # e.g. "name.jar" or "name-classifier.jar".
        infix = "-" + self.classifier if self.classifier else ""
        return self.artifact_id + infix + "." + self.extension

    def get_filename(self, filename=None):
        """Return a destination filename, deriving one inside *filename* if it is a directory."""
        if not filename:
            return self._generate_filename()
        if os.path.isdir(filename):
            return os.path.join(filename, self._generate_filename())
        return filename

    def __str__(self):
        # Canonical colon-separated coordinates; extension and classifier
        # are only included when they carry information.
        if self.classifier:
            coords = (self.group_id, self.artifact_id, self.extension,
                      self.classifier, self.version)
        elif self.extension != "jar":
            coords = (self.group_id, self.artifact_id, self.extension, self.version)
        else:
            coords = (self.group_id, self.artifact_id, self.version)
        return ":".join("%s" % part for part in coords)

    @staticmethod
    def parse(input):
        """Parse "g:a[:ext[:classifier]]:v" coordinates; return None if malformed."""
        # Parameter name intentionally kept for interface compatibility,
        # even though it shadows the builtin.
        parts = input.split(":")
        if len(parts) < 3:
            return None
        g, a = parts[0], parts[1]
        v = parts[-1]
        t = c = None
        if len(parts) == 4:
            t = parts[2]
        elif len(parts) == 5:
            t = parts[2]
            c = parts[3]
        return Artifact(g, a, v, c, t)
class MavenDownloader:
def __init__(self, module, base="http://repo1.maven.org/maven2"):
self.module = module
if base.endswith("/"):
base = base.rstrip("/")
self.base = base
self.user_agent = "Maven Artifact Downloader/1.0"
def _find_latest_version_available(self, artifact):
path = "/%s/maven-metadata.xml" % (artifact.path(False))
xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r))
v = xml.xpath("/metadata/versioning/versions/version[last()]/text()")
if v:
return v[0]
def find_uri_for_artifact(self, artifact):
if artifact.version == "latest":
artifact.version = self._find_latest_version_available(artifact)
if artifact.is_snapshot():
path = "/%s/maven-metadata.xml" % (artifact.path())
xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r))
timestamp = xml.xpath("/metadata/versioning/snapshot/timestamp/text()")[0]
buildNumber = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0]
for snapshotArtifact in xml.xpath("/metadata/versioning/snapshotVersions/snapshotVersion"):
if (len(snapshotArtifact.xpath("classifier/text()")) > 0 and
snapshotArtifact.xpath("classifier/text()")[0] == artifact.classifier and
len(snapshotArtifact.xpath("extension/text()")) > 0 and
snapshotArtifact.xpath("extension/text()")[0] == artifact.extension):
return self._uri_for_artifact(artifact, snapshotArtifact.xpath("value/text()")[0])
return self._uri_for_artifact(artifact, artifact.version.replace("SNAPSHOT", timestamp + "-" + buildNumber))
return self._uri_for_artifact(artifact, artifact.version)
def _uri_for_artifact(self, artifact, version=None):
if artifact.is_snapshot() and not version:
raise ValueError("Expected uniqueversion for snapshot artifact " + str(artifact))
elif not artifact.is_snapshot():
version = artifact.version
if artifact.classifier:
return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "-" + artifact.classifier + "." + artifact.extension)
return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "." + artifact.extension)
def _request(self, url, failmsg, f):
url_to_use = url
parsed_url = urlparse(url)
if parsed_url.scheme=='s3':
parsed_url = urlparse(url)
bucket_name = parsed_url.netloc
key_name = parsed_url.path[1:]
client = boto3.client('s3',aws_access_key_id=self.module.params.get('username', ''), aws_secret_access_key=self.module.params.get('password', ''))
url_to_use = client.generate_presigned_url('get_object',Params={'Bucket':bucket_name,'Key':key_name},ExpiresIn=10)
req_timeout = self.module.params.get('timeout')
# Hack to add parameters in the way that fetch_url expects
self.module.params['url_username'] = self.module.params.get('username', '')
self.module.params['url_password'] = self.module.params.get('password', '')
self.module.params['http_agent'] = self.module.params.get('user_agent', None)
response, info = fetch_url(self.module, url_to_use, timeout=req_timeout)
if info['status'] != 200:
raise ValueError(failmsg + " because of " + info['msg'] + "for URL " + url_to_use)
else:
return f(response)
def download(self, artifact, filename=None):
filename = artifact.get_filename(filename)
if not artifact.version or artifact.version == "latest":
artifact = Artifact(artifact.group_id, artifact.artifact_id, self._find_latest_version_available(artifact),
artifact.classifier, artifact.extension)
url = self.find_uri_for_artifact(artifact)
if not self.verify_md5(filename, url + ".md5"):
response = self._request(url, "Failed to download artifact " + str(artifact), lambda r: r)
if response:
f = open(filename, 'w')
# f.write(response.read())
self._write_chunks(response, f, report_hook=self.chunk_report)
f.close()
return True
else:
return False
else:
return True
def chunk_report(self, bytes_so_far, chunk_size, total_size):
percent = float(bytes_so_far) / total_size
percent = round(percent * 100, 2)
sys.stdout.write("Downloaded %d of %d bytes (%0.2f%%)\r" %
(bytes_so_far, total_size, percent))
if bytes_so_far >= total_size:
sys.stdout.write('\n')
def _write_chunks(self, response, file, chunk_size=8192, report_hook=None):
total_size = response.info().getheader('Content-Length').strip()
total_size = int(total_size)
bytes_so_far = 0
while 1:
chunk = response.read(chunk_size)
bytes_so_far += len(chunk)
if not chunk:
break
file.write(chunk)
if report_hook:
report_hook(bytes_so_far, chunk_size, total_size)
return bytes_so_far
def verify_md5(self, file, remote_md5):
    """Return True iff *file* exists and its MD5 matches the remote checksum."""
    if not os.path.exists(file):
        return False
    checksum = self._local_md5(file)
    expected = self._request(remote_md5, "Failed to download MD5", lambda r: r.read())
    return checksum == expected
def _local_md5(self, file):
md5 = hashlib.md5()
f = open(file, 'rb')
for chunk in iter(lambda: f.read(8192), ''):
md5.update(chunk)
f.close()
return md5.hexdigest()
def main():
    """Ansible entry point: resolve, verify and download a Maven artifact."""
    module = AnsibleModule(
        argument_spec = dict(
            group_id = dict(default=None),
            artifact_id = dict(default=None),
            version = dict(default="latest"),
            classifier = dict(default=None),
            extension = dict(default='jar'),
            repository_url = dict(default=None),
            # NOTE(review): aliasing 'username' to 'aws_secret_key' looks like
            # a copy/paste slip (an access-key alias would be expected here);
            # left unchanged because aliases are part of the public interface.
            username = dict(default=None,aliases=['aws_secret_key']),
            password = dict(default=None, no_log=True,aliases=['aws_secret_access_key']),
            state = dict(default="present", choices=["present","absent"]), # TODO - Implement a "latest" state
            timeout = dict(default=10, type='int'),
            dest = dict(type="path", default=None),
            validate_certs = dict(required=False, default=True, type='bool'),
        )
    )

    # Default to Maven Central when no repository is given.
    repository_url = module.params["repository_url"]
    if not repository_url:
        repository_url = "http://repo1.maven.org/maven2"

    try:
        parsed_url = urlparse(repository_url)
    except AttributeError as e:
        module.fail_json(msg='url parsing went wrong %s' % e)

    # s3:// repositories are fetched through boto3 presigned URLs.
    if parsed_url.scheme=='s3' and not HAS_BOTO:
        module.fail_json(msg='boto3 required for this module, when using s3:// repository URLs')

    group_id = module.params["group_id"]
    artifact_id = module.params["artifact_id"]
    version = module.params["version"]
    classifier = module.params["classifier"]
    extension = module.params["extension"]
    repository_username = module.params["username"]
    repository_password = module.params["password"]
    state = module.params["state"]
    dest = module.params["dest"]

    #downloader = MavenDownloader(module, repository_url, repository_username, repository_password)
    downloader = MavenDownloader(module, repository_url)

    try:
        artifact = Artifact(group_id, artifact_id, version, classifier, extension)
    except ValueError as e:
        module.fail_json(msg=e.args[0])

    # Idempotency: if dest already holds a file whose MD5 matches the remote
    # checksum, report "unchanged"; otherwise make sure the parent dir exists.
    prev_state = "absent"
    if os.path.isdir(dest):
        # Directory given: derive the target filename from the artifact.
        dest = posixpath.join(dest, artifact_id + "-" + version + "." + extension)
    if os.path.lexists(dest) and downloader.verify_md5(dest, downloader.find_uri_for_artifact(artifact) + '.md5'):
        prev_state = "present"
    else:
        path = os.path.dirname(dest)
        if not os.path.exists(path):
            os.makedirs(path)

    if prev_state == "present":
        module.exit_json(dest=dest, state=state, changed=False)

    try:
        if downloader.download(artifact, dest):
            module.exit_json(state=state, dest=dest, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier,
                             extension=extension, repository_url=repository_url, changed=True)
        else:
            module.fail_json(msg="Unable to download the artifact")
    except ValueError as e:
        module.fail_json(msg=e.args[0])
# Run the Ansible module only when executed as a script.
if __name__ == '__main__':
    main()
| gpl-3.0 |
microdee/IronHydra | src/IronHydra/Lib/genericpath.py | 246 | 3015 | """
Path operations common to more than one OS
Do not use directly. The OS specific modules import the appropriate
functions from this module themselves.
"""
import os
import stat
__all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime',
'getsize', 'isdir', 'isfile']
# Does a path exist?
# This is false for dangling symbolic links on systems that support them.
def exists(path):
    """Test whether a path exists.  Returns False for broken symbolic links."""
    try:
        os.stat(path)
        return True
    except os.error:
        return False
# This follows symbolic links, so both islink() and isfile() can be true
# for the same path on systems that support symlinks
def isfile(path):
    """Test whether a path is a regular file."""
    try:
        mode = os.stat(path).st_mode
    except os.error:
        return False
    return stat.S_ISREG(mode)
# Is a path a directory?
# This follows symbolic links, so both islink() and isdir()
# can be true for the same path on systems that support symlinks
def isdir(s):
    """Return true if the pathname refers to an existing directory."""
    try:
        mode = os.stat(s).st_mode
    except os.error:
        return False
    return stat.S_ISDIR(mode)
def getsize(filename):
    """Return the size of a file, reported by os.stat()."""
    # Propagates OSError for missing/inaccessible paths (no try/except here).
    return os.stat(filename).st_size


def getmtime(filename):
    """Return the last modification time of a file, reported by os.stat()."""
    return os.stat(filename).st_mtime


def getatime(filename):
    """Return the last access time of a file, reported by os.stat()."""
    return os.stat(filename).st_atime


def getctime(filename):
    """Return the metadata change time of a file, reported by os.stat()."""
    return os.stat(filename).st_ctime
# Return the longest prefix of all list elements.
def commonprefix(m):
    """Given a list of pathnames, return the longest common leading component."""
    if not m:
        return ''
    # The lexicographic min and max bracket every other element, so the
    # common prefix of the whole list equals the common prefix of these two.
    lo = min(m)
    hi = max(m)
    for i, (a, b) in enumerate(zip(lo, hi)):
        if a != b:
            return lo[:i]
    return lo
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
# Generic implementation of splitext, to be parametrized with
# the separators
def _splitext(p, sep, altsep, extsep):
"""Split the extension from a pathname.
Extension is everything from the last dot to the end, ignoring
leading dots. Returns "(root, ext)"; ext may be empty."""
sepIndex = p.rfind(sep)
if altsep:
altsepIndex = p.rfind(altsep)
sepIndex = max(sepIndex, altsepIndex)
dotIndex = p.rfind(extsep)
if dotIndex > sepIndex:
# skip all leading dots
filenameIndex = sepIndex + 1
while filenameIndex < dotIndex:
if p[filenameIndex] != extsep:
return p[:dotIndex], p[dotIndex:]
filenameIndex += 1
return p, ''
| mit |
brunogamacatao/portalsaladeaula | django/db/models/sql/constants.py | 394 | 1043 | import re
# Valid query types (a dictionary is used for speedy lookups; all values None).
QUERY_TERMS = dict.fromkeys((
    'exact', 'iexact', 'contains', 'icontains', 'gt', 'gte', 'lt', 'lte', 'in',
    'startswith', 'istartswith', 'endswith', 'iendswith', 'range', 'year',
    'month', 'day', 'week_day', 'isnull', 'search', 'regex', 'iregex',
))

# Size of each "chunk" for get_iterator calls.
# Larger values are slightly faster at the expense of more storage space.
GET_ITERATOR_CHUNK_SIZE = 100

# Separator used to split filter strings apart.
LOOKUP_SEP = '__'

# Constants to make looking up tuple values clearer.
# Join lists (indexes into the tuples that are values in the alias_map
# dictionary in the Query class).
TABLE_NAME, RHS_ALIAS, JOIN_TYPE, LHS_ALIAS, LHS_JOIN_COL, RHS_JOIN_COL, \
    NULLABLE = range(7)

# How many results to expect from a cursor.execute call
MULTI = 'multi'
SINGLE = 'single'

ORDER_PATTERN = re.compile(r'\?|[-+]?[.\w]+$')
ORDER_DIR = {
    'ASC': ('ASC', 'DESC'),
    'DESC': ('DESC', 'ASC')}
RomRaider/assembla.mirror | 3rdparty/IzPack/utils/wrappers/izpack2app/izpack2app.py | 26 | 1678 | #!/usr/bin/env python
# ........................................................................... #
#
# IzPack - 2007, 2008 Julien Ponge, All Rights Reserved.
#
# http://izpack.org/
# http://izpack.codehaus.org/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ........................................................................... #
import os
import sys
from shutil import *
def main():
    """Wrap an IzPack installer jar into a Mac OS X .app bundle."""
    template_base = os.path.dirname(sys.argv[0])
    jar_path = sys.argv[1]
    jar_name = os.path.basename(jar_path)
    app_path = sys.argv[2]

    # Start from a clean copy of the bundle template.
    if os.path.exists(app_path):
        rmtree(app_path)
    copytree(os.path.join(template_base, 'Mac-App-Template'), app_path)

    # Drop the installer jar into the bundle's Java resources folder.
    java_folder = os.path.join(app_path, 'Contents/Resources/Java/')
    if not os.path.exists(java_folder):
        os.mkdir(java_folder)
    copy(jar_path, java_folder)

    # Substitute the jar name into the bundle's property list.
    plist_path = os.path.join(app_path, 'Contents/Info.plist')
    plist_file = open(plist_path, 'r')
    content = ''.join(plist_file.readlines()).replace('__JAR__', jar_name)
    plist_file.close()
    plist_file = open(plist_path, 'w')
    plist_file.write(content)
    plist_file.close()
if __name__ == '__main__':
    # Expect exactly two arguments: the installer jar and the target .app path.
    if (len(sys.argv) != 3):
        print "Usage: izpack2app.py installer.jar Installer.app"
    else:
        main()
| gpl-2.0 |
FRBs/FRB | frb/surveys/wise.py | 2 | 3806 | """WISE Survey"""
from IPython import embed
import numpy as np
from astropy import units, io, utils
from frb.surveys import dlsurvey
from frb.surveys import catalog_utils
# Dependencies
try:
from pyvo.dal import sia
except ImportError:
print("Warning: You need to install pyvo to retrieve DES images")
_svc = None
else:
_DEF_ACCESS_URL = "https://datalab.noao.edu/sia/des_dr1"
_svc = sia.SIAService(_DEF_ACCESS_URL)
# Column-name mapping for WISE photometry: survey-facing keys -> catalog columns.
photom = {}
photom['WISE'] = {}
WISE_bands = ['W1', 'W2', 'W3', 'W4']
for band in WISE_bands:
    lower = band.lower()
    photom['WISE']['WISE_%s' % band] = '%smpro' % lower
    photom['WISE']['WISE_%s_err' % band] = '%ssigmpro' % lower
photom['WISE']['ra'] = 'ra'
photom['WISE']['dec'] = 'dec'
class WISE_Survey(dlsurvey.DL_Survey):
    """
    Class to handle queries on the WISE survey

    Child of DL_Survey which uses datalab to access NOAO

    Args:
        coord (SkyCoord): Coordinate for surveying around
        radius (Angle): Search radius around the coordinate
    """

    def __init__(self, coord, radius, **kwargs):
        dlsurvey.DL_Survey.__init__(self, coord, radius, **kwargs)
        self.survey = 'WISE'
        self.bands = ['W1', 'W2', 'W3', 'W4']
        # SIA service used for image cutouts of the AllWISE atlas.
        self.svc = sia.SIAService("https://datalab.noao.edu/sia/allwise")
        self.qc_profile = "default"
        # Table queried by the generated SQL (see _gen_cat_query).
        self.database = "allwise.source"

    def get_catalog(self, query=None, query_fields=None, print_query=False):
        """
        Grab a catalog of sources around the input coordinate to the search radius

        Args:
            query: Not used
            query_fields (list, optional): Over-ride list of items to query
            print_query (bool): Print the SQL query generated

        Returns:
            astropy.table.Table: Catalog of sources returned. Includes WISE
            photometry for matched sources.
        """
        # Main WISE query (delegated to the datalab-backed parent class).
        main_cat = super(WISE_Survey, self).get_catalog(query_fields=query_fields, print_query=print_query)
        if len(main_cat) == 0:
            return main_cat
        # Rename columns per the photom['WISE'] mapping; masked values -> -999.
        main_cat = catalog_utils.clean_cat(main_cat, photom['WISE'], fill_mask=-999.)
        # Finish
        self.catalog = main_cat
        self.validate_catalog()
        return self.catalog

    def _gen_cat_query(self,query_fields=None):
        """
        Generate SQL query for catalog search

        self.query is modified in place

        Args:
            query_fields (list): Override the default list for the SQL query
        """
        if query_fields is None:
            # Default column set: identifiers plus the four-band magnitudes,
            # their errors, and the photometric-quality / moon-level flags.
            object_id_fields = ['source_id','ra','dec','tmass_key']
            mag_fields = ['w1mpro', 'w2mpro', 'w3mpro', 'w4mpro',
                          'w1sigmpro', 'w2sigmpro', 'w3sigmpro', 'w4sigmpro', 'ph_qual',
                          'moon_lev']
            query_fields = object_id_fields+mag_fields
        self.query = dlsurvey._default_query_str(query_fields, self.database, self.coord, self.radius)

    def _select_best_img(self,imgTable,verbose,timeout=120):
        """
        Select the best band for a cutout

        Args:
            imgTable: Table of images
            verbose (bool): Print status
            timeout (int or float): How long to wait before timing out, in seconds

        Returns:
            HDU: header data unit for the downloaded image
        """
        row = imgTable[np.argmax(imgTable['exptime'].data.data.astype('float'))]  # pick image with longest exposure time
        url = row['access_url'].decode()
        if verbose:
            print ('downloading deepest stacked image...')
        # Cached download, then opened as a FITS HDU list.
        imagedat = io.fits.open(utils.data.download_file(url,cache=True,show_progress=False,timeout=timeout))
        return imagedat
| bsd-3-clause |
CiscoSystems/neutron | neutron/plugins/ibm/sdnve_api_fake.py | 5 | 2084 | # Copyright 2014 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.i18n import _LI
from neutron.openstack.common import log as logging
from neutron.plugins.ibm.common import constants
LOG = logging.getLogger(__name__)

# Status code every fake call returns, mimicking a successful controller reply.
HTTP_OK = 200
class FakeClient():
    '''Fake Client for SDNVE controller.

    Stand-in used when no real controller is available: every call just
    logs the operation and reports success (HTTP_OK with no payload).
    '''

    def __init__(self, **kwargs):
        LOG.info(_LI('Fake SDNVE controller initialized'))

    def sdnve_list(self, resource, **_params):
        LOG.info(_LI('Fake SDNVE controller: list'))
        return (HTTP_OK, None)

    def sdnve_show(self, resource, specific, **_params):
        LOG.info(_LI('Fake SDNVE controller: show'))
        return (HTTP_OK, None)

    def sdnve_create(self, resource, body):
        LOG.info(_LI('Fake SDNVE controller: create'))
        return (HTTP_OK, None)

    def sdnve_update(self, resource, specific, body=None):
        LOG.info(_LI('Fake SDNVE controller: update'))
        return (HTTP_OK, None)

    def sdnve_delete(self, resource, specific):
        LOG.info(_LI('Fake SDNVE controller: delete'))
        return (HTTP_OK, None)

    def sdnve_get_tenant_byid(self, id):
        LOG.info(_LI('Fake SDNVE controller: get tenant by id'))
        # Echo the id back with the overlay tenant type constant.
        return id, constants.TENANT_TYPE_OF

    def sdnve_check_and_create_tenant(self, id, network_type=None):
        LOG.info(_LI('Fake SDNVE controller: check and create tenant'))
        return id

    def sdnve_get_controller(self):
        LOG.info(_LI('Fake SDNVE controller: get controller'))
        return None
| apache-2.0 |
kit-cel/gr-dab | python/GUI/usrp_dab_tx.py | 1 | 7643 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017 Moritz Luca Schmid, Communications Engineering Lab (CEL) / Karlsruhe Institute of Technology (KIT).
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
transmit DAB+ with USRP
"""
from gnuradio import gr, uhd, blocks
from gnuradio import audio
import dab
import numpy as np
class usrp_dab_tx(gr.top_block):
    """GNU Radio flowgraph that encodes audio sub-channels into a DAB/DAB+
    transmission frame and modulates it to a USRP (or IQ file) sink."""

    def __init__(self, dab_mode, frequency, num_subch, ensemble_label, service_label, language, country_ID, protections, data_rates_n, stereo_flags, audio_sampling_rates, src_paths, selected_audio, use_usrp, dabplus_types, sink_path = "dab_iq_generated.dat"):
        gr.top_block.__init__(self)
        self.dab_mode = dab_mode
        self.frequency = frequency
        interp = 64
        # 128 MS/s master clock divided by the interpolation factor (= 2 MS/s).
        self.sample_rate = 128e6 / interp
        self.dp = dab.parameters.dab_parameters(self.dab_mode, 2000000, False)
        self.num_subch = num_subch
        self.ensemble_label = ensemble_label
        self.service_label = service_label
        self.language = language
        self.protections = protections
        self.data_rates_n = data_rates_n
        self.src_paths = src_paths
        self.use_usrp = use_usrp
        self.dabplus_types = dabplus_types
        self.sink_path = sink_path
        self.selected_audio = selected_audio
        self.volume = 80
        # Sub-channel size multiplier per protection level (0..3).
        sizes = {0: 12, 1: 8, 2: 6, 3: 4}
        self.subch_sizes = [None] * len(self.data_rates_n)
        for i in range(0, len(self.data_rates_n)):
            self.subch_sizes[i] = self.data_rates_n[i] * sizes[protections[i]]
        ########################
        # FIC
        ########################
        # source
        self.fic_src = dab.fib_source_b_make(self.dab_mode, country_ID, self.num_subch, self.ensemble_label, self.service_label, "", self.language, self.protections, self.data_rates_n, self.dabplus_types)
        # encoder
        self.fic_enc = dab.fic_encode(self.dp)
        ########################
        # MSC
        ########################
        self.recorder = audio.source_make(32000)
        self.msc_sources = [None] * self.num_subch
        self.f2s_left_converters = [None] * self.num_subch
        self.f2s_right_converters = [None] * self.num_subch
        self.mp4_encoders = [None] * self.num_subch
        self.mp2_encoders = [None] * self.num_subch
        self.rs_encoders = [None] * self.num_subch
        self.msc_encoders = [None] * self.num_subch
        # Build the per-sub-channel encoding chains.
        # NOTE(review): `is "mic"` / `is 1` rely on CPython literal interning —
        # fragile identity comparisons; `==` would be the robust form.
        for i in range(0, self.num_subch):
            if not self.src_paths[i] is "mic":
                # source
                self.msc_sources[i] = blocks.wavfile_source_make(self.src_paths[i], True)
            # float to short
            self.f2s_left_converters[i] = blocks.float_to_short_make(1, 32767)
            self.f2s_right_converters[i] = blocks.float_to_short_make(1, 32767)
            if self.dabplus_types[i] is 1:
                # mp4 encoder and Reed-Solomon encoder
                self.mp4_encoders[i] = dab.mp4_encode_sb_make(self.data_rates_n[i], 2, audio_sampling_rates[i], 1)
                self.rs_encoders[i] = dab.reed_solomon_encode_bb_make(self.data_rates_n[i])
            else:
                # mp2 encoder
                self.mp2_encoders[i] = dab.mp2_encode_sb_make(self.data_rates_n[i], 2, audio_sampling_rates[i])
            # encoder
            self.msc_encoders[i] = dab.msc_encode(self.dp, self.data_rates_n[i], self.protections[i])
        ########################
        # MUX
        ########################
        self.mux = dab.dab_transmission_frame_mux_bb_make(self.dab_mode, self.num_subch, self.subch_sizes)
        # Frame trigger: a single 1 followed by zeros, repeated per frame.
        self.trigsrc = blocks.vector_source_b([1] + [0] * (self.dp.symbols_per_frame-2), True)
        ########################
        # Modulator
        ########################
        # NOTE(review): '/' is integer division under Python 2 here; confirm
        # num_carriers is a multiple of 4 before porting to Python 3.
        self.s2v_mod = blocks.stream_to_vector(gr.sizeof_char, self.dp.num_carriers/4)
        self.mod = dab.ofdm_mod(self.dp)
        ########################
        # Sink
        ########################
        if self.use_usrp:
            self.sink = uhd.usrp_sink("", uhd.io_type.COMPLEX_FLOAT32, 1)
            self.sink.set_samp_rate(self.sample_rate)
            self.sink.set_antenna("TX/RX")
            self.sink.set_center_freq(self.frequency)
        else:
            # No hardware: dump the IQ stream to a file instead.
            self.sink = blocks.file_sink_make(gr.sizeof_gr_complex, self.sink_path)
        # audio sink
        self.audio = audio.sink_make(32000)
        self.gain_left = blocks.multiply_const_ff_make(1, 1)
        self.gain_right = blocks.multiply_const_ff_make(1, 1)
        self.s2f_left = blocks.short_to_float_make(1, 32767)
        self.s2f_right = blocks.short_to_float_make(1, 32767)
        ########################
        # Connections
        ########################
        self.connect(self.fic_src, self.fic_enc, (self.mux, 0))
        # Wire each sub-channel chain into mux input i+1 (input 0 is the FIC).
        for i in range(0, self.num_subch):
            if self.dabplus_types[i] is 1:
                if self.src_paths[i] is "mic":
                    self.connect((self.recorder, 0), self.f2s_left_converters[i], (self.mp4_encoders[i], 0), self.rs_encoders[i], self.msc_encoders[i], (self.mux, i + 1))
                    if stereo_flags[i] == 0:
                        self.connect((self.recorder, 1), self.f2s_right_converters[i], (self.mp4_encoders[i], 1))
                    else:
                        # Mono: feed the left channel into both encoder inputs.
                        self.connect(self.f2s_left_converters[i], (self.mp4_encoders[i], 1))
                else:
                    self.connect((self.msc_sources[i], 0), self.f2s_left_converters[i], (self.mp4_encoders[i], 0), self.rs_encoders[i], self.msc_encoders[i], (self.mux, i+1))
                    if stereo_flags[i] == 0:
                        self.connect((self.msc_sources[i], 1), self.f2s_right_converters[i], (self.mp4_encoders[i], 1))
                    else:
                        self.connect(self.f2s_left_converters[i], (self.mp4_encoders[i], 1))
            else:
                self.connect((self.msc_sources[i], 0), self.f2s_left_converters[i], (self.mp2_encoders[i], 0), self.msc_encoders[i], (self.mux, i + 1))
                if stereo_flags[i] == 0:
                    self.connect((self.msc_sources[i], 1), self.f2s_right_converters[i], (self.mp2_encoders[i], 1))
                else:
                    self.connect(self.f2s_left_converters[i], (self.mp2_encoders[i], 1))
        self.connect((self.mux, 0), self.s2v_mod, (self.mod, 0))
        self.connect(self.trigsrc, (self.mod, 1))
        if use_usrp:
            self.connect(self.mod, self.sink)
        else:
            # Throttle to ~2 MS/s so a file sink doesn't consume the CPU.
            self.connect(self.mod, blocks.throttle_make(gr.sizeof_gr_complex, 2e6), self.sink)
        #self.connect((self.msc_sources[self.selected_audio-1], 0), self.gain_left, (self.audio, 0))
        #self.connect((self.msc_sources[self.selected_audio-1], 1), self.gain_right, (self.audio, 1))

    def transmit(self):
        # NOTE(review): this constructs a brand-new usrp_dab_tx with no
        # arguments, which would raise TypeError given __init__'s signature —
        # `self.run()` on the existing flowgraph looks like the intent; confirm.
        tx = usrp_dab_tx()
        tx.run()

    def set_volume(self, volume):
        """Apply *volume* as the gain of both monitor-audio channels."""
        self.gain_left.set_k(volume)
        self.gain_right.set_k(volume)
| gpl-3.0 |
sumanthha/fundafriend | django/utils/simplejson/scanner.py | 928 | 2227 | """JSON token scanner
"""
import re
try:
from simplejson._speedups import make_scanner as c_make_scanner
except ImportError:
c_make_scanner = None
__all__ = ['make_scanner']
# Matches a JSON number: integer part, optional fraction, optional exponent.
NUMBER_RE = re.compile(
    r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
    (re.VERBOSE | re.MULTILINE | re.DOTALL))


def py_make_scanner(context):
    """Build a pure-Python scanner closure from a JSONDecoder *context*.

    The returned callable takes (string, idx) and returns (value, end_idx),
    raising StopIteration when no JSON value starts at idx.
    """
    # Bind decoder hooks to locals so the hot _scan_once loop avoids
    # attribute lookups on every token.
    parse_object = context.parse_object
    parse_array = context.parse_array
    parse_string = context.parse_string
    match_number = NUMBER_RE.match
    encoding = context.encoding
    strict = context.strict
    parse_float = context.parse_float
    parse_int = context.parse_int
    parse_constant = context.parse_constant
    object_hook = context.object_hook

    def _scan_once(string, idx):
        try:
            nextchar = string[idx]
        except IndexError:
            # Past end of input: no value here.
            raise StopIteration
        # Dispatch on the first character of the next token.
        if nextchar == '"':
            return parse_string(string, idx + 1, encoding, strict)
        elif nextchar == '{':
            return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook)
        elif nextchar == '[':
            return parse_array((string, idx + 1), _scan_once)
        elif nextchar == 'n' and string[idx:idx + 4] == 'null':
            return None, idx + 4
        elif nextchar == 't' and string[idx:idx + 4] == 'true':
            return True, idx + 4
        elif nextchar == 'f' and string[idx:idx + 5] == 'false':
            return False, idx + 5
        m = match_number(string, idx)
        if m is not None:
            integer, frac, exp = m.groups()
            # A fraction or exponent makes it a float; otherwise an int.
            if frac or exp:
                res = parse_float(integer + (frac or '') + (exp or ''))
            else:
                res = parse_int(integer)
            return res, m.end()
        elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
            return parse_constant('NaN'), idx + 3
        elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
            return parse_constant('Infinity'), idx + 8
        elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
            return parse_constant('-Infinity'), idx + 9
        else:
            raise StopIteration
    return _scan_once

# Prefer the C-accelerated scanner when the _speedups extension is available.
make_scanner = c_make_scanner or py_make_scanner
| bsd-3-clause |
flexpeace/btb | scanblog/btb/management/commands/org_status_email.py | 2 | 8239 | import os
import json
import codecs
import random
import datetime
from django.conf import settings
from django.core.management import BaseCommand
from django.core.mail import mail_managers
from django.db.models import Q
from django.contrib.auth.models import Group
from django.contrib.sites.models import Site
from django.template import loader, Context
from comments.models import Comment
from profiles.models import Organization
from scanning.models import Document, Scan, PendingScan
from correspondence.models import Letter
from correspondence.views import needed_letters
from annotations.models import Note
def send_org_mail(org):
    """Render and send the "distance from inbox zero" status email for *org*.

    Gathers counts and oldest/most-recent items for pending scans, scans,
    documents, outgoing mail and tickets, then mails the rendered template
    to the site managers.
    """
    ctx = {'site': Site.objects.get_current(), 'org': org}
    # Get a random org moderator for permissioning.
    now = datetime.datetime.now()
    try:
        org_user = org.moderators.all()[0]
    except IndexError:
        print "No moderators found for {0}; skipping.".format(org.name)
        return
    # Pending scans.
    ps = PendingScan.objects.org_filter(org_user).filter(
        completed__isnull=True
    ).order_by('created')
    finished_ps = PendingScan.objects.org_filter(org_user).filter(
        completed__isnull=False
    ).order_by('-completed')
    ctx['pendingscans'] = { 'count': ps.count(), }
    if ctx['pendingscans']['count'] > 0:
        ctx['pendingscans']['oldest'] = ps[0]
        # Overdue after one week without processing.
        overdue = ctx['pendingscans']['oldest'].created < \
            (now - datetime.timedelta(days=7))
        ctx['pendingscans']['overdue'] = overdue
        try:
            ctx['pendingscans']['last_completed'] = finished_ps[0]
        except IndexError:
            pass
    # Scans.
    scans = Scan.objects.org_filter(org_user).filter(
        processing_complete=False,
    ).exclude(
        author__profile__managed=False
    ).order_by('created')
    finished_scans = Scan.objects.org_filter(org_user).filter(
        processing_complete=True
    ).order_by('-modified')
    ctx['scans'] = {'count': scans.count()}
    if ctx['scans']['count'] > 0:
        ctx['scans']['oldest'] = scans.order_by('created')[0]
        ctx['scans']['overdue'] = ctx['scans']['oldest'].created < \
            (now - datetime.timedelta(days=7))
        try:
            ctx['scans']['last_completed'] = finished_scans[0]
        except IndexError:
            pass
    # Documents.
    all_docs = Document.objects.org_filter(org_user).exclude(
        author__profile__managed=False,
    ).exclude(
        scan__isnull=True
    )
    docs = all_docs.filter(status="unknown").order_by('scan__created')
    # NOTE(review): finished_docs uses the same status="unknown" filter as
    # the pending set — an exclude() would be expected here; confirm intent.
    finished_docs = all_docs.filter(status="unknown").order_by('-modified')
    ctx['documents'] = { 'count': docs.count() }
    if ctx['documents']['count'] > 0:
        ctx['documents']['oldest'] = docs.order_by('scan__created')[0]
        # Documents get two weeks before counting as overdue.
        overdue = ctx['documents']['oldest'].scan.created < \
            (now - datetime.timedelta(days=14))
        ctx['documents']['overdue'] = overdue
        try:
            ctx['documents']['last_completed'] = finished_docs[0]
        except IndexError:
            pass
    # Outgoing mail
    needed = needed_letters(org_user).items()
    ctx['outgoing_mail'] = {}
    for letter_type, recipients in needed:
        # NOTE(review): all_letters is assigned but never used below.
        all_letters = Letter.objects.mail_filter(org_user).filter(
            type=letter_type
        )
        try:
            latest = Letter.objects.mail_filter(org_user).filter(
                sent__isnull=False
            ).order_by('-sent')[0]
        except IndexError:
            latest = None
        ctx['outgoing_mail'][letter_type] = {
            'count': recipients.count(),
            'last_completed': latest,
        }
        if ctx['outgoing_mail'][letter_type]['count'] > 0:
            # Each letter type dates its backlog from a different model field.
            if letter_type in ('waitlist', 'consent_form'):
                due_since = recipients.order_by('user__date_joined')[0].user.date_joined
            elif letter_type == 'enqueued':
                due_since = recipients.order_by('created')[0].created
            elif letter_type == 'comments':
                due_since = Comment.objects.unmailed().order_by('created')[0].created
            elif letter_type == 'signup_complete':
                try:
                    due_since = Document.objects.filter(
                        type='license',
                    ).exclude(
                        author__received_letters__type="signup_complete"
                    ).order_by('created')[0].created
                except IndexError:
                    due_since = None
            elif letter_type == 'first_post':
                try:
                    due_since = Document.objects.public().filter(
                        Q(type='post') | Q(type='profile')
                    ).exclude(
                        author__received_letters__type="first_post"
                    ).order_by('created')[0].created
                except IndexError:
                    due_since = None
            else:
                due_since = None
            if due_since:
                ctx['outgoing_mail'][letter_type]['due_since'] = due_since
                # Consent forms have no deadline; everything else: one week.
                if letter_type != 'consent_form':
                    ctx['outgoing_mail'][letter_type]['overdue'] = due_since < (
                        now - datetime.timedelta(days=7)
                    )
    # Tickets
    tickets = Note.objects.org_filter(org_user).filter(
        resolved__isnull=True
    )
    finished_tickets = Note.objects.org_filter(org_user).filter(
        resolved__isnull=False
    ).order_by('-resolved')
    ctx['tickets'] = { 'count': tickets.count() }
    if ctx['tickets']['count'] > 0:
        ctx['tickets']['important'] = tickets.filter(important=True).count()
        ctx['tickets']['oldest'] = tickets.order_by('created')[0]
        overdue = ctx['tickets']['oldest'].created < \
            (now - datetime.timedelta(days=14))
        ctx['tickets']['overdue'] = overdue
        try:
            ctx['tickets']['last_completed'] = finished_tickets[0]
        except IndexError:
            pass
    # Total outstanding items = "clicks away from inbox zero".
    ctx['inbox_zero_distance'] = 0
    for kind in ('scans', 'documents', 'tickets', 'pendingscans'):
        ctx['inbox_zero_distance'] += ctx[kind]['count']
    for letter_type, details in ctx['outgoing_mail'].iteritems():
        if letter_type != 'consent_form':
            ctx['inbox_zero_distance'] += details['count']
    # Flavor
    # Random obscure adjective ("word definition" per line).
    with open(os.path.join(
            os.path.dirname(__file__),
            "obscure.txt"
        )) as fh:
        lines = fh.read().split("\n")
        word, definition = random.choice(lines).split(" ", 1)
    ctx['adjective'] = {
        'word': word,
        'definition': definition,
    }
    # Random collective noun: {thing: names_and_conditions}.
    with open(os.path.join(
            os.path.dirname(__file__),
            "collective_nouns.json"
        )) as fh:
        collective_nouns = json.load(fh).items()
    noun = random.choice(collective_nouns)
    ctx['collective_noun'] = {
        'thing': noun[0],
        'names_and_conditions': noun[1],
    }
    t = loader.get_template("btb/org_status_email.html")
    html = t.render(Context(ctx))
    mail_managers(
        subject="{0} clicks away from Inbox Zero".format(
            ctx['inbox_zero_distance']
        ),
        message="",
        html_message=html,
    )
class Command(BaseCommand):
    def handle(self, *args, **kwargs):
        """
        A status email sent to an org which aims to answer the general
        questions of "what work needs to be done?" and "what work has recently
        been done?". More specifically:
         - How much logged incoming mail hasn't been processed, and how old is
           it?
         - How many scans need to be processed, and how old are they?
         - How many documents have been split, but still need attention, and
           how old are they?
         - How much outgoing mail is enqueued for each type, and when was the
           last time each of those types were sent?
        """
        if settings.DISABLE_ADMIN_NOTIFICATIONS:
            # Bug fix: execution previously fell through and mailed every org
            # anyway, contradicting the message below.  Bail out here.
            # (print(...) prints identically under Python 2 and 3.)
            print("Admin notifications are disabled; sending nothing.")
            return
        for org in Organization.objects.all():
            send_org_mail(org)
| agpl-3.0 |
Altaf-Mahdi/bacon | tools/perf/python/twatch.py | 7370 | 1334 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
    """Open a software-dummy perf event on every CPU/thread and print
    task events (pid/tid/cpu) as they arrive."""
    cpus = perf.cpu_map()
    threads = perf.thread_map()
    # NOTE(review): SAMPLE_TID is OR'd in twice — harmless but redundant.
    evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
                       wakeup_events = 1, watermark = 1,
                       sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
    evsel.open(cpus = cpus, threads = threads);
    evlist = perf.evlist(cpus, threads)
    evlist.add(evsel)
    evlist.mmap()
    # Block until events are available, then drain each CPU's ring buffer.
    while True:
        evlist.poll(timeout = -1)
        for cpu in cpus:
            event = evlist.read_on_cpu(cpu)
            if not event:
                continue
            # Trailing comma: stay on the same line as the event repr.
            print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
                                                    event.sample_pid,
                                                    event.sample_tid),
            print event

if __name__ == '__main__':
    main()
| gpl-2.0 |
tribeiro/chimera | src/chimera/instruments/dcfocuser.py | 3 | 6413 | #! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# chimera - observatory automation system
# Copyright (C) 2006-2007 P. Henrique Silva <henrique@astro.ufsc.br>
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
from chimera.interfaces.focuser import (InvalidFocusPositionException,
FocuserFeature, FocuserAxis)
from chimera.instruments.focuser import FocuserBase
from chimera.core.constants import SYSTEM_CONFIG_DIRECTORY
from chimera.core.lock import lock
from chimera.util.enum import Enum
import os
# Direction of a DC focuser pulse: IN (toward zero) or OUT.
Direction = Enum("IN", "OUT")

__all__ = ['Direction',
           'DCFocuser']
class DCFocuser(FocuserBase):
    """
    DCFocuser uses a standard DC pulse focuser with a software layer to
    mimic the behaviour of an encoder-based focuser.

    You have to define two parameters:

    - 'dt': the longest pulse possible to go end-to-end through the focuser.
    - 'pulse_dt': how long a single unit pulse is.

    NOTE: For APIs which don't provide access to time driven focuser
    movements, dt and pulse_dt could be in whatever unit you like, for
    example: dt could be in units of 'small pulse'. See TheSkyTelescope
    implementation of this class for an example.

    These two parameters define the 'encoded' range of DCFocuser, for
    example: if dt=2000 ms and pulse_dt=10 ms, this virtual encoded
    focuser would be 2000/10 = 200 units (getRange would return (0, 200)).

    Normally, before using this class you should do some handwork with a
    chronometer and pulse your focuser to one end, then time how long it
    takes to go end-to-end, unless your focuser manufacturer gives this
    number to you.
    """
    __config__ = {"dt": 1000,  # how many milliseconds/pulses should I
                               # use to traverse the whole focus range
                               # (end-to-end).
                  "pulse_dt": 10}  # unit pulse length (ms)

    def __init__(self):
        FocuserBase.__init__(self)
        self._supports = {FocuserFeature.TEMPERATURE_COMPENSATION: False,
                          FocuserFeature.POSITION_FEEDBACK: True,
                          FocuserFeature.ENCODER: False,
                          FocuserFeature.CONTROLLABLE_X: False,
                          FocuserFeature.CONTROLLABLE_Y: False,
                          FocuserFeature.CONTROLLABLE_Z: True,
                          FocuserFeature.CONTROLLABLE_U: False,
                          FocuserFeature.CONTROLLABLE_V: False,
                          FocuserFeature.CONTROLLABLE_W: False,
                          }
        # Current "encoded" position in pulse units; persisted to the
        # dc_focuser.memory file so it survives restarts.
        self._position = 0
        self._range = None
        # Write handle for the position memory file (opened in __start__).
        # BUG FIX: this was initialized as `_lastTimeLog`, an attribute no
        # other method referenced; the rest of the class uses
        # `_lastPositionLog`.
        self._lastPositionLog = None

    def __start__(self):
        """Compute the virtual range and restore (or calibrate) position.

        Returns False when dt/pulse_dt yield a non-positive range,
        True otherwise.
        """
        # range setting
        self._range = (0, int(self["dt"] / float(self["pulse_dt"])))
        if self._range[1] <= 0:
            self.log.warning(
                "Invalid dt and pulse_dt constants, focuser range negative.")
            return False
        # Restore last position. This must happen *before* opening the file
        # for writing below, since mode 'w' truncates it.
        lastPosition = None
        filename = os.path.join(SYSTEM_CONFIG_DIRECTORY, "dc_focuser.memory")
        if os.path.exists(filename):
            try:
                # BUG FIX: use a context manager so the read handle is not
                # leaked (the original left an anonymous open file behind).
                with open(filename, 'r') as memory:
                    lastPosition = int(memory.read())
            except ValueError:
                self.log.warning(
                    "Content of dc_focuser.memory file is invalid. Removing it.")
                os.unlink(filename)
        self._lastPositionLog = open(filename, 'w')
        # Assume the focuser is at the same position as last time; when
        # unknown, assume the far end so the calibration move below sweeps
        # the entire range toward zero.
        if lastPosition is None:
            self._position = self.getRange()[1]
        else:
            self._position = lastPosition
        # move focuser to our "zero" if needed
        if lastPosition is None:
            self.log.info("Focuser not calibrated. Wait ...")
            self.moveTo(0)
            self.log.info("Calibration DONE")
        return True

    def _savePosition(self, position):
        """Persist `position` to the memory file, replacing its contents."""
        # BUG FIX: truncate() cuts the file at the *current* offset, so
        # without seek(0) a shorter value would be appended after stale
        # digits of a longer previous one (e.g. writing 5 after 120 could
        # leave "1205" in the file).
        self._lastPositionLog.seek(0)
        self._lastPositionLog.truncate()
        self._lastPositionLog.write(str(position))
        self._lastPositionLog.flush()

    @lock
    def moveIn(self, n, axis=FocuserAxis.Z):
        """Move `n` encoded units inward (toward position 0)."""
        self._checkAxis(axis)
        return self._move(Direction.IN, n)

    @lock
    def moveOut(self, n, axis=FocuserAxis.Z):
        """Move `n` encoded units outward (toward the upper range limit)."""
        self._checkAxis(axis)
        return self._move(Direction.OUT, n)

    @lock
    def moveTo(self, position, axis=FocuserAxis.Z):
        """Move to the absolute encoded `position`.

        Returns True on success, False when the move fails; the recorded
        position is only updated after a successful move.
        """
        self._checkAxis(axis)
        current = self.getPosition()
        delta = position - current
        try:
            if delta > 0:
                self._move(Direction.OUT, abs(delta))
            elif delta < 0:
                self._move(Direction.IN, abs(delta))
        except Exception:
            # Best-effort move: keep the previously recorded position on
            # failure, but log the problem instead of silently swallowing
            # it (the original bare `except:` hid every error and returned
            # None; False preserves the falsy result for callers).
            self.log.warning("Error moving focuser to %s." % position)
            return False
        self._savePosition(position)
        self._position = position
        return True

    def _move(self, direction, steps):
        """Pulse the focuser `steps` units in `direction` (range-checked)."""
        # Validate the direction *before* it is used in the range
        # computation (the original checked range first, so a bogus
        # direction was silently treated as OUT by _inRange).
        if direction not in Direction:
            raise ValueError("Invalid direction '%s'." % direction)
        if not self._inRange(direction, steps):
            raise InvalidFocusPositionException(
                "%d is outside focuser limits." % steps)
        # Concrete pulse is delegated to the driver-specific _moveTo
        # implementation (provided by subclasses).
        self._moveTo(direction, steps)
        return True

    def _inRange(self, direction, n):
        """Return True if moving `n` units in `direction` stays in range."""
        # Assumes:
        # 0 ------- N
        # IN        OUT
        current = self.getPosition()
        if direction == Direction.IN:
            target = current - n
        else:
            target = current + n
        min_pos, max_pos = self.getRange()
        return (min_pos <= target <= max_pos)

    def getPosition(self, axis=FocuserAxis.Z):
        """Return the current encoded position (pulse units)."""
        self._checkAxis(axis)
        return self._position

    def getRange(self, axis=FocuserAxis.Z):
        """Return the (min, max) encoded position tuple."""
        self._checkAxis(axis)
        return self._range
| gpl-2.0 |
blueboxgroup/horizon | horizon/test/tests/tabs.py | 45 | 10631 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from django import http
from horizon import exceptions
from horizon import tabs as horizon_tabs
from horizon.test import helpers as test
from horizon.test.tests.tables import MyTable # noqa
from horizon.test.tests.tables import TEST_DATA # noqa
class BaseTestTab(horizon_tabs.Tab):
    """Base tab for the fixtures below; exposes itself in the template context."""
    def get_context_data(self, request):
        return {"tab": self}
class TabOne(BaseTestTab):
    """Plain tab fixture with no overrides (allowed, enabled, preloaded)."""
    slug = "tab_one"
    name = "Tab One"
    template_name = "_tab.html"
class TabDelayed(BaseTestTab):
    """Lazily-loaded tab: renders empty until explicitly selected."""
    slug = "tab_delayed"
    name = "Delayed Tab"
    template_name = "_tab.html"
    # Tested in test_rendering: render() returns "" unless this tab is active.
    preload = False
class TabDisabled(BaseTestTab):
    """Tab that is always disabled: listed by the group but never rendered."""
    slug = "tab_disabled"
    name = "Disabled Tab"
    template_name = "_tab.html"
    def enabled(self, request):
        # Unconditionally disabled for every request.
        return False
class TabDisallowed(BaseTestTab):
    """Tab that is never allowed: excluded from the group's tab list entirely."""
    slug = "tab_disallowed"
    name = "Disallowed Tab"
    template_name = "_tab.html"
    def allowed(self, request):
        # Unconditionally disallowed for every request.
        return False
class Group(horizon_tabs.TabGroup):
    """Sticky tab group combining allowed/enabled/preload fixture tabs."""
    slug = "tab_group"
    tabs = (TabOne, TabDelayed, TabDisabled, TabDisallowed)
    sticky = True
    def tabs_not_available(self):
        # NOTE(review): presumably invoked by the framework when no tabs are
        # available — records a flag so tests could assert it was called.
        self._assert_tabs_not_available = True
class TabWithTable(horizon_tabs.TableTab):
    """Table-backed tab wiring MyTable into the tab framework."""
    table_classes = (MyTable,)
    name = "Tab With My Table"
    slug = "tab_with_table"
    template_name = "horizon/common/_detail_table.html"
    def get_my_table_data(self):
        # Supplies the row data for MyTable.
        return TEST_DATA
class RecoverableErrorTab(horizon_tabs.Tab):
    """Tab that always fails with a recoverable error during rendering."""
    name = "Recoverable Error Tab"
    slug = "recoverable_error_tab"
    template_name = "_tab.html"
    def get_context_data(self, request):
        # Raise a known recoverable error.
        exc = exceptions.AlreadyExists("Recoverable!", horizon_tabs.Tab)
        # Keep test output clean; this exception is expected.
        exc.silence_logging = True
        raise exc
class TableTabGroup(horizon_tabs.TabGroup):
    """Single-tab group wrapping TabWithTable."""
    slug = "tab_group"
    tabs = [TabWithTable]
class TabWithTableView(horizon_tabs.TabbedTableView):
    """View under test: serves TableTabGroup through the tabbed-table machinery."""
    tab_group_class = TableTabGroup
    template_name = "tab_group.html"
class TabTests(test.TestCase):
    """Unit tests for tab groups, individual tabs, and their rendering."""
    def test_tab_group_basics(self):
        """Group construction, ids, default CSS classes and tab lookup."""
        tg = Group(self.request)
        # Test tab instantiation/attachement to tab group, and get_tabs method
        tabs = tg.get_tabs()
        # "tab_disallowed" should NOT be in this list.
        self.assertQuerysetEqual(tabs, ['<TabOne: tab_one>',
                                        '<TabDelayed: tab_delayed>',
                                        '<TabDisabled: tab_disabled>'])
        # Test get_id
        self.assertEqual("tab_group", tg.get_id())
        # get_default_classes
        self.assertEqual(horizon_tabs.base.CSS_TAB_GROUP_CLASSES,
                         tg.get_default_classes())
        # Test get_tab
        self.assertEqual("tab_one", tg.get_tab("tab_one").slug)
        # Test selected is None w/o GET input
        self.assertIsNone(tg.selected)
        # Test get_selected_tab is None w/o GET input
        self.assertIsNone(tg.get_selected_tab())
    def test_tab_group_active_tab(self):
        """Active-tab resolution for valid, invalid, disallowed and disabled
        selections; invalid selections fall back to the first tab."""
        tg = Group(self.request)
        # active tab w/o selected
        self.assertEqual(tg.get_tabs()[0], tg.active)
        # active tab w/ selected
        self.request.GET['tab'] = "tab_group__tab_delayed"
        tg = Group(self.request)
        self.assertEqual(tg.get_tab('tab_delayed'), tg.active)
        # active tab w/ invalid selected
        self.request.GET['tab'] = "tab_group__tab_invalid"
        tg = Group(self.request)
        self.assertEqual(tg.get_tabs()[0], tg.active)
        # active tab w/ disallowed selected
        self.request.GET['tab'] = "tab_group__tab_disallowed"
        tg = Group(self.request)
        self.assertEqual(tg.get_tabs()[0], tg.active)
        # active tab w/ disabled selected
        self.request.GET['tab'] = "tab_group__tab_disabled"
        tg = Group(self.request)
        self.assertEqual(tg.get_tabs()[0], tg.active)
        # active tab w/ non-empty garbage selected
        # Note: this entry does not contain the '__' SEPARATOR string.
        self.request.GET['tab'] = "<!--"
        tg = Group(self.request)
        self.assertEqual(tg.get_tabs()[0], tg.active)
    def test_tab_basics(self):
        """Per-tab ids, CSS classes, load flags and is_active state."""
        tg = Group(self.request)
        tab_one = tg.get_tab("tab_one")
        tab_delayed = tg.get_tab("tab_delayed")
        tab_disabled = tg.get_tab("tab_disabled", allow_disabled=True)
        # Disallowed tab isn't even returned
        tab_disallowed = tg.get_tab("tab_disallowed")
        self.assertIsNone(tab_disallowed)
        # get_id
        self.assertEqual("tab_group__tab_one", tab_one.get_id())
        # get_default_classes
        self.assertEqual(horizon_tabs.base.CSS_ACTIVE_TAB_CLASSES,
                         tab_one.get_default_classes())
        self.assertEqual(horizon_tabs.base.CSS_DISABLED_TAB_CLASSES,
                         tab_disabled.get_default_classes())
        # load, allowed, enabled
        self.assertTrue(tab_one.load)
        self.assertFalse(tab_delayed.load)
        self.assertFalse(tab_disabled.load)
        # Selecting the delayed tab makes it load despite preload=False.
        self.request.GET['tab'] = tab_delayed.get_id()
        tg = Group(self.request)
        tab_delayed = tg.get_tab("tab_delayed")
        self.assertTrue(tab_delayed.load)
        # is_active
        self.request.GET['tab'] = ""
        tg = Group(self.request)
        tab_one = tg.get_tab("tab_one")
        tab_delayed = tg.get_tab("tab_delayed")
        self.assertTrue(tab_one.is_active())
        self.assertFalse(tab_delayed.is_active())
        self.request.GET['tab'] = tab_delayed.get_id()
        tg = Group(self.request)
        tab_one = tg.get_tab("tab_one")
        tab_delayed = tg.get_tab("tab_delayed")
        self.assertFalse(tab_one.is_active())
        self.assertTrue(tab_delayed.is_active())
    def test_rendering(self):
        """HTML output for the group, enabled/disabled and lazily-loaded tabs."""
        tg = Group(self.request)
        tab_one = tg.get_tab("tab_one")
        tab_delayed = tg.get_tab("tab_delayed")
        tab_disabled = tg.get_tab("tab_disabled", allow_disabled=True)
        # tab group
        output = tg.render()
        res = http.HttpResponse(output.strip())
        self.assertContains(res, "<li", 3)
        # stickiness
        self.assertContains(res, 'data-sticky-tabs="sticky"', 1)
        # tab
        output = tab_one.render()
        self.assertEqual(tab_one.name, output.strip())
        # disabled tab
        output = tab_disabled.render()
        self.assertEqual("", output.strip())
        # preload false
        output = tab_delayed.render()
        self.assertEqual("", output.strip())
        # preload false w/ active
        self.request.GET['tab'] = tab_delayed.get_id()
        tg = Group(self.request)
        tab_delayed = tg.get_tab("tab_delayed")
        output = tab_delayed.render()
        self.assertEqual(tab_delayed.name, output.strip())
    def test_table_tabs(self):
        """Lazy table-data loading and template context for TableTab."""
        tab_group = TableTabGroup(self.request)
        tabs = tab_group.get_tabs()
        # Only one tab, as expected.
        self.assertEqual(1, len(tabs))
        tab = tabs[0]
        # Make sure it's the tab we think it is.
        self.assertIsInstance(tab, horizon_tabs.TableTab)
        # Data should not be loaded yet.
        self.assertFalse(tab._table_data_loaded)
        table = tab._tables[MyTable.Meta.name]
        self.assertIsInstance(table, MyTable)
        # Let's make sure the data *really* isn't loaded yet.
        self.assertIsNone(table.data)
        # Okay, load the data.
        tab.load_table_data()
        self.assertTrue(tab._table_data_loaded)
        self.assertQuerysetEqual(table.data, ['<FakeObject: object_1>',
                                              '<FakeObject: object_2>',
                                              '<FakeObject: object_3>'])
        context = tab.get_context_data(self.request)
        # Make sure our table is loaded into the context correctly
        self.assertEqual(table, context['my_table_table'])
        # Since we only had one table we should get the shortcut name too.
        self.assertEqual(table, context['table'])
    def test_tabbed_table_view(self):
        """GET rendering, AJAX row updates and POST table actions on the view."""
        view = TabWithTableView.as_view()
        # Be sure we get back a rendered table containing data for a GET
        req = self.factory.get("/")
        res = view(req)
        self.assertContains(res, "<table", 1)
        self.assertContains(res, "Displaying 3 items", 1)
        # AJAX response to GET for row update
        params = {"table": "my_table", "action": "row_update", "obj_id": "1"}
        req = self.factory.get('/', params,
                               HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        res = view(req)
        self.assertEqual(200, res.status_code)
        # Make sure we got back a row but not a table or body
        self.assertContains(res, "<tr", 1)
        self.assertContains(res, "<table", 0)
        self.assertContains(res, "<body", 0)
        # Response to POST for table action
        action_string = "my_table__toggle__2"
        req = self.factory.post('/', {'action': action_string})
        res = view(req)
        self.assertEqual(302, res.status_code)
        self.assertEqual("/", res["location"])
        # Ensure that lookup errors are raised as such instead of converted
        # to TemplateSyntaxErrors.
        action_string = "my_table__toggle__2000000000"
        req = self.factory.post('/', {'action': action_string})
        self.assertRaises(exceptions.Http302, view, req)
class TabExceptionTests(test.TestCase):
    """Checks error handling when a tab raises during context building."""
    def setUp(self):
        super(TabExceptionTests, self).setUp()
        # Temporarily append the failing tab to the shared tab list; the
        # original list is restored in tearDown so other tests are unaffected.
        self._original_tabs = copy.copy(TabWithTableView.tab_group_class.tabs)
        TabWithTableView.tab_group_class.tabs.append(RecoverableErrorTab)
    def tearDown(self):
        super(TabExceptionTests, self).tearDown()
        TabWithTableView.tab_group_class.tabs = self._original_tabs
    def test_tab_view_exception(self):
        """A recoverable error in one tab yields exactly one error message."""
        view = TabWithTableView.as_view()
        req = self.factory.get("/")
        res = view(req)
        self.assertMessageCount(res, error=1)
| apache-2.0 |
Moriadry/tensorflow | tensorflow/contrib/opt/__init__.py | 22 | 1908 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module containing optimization routines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
# BUG FIX: nadam_optimizer was star-imported twice; the duplicate line has
# been removed. The original relative order of the remaining imports is kept.
from tensorflow.contrib.opt.python.training.delay_compensated_gradient_descent import *
from tensorflow.contrib.opt.python.training.drop_stale_gradient_optimizer import *
from tensorflow.contrib.opt.python.training.external_optimizer import *
from tensorflow.contrib.opt.python.training.lazy_adam_optimizer import *
from tensorflow.contrib.opt.python.training.nadam_optimizer import *
from tensorflow.contrib.opt.python.training.moving_average_optimizer import *
from tensorflow.contrib.opt.python.training.variable_clipping_optimizer import *
# pylint: enable=wildcard-import

from tensorflow.python.util.all_util import remove_undocumented

# Names forming the public API of tensorflow.contrib.opt; everything else
# pulled in by the wildcard imports above is stripped below.
_allowed_symbols = [
    'DelayCompensatedGradientDescentOptimizer',
    'DropStaleGradientOptimizer', 'ExternalOptimizerInterface',
    'LazyAdamOptimizer', 'NadamOptimizer', 'MovingAverageOptimizer',
    'ScipyOptimizerInterface', 'VariableClippingOptimizer'
]
remove_undocumented(__name__, _allowed_symbols)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.