text
stringlengths 29
850k
|
|---|
# $Id$
##
## This file is part of pyFormex 0.8.9 (Fri Nov 9 10:49:51 CET 2012)
## pyFormex is a tool for generating, manipulating and transforming 3D
## geometrical models by sequences of mathematical operations.
## Home page: http://pyformex.org
## Project page: http://savannah.nongnu.org/projects/pyformex/
## Copyright 2004-2012 (C) Benedict Verhegghe (benedict.verhegghe@ugent.be)
## Distributed under the GNU General Public License version 3 or later.
##
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see http://www.gnu.org/licenses/.
##
"""tools.py
Graphic Tools for pyFormex.
"""
from __future__ import print_function
import pyformex as pf
from coords import *
from collection import Collection
from gui.actors import GeomActor
from mesh import Mesh
from formex import Formex
from plugins.trisurface import TriSurface
from plugins.nurbs import NurbsCurve,NurbsSurface
class Plane(object):
    """A plane in 3D space.

    The plane is defined either by a point and a normal vector, or by
    three non-collinear points (whose centroid becomes the plane point and
    whose cross product becomes the normal).

    Parameters:

    - `points`: a single point (shape (3,), requires `normal`) or three
      points (shape (3,3)).
    - `normal`: the plane normal, required when a single point is given.
    - `size`: two pairs of (negative, positive) extents along the two
      in-plane axes, used for display purposes.
    """
    def __init__(self, points, normal=None, size=((1.0, 1.0), (1.0, 1.0))):
        pts = Coords(points)
        if pts.shape == (3,) and normal is not None:
            P = pts
            n = Coords(normal)
            if n.shape != (3,):
                # Fix: use parenthesized raise (the old `raise E, msg` form
                # is Python-2-only syntax).
                raise ValueError("normal does not have correct shape")
        elif pts.shape == (3, 3,):
            P = pts.centroid()
            n = cross(pts[1]-pts[0], pts[2]-pts[0])
        else:
            raise ValueError("points has incorrect shape (%s)" % str(pts.shape))
        size = asarray(size)
        # Prepend a 0. to each extent pair: sizes run from -neg through 0 to +pos.
        s = Coords([insert(size[0], 0, 0., -1), insert(size[1], 0, 0., -1)])
        self.P = P
        self.n = n
        self.s = s

    def point(self):
        """Return the plane point."""
        return self.P

    def normal(self):
        """Return the plane normal."""
        return self.n

    def size(self):
        """Return the plane display size."""
        return self.s

    def bbox(self):
        """Return the bounding box of the plane point."""
        return self.P.bbox()

    def __str__(self):
        return 'P:%s n:%s s:%s' % (list(self.P), list(self.n), (list(self.s[0]), list(self.s[1])))

    def actor(self, **kargs):
        """Create a drawable actor representing the plane."""
        from gui import actors
        actor = actors.PlaneActor(size=self.s, **kargs)
        actor = actors.RotatedActor(actor, self.n, **kargs)
        actor = actors.TranslatedActor(actor, self.P, **kargs)
        return actor
################# Report information about picked objects ################
def report(K):
    """Return a textual report about a picked-object collection.

    Dispatches on ``K.obj_type``; returns an empty string when K is not a
    reportable collection or its type is unknown.
    """
    if K is None or not hasattr(K, 'obj_type'):
        return ''
    print(K.obj_type)
    dispatch = {
        'actor': reportActors,
        'element': reportElements,
        'point': reportPoints,
        'edge': reportEdges,
        'partition': reportPartitions,
    }
    handler = dispatch.get(K.obj_type)
    return handler(K) if handler else ''
def reportActors(K):
    """Return a report about the picked actors."""
    parts = ["Actor report\n"]
    picked = K.get(-1, [])
    parts.append("Actors %s\n" % picked)
    for k in picked:
        actor = pf.canvas.actors[k]
        parts.append("  Actor %s (type %s)\n" % (k, actor.getType()))
    return ''.join(parts)
def reportElements(K):
    """Return a report about the picked elements, grouped per actor."""
    s = "Element report\n"
    for k in K.keys():
        v = K[k]
        A = pf.canvas.actors[k]
        t = A.getType()
        s += "Actor %s (type %s); Elements %s\n" % (k, t, v)
        if t == Formex:
            e = A.coords
        elif t == TriSurface or t == Mesh:
            e = A.elems
        else:
            # Fix: previously `e` was left undefined for any other actor
            # type, raising NameError below; skip listing those elements.
            continue
        for p in v:
            s += " Element %s: %s\n" % (p, e[p])
    return s
def reportPoints(K):
    """Return a report about the picked points, grouped per actor."""
    report = "Point report\n"
    for actor_id in K.keys():
        picked = K[actor_id]
        actor = pf.canvas.actors[actor_id]
        report += "Actor %s (type %s); Points %s\n" % (actor_id, actor.getType(), picked)
        coords = actor.points()
        for p in picked:
            report += " Point %s: %s\n" % (p, coords[p])
    return report
def reportEdges(K):
    """Return a report about the picked edges, grouped per actor."""
    s = "Edge report\n"
    for k in K.keys():
        v = K[k]
        A = pf.canvas.actors[k]
        s += "Actor %s (type %s); Edges %s\n" % (k, A.getType(), v)
        e = A.edges()
        for p in v:
            s += " Edge %s: %s\n" % (p, e[p])
    # Fix: the report string was built but never returned (the function
    # returned None, unlike every other report* function in this module).
    return s
def reportPartitions(K):
    """Return a report about the picked partitions, grouped per actor."""
    s = "Partition report\n"
    for k in K.keys():
        P = K[k][0]
        A = pf.canvas.actors[k]
        t = A.getType()
        for l in P.keys():
            v = P[l]
            s += "Actor %s (type %s); Partition %s; Elements %s\n" % (k,t,l,v)
            # NOTE(review): the type is compared against the *strings*
            # 'Formex'/'TriSurface' here, while reportElements compares
            # against the classes themselves -- one of the two is probably
            # wrong; confirm what getType() actually returns.
            if t == 'Formex':
                e = A
            elif t == 'TriSurface':
                e = A.getElems()
            # NOTE(review): if neither branch matches, `e` is undefined (or
            # stale from a previous iteration) and the loop below raises.
            for p in v:
                s += " Element %s: %s\n" % (p,e[p])
    return s
def reportDistances(K):
    """Return a report of distances from the first picked point.

    Returns an empty string unless K is a 'point' collection.
    """
    if K is None or not hasattr(K, 'obj_type') or K.obj_type != 'point':
        return ''
    pts = Coords.concatenate(getCollection(K))
    report = "Distance report\n"
    report += "First point: %s %s\n" % (0, pts[0])
    dists = pts.distanceFromPoint(pts[0])
    for i, (pt, dist) in enumerate(zip(pts, dists)):
        report += "Distance from point: %s %s: %s\n" % (i, pt, dist)
    return report
def reportAngles(K):
    """Return a report of the angle between two picked line elements.

    Returns an empty string unless K is an 'element' collection.
    Raises ValueError when the selection does not hold exactly 2 elements,
    TypeError when the selection is not a Formex or Mesh.
    """
    if K is None or not hasattr(K, 'obj_type') or K.obj_type != 'element':
        return ''
    s = "Angle report:\n"
    for F in getCollection(K):
        if isinstance(F, Mesh):
            F = F.toFormex()
        if isinstance(F, Formex):
            x = F.coords
            if len(x) != 2:
                # Fix: use parenthesized raise (the old `raise E, msg` form
                # is Python-2-only syntax).
                raise ValueError("You didn't select 2 elements")
            # Direction vectors of both 2-point elements.
            v = x[:, 1, :] - x[:, 0, :]
            v = normalize(v)
            cosa = dotpr(v[0], v[1])
            a = arccosd(cosa)
            s += "  a = %s" % a
        else:
            raise TypeError("Angle measurement only possible with Formex or Mesh")
    return s
def getObjectItems(obj, items, mode):
    """Get the specified items from object.

    Returns None when the mode is unknown or the object does not support
    the requested kind of selection.
    """
    if mode == 'actor':
        selected = []
        for i in items:
            if hasattr(obj[i], 'object'):
                selected.append(obj[i].object)
        return selected
    if mode in ('element', 'partition'):
        if hasattr(obj, 'object') and hasattr(obj.object, 'select'):
            return obj.object.select(items)
    elif mode == 'point':
        if hasattr(obj, 'points'):
            return obj.points()[items]
    return None
def getCollection(K):
    """Return the objects corresponding to a picked collection K.

    Returns None for an unrecognized collection type.
    """
    kind = K.obj_type
    if kind == 'actor':
        picked = []
        for i in K.get(-1, []):
            actor = pf.canvas.actors[int(i)]
            if hasattr(actor, 'object'):
                picked.append(actor.object)
        return picked
    if kind in ('element', 'point'):
        return [getObjectItems(pf.canvas.actors[k], K[k], kind)
                for k in K.keys()]
    if kind == 'partition':
        return [getObjectItems(pf.canvas.actors[k], K[k][0][prop], kind)
                for k in K.keys() for prop in K[k][0].keys()]
    return None
def growCollection(K, **kargs):
    """Grow the collection with n frontal rings.

    K should be a collection of elements.
    This should work on any objects that have a growSelection method.
    """
    if K.obj_type != 'element':
        return
    for k in K.keys():
        actor = pf.canvas.actors[k]
        if hasattr(actor, 'growSelection'):
            K[k] = actor.growSelection(K[k], **kargs)
def partitionCollection(K):
    """Partition the collection according to node adjacency.

    The actor numbers will be connected to a collection of property numbers,
    e.g. 0 [1 [4,12] 2 [6,20]], where 0 is the actor number, 1 and 2 are the
    property numbers and 4, 12, 6 and 20 are the element numbers.
    """
    sel = getCollection(K)
    if len(sel) == 0:
        print("Nothing to partition!")
        return
    if K.obj_type == 'actor':
        # Whole actors were picked: rewrite K in place into an element
        # collection covering every element of each picked actor.
        actor_numbers = K.get(-1,[])
        K.clear()
        for i in actor_numbers:
            K.add(range(sel[int(i)].nelems()),i)
    # Shift each actor's partition numbers by `prop` so parts belonging to
    # different actors never share a property number.
    prop = 1
    j = 0
    for i in K.keys():
        p = sel[j].partitionByConnection() + prop
        print("Actor %s partitioned in %s parts" % (i,p.max()-p.min()+1))
        C = Collection()
        # Store (property, element) pairs for this actor.
        C.set(transpose(asarray([p,K[i]])))
        K[i] = C
        prop += p.max()-p.min()+1
        j += 1
    K.setType('partition')
def getPartition(K, prop):
    """Remove all partitions with property not in prop."""
    for actor_id in K.keys():
        partition = K[actor_id][0]
        for p in partition.keys():
            if p not in prop:
                partition.remove(partition[p], p)
def exportObjects(obj, name, single=False):
    """Export a list of objects under the given name.

    If obj is a list, and single=True, each element of the list is exported
    as a single item. The items will be given the names name-0, name-1, etc.
    Else, the obj is exported as is under the name.
    """
    if single and isinstance(obj, list):
        # Fix: the keys were built from the literal string "name-%s",
        # ignoring the `name` argument; use the given name as documented.
        export(dict(("%s-%s" % (name, i), v) for i, v in enumerate(obj)))
    else:
        export({name: obj})
# End
|
Two points here. And the hypocrisy and uselessness of today's "news" media.
poor innocent asylum seekers. Never mind the chunks of cement they are throwing or their proclamations of loyalty to other countries as shown by the flags they wave. Forget the fact they have rejected Mexico's offers of asylum and employment. We are obligated to grant them asylum not because they meet the requirements for it but because morality as dictated by a freshman member of Congress has hallucinations of a new holocaust.
The only thing I will say about this video is, beware of listening to anyone dumb enough to look to AOC for intelligent policy.
Oh yeah, that's right; you're a Maddow fan . . .
What does that have to do with you posting videos of morons spouting their idiotic opinions?
You don't have the slightest insight as to my reading habits or my education. Based on your pearls of wisdom and your habitual use of profanity and misspelled words and misused vocabulary, you perhaps should follow your own advice. And by books, I mean those that contain words, not black and white pictures that are made for you to provide the color.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# coding=utf-8
import logging
import sys
from PyMysqlPool.constant.constant import loggingerr, loggErrorFile
# Configure the root logger: all levels (NOTSET) to stdout in a uniform
# "[time][level][thread][file:func:line] message" format.
logging.basicConfig(level=logging.NOTSET,
                    format='[%(asctime)s][%(levelname)7s][%(threadName)s][%(filename)s:%(funcName)s:%(lineno)d] %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S',
                    stream=sys.stdout)

# Formatter shared by the file and console handlers attached below.
logFormatter = logging.Formatter(
    '[%(asctime)s][%(levelname)7s][%(threadName)s][%(filename)s:%(funcName)s:%(lineno)d] %(message)s')

# Module-level logger; despite the name it is not the root logger.
# Only ERROR and above are handled here.
rootLogger = logging.getLogger(__name__)
rootLogger.setLevel(logging.ERROR)

# create console handler and set level to debug
ch = logging.StreamHandler(stream=sys.stderr)
ch.setLevel(logging.ERROR)

# add ch to logger
rootLogger.addHandler(ch)

if loggErrorFile:
    # Optionally mirror errors to a log file (path from the constant module).
    fileHandler = logging.FileHandler("{0}".format(loggingerr))
    fileHandler.setFormatter(logFormatter)
    rootLogger.addHandler(fileHandler)

# NOTE(review): records also propagate to the root logger, so a single
# ERROR can be emitted by `ch`, by `consoleHandler` and by the basicConfig
# handler -- up to three copies on the console; confirm this duplication
# is intended.
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
|
Ok a bit of a fib (sort of). Tonight I played in Boyle’s of Slane which is a very nice venue that hosts the “Purple Sessions”. It’s been a while since I played live. Actually no that’s another fib: it’s only been a few weeks. It feels like an age though.
First solo acoustic gig in a long time. It felt strange not having a band behind me ! Even stranger not having Mark who has been holding down the bass for the last 18 months or so. No we haven’t fallen out: he’s got a life, study and work to do so I’m not going to abuse his generosity of time. Anyway I digress….
The show tonight was videotaped so I’ll be sharing it soon. I played material from “Other People’s Hats” and it felt GOOD to be on stage. It felt even better having a listening audience which included a couple of friends who travelled from Drogheda to show their support.
The last time I was in Slane was to see David Bowie live at Slane Castle in 1987, the previous time was to see Bruce Springsteen in 1985. I wonder what the teenage me would make of the shaven headed singer-songwriter me ? Why have I kept going when I’m long past the burning optimism flame of youth and, let’s face it, acting half my age playing gigs and writing songs ? I’ve thought about this and it came to me midway through my set tonight: because I have to and because I bloody love it. I’m a show off … so sue me !
I was recently asked this question by a blog reader and it has been on my mind for a few days. Why have I and a few others of a similar vintage (mid-80s hopefuls) either kept going or resurrected their love for making music and why have others stopped? I think if I had burned myself out back then I would probably not be doing this now. I never “boiled over” but rather I “simmered” all this time. Does it explain why I never made it (to use that expression yuk): if I had been more passionate back then and stayed the course and immersed myself in what is really my true love would things have been different ? Well of course they would but different how ? I’ll never know.
|
# Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.apps import AppConfig
from django.apps import apps
from django.db.models import signals
from . import signals as handlers
def connect_memberships_signals():
    """Connect the signal handlers for the Membership model."""
    # When a membership is about to be deleted, update the role-points relation.
    # NOTE(review): the handler is named `membership_post_delete` but it is
    # connected to `pre_delete` (as the dispatch_uid also says) -- confirm
    # the handler name is merely historical.
    signals.pre_delete.connect(handlers.membership_post_delete,
                               sender=apps.get_model("projects", "Membership"),
                               dispatch_uid='membership_pre_delete')
    # When a membership is saved, create the notify policies for the member.
    signals.post_save.connect(handlers.create_notify_policy,
                              sender=apps.get_model("projects", "Membership"),
                              dispatch_uid='create-notify-policy')
def connect_projects_signals():
    """Connect the signal handlers for the Project model."""
    project_model = apps.get_model("projects", "Project")
    # Apply the selected template when a project object is created.
    signals.post_save.connect(handlers.project_post_save,
                              sender=project_model,
                              dispatch_uid='project_post_save')
    # Tags
    signals.pre_save.connect(handlers.tags_normalization,
                             sender=project_model,
                             dispatch_uid="tags_normalization_projects")
    signals.pre_save.connect(handlers.update_project_tags_when_create_or_edit_taggable_item,
                             sender=project_model,
                             dispatch_uid="update_project_tags_when_create_or_edit_taggable_item_projects")
def connect_us_status_signals():
    """Connect the handler that re-evaluates user story closure on status edits."""
    us_status_model = apps.get_model("projects", "UserStoryStatus")
    signals.post_save.connect(
        handlers.try_to_close_or_open_user_stories_when_edit_us_status,
        sender=us_status_model,
        dispatch_uid="try_to_close_or_open_user_stories_when_edit_us_status")
def connect_task_status_signals():
    """Connect the handler that re-evaluates user story closure on task status edits."""
    task_status_model = apps.get_model("projects", "TaskStatus")
    signals.post_save.connect(
        handlers.try_to_close_or_open_user_stories_when_edit_task_status,
        sender=task_status_model,
        dispatch_uid="try_to_close_or_open_user_stories_when_edit_task_status")
def disconnect_memberships_signals():
    """Disconnect the Membership model signal handlers."""
    membership_model = apps.get_model("projects", "Membership")
    signals.pre_delete.disconnect(sender=membership_model,
                                  dispatch_uid='membership_pre_delete')
    signals.post_save.disconnect(sender=membership_model,
                                 dispatch_uid='create-notify-policy')
def disconnect_projects_signals():
    """Disconnect the Project model signal handlers."""
    project_model = apps.get_model("projects", "Project")
    signals.post_save.disconnect(sender=project_model,
                                 dispatch_uid='project_post_save')
    signals.pre_save.disconnect(sender=project_model,
                                dispatch_uid="tags_normalization_projects")
    signals.pre_save.disconnect(sender=project_model,
                                dispatch_uid="update_project_tags_when_create_or_edit_taggable_item_projects")
def disconnect_us_status_signals():
    """Disconnect the UserStoryStatus signal handler."""
    us_status_model = apps.get_model("projects", "UserStoryStatus")
    signals.post_save.disconnect(
        sender=us_status_model,
        dispatch_uid="try_to_close_or_open_user_stories_when_edit_us_status")
def disconnect_task_status_signals():
    """Disconnect the TaskStatus signal handler."""
    task_status_model = apps.get_model("projects", "TaskStatus")
    signals.post_save.disconnect(
        sender=task_status_model,
        dispatch_uid="try_to_close_or_open_user_stories_when_edit_task_status")
class ProjectsAppConfig(AppConfig):
    """AppConfig for taiga.projects; wires up all model signals on startup."""
    name = "taiga.projects"
    verbose_name = "Projects"

    def ready(self):
        # Called once Django's app registry is fully populated, so it is
        # safe to resolve models via apps.get_model() from here on.
        connect_memberships_signals()
        connect_projects_signals()
        connect_us_status_signals()
        connect_task_status_signals()
|
2002 Cabernet Sauvignon Alexander Valley 93 POINTS The Wine News February/March 2006 Issue Reserved aromas of plum, cranberry, cedar, dust and graphite. Stable tannins frame balanced flavors of cherry, mocha and vanillin oak. Bushels of red fruit in the racy close.
Light rain last night. Perfect cool temperature for bottling our Pinot Noir today.
|
from django import forms
from django.shortcuts import render
from django.contrib.auth import get_user_model
from .decorators import superuser_required
from airmozilla.manage.forms import BaseForm
from airmozilla.main.models import (
Event,
SuggestedEvent,
EventEmail,
EventRevision,
EventAssignment,
SuggestedEventComment,
EventTweet,
Approval,
Picture,
Chapter,
UserEmailAlias,
)
from airmozilla.closedcaptions.models import ClosedCaptions, RevOrder
from airmozilla.comments.models import (
Comment,
Unsubscription,
Discussion,
SuggestedDiscussion,
)
from airmozilla.search.models import (
LoggedSearch,
SavedSearch,
)
from airmozilla.starred.models import StarredEvent
from airmozilla.surveys.models import Answer
from airmozilla.uploads.models import Upload
User = get_user_model()
class AuthMigrateForm(BaseForm):
    """Upload form for the CSV of (alias_email, real_email) pairs."""
    # CSV file: one "alias,real" pair per line; the first line is a header.
    file = forms.FileField()
    # When checked, report what would happen without writing anything.
    dry_run = forms.BooleanField(required=False)
@superuser_required
def upload(request):  # pragma: no cover
    """Handle the auth-migrate CSV upload and render the migration results."""
    results = None
    dry_run = False
    if request.method == 'POST':
        form = AuthMigrateForm(request.POST, request.FILES)
        if form.is_valid():
            dry_run = form.cleaned_data['dry_run']
            lines = []
            first = True
            for line in form.cleaned_data['file']:
                if first:
                    # Skip the header row of the uploaded CSV.
                    first = False
                else:
                    # Each data row is "alias_email,real_email".
                    alias, real = line.strip().split(',')
                    lines.append((alias, real))
            if lines:
                results = migrate(lines, dry_run)
    else:
        form = AuthMigrateForm()
    context = {
        'form': form,
        'results': results,
        'dry_run': dry_run,
    }
    return render(request, 'manage/authmigrate_upload.html', context)
def migrate(lines, dry_run=False):
    """Reconcile user accounts for each (alias_email, real_email) pair.

    Returns a list of dicts (alias, old, real, new, notes) describing what
    was done for each pair. When `dry_run` is true, nothing is written to
    the database.
    """
    results = []
    for alias, real in lines:
        try:
            old = User.objects.get(email__iexact=alias)
        except User.DoesNotExist:
            old = None
        try:
            new = User.objects.get(email__iexact=real)
        except User.DoesNotExist:
            new = None
        notes = ''
        if old and not new:
            # Easy, just change this user's email address
            old.email = real
            if not dry_run:
                old.save()
                # Fix: this alias record used to be created even on a dry
                # run; all writes now stay behind the dry_run guard.
                UserEmailAlias.objects.get_or_create(
                    user=old,
                    email=alias,
                )
            notes = 'Moved over'
        elif not old and new:
            # The real account already exists; just record the alias.
            if not dry_run:
                UserEmailAlias.objects.get_or_create(
                    email=alias,
                    user=new,
                )
            notes = 'Nothing to do'
        elif not old and not new:
            notes = 'Neither found'
        else:
            assert old and new
            notes = 'Merged'
            notes += '\n({})'.format(
                '\n'.join(merge_user(old, new, dry_run=dry_run))
            )
            if not dry_run:
                # Deactivate the old account and remember its address.
                old.is_active = False
                old.save()
                UserEmailAlias.objects.get_or_create(
                    user=new,
                    email=old.email,
                )
        results.append({
            'alias': alias,
            'old': old,
            'real': real,
            'new': new,
            'notes': notes,
        })
    return results
def merge_user(old, new, dry_run=False):
    """Transfer everything owned by user `old` onto user `new`.

    Returns a list of human-readable notes describing what was (or, on a
    dry run, would be) transferred. When `dry_run` is true nothing is
    written to the database.
    """
    things = []

    def migrate(model, key='user', name=None, only_if_in=False):
        # Re-point `model` rows from `old` to `new` via the `key` field.
        if only_if_in:
            # `new` already has rows: `old`'s rows are duplicates and get
            # deleted instead of moved.
            if model.objects.filter(**{key: new}).exists():
                if not dry_run:
                    # Fix: this delete used to run even on dry runs,
                    # destroying data during a supposedly read-only pass.
                    model.objects.filter(**{key: old}).delete()
                return
        count = 0
        for instance in model.objects.filter(**{key: old}):
            setattr(instance, key, new)
            if not dry_run:
                instance.save()
            count += 1
        if count > 0:
            things.append('{}{} {}'.format(
                name or model._meta.verbose_name,
                count != 1 and 's' or '',
                count,
            ))

    if old.is_staff:
        new.is_staff = True
        if not dry_run:
            new.save()
        things.append('transferred is_staff')
    if old.is_superuser:
        new.is_superuser = True
        if not dry_run:
            new.save()
        things.append('transferred is_superuser')

    # Groups
    for group in old.groups.all():
        if group not in new.groups.all():
            if not dry_run:
                new.groups.add(group)
            things.append('{} group membership transferred'.format(group.name))

    # Events
    migrate(Event, 'creator')
    migrate(Event, 'modified_user', name='modified event')
    # EventEmail
    migrate(EventEmail)
    # EventRevision
    migrate(EventRevision)
    # SuggestedEventComment
    migrate(SuggestedEventComment)
    # Comments
    migrate(Comment)
    # Discussions
    migrate(Discussion.moderators.through, only_if_in=True)
    # Suggested discussions
    migrate(SuggestedDiscussion.moderators.through, only_if_in=True)
    # Event assignments
    migrate(EventAssignment.users.through, only_if_in=True)
    # Unsubscriptions
    migrate(Unsubscription)
    # SuggestedEvent
    migrate(SuggestedEvent)
    # Closed captions
    migrate(ClosedCaptions, 'created_user')
    # Rev orders
    migrate(RevOrder, 'created_user')
    # EventTweet
    migrate(EventTweet, 'creator')
    # Approval
    migrate(Approval)
    # Picture
    migrate(Picture, 'modified_user')
    # Chapters
    migrate(Chapter)
    # Logged search
    migrate(LoggedSearch)
    # Saved search
    migrate(SavedSearch)
    # Starred events
    migrate(StarredEvent)
    # (survey) Answers
    migrate(Answer)
    # Upload
    migrate(Upload)

    return things
|
This is a gray granodiorite stele fragment with three horizontal lines of incised Meroitic script separated by incised register lines. Style A. Line width is 3.9 - 4.0 cm.
From Nubia (Sudan) Gebel Barkal, B551 III(3) "W of third N area in sand just above floor. 1919: excavated by the Harvard University-Museum of Fine Arts Expedition; assigned to the MFA by the government of Sudan.
|
from django.views.generic import ListView, DetailView, MonthArchiveView
from django import forms
from .models import Post
class ListMixin(object):
    """Shared configuration for paginated published-post listings."""
    paginate_by = 5
    context_object_name = 'posts'
    template_name = 'blog/post_list_view.html'

    def get_queryset(self):
        """Published posts, optionally filtered by the ?tag= query parameter."""
        tag = self.request.GET.get('tag')
        queryset = Post.objects.published()
        if tag:
            queryset = queryset.filter(tags__name__in=[tag])
        return queryset
class MonthArchive(ListMixin, MonthArchiveView):
    """Monthly archive of published posts, paginated via ListMixin."""
    # Archive months are derived from the post's publication date.
    date_field = 'published_date'
class PostListView(ListMixin, ListView):
    """Paginated list of published posts (filtering comes from ListMixin)."""
    model = Post
class PostDetailView(DetailView):
    """Detail page for a single post."""
    model = Post
    context_object_name = 'post'
    template_name = 'blog/post_detail_item.html'
class SearchListView(ListView):
    """Paginated search over published posts (search term in URL kwarg 'q')."""
    model = Post
    template_name = 'blog/post_list_view.html'
    context_object_name = 'posts'
    paginate_by = 5

    def get_queryset(self):
        """Search published posts; without a term, list the published posts.

        Fix: the no-term fallback used to be ``Post.objects.all()``, which
        exposed unpublished posts -- every other listing view in this module
        only ever serves ``published()``.
        """
        search = self.kwargs.get('q', None)
        if search:
            return Post.objects.published().search(search)
        return Post.objects.published()
|
About A Rosie Place for children.
A Rosie Place for Children is situated on five beautiful acres in South Bend, Indiana. Accommodations include six unique kid-friendly bedrooms the children like to call their own. Our home is warm, spacious and colorful featuring nature-inspired themes. The saltwater aquarium, colorful murals, outdoor pond and waterfall are just some of the fun features the children love.
Families choose our respite care service for a variety of reasons. Our staff understands the importance of continuity in each child’s care, and respects the role of the caregiver at home. An in-depth assessment is provided by one of our registered nurses to ensure quality care and comfort.
A Rosie Place is currently open three weekends each month. Please contact us for more information.
A Rosie Place for Children is the first (and only) specialty hospital in the state of Indiana exclusively for children who are medically fragile. Although we are a highly equipped hospital, capable of caring for the most complex pediatric conditions, we like to emphasize our home-away-from-home environment.
A Rosie Place for Children depends on the generosity of the community. We depend on dedicated individuals, businesses, and various entities to help us serve the community by providing a level of excellence our families and children who are medically fragile deserve. See the list of some of our supporters here.
|
#!/usr/bin/env python
# Copyright (c) 2013 Maxim Kovalev, Carnegie Mellon University
# This file is part of Locationing Server.
#
# Locationing Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Locationing Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Locationing Server. If not, see <http://www.gnu.org/licenses/>.
import dataloader
import dataprocessor
import traceback
def create_data_processor():
    """Build a DataProcessor from the training-database statistics."""
    dataloader.db_name = "wifi_location_training"
    return dataprocessor.DataProcessor(dataloader.get_all_wifi_stats(),
                                       dataloader.get_all_gps_stats())
def load_wifi_gps(timestamp):
    """Fetch the (wifi, gps) readings recorded at `timestamp`."""
    return (dataloader.get_one_wifi_reading(timestamp),
            dataloader.get_one_gps_reading(timestamp))
def lookup_location(location_id):
    """Resolve a location id to its record in the training database."""
    # Lookups always go against the training DB, even while scoring test data.
    dataloader.db_name = "wifi_location_training"
    return dataloader.lookup_location(location_id)
def main():
    """Score the location estimator against every timestamp in the test DB."""
    dp = create_data_processor()
    # ts = dataloader.get_few_timestamps(10)
    dataloader.db_name = "wifi_location_test"
    ts = dataloader.get_all_timestamps()
    l = len(ts)
    rights = 0
    for i in xrange(l):
        try:
            t = ts[i]
            # create_data_processor()/lookup_location() switch db_name back
            # to the training DB, so re-point it at the test DB each pass.
            dataloader.db_name = "wifi_location_test"
            w, g = load_wifi_gps(t)
            ev_loc = dp.estimate_location(w, g)
            tr_loc = dataloader.get_true_location(t)
            ev_loc_name = lookup_location(ev_loc[0])
            tr_loc_name = lookup_location(tr_loc)
            if ev_loc_name == tr_loc_name:
                rights += 1
            # NOTE(review): 100*i/l is integer division under Python 2,
            # so the running percentage is truncated -- presumably intended.
            print i, "of", l, "(", 100*i/l, "%), rights:", float(rights) / (i+1), "Timestamp:", t, "estimate:", ev_loc, "(", ev_loc_name, ") true:", tr_loc, "(", tr_loc_name, ")"
        except Exception as e:
            # Keep scoring the remaining timestamps after a failure.
            tr = traceback.format_exc().splitlines()
            for line in tr:
                print line
            print e
    # NOTE(review): if ts is empty this divides by zero, and `i` below is
    # undefined -- confirm an empty test DB is impossible here.
    print "Total accuracy:", 100*float(rights) / l
    print "Or, considering i's", 100*float(rights) / (i+1)


if __name__ == "__main__":
    main()
|
To honor its form — true devotion.
And I love this idea…..
A Givebox is a cupboard where people can put out stuff they don’t need anymore, that others are free to take. The intention behind these Giveboxes are that they encourage neighborhood communicating and exchanging and much more.
# 1 – When being unbecoming is a good thing :-)…..
# 2 – An excerpt from David Thomas Nicol’s Subtle Activism…..
“Subtle activism can be understood as a set of practices that allow us to connect, in the depth of our being, with our love for the world and our longing for it to reflect the highest potentials of human nature. Although being in touch with our hope for a more loving or peaceful world can make us more vulnerable, it is also the most powerful force for change we know. Subtle activism represents the intention to cultivate this force as a transformative presence in the world. Although countless concrete initiatives will of course also be needed to restore our planet to balance, underlying and informing all of these actions is a shift in consciousness involving a deeper awareness of our underlying interconnectedness. That shift can, and indeed must, take place in a very subtle way in the depths of the human psyche.
Subtle Activism envisions the emergence of postmodern or integral expressions of spirituality that help to re-establish links between non dogmatic forms of spiritual consciousness and modern public awareness—not through overt action in the sociopolitical or even intellectual arena, but via actual practices that enact the connection at subtle levels of consciousness.
# 3 – And some stimulation from Abraham to end on…..
Next PostNext Go With The Flow!
|
#!/usr/bin/env python
# coding: utf-8
# Reflection and Heating
# ============================
#
# For a comparison between "Horvat" and "Wilson" methods in the "irad_method" parameter, see the tutorial on [Lambert Scattering](./irrad_method_horvat.ipynb).
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.2 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:

# Pin phoebe to the 2.2.x series used by this tutorial.
get_ipython().system('pip install -I "phoebe>=2.2,<2.3"')

# As always, let's do imports and initialize a logger and a new bundle. See [Building a System](../tutorials/building_a_system.htmlipynb) for more details.
# NOTE(review): the link suffix ".htmlipynb" looks garbled (".html" or ".ipynb"?) -- confirm the intended target.

# In[1]:

get_ipython().run_line_magic('matplotlib', 'inline')

# In[2]:

import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt

logger = phoebe.logger('error')

b = phoebe.default_binary()

# Relevant Parameters
# ---------------------------------
# The parameters that define reflection and heating are all prefaced by "irrad_frac" (fraction of incident flux) and suffixed by "bol" to indicate that they all refer to a bolometric (rather than passband-dependent) process. For this reason, they are *not* stored in the dataset, but rather directly in the component.
#
# Each of these parameters dictates how much incident flux will be handled by each of the available processes. For now these only include reflection (heating with immediate re-emission, without heat distribution) and lost flux. In the future, heating with distribution and scattering will also be supported.
#
# For each component, these parameters *must* add up to exactly 1.0 - and this is handled by a constraint which by default constrains the "lost" parameter.

# In[3]:

print(b['irrad_frac_refl_bol'])

# In[4]:

print(b['irrad_frac_lost_bol'])

# In[5]:

print(b['irrad_frac_refl_bol@primary'])

# In[6]:

print(b['irrad_frac_lost_bol@primary@component'])

# In order to see the effect of reflection, let's set "irrad_frac_refl_bol" of both of our stars to 0.9 - that is 90% of the incident flux will go towards reflection and 10% will be ignored.

# In[7]:

b.set_value_all('irrad_frac_refl_bol', 0.9)

# Since reflection can be a computationally expensive process and in most cases is a low-order effect, there is a switch in the compute options that needs to be enabled in order for reflection to be taken into account. If this switch is False (which it is by default), the albedos are completely ignored and will be treated as if all incident light is lost/ignored.

# In[8]:

print(b['irrad_method@compute'])

# Reflection has the most noticeable effect when the two stars are close to each other and have a large temperature ratio.

# In[9]:

b['sma@orbit'] = 4.0

# In[10]:

b['teff@primary'] = 10000

# In[11]:

b['teff@secondary'] = 5000

# Influence on Light Curves (fluxes)
# ---------------------------------

# In[12]:

b.add_dataset('lc', times=np.linspace(0,1,101))

# Let's run models with the reflection switch both turned on and off so that we can compare the two results. We'll also override delta to be a larger number since the computation time required by delta depends largely on the number of surface elements.
# NOTE(review): the text mentions overriding "delta", but the calls below
# pass `ntriangles` -- the prose likely predates an API change; confirm.

# In[13]:

b.run_compute(irrad_method='none', ntriangles=700, model='refl_false')

# In[14]:

b.run_compute(irrad_method='wilson', ntriangles=700, model='refl_true')

# In[15]:

afig, mplfig = b.plot(show=True, legend=True)

# In[16]:

# Difference between the reflective and non-reflective light curves.
artists = plt.plot(b['value@times@refl_false'], b['value@fluxes@refl_true']-b['value@fluxes@refl_false'], 'r-')

# Influence on Meshes (Intensities)
# ------------------------------------------

# In[17]:

b.add_dataset('mesh', times=[0.2], columns=['intensities@lc01'])

# In[18]:

b.run_compute(irrad_method='none', ntriangles=700, model='refl_false', overwrite=True)

# In[19]:

b.run_compute(irrad_method='wilson', ntriangles=700, model='refl_true', overwrite=True)

# In[20]:

afig, mplfig = b.plot(component='secondary', kind='mesh', model='refl_false', fc='intensities', ec='face', show=True)

# In[21]:

afig, mplfig = b.plot(component='secondary', kind='mesh', model='refl_true', fc='intensities', ec='face', show=True)

# In[ ]:
|
1cindycoxx . Eva1Sweet. Akkane. blondyxangelx.
sexitaliantoyboyAlexisBrittRaechkaSonyaking .AnaughtyGirlzzAlisonHoffmannYeraiAckerAbsoluteCute .BustyMaddieAlexaJohnssonnAnaandKateangie4ux .UrKatyCatFridaSunDitaREDEdithCutex .AliceMuscleJohn0384LunaticLoverRaechka .CellinaBlackangie4uxmagicGIRL7geney .HibencatSexyVitjaXXLSarykaFridaSun .PRETTYSEXYBOYSJeffAndHarrygeneyNadineefoxy .RaechkaEdithCutexEmaGreenHornyLoveAdrian .SarykaYeraiAckerEdithCutexbedroomeyess .AnnaboomAngyeAngellBlackAmySonyaking .
TaylorStudBustyMaddieJulieSportJulieSport .SonyakingFridaSunZARAIBIGCOCKFridaSun .CellinaBlackgeneySinfulAlizeeBIGcoks10I .SarykaRichardLohanlerik1984AndyDweejaha1 .PolletPougeneyAlisonHoffmannVikiSweetyKiss .FabiennyPolletPouAlinaAndTimfrancoisexxx69 .NadineefoxyNadineefoxyYeraiAckerEmaGreen .AlisonHoffmannLunaticLover1MasterChiefLatinxBurning .AlishaTorridEdithCutexclairecroftDitaRED .francoisexxx69JeffAndHarryAnnaboomsexitaliantoyboy .raresdarkKarinkaLove1LiaThaiPolletPou .
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import werkzeug
from odoo import http, _
from odoo.addons.auth_signup.models.res_users import SignupError
from odoo.addons.web.controllers.main import ensure_db, Home
from odoo.http import request
_logger = logging.getLogger(__name__)
class AuthSignupHome(Home):
    """Extends the web Home controller with signup / password-reset routes.

    Python 2 code (legacy ``except Exc, e`` syntax); part of Odoo's
    auth_signup addon.
    """

    @http.route()
    def web_login(self, *args, **kw):
        """Standard login, enriched with signup/reset-password feature flags."""
        ensure_db()
        response = super(AuthSignupHome, self).web_login(*args, **kw)
        # Expose signup_enabled / reset_password_enabled to the login template.
        response.qcontext.update(self.get_auth_signup_config())
        if request.httprequest.method == 'GET' and request.session.uid and request.params.get('redirect'):
            # Redirect if already logged in and redirect param is present
            return http.redirect_with_hash(request.params.get('redirect'))
        return response

    @http.route('/web/signup', type='http', auth='public', website=True)
    def web_auth_signup(self, *args, **kw):
        """Render the signup form; on POST, create the account and log in."""
        qcontext = self.get_auth_signup_qcontext()
        # Signup is reachable only with an invitation token or when open
        # (uninvited) signup is enabled.
        if not qcontext.get('token') and not qcontext.get('signup_enabled'):
            raise werkzeug.exceptions.NotFound()
        if 'error' not in qcontext and request.httprequest.method == 'POST':
            try:
                self.do_signup(qcontext)
                return super(AuthSignupHome, self).web_login(*args, **kw)
            except (SignupError, AssertionError), e:
                # AssertionError covers the form-validation asserts in do_signup.
                if request.env["res.users"].sudo().search([("login", "=", qcontext.get("login"))]):
                    qcontext["error"] = _("Another user is already registered using this email address.")
                else:
                    _logger.error(e.message)
                    qcontext['error'] = _("Could not create a new account.")
        return request.render('auth_signup.signup', qcontext)

    @http.route('/web/reset_password', type='http', auth='public', website=True)
    def web_auth_reset_password(self, *args, **kw):
        """Render the reset form; with a token, set the password and log in,
        otherwise email reset credentials to the given login."""
        qcontext = self.get_auth_signup_qcontext()
        if not qcontext.get('token') and not qcontext.get('reset_password_enabled'):
            raise werkzeug.exceptions.NotFound()
        if 'error' not in qcontext and request.httprequest.method == 'POST':
            try:
                if qcontext.get('token'):
                    # Token present: this is the second step, actually setting
                    # the new password via the signup machinery.
                    self.do_signup(qcontext)
                    return super(AuthSignupHome, self).web_login(*args, **kw)
                else:
                    login = qcontext.get('login')
                    assert login, "No login provided."
                    request.env['res.users'].sudo().reset_password(login)
                    qcontext['message'] = _("An email has been sent with credentials to reset your password")
            except SignupError:
                qcontext['error'] = _("Could not reset your password")
                _logger.exception('error when resetting password')
            except Exception, e:
                qcontext['error'] = e.message
        return request.render('auth_signup.reset_password', qcontext)

    def get_auth_signup_config(self):
        """retrieve the module config (which features are enabled) for the login page"""
        IrConfigParam = request.env['ir.config_parameter']
        return {
            'signup_enabled': IrConfigParam.sudo().get_param('auth_signup.allow_uninvited') == 'True',
            'reset_password_enabled': IrConfigParam.sudo().get_param('auth_signup.reset_password') == 'True',
        }

    def get_auth_signup_qcontext(self):
        """ Shared helper returning the rendering context for signup and reset password """
        qcontext = request.params.copy()
        qcontext.update(self.get_auth_signup_config())
        if qcontext.get('token'):
            try:
                # retrieve the user info (name, login or email) corresponding to a signup token
                token_infos = request.env['res.partner'].sudo().signup_retrieve_info(qcontext.get('token'))
                for k, v in token_infos.items():
                    qcontext.setdefault(k, v)
            except:
                # NOTE(review): bare except — any failure (not just a bad
                # token) is reported as an invalid token.
                qcontext['error'] = _("Invalid signup token")
                qcontext['invalid_token'] = True
        return qcontext

    def do_signup(self, qcontext):
        """ Shared helper that creates a res.partner out of a token """
        values = { key: qcontext.get(key) for key in ('login', 'name', 'password') }
        assert values.values(), "The form was not properly filled in."
        assert values.get('password') == qcontext.get('confirm_password'), "Passwords do not match; please retype them."
        supported_langs = [lang['code'] for lang in request.env['res.lang'].sudo().search_read([], ['code'])]
        if request.lang in supported_langs:
            values['lang'] = request.lang
        self._signup_with_values(qcontext.get('token'), values)
        request.env.cr.commit()

    def _signup_with_values(self, token, values):
        db, login, password = request.env['res.users'].sudo().signup(values, token)
        request.env.cr.commit()     # as authenticate will use its own cursor we need to commit the current transaction
        uid = request.session.authenticate(db, login, password)
        if not uid:
            raise SignupError(_('Authentication Failed.'))
|
The magnificent design accents of the Vanderbilt electronic humidor's high lacquer finish bring about a fresh new look that is on a level all its own.
A smooth, flowing ebony wood grain pattern flows beautifully into the humidor's softened edges.
Opening the humidor through its over-sized contemporary pull reveals a striking interior that makes a statement. Inside you will find an ultra-large face rectilinear digital hygrometer with built-in calibration feature and an adjustable vent polished humidifier to assist in monitoring and regulating humidity levels. All of these devices are recessed magnetically into a stunning high gloss ebony wood plate that matches its exterior.
And to top it off..... a one-of-a-kind slotted divider system that looks amazing and does away with traditional press fit dividers! To adjust the position of any of the 3 included dividers, simply lift one up, relocate and slide back into place.
The bottom of the interior is equipped with an airflow grate which improves the circulation of moist air underneath your cigars.
The elegance and beauty of the Vanderbilt embodies one of those proud investments you can't help but stare at in amazement.
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2013 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
# If True, the TreeTester will automatically reorder node on the same level
# as a deleted node. If False, it means that Liblarch has the responsability
# to handle that itself.
REORDER_ON_DELETE = False
class TreeTester:
    """ A class that will check if a tree implementation is consistent
    by connecting to emitted signals and crashing on any problem """
    # Python 2 module (uses print statements).

    def __init__(self, viewtree):
        self.tree = viewtree
        # both dict should always be synchronized
        # They are the internal representation of the tree,
        # based only on received signals
        # nodes: node id -> list of tuple paths; paths: tuple path -> node id
        self.nodes = {}
        self.paths = {}
        self.tree.register_cllbck('node-added-inview', self.add)
        self.tree.register_cllbck('node-deleted-inview', self.delete)
        self.tree.register_cllbck('node-modified-inview', self.update)
        self.tree.register_cllbck('node-children-reordered', self.reordered)
        # Human-readable log of every signal received, for error reports.
        self.trace = "* * * * * * * *\n"

    def add(self, nid, path):
        """Record that node `nid` appeared at `path`; crash on conflicts."""
        self.trace += "adding %s to path %s\n" % (nid, str(path))
        currentnode = self.paths.get(path, None)
        if currentnode and currentnode != nid:
            raise Exception('path %s is already occupied by %s' % (
                str(path), nid))
        if nid in self.nodes:
            node = self.nodes[nid]
        else:
            node = []
            self.nodes[nid] = node
        if path not in node:
            node.append(path)
        self.paths[path] = nid

    def delete(self, nid, path):
        """Record removal of `nid` at `path`, then shift sibling paths down
        like a real GtkTreeModel would."""
        self.trace += "removing %s from path %s\n" % (nid, str(path))
        if nid != self.paths.get(path, None):
            error = '%s is not assigned to path %s\n' % (nid, str(path))
            error += self.print_tree()
            raise Exception(error)
        if path not in self.nodes.get(nid, []):
            raise Exception('%s is not a path of node %s' % (str(path), nid))
        if REORDER_ON_DELETE:
            index = path[-1:]
            print "reorder on delete not yet implemented"
        self.nodes[nid].remove(path)
        if len(self.nodes[nid]) == 0:
            self.nodes.pop(nid)
        self.paths.pop(path)
        # Move other paths lower like in real TreeModel
        path_prefix = path[:-1]
        index = path[-1]
        assert path_prefix + (index, ) == path, "%s vs %s" % (
            path_prefix + (index, ), path)

        def check_prefix(path):
            """ Is this path affected by the change?
            Conditions:
            * the same prefix
              (3, 1, 2, 3) vs (3,1,2,4)  OK
              (3, 1, 2, 3) vs (3,1,2,4,0)  OK
              (3, 1, 2, 3) vs (3,2,2,4)  FALSE
            * higher index
              (3, 1, 2, 3) vs (3,1,2,2)  FALSE
            """
            if len(path) <= len(path_prefix):
                return False
            for i, pos in enumerate(path_prefix):
                if path[i] != pos:
                    return False
            return path[len(path_prefix)] > index

        paths = list(self.paths.keys())
        paths.sort()
        # NOTE(review): the loop below rebinds `path`, `index` and `nid`,
        # shadowing the method arguments used above — confirm intentional.
        for path in paths:
            old_path = path
            if check_prefix(path) and len(path_prefix) > 1:
                new_path = list(path)
                print "new_path: %s" % str(new_path)
                index = len(path_prefix)
                # NOTE(review): str() here turns one path component into a
                # string while the others stay ints — looks like a bug; the
                # assert below would then compare mixed-type tuples. Verify.
                new_path[index] = str(int(new_path[index]) - 1)
                new_path = tuple(new_path)
                print "new_path: %s" % str(new_path)
                print "self.paths: %s" % str(self.paths)
                assert new_path not in self.paths
                nid = self.paths[old_path]
                self.nodes[nid].remove(old_path)
                del self.paths[old_path]
                self.nodes[nid].append(new_path)
                self.paths[new_path] = nid

    def update(self, nid, path):
        """No-op: the old consistency check is kept below for reference."""
        ## self.tree.flush()
        # self.trace += "updating %s in path %s\n" %(nid, str(path))
        # error = "updating node %s for path %s\n" %(nid, str(path))
        # if not self.nodes.has_key(nid):
        #     error += "%s is not in nodes !\n" %nid
        #     error += self.print_tree()
        #     raise Exception(error)
        # #Nothing to do, we just update.
        # for p in self.nodes[nid]:
        #     if self.paths[p] != nid:
        #         raise Exception('Mismatching path for %s'%nid)
        # if not self.paths.has_key(path):
        #     error += '%s is not in stored paths (node %s)\n'%(str(path),nid)
        #     error += self.print_tree()
        #     raise Exception(error)
        # n = self.paths[path]
        # if path not in self.nodes[n] or n != nid:
        #     raise Exception('Mismatching node for path %s'%str(p))
        # Because of the asynchronousness of update, this test
        # doesn't work anymore
        pass

    def reordered(self, nid, path, neworder):
        """Apply a children-reordered signal: neworder[i] is the OLD index
        of the child that is now at index i; all descendant paths move too."""
        print "reordering"
        self.trace += "reordering children of %s (%s) : %s\n" % (nid,
                                                                 str(path),
                                                                 neworder)
        self.trace += "VR is %s\n" % self.tree.node_all_children()
        if not path:
            path = ()
        i = 0
        newpaths = {}
        toremove = []
        # we first update self.nodes with the new paths
        while i < len(neworder):
            if i != neworder[i]:
                old = neworder[i]
                oldp = path + (old, )
                newp = path + (i, )
                le = len(newp)
                for pp in self.paths.keys():
                    if pp[0:le] == oldp:
                        n = self.paths[pp]
                        self.nodes[n].remove(pp)
                        newpp = newp + pp[le:]
                        self.nodes[n].append(newpp)
                        self.trace += " change %s path from %s to %s\n" % (
                            n, pp, newpp)
                        newpaths[newpp] = n
                        toremove.append(pp)
            i += 1
        # now we can update self.paths
        for p in toremove:
            self.paths.pop(p)
        for p in newpaths:
            self.trace += " adding %s to paths %s\n" % (newpaths[p], str(p))
            self.paths[p] = newpaths[p]

    def test_validity(self):
        """Cross-check the recorded nodes/paths against the real tree;
        raise with a diagnostic dump on any mismatch, return True if OK."""
        for n in self.nodes.keys():
            paths = self.tree.get_paths_for_node(n)
            if len(self.nodes[n]) == 0:
                raise Exception('Node %s is stored without any path' % n)
            for p in self.nodes[n]:
                if self.paths[p] != n:
                    raise Exception('Mismatching path for %s' % n)
                if p not in paths:
                    error = 'we have a unknown stored path for %s\n' % n
                    nn = self.tree.get_node_for_path(p)
                    parent = self.tree.get_node_for_path(p[:-1])
                    error += ' path %s is the path of %s\n' % (
                        str(p), str(nn))
                    error += ' parent is %s' % parent
                    # error += self.trace
                    raise Exception(error)
                paths.remove(p)
            if len(paths) > 0:
                raise Exception('why is this path existing for %s' % n)
        for p in self.paths.keys():
            node = self.tree.get_node_for_path(p)
            n = self.paths[p]
            if n != node:
                error = 'Node for path is %s but should be %s' % (node, n)
                raise Exception(error)
            if p not in self.nodes[n]:
                error = 'Mismatching node for path %s\n' % str(p)
                error += self.print_tree()
                raise Exception(error)
            if len(p) == 1 and len(self.nodes[n]) > 1:
                # A top-level (virtual-root) node must have a single path.
                error = 'Node %s has multiple paths and is in the VR\n' % n
                error += self.print_tree()
                raise Exception(error)
        return True

    def print_tree(self):
        """Return the signal trace plus the current internal state."""
        st = self.trace
        st += "nodes are %s\n" % self.nodes
        st += "paths are %s\n" % self.paths
        return st

    def quit(self):
        """Detach all signal callbacks from the tree."""
        self.tree.deregister_cllbck('node-added-inview', self.add)
        self.tree.deregister_cllbck('node-deleted-inview', self.delete)
        self.tree.deregister_cllbck('node-modified-inview', self.update)
        self.tree.deregister_cllbck('node-children-reordered', self.reordered)
|
Fabulous move-in ready ranch in sought after Olde Cotswold with 4 bedrooms, 3.5 baths. Kitchen opens up to office, mudroom and has lots of natural light. SS appliances, granite countertops, white subway tile backsplash, 4 burner gas cooktop with griddle. Hardwoods throughout (just refinished). Large master suite with spacious en suite bath. Cozy detached screen porch with wood burning fireplace added in 2008.
Please have an Agent contact me about MLS#: 3483045 2420 Danbury St, Charlotte.
|
from datetime import datetime, timedelta
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, JsonResponse
import clients.models as clients
import directory.models as directory
from appconf.manager import SettingManager
from rmis_integration.client import Client
from slog.models import Log as slog
# Log `type` codes eligible for automatic cleanup; rows of any other type
# are preserved by log_cleanup(). The meaning of each code is defined by
# the slog.Log model elsewhere in the project.
CLEANUP_TYPES_LOG = (
    1,
    2,
    3,
    4,
    5,
    6,
    10,
    16,
    17,
    18,
    19,
    20,
    25,
    27,
    22,
    23,
    100,
    998,
    999,
    1001,
    2000,
    2001,
    2002,
    2003,
    2004,
    2005,
    2006,
    3000,
    3001,
    5000,
    6000,
    10000,
    20000,
    60001,
    60003,
)
@login_required
@staff_member_required
def log(request):
    """Report log-table statistics as JSON.

    Keys: ``cnt`` (total rows), ``store_days`` (retention window in days,
    from the ``max_log_store_days`` setting), ``to_delete`` (rows older
    than the window whose type is cleanable).
    """
    total = slog.objects.all().count()
    store_days = SettingManager.get("max_log_store_days", "120", "i")
    cutoff = datetime.today() - timedelta(days=store_days)
    stale = slog.objects.filter(time__lt=cutoff, type__in=CLEANUP_TYPES_LOG).count()
    return JsonResponse({"cnt": total, "store_days": store_days, "to_delete": stale})
@login_required
@staff_member_required
def log_cleanup(request):
    """Delete cleanable log rows older than the retention window.

    Returns the number of removed ``slog.Log`` rows as plain text.
    """
    days = SettingManager.get("max_log_store_days", "120", "i")
    cutoff = datetime.today() - timedelta(days=days)
    # QuerySet.delete() returns (total, {model_label: count}).
    _, per_model = slog.objects.filter(time__lt=cutoff, type__in=CLEANUP_TYPES_LOG).delete()
    return HttpResponse(str(per_model.get("slog.Log", 0)), content_type="text/plain")
@login_required
@staff_member_required
def db(request):
    """Placeholder endpoint: currently returns an empty JSON list."""
    payload = []
    # safe=False is required to serialize a non-dict top-level object.
    return JsonResponse(payload, safe=False)
@login_required
@staff_member_required
def rmis_check(request):
    """Check RMIS connectivity by resolving the organization and department ids."""
    client = Client()
    org = client.search_organization_id(check=True)
    dep = client.search_dep_id(check=True)
    return HttpResponse(org + " " + dep, content_type="text/plain")
@login_required
@staff_member_required
def archive_without_directions(request):
    """Count archived patient cards without any directions.

    With ``?remove=1`` the matching cards are deleted and the number of
    removed ``clients.Card`` rows is reported instead.
    """
    cards = clients.Card.objects.filter(napravleniya__isnull=True, is_archive=True)
    result = cards.count()
    if request.GET.get("remove", "0") == "1":
        _, per_model = cards.delete()
        result = per_model.get("clients.Card", 0)
    return HttpResponse(str(result), content_type="text/plain")
@login_required
@staff_member_required
def patients_without_cards(request):
    """Count individuals that have no patient card.

    With ``?remove=1`` the matching individuals are deleted and the number
    of removed ``clients.Individual`` rows is reported instead.
    """
    individuals = clients.Individual.objects.filter(card__isnull=True)
    result = individuals.count()
    if request.GET.get("remove", "0") == "1":
        _, per_model = individuals.delete()
        result = per_model.get("clients.Individual", 0)
    return HttpResponse(str(result), content_type="text/plain")
@login_required
@staff_member_required
def sync_departments(request):
    """Synchronize departments from RMIS; report added/updated counts (Russian text)."""
    client = Client()
    added_updated = client.department.sync_departments()
    return HttpResponse("Добавлено: %s. Обновлено: %s." % added_updated, content_type="text/plain")
@login_required
@staff_member_required
def sync_researches(request):
    """Backfill missing ``podrazdeleniye`` on researches from their subgroup.

    Returns the number of researches that were updated, as plain text.
    """
    pending = directory.Researches.objects.filter(podrazdeleniye__isnull=True, subgroup__isnull=False)
    total = pending.count()
    for research_item in pending:
        research_item.podrazdeleniye = research_item.subgroup.podrazdeleniye
        research_item.save()
    return HttpResponse(str(total), content_type="text/plain")
|
Non-Shrink PTFE Tubing is virtually friction-free and one of the most versatile insulating materials made.
PTFE Tubing is the most versatile insulating material, offering highly stable electrical properties at virtually all electronic frequencies throughout an operating range of -70°C to 260°C. PTFE tubing is nontoxic and will not burn in normal atmosphere at any temperature, and is inert to practically all lubricants, solvents, and reagents. Electrical performance is excellent, offering extremely high dielectric strength even in the thinnest cross sections, with hot soldering irons and sub-zero environmental conditions having no effect. PTFE Teflon tubing possesses the lowest coefficient of friction of any known solid. It is also resistant to wicking, moisture absorption, or contamination from objectionable sources.
PTFE tubing is the electrical insulation tubing material to specify when the reliability and dependability of an application at environmental, electrical, mechanical, and chemical extremes is paramount.
|
#! -*- coding: utf8 -*-
# Spanish number words for 0-29, masculine forms (e.g. "veintiuno").
# Indices map directly to the number they spell.
especiales_masculino = [
    'cero',
    'uno',
    'dos',
    'tres',
    'cuatro',
    'cinco',
    'seis',
    'siete',
    'ocho',
    'nueve',
    'diez',
    'once',
    'doce',
    'trece',
    'catorce',
    'quince',
    'dieciséis',
    'diecisiete',
    'dieciocho',
    'diecinueve',
    'veinte',
    'veintiuno',
    'veintidós',
    'veintitrés',
    'veinticuatro',
    'veinticinco',
    'veintiséis',
    'veintisiete',
    'veintiocho',
    'veintinueve'
]
# Feminine forms 0-29 ("una", "veintiuna"); only 1 and 21 differ.
especiales_femenino = [
    'cero',
    'una',
    'dos',
    'tres',
    'cuatro',
    'cinco',
    'seis',
    'siete',
    'ocho',
    'nueve',
    'diez',
    'once',
    'doce',
    'trece',
    'catorce',
    'quince',
    'dieciséis',
    'diecisiete',
    'dieciocho',
    'diecinueve',
    'veinte',
    'veintiuna',
    'veintidós',
    'veintitrés',
    'veinticuatro',
    'veinticinco',
    'veintiséis',
    'veintisiete',
    'veintiocho',
    'veintinueve'
]
# Apocopated forms 0-29 ("un", "veintiún"), used before a masculine noun.
especiales_apocopado = [
    'cero',
    'un',
    'dos',
    'tres',
    'cuatro',
    'cinco',
    'seis',
    'siete',
    'ocho',
    'nueve',
    'diez',
    'once',
    'doce',
    'trece',
    'catorce',
    'quince',
    'dieciséis',
    'diecisiete',
    'dieciocho',
    'diecinueve',
    'veinte',
    'veintiún',
    'veintidós',
    'veintitrés',
    'veinticuatro',
    'veinticinco',
    'veintiséis',
    'veintisiete',
    'veintiocho',
    'veintinueve',
]
# Tens words; index i spells i*10 (index 0 is empty).
decenas = [
    '',
    'diez',
    'veinte',
    'treinta',
    'cuarenta',
    'cincuenta',
    'sesenta',
    'setenta',
    'ochenta',
    'noventa',
]
# Hundreds words, masculine; index i spells i*100 (index 0 is empty).
centena_masculino = [
    '',
    'ciento',
    'doscientos',
    'trescientos',
    'cuatrocientos',
    'quinientos',
    'seiscientos',
    'setecientos',
    'ochocientos',
    'novecientos'
]
# Hundreds, apocopated: exactly 100 is "cien" instead of "ciento".
centena_apocopado = [
    '',
    'cien',
    'doscientos',
    'trescientos',
    'cuatrocientos',
    'quinientos',
    'seiscientos',
    'setecientos',
    'ochocientos',
    'novecientos'
]
# Hundreds words, feminine ("doscientas", ...).
centena_femenino = [
    '',
    'ciento',
    'doscientas',
    'trescientas',
    'cuatrocientas',
    'quinientas',
    'seiscientas',
    'setecientas',
    'ochocientas',
    'novecientas',
]
# Scale words keyed by power of ten, plural forms.
exponentes_plural = {
    # 2:'cien',
    3: 'mil',
    6: 'millones',
    12: 'billones',
    18: 'trillones',
    24: 'cuatrillones', # add more exponents here
}
# Scale words keyed by power of ten, singular forms.
exponentes_singular = {
    # 2:'cien',
    3: 'mil',
    6: 'un millón',
    12: 'un billón',
    18: 'un trillón',
    24: 'un cuatrillón', # add more exponents here
}
|
To the editor: High-dose cyclophosphamide chemotherapy can be complicated by haemorrhagic cystitis, probably due to the effect of acrolein, a breakdown product, on the urothelium. The cystitis is occasionally fatal (1). The concurrent use of mesna (sodium 2-mercaptoethanesulphonate) has decreased the incidence of this complication (2). We report here the successful use of topical prostaglandin E2 (dinoprostone) in controlling severe chronic haemorrhagic cystitis after cyclophosphamide therapy to condition a bone marrow transplant recipient.
Published: Ann Intern Med. 1984;101(1):142.
|
#Gradebook Project *Display a persons grades/where can he/she get a job!*
#Made by JonathanD
#Thanks to LoveFeelings(https://www.twitch.tv/lovefeelings), Subzidion(https://www.twitch.tv/subzidion), MichaelC212(https://www.twitch.tv/michaelc212)
import time
from prettytable import PrettyTable
x = PrettyTable()
c = PrettyTable()
def Wait(x):
    """Pause the script for ``x`` seconds before the next statement runs."""
    time.sleep(x)
class Student(): # Class Student
    """Record of one student's personal data and grades.

    Attributes:
        Fullname: full name as entered.
        age: age string as entered.
        born: birthday string (dd.mm.yy).
        classes: list of class names.
        grades: dict mapping class name -> integer grade.
    """
    def __init__(self, Fullname, age, born, classes, grades): #Arguments
        self.Fullname = Fullname # Define
        self.age = age
        self.born = born
        self.classes = classes
        self.grades = grades

    def stats(self):
        """Return a one-line summary of this student."""
        # Bug fix: the original format string said "Fulll Name".
        return 'Full Name : {}, Age : {}, Born : {}, Grades : {}'.format(
            self.Fullname, self.age, self.born, self.grades)

    # Bug fix: the original class also defined zero-argument name()/age()/
    # born()/grades() methods that referenced undefined globals and raised
    # NameError on every call (and were shadowed by the instance attributes
    # of the same names anyway). They were unusable dead code and have been
    # removed; read the attributes directly instead.
def name():
    """Prompt until a plausible full name is entered; store it in global ``name_``.

    Bug fix: the original did ``elif int(name_)`` to detect numeric input,
    which raised an uncaught ValueError (crash) for any one-character
    non-numeric entry. A loop with ``str.isdigit`` replaces both the crash
    and the unbounded recursion on retry.
    """
    global name_
    while True:
        name_ = input('Full name: ')
        # Accept anything longer than one character that is not purely digits.
        if len(name_) > 1 and not name_.isdigit():
            Wait(1)
            return
        print("Error, only letters! (Try again)") #\n creates a new line!
        Wait(1.5)
def age():
    """Prompt until a numeric age is entered; store it in global ``age_``.

    Bug fix: retries now loop instead of recursing, so repeated bad input
    cannot exhaust the call stack.
    """
    global age_
    while True:
        age_ = input("Age: ")
        if age_.isdigit():
            Wait(1.5)
            return
        print("Error, please enter a valid number!")
def born():
    """Prompt until a non-empty birthday is entered; store it in global ``born_``.

    Bug fix: retries now loop instead of recursing. Input is still only
    checked for non-emptiness, matching the original behavior.
    """
    global born_
    while True:
        born_ = input("Birthday(dd.mm.yy): ")
        if born_:
            Wait(1.5)
            return
        Wait(1.5)
        print("Error, please enter a valid birthday!")
def classes():
    """Prompt for class names, fill global ``s_classes`` and return them joined.

    Bug fixes: the original had unreachable ``print``/``Wait`` statements
    placed after ``return``, and recursed on empty input; both are replaced
    by a retry loop.
    """
    global s_classes
    s_classes = [] # Empty index! Add item/word to the index (index name goes here).append(obj)
    while True:
        add = input("Enter classes: ")
        if add:
            # Whitespace-separated words become individual class names.
            s_classes.extend(add.split() )
            return ', '.join(s_classes)
        Wait(1.5)
        print("Error, something went wrong!")
def Getgrades():
    """Ask for an integer grade in every registered class; fill global ``grades``."""
    global grades
    grades = {}
    for subject in s_classes:
        while True:
            try:
                score = int(input("What did you get in {} ".format(subject)))
            except ValueError:
                # Non-integer entry: re-prompt for the same class.
                print("Please give a valid integer")
                continue
            print("Registered for {}.".format(subject))
            grades[subject] = score
            break
#mylist = ['spam', 'ham', 'eggs']
#print(mylist)
#print (', '.join(mylist) )
print("Made by JonathanD 2017\n")
# Interactive data entry: each call fills a module-level global
# (name_, age_, born_, s_classes, grades) that is read below.
name()
age()
born()
classes()
Getgrades()
Student_1 = Student(name_, age_, born_, s_classes, grades)
# Joining the dict yields its keys, i.e. the class names, one per line.
test = (', \n'.join(grades) )
value = sum(grades.values())
# NOTE(review): this rebinds `c`, discarding the PrettyTable created at the
# top of the file (which is never used) — confirm that table is not needed.
c = len(grades)
# "Snitt" is Norwegian for "average": sum of grades / number of classes.
x.field_names = ["Name", "Born", "Age", "Classes", "Snitt(NO)"]
x.add_row([name_, born_ , age_, test, value / c])
#file = open("details.txt", "w")
#file.write(x)
#file.close()
print(x) #+ "\n(Data saved @ 'details.txt')")
# Keep the console window open for 200 seconds before the script exits.
Wait(200)
|
"Robert was quick to respond and schedule our pickup. They were professional, efficient and on time. I would definitely recommend using them!"
"Anytime Trash Removal is top notch! I called this morning and they came out right on time this afternoon. I had a lot more stuff than I realized, and they didn’t bat an eyelash. They worked quickly and courteously in this heat. They were finished quickly and the price was extremely reasonable. Hire these guys, they will not let you down."
"Who could go wrong with hiring these guys? No one. They were professional, courteous, hard working and honest. They showed up on time and were finished, without any issues, within an hours time. I will recommend Anytime Trash Removal and definitely use them again."
|
# -*- coding: utf-8 -*-
''' accpy.simulate.lsd
author: felix.kramer(at)physik.hu-berlin.de
'''
from __future__ import division
from numpy import (eye, dot, trapz, pi, nanmean, array, newaxis,
hstack, concatenate, empty, dstack, sqrt, zeros,
vstack)
from numpy.random import standard_normal
from numpy.linalg import inv
from .slicing import cellslice
from .rmatrices import rmatrix, UCS2R
from .tracking import (initialtwiss, tracktwiss4, trackparts)
from .radiate import dipolering, synchroints
from .particles import part2mqey
from ..lattices.reader import latt2py
from ..visualize.plot import (plotopticpars_closed, plottrajs, plotbeamsigma,
plotopticpars_open, plotoptic, plotphasespace,
plotdisptraj)
def oneturn(UC, P_UC, N_UC, gamma):
    """Accumulate the unit-cell transfer matrix and dipole statistics.

    Multiplies the R matrices of all P_UC elements (columns of UC) into one
    unit-cell matrix, collects length/bending-radius of every dipole
    (element type 1), and derives the periodic twiss/dispersion start values.

    Returns (xtwiss0, ytwiss0, xdisp0, rho, D_UC, UD, LD) where rho/LD are
    the mean dipole bending radius/length, D_UC the dipole count per cell
    and UD the total dipole length of the ring.
    """
    M = eye(6)
    dipole_lengths = []
    dipole_radii = []
    n_dipoles = 0
    for col in range(P_UC):
        element = UC[:, col]
        # Left-multiply: matrices accumulate in beamline order.
        M = dot(rmatrix(element, gamma), M)
        if element[0] == 1:  # element type 1 == dipole
            dipole_lengths.append(element[1])
            dipole_radii.append(element[2])
            n_dipoles += 1
    LD = nanmean(dipole_lengths)
    rho = nanmean(dipole_radii)
    UD = N_UC*n_dipoles*LD
    xtwiss0, ytwiss0, xdisp0 = initialtwiss(M)
    # one turn R matrix of ring
    M1T = eye(6)
    for _ in range(N_UC):
        M1T = dot(M, M1T)
    return xtwiss0, ytwiss0, xdisp0, rho, n_dipoles, UD, LD
def gettunes(s, xtwiss, ytwiss, N_UC):
    """Betatron tunes Q_u = N_UC/(2*pi) * integral ds / beta_u(s).

    ``xtwiss``/``ytwiss`` carry beta_u(s) in component [0, 0, :]; the
    integral over one unit cell is scaled by the number of cells N_UC.
    """
    scale = N_UC/2/pi
    Qx = scale*trapz(1./xtwiss[0, 0, :], s)
    Qy = scale*trapz(1./ytwiss[0, 0, :], s)
    return Qx, Qy
def getchromaticity(s, xtwiss, ytwiss, N_UC, UCS):
    """Natural chromaticities Xi_u = N_UC/(4*pi) * integral k_u(s)*beta_u(s) ds.

    Element types in UCS row 0: 3 = horizontally focusing quad (k_x = -k),
    4 = vertically focusing quad (k_x = +k); k_y is always -k_x. Row 4 of
    UCS holds the quadrupole strength k. Negative k means focusing.
    """
    # Sign of k_x per element type; anything that is not a quad contributes 0.
    sign_by_type = {3: -1, 4: 1}
    kx, ky = [], []
    for k, t in zip(UCS[4, :], UCS[0, :]):
        sign = sign_by_type.get(t, 0)
        kx.append(sign*k)
        ky.append(-sign*k)
    Xx = N_UC*trapz(kx*xtwiss[0, 0, 1:], s[1:])/4/pi
    Xy = N_UC*trapz(ky*ytwiss[0, 0, 1:], s[1:])/4/pi
    return Xx, Xy
def lsd(closed, latt, slices, mode, particles, rounds):
    """Lattice simulation driver: track twiss functions or particle ensembles.

    Args:
        closed: True for a closed ring (periodic solution), False for a
            transfer line with start values supplied by the lattice file.
        latt: lattice definition passed to latt2py.
        slices: number of slices per element for the fine-grained tracking.
        mode: 'trackbeta' (optics/beam size plots) or 'trackpart'
            (particle-ensemble tracking plots).
        particles, rounds: ensemble size and number of turns, used only in
            'trackpart' mode.

    Returns a list of matplotlib figures.

    NOTE(review): if `mode` is neither 'trackbeta' nor 'trackpart', `figs`
    is never assigned and the final return raises UnboundLocalError —
    presumably callers only pass these two modes; confirm.
    """
    # The closed and open lattice files carry different payloads.
    if closed:
        (particle, E, I, UC, diagnostics, N_UC,
         HF_f, HF_V) = latt2py(latt, closed)
    else:
        (particle, E, I, UC, diagnostics, N_UC,
         xtwiss0, ytwiss0, xdisp0,
         emit_x, emit_y, emit_s) = latt2py(latt, closed)
    m, q, E0, gamma, P_UC = part2mqey(E, UC, particle)
    if closed:
        # Periodic start values come from the one-turn matrix instead.
        xtwiss0, ytwiss0, xdisp0, rho, D_UC, UD, LD = oneturn(UC, P_UC, N_UC, gamma)
    # get sliced unit cell for finer tracking
    s, UCS, P_UCS = cellslice(UC, P_UC, slices)
    # calculate according sliced R matrix
    R = UCS2R(P_UCS, UCS, gamma)
    # track twiss and dispersion
    xtwiss, ytwiss, xdisp, xytwiss = tracktwiss4(R, P_UCS, closed, xtwiss0,
                                                 ytwiss0, xdisp0)
    if closed:
        # tune Q_u:=1/2pi*int(ds/beta_u(s))
        Qx, Qy = gettunes(s, xtwiss, ytwiss, N_UC)
        # nat chromaticity xi_u:=1/4pi*int(k_u(s)*beta_u(s)) with k_y = - k_x
        Xx, Xy = getchromaticity(s, xtwiss, ytwiss, N_UC, UCS)
        # calculate according ring of dipoles
        sdip, disperdip, xtwissdip, ytwissdip = \
            dipolering(s, N_UC, UD, P_UCS, UCS, xdisp, xtwiss, ytwiss, slices,
                       D_UC)
        # synchrotron integrals
        (Cq, Jx, emiteqx, tau_x, Jy, E, emiteqy, tau_y, alpha_mc, eta_mc,
         gamma_tr, Q_s, Js, sigma_E, sigma_tau, sigma_s, tau_s, U_rad, P_ges,
         E_c, lambda_c) = \
            synchroints(N_UC, s, gamma, xtwissdip, disperdip, sdip, rho, E, E0,
                        I, q, m, ytwiss)
    if mode == 'trackbeta':
        figs = plotoptic(UC, diagnostics, s, xtwiss, ytwiss, xdisp)
        if closed:
            figs.append(plotopticpars_closed(xtwiss, xdisp, ytwiss, gamma, Qx,
                                             Xx, Jx, emiteqx, tau_x, Qy, Xy,
                                             Jy, E, emiteqy, tau_y, alpha_mc,
                                             eta_mc, gamma_tr, Q_s, Js,
                                             sigma_E, sigma_tau, sigma_s,
                                             tau_s, U_rad, P_ges, E_c,
                                             lambda_c))
            # Beam sizes from equilibrium emittance and energy spread.
            sigx = sqrt(xtwiss[0, 0, :]*emiteqx+(xdisp[0, :]*sigma_E)**2)
            sigy = sqrt(ytwiss[0, 0, :]*emiteqy)
        else:
            figs.append(plotopticpars_open(xtwiss, xdisp, ytwiss, gamma, E))
            # Beam sizes from the emittances supplied by the lattice file.
            sigx = sqrt(xtwiss[0, 0, :]*emit_x+(xdisp[0, :]*emit_s)**2)
            sigy = sqrt(ytwiss[0, 0, :]*emit_y)
        figs.append(plotbeamsigma(UC, diagnostics, s, sigx, sigy))
    elif mode == 'trackpart':
        # [x, x', y, y', l, delta_p/p_0]
        # [mm, mrad, mm, mrad, mm, promille]
        ideal = array([0, 0, 0, 0, 0, 0])  # Ideal particle
        start = array([1, 1, 1, 1, 1, 0])  # 1 sigma particle
        distmean = 1e-3*ideal[newaxis, :].T
        distsigma = 1e-3*start[newaxis, :].T
        # emittance of the given 1-sigma particle (Wille 3.142)
        emittx = dot(start[:2], dot(inv(xtwiss0), start[:2]))
        emitty = dot(start[2:4], dot(inv(ytwiss0), start[2:4]))
        # Envelope E(s)=sqrt(epsilon_i*beta_i(s))
        ydisp = zeros([1, P_UCS+1])
        emit_x_beta = array([emittx*xtwiss[0, 0, :], emitty*ytwiss[0, 0, :]])
        dispdelta = (vstack([xdisp[0, :], ydisp[0, :]])*1E-3*distsigma[5])**2
        envelope = sqrt(dispdelta + emit_x_beta)
        # start vectors of normally distributed ensemble
        points = P_UCS*N_UC*rounds
        X0 = (distsigma - distmean)*standard_normal([6, particles])
        X0 = dstack([X0, empty([6, particles, points])])
        # First two columns carry the ideal and 1-sigma reference particles.
        X0[:, :2, 0] = hstack([distmean, distsigma])
        X_S = [X0[:, i, :] for i in range(particles)]
        X = trackparts(R, N_UC, X_S, rounds)
        # Repeat s axis and envelope for every unit cell of the ring.
        s0 = s
        envelope0 = envelope
        for i in range(1, N_UC):
            s = concatenate([s, s0[1:]+s0[-1]*i])[:]
            envelope = hstack([envelope, envelope0[:, 1:]])
        figs = plottrajs(s, X, rounds, envelope)
        figs.append(plotphasespace(s, X, rounds, xtwiss, emittx, ytwiss, emitty))
        #figs.append(plotdisptraj(s, P_UCS, E, E0, UCS, UC, diagnostics))
    return figs
|
Simply submit your e-mail address below to get started with our interactive software demo of your Microsoft MCSE 70-411 Microsoft exam.
Forget the previous online MCSA: Windows Server 2012 70-411 Microsoft classroom and go for online MCSE: Server Infrastructure 70-411 Microsoft from Test king study guide with Microsoft Testking 70-411 questions from Test king test questions and answers online to ensure success in Certkey 70-411 training certification exam. Preparation becomes uncomplicated after using Testkings MCSA 70-411 Microsoft updated audio training along with updated Microsoft 70-411 MCSE: Private Cloud from Test kings study notes to pass Microsoft MCSE: Private Cloud 70-411 certification.
Feel free to give Microsoft MCSE 70-411 from Testkings latest demo practice exam a try before purchasing online http://www.examsking.com/70-411.html from TestKing's practice questions to get 100% success in test Testking 70-411 Microsoft test. online Testking 70-411 demo test questions is the best option available before buying latest Test King Microsoft 70-411 MCSA exam questions and answers to make way towards the victory in Microsoft MCSA: Windows Server 2012 70-411 certification. Passing Microsoft MCSE: Server Infrastructure Administering Windows Server 2012 cert is no big concern any longer as our experts have demonstrated the proficiency with latest MCSE Microsoft 70-411 dumps Testking from TestKing's video training and applied their competencies to bring updated MCSE: Server Infrastructure 70-411 Microsoft from Test king test braindumps. online MCSE 70-411 Microsoft from Test kings practise questions and latest Microsoft MCSA 70-411 intereactive testing engine, created by a team of very talented professionals across the globe provide tremendous amount of help in MCSA 70-411 Microsoft test. Before you finalize your decision of purchasing 70-411 updated test questions and answers, spend some time on the 70-411 practice exams sample practise tests to ensure the overall worth of the product and its immense importance in scoring high for 70-411 Microsoft MCSA exam. To handle your Time Constraints in a productive way, our professionals have designed Microsoft 70-411 MCSA: Windows Server 2012 mp3 guide online along with 70-411 Microsoft MCSA latest audio lectures to provide you a relaxing environment to fulfill your desire to score high in Microsoft 70-411 MCSA exam. To get the clearer picture of how to make it with high score in MCSA: Windows Server 2012 70-411 Microsoft test use Microsoft MCSE: Private Cloud 70-411 from Test kings books and Microsoft 70-411 updated engine. 
For the in depth knowledge to achieve and doing it in a precise manner while sitting in the Administering Windows Server 2012 test, we have successfully managed to bring you the marvelous MCSE: Private Cloud 70-411 Microsoft video training online and Microsoft MCSE: Server Infrastructure 70-411 questions online.
Download 70-411 Microsoft MCSE: Private Cloud from Testkings computer based training and latest MCSA 70-411 Microsoft online test dump for the preparation of your Microsoft 70-411 exam and become the member of Microsoft Administering Windows Server 2012 MCSE: Private Cloud exam certified people. We challenge on the behalf of Test King 70-411 Microsoft MCSE practice questions and Microsoft 70-411 online courses that if you use it, then definitely u will pass 70-411 Microsoft MCSA exam easily. The Testking MCSE 70-411 Microsoft online testing engine and Test kings Microsoft 70-411 MCSE: Private Cloud updated exam dumps so you can easily download and one of the best products u can access to pass your Microsoft 70-411 exam right away.
Get use too of real environment of Microsoft Administering Windows Server 2012 MCSE exam before appearing in your actual Microsoft 70-411 MCSE certification then latest Testking's MCSE: Private Cloud 70-411 Microsoft testing engine and 70-411 Microsoft MCSA: Windows Server 2012 from Testking free braindumps are the ideal Products to consult. updated Administering Windows Server 2012 from Test kings test materials and online 70-411 Microsoft MCSE: Server Infrastructure from Testking latest test provides you to the point material; the lengthy explanations of questions are also included for your better preparation of Microsoft 70-411 MCSE exam. Microsoft MCSA: Windows Server 2012 70-411 exam preparation is walking in garden with Microsoft 70-411 Administering Windows Server 2012 from Test king online audio training as it is downloadable windows application for preparing you well for 70-411 Microsoft MCSE cert. Microsoft 70-411 MCSE: Server Infrastructure from TestKing's updated practise exams and updated Microsoft 70-411 MCSE from Test king test dumps are much fascinating and prepares you very well for the MCSA 70-411 Microsoft cert.
One of the outstanding Test King Microsoft 70-411 MCSA: Windows Server 2012 online audio training and the updated Test King's MCSE: Private Cloud 70-411 Microsoft practice questions and answers is very interesting in preparing and saving time for the preparation of Microsoft 70-411 Administering Windows Server 2012 exam. Now become respected professional after passing Microsoft 70-411 MCSE cert Exam with high scores merely if you have used Microsoft 70-411 from Test King's latest notes and do online Microsoft MCSE: Private Cloud 70-411 from Test king practice tests for Preparation. You can improve your show in Real Microsoft 70-411 MCSE cert by our wonderful products updated Test King 70-411 Microsoft MCSE: Server Infrastructure braindump and latest Testkings Microsoft 70-411 MCSE exam engine.
|
from djwebsockets.mixins import BaseWSMixin, MixinFail
import asyncio
class WebSocket:
    """Thread-safe facade over a single websocket connection.

    Wraps the raw socket together with the two loop-side primitives the
    server waits on: a queue-like object for outgoing messages (*send*)
    and a future-like object whose resolution ends the connection
    (*close*).  ``send``/``close`` may be called from any thread; they
    marshal onto the event loop via ``loop.call_soon_threadsafe``.
    """

    # Shared asyncio event loop; assigned by the server before any
    # WebSocket instance is used (class-level on purpose).
    loop = None

    def __init__(self, socket, close, send):
        """Store the socket plus its close-future and send-queue.

        :param socket: underlying websocket/transport object
        :param close: future-like object; resolving it ends the connection
        :param send: queue-like object drained by the writer coroutine
        """
        self.socket = socket
        self.close_handler = close
        self.send_handler = send
        # id() of the socket doubles as a cheap unique connection id.
        self.id = id(socket)
        self.closed = False

    def send(self, Message):
        """Queue *Message* for delivery; safe to call from any thread."""
        self.loop.call_soon_threadsafe(self._send, Message)

    def _send(self, Message):
        # Runs on the event loop thread: hand the message to the writer.
        self.send_handler.put_nowait(Message)

    def close(self):
        """Close the connection once; repeated calls are no-ops.

        Guarding on ``closed`` prevents resolving the close future twice,
        which would raise ``InvalidStateError`` on a real asyncio Future.
        """
        if self.closed:
            return
        self.closed = True
        self.loop.call_soon_threadsafe(self._close)

    def _close(self):
        # Runs on the event loop thread: resolve the future the server is
        # waiting on (-1 signals a locally-initiated close).
        self.close_handler.set_result(-1)
class BaseWSClass:
    """Base class that fans websocket events out across a class's MRO.

    Each class in the hierarchy may define ``on_connect``, ``on_message``
    or ``on_close`` hooks; dispatch walks the MRO from most-base to
    most-derived and invokes every hook it finds.
    """

    @staticmethod
    def super_classes(cls):
        """Return *cls*'s ancestry, most-base class first."""
        return reversed(cls.__mro__)

    @classmethod
    def call_methods(cls, method, *args):
        """Invoke *method* on every class in the MRO that exposes it.

        A ``MixinFail`` raised by any hook closes the socket (``args[0]``)
        and aborts the remaining dispatch.
        """
        for ancestor in cls.super_classes(cls):
            try:
                hook = getattr(ancestor, method, None)
                if hook is not None:
                    hook(*args)
            except MixinFail:
                args[0].close()
                return

    @classmethod
    def _on_connect(cls, socket, path):
        """Dispatch a new-connection event to all ``on_connect`` hooks."""
        cls.call_methods("on_connect", socket, path)

    @classmethod
    def _on_message(cls, socket, message):
        """Dispatch an incoming message to all ``on_message`` hooks."""
        cls.call_methods("on_message", socket, message)

    @classmethod
    def _on_close(cls, socket):
        """Dispatch a connection-closed event to all ``on_close`` hooks."""
        cls.call_methods("on_close", socket)
|
Last Sunday (October 19, 2014) our sermon text was from Romans 3:19-31. The title of the message was Freely Justified by God's Grace. This sermon deals with the doctrine of justification and presents the gospel call to faith and repentance. The Scriptures clearly show in our text that we are not justified by the works of man but by the grace of Jesus Christ.
|
#!/usr/bin/python2
# -*- coding=utf-8 -*-
#************************************************************************
# $Id: spellverbconst.py,v 0.7 2010/12/26 01:10:00 Taha Zerrouki $
#
# ------------
# Description:
# ------------
# Copyright (c) 2009, Arabtechies, Arabeyes Taha Zerrouki
#
#
# -----------------
# Revision Details: (Updated by Revision Control System)
# -----------------
# $Date: 2009/06/02 01:10:00 $
# $Author: Taha Zerrouki $
# $Revision: 0.7 $
# $Source: arabtechies.sourceforge.net
#
#***********************************************************************/
from libqutrub.verb_const import *
# table of suffixes of double transitive verbs
# Table of enclitic-pronoun suffix rows for "cognitive" (heart) verbs that
# are transitive to a rational (human) object.
# Each subject pronoun maps to:
#   'full'  - concatenated suffix-slot codes (Hb..Hq); blanks in the literal
#             mark empty slots and are stripped by replace().  Presumably the
#             codes are expanded into actual suffixes downstream by libqutrub
#             -- TODO confirm against the consumer of this table.
#   'alias' - a numeric id for this row.
TabSuffixesPronominale={
PronounAna :{'full': u" HbHcHdHeHfHgHhHi".replace(' ','') ,'alias':'1'},
PronounNahnu :{'full': u" HcHdHeHfHgHhHi".replace(' ','') ,'alias':'2'},
PronounAnta :{'full': u" HbHcHd HfHg Hi".replace(' ','') ,'alias':'3'},
PronounAnti :{'full': u" HbHc HeHfHgHhHi".replace(' ','') ,'alias':'4'},
PronounAntuma :{'full': u" HbHc HeHfHgHhHi".replace(' ','') ,'alias':'5'},
PronounAntuma_f:{'full': u" HbHc HfHgHhHi".replace(' ','') ,'alias':'6'},
PronounAntum :{'full': u" HjHk Ho Hq".replace(' ','') ,'alias':'7'},
PronounAntunna :{'full': u" HbHc HgHhHi".replace(' ','') ,'alias':'8'},
PronounHuwa :{'full': u" HbHcHdHeHfHgHhHi".replace(' ','') ,'alias':'9'},
PronounHya :{'full': u" HbHcHdHeHfHgHhHi".replace(' ','') ,'alias':'10'},
PronounHuma :{'full': u" HbHcHdHeHfHgHhHi".replace(' ','') ,'alias':'11'},
PronounHuma_f :{'full': u" HbHcHdHeHfHgHhHi".replace(' ','') ,'alias':'12'},
PronounHum :{'full': u" HjHkHlHmHnHoHpHq".replace(' ','') ,'alias':'13'},
PronounHunna :{'full': u" HbHcHdHeHfHgHhHi".replace(' ','') ,'alias':'14'},
}
# Table of enclitic-pronoun suffix rows for non-cognitive transitive verbs
# (same 'full'/'alias' encoding as TabSuffixesPronominale above).
TabSuffixes={
PronounAna :{'full': u" HdHeHfHgHhHi".replace(' ','') ,'alias':'15'},
PronounNahnu :{'full': u" HdHeHfHgHhHi".replace(' ','') ,'alias':'16'},
PronounAnta :{'full': u" HbHc Hi".replace(' ','') ,'alias':'17'},
PronounAnti :{'full': u" HbHc Hi".replace(' ','') ,'alias':'18'},
PronounAntuma :{'full': u" HbHc Hi".replace(' ','') ,'alias':'19'},
PronounAntuma_f:{'full': u" HbHc Hi".replace(' ','') ,'alias':'20'},
PronounAntum :{'full': u" HjHk Hq".replace(' ','') ,'alias':'21'},
PronounAntunna :{'full': u" HbHc Hi".replace(' ','') ,'alias':'22'},
PronounHuwa :{'full': u" HbHcHdHeHfHgHhHi".replace(' ','') ,'alias':'23'},
PronounHya :{'full': u" HbHcHdHeHfHgHhHi".replace(' ','') ,'alias':'24'},
PronounHuma :{'full': u" HbHcHdHeHfHgHhHi".replace(' ','') ,'alias':'25'},
PronounHuma_f :{'full': u" HbHcHdHeHfHgHhHi".replace(' ','') ,'alias':'26'},
PronounHum :{'full': u" HjHkHlHmHnHoHpHq".replace(' ','') ,'alias':'27'},
PronounHunna :{'full': u" HbHcHdHeHfHgHhHi".replace(' ','') ,'alias':'28'},
}
# Table of prefix-slot rows per tense.  'full' holds concatenated prefix
# codes (Pa..Pj); blanks mark empty slots and are stripped by replace().
# 'alias' is a numeric id for the row.
TabPrefixes={
# const for Tense Name
TensePast :{'full': u"PaPbPc PePfPg ".replace(' ','') ,'alias':'29'},
TenseFuture :{'full': u"PaPbPc PePfPg Pj".replace(' ','') ,'alias':'30'},
TenseImperative :{'full': u" Pb Pe ".replace(' ','') ,'alias':'31'},
TenseConfirmedImperative :{'full': u" Pb Pe ".replace(' ','') ,'alias':'32'},
TenseJussiveFuture :{'full': u" Pb Pe Pi ".replace(' ','') ,'alias':'33'},
TenseSubjunctiveFuture :{'full': u" Pb PdPe Ph ".replace(' ','') ,'alias':'34'},
TenseConfirmedFuture :{'full': u"PaPbPc PePfPg ".replace(' ','') ,'alias':'35'},
TensePassivePast :{'full': u"PaPbPc PePfPg ".replace(' ','') ,'alias':'36'},
TensePassiveFuture :{'full': u"PaPbPc PePfPg Pj".replace(' ','') ,'alias':'37'},
TensePassiveJussiveFuture :{'full': u" Pb Pe Pi ".replace(' ','') ,'alias':'38'},
TensePassiveSubjunctiveFuture:{'full': u" Pb PdPe Ph ".replace(' ','') ,'alias':'39'},
TensePassiveConfirmedFuture :{'full': u"PaPbPc PePfPg ".replace(' ','') ,'alias':'40'},
}
# table of suffixes of double transitive verbs
# Table of display-tag rows for verbs transitive to two objects
# (same 'full'/'alias' encoding as the tables above).
TabDisplayTagDouble={
PronounAna :{'full': u"HbHc",'alias':'41'},
PronounNahnu :{'full': u"HbHc",'alias':'42'},
PronounAnta :{'full': u"HbHd",'alias':'43'},
PronounAnti :{'full': u"HbHd",'alias':'44'},
PronounAntuma :{'full': u"HbHd",'alias':'45'},
PronounAntuma_f:{'full': u"HbHd",'alias':'46'},
PronounAntum :{'full': u"HbHd",'alias':'47'},
PronounAntunna :{'full': u"HbHd",'alias':'48'},
PronounHuwa :{'full': u"HbHcHd",'alias':'49'},
PronounHya :{'full': u"HbHcHd",'alias':'50'},
PronounHuma :{'full': u"HbHcHd",'alias':'51'},
PronounHuma_f :{'full': u"HbHcHd",'alias':'52'},
PronounHum :{'full': u"HbHcHd",'alias':'53'},
PronounHunna :{'full': u"HbHcHd",'alias':'54'},
}
# Compact numeric code for each subject pronoun (stable ids used as keys
# or serialized identifiers elsewhere -- TODO confirm consumers).
CodePronoun={
PronounAna :'1',
PronounNahnu :'2',
PronounAnta :'3',
PronounAnti :'4',
PronounAntuma :'5',
PronounAntuma_f:'6',
PronounAntum :'7',
PronounAntunna :'8',
PronounHuwa :'9',
PronounHya :'10',
PronounHuma :'11',
PronounHuma_f :'12',
PronounHum :'13',
PronounHunna :'14',
}
# Compact numeric code for each conjugation tense/voice combination.
CodeTense={
# const for Tense Name
TensePast :'1',
TenseFuture :'2',
TenseImperative :'3',
TenseConfirmedImperative :'4',
TenseJussiveFuture :'5',
TenseSubjunctiveFuture :'6',
TenseConfirmedFuture :'7',
TensePassivePast :'8',
TensePassiveFuture :'9',
TensePassiveJussiveFuture :'10',
TensePassiveSubjunctiveFuture:'11',
TensePassiveConfirmedFuture :'12',
}
|
Get the gear! Look great while promoting sun safety and education. The Live SunSmart Foundation offers great products for you or as gifts for others. All proceeds from the Live SunSmart store benefit the Live SunSmart Foundation, furthering education and awareness of skin cancer.
|
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# An alternate Python Minecraft library for the Rasperry-Pi
# Copyright (c) 2013-2016 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
The events module defines the :class:`Events` class, which provides methods for
querying events in the Minecraft world, and the :class:`BlockHitEvent`,
:class:`PlayerPosEvent`, :class:`ChatPostEvent`, and :class:`IdleEvent` classes
which represent the various event types.
.. note::
All items in this module are available from the :mod:`picraft` namespace
without having to import :mod:`picraft.events` directly.
The following items are defined in the module:
Events
======
.. autoclass:: Events
:members:
BlockHitEvent
=============
.. autoclass:: BlockHitEvent(pos, face, player)
:members:
PlayerPosEvent
==============
.. autoclass:: PlayerPosEvent(old_pos, new_pos, player)
:members:
ChatPostEvent
=============
.. autoclass:: ChatPostEvent(message, player)
:members:
IdleEvent
=========
.. autoclass:: IdleEvent()
:members:
"""
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
str = type('')
import logging
import threading
import time
import warnings
from collections import namedtuple, Container
from weakref import WeakSet
from functools import update_wrapper
from types import FunctionType
from .exc import ConnectionClosed, NoHandlersWarning
from .vector import Vector
from .player import Player
logger = logging.getLogger('picraft')
class BlockHitEvent(namedtuple('BlockHitEvent', ('pos', 'face', 'player'))):
    """
    Event generated when a player strikes a block with their sword.

    Instances are normally produced by :meth:`~Events.poll` rather than
    constructed directly.

    .. note::

        The block hit event only registers when the player *right clicks*
        with the sword; left clicks do not count.

    .. attribute:: pos

        A :class:`~picraft.vector.Vector` giving the position of the block
        that was struck.

    .. attribute:: face

        Which side of the block was struck, as one of 'x+', 'x-', 'y+',
        'y-', 'z+', or 'z-' — the axis, and direction along that axis,
        that the side faces:

        .. image:: images/block_faces.*

    .. attribute:: player

        The :class:`~picraft.player.Player` that hit the block.
    """

    @classmethod
    def from_string(cls, connection, s):
        # Server format: "<vector>,<face-index>,<player-id>"
        vec_part, face_part, player_part = s.rsplit(',', 2)
        face_names = {
            0: 'y-',
            1: 'y+',
            2: 'z-',
            3: 'z+',
            4: 'x-',
            5: 'x+',
            }
        return cls(
            Vector.from_string(vec_part),
            face_names[int(face_part)],
            Player(connection, int(player_part)))

    @property
    def __dict__(self):
        # Ensure __dict__ property works in Python 3.3 and above.
        return super(BlockHitEvent, self).__dict__

    def __repr__(self):
        details = (self.pos, self.face, self.player.player_id)
        return '<BlockHitEvent pos=%s face=%r player=%d>' % details
class PlayerPosEvent(namedtuple('PlayerPosEvent', ('old_pos', 'new_pos', 'player'))):
    """
    Event generated when a tracked player moves in the Minecraft world.

    Instances are normally produced by :meth:`~Events.poll` rather than
    constructed directly.

    .. attribute:: old_pos

        A :class:`~picraft.vector.Vector` giving the player's location
        prior to this event (the actual position including decimal places,
        not the tile position).

    .. attribute:: new_pos

        A :class:`~picraft.vector.Vector` giving the player's location as
        of this event (the actual position including decimal places, not
        the tile position).

    .. attribute:: player

        The :class:`~picraft.player.Player` that moved.
    """

    @property
    def __dict__(self):
        # Ensure __dict__ property works in Python 3.3 and above.
        return super(PlayerPosEvent, self).__dict__

    def __repr__(self):
        details = (self.old_pos, self.new_pos, self.player.player_id)
        return '<PlayerPosEvent old_pos=%s new_pos=%s player=%d>' % details
class ChatPostEvent(namedtuple('ChatPostEvent', ('message', 'player'))):
    """
    Event generated when a chat message is posted in the Minecraft world.

    Instances are normally produced by :meth:`~Events.poll` rather than
    constructed directly.

    .. note::

        Chat events are only generated by the Raspberry Juice server, not
        by Minecraft Pi edition.

    .. attribute:: message

        The message that was posted to the world.

    .. attribute:: player

        The :class:`~picraft.player.Player` that posted the message.
    """

    @classmethod
    def from_string(cls, connection, s):
        # Server format: "<player-id>,<message>"
        pid, text = s.split(',', 1)
        return cls(text, Player(connection, int(pid)))

    @property
    def __dict__(self):
        # Ensure __dict__ property works in Python 3.3 and above.
        return super(ChatPostEvent, self).__dict__

    def __repr__(self):
        details = (self.message, self.player.player_id)
        return '<ChatPostEvent message=%s player=%d>' % details
class IdleEvent(namedtuple('IdleEvent', ())):
    """
    Event fired when a poll produces no other events.

    Only generated when :attr:`Events.include_idle` is ``True``.
    """

    @property
    def __dict__(self):
        # Ensure __dict__ property works in Python 3.3 and above.
        return super(IdleEvent, self).__dict__

    def __repr__(self):
        return '<IdleEvent>'
class Events(object):
    """
    This class implements the :attr:`~picraft.world.World.events` attribute.

    There are two ways of responding to picraft's events: the first is to
    :meth:`poll` for them manually, and process each event in the resulting
    list::

        >>> for event in world.events.poll():
        ...     print(repr(event))
        ...
        <BlockHitEvent pos=1,1,1 face="y+" player=1>,
        <PlayerPosEvent old_pos=0.2,1.0,0.7 new_pos=0.3,1.0,0.7 player=1>

    The second is to "tag" functions as event handlers with the decorators
    provided and then call the :meth:`main_loop` function which will handle
    polling the server for you, and call all the relevant functions as needed::

        @world.events.on_block_hit(pos=Vector(1,1,1))
        def hit_block(event):
            print('You hit the block at %s' % event.pos)

        world.events.main_loop()

    By default, only block hit events will be tracked. This is because it is
    the only type of event that the Minecraft server provides information about
    itself, and thus the only type of event that can be processed relatively
    efficiently. If you wish to track player positions, assign a set of player
    ids to the :attr:`track_players` attribute. If you wish to include idle
    events (which fire when nothing else is produced in response to
    :meth:`poll`) then set :attr:`include_idle` to ``True``.

    .. note::

        If you are using a Raspberry Juice server, chat post events are also
        tracked by default. Chat post events are only supported with Raspberry
        Juice servers; Minecraft Pi edition doesn't support chat post events.

    Finally, the :attr:`poll_gap` attribute specifies how long to pause during
    each iteration of :meth:`main_loop` to permit event handlers some time to
    interact with the server. Setting this to 0 will provide the fastest
    response to events, but will result in event handlers having to fight with
    event polling for access to the server.
    """

    def __init__(self, connection, poll_gap=0.1, include_idle=False):
        self._connection = connection
        # Registered EventHandler instances, in registration order.
        self._handlers = []
        # Instances of @has_handlers classes; weak refs so dead instances
        # drop out automatically.
        self._handler_instances = WeakSet()
        self._poll_gap = poll_gap
        self._include_idle = include_idle
        # Maps tracked player id -> last known position (rounded to 0.1).
        self._track_players = {}

    def _get_poll_gap(self):
        return self._poll_gap

    def _set_poll_gap(self, value):
        self._poll_gap = float(value)

    poll_gap = property(_get_poll_gap, _set_poll_gap, doc="""\
        The length of time (in seconds) to pause during :meth:`main_loop`.

        This property specifies the length of time to wait at the end of each
        iteration of :meth:`main_loop`. By default this is 0.1 seconds.

        The purpose of the pause is to give event handlers executing in the
        background time to communicate with the Minecraft server. Setting this
        to 0.0 will result in faster response to events, but also starves
        threaded event handlers of time to communicate with the server,
        resulting in "choppy" performance.
        """)

    def _get_track_players(self):
        return self._track_players.keys()

    def _set_track_players(self, value):
        try:
            # Snapshot each tracked player's current (rounded) position so
            # poll() can detect subsequent movement.
            self._track_players = {
                pid: Player(self._connection, pid).pos.round(1)
                for pid in value
                }
        except TypeError:
            # value was not iterable; accept a single player id instead.
            if not isinstance(value, int):
                raise ValueError(
                    'track_players value must be a player id '
                    'or a sequence of player ids')
            self._track_players = {
                value: Player(self._connection, value).pos.round(1)
                }
        if self._connection.server_version != 'raspberry-juice':
            # Filter out calculated directions for untracked players
            self._connection._directions = {
                pid: delta
                for (pid, delta) in self._connection._directions.items()
                if pid in self._track_players
                }

    track_players = property(_get_track_players, _set_track_players, doc="""\
        The set of player ids for which movement should be tracked.

        By default the :meth:`poll` method will not produce player position
        events (:class:`PlayerPosEvent`). Producing these events requires extra
        interactions with the Minecraft server (one for each player tracked)
        which slow down response to block hit events.

        If you wish to track player positions, set this attribute to the set of
        player ids you wish to track and their positions will be stored. The
        next time :meth:`poll` is called it will query the positions for all
        specified players and fire player position events if they have changed.

        Given that the :attr:`~picraft.world.World.players` attribute
        represents a dictionary mapping player ids to players, if you wish to
        track all players you can simply do::

            >>> world.events.track_players = world.players
        """)

    def _get_include_idle(self):
        return self._include_idle

    def _set_include_idle(self, value):
        self._include_idle = bool(value)

    include_idle = property(_get_include_idle, _set_include_idle, doc="""\
        If ``True``, generate an idle event when no other events would be
        generated by :meth:`poll`. This attribute defaults to ``False``.
        """)

    def clear(self):
        """
        Forget all pending events that have not yet been retrieved with
        :meth:`poll`.

        This method is used to clear the list of events that have occurred
        since the last call to :meth:`poll` without retrieving them. This is
        useful for ensuring that events subsequently retrieved definitely
        occurred *after* the call to :meth:`clear`.
        """
        # Re-snapshot tracked player positions so stale movement isn't
        # reported on the next poll().
        self._set_track_players(self._get_track_players())
        self._connection.send('events.clear()')

    def poll(self):
        """
        Return a list of all events that have occurred since the last call to
        :meth:`poll`.

        For example::

            >>> w = World()
            >>> w.events.track_players = w.players
            >>> w.events.include_idle = True
            >>> w.events.poll()
            [<PlayerPosEvent old_pos=0.2,1.0,0.7 new_pos=0.3,1.0,0.7 player=1>,
             <BlockHitEvent pos=1,1,1 face="x+" player=1>,
             <BlockHitEvent pos=1,1,1 face="x+" player=1>]
            >>> w.events.poll()
            [<IdleEvent>]
        """
        def player_pos_events(positions):
            # Compare each tracked player's stored position against the
            # server's current value, yielding an event on any change.
            for pid, old_pos in positions.items():
                player = Player(self._connection, pid)
                new_pos = player.pos.round(1)
                if old_pos != new_pos:
                    if self._connection.server_version != 'raspberry-juice':
                        # Calculate directions for tracked players on platforms
                        # which don't provide it natively
                        self._connection._directions[pid] = new_pos - old_pos
                    yield PlayerPosEvent(old_pos, new_pos, player)
                positions[pid] = new_pos

        def block_hit_events():
            # The server returns hits as a '|'-separated list of records.
            s = self._connection.transact('events.block.hits()')
            if s:
                for e in s.split('|'):
                    yield BlockHitEvent.from_string(self._connection, e)

        def chat_post_events():
            # Chat posts are only available on Raspberry Juice servers.
            if self._connection.server_version == 'raspberry-juice':
                s = self._connection.transact('events.chat.posts()')
                if s:
                    for e in s.split('|'):
                        yield ChatPostEvent.from_string(self._connection, e)

        events = list(player_pos_events(self._track_players)) + list(block_hit_events()) + list(chat_post_events())
        if events:
            return events
        elif self._include_idle:
            return [IdleEvent()]
        else:
            return []

    def main_loop(self):
        """
        Starts the event polling loop when using the decorator style of event
        handling (see :meth:`on_block_hit`).

        This method will not return, so be sure that you have specified all
        your event handlers before calling it. The event loop can only be
        broken by an unhandled exception, or by closing the world's connection
        (in the latter case the resulting :exc:`~picraft.exc.ConnectionClosed`
        exception will be suppressed as it is assumed that you want to end the
        script cleanly).
        """
        logger.info('Entering event loop')
        try:
            while True:
                self.process()
                time.sleep(self.poll_gap)
        except ConnectionClosed:
            logger.info('Connection closed; exiting event loop')

    def process(self):
        """
        Poll the server for events and call any relevant event handlers
        registered with :meth:`on_block_hit`.

        This method is called repeatedly by the event handler loop implemented
        by :meth:`main_loop`; developers should only call this method when
        implementing their own event loop manually, or when their (presumably
        non-threaded) event handler is engaged in a long operation and they
        wish to permit events to be processed in the meantime.
        """
        for event in self.poll():
            for handler in self._handlers:
                if handler.matches(event):
                    handler.execute(event)

    def has_handlers(self, cls):
        """
        Decorator for registering a class as containing picraft event handlers.

        If you are writing a class which contains methods that you wish to
        use as event handlers for picraft events, you must decorate the class
        with ``@has_handlers``. This will ensure that picraft tracks instances
        of the class and dispatches events to each instance that exists when
        the event occurs.

        For example::

            from picraft import World, Block, Vector, X, Y, Z

            world = World()

            @world.events.has_handlers
            class HitMe(object):
                def __init__(self, pos):
                    self.pos = pos
                    self.been_hit = False
                    world.blocks[self.pos] = Block('diamond_block')

                @world.events.on_block_hit()
                def was_i_hit(self, event):
                    if event.pos == self.pos:
                        self.been_hit = True
                        print('Block at %s was hit' % str(self.pos))

            p = world.player.tile_pos
            block1 = HitMe(p + 2*X)
            block2 = HitMe(p + 2*Z)
            world.events.main_loop()

        Class-based handlers are an advanced feature and have some notable
        limitations. For instance, in the example above the ``on_block_hit``
        handler couldn't be declared with the block's position because this was
        only known at instance creation time, not at class creation time (which
        was when the handler was registered).

        Furthermore, class-based handlers must be regular instance methods
        (those which accept the instance, self, as the first argument); they
        cannot be class methods or static methods.

        .. note::

            The ``@has_handlers`` decorator takes no arguments and shouldn't
            be called, unlike event handler decorators.
        """
        # Search the class for handler methods, appending the class to the
        # handler's list of associated classes (if you're thinking why is this
        # a collection, consider that a method can be associated with multiple
        # classes either by inheritance or direct assignment)
        handlers_found = 0
        for item in dir(cls):
            item = getattr(cls, item, None)
            if item: # PY2
                # On Python 2 methods are wrappers; unwrap to the function.
                item = getattr(item, 'im_func', item)
            if item and isinstance(item, FunctionType):
                try:
                    # Only functions previously tagged by an on_* decorator
                    # carry a _picraft_classes set.
                    item._picraft_classes.add(cls)
                    handlers_found += 1
                except AttributeError:
                    pass
        if not handlers_found:
            warnings.warn(NoHandlersWarning('no handlers found in %s' % cls))
            return cls
        # Replace __init__ on the class with a closure that adds every instance
        # constructed to self._handler_instances. As this is a WeakSet,
        # instances that die will be implicitly removed
        old_init = getattr(cls, '__init__', None)
        def __init__(this, *args, **kwargs):
            if old_init:
                old_init(this, *args, **kwargs)
            self._handler_instances.add(this)
        if old_init:
            update_wrapper(__init__, old_init)
        cls.__init__ = __init__
        return cls

    def _handler_closure(self, f):
        # Wrap handler function *f* so that, at dispatch time, it is either
        # called directly (plain function) or bound to every live instance of
        # each class registered via @has_handlers.
        def handler(event):
            if not f._picraft_classes:
                # The handler is a straight-forward function; just call it
                f(event)
            else:
                # The handler is an unbound method (yes, I know these don't
                # really exist in Python 3; it's a function which is expecting
                # to be called from an object instance if you like). Here we
                # search the set of instances of classes which were registered
                # as having handlers (by @has_handlers)
                for cls in f._picraft_classes:
                    for inst in self._handler_instances:
                        # Check whether the instance has the right class; note
                        # that we *don't* use isinstance() here as we want an
                        # exact match
                        if inst.__class__ == cls:
                            # Bind the function to the instance via its
                            # descriptor
                            f.__get__(inst, cls)(event)
        update_wrapper(handler, f)
        return handler

    def on_idle(self, thread=False, multi=True):
        """
        Decorator for registering a function/method as an idle handler.

        This decorator is used to mark a function as an event handler which
        will be called when no other event handlers have been called in an
        iteration of :meth:`main_loop`. The function will be called with the
        corresponding :class:`IdleEvent` as the only argument.

        Note that idle events will only be generated if :attr:`include_idle`
        is set to ``True``.
        """
        def decorator(f):
            self._handlers.append(
                IdleHandler(self._handler_closure(f), thread, multi))
            # Tag f so @has_handlers can later associate classes with it.
            f._picraft_classes = set()
            return f
        return decorator

    def on_player_pos(self, thread=False, multi=True, old_pos=None, new_pos=None):
        """
        Decorator for registering a function/method as a position change
        handler.

        This decorator is used to mark a function as an event handler which
        will be called for any events indicating that a player's position has
        changed while :meth:`main_loop` is executing. The function will be
        called with the corresponding :class:`PlayerPosEvent` as the only
        argument.

        The *old_pos* and *new_pos* parameters can be used to specify vectors
        or sequences of vectors (including a
        :class:`~picraft.vector.vector_range`) that the player position events
        must match in order to activate the associated handler. For example, to
        fire a handler every time any player enters or walks over blocks within
        (-10, 0, -10) to (10, 0, 10)::

            from picraft import World, Vector, vector_range

            world = World()
            world.events.track_players = world.players

            from_pos = Vector(-10, 0, -10)
            to_pos = Vector(10, 0, 10)

            @world.events.on_player_pos(new_pos=vector_range(from_pos, to_pos + 1))
            def in_box(event):
                world.say('Player %d stepped in the box' % event.player.player_id)

            world.events.main_loop()

        Various effects can be achieved by combining *old_pos* and *new_pos*
        filters. For example, one could detect when a player crosses a boundary
        in a particular direction, or decide when a player enters or leaves a
        particular area.

        Note that only players specified in :attr:`track_players` will generate
        player position events.
        """
        def decorator(f):
            self._handlers.append(
                PlayerPosHandler(self._handler_closure(f),
                                 thread, multi, old_pos, new_pos))
            # Tag f so @has_handlers can later associate classes with it.
            f._picraft_classes = set()
            return f
        return decorator

    def on_block_hit(self, thread=False, multi=True, pos=None, face=None):
        """
        Decorator for registering a function/method as a block hit handler.

        This decorator is used to mark a function as an event handler which
        will be called for any events indicating a block has been hit while
        :meth:`main_loop` is executing. The function will be called with the
        corresponding :class:`BlockHitEvent` as the only argument.

        The *pos* parameter can be used to specify a vector or sequence of
        vectors (including a :class:`~picraft.vector.vector_range`); in this
        case the event handler will only be called for block hits on matching
        vectors.

        The *face* parameter can be used to specify a face or sequence of
        faces for which the handler will be called.

        For example, to specify that one handler should be called for hits
        on the top of any blocks, and another should be called only for hits
        on any face of block at the origin one could use the following code::

            from picraft import World, Vector

            world = World()

            @world.events.on_block_hit(pos=Vector(0, 0, 0))
            def origin_hit(event):
                world.say('You hit the block at the origin')

            @world.events.on_block_hit(face="y+")
            def top_hit(event):
                world.say('You hit the top of a block at %d,%d,%d' % event.pos)

            world.events.main_loop()

        The *thread* parameter (which defaults to ``False``) can be used to
        specify that the handler should be executed in its own background
        thread, in parallel with other handlers.

        Finally, the *multi* parameter (which only applies when *thread* is
        ``True``) specifies whether multi-threaded handlers should be allowed
        to execute in parallel. When ``True`` (the default), threaded handlers
        execute as many times as activated in parallel. When ``False``, a
        single instance of a threaded handler is allowed to execute at any
        given time; simultaneous activations are ignored (but not queued, as
        with unthreaded handlers).
        """
        def decorator(f):
            self._handlers.append(
                BlockHitHandler(self._handler_closure(f),
                                thread, multi, pos, face))
            # Tag f so @has_handlers can later associate classes with it.
            f._picraft_classes = set()
            return f
        return decorator

    def on_chat_post(self, thread=False, multi=True, message=None):
        """
        Decorator for registering a function/method as a chat event handler.

        This decorator is used to mark a function as an event handler which
        will be called for events indicating a chat message was posted to
        the world while :meth:`main_loop` is executing. The function will be
        called with the corresponding :class:`ChatPostEvent` as the only
        argument.

        .. note::

            Only the Raspberry Juice server generates chat events; Minecraft
            Pi Edition does not support this event type.

        The *message* parameter can be used to specify a string or regular
        expression; in this case the event handler will only be called for chat
        messages which match this value. For example::

            import re
            from picraft import World, Vector

            world = World()

            @world.events.on_chat_post(message="hello world")
            def echo(event):
                world.say("Hello player %d!" % event.player.player_id)

            @world.events.on_chat_post(message=re.compile(r"teleport_me \d+,\d+,\d+"))
            def teleport(event):
                x, y, z = event.message[len("teleport_me "):].split(",")
                event.player.pos = Vector(int(x), int(y), int(z))

            world.events.main_loop()

        The *thread* parameter (which defaults to ``False``) can be used to
        specify that the handler should be executed in its own background
        thread, in parallel with other handlers.

        Finally, the *multi* parameter (which only applies when *thread* is
        ``True``) specifies whether multi-threaded handlers should be allowed
        to execute in parallel. When ``True`` (the default), threaded handlers
        execute as many times as activated in parallel. When ``False``, a
        single instance of a threaded handler is allowed to execute at any
        given time; simultaneous activations are ignored (but not queued, as
        with unthreaded handlers).
        """
        def decorator(f):
            self._handlers.append(
                ChatPostHandler(self._handler_closure(f),
                                thread, multi, message))
            # Tag f so @has_handlers can later associate classes with it.
            f._picraft_classes = set()
            return f
        return decorator
class EventHandler(object):
    """
    Internal pairing of a handler callback with its dispatch policy.

    *action* is the callable invoked for each matching event received from
    the server.  When *thread* is true the callable runs on a background
    thread; *multi* then decides whether overlapping activations may run
    concurrently (``True``) or whether :meth:`execute` waits for any prior
    run to finish before starting another (``False``).
    """
    def __init__(self, action, thread, multi):
        self.action = action
        self.thread = thread
        self.multi = multi
        # Slot for the single in-flight thread when multi is False.
        self._thread = None

    def execute(self, event):
        """
        Run *action* for *event*, spawning a background thread when
        configured and suppressing overlap for single-instance handlers.
        """
        if not self.thread:
            self._execute_handler(event)
        elif self.multi:
            threading.Thread(target=self._execute_handler, args=(event,)).start()
        elif not self._thread:
            self._thread = threading.Thread(
                target=self._execute_single, args=(event,))
            self._thread.start()

    def _execute_single(self, event):
        # Clear the slot once finished so the next activation may start.
        try:
            self._execute_handler(event)
        finally:
            self._thread = None

    def _execute_handler(self, event):
        self.action(event)

    def matches(self, event):
        """
        Return whether *event* satisfies every filter of this handler;
        concrete subclasses must override this.
        """
        raise NotImplementedError
class PlayerPosHandler(EventHandler):
    """
    Associates a handler with player-position events.

    Besides the parameters of :class:`EventHandler`, the constructor takes
    *old_pos* and *new_pos*: vectors (or containers of vectors) the
    event's positions must cross for the action to fire.  Both filters
    must match.
    """
    def __init__(self, action, thread, multi, old_pos, new_pos):
        super(PlayerPosHandler, self).__init__(action, thread, multi)
        self.old_pos = old_pos
        self.new_pos = new_pos

    def matches(self, event):
        if not isinstance(event, PlayerPosEvent):
            return False
        return (self.matches_pos(self.old_pos, event.old_pos) and
                self.matches_pos(self.new_pos, event.new_pos))

    def matches_pos(self, test, pos):
        # None means "no restriction" on this side of the transition.
        if test is None:
            return True
        # Events report fractional positions; compare whole tiles.
        tile = pos.floor()
        if isinstance(test, Vector):
            return test == tile
        if isinstance(test, Container):
            return tile in test
        raise TypeError(
            "%r is not a valid position test; expected Vector or "
            "sequence of Vector" % test)
class BlockHitHandler(EventHandler):
    """
    Associates a handler with block-hit events.

    Besides the parameters of :class:`EventHandler`, the constructor takes
    *pos* (a vector, or container of vectors, the hit block must match)
    and *face* (a face name, or container of face names, the struck face
    must match).  Both filters must match for the action to fire.
    """
    def __init__(self, action, thread, multi, pos, face):
        super(BlockHitHandler, self).__init__(action, thread, multi)
        self.pos = pos
        # Normalize byte strings so comparisons against str always work.
        self.face = face.decode('ascii') if isinstance(face, bytes) else face

    def matches(self, event):
        if not isinstance(event, BlockHitEvent):
            return False
        return self.matches_pos(event.pos) and self.matches_face(event.face)

    def matches_pos(self, pos):
        if self.pos is None:
            return True
        if isinstance(self.pos, Vector):
            return self.pos == pos
        if isinstance(self.pos, Container):
            return pos in self.pos
        raise TypeError(
            "%r is not a valid position test; expected Vector or "
            "sequence of Vector" % pos)

    def matches_face(self, face):
        if self.face is None:
            return True
        if isinstance(self.face, str):
            return self.face == face
        if isinstance(self.face, Container):
            return face in self.face
        raise TypeError(
            "%r is not a valid face test; expected string or sequence "
            "of strings" % face)
class ChatPostHandler(EventHandler):
    """
    Associates a handler with chat-post events.

    Besides the parameters of :class:`EventHandler`, the constructor takes
    *message*: the chat text (an exact string, or a compiled regular
    expression) an event must carry to activate the action.
    """
    def __init__(self, action, thread, multi, message):
        super(ChatPostHandler, self).__init__(action, thread, multi)
        # Normalize byte strings so comparisons against str always work.
        self.message = (
            message.decode('ascii') if isinstance(message, bytes)
            else message)

    def matches(self, event):
        return (
            isinstance(event, ChatPostEvent) and
            self.matches_message(event.message))

    def matches_message(self, message):
        if self.message is None:
            return True
        if isinstance(self.message, str):
            return self.message == message
        # Anything else must behave like a compiled regex.
        try:
            return self.message.match(message)
        except AttributeError:
            raise TypeError(
                "%r is not a valid message test; expected string"
                "or regular expression" % message)
class IdleHandler(EventHandler):
    """
    This class associates a handler with an idle event.
    """
    def matches(self, event):
        # Idle events carry no payload, so the only filter is the event type.
        return isinstance(event, IdleEvent)
|
Christopher D. Quinn focuses on all aspects of land use and zoning matters. Mr. Quinn regularly appears before planning boards and zoning boards of adjustment of municipalities throughout northern New Jersey. Clients are represented in a wide variety of matters including site plans, subdivisions, variances and re-zoning. He also represents planning and zoning boards.
Additionally, Mr. Quinn represents clients in residential real estate purchases and sales, and in commercial real estate purchases and sales, including stock and asset acquisitions. He has extensive experience in contract drafting and negotiations, providing legal consultation and document review to individuals and business entities of all sizes in complex business transactions relating to business formation, including incorporations, limited liability company formations and partnerships. He has a broad background in banking and corporate law and represents several lending institutions.
Well versed in Estate Administration, Mr. Quinn prepares all documents necessary to probate a will and attends to all procedural requirements of administering an estate. He counsels fiduciaries and prepares and files tax returns for the estate (Federal Estate Tax Returns and State Inheritance and Estate Tax Returns).
Mr. Quinn also represents clients in need of Estate Planning, including drafting Wills, Trusts, Powers of Attorney, and Medical Directives (Living Wills) as well as Guardianship for elderly and incapacitated individuals.
|
# coding: utf-8
import anyjson
from urllib import urlencode
from nose.tools import eq_
from allmychanges.models import Changelog, Preview
from django.test import Client
from django.core.urlresolvers import reverse
from allmychanges.tasks import _task_log
from .utils import (refresh, check_status_code,
create_user, put_json, json)
from hamcrest import (
assert_that,
has_entries)
def test_preview():
    """End-to-end check of the changelog preview lifecycle: creation from the
    add-new page, rendering of parsed versions, and transition to the error
    state when the source is switched to a non-existent one."""
    _task_log[:] = []
    cl = Client()
    eq_(0, Preview.objects.count())
    eq_(0, Changelog.objects.count())
    # when user opens add-new page, a new changelog and preview
    # are created
    source = 'test+samples/very-simple.md'
    cl.get(reverse('add-new') + '?' + urlencode(dict(url=source)))
    eq_(1, Changelog.objects.count())
    eq_(1, Preview.objects.count())
    preview = Preview.objects.all()[0]
    eq_(None, preview.user)
    assert preview.light_user != None
    eq_([('update_preview_task', (1,), {})], _task_log)
    preview_url = reverse('preview', kwargs=dict(pk=preview.pk))
    response = cl.get(preview_url)
    eq_(200, response.status_code)
    assert 'Some <span class="changelog-highlight-fix">bugfix</span>.' in response.content
    assert 'Initial release.' in response.content
    # at this point the preview object should have versions, while the changelog has none
    changelog = Changelog.objects.all()[0]
    eq_(0, changelog.versions.count())
    eq_(2, preview.versions.count())
    # check that the preview has a "log" field and that it is a list
    preview = refresh(preview)
    eq_(6, len(preview.log))
    # now point the preview at a non-existent source
    response = cl.post(preview_url,
                       data=anyjson.serialize(dict(source='test+another source',
                                                   ignore_list='NEWS',
                                                   search_list='docs')),
                       content_type='application/json')
    eq_(200, response.status_code)
    preview = refresh(preview)
    eq_('test+another source', preview.source)
    eq_('NEWS', preview.ignore_list)
    eq_('docs', preview.search_list)
    # and another preview task was scheduled
    eq_([('update_preview_task', (1,), {}),
         ('update_preview_task', (1,), {})], _task_log)
    # the versions should have been deleted
    eq_(0, changelog.versions.count())
    eq_(0, preview.versions.count())
    # and the preview itself should have moved to the "error" state
    eq_('error', preview.status)
def test_update_package_preview_versions():
    """Preview updates parse versions; the ignore list filters source files."""
    changelog = Changelog.objects.create(
        namespace='python', name='pip', source='test+samples/markdown-release-notes')
    preview = changelog.previews.create(light_user='anonymous',
                                        source=changelog.source)
    preview.schedule_update()
    # No versions belong to the changelog itself yet.
    eq_(0, changelog.versions.filter(preview=None).count())

    def headline(version):
        # First line of the rendered changelog entry.
        return version.processed_text.split('\n', 1)[0]

    def headlines(versions):
        return [headline(v) for v in versions]

    eq_([
        '<ul>',
        u'<h1>0.1.1</h1>', u'<h1>0.1.0</h1>'],
        headlines(preview.versions.all()))
    # now we'll check if ignore list works
    preview.set_ignore_list(['docs/unrelated-crap.md'])
    preview.save()
    preview.schedule_update()
    eq_([u'<h1>0.1.1</h1>', u'<h1>0.1.0</h1>'],
        headlines(preview.versions.all()))
def test_when_preview_saved_versions_are_copied_to_changelog():
    """Saving changelog details moves preview versions into an empty changelog."""
    # this only should happen when changelog is empty
    moderator = create_user('art')
    changelog = Changelog.objects.create(
        namespace='python', name='pip', source='test+samples/markdown-release-notes')
    changelog.add_to_moderators(moderator)

    client = Client()
    client.login(username='art', password='art')
    response = client.get(reverse('changelog-list'))
    preview = changelog.previews.create(
        light_user=response.cookies.get('light_user_id').value,
        source=changelog.source)
    preview.schedule_update()
    # All parsed versions hang off the preview, none off the changelog.
    eq_(0, changelog.versions.count())
    eq_(3, preview.versions.count())

    response = put_json(client,
                        reverse('changelog-detail', kwargs=dict(pk=changelog.pk)),
                        expected_code=200,
                        namespace=changelog.namespace,
                        name=changelog.name,
                        source='http://github.com/svetlyak40wt/django-fields',
                        downloader='git.vcs')
    # versions now moved to the changelog
    eq_(3, changelog.versions.count())
    eq_(0, preview.versions.count())
def test_get_preview_details_via_api():
    """The preview-detail API endpoint exposes status, processing state and log."""
    changelog = Changelog.objects.create(
        namespace='python', name='pip', source='test+samples/markdown-release-notes')
    preview = changelog.previews.create(light_user='anonymous',
                                        source=changelog.source)
    response = Client().get(
        reverse('preview-detail', kwargs=dict(pk=preview.pk)))
    check_status_code(200, response)
    assert_that(json(response),
                has_entries(
                    status='created',
                    processing_status='',
                    log=[]))
|
These 20 curricular units integrate STEM skills and environmental learning through the German language. Each has been used successfully with students at Waldsee German Language Village. They have been adapted for use by classroom teachers of German.
These units are part of a unique partnership between Concordia Language Villages and the German Environmental Foundation (Deutsche Bundesstiftung Umwelt), Europe’s largest foundation dedicated to environmental learning. Together we built an environmental learning center, called das Waldsee BioHaus, at the German Language Village site. Das Waldsee BioHaus was the first certified passive house in North America and winner of Minnesota’s Environmental Prize. It has become a showcase of modern Germany’s approach to sustainable living.
Participants in Waldsee’s residential immersion programming have hands-on opportunities to learn German through environmental awareness, as well as to advance their STEM skills in our high school credit session. These curricular units unite Waldsee’s content-based learning approaches to the environment and to STEM to advance greater knowledge of German language and a deeper awareness of German culture.
3.3Wie ist das Wetter heute?
|
__author__ = "Panagiotis Garefalakis"
__copyright__ = "Imperial College London"
# The MIT License (MIT)
#
# Copyright (c) 2016 Panagiotis Garefalakis
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os
import numpy as np
import pandas as pd
import plots.utils as utils
import brewer2mpl
# brewer2mpl.get_map args: set name, set type, number of colors
# bmap = brewer2mpl.get_map('RdBu', 'Diverging', 5)
bmap = brewer2mpl.get_map('Set1', 'Qualitative', 5)
colors = bmap.mpl_colors
# Per-technique statistics files produced by the placement experiments.
files = ["CPLEX-off_stats.csv", "CPLEX-on_stats.csv", "GR-NODE_CAND_stats.csv", "GR-SERIAL_stats.csv", "GR-RANDOM_stats.csv"]
# NOTE(review): 'labels' has four entries for five files; the plotting code
# below actually uses 'labels_map', not this list — confirm it is still needed.
labels = ["ILP-offline", "ILP-online", "Node Candidates", "Random"]
# Maps the technique column value from the CSVs to a human-readable legend label.
labels_map={"CPLEX-on": "ILP-online", "CPLEX-off": "ILP-offline",
            "GR-NODE_CAND": "Node Candidates", "GR-RANDOM": "Greedy", "GR-SERIAL": "Aurora-Prelim"}
# colors = ['r', 'g', 'b', 'black', 'c', 'm']
# One marker/linestyle per plotted technique.
markers = ['o', '^', 'v', 'h', 'x']
linestyle_list = ['--', '-', ':', '-', '-.']
# Global style configuration
utils.set_rcs()
def latency_logscale(data):
fig = utils.plt.figure()
ax = fig.add_subplot(111)
space = 0.25
conditions = np.unique(data[:, 0])
categories = np.unique(data[:, 1])
# n = len(conditions)
n = len(labels_map)
width = (1 - space) / n
print "width:", width
i = 0
for cond in conditions:
print "cond:", cond
y_vals = data[data[:, 0] == cond][:, 2].astype(np.float)
x_vals = data[data[:, 0] == cond][:, 1].astype(np.int)
pos = [j - (1 - space) / 2. + i * width for j in range(1, len(categories) + 1)]
if labels_map.has_key(str(cond).strip()):
ax.plot(x_vals, y_vals, label=labels_map[str(cond).strip()], color=colors[i], linestyle=linestyle_list[i],
marker=markers[i], linewidth=1.5,)
# , edgecolor=get_colors()[i+1],hatch=hatch_patterns[i])
i +=1
indexes = range(1, len(categories) + 1)
print "Indexes: ", indexes
print "Categories: ", categories
# Add the axis labels
ax.set_ylabel("Latency (ms)")
ax.set_xlabel("Number of Nodes")
# Make Y axis logscale
utils.plt.yscale('log', nonposy='clip')
# Add a legend
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1])
utils.plt.tight_layout()
# Create some space for the last marker
utils.plt.xlim((0, x_vals[len(x_vals)-1]+10))
return fig, ax
def file_parser(fnames):
    """
    Load and concatenate the per-technique statistics CSV files.

    *fnames* is an iterable of CSV paths.  Returns an (N, 3) numpy array of
    ``[technique, cluster size, runtime in ms]`` rows (note the
    leading-space column names used by the stats files).
    """
    file_data = (pd.read_csv(f) for f in fnames)
    all_data = pd.concat(file_data, ignore_index=True)
    # grouped_data = all_data.groupby([' Plan technique', ' totJobs'])[' ObjectiveValue '].mean()
    # Single-argument print() behaves identically on Python 2 and 3.
    print(all_data.columns.values)
    # print grouped_data
    numpyMatrix = all_data[[' Plan technique', ' clusterSize', ' runTime(ms)']].values
    # print numpyMatrix
    return numpyMatrix
if __name__ == '__main__':
    # Single-argument print() behaves the same on Python 2 and 3.
    # (Typo fix: "Sytem" -> "System".)
    print("System Path {}".format(os.environ['PATH']))
    if len(sys.argv) < 2:
        # Typo fix: script name was doubled as "bars_efficiency.py.py".
        print("Usage: bars_efficiency.py <input PATH>")
        sys.exit(-1)
    outname = "placement_latency_log"
    # Build full paths to the stats files; avoid shadowing the builtin 'file'.
    fpaths = [sys.argv[1] + "/" + fname for fname in files]
    # labels.append(sys.argv[2 + i])
    print('Files given: {}'.format(" | ".join(fpaths)))
    # print 'Labels given: {}'.format(" | ".join(label for label in labels))
    # print brewer2mpl.print_maps()
    data = file_parser(fpaths)
    fig, axes = latency_logscale(data)
    utils.set_rcs()
    utils.prepare_legend(legend_loc="upper left", legend_font=15)
    utils.writeout("%s" % outname)
|
While the increasingly dirty language evident at these rallies should certainly be covered in stride, and while Bill Ayers deserves independent inquiry, any report from the trail should remember that McCain did present a new idea that is supposed to help troubled homeowners, and assess his speeches with that in mind. If he’s talking about the plan in between the “Who is Senator Obama?” lines, it deserves mention. If he’s not, that deserves mention as well.
John, you're right, some reports were just out this weekend about some ugly behavior on the part of Obama supporters. And they should be called out for it as McCain supporters were. You're also right that the reactions of supporters aren't necessarily the fault of candidates, but they can be stoked or implicitly condoned by them. I'm not necessarily saying McCain and Palin were doing that, but the concern was that they were verging on it. When shouts of "kill him" are yelled out at a rally and the candidate offers no response, I think it's fair to say that shows a lack of leadership. I'd also venture to say that the concern that was expressed over these rallies wasn't necessarily blaming John McCain for the outbursts, but was holding him accountable for encouraging a more reasonable discussion, which in the end he did.
Have you listened to an Obama rally lately? Ignorance has unfortunately reared its ugly head in those meetings more than once as well. That's obviously not Obama's fault. Why do we presume that it's McCain's fault when it happens at his rallies? We need to be very careful as a society about mind and heart-reading. When we are most convinced that we know the motives of others, we are almost certainly wrong. I actually admire both candidates for calming down the extremism expressed in their crowds. Obama recently thanked McCain for coming to his defense when a few angry people got out of hand. You may support one candidate or another, but give them some credit when its due. Neither of them really impress me. Neither of them really scare me either. I am not particularly concerned with the expression of liberal or conservative views, but the dehumanization of the opposition and blindness to the good ideas on both sides is destroying us as a civilization. Don't presume that it's only the other side that does it. It's often the self-proclaimed "enlightened ones" on both sides who are the most obnoxious (environmentalists and animal rights advocates too often remind me of even the worst evangelicals and isolationists). If we are really for peace and justice, why are we so filled with fear, anger, and hatred ourselves? Lighten up, people. I have decided to educate my fears instead. After only a few short lectures, they have turned to peace. This peace will remain regardless of who wins this election.
Gawker had an interesting post and discussion yesterday about these recent incidents, with commenters debating whether they're isolated outbursts by a few ultimately harmless nutcases, or portents of increased acrimony and even danger to come in the first years of an Obama administration. http://gawker.com/5061858/its-going-to-be-an-angry-couple-years The post also contains some video clips of some truly horrible McCain "supporters" spouting ignorant things.
|
import numpy as np
import dask.array as da
from collections import Sequence
def minimum_image(coords, pbc):
    """
    Wraps a vector collection of atom positions into the central periodic
    image or primary simulation cell.

    Parameters
    ----------
    coords : array_like, (Nx3)
        Vector collection of atom positions.
    pbc : array_like, (3)
        Periodic box lengths along each axis.

    Returns
    -------
    wrap : :class:`numpy.ndarray`, (Nx3)
        Atomic positions wrapped into the primary simulation
        cell (central periodic image).
    """
    # This will do the broadcasting
    coords = np.array(coords)
    pbc = np.array(pbc)
    # For each coordinate this number identifies the periodic image it lies in
    image_number = np.floor(coords / pbc)
    wrap = coords - pbc * image_number
    return wrap
def noperiodic(r_array, periodic, reference=None):
    '''Rearrange the array of coordinates *r_array* in a way that doesn't
    cross the periodic boundary.

    Note that *r_array* is modified in place and also returned.

    Parameters
    ----------
    r_array : :class:`numpy.ndarray`, (Nx3)
        Array of 3D coordinates.
    periodic: :class:`numpy.ndarray`, (3)
        Periodic boundary dimensions.
    reference: ``None`` or :class:`numpy.ndarray` (3)
        The points will be moved to be in the periodic image centered on the
        reference.  If None, the first point will be taken as a reference.

    Returns
    -------
    A (N, 3) array of coordinates, all in the same periodic image.

    Example
    -------

    >>> coordinates = np.array([[0.1, 0.0, 0.0], [0.9, 0.0, 0.0]])
    >>> periodic = np.array([1, 1, 1])
    >>> noperiodic(coordinates, periodic)
    [[ 0.1, 0.0, 0.0],
     [-0.1, 0.0, 0.0]]

    '''
    if reference is None:
        center = r_array[0]
    else:
        center = reference
    # Find the displacements
    dr = (center - r_array)
    drsign = np.sign(dr)
    # Move things when the displacement is more than half the box size
    tomove = np.abs(dr) >= periodic / 2.0
    r_array[tomove] += (drsign * periodic)[tomove]
    return r_array
def subtract_vectors(a, b, periodic):
    '''Return the minimum-image difference ``a - b`` under periodic
    boundary conditions.

    Each component of the result lies within half a box length of zero.
    '''
    r = a - b
    delta = np.abs(r)
    sign = np.sign(r)
    # When a component exceeds half the box, shift it to the nearest image:
    # the wrapped value is r - sign*periodic.  (The previous expression,
    # sign*(periodic - delta), returned the *negated* minimum-image
    # component, e.g. +0.2 instead of -0.2 for r=0.8 in a unit box.)
    return np.where(delta > 0.5 * periodic, r - sign * periodic, r)
def add_vectors(vec_a, vec_b, periodic):
    '''Returns the sum of the points vec_a + vec_b subject
    to the periodic boundary conditions.
    '''
    # Bring both points into the same periodic image before summing.
    # (Previously 'moved' was computed but ignored and the raw inputs were
    # summed, so the periodic correction never took effect.)
    moved = noperiodic(np.array([vec_a, vec_b]), periodic)
    return moved[0] + moved[1]
def distance_matrix(a, b, periodic):
    '''Calculate a distance matrix between coordinate sets *a* and *b*.'''
    # Adding a new axis to b broadcasts against a, yielding the full
    # pairwise matrix of minimum-image distances.
    return periodic_distance(a, b[:, np.newaxis], periodic)
def periodic_distance(a, b, periodic):
    '''
    Periodic (minimum-image) distance between two coordinate arrays.
    *periodic* is a 3-element array of box lengths.
    '''
    a, b, periodic = np.array(a), np.array(b), np.array(periodic)
    delta = np.abs(a - b)
    # Fold each component into the nearest periodic image before the norm.
    delta = np.where(delta > 0.5 * periodic, periodic - delta, delta)
    return np.sqrt((delta ** 2).sum(axis=-1))
def geometric_center(coords, periodic):
    '''Geometric center taking into account periodic boundaries.

    Each axis is mapped onto a circle, the points are averaged in the
    embedding plane, and the mean angle is mapped back into the box —
    the standard circular-mean trick for averaging periodic quantities.
    '''
    max_vals = periodic
    # Map each coordinate to an angle in [0, 2*pi)
    theta = 2 * np.pi * (coords / max_vals)
    # Cartesian components of the points on the circle, scaled to box units
    eps = np.cos(theta) * max_vals / (2 * np.pi)
    zeta = np.sin(theta) * max_vals / (2 * np.pi)
    # Sums suffice here: arctan2 is invariant to the common 1/N factor
    eps_avg = eps.sum(axis=0)
    zeta_avg = zeta.sum(axis=0)
    # Negating both arguments and adding pi keeps the result in [0, 2*pi)
    theta_avg = np.arctan2(-zeta_avg, -eps_avg) + np.pi
    # Convert the mean angle back to a coordinate inside the box
    return theta_avg * max_vals / (2 * np.pi)
def radius_of_gyration(coords, periodic):
    '''Calculate the square root of the mean squared distance of *coords*
    from their geometric center, using minimum-image distances.
    '''
    gc = geometric_center(coords, periodic)
    # The radius of gyration is the sqrt of the mean squared distance; the
    # previous code omitted the sqrt and returned Rg**2 despite the name
    # and docstring.
    return np.sqrt((periodic_distance(coords, gc, periodic) ** 2).sum() / len(coords))
def fractional_coordinates(xyz, box_vectors):
    '''Convert cartesian coordinates *xyz* to fractional coordinates of the
    cell described by *box_vectors*.'''
    inv_box = np.linalg.inv(box_vectors)
    return inv_box.T.dot(xyz.T).T
def cell_coordinates(fractional, box_vectors):
    '''Convert *fractional* coordinates back to cartesian using *box_vectors*.'''
    return np.dot(box_vectors.T, fractional.T).T
def general_periodic_distance(a, b, box_vectors):
    '''Minimum-image distance between points *a* and *b* in a general cell
    described by the row vectors of *box_vectors*.

    The displacement is folded component-wise in fractional space and then
    converted back to cartesian before taking the norm.
    '''
    # Work in fractional space, where the cell is a unit cube
    frac = fractional_coordinates(b - a, box_vectors)
    delta = np.abs(frac)
    periodic = 1.0
    # Fold each fractional component into the nearest image
    delta = np.where(delta > 0.5 * periodic, periodic - delta, delta)
    # NOTE(review): component-wise folding of |frac| is exact for
    # orthorhombic cells; for strongly skewed (triclinic) cells it may not
    # give the true minimum image — confirm intended use.
    return np.linalg.norm(cell_coordinates(delta, box_vectors))
|
Keeppy :: What can you do with old jeans?
Jeans are an item of clothing every person (it doesn’t matter if you dress according to fashion trends or not) should have in his/her closet. You can spice them up with some cool belts, badges, flowers or other motifs if they start to look old or worn out. You can breathe a completely new life into your jeans just by using a little imagination and your skillful or less skillful hands. After seeing these amazing jeans you will most certainly wish to have a pair of your own.
This is something you can easily do to your dull old jeans .You will look hot in any occasion and get applause for your own creation.
Have you ever had a pair of jeans wear through at the knees? Once worn-in just enough to always be comfortable, your favorite pair now sport raggedy knees and faded legs. No longer appropriate for those casual dinners with the boyfriend’s parents, or your son’s parent teacher conferences, what can you do? Throwing them out seems like a waste, yet mending them reminds you too strongly of the red cotton hearts your mother used to patch yours with when you were fifteen.
Instead of throwing them away, try turning them into something new. For only the cost of a spool of thread (and the cup of tea you’ll bribe your mother with for use of her sewing machine), you get a new pair of denim shorts. Next year, when they’ve seen their share of summer festivals and unplanned road trips, you’ll turn them into a jean miniskirt for casual nights out, or a purse to give your crafty auntie for her forty-fifth birthday.
What do you do with those old jeans? You can send them to a Women's shelter or you can make an adorable apron. I don't like to waste good jeans for crafts but occasionally my craftiness gets the better of me. I think even the younger girls will appreciate this apron. I cut the jeans below the zipper and at the sides making sure I cut the side seam off because it would be hard to sew through. The pocket was sewn into the side seam so I zig-zagged the sides and bottom. It will fray slightly but that adds to the cuteness. Then using a straight stitch I added the lace and ribbons. The waist band has a string of beads that I had to hand stitch on. Then I just added two 32" side ties and there you have it, a blue jean apron. I ran out of iron on rhinestones or I would have added some to the pocket openings. Machine wash on delicate and hang to dry.
There are numerous places online that show you how to make book or photo album covers from a pair of jeans. This can be especially exciting if you have worn a pair of jeans around the time of the photos and may encompass your clothing and a lot of your style into that period. In years to come this can be quite a cool way to recall a time through the use of the photos and the altered jeans.
Need a camera stabilizer, but don't have the cash? With an old pair of jeans, a bag of lentils, and an hour or two of your time, you can DIY one for cheap (or free)! This tutorial by Jeff Meyer on Digital Camera World uses a sewing machine, but if you don't have one, or don't know how to use one, you can pick up some fabric glue for a few bucks. You could also try this method that uses fabric glue, fusible tape, and an iron for a more secure seam so you don't have to worry about spilling the beans and missing your shot.
Start by cutting off the waist and legs of the jeans, then turning them inside out (make sure the zipper is open). Sew up the seams at the top and bottom, then turn the bag right-side-out by pulling it through the zipper hole.
Use a funnel to pour in your lentils (or beans, or whatever you have lying around), zip it up, and you're all set! If you want to make your own, check out Jeff's tutorial for more details.
If you really don't want to tackle sewing, you can also make a cheap stabilizer out of PVC, or a smaller version for your smartphone or mini-camcorder. Want some stable moving shots? Try out these DIY car mounts.
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from django.template.context_processors import csrf
from django.shortcuts import redirect,HttpResponse,render_to_response,render
from django.http.response import StreamingHttpResponse,HttpResponseRedirect,HttpResponseNotFound
from django.core.urlresolvers import reverse
from django.views.decorators.csrf import csrf_exempt
from django.template.context import RequestContext
from models import UserInfo,DocumentInfo
from forms import DocumentForm
from decorators import is_login_auth
import platform,os
from utils.common import Page,page_div,query_page_div,get_doc_page_info,filenameJudge
from DocumentSearch import settings
import datetime
from django.db.models import Q
from tasks import analyze_uploadfile_task
from indexhelper import del_es_doc
import os
# Create your views here.
# Login
@csrf_exempt
def login(request):
    """Authenticate a user by username/password and start a session.

    On success, stores ``username`` and ``login_auth`` in the session and
    redirects to the index page; otherwise re-renders the login form with
    an error status.
    """
    # Create your views here.
    ret = {'status':''}
    if request.method == 'POST':
        username = request.POST.get('username',None)
        password = request.POST.get('password',None)
        is_not_empty=all([username,password])
        if is_not_empty:
            # NOTE(review): the password appears to be compared in plain
            # text against the database — confirm whether hashing is
            # handled elsewhere (e.g. inside UserInfo).
            count = UserInfo.objects.filter(username=username,password=password).count()
            # If username/password match exactly one record, log in and
            # redirect to the index page
            if count == 1:
                request.session['username'] = username
                request.session['login_auth'] = True
                #logging.info("user login : {}".format(username))
                return redirect('/backend/index/')
            else:
                ret['status']='password error'
        else:
            ret['status']='can not empty'
    return render_to_response('login.html',ret)
# Logout
@is_login_auth
def logout(request):
    """Clear the session's auth markers and return to the login page."""
    #logging.info("user logout : {}".format(request.session['username']))
    del request.session['login_auth']
    del request.session['username']
    return redirect("/backend/login/")
# Index page
@is_login_auth
def index(request,page=1):
    """Render the document index with pagination, optionally filtered.

    When the GET parameter ``issearch`` is present, the listing is filtered
    by ``searchindexstate``, ``searchstarttime`` and ``searchendtime``;
    otherwise the plain paginated listing is shown.
    """
    ret = {'DocumentInfoObj':None,'UserInfoObj':None,'PageInfo':None,'AllCount':None}
    try:
        page = int(page)
    except Exception:
        page = 1
    if request.method == 'GET':
        # Paginated display for search results
        if request.GET.get('issearch',None):
            searchindexstate = request.GET.get('searchindexstate',None)
            tmpstarttime = request.GET.get('searchstarttime',None)
            tmpendtime = request.GET.get('searchendtime',None)
            # Echo the raw search parameters back to the template
            Qset = {}
            Qset['indexstate'] = searchindexstate
            Qset['searchstarttime'] = tmpstarttime
            Qset['searchendtime'] = tmpendtime
            # Parse the start time; default to 1970-01-01 when missing or invalid
            try:
                searchstarttime = datetime.datetime.strptime(tmpstarttime,'%Y-%m-%d')
            except:
                searchstarttime = datetime.datetime(1970, 1, 1)
            # Parse the end time; default to "now" when missing or invalid
            try:
                searchendtime = datetime.datetime.strptime(tmpendtime,'%Y-%m-%d')
            except:
                searchendtime = datetime.datetime.now()
            allDoc = DocumentInfo.objects.filter(
                Q(indexstate__startswith=searchindexstate)
                &Q(timestamp__gte=searchstarttime)
                &Q(timestamp__lte=searchendtime)
            )
            AllCount = allDoc.count()
            ret['AllCount'] = AllCount
            # 6 documents per page
            PageObj = Page(AllCount,page,6)
            DocumentInfoObj = allDoc[PageObj.begin:PageObj.end]
            pageurl = 'index'
            # Keep the query string so pagination links preserve the filter
            querycondition = request.META.get("QUERY_STRING",None)
            pageinfo = query_page_div(page, PageObj.all_page_count,pageurl,querycondition)
            ret['PageInfo'] = pageinfo
            ret['DocumentInfoObj'] = DocumentInfoObj
            UserInfoObj = UserInfo.objects.get(username=request.session.get('username',None))
            ret['UserInfoObj'] = UserInfoObj
            ret['Qset'] = Qset
            print Qset
            return render_to_response('index.html',ret,context_instance=RequestContext(request))
        # Normal (unfiltered) paginated index display
        else:
            docPage = get_doc_page_info(DocumentInfo,page,'n')
            ret['AllCount'] = docPage['AllCount']
            ret['PageInfo'] = docPage['PageInfo']
            ret['DocumentInfoObj'] = docPage['DocumentInfoObj']
            UserInfoObj = UserInfo.objects.get(username=request.session.get('username',None))
            ret['UserInfoObj'] = UserInfoObj
            return render_to_response('index.html',ret,context_instance=RequestContext(request))
    else:
        return HttpResponse("this is a web page , please use metod GET")
# Submit a new document
@is_login_auth
def submit_doc(request):
    """Show the document-upload form; on POST, validate and save the upload
    and queue an asynchronous parsing/indexing task."""
    ret = {'UserName':None,'form':None,'UserInfoObj':None}
    ret['UserName'] = request.session.get('username',None)
    #WorkOrderObj = WorkOrder.objects.create()
    UserInfoObj = UserInfo.objects.get(username=ret['UserName'])
    ret['UserInfoObj'] = UserInfoObj
    if request.method == 'POST':
        DocumentObj_form = DocumentForm(request.POST,request.FILES)
        upload_filename = request.FILES['attachment'].name
        #django.core.files.uploadedfile.InMemoryUploadedFile
        # Classify the upload by its filename suffix; a falsy flag means
        # the file type is unsupported
        fileSuffixObj = filenameJudge(upload_filename)
        file_flag = fileSuffixObj.suffix_judge()
        if DocumentObj_form.is_valid() and file_flag:
            DocumentObj = DocumentObj_form.save(commit=False)
            # Set index state to 'b', meaning indexing has started
            DocumentObj.indexstate = 'b'
            DocumentObj.save()
            # Parse and index the file asynchronously
            analyze_uploadfile_task.delay(DocumentObj.id,file_flag)
            ret['status'] = 'save ok'
        else:
            ret['status'] = 'save error'
            ret['form'] = DocumentObj_form
            # Add the CSRF token for the re-rendered (bound) form
            ret.update(csrf(request))
            return render(request,'submitdoc.html',ret)
    DocumentObj_form = DocumentForm()
    ret['form'] = DocumentObj_form
    # Add the CSRF token
    ret.update(csrf(request))
    return render_to_response('submitdoc.html',ret)
# File download
@is_login_auth
def big_file_download(request,attachmentid):
    """Stream the attachment of the given document id as a file download.

    Builds a platform-specific filesystem path (Windows uses backslashes;
    every other platform follows the Linux convention) and serves the file
    in 512-byte chunks via a streaming response so large files never sit
    fully in memory.
    """
    def _file_iterator(file_name, chunk_size=512):
        # Yield the file in small chunks for StreamingHttpResponse.
        with open(file_name,'rb') as f:
            while True:
                c = f.read(chunk_size)
                if c:
                    yield c
                else:
                    break
    DocumentFileObj = DocumentInfo.objects.get(id=attachmentid)
    # Determine the host OS once.  The previous code had three branches
    # (Windows / Linux / other) but the Linux and fallback branches were
    # byte-identical, so non-Windows platforms share one path here.
    if platform.system() == 'Windows':
        the_file_name = str(settings.MEDIA_ROOT) + '\\' + str(DocumentFileObj.attachment).replace('/', '\\').decode('utf-8')
        download_name = the_file_name.encode('gbk').split("\\")[-1]
    else:
        the_file_name = settings.MEDIA_ROOT + "/" + str(DocumentFileObj.attachment).decode('utf-8')
        download_name = the_file_name.encode('gbk').split("/")[-1]
    response = StreamingHttpResponse(_file_iterator(the_file_name))
    response['Content-Type'] = 'application/octet-stream'
    response['Content-Disposition'] = 'attachment;filename=' + download_name
    return response
# Batch-delete document records
@is_login_auth
def batch_del_doc(request):
    """Delete every document whose id was checked in the POSTed form,
    then re-render page 1 of the document index.

    Only POST is accepted; any other method yields a 404 response.
    """
    if request.method != 'POST':
        return HttpResponseNotFound('<h1>Page not found</h1>')
    ctx = {'DocumentInfoObj': None, 'UserInfoObj': None, 'PageInfo': None, 'AllCount': None}
    doc_ids = request.POST.getlist("checkboxdel[]", None)
    if doc_ids:
        for doc_id in doc_ids:
            DocumentInfo.objects.get(id=doc_id).delete()
            try:
                # Remove the matching document from Elasticsearch as well;
                # best effort, since the id may never have been indexed.
                del_es_doc(doc_id)
            except Exception as e:
                print(e)
                print("del this doc id in es error,may be this doc id does not exist ")
        ctx['popover'] = {"id": ",".join(doc_ids), "info": "已经删除以下编号的文档"}
    else:
        ctx['popover'] = {"id": "", "info": "没有选中可删除的文档"}
    # Rebuild the first page of the paginated document list.
    doc_page = get_doc_page_info(DocumentInfo, 1, 'n')
    ctx['AllCount'] = doc_page['AllCount']
    ctx['PageInfo'] = doc_page['PageInfo']
    ctx['DocumentInfoObj'] = doc_page['DocumentInfoObj']
    ctx['UserInfoObj'] = UserInfo.objects.get(username=request.session.get('username', None))
    return render_to_response('index.html', ctx, context_instance=RequestContext(request))
# Delete a single document record
@is_login_auth
def del_doc(request, id):
    """Delete the document with the given id (and its search-index entry),
    then re-render page 1 of the document index.

    Redirects to the index page when the id does not exist; any other
    unexpected failure yields a 404 response.
    """
    try:
        try:
            doc = DocumentInfo.objects.get(id=id)
        except Exception as e:
            # Unknown id: bounce back to the index instead of erroring out.
            print(e)
            return HttpResponseRedirect('/backend/index')
        doc.delete()
        try:
            # Best-effort removal from Elasticsearch; the id may not be indexed.
            del_es_doc(id)
        except Exception as e:
            print(e)
            print("del this doc id in es error,may be this doc id does not exist ")
        ctx = {'DocumentInfoObj': None, 'UserInfoObj': None, 'PageInfo': None, 'AllCount': None}
        # Rebuild the first page of the paginated document list.
        doc_page = get_doc_page_info(DocumentInfo, 1, 'n')
        ctx['AllCount'] = doc_page['AllCount']
        ctx['PageInfo'] = doc_page['PageInfo']
        ctx['DocumentInfoObj'] = doc_page['DocumentInfoObj']
        ctx['UserInfoObj'] = UserInfo.objects.get(username=request.session.get('username', None))
        ctx['popover'] = {"id": id, "info": "已经删除文档"}
        return render_to_response('index.html', ctx, context_instance=RequestContext(request))
    except Exception as e:
        return HttpResponseNotFound('<h1>Page not found</h1>')
# Edit an existing document record
@is_login_auth
def edit(request,id):
    """Display and process the edit form for the document with the given id.

    GET renders the pre-populated form; POST validates the submission,
    saves it, and queues an asynchronous re-index task for the document.
    """
    ret = {'UserName':None,'form':None,'status':'','id':None,'UserInfoObj':None}
    DocumentInfoObj = DocumentInfo.objects.get(id=id)
    if request.method == 'POST':
        DocumentInfoObj_form = DocumentForm(data=request.POST,files=request.FILES,instance=DocumentInfoObj)
        try:
            # A new file was uploaded: judge its type by the uploaded name.
            fileSuffixObj = filenameJudge(request.FILES['attachment'].name)
        except KeyError:
            # No new upload (Django raises MultiValueDictKeyError, a KeyError
            # subclass): fall back to the file already stored on the record.
            # Narrowed from a bare except so real errors are not swallowed.
            fileSuffixObj = filenameJudge(os.path.basename(str(DocumentInfoObj.attachment)))
        file_flag = fileSuffixObj.suffix_judge()
        if DocumentInfoObj_form.is_valid() and file_flag:
            DocumentObj = DocumentInfoObj_form.save(commit=False)
            # Index state 'b' marks the document as awaiting (re)indexing.
            DocumentObj.indexstate = 'b'
            DocumentObj.save()
            # Re-index asynchronously via the task queue.
            analyze_uploadfile_task.delay(DocumentObj.id,file_flag)
            ret['status'] = '修改成功'
        else:
            ret['status'] = '修改失败'
        ret['form'] = DocumentInfoObj_form
        # CSRF token for the re-rendered form
        ret.update(csrf(request))
        return render(request,'edit.html',ret)
    DocumentInfoObj_form = DocumentForm(instance=DocumentInfoObj)
    ret['UserName'] = request.session.get('username',None)
    UserInfoObj = UserInfo.objects.get(username=ret['UserName'])
    ret['UserInfoObj'] = UserInfoObj
    ret['form'] = DocumentInfoObj_form
    ret['id'] = id
    # CSRF token for the form
    ret.update(csrf(request))
    return render_to_response('edit.html',ret)
|
This beyond-dairy-free dressing is packed with omega-3s and is sure to satisfy. Our beloved Ranch Chia offers the comforting taste of your favorite ranch, while flaunting an all-star list of clean, real food ingredients including healthful organic chia seeds. That's one rockin' ranch.
|
# importer_api.py
# Meteor Pi, Cambridge Science Centre
# Dominic Ford, Tom Oinn
# -------------------------------------------------
# Copyright 2016 Cambridge Science Centre.
# This file is part of Meteor Pi.
# Meteor Pi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Meteor Pi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Meteor Pi. If not, see <http://www.gnu.org/licenses/>.
# -------------------------------------------------
from logging import getLogger
from yaml import safe_load
from os import path, remove
import meteorpi_model as model
from flask.ext.jsonpify import jsonify
from flask import request, g
class MeteorDatabaseImportReceiver(object):
    """
    Receives imported entities and pushes them into a
    :class:`meteorpi_db.MeteorDatabase`, requesting any additional data
    (such as binary file contents) needed along the way.
    """

    def __init__(self, db):
        self.db = db

    @staticmethod
    def get_importing_user_id():
        """
        Return the user_id of the importing user.

        By the time the receiver runs, authentication has already succeeded,
        so the user is available on the Flask request context.
        """
        return g.user.user_id

    def receive_observation(self, import_request):
        """Store the observation carried by *import_request* unless already present."""
        observation = import_request.entity
        if not self.db.has_observation_id(observation.id):
            self.db.import_observation(observation=observation,
                                       user_id=self.get_importing_user_id())
            self.db.commit()
        return import_request.response_complete()

    def receive_file_record(self, import_request):
        """Store the file record, first asking for its binary data if missing."""
        record = import_request.entity
        if not self.db.has_file_id(record.id):
            if not path.isfile(self.db.file_path_for_id(record.id)):
                # Binary payload not on disk yet: request it from the exporter.
                return import_request.response_need_file_data(file_id=record.id)
            self.db.import_file(file_item=record, user_id=self.get_importing_user_id())
            self.db.commit()
        return import_request.response_complete()

    def receive_metadata(self, import_request):
        """Store an observatory metadata item, registering the obstory if new."""
        metadata = import_request.entity
        if not self.db.has_obstory_metadata(metadata.id):
            if not self.db.has_obstory_name(metadata.obstory_name):
                self.db.register_obstory(obstory_id=metadata.obstory_id,
                                         obstory_name=metadata.obstory_name,
                                         latitude=metadata.obstory_lat,
                                         longitude=metadata.obstory_lng)
            self.db.import_obstory_metadata(obstory_name=metadata.obstory_name,
                                            key=metadata.key,
                                            value=metadata.value,
                                            metadata_time=metadata.time,
                                            time_created=metadata.time_created,
                                            user_created=self.get_importing_user_id(),
                                            item_id=metadata.id)
            self.db.commit()
        return import_request.response_complete()

    def receive_file_data(self, file_id, file_data, md5_hex):
        """Save uploaded binary data for *file_id*, discarding it on MD5 mismatch."""
        file_path = self.db.file_path_for_id(file_id)
        if not path.isfile(file_path):
            file_data.save(file_path)
            if md5_hex != model.get_md5_hash(file_path):
                # Corrupt upload: delete so the exporter will resend it.
                remove(file_path)
class ImportRequest(object):
    """
    Helper used when importing; makes the exporter's 'cached_request' mechanism
    transparent to the importing party.
    :cvar logger:
        Logs to 'meteorpi.server.import'
    :ivar entity_type:
        The type of the entity being imported: one of 'observation', 'file',
        'metadata' or 'none' (the last when the import-side cache missed).
    """
    logger = getLogger("meteorpi.server.import")
    def __init__(self, entity, entity_id):
        """
        Constructor, don't use this from your own code, instead use process_request() to create one from the Flask
        request context.
        :param entity:
            The entity being imported, either pulled from the request directly or from the cache. This can be None under
            error conditions, in which case the only legitimate response is to send a 'continue' message back to the
            exporter, at which point it will re-send the necessary information to rebuild the cache.
        :param entity_id:
            The ID of the entity being imported, this will always be defined.
        """
        self.entity_id = entity_id
        self.entity = entity
        # Classify the entity by its model type; 'none' flags a cache miss.
        if entity is None:
            self.entity_type = 'none'
        elif isinstance(entity, model.Observation):
            self.entity_type = 'observation'
        elif isinstance(entity, model.FileRecord):
            self.entity_type = 'file'
        elif isinstance(entity, model.ObservatoryMetadata):
            self.entity_type = 'metadata'
        else:
            raise ValueError("Unknown entity type, cannot continue.")
    def response_complete(self):
        """
        Signal that this particular entity has been fully processed. The exporter will not send it to this target again
        under this particular export configuration (there is no guarantee another export configuration on the same
        server won't send it, or that it won't be received from another server though, so you must always check whether
        you have an entity and return this status as early as possible if so)
        :return:
            A response that can be returned from a Flask service method
        """
        ImportRequest.logger.info("Completed import for {0} with id {1}".format(self.entity_type, self.entity_id))
        ImportRequest.logger.debug("Sending: complete")
        return jsonify({'state': 'complete'})
    @staticmethod
    def response_failed(message='Import failed'):
        """
        Signal that import for this entity failed. Whether this results in a retry either immediately or later in time
        is entirely up to the exporting party - this should therefore only be used for error cases, and not used to
        indicate duplicate data (use the response_complete for this as it tells the exporter that it shouldn't send the
        data again)
        :param string message:
            An optional message to convey about the failure
        :return:
            A response that can be returned from a Flask service method
        """
        ImportRequest.logger.debug("Sending: failed")
        return jsonify({'state': 'failed', 'message': message})
    def response_continue(self):
        """
        Signals that a partial reception of data has occurred and that the exporter should continue to send data for
        this entity. This should also be used if import-side caching has missed, in which case the response will direct
        the exporter to re-send the full data for the entity (otherwise it will send back the entity ID and rely on the
        import party's caching to resolve it). Use this for generic cases where we need to be messaged again about this
        entity.
        :return:
            A response that can be returned from a Flask service method
        """
        # A populated entity means the cache is warm; otherwise ask the
        # exporter to re-send the full entity data.
        if self.entity is not None:
            ImportRequest.logger.debug("Sending: continue")
            return jsonify({'state': 'continue'})
        else:
            ImportRequest.logger.debug("Sending: continue-nocache")
            return jsonify({'state': 'continue-nocache'})
    @staticmethod
    def response_continue_after_file():
        """
        As with response_continue, but static to allow it to be called from context where we don't have a populated
        ImportRequest object. Always uses cached IDs, with the expectation that a subsequent request will force cache
        revalidation if required. Use this when acting on reception of binary data.
        :return:
            A response that can be returned from a Flask service method
        """
        return jsonify({'state': 'continue'})
    @staticmethod
    def response_need_file_data(file_id):
        """
        Signal the exporter that we need the binary data associated with a given file ID
        :param string file_id:
            the UUID of the :class:`meteorpi_model.FileRecord` for which we don't currently have data
        :return:
            A response that can be returned from a Flask service method
        """
        ImportRequest.logger.debug("Sending: need_file_data, id={0}".format(file_id))
        return jsonify({'state': 'need_file_data', 'file_id': file_id})
    @staticmethod
    def process_request():
        """
        Build an ImportRequest from the current Flask request context.

        The POSTed body is a YAML/JSON document with a 'type' key naming the
        entity kind ('file', 'metadata' or 'observation') and a same-named key
        holding the entity payload including its 'id'.
        :return:
            An ImportRequest whose entity is the parsed entity, or whose entity
            is None when the type was unrecognised (the caller should respond
            with 'continue' so the exporter re-sends the necessary data).
        """
        # Stash the parsed body on g so _get_entity can reuse it.
        g.request_dict = safe_load(request.get_data())
        entity_type = g.request_dict['type']
        entity_id = g.request_dict[entity_type]['id']
        ImportRequest.logger.debug("Received request, type={0}, id={1}".format(entity_type, entity_id))
        entity = ImportRequest._get_entity(entity_id)
        ImportRequest.logger.debug("Entity with id={0} was {1}".format(entity_id, entity))
        return ImportRequest(entity=entity, entity_id=entity_id)
    @staticmethod
    def _get_entity(entity_id):
        """
        Build the entity described by the POSTed request body (stored on
        g.request_dict by process_request).
        :param string entity_id:
            The ID of the entity contained within the request (unused here;
            the payload is read from the request body).
        :return:
            A :class:`meteorpi_model.FileRecord`,
            :class:`meteorpi_model.ObservatoryMetadata` or
            :class:`meteorpi_model.Observation`, or None for unknown types.
        """
        entity_type = g.request_dict['type']
        if entity_type == 'file':
            return model.FileRecord.from_dict(g.request_dict['file'])
        elif entity_type == 'metadata':
            return model.ObservatoryMetadata.from_dict(g.request_dict['metadata'])
        elif entity_type == 'observation':
            return model.Observation.from_dict(g.request_dict['observation'])
        else:
            return None
def add_routes(meteor_app, url_path='/importv2'):
    """
    Add two routes to the specified instance of :class:`meteorpi_server.MeteorApp` to implement the import API and allow
    for replication of data to this server.
    :param meteorpi_server.MeteorApp meteor_app:
        The :class:`meteorpi_server.MeteorApp` to which import routes should be added. A
        :class:`meteorpi_server.importer_api.MeteorDatabaseImportReceiver` is created per request to replicate any
        missing information from the import into the database attached to the meteor_app.
    :param string url_path:
        The base of the import routes for this application. Defaults to '/importv2' - routes will be created at this
        path and as url_path/data/<file_id>/<md5> for binary data reception. Both paths only respond to POST requests
        and require that the requests are authenticated and that the authenticated user has the 'import' role.
    """
    app = meteor_app.app
    @app.route(url_path, methods=['POST'])
    @meteor_app.requires_auth(roles=['import'])
    def import_entities():
        """
        Receive an entity import request, using :class:`meteorpi_server.import_api.ImportRequest` to parse it, then
        passing the parsed request on to a :class:`meteorpi_server.import_api.MeteorDatabaseImportReceiver` to deal
        with the possible import types.
        :return:
            A response, generally using one of the response_xxx methods in ImportRequest
        """
        db = meteor_app.get_db()
        handler = MeteorDatabaseImportReceiver(db=db)
        import_request = ImportRequest.process_request()
        # Cache miss / unknown type: ask the exporter to re-send full data.
        if import_request.entity is None:
            return import_request.response_continue()
        if import_request.entity_type == 'file':
            response = handler.receive_file_record(import_request)
            handler.db.commit()
            db.close_db()
            if response is not None:
                return response
            else:
                return import_request.response_complete()
        elif import_request.entity_type == 'observation':
            response = handler.receive_observation(import_request)
            handler.db.commit()
            db.close_db()
            if response is not None:
                return response
            else:
                return import_request.response_complete()
        elif import_request.entity_type == 'metadata':
            response = handler.receive_metadata(import_request)
            handler.db.commit()
            db.close_db()
            if response is not None:
                return response
            else:
                return import_request.response_continue()
        else:
            db.close_db()
            return import_request.response_failed("Unknown import request")
    @app.route('{0}/data/<file_id_hex>/<md5_hex>'.format(url_path), methods=['POST'])
    @meteor_app.requires_auth(roles=['import'])
    def import_file_data(file_id_hex, md5_hex):
        """
        Receive a file upload, passing it to the handler if it contains the appropriate information
        :param string file_id_hex:
            The hex representation of the :class:`meteorpi_model.FileRecord` to which this data belongs.
        :param string md5_hex:
            The expected MD5 digest of the uploaded data; mismatching uploads are discarded by the handler.
        """
        db = meteor_app.get_db()
        handler = MeteorDatabaseImportReceiver(db=db)
        file_id = file_id_hex
        file_data = request.files['file']
        if file_data:
            handler.receive_file_data(file_id=file_id, file_data=file_data, md5_hex=md5_hex)
        db.close_db()
        # Always tell the exporter to continue with the originating entity.
        return ImportRequest.response_continue_after_file()
|
Loyola Guild is pleased to present the annual JHS Sober Grad Night, which will take place at Golfland Sunsplash in Roseville. We have rented the entire water park for the exclusive use of our Jesuit Seniors!
A fun evening is planned with access to all the theme park attractions – Wave Pool, Water Slides, Riptide, Mini Golf, Lazer Tag, Arcade Games w/free play and the Fast Cars. The boys will enjoy catering by JR’s BBQ and Krushburger, as well as snacks, a Go Girl and Body Armor beverage station, a pizza station and a continental breakfast. And last, but not least, A SURPRISE GUEST DJ who will be playing music throughout the evening.
We are looking forward to the Seniors sharing a Last Hurrah night together with one another and celebrating the JHS Brotherhood, before they depart on their life journeys after high school.
Students will be transported by chartered motorcoach from Jesuit to and from the event.
Schedule and details about what to bring can be found here.
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import OrderedDict
from grako.util import simplify_list, eval_escapes, warning
from grako.util import re, RE_FLAGS
from grako import grammars
from grako.exceptions import FailedSemantics
from grako.model import ModelBuilderSemantics
class GrakoASTSemantics(object):
    """Semantics that reduce parse results to plain, simplified AST lists."""

    def group(self, ast, *args):
        """Collapse a grouped sub-expression to its simplified contents."""
        return simplify_list(ast)

    def element(self, ast, *args):
        """Collapse a single element to its simplified contents."""
        return simplify_list(ast)

    def sequence(self, ast, *args):
        """Collapse a sequence to its simplified contents."""
        return simplify_list(ast)

    def choice(self, ast, *args):
        # A one-alternative choice is just that alternative, simplified.
        return simplify_list(ast[0]) if len(ast) == 1 else ast
class GrakoSemantics(ModelBuilderSemantics):
    """Semantics that build a grammars.Grammar model from a parsed grammar.

    Rules are collected in order of definition in ``self.rules`` and
    assembled into the final Grammar by the ``grammar`` method.
    """
    def __init__(self, grammar_name):
        super(GrakoSemantics, self).__init__(
            baseType=grammars.Model,
            types=grammars.Model.classes()
        )
        # Name of the grammar being built.
        self.grammar_name = grammar_name
        # Rule name -> grammars.Rule, in definition order.
        self.rules = OrderedDict()
    def token(self, ast, *args):
        """Build a Token node, resolving escape sequences in the literal."""
        token = eval_escapes(ast)
        return grammars.Token(token)
    def pattern(self, ast, *args):
        """Build a Pattern node, validating the regex at grammar-build time."""
        pattern = ast
        try:
            re.compile(pattern, RE_FLAGS)
        except (TypeError, re.error) as e:
            # Surface an invalid regex as a semantic failure with the cause.
            raise FailedSemantics('regexp error: ' + str(e))
        return grammars.Pattern(pattern)
    def hext(self, ast):
        # Parse a hexadecimal literal (rule presumably named 'hext' in the
        # bootstrap grammar — matches the semantics-method naming convention).
        return int(ast, 16)
    def float(self, ast):
        return float(ast)
    def int(self, ast):
        return int(ast)
    def cut_deprecated(self, ast, *args):
        warning('The use of >> for cut is deprecated. Use the ~ symbol instead.')
        return grammars.Cut()
    def override_single_deprecated(self, ast, *args):
        warning('The use of @ for override is deprecated. Use @: instead')
        return grammars.Override(ast)
    def sequence(self, ast, *args):
        """Build a Sequence node, unwrapping single-element sequences."""
        seq = ast.sequence
        assert isinstance(seq, list), str(seq)
        if len(seq) == 1:
            return seq[0]
        return grammars.Sequence(ast)
    def choice(self, ast, *args):
        """Build a Choice node, unwrapping single-alternative choices."""
        if len(ast) == 1:
            return ast[0]
        return grammars.Choice(ast)
    def new_name(self, name):
        # Guard against redefining an existing rule.
        if name in self.rules:
            raise FailedSemantics('rule "%s" already defined' % str(name))
        return name
    def known_name(self, name):
        # Guard against referencing a rule that has not been defined yet.
        if name not in self.rules:
            raise FailedSemantics('rule "%s" not yet defined' % str(name))
        return name
    def directive(self, ast):
        return ast
    def rule(self, ast, *args):
        """Build and register a Rule (or BasedRule when a base is given).

        Redefining a rule without the @override decorator is an error, and
        @override requires the rule to already exist.
        """
        decorators = ast.decorators
        name = ast.name
        exp = ast.exp
        base = ast.base
        params = ast.params
        kwparams = OrderedDict(ast.kwparams) if ast.kwparams else None
        # new_name raises when the name is already taken, so this rejects a
        # duplicate definition that lacks the @override decorator.
        if 'override' not in decorators and name in self.rules:
            self.new_name(name)
        elif 'override' in decorators:
            self.known_name(name)
        if not base:
            rule = grammars.Rule(ast, name, exp, params, kwparams, decorators=decorators)
        else:
            # A based rule inherits from an already-defined base rule.
            self.known_name(base)
            base_rule = self.rules[base]
            rule = grammars.BasedRule(ast, name, exp, base_rule, params, kwparams, decorators=decorators)
        self.rules[name] = rule
        return rule
    def rule_include(self, ast, *args):
        """Build a RuleInclude referencing an already-defined rule."""
        name = str(ast)
        self.known_name(name)
        rule = self.rules[name]
        return grammars.RuleInclude(rule)
    def grammar(self, ast, *args):
        """Assemble the final Grammar from collected rules and directives."""
        directives = {d.name: d.value for d in ast.directives}
        return grammars.Grammar(
            self.grammar_name,
            list(self.rules.values()),
            directives=directives
        )
|
Highlands Cabins in the Winter.
In 1990, the Kimmel Kabins were listed on the National Register of Historic Places. In 1998, the Highlands and the dining hall at the Double Diamond Ranch were added to the National Register of Historic Places.
During the 1950s, the National Park Service kicked off "Mission 66" a 10-year plan to increase visitor services dramatically by 1966, the Service's 50th anniversary. Drawn by the scenic backdrop and roadway access to nearby lakes, several ranches and auto camps sprang up south of Jenny Lake. The Elbo Ranch was a large cabin camp; the Double Diamond Ranch was a boys' ranch; and the X Quarter Circle X Ranch, the Highlands and the Kimmel Kabins were auto courts.
Accommodations changed with the times: dude ranch infrastructure, such as barns and corrals disappeared, replaced by cabins clustered around a central courtyard with adjacent parking. As road systems improved and car-ownership increased during the Post WW II era, vacations were no longer limited to the wealthy. Middle class travelers experienced a newfound freedom, staying only a few days at one place before moving to the next. Traditional dude ranchers referred to these visitors as "tin can tourists."
Over the years, the park has reused many auto camp buildings. The Elbo Ranch cabins are scattered through the park as housing. The Double Diamond Ranch now serves as the American Alpine Club Climbers' Ranch. Seasonal employees live in the Kimmel Kabins during the summer, and the Highlands is home to many other summer park employees.
How to get there: Drive north from Jackson to Moose Junction. Turn left onto the Teton Park Road. Drive through the entrance station. In three miles, turn left onto a dirt road, following signs to the Climbers’ Ranch. The Climbers’ Ranch provides lodging during the peak summer months.
|
# -*- coding: utf-8 -*-
""""
Folium Colormap Module
----------------------
"""
import branca.colormap as cm
def test_simple_step():
    """Smoke-test StepColormap construction and its HTML representation."""
    cm.StepColormap(['green', 'yellow', 'red'],
                    vmin=3., vmax=10.,
                    index=[3, 4, 8, 10], caption='step')
    colormap = cm.StepColormap(['r', 'y', 'g', 'c', 'b', 'm'])
    colormap._repr_html_()
def test_simple_linear():
    """Smoke-test LinearColormap construction and its HTML representation."""
    cm.LinearColormap(['green', 'yellow', 'red'], vmin=3., vmax=10.)
    colormap = cm.LinearColormap(['red', 'orange', 'yellow', 'green'],
                                 index=[0, 0.1, 0.9, 1.])
    colormap._repr_html_()
def test_linear_to_step():
    """Exercise LinearColormap.to_step with every supported argument combo."""
    data = [30.6, 50, 51, 52, 53, 54, 55, 60, 70, 100]
    colormap = cm.linear.YlOrRd_06
    colormap.to_step(n=12)
    colormap.to_step(index=[0, 2, 4, 6, 8, 10])
    colormap.to_step(data=data, n=12)
    # Each supported binning method in turn.
    colormap.to_step(data=data, n=12, method='linear')
    colormap.to_step(data=data, n=12, method='log')
    colormap.to_step(data=data, n=30, method='quantiles')
    colormap.to_step(data=data, quantiles=[0, 0.3, 0.7, 1])
    # Rounding options for the computed boundaries.
    colormap.to_step(data=data, quantiles=[0, 0.3, 0.7, 1], round_method='int')
    colormap.to_step(data=data, quantiles=[0, 0.3, 0.7, 1],
                     round_method='log10')
def test_step_to_linear():
    """Exercise converting a StepColormap back to a LinearColormap."""
    colormap = cm.StepColormap(['green', 'yellow', 'red'],
                               vmin=3., vmax=10.,
                               index=[3, 4, 8, 10], caption='step')
    colormap.to_linear()
def test_linear_object():
    """Smoke-test the built-in linear colormap catalogue."""
    palette = cm.linear
    palette.OrRd_06._repr_html_()
    palette.PuBu_06.to_step(12)
    palette.YlGn_06.scale(3, 12)
    palette._repr_html_()
def test_step_object():
    """Smoke test the built-in step colormap catalogue."""
    catalogue = cm.step
    catalogue.OrRd_06._repr_html_()
    catalogue.PuBu_06.to_linear()
    catalogue.YlGn_06.scale(3, 12)
    catalogue._repr_html_()
|
Pioneering educational programming in Portugal, Orquestra do Norte has streamlined this experience over time, which occupies about 50% of its current activity. Past iterations of this program have offered somewhere between 70 and 90 performances to schools across northern Portugal. The goal is to promote education of the younger generations, harnessing the advantages of music regarding concentration, audition and abstract reasoning, as well as fine motor skills and rhythmic coordination. Each academic year, a whole season of discoveries waiting to be made, towards which the centers of learning contribute with making preparations in advance.
With proposals for both younger audiences and college students, via these pedagogic concerts, Orquestra do Norte establishes direct ties with their listeners, turning them into active participants in the experience (case in point, the “Concertos Sinfónicos Participados”, Participative Symphonic Concerts). In the upcoming season, the learning institutions may choose for the Orquestra to come to them within certain dates, or they may opt to attend open rehearsals, thus discovering the myriad timbres and the peculiarities of each work and its composer(s), in a different way.
The Orquestra’s proposals begin, with ears wide open, by the “Anatomy of the Orchestra”, proving that “Music also Travels”, “Music also Dances”, that there are 3 Bs that made history (Bach, Beethoven and Brahms) and that music changed cinema (“Compositions in the Dream Factory I and II”). In the “Participative Symphonic Concerts”, voice, movement and percussion turn listeners into interpreters. This year there is a premiere: concerts in which “Physics, Mathematics and Music” convey that everything is interconnected in the Universe of Sound.
When “The Orquestra goes to School”, there are programs conceived for necessarily smaller venues, with orchestral formations composed of strings, reeds, percussion and harp that will invoke markedly different worlds of sounds.
Within the works that will help demonstrate the instruments’ timbral colours, it is noteworthy to mention “Peter and the Wolf”, “The Nutcracker” and “The Four Seasons”. There are, however, others that illustrate the ways in which the instruments colour the music and connect to emotions - in order to understand, for instance, how the clarinet can be more intimate, the oboe keen, the cello expressive and the bassoon playful; and to know what timbre is, the volume, the rhythm - to fully seize the opportunity of an immediate contact with the Orquestra and, who knows, perhaps discover new talents and ways to contemplate the future.
To awaken the aesthetic sense and the love for music, actively contributing toward the child’s cognitive and social development are some of the pedagogic concerts’ objectives. Integrating music in educational programs is ever more important, since music has great influence over body control, expression, psychomotricity and cultivation of the voice and language, incentivizing creativity in children and teenagers.
The music room is a place for experiencing sensations, awakening perceptions and opening avenues of communication. The combination of these leads children to acquire competences and access another plane of understanding and expression. Since audition is of recognizable importance in the development of sensibility, one of the concerts’ objectives is to turn each pupil into an active receptor, endeavoring for each to participate in the listening of both specific musical fragments and complete works. In this fashion, the young listener may more easily learn to observe, analyze and appreciate the sonic reality in general and, particularly, the musical one.
The instruments’ timbre plays an important part in listening to a given work, since it helps to determine its character. Prior to the listening of an instrumental piece, pupils will learn the intervening instruments, through direct observation. Instruments are then presented sequentially according to families (strings, reed, brass and percussion) via a simple technical description, followed by the playing of a short passage, performed solo or by a group of musicians.
In a given musical work, different and varied factors come to the forefront: the epoch’s culture in which it was created, its creator’s environment, the composer’s own style, compositional structure, the voices or instruments that interpret it, etc. Regarding the composer, only the most determining biographical aspects are referenced, presented in a brief, agile and attractive manner, capturing the pupil’s attention. In such cases, composers’ portrait and respective life elements are employed, namely the place and country of birth. In all sessions both the author and the work itself is framed within the socio-cultural context of their time, outlining the music’s technical and aesthetic aspects.
Rhythm, melody, harmony, timbre and shape are Music’s basic elements utilized by the composer to create. Via directed listening, students are progressively sensitized to recognize these elements, enabling them to capture and compare, later on, the general distinguishing traits between different works from distinct periods and aesthetic currents.
Illustrating the aural experience with accompanying slideshows or videos is a resource employed in these concerts as an ancillary element of introduction to music, given the permanent power of attraction images exert over viewers. Projecting images related to the program at hand, in synchrony with thematic changes during the work, facilitates analysis and comprehension of each composition’s structure.
“A Fábrica da Música” is the generic title of a series of programs – composed by either selected excerpts or whole works – whose primary objective is the discovery and understanding of the Orchestra. Through specifically chosen pieces, students will have the opportunity to know a symphonic formation from within and, in some cases, outright integrate it! In this fashion, each listener will uncover the Orchestra’s components, noting the duplicity of their nature, simultaneously simple yet complex – an adventure with several steps and proposals.
One begins by hearing some of the most widely known themes from the Seventh Art’s world. The objective is the acquaintance with the orchestra’s different instruments and their varied use within film scores, as well as to highlight the importance of orchestral music in Film.
This program’s objective is to promote the interaction between Musical Education students and the orchestra. Beyond just listening, there is space for the attendees to actively participate. The chosen works’ scores include the audience, assigning them a vital role, not only in executing small choreographies, but also in percussion (corporal or instrumental) and vocals. Note: In preparation for such sessions previous classroom work is expected, with previously provided materials. Recommended age group: 9 to 12 years.
Peter and the Wolf is a children’s tale told through music. It was composed in the short span of 4 days by Sergei Prokofiev in 1936, with the pedagogic goal of showing the instruments’ sonorities to children. The story tells the way how Peter tricks the Wolf and saves his friends Duck, Bird and Cat. Each character (Peter, the Wolf, Grandfather, Bird, Duck, Cat and the Hunters) is represented by an instrument. Thus, Peter is represented by the string section, Bird by the flute; the clarinet gives voice to the Duck, the bassoon to Grandfather and the wicked Wolf is interpreted by the French horns. This program is conceived and staged by Jorge Castro Guedes, with the participation of actors Linda Rodrigues, Inah Santos and Mário Santos.
The Nutcracker is a piece from Tchaikovsky’s classic ballet, inspired by an excerpt from a tale by Hoffmann. The work develops around a Christmas night, where Jans Stahlbaum hosts a large party. The children, Clara and Fritz, wait anxiously. Clara’s godfather, Herr Drosselmeyer, is a toymaker. Upon his arrival, he entertains the guests with mechanical dancing toys. During gift exchange, Clara is bestowed by her godfather with a nutcracker wearing a soldier’s uniform. The party ends, everyone goes to sleep. Sometime later, Clara is attacked by legions of mice. Thence, the Nutcracker comes to life and faces the Mouse King, ending up fatally injured. Clara’s tears break the spell, transforming the doll into a prince, who invites the girl for a journey to the Land of Sweets. Upon her return, Clara awakens and retains her memory from that enchanted night. Program conceived and hosted by Jorge Castro Ribeiro.
Which season is children’s favorite? Spring? Summer? The answer will be given after listening to Vivaldi’s most famous composition: The Four Seasons. Premiered in 1723, this set of concertos for violin remains one of Music History’s most popular. Each movement’s texture is varied, illustrating the respective season. For instance, “Winter” is dark and sombre, whilst “Spring” exhibits sounds that invoke the birds’ chirping and the bees’ buzzing.
Project “Ensaio Aberto” invites children to enter an orchestra’s backstage. In a rehearsal room’s informal setting, youths will be able to verify the effort required to concretize a concert. The composer’s particularities will be pointed out by the conductor and the musicians – highlighting themes, motifs, transitions, orchestration details, formal structures, details of style and interpretation – in order to provide the audience with a close vision of the creator’s world, his work and an orchestra’s experience.
In order to discover the orchestra’s frontmost instruments – and why they are so located – whose sonority goes from the violin’s lyricism to the cello’s telluric timbre. A family very seldom resisted by composers.
The most festive? The most powerful? Or are they also capable of growing delicate harmonies? From the warrior horns to the bassoon’s sweet sound, a world of wind instruments, divided into reeds and brass.
As ancient as the world of men, percussion makes audiences vibrate, with diverse and often surprising sonorities. An appeal not unnoticed by the younger, even when they are discovering music.
A source of multiple sounds, the harp has its own place in composition throughout Music History. Something only listening can explain, across this most antique instrument’s complex possibilities.
Fonte múltipla de sons, a harpa tem lugar próprio na composição ao longo da **História da Música**. Algo que só a audição pode explicar, nas complexas possibilidades deste antiquíssimo instrumento.
- selection of a contact element to ensure the connection between the school and Orquestra do Norte’s production team during the concert(s) organization.
Imagens dos vários eventos realizados.
|
# Copyright (c) 2011-2014 Exxeleron GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy
def uncompress(data, uncompressed_size):
    """Decompress a kdb+ IPC compressed byte stream.

    The stream is an LZ-style format: a flag byte is followed by up to
    eight items, one per bit (processed LSB first). A clear bit means the
    next byte of `data` is a literal output byte; a set bit means the next
    two bytes encode a back-reference — ``data[d]`` selects a slot in the
    pointer table and ``data[d + 1] + 2`` is the number of bytes to copy
    from earlier output.

    :Parameters:
     - `data` - indexable sequence of byte values (the compressed payload,
       starting with the first flag byte).
     - `uncompressed_size` - exact number of bytes the payload expands to.

    :returns: ``numpy.ndarray`` of ``uint8`` holding the decompressed bytes.
    """
    # Pre-built numpy.intc scalars keep all index arithmetic inside numpy,
    # avoiding per-iteration Python int <-> numpy scalar conversions.
    _0 = numpy.intc(0)
    _1 = numpy.intc(1)
    _2 = numpy.intc(2)
    _128 = numpy.intc(128)
    _255 = numpy.intc(255)
    # n: copy length, r: copy source position, s: write position,
    # p: anchor position used when updating the pointer table.
    n, r, s, p = _0, _0, _0, _0
    # i: current flag bit mask (1, 2, 4, ... 128), d: read position in `data`.
    i, d = _1, _1
    f = _255 & data[_0]  # current flag byte
    # Pointer table: maps the XOR of two adjacent output bytes to the
    # position where that byte pair was last seen (mirrors the
    # compressor's hashing scheme).
    ptrs = numpy.zeros(256, dtype = numpy.intc)
    uncompressed = numpy.zeros(uncompressed_size, dtype = numpy.uint8)
    # idx enables fancy-index assignment for back-reference copies below.
    # NOTE(review): presumably chosen over a plain slice so overlapping
    # source/destination copies behave element-wise — confirm against the
    # reference kdb+ IPC decompressor.
    idx = numpy.arange(uncompressed_size, dtype = numpy.intc)
    while s < uncompressed_size:
        pp = p + _1
        if f & i:
            # Back-reference: look up the copy source, then copy
            # data[d + 1] + 2 bytes of already-written output.
            r = ptrs[data[d]]
            n = _2 + data[d + _1]
            uncompressed[idx[s:s + n]] = uncompressed[r:r + n]
            # Record the byte pair at the anchor in the pointer table.
            ptrs[(uncompressed[p]) ^ (uncompressed[pp])] = p
            if s == pp:
                ptrs[(uncompressed[pp]) ^ (uncompressed[pp + _1])] = pp
            d += _2
            r += _2
            s = s + n
            p = s
        else:
            # Literal: emit one byte straight from the input.
            uncompressed[s] = data[d]
            if pp == s:
                ptrs[(uncompressed[p]) ^ (uncompressed[pp])] = p
            p = pp
            s += _1
            d += _1
        if i == _128:
            # All 8 flag bits consumed: fetch the next flag byte (if any
            # output remains to be produced) and reset the bit mask.
            if s < uncompressed_size:
                f = _255 & data[d]
                d += _1
            i = _1
        else:
            i += i  # advance to the next flag bit (mask <<= 1)
    return uncompressed
|
To add a new product category from the Product Categories module, the Add option should be clicked.
To update an existing product category, the Category Name of the respective product category should be clicked.
This module is also accessible by the ProductCategoryGet API function. For more, visit the Megaventory API layer.
Adding a new product category to the account requires -at minimum- setting the compulsory Category Name field.
This module is also accessible by the ProductCategoryUpdate API function. For more, visit the Megaventory API layer.
Product Categories may also be added through the Data Import module.
The deletion of a product category -that has been assigned to one or more products- allows all products that are assigned under this product category to be either: a) associated to another existing product category, b) left without a product category or c) deleted.
This module is also accessible by the ProductCategoryDelete API function. For more, visit the Megaventory API layer.
To browse the list of your deleted Product Categories, click on Show Deleted Product Categories on the bottom of your Product Categories list.
To restore deleted Product Categories, select them from the list and click on Un-delete. Click here for detailed instructions.
If you restore a deleted Product Category, any previously assigned products will not be reassigned automatically.
|
# Copyright (c) 2009-2010 Six Apart Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Six Apart Ltd. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
The `typepad.api` module contains `TypePadObject` implementations of all the
content objects provided in the TypePad API.
"""
import re
from urlparse import urljoin

from remoteobjects.dataobject import find_by_name

from typepad.tpobject import *
from typepad.tpobject import _ImageResizer, _VideoResizer
from typepad import fields
import typepad
class Account(TypePadObject):
    """A user account on an external website."""
    # The assignments below are declarative field definitions: each name
    # (and its optional api_name) defines how a JSON property of the API
    # payload maps onto this object. Declaration is the contract; do not
    # reorder or rename casually.
    crosspostable = fields.Field()
    """`True` if this account can be used to crosspost, or `False` otherwise.
    An account can be used to crosspost if its service supports crossposting and
    the user has enabled crossposting for the specific account.
    """
    domain = fields.Field()
    """The DNS domain of the service that provides the account."""
    id = fields.Field()
    """A URI that serves as a globally unique identifier for the account."""
    provider_icon_url = fields.Field(api_name='providerIconUrl')
    """The URL of a 16-by-16 pixel icon that represents the service that provides
    this account."""
    provider_name = fields.Field(api_name='providerName')
    """A human-friendly name for the service that provides this account."""
    provider_url = fields.Field(api_name='providerURL')
    """**Deprecated.** The URL of the home page of the service that provides this
    account."""
    url = fields.Field()
    """The URL of the user's profile or primary page on the remote site, if known."""
    user_id = fields.Field(api_name='userId')
    """The machine identifier or primary key for the account, if known.
    (Some sites only have a `username`.)
    """
    username = fields.Field()
    """The username of the account, if known.
    (Some sites only have a `user_id`.)
    """
    @property
    def xid(self):
        """The bare identifier portion of `id` (everything after the last colon)."""
        return self.id.rsplit(':', 1)[-1]
class ApiKey(TypePadObject):
    """An API key that identifies a client application to TypePad.

    The key string doubles as the OAuth consumer key; `owner` links back
    to the `Application` the key belongs to.
    """
    api_key = fields.Field(api_name='apiKey')
    """The actual API key string.
    Use this as the consumer key when making an OAuth request.
    """
    owner = fields.Object('Application')
    """The application that owns this API key.
    :attrtype:`Application`
    """
    def make_self_link(self):
        """Return the canonical URL of this API key resource."""
        return urljoin(typepad.client.endpoint, '/api-keys/%s.json' % self.api_key)
    @classmethod
    def get_by_api_key(cls, api_key):
        """Returns an `ApiKey` instance with the given consumer key.

        Asserts that the `api_key` parameter matches ``^\\w+$``.
        """
        # Validate before interpolating into the request path so a
        # malformed key cannot alter the URL. Uses a raw string for the
        # pattern; `re` is imported at module level (previously it was
        # only reachable if the tpobject star import happened to leak it).
        assert re.match(r'^\w+$', api_key), "invalid api_key parameter given"
        return cls.get('/api-keys/%s.json' % api_key)
class Application(TypePadObject):
    """An application that can authenticate to the TypePad API using OAuth.
    An application is identified by its OAuth consumer key, which in the case
    of a hosted group is the same as the identifier for the group itself.
    """
    # Declarative field definitions: each name (plus optional api_name)
    # maps a JSON property of the API payload onto this object.
    _class_object_type = "Application"
    external_feed_subscriptions = fields.Link(ListOf('ExternalFeedSubscription'), api_name='external-feed-subscriptions')
    """Get a list of the application's active external feed subscriptions.
    :attrtype:`list of ExternalFeedSubscription`
    """
    groups = fields.Link(ListOf('Group'))
    """Get a list of groups in which a client using a ``app_full`` access auth
    token from this application can act.
    :attrtype:`list of Group`
    """
    id = fields.Field()
    """A string containing the canonical identifier that can be used to identify
    this application in URLs."""
    name = fields.Field()
    """The name of the application as provided by its developer."""
    oauth_access_token_url = fields.Field(api_name='oauthAccessTokenUrl')
    """The URL of the OAuth access token endpoint for this application."""
    oauth_authorization_url = fields.Field(api_name='oauthAuthorizationUrl')
    """The URL to send the user's browser to for the user authorization step."""
    oauth_identification_url = fields.Field(api_name='oauthIdentificationUrl')
    """The URL to send the user's browser to in order to identify who is logged in
    (that is, the "sign in" link)."""
    oauth_request_token_url = fields.Field(api_name='oauthRequestTokenUrl')
    """The URL of the OAuth request token endpoint for this application."""
    object_type = fields.Field(api_name='objectType')
    """The keyword identifying the type of object this is.
    For an Application object, `object_type` will be ``Application``.
    """
    object_types = fields.List(fields.Field(), api_name='objectTypes')
    """**Deprecated.** The object types for this object.
    This set will contain the string ``tag:api.typepad.com,2009:Application`` for
    an Application object.
    :attrtype:`list`
    """
    session_sync_script_url = fields.Field(api_name='sessionSyncScriptUrl')
    """The URL of the session sync script."""
    signout_url = fields.Field(api_name='signoutUrl')
    """The URL to send the user's browser to in order to sign them out of TypePad."""
    user_flyouts_script_url = fields.Field(api_name='userFlyoutsScriptUrl')
    """The URL of a script to embed to enable the user flyouts functionality."""
    # The nested classes below define the request and response body
    # schemas for the create-external-feed-subscription action endpoint.
    class _CreateExternalFeedSubscriptionPost(TypePadObject):
        callback_url = fields.Field(api_name='callbackUrl')
        """The URL which will receive notifications of new content in the subscribed
        feeds."""
        feed_idents = fields.List(fields.Field(), api_name='feedIdents')
        """A list of identifiers of the initial set of feeds to be subscribed to.
        :attrtype:`list`
        """
        filter_rules = fields.List(fields.Field(), api_name='filterRules')
        """A list of rules for filtering notifications to this subscription; each rule
        is a query string using the search API's syntax.
        :attrtype:`list`
        """
        secret = fields.Field()
        """An optional subscriber-provided opaque token that will be used to compute
        an HMAC digest to be sent along with each item delivered to the
        `callback_url`."""
        verify_token = fields.Field(api_name='verifyToken')
        """A subscriber-provided opaque token that will be echoed back in the
        verification request to assist the subscriber in identifying which
        subscription request is being verified."""
    class _CreateExternalFeedSubscriptionResponse(TypePadObject):
        subscription = fields.Object('ExternalFeedSubscription')
        """The subscription object that was created.
        :attrtype:`ExternalFeedSubscription`
        """
    create_external_feed_subscription = fields.ActionEndpoint(api_name='create-external-feed-subscription', post_type=_CreateExternalFeedSubscriptionPost, response_type=_CreateExternalFeedSubscriptionResponse)
    def make_self_link(self):
        """Return the canonical URL of this application resource."""
        return urljoin(typepad.client.endpoint, '/applications/%s.json' % self.id)
    @classmethod
    def get_by_id(cls, id, **kwargs):
        """Return the `Application` with the given identifier.

        Raises `ValueError` if `id` is empty.
        """
        if id == '':
            raise ValueError("An id is required")
        obj = cls.get('/applications/%s.json' % id, **kwargs)
        # Record the requested id on the instance immediately.
        obj.__dict__['id'] = id
        return obj
    @classmethod
    def get_by_api_key(cls, api_key, **kwargs):
        """Returns an `Application` instance by the API key.
        Asserts that the api_key parameter matches ^\w+$."""
        assert re.match('^\w+$', api_key), "invalid api_key parameter given"
        # Deprecated lookup path; warn callers that still use it.
        import logging
        logging.getLogger("typepad.api").warn(
            '%s.get_by_api_key is deprecated' % cls.__name__)
        return cls.get('/applications/%s.json' % api_key, **kwargs)
    @property
    def browser_upload_endpoint(self):
        """The endpoint to use for uploading file assets directly to
        TypePad."""
        return urljoin(typepad.client.endpoint, '/browser-upload.json')
    # Backwards-compatible alias for the renamed attribute.
    user_flyouts_script = renamed_property(old='user_flyouts_script', new='user_flyouts_script_url')
class Asset(TypePadObject):
    """An item of content generated by a user."""
    # Declarative field definitions: each name (plus optional api_name)
    # maps a JSON property of the API payload onto this object.
    _class_object_type = "Asset"
    author = fields.Object('User')
    """The user who created the selected asset.
    :attrtype:`User`
    """
    categories = fields.Link(ListObject)
    """Get a list of categories into which this asset has been placed within its
    blog.
    Currently supported only for `Post` assets that are posted within a blog.
    :attrtype:`list`
    """
    comment_count = fields.Field(api_name='commentCount')
    """The number of comments that have been posted in reply to this asset.
    This number includes comments that have been posted in response to other
    comments.
    """
    comment_tree = fields.Link(ListOf('CommentTreeItem'), api_name='comment-tree')
    """Get a list of assets that were posted in response to the selected asset and
    their depth in the response tree
    :attrtype:`list of CommentTreeItem`
    """
    comments = fields.Link(ListOf('Comment'))
    """Get a list of assets that were posted in response to the selected asset.
    POST: Create a new Comment asset as a response to the selected asset.
    :attrtype:`list of Comment`
    """
    container = fields.Object('ContainerRef')
    """An object describing the group or blog to which this asset belongs.
    :attrtype:`ContainerRef`
    """
    content = fields.Field()
    """The raw asset content.
    The `text_format` property describes how to format this data. Use this
    property to set the asset content in write operations. An asset posted in a
    group may have a `content` value up to 10,000 bytes long, while a `Post` asset
    in a blog may have up to 65,000 bytes of content.
    """
    crosspost_accounts = fields.List(fields.Field(), api_name='crosspostAccounts')
    """**Editable.** A set of identifiers for `Account` objects to which to
    crosspost this asset when it's posted.
    This property is omitted when retrieving existing assets.
    :attrtype:`list`
    """
    description = fields.Field()
    """The description of the asset."""
    excerpt = fields.Field()
    """A short, plain-text excerpt of the entry content.
    This is currently available only for `Post` assets.
    """
    extended_content = fields.Link('AssetExtendedContent', api_name='extended-content')
    """Get the extended content for the asset, if any.
    Currently supported only for `Post` assets that are posted within a blog.
    :attrtype:`AssetExtendedContent`
    """
    favorite_count = fields.Field(api_name='favoriteCount')
    """The number of distinct users who have added this asset as a favorite."""
    favorites = fields.Link(ListOf('Favorite'))
    """Get a list of favorites that have been created for the selected asset.
    :attrtype:`list of Favorite`
    """
    feedback_status = fields.Link('FeedbackStatus', api_name='feedback-status')
    """Get the feedback status of selected asset PUT: Set the feedback status of
    selected asset
    :attrtype:`FeedbackStatus`
    """
    groups = fields.List(fields.Field())
    """**Deprecated.** An array of strings containing the `id` URI of the `Group`
    object that this asset is mapped into, if any.
    This property has been superseded by the `container` property.
    :attrtype:`list`
    """
    id = fields.Field()
    """A URI that serves as a globally unique identifier for the asset."""
    is_favorite_for_current_user = fields.Field(api_name='isFavoriteForCurrentUser')
    """`True` if this asset is a favorite for the currently authenticated user, or
    `False` otherwise.
    This property is omitted from responses to anonymous requests.
    """
    media_assets = fields.Link(ListOf('Asset'), api_name='media-assets')
    """Get a list of media assets that are embedded in the content of the selected
    asset.
    :attrtype:`list of Asset`
    """
    object_type = fields.Field(api_name='objectType')
    """The keyword identifying the type of asset this is."""
    object_types = fields.List(fields.Field(), api_name='objectTypes')
    """**Deprecated.** An array of object type identifier URIs identifying the
    type of this asset.
    Only the one object type URI for the particular type of asset this asset is
    will be present.
    :attrtype:`list`
    """
    permalink_url = fields.Field(api_name='permalinkUrl')
    """The URL that is this asset's permalink.
    This will be omitted if the asset does not have a permalink of its own (for
    example, if it's embedded in another asset) or if TypePad does not know its
    permalink.
    """
    publication_status = fields.Object('PublicationStatus', api_name='publicationStatus')
    """**Editable.** An object describing the visibility status and publication
    date for this asset.
    Only visibility status is editable.
    :attrtype:`PublicationStatus`
    """
    publication_status_obj = fields.Link('PublicationStatus', api_name='publication-status')
    """Get the publication status of selected asset PUT: Set the publication
    status of selected asset
    :attrtype:`PublicationStatus`
    """
    published = fields.Datetime()
    """The time at which the asset was created, as a W3CDTF timestamp.
    :attrtype:`datetime`
    """
    reblogs = fields.Link(ListOf('Post'))
    """Get a list of posts that were posted as reblogs of the selected asset.
    :attrtype:`list of Post`
    """
    rendered_content = fields.Field(api_name='renderedContent')
    """The content of this asset rendered to HTML.
    This is currently available only for `Post` and `Page` assets.
    """
    source = fields.Object('AssetSource')
    """An object describing the site from which this asset was retrieved, if the
    asset was obtained from an external source.
    :attrtype:`AssetSource`
    """
    text_format = fields.Field(api_name='textFormat')
    """A keyword that indicates what formatting mode to use for the content of
    this asset.
    This can be ``html`` for assets the content of which is HTML,
    ``html_convert_linebreaks`` for assets the content of which is HTML but where
    paragraph tags should be added automatically, or ``markdown`` for assets the
    content of which is Markdown source. Other formatting modes may be added in
    future. Applications that present assets for editing should use this property
    to present an appropriate editor.
    """
    title = fields.Field()
    """The title of the asset."""
    url_id = fields.Field(api_name='urlId')
    """A string containing the canonical identifier that can be used to identify
    this object in URLs.
    This can be used to recognise where the same asset is returned in response to
    different requests, and as a mapping key for an application's local data
    store.
    """
    # The nested classes below define request/response body schemas for
    # the action endpoints declared immediately after each of them.
    class _AddCategoryPost(TypePadObject):
        category = fields.Field()
        """The category to add"""
    add_category = fields.ActionEndpoint(api_name='add-category', post_type=_AddCategoryPost)
    class _MakeCommentPreviewPost(TypePadObject):
        content = fields.Field()
        """The body of the comment."""
    class _MakeCommentPreviewResponse(TypePadObject):
        comment = fields.Object('Asset')
        """A mockup of the future comment.
        :attrtype:`Asset`
        """
    make_comment_preview = fields.ActionEndpoint(api_name='make-comment-preview', post_type=_MakeCommentPreviewPost, response_type=_MakeCommentPreviewResponse)
    class _RemoveCategoryPost(TypePadObject):
        category = fields.Field()
        """The category to remove"""
    remove_category = fields.ActionEndpoint(api_name='remove-category', post_type=_RemoveCategoryPost)
    class _UpdatePublicationStatusPost(TypePadObject):
        draft = fields.Field()
        """A boolean indicating whether the asset is a draft"""
        publication_date = fields.Field(api_name='publicationDate')
        """The publication date of the asset"""
        spam = fields.Field()
        """A boolean indicating whether the asset is spam; Comment only"""
    update_publication_status = fields.ActionEndpoint(api_name='update-publication-status', post_type=_UpdatePublicationStatusPost)
    def make_self_link(self):
        """Return the canonical URL of this asset resource."""
        return urljoin(typepad.client.endpoint, '/assets/%s.json' % self.url_id)
    @property
    def xid(self):
        """The short identifier for this asset (same as `url_id`)."""
        return self.url_id
    @classmethod
    def get_by_id(cls, id, **kwargs):
        """Return the asset with the given ``tag:`` URI identifier.

        The bare identifier after the last colon is used for the lookup.
        """
        url_id = id.rsplit(':', 1)[-1]
        return cls.get_by_url_id(url_id, **kwargs)
    @classmethod
    def get_by_url_id(cls, url_id, **kwargs):
        """Return the asset with the given `url_id`.

        Raises `ValueError` if `url_id` is empty.
        """
        if url_id == '':
            raise ValueError("An url_id is required")
        obj = cls.get('/assets/%s.json' % url_id, **kwargs)
        # Seed both identifiers directly into the instance dict so they
        # are set on the object from the requested id right away.
        obj.__dict__['url_id'] = url_id
        obj.__dict__['id'] = 'tag:api.typepad.com,2009:%s' % url_id
        return obj
    # Backwards-compatible alias: `actor` was renamed to `author`.
    actor = renamed_property(old='actor', new='author')
    def primary_object_type(self):
        """Return the first entry of `object_types`, or `None` if it is
        missing or empty."""
        try:
            return self.object_types[0]
        except (TypeError, IndexError):
            return
    @property
    def asset_ref(self):
        """An `AssetRef` instance representing this asset."""
        return AssetRef(url_id=self.url_id,
                        id=self.id,
                        author=self.author,
                        href='/assets/%s.json' % self.url_id,
                        type='application/json',
                        object_types=self.object_types,
                        object_type=self.object_type)
    def __unicode__(self):
        # Fall back to the raw content when the asset has no title.
        return self.title or self.content
    def __str__(self):
        # NOTE(review): delegates to __unicode__, so on Python 2 this may
        # return a unicode object rather than bytes — confirm callers
        # tolerate that.
        return self.__unicode__()
class AssetExtendedContent(TypePadObject):
    """The extended (rendered HTML) portion of an `Asset`'s content."""
    rendered_extended_content = fields.Field(api_name='renderedExtendedContent')
    """The HTML rendered version of this asset's extended content, if it has any.
    Otherwise, this property is omitted.
    """
class AssetRef(TypePadObject):
    """A structure that refers to an asset without including its full
    content."""
    # Mirrors the identifying subset of `Asset`'s fields (id, url_id,
    # author, object type info) plus `href`/`type` for fetching the
    # full representation.
    author = fields.Object('User')
    """The user who created the referenced asset.
    :attrtype:`User`
    """
    href = fields.Field()
    """The URL of a representation of the referenced asset."""
    id = fields.Field()
    """The URI from the referenced `Asset` object's `id` property."""
    object_type = fields.Field(api_name='objectType')
    """The keyword identifying the type of asset the referenced `Asset` object is."""
    object_types = fields.List(fields.Field(), api_name='objectTypes')
    """**Deprecated.** An array of object type identifier URIs identifying the
    type of the referenced asset.
    Only the one object type URI for the particular type of asset the referenced
    asset is will be present.
    :attrtype:`list`
    """
    type = fields.Field()
    """The MIME type of the representation at the URL given in the `href`
    property."""
    url_id = fields.Field(api_name='urlId')
    """The canonical identifier from the referenced `Asset` object's `url_id`
    property."""
    def reclass_for_data(self, data):
        """Returns ``False``.
        This method prevents `AssetRef` instances from being reclassed when
        updated from a data dictionary based on the dictionary's
        ``objectTypes`` member.
        """
        # AssetRefs are for any object type, so don't reclass them.
        return False
class AssetSource(TypePadObject):

    """Information about an `Asset` instance imported from another service."""

    by_user = fields.Field(api_name='byUser')
    """**Deprecated.** `True` if this content is considered to be created by its
    author, or `False` if it's actually someone else's content imported by the
    asset author."""

    permalink_url = fields.Field(api_name='permalinkUrl')
    """The permalink URL of the resource from which the related asset was
    imported."""

    provider = fields.Dict(fields.Field())
    """**Deprecated.** Description of the external service provider from which
    this content was imported, if known.

    Contains ``name``, ``icon``, and ``uri`` properties. This property will be
    omitted if the service from which the related asset was imported is not
    recognized.

    :attrtype:`dict`

    """
class AudioLink(TypePadObject):

    """A link to an audio recording."""

    duration = fields.Field()
    """The duration of the audio stream in seconds.

    This property will be omitted if the length of the audio stream could not be
    determined.

    """

    url = fields.Field()
    """The URL of an MP3 representation of the audio stream."""
class AuthToken(TypePadObject):

    """An OAuth authorization token issued by TypePad."""

    auth_token = fields.Field(api_name='authToken')
    """The actual auth token string.

    Use this as the access token when making an OAuth request.

    """

    target_object = fields.Object('TypePadObject', api_name='targetObject')
    """**Deprecated.** The root object to which this auth token grants access.

    This is a legacy field maintained for backwards compatibility with older
    clients, as auth tokens are no longer scoped to specific objects.

    :attrtype:`TypePadObject`

    """

    def make_self_link(self):
        # TODO: We don't have the API key, so we can't build a self link.
        return

    @classmethod
    def get_by_key_and_token(cls, api_key, auth_token):
        """Return the auth token identified by the given API key/token pair."""
        return cls.get('/auth-tokens/%s:%s.json' % (api_key, auth_token))

    # Legacy alias: older clients used `target` for `target_object`.
    target = renamed_property(old='target', new='target_object')
class Badge(TypePadObject):

    """An achievement badge that users can win."""

    description = fields.Field()
    """A human-readable description of what a user must do to win this badge."""

    display_name = fields.Field(api_name='displayName')
    """A human-readable name for this badge."""

    id = fields.Field()
    """The canonical identifier that can be used to identify this badge in URLs.

    This can be used to recognise where the same badge is returned in response to
    different requests, and as a mapping key for an application's local data
    store.

    """

    image_link = fields.Object('ImageLink', api_name='imageLink')
    """A link to the image that depicts this badge to users.

    :attrtype:`ImageLink`

    """
class Blog(TypePadObject):

    """A TypePad blog, with endpoints for its posts, pages and settings."""

    categories = fields.Link(ListObject)
    """Get a list of categories which are defined for the selected blog.

    :attrtype:`list`

    """

    commenting_settings = fields.Link('BlogCommentingSettings', api_name='commenting-settings')
    """Get the commenting-related settings for this blog.

    :attrtype:`BlogCommentingSettings`

    """

    comments = fields.Link(ListOf('Comment'))

    crosspost_accounts = fields.Link(ListOf('Account'), api_name='crosspost-accounts')
    """Get a list of accounts that can be used for crossposting with this blog.

    :attrtype:`list of Account`

    """

    description = fields.Field()
    """The description of the blog as provided by its owner."""

    home_url = fields.Field(api_name='homeUrl')
    """The URL of the blog's home page."""

    id = fields.Field()
    """A URI that serves as a globally unique identifier for the object."""

    media_assets = fields.Link(ListOf('Asset'), api_name='media-assets')
    """POST: Add a new media asset to the account that owns this blog.

    :attrtype:`list of Asset`

    """

    object_type = fields.Field(api_name='objectType')
    """The keyword identifying the type of object this is.

    For a Blog object, `object_type` will be ``Blog``.

    """

    object_types = fields.List(fields.Field(), api_name='objectTypes')
    """**Deprecated.** An array of object type identifier URIs.

    This set will contain the string ``tag:api.typepad.com,2009:Blog`` for a Blog
    object.

    :attrtype:`list`

    """

    owner = fields.Object('User')
    """The user who owns the blog.

    :attrtype:`User`

    """

    page_assets = fields.Link(ListOf('Page'), api_name='page-assets')
    """Get a list of pages associated with the selected blog.

    POST: Add a new page to a blog

    :attrtype:`list of Page`

    """

    post_assets = fields.Link(ListOf('Post'), api_name='post-assets')
    """Get a list of posts associated with the selected blog.

    POST: Add a new post to a blog

    :attrtype:`list of Post`

    """

    post_by_email_settings = fields.Link('PostByEmailAddress', api_name='post-by-email-settings')

    stats = fields.Link('BlogStats')
    """Get data about the pageviews for the selected blog.

    :attrtype:`BlogStats`

    """

    title = fields.Field()
    """The title of the blog."""

    url_id = fields.Field(api_name='urlId')
    """A string containing the canonical identifier that can be used to identify
    this object in URLs.

    This can be used to recognise where the same user is returned in response to
    different requests, and as a mapping key for an application's local data
    store.

    """

    # Request/response payload types for the blog's action endpoints.

    class _AddCategoryPost(TypePadObject):
        category = fields.Field()
        """The category to add"""

    add_category = fields.ActionEndpoint(api_name='add-category', post_type=_AddCategoryPost)

    class _DiscoverExternalPostAssetPost(TypePadObject):
        permalink_url = fields.Field(api_name='permalinkUrl')
        """The URL of the page whose external post stub is being retrieved."""

    class _DiscoverExternalPostAssetResponse(TypePadObject):
        asset = fields.Object('Asset')
        """The asset that acts as a stub for the given permalink.

        :attrtype:`Asset`

        """

    discover_external_post_asset = fields.ActionEndpoint(api_name='discover-external-post-asset', post_type=_DiscoverExternalPostAssetPost, response_type=_DiscoverExternalPostAssetResponse)

    class _RemoveCategoryPost(TypePadObject):
        category = fields.Field()
        """The category to remove"""

    remove_category = fields.ActionEndpoint(api_name='remove-category', post_type=_RemoveCategoryPost)

    def make_self_link(self):
        """Return the absolute URL of this blog's own API resource."""
        return urljoin(typepad.client.endpoint, '/blogs/%s.json' % self.url_id)

    @property
    def xid(self):
        """Short alias for this blog's canonical URL identifier (`url_id`)."""
        return self.url_id

    @classmethod
    def get_by_id(cls, id, **kwargs):
        """Return the blog identified by the given ``tag:`` URI `id`."""
        # The url_id is everything after the final colon of the tag URI.
        url_id = id.rsplit(':', 1)[-1]
        return cls.get_by_url_id(url_id, **kwargs)

    @classmethod
    def get_by_url_id(cls, url_id, **kwargs):
        """Return the blog with the given URL identifier."""
        if url_id == '':
            raise ValueError("An url_id is required")
        obj = cls.get('/blogs/%s.json' % url_id, **kwargs)
        # Fill these in locally so they are usable before the response arrives.
        obj.__dict__['url_id'] = url_id
        obj.__dict__['id'] = 'tag:api.typepad.com,2009:%s' % url_id
        return obj
class BlogCommentingSettings(TypePadObject):

    """The commenting-related settings of a blog."""

    captcha_required = fields.Field(api_name='captchaRequired')
    """`True` if this blog requires anonymous commenters to pass a CAPTCHA before
    submitting a comment, or `False` otherwise."""

    email_address_required = fields.Field(api_name='emailAddressRequired')
    """`True` if this blog requires anonymous comments to be submitted with an
    email address, or `False` otherwise."""

    html_allowed = fields.Field(api_name='htmlAllowed')
    """`True` if this blog allows commenters to use basic HTML formatting in
    comments, or `False` if HTML will be removed."""

    moderation_enabled = fields.Field(api_name='moderationEnabled')
    """`True` if this blog places new comments into a moderation queue for
    approval before they are displayed, or `False` if new comments may be
    available immediately."""

    signin_allowed = fields.Field(api_name='signinAllowed')
    """`True` if this blog allows users to sign in to comment, or `False` if all
    new comments are anonymous."""

    signin_required = fields.Field(api_name='signinRequired')
    """`True` if this blog requires users to be logged in in order to leave a
    comment, or `False` if anonymous comments will be rejected."""

    time_limit = fields.Field(api_name='timeLimit')
    """Number of days after a post is published that comments will be allowed.

    If the blog has no time limit for comments, this property will be omitted.

    """

    urls_auto_linked = fields.Field(api_name='urlsAutoLinked')
    """`True` if comments in this blog will automatically have any bare URLs
    turned into links, or `False` if URLs will be shown unlinked."""
class BlogStats(TypePadObject):

    """Page view statistics for a blog."""

    daily_page_views = fields.Dict(fields.Field(), api_name='dailyPageViews')
    """A map containing the daily page views on the blog for the last 120 days.

    The keys of the map are dates in W3CDTF format, and the values are the integer
    number of page views on the blog for that date.

    :attrtype:`dict`

    """

    total_page_views = fields.Field(api_name='totalPageViews')
    """The total number of page views received by the blog for all time."""
class CommentTreeItem(TypePadObject):

    """One comment's position within a threaded comment tree."""

    comment = fields.Object('Asset')
    """The comment asset at this point in the tree.

    :attrtype:`Asset`

    """

    depth = fields.Field()
    """The number of levels deep this comment is in the tree.

    A comment that is directly in reply to the root asset is 1 level deep. If a
    given comment has a depth of 1, all of the direct replies to that comment will
    have a depth of 2; their replies will have depth 3, and so forth.

    """
class ContainerRef(TypePadObject):

    """A reference to a blog or group without its full content."""

    display_name = fields.Field(api_name='displayName')
    """The display name of the blog or group, as set by its owner."""

    home_url = fields.Field(api_name='homeUrl')
    """The URL of the home page of the referenced blog or group."""

    id = fields.Field()
    """The URI from the `id` property of the referenced blog or group."""

    object_type = fields.Field(api_name='objectType')
    """The keyword identifying the type of object the referenced container is."""

    url_id = fields.Field(api_name='urlId')
    """The canonical identifier from the `url_id` property of the referenced blog
    or group."""
class Endpoint(TypePadObject):

    """A description of one endpoint of the TypePad API."""

    action_endpoints = fields.List(fields.Object('Endpoint'), api_name='actionEndpoints')
    """For noun endpoints, an array of action endpoints that it supports.

    :attrtype:`list of Endpoint`

    """

    can_have_id = fields.Field(api_name='canHaveId')
    """For noun endpoints, `True` if an id part is accepted, or `False` if the
    noun may only be used alone."""

    can_omit_id = fields.Field(api_name='canOmitId')
    """For noun endpoints, `True` if the id part can be ommitted, or `False` if it
    is always required."""

    filter_endpoints = fields.List(fields.Object('Endpoint'), api_name='filterEndpoints')
    """For endpoints that return lists, an array of filters that can be appended
    to the endpoint.

    :attrtype:`list of Endpoint`

    """

    format_sensitive = fields.Field(api_name='formatSensitive')
    """`True` if this endpoint requires a format suffix, or `False` otherwise."""

    name = fields.Field()
    """The name of the endpoint, as it appears in URLs."""

    parameterized = fields.Field()
    """For filter endpoints, `True` if a parameter is required on the filter, or
    `False` if it's a boolean filter."""

    post_object_type = fields.Object('ObjectType', api_name='postObjectType')
    """The type of object that this endpoint accepts for ``POST`` operations.

    This property is omitted if this endpoint does not accept ``POST`` requests.

    :attrtype:`ObjectType`

    """

    property_endpoints = fields.List(fields.Object('Endpoint'), api_name='propertyEndpoints')
    """For noun endpoints, an array of property endpoints that it supports.

    :attrtype:`list of Endpoint`

    """

    resource_object_type = fields.Object('ObjectType', api_name='resourceObjectType')
    """The type of object that this endpoint represents for ``GET``, ``PUT`` and
    ``DELETE`` operations.

    This property is omitted for action endpoints, as they do not represent
    resources.

    :attrtype:`ObjectType`

    """

    response_object_type = fields.Object('ObjectType', api_name='responseObjectType')
    """For action endpoints, the type of object that this endpoint returns on
    success.

    If the endpoint returns no payload on success, or if this is not an action
    endpoint, this property is omitted.

    :attrtype:`ObjectType`

    """

    supported_methods = fields.Dict(fields.Field(), api_name='supportedMethods')
    """A mapping of the HTTP methods that this endpoint accepts to the docstrings
    describing the result of each method.

    :attrtype:`dict`

    """

    supported_query_arguments = fields.List(fields.Field(), api_name='supportedQueryArguments')
    """The names of the query string arguments that this endpoint accepts.

    :attrtype:`list`

    """
class Entity(TypePadObject):

    """An addressable TypePad entity, identified by `id` and `url_id`."""

    id = fields.Field()
    """A URI that serves as a globally unique identifier for the object."""

    url_id = fields.Field(api_name='urlId')
    """A string containing the canonical identifier that can be used to identify
    this object in URLs.

    This can be used to recognise where the same user is returned in response to
    different requests, and as a mapping key for an application's local data
    store.

    """
class Event(TypePadObject):

    """An action that a user or group did.

    An event has an `actor`, which is the user or group that did the action; a
    set of `verbs` that describe what kind of action occured; and an `object`
    that is the object that the action was done to. In the current TypePad API
    implementation, only assets, users and groups can be the object of an
    event.

    """

    actor = fields.Object('Entity')
    """The user who performed the action described by this event.

    :attrtype:`Entity`

    """

    id = fields.Field()
    """A URI that serves as a globally unique identifier for the user."""

    object = fields.Object('TypePadObject')
    """The object to which the action described by this event was performed.

    :attrtype:`TypePadObject`

    """

    published = fields.Datetime()
    """The time at which the event was performed, as a W3CDTF timestamp.

    :attrtype:`datetime`

    """

    url_id = fields.Field(api_name='urlId')
    """A string containing the canonical identifier that can be used to identify
    this object in URLs.

    This can be used to recognise where the same user is returned in response to
    different requests, and as a mapping key for an application's local data
    store.

    """

    verb = fields.Field()
    """A keyword identifying the type of event this is."""

    verbs = fields.List(fields.Field())
    """**Deprecated.** An array of verb identifier URIs.

    This set will contain one verb identifier URI.

    :attrtype:`list`

    """

    def make_self_link(self):
        """Return the absolute URL of this event's own API resource."""
        return urljoin(typepad.client.endpoint, '/events/%s.json' % self.url_id)

    @property
    def xid(self):
        """Short alias for this event's canonical URL identifier (`url_id`)."""
        return self.url_id

    @classmethod
    def get_by_id(cls, id, **kwargs):
        """Return the event identified by the given ``tag:`` URI `id`."""
        # The url_id is everything after the final colon of the tag URI.
        url_id = id.rsplit(':', 1)[-1]
        return cls.get_by_url_id(url_id, **kwargs)

    @classmethod
    def get_by_url_id(cls, url_id, **kwargs):
        """Return the event with the given URL identifier."""
        if url_id == '':
            raise ValueError("An url_id is required")
        obj = cls.get('/events/%s.json' % url_id, **kwargs)
        # Fill these in locally so they are usable before the response arrives.
        obj.__dict__['url_id'] = url_id
        obj.__dict__['id'] = 'tag:api.typepad.com,2009:%s' % url_id
        return obj

    def __unicode__(self):
        # An event displays as whatever its target object displays as.
        return unicode(self.object)
class ExternalFeedSubscription(TypePadObject):

    """A subscription to external feeds, with notifications delivered to a
    callback URL."""

    callback_status = fields.Field(api_name='callbackStatus')
    """The HTTP status code that was returned by the last call to the
    subscription's callback URL."""

    callback_url = fields.Field(api_name='callbackUrl')
    """The URL to which to send notifications of new items in this subscription's
    feeds."""

    feeds = fields.Link(ListObject)
    """Get a list of strings containing the identifiers of the feeds to which this
    subscription is subscribed.

    :attrtype:`list`

    """

    filter_rules = fields.List(fields.Field(), api_name='filterRules')
    """A list of rules for filtering notifications to this subscription.

    Each rule is a full-text search query string, like those used with the
    ``/assets`` endpoint. An item will be delivered to the `callback_url` if it
    matches any one of these query strings.

    :attrtype:`list`

    """

    post_as_user_id = fields.List(fields.Field(), api_name='postAsUserId')
    """For a Group-owned subscription, the urlId of the User who will own the
    items posted into the group by the subscription.

    :attrtype:`list`

    """

    url_id = fields.Field(api_name='urlId')
    """The canonical identifier that can be used to identify this object in URLs.

    This can be used to recognise where the same user is returned in response to
    different requests, and as a mapping key for an application's local data
    store.

    """

    # Request payload types for the subscription's action endpoints.

    class _AddFeedsPost(TypePadObject):
        feed_idents = fields.List(fields.Field(), api_name='feedIdents')
        """A list of identifiers to be added to the subscription's set of feeds.

        :attrtype:`list`

        """

    add_feeds = fields.ActionEndpoint(api_name='add-feeds', post_type=_AddFeedsPost)

    class _RemoveFeedsPost(TypePadObject):
        feed_idents = fields.List(fields.Field(), api_name='feedIdents')
        """A list of identifiers to be removed from the subscription's set of feeds.

        :attrtype:`list`

        """

    remove_feeds = fields.ActionEndpoint(api_name='remove-feeds', post_type=_RemoveFeedsPost)

    class _UpdateFiltersPost(TypePadObject):
        filter_rules = fields.List(fields.Field(), api_name='filterRules')
        """The new list of rules for filtering notifications to this subscription;
        this will replace the subscription's existing rules.

        :attrtype:`list`

        """

    update_filters = fields.ActionEndpoint(api_name='update-filters', post_type=_UpdateFiltersPost)

    class _UpdateNotificationSettingsPost(TypePadObject):
        callback_url = fields.Field(api_name='callbackUrl')
        """The new callback URL to receive notifications of new content in this
        subscription's feeds."""

        secret = fields.Field()
        """An optional subscriber-provided opaque token that will be used to compute
        an HMAC digest to be sent along with each item delivered to the
        `callback_url`."""

        verify_token = fields.Field(api_name='verifyToken')
        """A subscriber-provided opaque token that will be echoed back in a
        verification request to the `callback_url`.

        Required, if the `callback_url` is being modified with this endpoint.

        """

    update_notification_settings = fields.ActionEndpoint(api_name='update-notification-settings', post_type=_UpdateNotificationSettingsPost)

    class _UpdateUserPost(TypePadObject):
        post_as_user_id = fields.Field(api_name='postAsUserId')
        """The `url_id` of the user who will own the assets and events posted into the
        group's stream by this subscription.

        The user must be an administrator of the group.

        """

    update_user = fields.ActionEndpoint(api_name='update-user', post_type=_UpdateUserPost)

    def make_self_link(self):
        """Return the absolute URL of this subscription's own API resource."""
        return urljoin(typepad.client.endpoint, '/external-feed-subscriptions/%s.json' % self.url_id)

    @property
    def xid(self):
        """Short alias for this subscription's canonical URL identifier (`url_id`)."""
        return self.url_id

    @classmethod
    def get_by_id(cls, id, **kwargs):
        """Return the subscription identified by the given ``tag:`` URI `id`."""
        # The url_id is everything after the final colon of the tag URI.
        url_id = id.rsplit(':', 1)[-1]
        return cls.get_by_url_id(url_id, **kwargs)

    @classmethod
    def get_by_url_id(cls, url_id, **kwargs):
        """Return the subscription with the given URL identifier."""
        if url_id == '':
            raise ValueError("An url_id is required")
        obj = cls.get('/external-feed-subscriptions/%s.json' % url_id, **kwargs)
        # Fill these in locally so they are usable before the response arrives.
        obj.__dict__['url_id'] = url_id
        obj.__dict__['id'] = 'tag:api.typepad.com,2009:%s' % url_id
        return obj
class Favorite(TypePadObject):

    """A favorite of some other asset.

    Asserts that the user_id and asset_id parameter match ^\w+$."""

    _class_object_type = "Favorite"

    author = fields.Object('User')
    """The user who saved this favorite.

    That is, this property is the user who saved the target asset as a favorite,
    not the creator of that asset.

    :attrtype:`User`

    """

    id = fields.Field()
    """A URI that serves as a globally unique identifier for the favorite."""

    in_reply_to = fields.Object('AssetRef', api_name='inReplyTo')
    """A reference to the target asset that has been marked as a favorite.

    :attrtype:`AssetRef`

    """

    published = fields.Datetime()
    """The time that the favorite was created, as a W3CDTF timestamp.

    :attrtype:`datetime`

    """

    url_id = fields.Field(api_name='urlId')
    """A string containing the canonical identifier that can be used to identify
    this favorite in URLs.

    This can be used to recognise where the same favorite is returned in response
    to different requests, and as a mapping key for an application's local data
    store.

    """

    def make_self_link(self):
        """Return the absolute URL of this favorite's own API resource."""
        return urljoin(typepad.client.endpoint, '/favorites/%s.json' % self.url_id)

    @property
    def xid(self):
        """Short alias for this favorite's canonical URL identifier (`url_id`)."""
        return self.url_id

    @classmethod
    def get_by_id(cls, id, **kwargs):
        """Return the favorite identified by the given ``tag:`` URI `id`."""
        # The url_id is everything after the final colon of the tag URI.
        url_id = id.rsplit(':', 1)[-1]
        return cls.get_by_url_id(url_id, **kwargs)

    @classmethod
    def get_by_url_id(cls, url_id, **kwargs):
        """Return the favorite with the given URL identifier."""
        if url_id == '':
            raise ValueError("An url_id is required")
        obj = cls.get('/favorites/%s.json' % url_id, **kwargs)
        # Fill these in locally so they are usable before the response arrives.
        obj.__dict__['url_id'] = url_id
        obj.__dict__['id'] = 'tag:api.typepad.com,2009:%s' % url_id
        return obj

    @classmethod
    def get_by_user_asset(cls, user_id, asset_id, **kwargs):
        """Return the favorite the given user saved of the given asset.

        NOTE(review): these asserts are stripped under ``python -O``, so the
        validation is advisory only.
        """
        assert re.match('^\w+$', user_id), "invalid user_id parameter given"
        assert re.match('^\w+$', asset_id), "invalid asset_id parameter given"
        # The compound identifier is asset first, then user.
        return cls.get('/favorites/%s:%s.json' % (asset_id, user_id),
            **kwargs)

    @classmethod
    def head_by_user_asset(cls, *args, **kwargs):
        """Perform a HEAD request for the favorite of the given user/asset pair."""
        fav = cls.get_by_user_asset(*args, **kwargs)
        return fav.head()
class FeedbackStatus(TypePadObject):

    """Flags controlling comment and trackback feedback on an asset."""

    allow_comments = fields.Field(api_name='allowComments')
    """`True` if new comments may be posted to the related asset, or `False` if no
    new comments are accepted."""

    allow_trackback = fields.Field(api_name='allowTrackback')
    """`True` if new trackback pings may be posted to the related asset, or
    `False` if no new pings are accepted."""

    show_comments = fields.Field(api_name='showComments')
    """`True` if comments should be displayed on the related asset's permalink
    page, or `False` if they should be hidden."""
class ImageLink(TypePadObject, _ImageResizer):

    """A link to an image.

    Images hosted by TypePad can be resized with image sizing specs. See
    the `url_template` field and `at_size` method.

    """

    height = fields.Field()
    """The height of the original image in pixels.

    If the height of the image is not available (for example, if the image isn't
    hosted on TypePad), this property will be omitted.

    """

    url = fields.Field()
    """The URL for the original, full size version of the image."""

    url_template = fields.Field(api_name='urlTemplate')
    """An URL template with which to build alternate sizes of this image.

    If present, replace the placeholder string ``{spec}`` with a valid sizing
    specifier to generate the URL for an alternate version of this image. This
    property is omitted if TypePad is unable to provide a scaled version of this
    image (for example, if the image isn't hosted on TypePad).

    """

    width = fields.Field()
    """The width of the original image in pixels.

    If the width of the image is not available (for example, if the image isn't
    hosted on TypePad), this property will be omitted.

    """

    # Legacy alias: older clients used `href` for the image URL.
    href = renamed_property(old='url', new='href')
class ObjectProperty(TypePadObject):

    """A description of one property of an API object type."""

    doc_string = fields.Field(api_name='docString')
    """A human-readable description of this property."""

    name = fields.Field()
    """The name of the property."""

    type = fields.Field()
    """The name of the type of this property."""
class ObjectType(TypePadObject):

    """A description of an API object type and its properties."""

    name = fields.Field()
    """The name of this object type.

    If this is an anonymous type representing the request or response of an action
    endpoint, this property is omitted.

    """

    parent_type = fields.Field(api_name='parentType')
    """The name of the parent type.

    This property is omitted if this object type has no parent type.

    """

    properties = fields.List(fields.Object('ObjectProperty'))
    """The properties belonging to objects of this object type.

    :attrtype:`list of ObjectProperty`

    """
class PostByEmailAddress(TypePadObject):

    """A private address for creating posts via email."""

    email_address = fields.Field(api_name='emailAddress')
    """A private email address for posting via email."""
class PublicationStatus(TypePadObject):

    """A container for the flags that represent an asset's publication status.

    Publication status is currently represented by two flags: published and
    spam. The published flag is false when an asset is held for moderation,
    and can be set to true to publish the asset. The spam flag is true when
    TypePad's spam filter has determined that an asset is spam, or when the
    asset has been marked as spam by a moderator.

    """

    # NOTE(review): the field exposed here is `draft`, the inverse of the
    # "published" flag described in the class docstring above.
    draft = fields.Field()
    """`True` if this asset is private (not yet published), or `False` if it has
    been published."""

    publication_date = fields.Field(api_name='publicationDate')
    """The time at which the related asset was (or will be) published, as a W3CDTF
    timestamp.

    If the related asset has been scheduled to be posted later, this property's
    timestamp will be in the future.

    """
class Relationship(TypePadObject):

    """The unidirectional relationship between a pair of entities.

    A Relationship can be between a user and a user (a contact relationship),
    or a user and a group (a membership). In either case, the relationship's
    status shows *all* the unidirectional relationships between the source and
    target entities.

    """

    created = fields.Dict(fields.Datetime())
    """A mapping of the relationship types present between the source and target
    objects to the times those types of relationship were established.

    The keys of the map are the relationship type URIs present in the
    relationship's `status` property; the values are W3CDTF timestamps for the
    times those relationship edges were created.

    :attrtype:`dict of datetime`

    """

    id = fields.Field()
    """A URI that serves as a globally unique identifier for the relationship."""

    source = fields.Object('Entity')
    """The source entity of the relationship.

    :attrtype:`Entity`

    """

    status = fields.Object('RelationshipStatus')
    """An object describing all the types of relationship that currently exist
    between the source and target objects.

    :attrtype:`RelationshipStatus`

    """

    status_obj = fields.Link('RelationshipStatus', api_name='status')
    """Get the status information for the selected relationship, including its
    types.

    PUT: Change the status information for the selected relationship, including
    its types.

    :attrtype:`RelationshipStatus`

    """

    target = fields.Object('Entity')
    """The target entity of the relationship.

    :attrtype:`Entity`

    """

    url_id = fields.Field(api_name='urlId')
    """A string containing the canonical identifier that can be used to identify
    this object in URLs.

    This can be used to recognise where the same relationship is returned in
    response to different requests, and as a mapping key for an application's
    local data store.

    """

    def make_self_link(self):
        """Return the absolute URL of this relationship's own API resource."""
        return urljoin(typepad.client.endpoint, '/relationships/%s.json' % self.url_id)

    @property
    def xid(self):
        """Short alias for this relationship's canonical URL identifier (`url_id`)."""
        return self.url_id

    @classmethod
    def get_by_id(cls, id, **kwargs):
        """Return the relationship identified by the given ``tag:`` URI `id`."""
        # The url_id is everything after the final colon of the tag URI.
        url_id = id.rsplit(':', 1)[-1]
        return cls.get_by_url_id(url_id, **kwargs)

    @classmethod
    def get_by_url_id(cls, url_id, **kwargs):
        """Return the relationship with the given URL identifier."""
        if url_id == '':
            raise ValueError("An url_id is required")
        obj = cls.get('/relationships/%s.json' % url_id, **kwargs)
        # Fill these in locally so they are usable before the response arrives.
        obj.__dict__['url_id'] = url_id
        obj.__dict__['id'] = 'tag:api.typepad.com,2009:%s' % url_id
        return obj

    def _rel_type_updater(uri):
        """Build a method that replaces the relationship's types with `uri`
        (or clears them when `uri` is None) via a PUT to the status endpoint."""
        def update(self):
            # Fetch un-batched so the PUT happens immediately.
            rel_status = RelationshipStatus.get(self.status_obj._location, batch=False)
            if uri:
                rel_status.types = [uri]
            else:
                rel_status.types = []
            rel_status.put()
        return update

    block = _rel_type_updater("tag:api.typepad.com,2009:Blocked")
    unblock = _rel_type_updater(None)
    leave = _rel_type_updater(None)

    def _rel_type_checker(uri):
        """Build a predicate method testing whether `uri` is among the
        relationship's current types."""
        def has_edge_with_uri(self):
            return uri in self.status.types
        return has_edge_with_uri

    is_member = _rel_type_checker("tag:api.typepad.com,2009:Member")
    is_admin = _rel_type_checker("tag:api.typepad.com,2009:Admin")
    is_blocked = _rel_type_checker("tag:api.typepad.com,2009:Blocked")
class RelationshipStatus(TypePadObject):

    """A representation of just the relationship types of a relationship,
    without the associated endpoints."""

    types = fields.List(fields.Field())
    """A list of relationship type URIs describing the types of the related
    relationship.

    :attrtype:`list`

    """
class UserBadge(TypePadObject):

    """A badge that a user has won, with the time it was earned."""

    badge = fields.Object('Badge')
    """The badge that was won.

    :attrtype:`Badge`

    """

    earned_time = fields.Field(api_name='earnedTime')
    """The time that the user earned the badge given in `badge`."""
class UserProfile(TypePadObject):

    """Additional profile information about a TypePad user.

    This additional information is useful when showing information about a
    TypePad account directly, but is generally not required when linking to
    an ancillary TypePad account, such as the author of a post.

    """

    about_me = fields.Field(api_name='aboutMe')
    """The user's long description or biography, as a free-form string they
    provided."""

    avatar_link = fields.Object('ImageLink', api_name='avatarLink')
    """A link to an image representing this user.

    :attrtype:`ImageLink`

    """

    display_name = fields.Field(api_name='displayName')
    """The user's chosen display name."""

    email = fields.Field()
    """The user's email address.

    This property is only provided for authenticated requests if the user has
    shared it with the authenticated application, and the authenticated user is
    allowed to view it (as with administrators of groups the user has joined). In
    all other cases, this property is omitted.

    """

    follow_frame_content_url = fields.Field(api_name='followFrameContentUrl')
    """The URL of a widget that, when rendered in an ``iframe``, allows viewers to
    follow this user.

    Render this widget in an ``iframe`` 300 pixels wide and 125 pixels high.

    """

    gender = fields.Field()
    """The user's gender, as they provided it.

    This property is only provided for authenticated requests if the user has
    shared it with the authenticated application, and the authenticated user is
    allowed to view it (as with administrators of groups the user has joined). In
    all other cases, this property is omitted.

    """

    homepage_url = fields.Field(api_name='homepageUrl')
    """The address of the user's homepage, as a URL they provided.

    This property is omitted if the user has not provided a homepage.

    """

    id = fields.Field()
    """The URI from the related `User` object's `id` property."""

    interests = fields.List(fields.Field())
    """A list of interests provided by the user and displayed on their profile
    page.

    :attrtype:`list`

    """

    location = fields.Field()
    """The user's location, as a free-form string they provided."""

    membership_management_page_url = fields.Field(api_name='membershipManagementPageUrl')
    """The URL of a page where this user can manage their group memberships.

    If this is not the authenticated user's UserProfile object, this property is
    omitted.

    """

    preferred_username = fields.Field(api_name='preferredUsername')
    """The name the user has chosen for use in the URL of their TypePad profile
    page.

    This property can be used to select this user in URLs, although it is not a
    persistent key, as the user can change it at any time.

    """

    profile_edit_page_url = fields.Field(api_name='profileEditPageUrl')
    """The URL of a page where this user can edit their profile information.

    If this is not the authenticated user's UserProfile object, this property is
    omitted.

    """

    profile_page_url = fields.Field(api_name='profilePageUrl')
    """The URL of the user's TypePad profile page."""

    url_id = fields.Field(api_name='urlId')
    """The canonical identifier from the related `User` object's `url_id`
    property."""

    def make_self_link(self):
        """Return the absolute URL of this profile's own API resource."""
        return urljoin(typepad.client.endpoint, '/users/%s/profile.json' % self.url_id)

    @property
    def xid(self):
        """Short alias for this profile's canonical URL identifier (`url_id`)."""
        return self.url_id

    @classmethod
    def get_by_id(cls, id, **kwargs):
        """Return the profile identified by the given ``tag:`` URI `id`."""
        # The url_id is everything after the final colon of the tag URI.
        url_id = id.rsplit(':', 1)[-1]
        return cls.get_by_url_id(url_id, **kwargs)

    @classmethod
    def get_by_url_id(cls, url_id, **kwargs):
        """Returns the `UserProfile` instance with the given URL identifier."""
        if url_id == '':
            raise ValueError("An url_id is required")
        prof = cls.get('/users/%s/profile.json' % url_id, **kwargs)
        # Fill these in locally so they are usable before the response arrives.
        prof.__dict__['url_id'] = url_id
        prof.__dict__['id'] = 'tag:api.typepad.com,2009:%s' % url_id
        return prof

    @property
    def user(self):
        """Returns a `User` instance for the TypePad member whose
        `UserProfile` this is."""
        return find_by_name('User').get_by_url_id(self.url_id)
class VideoLink(TypePadObject, _VideoResizer):

    """A link to a web video."""

    embed_code = fields.Field(api_name='embedCode')
    """An opaque HTML fragment that, when embedded in a HTML page, provides an
    inline player for the video."""

    permalink_url = fields.Field(api_name='permalinkUrl')
    """**Editable.** The permalink URL for the video on its own site.

    When posting a new video, send only the `permalink_url` property; videos on
    supported sites will be discovered and the embed code generated automatically.

    """

    # Legacy alias: older clients used `html` for the embed code.
    html = renamed_property(old='html', new='embed_code')
class Audio(Asset):
    """An audio recording asset."""

    # API object-type keyword identifying Audio payloads.
    _class_object_type = "Audio"

    audio_link = fields.Object('AudioLink', api_name='audioLink')
    """A link to the audio stream that is this Audio asset's content.

    :attrtype:`AudioLink`
    """
class Comment(Asset):
    """A text comment posted in reply to some other asset."""

    # API object-type keyword identifying Comment payloads.
    _class_object_type = "Comment"

    in_reply_to = fields.Object('AssetRef', api_name='inReplyTo')
    """A reference to the asset that this comment is in reply to.

    :attrtype:`AssetRef`
    """
class Group(Entity):
    """A group that users can join, and to which users can post assets.

    TypePad API social applications are represented as groups.
    """

    _class_object_type = "Group"

    audio_assets = fields.Link(ListOf('Audio'), api_name='audio-assets')
    """POST: Create a new Audio asset within the selected group.

    :attrtype:`list of Audio`
    """

    avatar_link = fields.Object('ImageLink', api_name='avatarLink')
    """A link to an image representing this group.

    :attrtype:`ImageLink`
    """

    display_name = fields.Field(api_name='displayName')
    """The display name set by the group's owner."""

    events = fields.Link(ListOf('Event'))
    """Get a list of events describing actions performed in the selected group.

    :attrtype:`list of Event`
    """

    external_feed_subscriptions = fields.Link(ListOf('ExternalFeedSubscription'), api_name='external-feed-subscriptions')
    """Get a list of the group's active external feed subscriptions.

    :attrtype:`list of ExternalFeedSubscription`
    """

    link_assets = fields.Link(ListOf('Link'), api_name='link-assets')
    """POST: Create a new Link asset within the selected group.

    :attrtype:`list of Link`
    """

    memberships = fields.Link(ListOf('Relationship'))
    """Get a list of relationships between users and the selected group.

    :attrtype:`list of Relationship`
    """

    object_type = fields.Field(api_name='objectType')
    """A keyword describing the type of this object.

    For a group object, `object_type` will be ``Group``.
    """

    object_types = fields.List(fields.Field(), api_name='objectTypes')
    """**Deprecated.** An array of object type identifier URIs.

    :attrtype:`list`
    """

    photo_assets = fields.Link(ListOf('Photo'), api_name='photo-assets')
    """POST: Create a new Photo asset within the selected group.

    :attrtype:`list of Photo`
    """

    post_assets = fields.Link(ListOf('Post'), api_name='post-assets')
    """POST: Create a new Post asset within the selected group.

    :attrtype:`list of Post`
    """

    site_url = fields.Field(api_name='siteUrl')
    """The URL to the front page of the group website."""

    tagline = fields.Field()
    """A tagline describing the group, as set by the group's owner."""

    video_assets = fields.Link(ListOf('Video'), api_name='video-assets')
    """POST: Create a new Video asset within the selected group.

    :attrtype:`list of Video`
    """

    # Request/response payload types for this group's action endpoints.

    class _AddMemberPost(TypePadObject):
        user_id = fields.Field(api_name='userId')
        """The urlId of the user who is being added."""

    add_member = fields.ActionEndpoint(api_name='add-member', post_type=_AddMemberPost)

    class _BlockUserPost(TypePadObject):
        user_id = fields.Field(api_name='userId')
        """The urlId of the user who is being blocked."""

    block_user = fields.ActionEndpoint(api_name='block-user', post_type=_BlockUserPost)

    class _CreateExternalFeedSubscriptionPost(TypePadObject):
        feed_idents = fields.List(fields.Field(), api_name='feedIdents')
        """A list of identifiers of the initial set of feeds to be subscribed to.

        :attrtype:`list`
        """

        filter_rules = fields.List(fields.Field(), api_name='filterRules')
        """A list of rules for filtering notifications to this subscription; each rule
        is a query string using the search API's syntax.

        :attrtype:`list`
        """

        post_as_user_id = fields.Field(api_name='postAsUserId')
        """the urlId of the user who will own the assets and events posted into the
        group's stream by this subscription.

        The user must be an administrator of the group.
        """

    class _CreateExternalFeedSubscriptionResponse(TypePadObject):
        subscription = fields.Object('ExternalFeedSubscription')
        """The subscription object that was created.

        :attrtype:`ExternalFeedSubscription`
        """

    create_external_feed_subscription = fields.ActionEndpoint(api_name='create-external-feed-subscription', post_type=_CreateExternalFeedSubscriptionPost, response_type=_CreateExternalFeedSubscriptionResponse)

    class _RemoveMemberPost(TypePadObject):
        user_id = fields.Field(api_name='userId')
        """The urlId of the user who is being removed."""

    remove_member = fields.ActionEndpoint(api_name='remove-member', post_type=_RemoveMemberPost)

    class _UnblockUserPost(TypePadObject):
        user_id = fields.Field(api_name='userId')
        """The urlId of the user who is being unblocked."""

    unblock_user = fields.ActionEndpoint(api_name='unblock-user', post_type=_UnblockUserPost)

    def make_self_link(self):
        """Return the canonical API URL of this group's resource."""
        return urljoin(typepad.client.endpoint, '/groups/%s.json' % self.url_id)

    @property
    def xid(self):
        # Alias for ``url_id``.
        return self.url_id

    @classmethod
    def get_by_id(cls, id, **kwargs):
        """Return the `Group` for a ``tag:`` style identifier."""
        url_id = id.rsplit(':', 1)[-1]
        return cls.get_by_url_id(url_id, **kwargs)

    @classmethod
    def get_by_url_id(cls, url_id, **kwargs):
        """Return the `Group` instance with the given URL identifier."""
        if url_id == '':
            raise ValueError("An url_id is required")
        obj = cls.get('/groups/%s.json' % url_id, **kwargs)
        # Prime identifier attributes so they are available without waiting
        # for the deferred HTTP fetch.
        obj.__dict__['url_id'] = url_id
        obj.__dict__['id'] = 'tag:api.typepad.com,2009:%s' % url_id
        return obj
class Link(Asset):
    """A shared link to some URL."""

    # API object-type keyword identifying Link payloads.
    _class_object_type = "Link"

    target_url = fields.Field(api_name='targetUrl')
    """The URL that is the target of this link."""
class Page(Asset):
    """A page asset."""

    embedded_image_links = fields.List(fields.Object('ImageLink'), api_name='embeddedImageLinks')
    """A list of links to the images that are embedded within the content of this
    page.

    :attrtype:`list of ImageLink`
    """

    feedback_status = fields.Object('FeedbackStatus', api_name='feedbackStatus')
    """**Editable.** An object describing the comment and trackback behavior for
    this page.

    :attrtype:`FeedbackStatus`
    """

    filename = fields.Field()
    """**Editable.** The base name of the page, used to create the
    `permalink_url`."""
class Photo(Asset):
    """A photograph asset."""

    # API object-type keyword identifying Photo payloads.
    _class_object_type = "Photo"

    image_link = fields.Object('ImageLink', api_name='imageLink')
    """A link to the image that is this Photo asset's content.

    :attrtype:`ImageLink`
    """
class Post(Asset):
    """An entry in a blog."""

    # API object-type keyword identifying Post payloads.
    _class_object_type = "Post"

    categories = fields.List(fields.Field())
    """**Editable.** A list of categories associated with the post.

    :attrtype:`list`
    """

    embedded_audio_links = fields.List(fields.Object('AudioLink'), api_name='embeddedAudioLinks')
    """A list of links to the audio streams that are embedded within the content
    of this post.

    :attrtype:`list of AudioLink`
    """

    embedded_image_links = fields.List(fields.Object('ImageLink'), api_name='embeddedImageLinks')
    """A list of links to the images that are embedded within the content of this
    post.

    :attrtype:`list of ImageLink`
    """

    embedded_video_links = fields.List(fields.Object('VideoLink'), api_name='embeddedVideoLinks')
    """A list of links to the videos that are embedded within the content of this
    post.

    :attrtype:`list of VideoLink`
    """

    feedback_status = fields.Object('FeedbackStatus', api_name='feedbackStatus')
    """**Editable.** An object describing the comment and trackback behavior for
    this post.

    :attrtype:`FeedbackStatus`
    """

    filename = fields.Field()
    """**Editable.** The base name of the post to use when creating its
    `permalink_url`."""

    reblog_count = fields.Field(api_name='reblogCount')
    """The number of times this post has been reblogged by other people."""

    reblog_of = fields.Object('AssetRef', api_name='reblogOf')
    """A reference to a post of which this post is a reblog.

    :attrtype:`AssetRef`
    """
class User(Entity):
    """A TypePad user.

    This includes those who own TypePad blogs, those who use TypePad Connect
    and registered commenters who have either created a TypePad account or
    signed in with OpenID.
    """

    _class_object_type = "User"

    avatar_link = fields.Object('ImageLink', api_name='avatarLink')
    """A link to an image representing this user.

    :attrtype:`ImageLink`
    """

    badges = fields.Link(ListOf('UserBadge'))
    """Get a list of badges that the selected user has won.

    :attrtype:`list of UserBadge`
    """

    blogs = fields.Link(ListOf('Blog'))
    """Get a list of blogs that the selected user has access to.

    :attrtype:`list of Blog`
    """

    display_name = fields.Field(api_name='displayName')
    """The user's chosen display name."""

    elsewhere_accounts = fields.Link(ListOf('Account'), api_name='elsewhere-accounts')
    """Get a list of elsewhere accounts for the selected user.

    :attrtype:`list of Account`
    """

    email = fields.Field()
    """**Deprecated.** The user's email address.

    This property is only provided for authenticated requests if the user has
    shared it with the authenticated application, and the authenticated user is
    allowed to view it (as with administrators of groups the user has joined). In
    all other cases, this property is omitted.
    """

    events = fields.Link(StreamOf('Event'))
    """Get a list of events describing actions that the selected user performed.

    :attrtype:`list of Event`
    """

    favorites = fields.Link(ListOf('Favorite'))
    """Get a list of favorites that were listed by the selected user.

    POST: Create a new favorite in the selected user's list of favorites.

    :attrtype:`list of Favorite`
    """

    gender = fields.Field()
    """**Deprecated.** The user's gender, as they provided it.

    This property is only provided for authenticated requests if the user has
    shared it with the authenticated application, and the authenticated user is
    allowed to view it (as with administrators of groups the user has joined). In
    all other cases, this property is omitted.
    """

    interests = fields.List(fields.Field())
    """**Deprecated.** A list of interests provided by the user and displayed on
    the user's profile page.

    Use the `interests` property of the `UserProfile` object, which can be
    retrieved from the ``/users/{id}/profile`` endpoint.

    :attrtype:`list`
    """

    location = fields.Field()
    """**Deprecated.** The user's location, as a free-form string provided by
    them.

    Use the the `location` property of the related `UserProfile` object, which can
    be retrieved from the ``/users/{id}/profile`` endpoint.
    """

    memberships = fields.Link(ListOf('Relationship'))
    """Get a list of relationships that the selected user has with groups.

    :attrtype:`list of Relationship`
    """

    notifications = fields.Link(ListOf('Event'))
    """Get a list of events describing actions by users that the selected user is
    following.

    :attrtype:`list of Event`
    """

    object_type = fields.Field(api_name='objectType')
    """The keyword identifying the type of object this is.

    For a User object, `object_type` will be ``User``.
    """

    object_types = fields.List(fields.Field(), api_name='objectTypes')
    """**Deprecated.** An array of object type identifier URIs.

    :attrtype:`list`
    """

    preferred_username = fields.Field(api_name='preferredUsername')
    """The name the user has chosen for use in the URL of their TypePad profile
    page.

    This property can be used to select this user in URLs, although it is not a
    persistent key, as the user can change it at any time.
    """

    profile = fields.Link('UserProfile')
    """Get a more extensive set of user properties that can be used to build a
    user profile page.

    :attrtype:`UserProfile`
    """

    profile_page_url = fields.Field(api_name='profilePageUrl')
    """The URL of the user's TypePad profile page."""

    relationships = fields.Link(ListOf('Relationship'))
    """Get a list of relationships that the selected user has with other users,
    and that other users have with the selected user.

    :attrtype:`list of Relationship`
    """

    def make_self_link(self):
        """Return the canonical API URL of this user's resource."""
        return urljoin(typepad.client.endpoint, '/users/%s.json' % self.url_id)

    @property
    def xid(self):
        # Alias for ``url_id``.
        return self.url_id

    @classmethod
    def get_by_id(cls, id, **kwargs):
        """Return the `User` for a ``tag:`` style identifier."""
        url_id = id.rsplit(':', 1)[-1]
        return cls.get_by_url_id(url_id, **kwargs)

    @classmethod
    def get_by_url_id(cls, url_id, **kwargs):
        """Return the `User` instance with the given URL identifier."""
        if url_id == '':
            raise ValueError("An url_id is required")
        obj = cls.get('/users/%s.json' % url_id, **kwargs)
        # Prime identifier attributes so they are available without waiting
        # for the deferred HTTP fetch.
        obj.__dict__['url_id'] = url_id
        obj.__dict__['id'] = 'tag:api.typepad.com,2009:%s' % url_id
        return obj

    @classmethod
    def get_self(cls, **kwargs):
        """Returns a `User` instance representing the account as whom the
        client library is authenticating."""
        return cls.get('/users/@self.json', **kwargs)
class Video(Asset):
    """A video asset."""

    # API object-type keyword identifying Video payloads.
    _class_object_type = "Video"

    preview_image_link = fields.Object('ImageLink', api_name='previewImageLink')
    """A link to a preview image or poster frame for this video.

    This property is omitted if no such image is available.

    :attrtype:`ImageLink`
    """

    video_link = fields.Object('VideoLink', api_name='videoLink')
    """A link to the video that is this Video asset's content.

    :attrtype:`VideoLink`
    """
browser_upload = BrowserUploadEndpoint()
|
AMITYVILLE, N.Y.--(BUSINESS WIRE)-- Hi-Tech Pharmacal Co., Inc. (NAS: HITK) today announced that it is scheduled to make an investor presentation at the Needham 12th Annual Healthcare Conference Tuesday, April 29, 2013, at 10:40 a.m. Eastern Time.
The webcast will be archived within one hour after the live event and will be available for 90 days.
Hi-Tech Pharmacal is a specialty pharmaceutical company developing, manufacturing and marketing generic and branded products. The Company specializes in difficult to manufacture liquid and semi-solid dosage forms and produces a range of sterile ophthalmic, otic and inhalation products. The Company's Health Care Products Division is a leading developer and marketer of branded prescription and OTC products for the diabetes marketplace. Hi-Tech's ECR Pharmaceuticals subsidiary markets branded prescription products.
This press release contains certain future projections and forward-looking statements (statements which are not historical facts) with respect to the anticipated future performance of Hi-Tech made pursuant to the safe harbor provisions of the Private Securities Litigation Reform Act of 1995. Such future projections and forward-looking statements are not assurances, promises or guarantees and investors are cautioned that all future projections and forward-looking statements involve significant business, economic and competitive risks and uncertainties, many of which are beyond Hi-Tech's ability to control or estimate precisely, including, but not limited to, the impact of competitive products and pricing, product demand and market acceptance, new product development, the regulatory environment, including without limitation, reliance on key strategic alliances, availability of raw materials, fluctuations in operating results, loss of customers or employees, the possibility that legal proceedings may be instituted against Hi-Tech and other results and other risks detailed from time to time in Hi-Tech's filings with the Securities and Exchange Commission. The actual results will vary from the projected results and such variations may be material. These statements are based on management's current expectations and assumptions concerning the future performance of Hi-Tech and are naturally subject to uncertainty and changes in circumstances. No representations or warranties are made as to the accuracy or completeness of any of the information contained herein, including, but not limited to, any assumptions or projections contained herein or forward-looking statements based thereon. We caution you not to place undue reliance upon any such forward-looking statements which speak only as of the date made, except to the extent specifically dated as of an earlier date. 
Hi-Tech is under no obligation, and expressly disclaims any such obligation, to update, alter or correct any inaccuracies herein, whether as a result of new information, future events or otherwise.
The article Hi-Tech Pharmacal to Present at the Needham 12th Annual Healthcare Conference originally appeared on Fool.com.
|
# -*- coding: utf-8 -*-

# Scrapy settings for the spider project.
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     http://doc.scrapy.org/en/latest/topics/settings.html
#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'spider'

SPIDER_MODULES = ['spider.spiders']
NEWSPIDER_MODULE = 'spider.spiders'

# Disable cookies (enabled by default).
# BUG FIX: this setting was previously misspelled ``COOKIES_ENABLES``,
# which Scrapy silently ignores -- cookies stayed enabled.
COOKIES_ENABLED = False

ITEM_PIPELINES = {
    'spider.pipelines.SpiderPipeline': 300,
}

# Elasticsearch configuration.
ES_HOST = "localhost:9200"

# Configure maximum concurrent requests performed by Scrapy (default: 16).
CONCURRENT_REQUESTS = 2

LOG_LEVEL = 'INFO'

# Configure a delay for requests for the same website (default: 0).
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs.
DOWNLOAD_DELAY = 10
TIME_DELTA = 20
SCHEDULER_IDLE_BEFORE_CLOSE = 10

# Specify the host and port to use when connecting to Redis (optional).
REDIS_HOST = 'localhost'
REDIS_PORT = 6379

# The download delay setting will honor only one of:
CONCURRENT_REQUESTS_PER_DOMAIN = 2
CONCURRENT_REQUESTS_PER_IP = 2
|
1. How do I know if my research project requires institutional (i.e., IACUC) review?
Any use of live, vertebrate animals requires IACUC approval prior to the start of the project. If, however, your research will be conducted at another facility (i.e., one not affiliated with CWU), AND that facility has its own IACUC, you may not need additional approval from CWU's IACUC. Please check this website for further information or contact the IACUC Chairperson (Dr. Jason Irwin, 509-963-2884, iacuc@cwu.edu).
2. Do student research projects require institutional review and approval?
Yes. We have instituted an "expedited review" process to facilitate gaining approval in time to complete class projects within a typical quarter. If your research is for a class project, please select 'Expedited review requested' on the front page of the Clearance Form and state that this process is requested because the protocol is for a class project. We encourage you to submit your protocol as early as possible in the quarter to ensure a timely approval, as the committee may have questions or clarifications that need to be addressed before approval can be given.
3. How do I begin the IACUC review process?
Animal Use Clearance Forms are submitted via an online database system. Please see the "Forms" link on our homepage to begin the submission process. Submissions by student researchers will be sent to their faculty supervisor for approval prior to being reviewed by the IACUC.
4. I am on a tight timeline to get my research done. How long does the IACUC review process take?
During a normal quarter, you should hear back from the committee in approximately one week from the time your protocol is received by IACUC staff. Between quarters or during holiday periods, this may be longer, depending on IACUC member availability.
If you have requested Expedited Review, you should hear back from the committee within 3 business days from the time your protocol is received by IACUC staff.
Please note that when you hear from us, it may be with requests for clarification or modification rather than an immediate approval. You can facilitate prompt approval by ensuring that your Clearance Form is filled out completely and accurately, that the information provided is understandable by individuals outside of your field, and that your project is consistent with federal guidelines for animal care and use. If you have questions about federal guidelines, you can contact the IACUC chairperson (iacuc@cwu.edu) for information on materials relevant to your project.
5. What factors do IACUC members consider when reviewing my research proposal?
Of foremost concern is whether any animals will be harmed, or whether the potential for harm exists, as a part of your protocol. "Harm" includes, but is not limited to, physical pain and/or injury, death, and psychological distress. If animals may be harmed, you will need to document that alternative methods have been considered but are not appropriate, and why; that steps have been taken to minimize the harm; that the scientific merit of the project outweighs the harm to the animals; that any methods of euthanasia (if relevant) meet with federal guidelines; and that the minimum number of animals are used that will allow you to conduct your research with reasonable statistical power.
6. Is it possible that the IACUC will deny approval of my research proposal?
Yes. However, in the past we have worked with Principal Investigators when concerns have been noted, and typically been able to resolve the issues and eventually issue an approval. Your cooperation as part of this process is, of course, vital.
7. What if I want to change some of my study procedures after the IACUC has already approved my research?
You must notify the IACUC if you intend to make changes in your research protocol. Our website contains a Protocol Modification form, along with examples of what constitutes significant changes.
8. What if I need to add other investigators or research assistants after the IACUC has already approved my study?
The change or addition of personnel who will be responsible for working directly with animals may constitute a significant change. If you have questions about whether your personnel additions fall into this category please contact the IACUC chairperson.
9. Once I have received IACUC approval, how long is it valid?
IACUC approval is valid for one year. If your project will extend beyond that, you must submit a request for continuation prior to the one year anniversary of your approval. A request for continuation merely involves submitting another Clearance Form and selecting either 'Continuation (no change)' or 'Continuation (with change)', instead of 'Initial Submission', on the front page. The remainder of the form should be filled out as usual, but must include any changes to the original protocol, and incorporate any clarifications that were originally requested by the committee. Discussion should include the status of the project to date, as well.
10. Whom should I contact for more information about the IACUC review and approval process?
|
#!/usr/bin/env python
"""
harnesses
Common/reusable test harnesses
"""
import os
import unittest
import tempfile
import mock
import subprocess
from cirrus._2to3 import ConfigParser, to_str
from cirrus.configuration import Configuration
def _repo_directory():
    """Return the top-level directory of the enclosing git repository."""
    proc = subprocess.Popen(
        ['git', 'rev-parse', '--show-toplevel'],
        stdout=subprocess.PIPE,
    )
    stdout, _ = proc.communicate()
    return to_str(stdout.strip())
def write_cirrus_conf(config_file, **sections):
    """
    _write_cirrus_conf_

    Util to create a cirrus configuration file and populate it
    with the settings for the package, gitflow, pypi etc sections.

    sections should be a nested dict of the form
    {sectionname: {sectionsettings}}

    Eg:
    settings={'package': {'name': 'package_name'} }
    """
    parser = ConfigParser.RawConfigParser()
    for section_name in sections:
        parser.add_section(section_name)
        for option, value in sections[section_name].items():
            parser.set(section_name, option, value)
    with open(config_file, 'w') as handle:
        parser.write(handle)
class CirrusConfigurationHarness(object):
    """
    CirrusConfigurationHarness

    Test harness that generates a mock for load_configuration in the
    module that is being mocked.

    TODO: better location for this, plus maybe combine with
    generating the cirrus config file
    """
    def __init__(self, module_symbol, config_file, gitconf_content=None, **settings):
        # module_symbol: dotted path of the symbol to patch in the module
        #   under test (e.g. 'cirrus.somemodule.load_configuration').
        # config_file: path of the cirrus config file to load for the test.
        # gitconf_content: optional raw git-config output fed to the mocked
        #   gitconfig shell command; a minimal default is used when None.
        # NOTE(review): **settings is accepted but currently unused.
        self.module_symbol = module_symbol
        self.config_file = config_file
        self.gitconf_str = gitconf_content
        if self.gitconf_str is None:
            self.gitconf_str = "cirrus.credential-plugin=default"

    def setUp(self):
        """Start all patches and load the configuration under them."""
        # Patch HOME so user-level config lookups are isolated from the
        # developer's real environment.
        self.patch_environ = mock.patch.dict(os.environ, {'HOME': 'womp'})
        self.patch_environ.start()
        self.mock_config = mock.patch(self.module_symbol)
        self.load_mock = self.mock_config.start()
        # Stub out shell access to git config before loading configuration.
        self.patch_gitconfig = mock.patch('cirrus.gitconfig.shell_command')
        self.mock_gitconfig = self.patch_gitconfig.start()
        self.mock_gitconfig.return_value = self.gitconf_str
        self.config = Configuration(self.config_file)
        self.config.load()
        self.load_mock.return_value = self.config

    def tearDown(self):
        """Stop all patches started in setUp."""
        self.patch_environ.stop()
        self.mock_config.stop()
        self.patch_gitconfig.stop()
|
As the Summer leaves Fall, and the warmth slowly fades we welcome light woven sleeved fabrics into our wardrobes, easy to layer with leggings and jeans and welcoming more detailing.
Inspired by our Moroccan travels, the wonderful Rough Studios from the Netherlands takes us to days past, wandering the Medina, searching for that perfect rug, hours travelling over the Atlas Mountains through snow into deserts of shining golden sand.
Let Fall inspire your past travels with this beautiful range of dresses, shirts, pants and shorts for the Winter traveller.
The Moody Blues of Spell Designs City Lights Collection is teamed with our new Wrangler Range exclusive to our Newmarket and online store. The inspiring star printed skirts, shirts and dresses are the perfect blue in our eyes to take us through Fall & Winter.
Dress up with Lilya Silks, and layer with Lilya Furs. Don't be confined to Summer dressing with Spell - it's so easy to layer these pieces with new knits and cashmere we have in store to enjoy these colours and prints year round.
Are you the type to wear your daywear as evening wear? Look no further than our new season Amuse.
Featuring easy wearing tops, shirts and dresses fit to wear to work - these can be dressed down with our new season Wrangler and Matisse leather boots, or taken out for a night on the town teamed with black leather or heels.
These soft, hand-drawn prints are feminine and easy to wear on any occasion - and affordably priced as well.
Our Lilya Knits available in Grey, Pink or Moss make the perfect accessory with our felt hats and Hunter X Hunter Jewellery.
Talk to our stylists in store to mix and match key pieces this Fall & Winter to take you through the next six months.
|
# Generated by Django 2.0 on 2018-02-08 11:47
from django.db import migrations
def forwards(apps, schema_editor):
    """
    Change all Play objects into Work objects, and their associated
    data into WorkRole and WorkSelection models, then delete the Play.
    """
    Play = apps.get_model("spectator_events", "Play")
    Work = apps.get_model("spectator_events", "Work")
    WorkRole = apps.get_model("spectator_events", "WorkRole")
    WorkSelection = apps.get_model("spectator_events", "WorkSelection")

    for play in Play.objects.all():
        # Recreate the play as a Work of kind "play".
        work = Work.objects.create(
            kind="play", title=play.title, title_sort=play.title_sort
        )
        # Copy the creator roles across to the new Work.
        for r in play.roles.all():
            WorkRole.objects.create(
                creator=r.creator,
                work=work,
                role_name=r.role_name,
                role_order=r.role_order,
            )
        # Copy the event associations across to the new Work.
        for s in play.events.all():
            WorkSelection.objects.create(event=s.event, work=work, order=s.order)
        play.delete()
class Migration(migrations.Migration):
    # Converts legacy Play rows into the generic Work model, following the
    # preceding DancePiece-to-Work conversion.
    dependencies = [
        ("spectator_events", "0028_dancepieces_to_works"),
    ]

    operations = [
        # NOTE(review): no reverse function is supplied, so this migration
        # is irreversible.
        migrations.RunPython(forwards),
    ]
|
Combine two of Canada's most popular cities with a four-night premier train experience across Canada's prairies and mountain towns. Enjoy the elegance and comfort of VIA Rail's Prestige Class, complete with personalized concierge service, all-inclusive bar service and priority dining reservations. Large en suite cabins feature a leather couch and fold-down double bed, flat screen TV and fully-stocked minibar.
The Prestige Sleeper Class provides an exciting new level of comfort and service for travelers on the Canadian, one of the world's greatest train journeys. Each Prestige Sleep Cabin has a modular leather L-shaped couch by day and a Murphy bed for two by night facing the window with electric radiant heated cabin floors and walls that maintain a comfortable and constant temperature. Highlights of this Prestige Class includes: a personalized dedicated service by the Prestige Concierge; a spacious cabin, large windows and a private washroom with shower; flat-screen TV with video selection; minibar stocked with a selection of beverages and more.
Prices are correct at the time of publication, however are subject to change at time of booking. Rates are in USD Per person, double occupancy. Senior rates are available. Ask a rail specialist for details.
Cabins purchased on VIA Rail for a single traveler are Cabins for 1. For upgrade options, ask your rail specialist for details.
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 04 08:55:52 2017
@author: galad-loth
"""
import mxnet as mx
def SharedFeatureNet(data, conv_weight, conv_bias):
    """Build the shared convolutional feature tower.

    The same weight/bias symbols are passed in for both inputs so that the
    two branches of the network share parameters (a siamese tower).
    """
    # (kernel, pad, num_filter, apply a 3x3/stride-2 max-pool afterwards)
    layer_cfg = [
        ((7, 7), (3, 3), 24, True),
        ((5, 5), (2, 2), 64, True),
        ((3, 3), (1, 1), 96, False),
        ((3, 3), (1, 1), 96, False),
        ((3, 3), (1, 1), 64, True),
    ]
    net = data
    for idx, (kernel, pad, nfilter, with_pool) in enumerate(layer_cfg):
        net = mx.sym.Convolution(data=net, kernel=kernel, stride=(1, 1),
                                 pad=pad, num_filter=nfilter,
                                 weight=conv_weight[idx],
                                 bias=conv_bias[idx],
                                 name="conv" + str(idx))
        net = mx.sym.Activation(data=net, act_type="relu",
                                name="relu" + str(idx))
        if with_pool:
            net = mx.sym.Pooling(data=net, kernel=(3, 3), pool_type="max",
                                 stride=(2, 2), name="maxpool" + str(idx))
    return net
def match_net_symbol():
    """Assemble the full siamese match network symbol.

    Two inputs pass through a shared convolutional tower and a shared
    bottleneck projection; the concatenated features feed a small
    fully-connected metric head ending in a 2-way softmax.
    """
    dim_bottleneck = 256

    data1 = mx.sym.Variable("data1")
    data2 = mx.sym.Variable("data2")

    # Convolution parameters shared between the two branches.
    conv_weight = [mx.sym.Variable('conv' + str(i) + '_weight') for i in range(5)]
    conv_bias = [mx.sym.Variable('conv' + str(i) + '_bias') for i in range(5)]

    tower1 = SharedFeatureNet(data1, conv_weight, conv_bias)
    tower2 = SharedFeatureNet(data2, conv_weight, conv_bias)

    # Shared bottleneck projection. NOTE: the "botteneck" spelling is kept
    # as-is to preserve the existing parameter/layer names.
    botteneck_weights = mx.sym.Variable("botteneck_weights")
    botteneck_bias = mx.sym.Variable("botteneck_bias")
    feat1 = mx.sym.FullyConnected(data=tower1, num_hidden=dim_bottleneck,
                                  weight=botteneck_weights,
                                  bias=botteneck_bias,
                                  name="botteneck")
    feat2 = mx.sym.FullyConnected(data=tower2, num_hidden=dim_bottleneck,
                                  weight=botteneck_weights,
                                  bias=botteneck_bias,
                                  name="botteneck")

    joined = mx.sym.Concat(feat1, feat2, dim=1, name='conv_res')

    # Metric head: two hidden layers, then match / no-match classification.
    net = mx.sym.FullyConnected(data=joined, num_hidden=256, name="fc1")
    net = mx.sym.Activation(data=net, act_type="relu", name="fc1_relu")
    net = mx.sym.FullyConnected(data=net, num_hidden=256, name="fc2")
    net = mx.sym.Activation(data=net, act_type="relu", name="fc2_relu")
    net = mx.sym.FullyConnected(data=net, num_hidden=2, name="fc3")
    return mx.symbol.Softmax(data=net, name="softmax")
if __name__ == "__main__":
    # BUG FIX: the original called MatchNetSymbol(), which is not defined
    # anywhere in this module (NameError at runtime); the builder function
    # is match_net_symbol().
    matchnet = match_net_symbol()
    matchnet_ex = matchnet.simple_bind(ctx=mx.cpu(), data1=(50, 1, 64, 64),
                                       data2=(50, 1, 64, 64),
                                       softmax_label=(50,))
|
Contains font glyphs prerendered into texture atlas.
Create GlyphCache object with sufficient size and then call AbstractFont::createGlyphCache() to fill it with glyphs.
See Renderer for information about text rendering.
This class supports the GlyphCacheFeature::ImageDownload (and thus calling image()) only on desktop OpenGL, due to using GL::Texture::image(), which is not available on OpenGL ES platforms.
All glyph parameters are saved relative to originalSize, although the actual glyph cache texture has the given size. Glyph padding can be used to account for e.g. glyph shadows.
Same as calling the above with originalSize and size the same.
Sets internal texture format to red channel only. On desktop OpenGL requires ARB_texture_rg (also part of OpenGL ES 3.0 and WebGL 2), on ES2 unconditionally uses GL::TextureFormat::Luminance. This is done for consistency with GL::pixelFormat(), which unconditionally returns GL::PixelFormat::Luminance for PixelFormat::R8Unorm. See GlyphCache(GL::TextureFormat, const Vector2i&, const Vector2i&) for an alternative.
|
import sys
from Manifest import Manifest
from ProjectAnalizer import ProjectAnalizer
from os.path import dirname, basename, isdir, join, realpath
__author__ = 'Samuel Flores'
class ManifestAnalizer:
    """Analyze every project referenced by a repo manifest.

    Builds one ProjectAnalizer per manifest project and answers summary
    queries about which projects have complete Android.mk build files.
    """

    def __init__(self, manifest_p, root=None):
        self.manifest = manifest_p
        if root:
            self.root = root
        else:
            try:
                self.root = self.locate_root()
            except EnvironmentError:
                print('cant find .repo dir', file=sys.stderr)
                sys.exit(-1)
        self.project_l = self.get_projects(manifest_p)
        # One ProjectAnalizer per manifest project, all rooted at the repo top.
        self.analizer_l = [ProjectAnalizer(p, self.root) for p in self.project_l]
        self.missing_buildfile = []
        self.missing_module = []

    def get_summary(self):
        """Return a human-readable completeness breakdown of all projects."""
        total = len(self.project_l)
        complete = len(self.get_complete())
        missing_file = len(self.get_missing_buildfile())
        incomplete = total - complete - missing_file
        return ('Total projects: ' + str(total)
                + '\nComplete: ' + str(complete)
                + '\nWith Android.mk but incomplete: ' + str(incomplete)
                + '\nMissing Android.mk: ' + str(missing_file))

    # TODO: optimize this
    def get_missing_buildfile(self):
        """Analyzers for projects that have no Android.mk at all."""
        return [pa for pa in self.analizer_l if not pa.buildFile]

    def get_incomplete(self):
        """Analyzers with an Android.mk but no module name extracted from it."""
        return [pa for pa in self.analizer_l
                if pa.buildFile and not pa.module_name]

    def get_complete(self):
        """Analyzers whose project yielded a module name."""
        return [pa for pa in self.analizer_l if pa.module_name]

    def get_analizer_for_project(self, project_name):
        """Return the analyzer for *project_name*, or None if absent."""
        return next((pa for pa in self.analizer_l
                     if pa.project.name == project_name), None)

    # look for ".repo" in parent directory
    # thanks to autopatch
    def locate_root(self):
        """Walk upward from the manifest until a '.repo' directory is found.

        Raises EnvironmentError when the filesystem root is reached first.
        """
        lookdir = realpath(dirname(self.manifest))
        while not isdir(join(lookdir, '.repo')):
            parent = dirname(lookdir)
            if parent == lookdir:
                # dirname() is a fixed point only at the root: .repo not found.
                raise EnvironmentError
            lookdir = parent
        print('found repo top at', lookdir)
        return lookdir

    def get_projects(self, manifest_p):
        """Parse the manifest file and return its list of projects."""
        return Manifest(manifest_p).proj_list
|
Ask and You Shall Receive: What did you ask for when you got up this morning?
Life Simplified!: Simplifying Lives Globally . . .
|
# Copyright (c) 2013-2015 University Corporation for Atmospheric Research/Unidata.
# Distributed under the terms of the MIT License.
# SPDX-License-Identifier: MIT
"""Read upper air data from the Wyoming archives."""
from io import StringIO
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
from .._tools import get_wind_components
from ..http_util import HTTPEndPoint
class WyomingUpperAir(HTTPEndPoint):
    """Download and parse data from the University of Wyoming's upper air archive."""

    def __init__(self):
        """Set up endpoint."""
        super(WyomingUpperAir, self).__init__('http://weather.uwyo.edu/cgi-bin/sounding')

    @classmethod
    def request_data(cls, time, site_id, **kwargs):
        r"""Retrieve upper air observations from the Wyoming archive.

        Parameters
        ----------
        time : datetime
            The date and time of the desired observation.
        site_id : str
            The three letter ICAO identifier of the station for which data should be
            downloaded.
        kwargs
            Arbitrary keyword arguments to use to initialize source

        Returns
        -------
        :class:`pandas.DataFrame` containing the data

        """
        return cls()._get_data(time, site_id, **kwargs)

    def _get_data(self, time, site_id, region='naconf'):
        r"""Download and parse upper air observations from an online archive.

        Parameters
        ----------
        time : datetime
            The date and time of the desired observation.
        site_id : str
            The three letter ICAO identifier of the station for which data should be
            downloaded.
        region
            Region to request data from

        Returns
        -------
        :class:`pandas.DataFrame` containing the data

        """
        raw_data = self._get_data_raw(time, site_id, region)
        names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
        # Only six columns of the fixed-width table are read; indices 4 and 5
        # are intentionally skipped via usecols.
        body = pd.read_fwf(raw_data, skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=names)
        body['u_wind'], body['v_wind'] = get_wind_components(
            body['speed'], np.deg2rad(body['direction']))

        # Keep a row as long as at least one of T, Td, or wind fields is present.
        meaningful = ('temperature', 'dewpoint', 'direction', 'speed',
                      'u_wind', 'v_wind')
        body = body.dropna(subset=meaningful, how='all').reset_index(drop=True)

        # Attach a plain-dict unit registry for downstream consumers.
        body.units = {'pressure': 'hPa',
                      'height': 'meter',
                      'temperature': 'degC',
                      'dewpoint': 'degC',
                      'direction': 'degrees',
                      'speed': 'knot',
                      'u_wind': 'knot',
                      'v_wind': 'knot'}
        return body

    def _get_data_raw(self, time, site_id, region='naconf'):
        """Download data from the University of Wyoming's upper air archive.

        Parameters
        ----------
        time : datetime
            Date and time for which data should be downloaded
        site_id : str
            Site id for which data should be downloaded
        region : str
            The region in which the station resides. Defaults to `naconf`.

        Returns
        -------
        a file-like object from which to read the data

        """
        path = ('?region={region}&TYPE=TEXT%3ALIST'
                '&YEAR={time:%Y}&MONTH={time:%m}&FROM={time:%d%H}&TO={time:%d%H}'
                '&STNM={stid}').format(region=region, time=time, stid=site_id)
        resp = self.get_path(path)
        # A syntactically valid response with no data contains "Can't" in its text.
        if resp.text.find('Can\'t') != -1:
            raise ValueError(
                'No data available for {time:%Y-%m-%d %HZ} from region {region} '
                'for station {stid}.'.format(time=time, region=region,
                                             stid=site_id))
        soup = BeautifulSoup(resp.text, 'html.parser')
        return StringIO(soup.find_all('pre')[0].contents[0])
|
You have Three options to receive your shipment inside Oman. You can edit your shipment details on your Account Profile under Account Details Section.
We would only ship to the nearest office next to your location as you selected in your Account Profile under Account Details section. Here are the office's details in Oman with their contact.
If you chose this option, please enter your full address details with your contact number.
Shipping is available to GCC countries, the USA and the UK.
|
# -*- coding: utf-8 -*-
'''
The pkgbuild state is the front of Salt package building backend. It
automatically
.. versionadded:: 2015.8.0
.. code-block:: yaml
salt_2015.5.2:
pkgbuild.built:
- runas: thatch
- results:
- salt-2015.5.2-2.el7.centos.noarch.rpm
- salt-api-2015.5.2-2.el7.centos.noarch.rpm
- salt-cloud-2015.5.2-2.el7.centos.noarch.rpm
- salt-master-2015.5.2-2.el7.centos.noarch.rpm
- salt-minion-2015.5.2-2.el7.centos.noarch.rpm
- salt-ssh-2015.5.2-2.el7.centos.noarch.rpm
- salt-syndic-2015.5.2-2.el7.centos.noarch.rpm
- dest_dir: /tmp/pkg
- spec: salt://pkg/salt/spec/salt.spec
- template: jinja
- deps:
- salt://pkg/salt/sources/required_dependency.rpm
- tgt: epel-7-x86_64
- sources:
- salt://pkg/salt/sources/logrotate.salt
- salt://pkg/salt/sources/README.fedora
- salt://pkg/salt/sources/salt-2015.5.2.tar.gz
- salt://pkg/salt/sources/salt-2015.5.2-tests.patch
- salt://pkg/salt/sources/salt-api
- salt://pkg/salt/sources/salt-api.service
- salt://pkg/salt/sources/salt-master
- salt://pkg/salt/sources/salt-master.service
- salt://pkg/salt/sources/salt-minion
- salt://pkg/salt/sources/salt-minion.service
- salt://pkg/salt/sources/saltpkg.sls
- salt://pkg/salt/sources/salt-syndic
- salt://pkg/salt/sources/salt-syndic.service
- salt://pkg/salt/sources/SaltTesting-2015.5.8.tar.gz
/tmp/pkg:
pkgbuild.repo
'''
# Import python libs
from __future__ import absolute_import, print_function
import errno
import logging
import os
# Import salt libs
import salt.utils
from salt.ext import six
log = logging.getLogger(__name__)
def _get_missing_results(results, dest_dir):
'''
Return a list of the filenames specified in the ``results`` argument, which
are not present in the dest_dir.
'''
try:
present = set(os.listdir(dest_dir))
except OSError as exc:
if exc.errno == errno.ENOENT:
log.debug(
'pkgbuild.built: dest_dir \'{0}\' does not exist'
.format(dest_dir)
)
elif exc.errno == errno.EACCES:
log.error(
'pkgbuilt.built: cannot access dest_dir \'{0}\''
.format(dest_dir)
)
present = set()
return sorted(set(results).difference(present))
def built(name,
          runas,
          dest_dir,
          spec,
          sources,
          tgt,
          template=None,
          deps=None,
          env=None,
          results=None,
          force=False,
          always=None,
          saltenv='base',
          log_dir='/var/log/salt/pkgbuild'):
    '''
    Ensure that the named package is built and exists in the named directory

    name
        The name to track the build, the name value is otherwise unused

    runas
        The user to run the build process as

    dest_dir
        The directory on the minion to place the built package(s)

    spec
        The location of the spec file (used for rpms)

    sources
        The list of package sources

    tgt
        The target platform to run the build on

    template
        Run the spec file through a templating engine

        .. versionchanged:: 2015.8.2
            This argument is now optional, allowing for no templating engine to
            be used if none is desired.

    deps
        Packages required to ensure that the named package is built
        can be hosted on either the salt master server or on an HTTP
        or FTP server.  Both HTTPS and HTTP are supported as well as
        downloading directly from Amazon S3 compatible URLs with both
        pre-configured and automatic IAM credentials

    env
        A dictionary of environment variables to be set prior to execution.
        Example:

        .. code-block:: yaml

            - env:
                DEB_BUILD_OPTIONS: 'nocheck'

        .. warning::

            The above illustrates a common PyYAML pitfall, that **yes**,
            **no**, **on**, **off**, **true**, and **false** are all loaded as
            boolean ``True`` and ``False`` values, and must be enclosed in
            quotes to be used as strings. More info on this (and other) PyYAML
            idiosyncrasies can be found :doc:`here
            </topics/troubleshooting/yaml_idiosyncrasies>`.

    results
        The names of the expected rpms that will be built

    force : False
        If ``True``, packages will be built even if they already exist in the
        ``dest_dir``. This is useful when building a package for continuous or
        nightly package builds.

        .. versionadded:: 2015.8.2

    always
        If ``True``, packages will be built even if they already exist in the
        ``dest_dir``. This is useful when building a package for continuous or
        nightly package builds.

        .. deprecated:: 2015.8.2
            Use ``force`` instead.

    saltenv
        The saltenv to use for files downloaded from the salt filesever

    log_dir : /var/log/salt/pkgbuild
        Root directory for log files created from the build.  Logs will be
        organized by package name, version, OS release, and CPU architecture
        under this directory.

        .. versionadded:: 2015.8.2
    '''
    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': True}

    # 'always' was renamed to 'force' in 2015.8.2; honor the old name but warn.
    if always is not None:
        salt.utils.warn_until(
            'Carbon',
            'The \'always\' argument to the pkgbuild.built state has been '
            'deprecated, please use \'force\' instead.'
        )
        force = always

    if not results:
        ret['comment'] = '\'results\' argument is required'
        ret['result'] = False
        return ret

    # Accept a comma-separated string as well as a list of filenames.
    if isinstance(results, six.string_types):
        results = results.split(',')

    # Skip the build entirely when every expected package is already present.
    needed = _get_missing_results(results, dest_dir)
    if not force and not needed:
        ret['comment'] = 'All needed packages exist'
        return ret

    # Test mode: report what would be built without invoking the backend.
    if __opts__['test']:
        ret['result'] = None
        if force:
            ret['comment'] = 'Packages will be force-built'
        else:
            ret['comment'] = 'The following packages need to be built: '
            ret['comment'] += ', '.join(needed)
        return ret

    # Need the check for None here, if env is not provided then it falls back
    # to None and it is assumed that the environment is not being overridden.
    if env is not None and not isinstance(env, dict):
        ret['comment'] = ('Invalidly-formatted \'env\' parameter. See '
                          'documentation.')
        ret['result'] = False
        return ret

    # Delegate the actual build to the pkgbuild execution module.
    ret['changes'] = __salt__['pkgbuild.build'](
        runas,
        tgt,
        dest_dir,
        spec,
        sources,
        deps,
        env,
        template,
        saltenv,
        log_dir)

    # Re-scan dest_dir to verify every expected package was actually produced.
    needed = _get_missing_results(results, dest_dir)
    if needed:
        ret['comment'] = 'The following packages were not built: '
        ret['comment'] += ', '.join(needed)
        ret['result'] = False
    else:
        ret['comment'] = 'All needed packages were built'
    return ret
def repo(name, keyid=None, env=None):
    '''
    Make a package repository; the name is the directory to turn into a repo.
    This state is best used with onchanges linked to your package building
    states

    name
        The directory to find packages that will be in the repository

    keyid
        Optional Key ID to use in signing repository

    env
        A dictionary of environment variables to be utilized in creating the
        repository. Example:

        .. code-block:: yaml

            - env:
                OPTIONS: 'ask-passphrase'

        .. warning::

            The above illustrates a common PyYAML pitfall, that **yes**,
            **no**, **on**, **off**, **true**, and **false** are all loaded as
            boolean ``True`` and ``False`` values, and must be enclosed in
            quotes to be used as strings. More info on this (and other) PyYAML
            idiosyncrasies can be found :doc:`here
            </topics/troubleshooting/yaml_idiosyncrasies>`.

            Use of OPTIONS on some platforms, for example: ask-passphrase, will
            require gpg-agent or similar to cache passphrases.
    '''
    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': True}

    # Test mode: only report that the repo would be rebuilt.
    if __opts__['test'] is True:
        ret['result'] = None
        ret['comment'] = 'Package repo at {0} will be rebuilt'.format(name)
        return ret

    # Need the check for None here, if env is not provided then it falls back
    # to None and it is assumed that the environment is not being overridden.
    if env is not None and not isinstance(env, dict):
        ret['comment'] = ('Invalidly-formatted \'env\' parameter. See '
                          'documentation.')
        # Bug fix: this failure path previously returned result=True,
        # reporting success despite the invalid 'env' argument (the 'built'
        # state correctly sets result=False for the same condition).
        ret['result'] = False
        return ret

    __salt__['pkgbuild.make_repo'](name, keyid, env)
    ret['changes'] = {'refresh': True}
    return ret
|
I made a whirlwind trip to Bali last week. The occasion: an extended family wedding, along with an opportunity to indulge in some incredibly rare R&R with my mother and twin sister. Bali, as you may recall, is where my Indonesian mother and Dutch photographer father first met. At the time he was shooting images which were later exhibited in the Smithsonian and in Amsterdam's Van Gogh Museum. I've been to Bali numerous times, the first when I was six years old, but it's been over ten years since my last trip. My, how it has changed! It's now teeming with tourists and traffic but, thankfully, the island's inherent magic remains. We crammed in sightseeing, seaside frolicking and shopping in between the wedding festivities, venturing south to Uluwatu (pictured above and below) and inland to Ubud to check out the rice fields. Herewith, my adventures!
Uluwatu is near the southern tip of Bali, and its towering waves along with its sharp and shallow coral reef make a leisurely dip in the ocean pretty much impossible. But perhaps you wouldn't want to swim there anyway: legend has it the Indian Ocean sea goddess Nyai Roro Kiduyou ruthlessly rules the violent waves here, and since she favors green, wearing a green bathing suit is forbidden--lest you want to drown. Believe it or not, both my mother and my surfer husband, who is the least superstitious person I know, abide by this rule.
When it was time to hit the beach, we headed around the peninsula's corner to Finn's Beach Club and its calmer waters and white sandy shores--perfect for chilling out. To get down to this beach club, you have to take the equivalent of a small open-air funicular that slowly scales the steep cliff. The vibe is very Ibiza minus the thumping dance music and foam.
Some of Bali's rice fields are thousands of years old. This one is situated in the backyard of Ubud's Chedi Club, a sprawling and serene estate, where the wedding rehearsal dinner was being staged. The grounds are breathtaking (see below).
All dressed for the party in this gleaming, feather-light Asos top and skirt, double-fisted to boot! (I'm holding my sister's wine glass while she snapped away.) Find my Givenchy sandals here.
Drink from a coconut in Uluwatu.
My twin sister Coliena and I also bonded over early morning runs on the beach, late night chats and rummaging through each other's suitcases for more clothes to wear/borrow/steal. She's wearing an Asos sequin dress, featured here, and I'm sporting a cut-out Self Portrait frock, also highlighted here.
This was the last sunset of my trip--an awe-inspiring, forever memorable sight. I'll be back one day again, Bali!
You are so gorgeous and chic! Every outfit is perfect. I especially love the champagne colored crop top-skirt ASOS combo. So good. This makes me wanna shop and go to Bali.
Where is your monogram bag from?
Can you share the deets on your Asos duo? LOVE!
These are beautiful pics of your trip! Does the sizing from your silver ASOS ensemble run true to size?
love catching up on your blog, especially when you travel!
where is the white blouse from, the one with the cut out dots and front-tie?
Can you give us information about your creme/white tops and the pom pom shorts? Love!
Hi All, Thank you for all your comments. I updated the post with info that was requested. My black dress is by H&M and, sadly, no longer available. So glad you like!
Love the red patterned dress/tunic in the pic where you are drinking from the coconut! Can you tell us who makes it?
The Asos top and skirt , does not link to the site at all !
Hi Divya, Thanks for the heads-up on the Asos link. I fixed it.
One more detail please - the metallic shoes you have on w/black H&M dress, are they K Jacques or someone else? Thank you for any info!
Do you have any idea for similar pom pom shorts? love your shorts!
Where in bali did you buy the shirts?
The Peter Som top is still available!! What size did you purchase and how is the fit? Thank you.
Thanks for the wonderful photos. It brought back memories. I lived in Ubud for three years (long before eat-pray-love); Cafe Batujimbar was one of my favorite haunts on beach/big city shopping days.
Your wonderful photos brought back memories. I lived in Ubud for three years, before eat-pray-love. Cafe Batujimbar was one of our favorite haunts on beach/big city shopping days.
This post took my breath away and made me emotional, as my sister and I are about to embark on our own vacation in a few months. The outfits and scenery took my breath away. I love all the outfits; I felt like they gave me an idea of your state of mind, ahhhhhh. I love the glistening Asos dress. It is perfection.
I absolutely love this post and was just looking back at it again! The iomoi canvas tote has been on my Christmas list since I first saw you post about it. I was wondering if you could tell me what size you have it in?
|
#!/usr/bin/env python3
#
# Copyright (c) 2020 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from pathlib import Path
import sys
import subprocess
CHIP_ROOT_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__), '../..'))
def checkPythonVersion():
    """Abort with a clear error message unless running under Python 3."""
    if sys.version_info[0] < 3:
        print('Must use Python 3. Current version is ' + str(sys.version_info[0]))
        # Bug fix: use sys.exit instead of the site-provided exit() builtin,
        # which is not guaranteed to exist (e.g. under 'python -S').
        sys.exit(1)
def getGlobalTemplatesTargets():
    """Collect every .zap file under the directories that use default templates.

    Each target is a single-element argument list suitable for generate.py.
    """
    zap_roots = ('./examples', './src/darwin', './src/controller/data_model')
    targets = []
    for root in zap_roots:
        targets.extend([str(path)] for path in Path(root).rglob('*.zap'))
    return targets
def getSpecificTemplatesTargets():
    """Pair the controller-clusters.zap file with each dedicated template set.

    Each target is an argument list of the form [zap_file, '-t', templates.json].
    """
    zap_file = 'src/controller/data_model/controller-clusters.zap'
    template_jsons = (
        'src/app/common/templates/templates.json',
        'examples/chip-tool/templates/templates.json',
        'src/controller/python/templates/templates.json',
        'src/darwin/Framework/CHIP/templates/templates.json',
        'src/controller/java/templates/templates.json',
    )
    return [[zap_file, '-t', template] for template in template_jsons]
def getTargets():
    """Return all zap generation targets: default-template then specific-template runs."""
    return getGlobalTemplatesTargets() + getSpecificTemplatesTargets()
def main():
    """Regenerate code for every zap target, running from the repo root."""
    checkPythonVersion()
    # generate.py expects paths relative to the repository root.
    os.chdir(CHIP_ROOT_DIR)
    targets = getTargets()
    for target in targets:
        # Each target is the argument list for one generate.py invocation;
        # check_call raises if any generation step fails.
        subprocess.check_call(['./scripts/tools/zap/generate.py'] + target)
# Allow the module to be executed directly as a script.
if __name__ == '__main__':
    main()
|
The news comes in the annual State of the Cities Report to the Legislature. The percentage of the population being served by cities is up, while per capita revenues are down across the board. And with health care costs for city employees rising 15 percent a year, more service cuts are inevitable.
The cities and towns aren’t keeping up with the population boom, particularly since state voters capped property-tax growth and abolished the car-tab tax, said Mary Place, Yakima councilwoman and president of the Association of Washington Cities.
The survey said that cities face a $3.4 billion gap in meeting basic transportation needs over the next six years. Deferred maintenance has left streets and bridges in disrepair, and traffic is getting so bad it may choke off economic growth.
A very heart-rending report that misses the mark by a mile. The real public policy question not asked is whether Mansfield, home to 325 souls, should be an incorporated city with full-time city employees. There are lots of places in this country, and in Europe, where cities and towns have had to either unincorporate or combine with other communities. Should it be state policy that state tax monies be redistributed so that every city that loses population can continue to be a city for ever and ever? If so, what should the minimum city size be? What should the minimum number of paid full-time employees be?
Change is a fact of life. Sometimes it is hard to accept. When a city or town falls to a population of 2,500 is it too small to expect to survive? How about when it falls to a population of 500? Is 325 the lower limit? I have heard about rural town in another state that had a populations about 14 that offered itself for sale with the owner being able to choose to be mayor or police chief? Is that important so that sales tax money should be distributed to protect?
Once again, taking the word of a person on the taxpayer dole. Go to the Seattle mayor on the best day ever and ask if he has enough money. Go to the state governor with the same question…ANY governor. Now go to the most profitable buisinessman and lay the cards on the table…services that are essential (and not swimming pools) vs money available, federal, state and local, Ill bet the buisinessman could give you the ESSENTIAL seattle services for 1/3 – 1/2 the money.
In case you dont get it, the policeman will tell you he is understaffed, the Boeing machinist will tell you that there isnt enough hourly, the hospital nurse will say nurses are understaffed, it is the nature of the beast. Additionally you cannot expect a growing budget in a shrinking “town”.
Chuck, your comments are not only insensitive and ridiculous, but you are completely out of touch with reality. You need to pay these towns a visit. You just don’t get it.
And you’d lose that bet. Cities already contract out plenty of services; and businesses have tried to take over other essential services (e.g. schools, prisons) and have failed—whittled away services or gone bankrupt. Government is more efficient than you give it credit for, especially when you consider there’s no profit involved.
I think it’s interesting to see how the supposedly manly, self-sufficient rural areas of Washington are being exposed as the economically unsuccessful, handout-dependent areas of Washington, living off of the generosity of our successful, industrious cities.
And they’ve been able to do it because we prosperous city folk believe that what’s yours is yours . . . and what’s ours is yours, because we care about our rural neighbors and the health of our state.
Change is a fact of life. Sometimes it is hard to accept. When a city or town falls to a population of 2,500 is it too small to expect to survive?
I hate to say it but I think the right wingers have the edge on the argument here. These small towns generally lean against government; nearly all voted for I-695 and probably voted for every Eyman initiative there has been.
How useful is it for the urban voters to step in and tell them to raise their taxes and that I-695 which the small town voters enacted is hurting them? As much as the small towns may like more services, they are even more adement about the dreaded T word.
Ha. Yeah. That’s true also, especially when the multi billion dollar farm subsidy programs are considered. Farmers are some of the greatest recipients of government aid of any group.
I think it would be good public policy (and a good political move for the Democrats) to pass a sales tax equalization payments bill. After all, people in rural towns spend their money in the bigger towns and cities, so ‘their’ sales tax money goes elsewhere. It seems fair that if we’re going to use the sales tax as our major general revenue source, we should make sure that the benefits flow even to non-commercial regions.
Of course, there’s that teensy little problem of how to pay for it. The state government’s already running lean, and nobody wants to increase the sales tax even a tiny bit. Of course, Eastern Washington already gets a larger slice of the budget than the taxes it produces. But good luck trying to convince the East-side legislators to reallocate that revenue to give more to small-town rural folks.
I think this sort of attitude is really detrimental to the discussion, not because it doesn’t express a legitimate sentiment, but because the facts don’t really support this, at least not the facts we have.
The problem for the rural areas is the way taxation is applied. We have no income tax, which is really the best tax of economic activity, in the sense that if the population of an area producs $1m of value and are paid for it, they can be taxed and fund local services.
With a sales tax, you may have significant economic value produced by the people in this community, but in taxing the sales, not the income, the tax receipts go to whereever they spend their money. Smaller towns are not ideal retail locations for obvious reasons (better to be in a central area to draw many people), especially for large outfits like Circuit City, or what have you. I know someone is going to say “Ah ha! but what about town X with a big Wal Mart in it”, which is true for THAT TOWN, but for every town like that, there are 10 (or more) smaller towns with no major retail activity. Hence, no receipts, and no tax. However, those people _are_ paying taxes, just somewhere else.
The reality MAY BE that there is a redistribution of wealth needed, but at present it seems far more likely that this is a flaw in a state that relies on sales and property taxes over income taxes. Need more info.
I lived amongst these towns just last year. I lived on the east side for a year.
Usually when government contracts out in this state it involves the contractor paying “prevailing wage” that alone cripples any attempt at cost savings.
Edgar wrote: “The problem for the rural areas is the way taxation is applied. We have no income tax, which is really the best tax of economic activity, in the sense that if the population of an area producs $1m of value and are paid for it, they can be taxed and fund local services.
I agree with you. The sales tax is not only more regressive than the income tax (hurts the poor more than the rich), but it’s also not a stable basis on which to fund key services. That’s the problem with our state taxation system (no income tax, lots of other taxes to make up for it).
But the people most opposed to income taxes are rural/Republican voters (even if you couple with it massive decreases in property taxes and elimination of the state portion of the sales tax).
So the inevitable consequence of lack of support for income tax or any tax replacement for lost tax revenue from other forms of taxation is the destruction of the quality of life in the areas around WA state that receive more tax money than they generate – namely rural (Red/Republican) areas.
Chuck is right about Cities being forced to pay prevailing wage on capital projects as part of the problem.
So is the brain-fart that requires 1% for the arts in many municipalities. What does a street or infrastructure project have to do with the arts?? Artist freeloader lobby is alive!!!
Some rural counties have adopted an attitude of keep out the big box stores. This may work in the books like A Road to Nowhere and other anti-growth books but these authors don’t have to struggle to live in these small towns. Some Jefferson County residents take pride that there is no Wal-mart, Target etc. Just a Safeway & QFC. The reality is a HUGE % of residents burn up fossil fuels driving to Silverdale or Sequim where they gladly take our retail leakage.
The point is, SOME rural communities have self-inflicted revenue wounds by the land-use policies they have adopted.
Like I dunno – a tax reform summit. The guy’s gotta lot of time on his hands.
I frankly think a defiant property-tax increase now by state gov’t for local gov’t would be the right thing to do.
Chuck, are you referring to Josef’s proposal? you think that if Republicans try to increase property taxes, it will score them points vs. the Democrats??
“Few myths have more traction in Olympia — and Washington, D.C. — than the idea that private industry can do things cheaper and more efficiently than government. Our workers’ compensation system offers an excellent case study in why that’s a bogus assumption.
Now let me give you a tour of my western Washington rural community. Things may work differently in eastern Washington, but the small town near where I live does not own fire trucks–the Fire District owns the fire trucks, and the aid cars, and runs the district completely with volunteers. We have two fire halls, one in town and one about five miles out of town. There are fire hydrants in town and a big tank of water at the rural station. My small town was incorporated in 1906 and has had a static population of about 650 ever since. If our town un-incorporated, we would not lose the fire hall in town–but Timmy has affected our fire district, because it too is restrained by the his property tax reducing initiatives. People living in town have city water, and the city charges citizens in the city limits for the water they use–this income maintains the city’s well, and water delivery system. If the city went out of business a water district would have to be created. The city maintains the streets in town, but only minimally–there are not curbs and sidewalks everywhere, and at times there are lots of potholes. The main street is a state highway, so the maintenance of it is the state’s problem. If the city went out of existence the county would have to assume maintenance of the other roads (not streets) that intersect the state highway. The city provides a building for our library and pays the utilities required to keep the library lit, warm, and cool. The library costs are paid for by Timberland, a library district (also affected by Timmy’s initiatives)–so if the city went under we would probably still have a library. We have no hospital, but it really isn’t that far to go in a fire district aid car to Centralia, or Olympia for hospitalization. The cemetery is maintained by the city–they are able to cover most of the cost from the money they charge for opening and closing grave sites. 
If the city went out of existence, there would have to be a cemetery district created to take care of the cemetery, because our state has us over a barrel in that department–it is state law that a body can not be buried on private property–bodies must be buried in a cemetery. The town does have a police department, which could not survive if it weren’t for the money the local tribe pays the city to help with law enforcement. The tribe has a casino just over the Thurston County line–tribes agree to help with the additional pressure on local services crated by the location of their casinos. If the city went bust the county would have to send the sheriff’s department to handle law enforcement, and that could take up to thirty minutes.
It would probably be better for my local town to un-incorporate, and it certainly wouldn’t be the end of it, in fact it probably wouldn’t curb its growth at all (and it is starting to grow–all those folks in Oly want to live in a rural area). Just to the east is the largest un-incorporated entity I’ve seen in my 61 years, Rochester/Grand Mound. Rochester/Grand Mound has a school system as big as Centralia’s–has a sewer/water district, fire district, and I assume a cemetery district, as they have a cemetery. They have had so much crime that they persuaded Thurston County to put a district office in Rochester.
I do have a gun and I do investigate strange bumps in the night, I dispatch all those unwanted critters that do damage to my animals–but at times I do need a law enforcement officer–and since I live in un-incorporated territory, that’s at least thirty minutes away.
|
import os
import sys
import re
import math
from spacecharge import Grid3D
class Field_Parser3D:
    """3D field parser.

    Parses a whitespace-delimited field-map file where each line holds six
    columns: x y z Bx By Bz.  parse() maps accepted points onto Grid3D
    objects, dividing coordinates by 100 and field values by 10000
    (presumably cm -> m and Gauss -> Tesla -- TODO confirm with data source).
    """
    def __init__(self):
        """Create instance of the Field_Parser3D class."""
        self.__lines = []
    def __del__(self):
        del self.__lines
    ###########################################################################
    # Gets the limits of the file being parsed
    ###########################################################################
    def getLimits(self, filename):
        """Scan *filename* and return [xmin, xmax, ymin, ymax, zmin, zmax, numLines].

        The limits always include 0 (min/max accumulators start at 0),
        preserving the original clamping behavior.  The file is now closed
        deterministically via a context manager.
        """
        numLines = 0
        xmin, xmax, ymin, ymax, zmin, zmax = 0, 0, 0, 0, 0, 0
        with open(filename, "r") as infile:
            for line in infile:
                value = [float(token) for token in line.split()]
                # finding minimums and maximums of data to compute range
                xmax = max(xmax, value[0])
                ymax = max(ymax, value[1])
                zmax = max(zmax, value[2])
                xmin = min(xmin, value[0])
                ymin = min(ymin, value[1])
                zmin = min(zmin, value[2])
                numLines += 1
        print("Min and Max values: %s %s %s %s %s %s %s\n" % (xmin, xmax, ymin, ymax, zmin, zmax, numLines))
        return [xmin, xmax, ymin, ymax, zmin, zmax, numLines]
    ###########################################################################
    # Limits is a list of format [xmin,xmax,ymin,ymax,zmin,zmax,...]
    ###########################################################################
    def getRange(self, limits):
        """Return the spans [xmax-xmin, ymax-ymin, zmax-zmin] of *limits*."""
        spans = [limits[1] - limits[0], limits[3] - limits[2], limits[5] - limits[4]]
        print("Range of X,Y,Z values in data: %s %s %s" % (spans[0], spans[1], spans[2]))
        return spans
    ###############################################################################
    # gets the gridsize given the range of each variable
    # and the step of each variable
    ##############################################################################
    def getGridSize(self, range, step, usrLimits):
        """Return [nx, ny, nz]: grid points per axis, capped by the user limits.

        The parameter name *range* is kept for interface compatibility even
        though it shadows the builtin; the method therefore iterates over an
        explicit index tuple.  Unlike the original, *range* is no longer
        mutated in place (a hidden side effect on the caller's list).
        """
        usrSpans = [usrLimits[1] - usrLimits[0], usrLimits[3] - usrLimits[2], usrLimits[5] - usrLimits[4]]
        gridSize = []
        for i in (0, 1, 2):
            fromData = range[i] * 1.0 / step[i] + 1
            fromUser = usrSpans[i] * 1.0 / step[i] + 1
            # Keep the smaller of the data-derived and user-derived counts.
            gridSize.append(int(min(fromData, fromUser)))
        print("Grid Size [x,y,z]: %s" % (gridSize,))
        return gridSize
    ###############################################################################
    # Returns the coordinates in the grid given the rawNumbers
    # and the limits of each variable.
    ##############################################################################
    def getCoordinates(self, gridSize, step, rawNumbers, limits):
        """Map the (x, y, z) of *rawNumbers* to integer grid indices.

        *limits* supplies the per-axis minimum at even indices
        ([xmin, _, ymin, _, zmin, _]).  *gridSize* is unused but kept for
        interface compatibility.
        """
        coordinates = []
        for i in (0, 1, 2):
            # Same float expression as the original to preserve truncation behavior.
            coordinates.append(int(rawNumbers[i] * (1.0 / step[i]) - limits[2 * i] / step[i]))
        return coordinates
    #######################################################################
    # Checks to see if the given coordinates are within the range specified
    #######################################################################
    def checkLimits(self, arrayLimits, value):
        """Return True when value's (x, y, z) all lie within *arrayLimits*.

        Fix: the original returned None (not False) when x or y was out of
        range; a boolean is now always returned (same truthiness for callers).
        """
        return (arrayLimits[0] <= value[0] <= arrayLimits[1] and
                arrayLimits[2] <= value[1] <= arrayLimits[3] and
                arrayLimits[4] <= value[2] <= arrayLimits[5])
    ##########################################################################
    # Checks to see if the point is on the grid given the current step
    ##########################################################################
    def checkGrid(self, step, value):
        """Return True when every coordinate is a multiple of its step.

        Fix: the original returned after testing only the first axis and
        computed (then discarded) doubled copies of step/value; all three
        axes are now checked.  NOTE(review): float modulo can be inexact for
        non-integer steps -- confirm acceptable for the data in use.
        """
        for i in (0, 1, 2):
            if value[i] % step[i] != 0:
                return False
        return True
    ###############################################################################
    # Parameters
    # filename: name of the text file to be processed
    # xmin,xmax,ymin,ymax,zmin,zmax - user defined limits for the file being parsed
    # xstep,ystep,ztep - the step size for the parsing. (0.5 parses 0.0,0.5,1.0,1.5 etc.
    # while a 1.0 value will parse 0.0,1.0,2.0, etc.
    # All Grid sizes are user defined.
    ###############################################################################
    def parse(self, filename, xmin, xmax, ymin, ymax, zmin, zmax, xstep, ystep, zstep):
        """Parse *filename* and return [BX, BY, BZ, |B|, X, Y, Z].

        The first four entries are Grid3D objects holding the field
        components and magnitude (scaled by 1/10000); the last three are
        plain lists of accepted coordinates scaled by 1/100.
        """
        usrLimits = [xmin, xmax, ymin, ymax, zmin, zmax]
        limits = self.getLimits(filename)
        spans = self.getRange(limits)
        step = [xstep, ystep, zstep]
        # Computes the size of the grid given the user limits and the step
        gridSize = self.getGridSize(spans, step, usrLimits)
        numLines = limits[6]
        print("Number of lines in the file: %s\n" % numLines)
        # for now we will say that the size of the grid encompasses all datapoints
        print("GridSize %s %s %s" % (gridSize[0], gridSize[1], gridSize[2]))
        BXGrid = Grid3D(gridSize[0], gridSize[1], gridSize[2])
        BYGrid = Grid3D(gridSize[0], gridSize[1], gridSize[2])
        BZGrid = Grid3D(gridSize[0], gridSize[1], gridSize[2])
        fieldgrid3DMag = Grid3D(gridSize[0], gridSize[1], gridSize[2])
        XGrid = []
        YGrid = []
        ZGrid = []
        # Maps values from file to grid.
        with open(filename, "r") as infile:
            for line in infile:
                rawNumbers = [float(token) for token in line.split()]
                if self.checkGrid(step, rawNumbers) and self.checkLimits(usrLimits, rawNumbers):
                    coordinates = self.getCoordinates(gridSize, step, rawNumbers, usrLimits)
                    XGrid.append(rawNumbers[0] / 100.0)
                    YGrid.append(rawNumbers[1] / 100.0)
                    ZGrid.append(rawNumbers[2] / 100.0)
                    BXGrid.setValue(rawNumbers[3] / 10000.0, coordinates[0], coordinates[1], coordinates[2])
                    BYGrid.setValue(rawNumbers[4] / 10000.0, coordinates[0], coordinates[1], coordinates[2])
                    BZGrid.setValue(rawNumbers[5] / 10000.0, coordinates[0], coordinates[1], coordinates[2])
                    # Field magnitude |B| in the same scaled units.
                    getMag = ((rawNumbers[3]**2.0 + rawNumbers[4]**2.0 + rawNumbers[5]**2.0)**0.5) / 10000.0
                    fieldgrid3DMag.setValue(getMag, coordinates[0], coordinates[1], coordinates[2])
        return [BXGrid, BYGrid, BZGrid, fieldgrid3DMag, XGrid, YGrid, ZGrid]
|
A culture that seems to leave such room for freewill regarding marocain is very interesting! We would like to understand the speed meaning of these three days of dating.
Many Berbers speak French, but speed only speak Amazigh. A few months later we will marocain up with marocain brand new marocains, and see how they are dating once the effervescence of the moussem has passed. Do these honeymooners really have the freedom of dating? We want to have you on our speed to live together this unique annual event.
The money ugly dating site uk from our student jobs marocain personally cover these additional fees. Kahina Meziantwho is of Algerian Berber origin, sees in this dating marocain an opportunity to get closer to a dating she knows little of, but one which she feels close to. Students in journalism, passionate about cultural diversity. We speed want to support other projects.
Follow us on the path of the wedding moussem of Imilchil. What marocain does the dating Berberian culture have in a speed Moroccan society?
We are embarking on the marocain to the High Atlas mountains to meet the Berber traditions! Free online dating service with webcam, chat, and IM! Online dating maroc Dec 4 zawaj ta3arof fatayat habibti maroc gay speed dating sites pour ado de plusieurs dating artiste comdien comme hassan elfed et hannan.
Le Marrakech Dating, nouveau concept de speed dating au Maroc, propose quarante clibataires, de marocain ensemble quelques jours de vacances Marrakech.
Marketplace, everyone is speed to win the heart of dating ukraine girls on the dating marocains who are over 50 dating sites nz. Koolaid from now until the end of the age of people using. Speed dating la maghrbine: Speed est dans le titre A ceux et celles qui n ont pas test, estce que a vous dit?
Meet maroc and dating singles in Fes, Morocco on the worlds speed online speed dating site. Free online dating with webcam and IM! Apr 22, Video embeddedLe 14 fvrier dernier sur la pniche la Baleine Blanche. Whether you're a marocain, new in town, or just passing through, you'll be magocain to find something on Eventbrite that piques your.
It is good idea. It is ready to support you.COMMENTS (31) Absolutely with you it agree.
Thanks for the help in this question, can I too I can to you than that to help?Horaire des prieres: In it something is.
|
# -*- coding: utf-8 -*-
__author__ = 'ElenaSidorova'
import codecs
# Word-frequency and definition dictionaries; each file is expected to hold
# one entry per line (the 'freq' file carries extra columns -- see read_data).
dicts = [
    'freq.csv',
    'ozhegov.csv',
    'sharov.csv',
    'ushakov.csv',
    'zaliznyak.csv'
]
# Word lists of forms to reject -- presumably used as a blacklist; confirm
# against the consumer of load_dicts().
wrong_dicts = [
    'adjectives_ija_ends.csv',
    'perfect_verbs.csv'
]
# Word lists of forms to accept -- presumably a whitelist; confirm with caller.
ok_dicts = [
    'soft_sign_ends.csv'
]
# Known word prefixes.
prefixes = [
    'all_prefixes.csv'
]
class DictLoader(object):
    """Loads the word-list dictionary files configured at module level."""
    @classmethod
    def load_dicts(cls):
        """Read every dictionary group; return [named, wrong, ok, prefixes]."""
        return [
            cls.read_data(dicts),
            cls.read_data(wrong_dicts),
            cls.read_data(ok_dicts),
            cls.read_data(prefixes),
        ]
    @classmethod
    def read_data(cls, arr):
        """Read each UTF-8 file in *arr* into a lowercased list of lines.

        Files whose name contains 'freq' have extra columns; only the first
        token of each line (the word itself) is kept for those.
        """
        loaded = []
        for path in arr:
            with codecs.open(path, 'r', 'utf-8') as source:
                text = source.read().strip()
            words = text.replace(u'\r', u'').lower().split(u'\n')
            if 'freq' in path:
                words = [entry.split()[0] for entry in words]
            loaded.append(words)
        return loaded
# a = DictLoader()
# b = a.load_dicts()
|
Get FREE Eat24 Coupon $10, Eat24 Coupon $5, Eat24 coupon $7 at Coupon4all. Get now!
This site maintains and curates a list of currently active eat24 coupon codes. They are automatically discovered and tested for reliability so you don't have to deal with expired codes ever again.
Get 10 Yelp s and promo codes at CouponBirds. Click to enjoy the latest deals and coupons of Yelp Eat24 and save up to $10 when making purchase at checkout.
Eat24 Coupon Codes How to Redeem a Coupon Code at Eat24. Although there is no box to enter a coupon code at checkout, you can sign up for Eat24's email list.
|
# -*- coding: utf-8 -*-
################################################################################
# Copyright 2014, The Open Aggregator
# GNU General Public License, Ver. 3 (see docs/license.txt)
################################################################################
"""Model Spline File
Each line in a model spline file represents a polynomial segment in
log-probability space. The format is as follows::
spp1
<x>,<y0>,<y1>,<a0>,<a1>,<a2>
...
Each line describes a segment of a probability distribution of y,
conditional on x = ``<x>``. The segment spans from ``<y0>`` to
``<y1>``, where the lowest value of ``<y0>`` may be ``-inf``, and the
highest value of ``<y1>`` may be ``inf``. The ``<x>`` values may also
be categorical or numerical. If they are numerical, it is assumed
that these values represent samples of a smoothly varying function (a
cubic spline in every y).
The values ``<a0>``, ``<a1>`` and ``<a2>`` are the polynomial
coefficients in y (with quadratic coefficients, only normal or
exponential tails are possible). The final segment of the probability
function is:
exp(a0 + a1 y + a2 y2)
"""
__copyright__ = "Copyright 2014, The Open Aggregator"
__license__ = "GPL"
__author__ = "James Rising"
__credits__ = ["James Rising", "Solomon Hsiang", "Bob Kopp"]
__maintainer__ = "James Rising"
__email__ = "jar2234@columbia.edu"
__status__ = "Production"
__version__ = "$Revision$"
# $Source$
import csv, math, string, random, traceback
import numpy as np
from scipy.interpolate import UnivariateSpline
from scipy.stats import norm
from scipy.special import erf
from model import Model
from univariate_model import UnivariateModel
from memoizable import MemoizableUnivariate
class SplineModel(UnivariateModel, MemoizableUnivariate):
    """Conditional distributions stored as piecewise exp-polynomial segments
    in log-probability space (see the module docstring for the file format).

    Each x value (categorical label or number) maps to one
    SplineModelConditional.  NOTE(review): this module is Python 2 code
    (`print` statements, `unicode`, list-returning `map`).
    """
    # Shared sentinels for unbounded segment endpoints.
    posinf = float('inf')
    neginf = float('-inf')
    # Number of points used by discretized approximations.
    samples = 1000
    def __init__(self, xx_is_categorical=False, xx=None, conditionals=None, scaled=True):
        """Store x values and their conditionals; *scaled* means each
        conditional integrates to one."""
        super(SplineModel, self).__init__(xx_is_categorical, xx, scaled)
        if conditionals is not None:
            self.conditionals = conditionals
        else:
            self.conditionals = []
    def __repr__(self):
        return "Spline model"
    def kind(self):
        """Identifier used by the surrounding model framework."""
        return 'spline_model'
    def copy(self):
        """Return a copy with per-conditional copies (x list is rebuilt)."""
        conditionals = []
        for conditional in self.conditionals:
            conditionals.append(conditional.copy())
        return SplineModel(self.xx_is_categorical, list(self.get_xx()), conditionals, scaled=self.scaled)
    def get_xx(self):
        """Return text labels when categorical, numeric x values otherwise."""
        if self.xx_is_categorical:
            return self.xx_text
        else:
            return self.xx
    def eval_pval(self, x, p, threshold=1e-3):
        """Inverse CDF: the y at cumulative probability *p*, conditional on *x*."""
        conditional = self.get_conditional(x)
        return conditional.get_pval(p, threshold)
    def scale_y(self, a):
        """Rescale the y axis of every conditional by *a*, re-normalizing when scaled."""
        for conditional in self.conditionals:
            conditional.scale_y(a)
            if self.scaled:
                conditional.rescale()
        return self
    def scale_p(self, a):
        """Raise every conditional density to the power *a*, re-normalizing when scaled."""
        for conditional in self.conditionals:
            conditional.scale_p(a)
            if self.scaled:
                conditional.rescale()
        return self
    def filter_x(self, xx):
        """Return a model restricted to the conditionals for the given *xx*."""
        conditionals = []
        for x in xx:
            conditionals.append(self.get_conditional(x))
        return SplineModel(self.xx_is_categorical, xx, conditionals, scaled=self.scaled)
    def interpolate_x(self, newxx):
        """Return a model for *newxx*; currently nearest-conditional copies only."""
        # Is this a subset of our values?
        subset = True
        for x in newxx:
            if x not in self.get_xx():
                subset = False
        if subset:
            return self.filter_x(newxx)
        #(limits, ys) = SplineModelConditional.propose_grid(self.conditionals)
        #ddp_model = self.to_ddp(ys).interpolate_x(newxx)
        #return SplineModel.from_ddp(ddp_model, limits)
        conditionals = []
        for x in newxx:
            conditionals.append(self.get_conditional(x).copy())
        return SplineModel(self.xx_is_categorical, newxx, conditionals, True)
    # Only for categorical models
    def recategorize_x(self, oldxx, newxx):
        """Construct a new model with categorical x values 'newxx', using the conditionals currently assigned to categorical x values 'oldxx'."""
        conditionals = []
        for ii in range(len(oldxx)):
            # -1 or NaN marks a missing category; give it an undefined Gaussian.
            if oldxx[ii] == -1 or (not isinstance(oldxx[ii], str) and not isinstance(oldxx[ii], unicode) and np.isnan(oldxx[ii])): # Not available
                conditionals.append(SplineModelConditional.make_gaussian(-np.inf, np.inf, np.nan, np.nan))
            else:
                conditionals.append(self.get_conditional(oldxx[ii]))
        return SplineModel(True, newxx, conditionals, scaled=self.scaled)
    def add_conditional(self, x, conditional):
        """Append *conditional* for x value *x*; switches to categorical mode
        the first time *x* fails to parse as a float."""
        if not self.xx_is_categorical:
            try:
                self.xx.append(float(x))
                self.xx_text.append(str(x))
            except ValueError:
                self.xx_is_categorical = True
        if self.xx_is_categorical:
            self.xx_text.append(x)
            self.xx.append(len(self.xx))
        self.conditionals.append(conditional)
    def get_conditional(self, x):
        """Return the conditional for *x*: exact text match first, else the
        conditional with the nearest numeric x."""
        if x is None or x == '' or len(self.conditionals) == 1:
            return self.conditionals[0]
        try:
            return self.conditionals[self.xx_text.index(str(x))]
        except Exception as e:
            return SplineModelConditional.find_nearest(self.xx, x, self.conditionals)
    def write_file(self, filename, delimiter):
        """Write the model to *filename* in the spp1/spv1 spline format."""
        with open(filename, 'w') as fp:
            self.write(fp, delimiter)
    def write(self, file, delimiter):
        """Write the spp1 (scaled) / spv1 (unscaled) segment rows to *file*."""
        if self.scaled:
            file.write("spp1\n")
        else:
            file.write("spv1\n")
        writer = csv.writer(file, delimiter=delimiter)
        for ii in range(len(self.xx)):
            for jj in range(len(self.conditionals[ii].y0s)):
                row = [self.xx_text[ii], self.conditionals[ii].y0s[jj], self.conditionals[ii].y1s[jj]]
                row.extend(self.conditionals[ii].coeffs[jj])
                writer.writerow(row)
    def write_gaussian(self, file, delimiter):
        """Write one (x, mean, sdev) row per Gaussian segment; None for non-Gaussian."""
        writer = csv.writer(file, delimiter=delimiter)
        writer.writerow(['dpc1', 'mean', 'sdev'])
        for ii in range(len(self.xx)):
            for jj in range(len(self.conditionals[ii].y0s)):
                # Skip zero-density (-inf constant) segments entirely.
                if len(self.conditionals[ii].coeffs[jj]) == 1 and self.conditionals[ii].coeffs[jj][0] == SplineModel.neginf:
                    continue
                elif len(self.conditionals[ii].coeffs[jj]) == 3:
                    writer.writerow([self.xx_text[ii], self.conditionals[ii].gaussian_mean(jj), self.conditionals[ii].gaussian_sdev(jj)])
                else:
                    writer.writerow([self.xx_text[ii], None, None])
    def write_gaussian_plus(self, file, delimiter):
        """Like write_gaussian, but also records each segment's y0/y1 bounds."""
        writer = csv.writer(file, delimiter=delimiter)
        writer.writerow(['dpc1', 'y0', 'y1', 'mean', 'sdev'])
        for ii in range(len(self.xx)):
            for jj in range(len(self.conditionals[ii].y0s)):
                if len(self.conditionals[ii].coeffs[jj]) == 1 and self.conditionals[ii].coeffs[jj][0] == SplineModel.neginf:
                    continue
                elif len(self.conditionals[ii].coeffs[jj]) == 3:
                    writer.writerow([self.xx_text[ii], self.conditionals[ii].y0s[jj], self.conditionals[ii].y1s[jj], self.conditionals[ii].gaussian_mean(jj), self.conditionals[ii].gaussian_sdev(jj)])
                else:
                    writer.writerow([self.xx_text[ii], self.conditionals[ii].y0s[jj], self.conditionals[ii].y1s[jj], None, None])
    def to_points_at(self, x, ys):
        """Evaluate the density conditional on *x* at each point of *ys*."""
        conditional = self.get_conditional(x)
        return conditional.to_points(ys)
    def cdf(self, xx, yy):
        """Cumulative probability up to *yy*, conditional on *xx*."""
        conditional = self.get_conditional(xx)
        return conditional.cdf(yy)
    def is_gaussian(self, x=None):
        """True when the conditional at *x* is a single quadratic (Gaussian) segment."""
        conditional = self.get_conditional(x)
        return len(conditional.y0s) == 1 and len(conditional.coeffs[0]) == 3
    def get_mean(self, x=None):
        """Mean of the conditional at *x*: closed form when Gaussian, else a
        sum of per-segment integrals of y p(y)."""
        if not isinstance(x, str) and not isinstance(x, unicode) and np.isnan(x):
            return np.nan
        conditional = self.get_conditional(x)
        if conditional.is_gaussian():
            return conditional.gaussian_mean(0)
        total = 0
        for ii in range(conditional.size()):
            total += conditional.nongaussian_xpx(ii)
        return total
    def get_sdev(self, x=None):
        """Standard deviation of the conditional at *x* (sqrt(E[y^2] - mean^2))."""
        conditional = self.get_conditional(x)
        if conditional.is_gaussian():
            return conditional.gaussian_sdev(0)
        total = 0
        for ii in range(conditional.size()):
            total += conditional.nongaussian_x2px(ii)
        mean = self.get_mean(x)
        return math.sqrt(total - mean**2)
    def draw_sample(self, x=None):
        """Draw one random y from the conditional at *x*."""
        conditional = self.get_conditional(x)
        return conditional.draw_sample()
    def init_from_spline_file(self, file, delimiter, status_callback=None):
        """Populate this model from an open spp1/spv1 file.

        Raises ValueError on an unknown header; rescales every conditional
        when the header declares scaled (spp1) data.
        """
        line = string.strip(file.readline())
        if line == "spp1":
            self.scaled = True
        elif line == 'spv1':
            self.scaled = False
        else:
            raise ValueError("Unknown format: %s" % (line))
        self.xx = []
        self.xx_text = []
        self.xx_is_categorical = False
        self.conditionals = []
        reader = csv.reader(file, delimiter=delimiter)
        x = None
        conditional = None
        for row in reader:
            # Rows are grouped by x; a new x starts a new conditional.
            if row[0] != x:
                x = row[0]
                conditional = SplineModelConditional()
                self.add_conditional(x, conditional)
            conditional.add_segment(float(row[1]), float(row[2]), map(float, row[3:]))
            if status_callback:
                status_callback("Parsing...", reader.line_num / (reader.line_num + 3.0))
        if self.scaled:
            for conditional in self.conditionals:
                conditional.rescale()
        return self
    def to_ddp(self, ys=None):
        """Discretize to a DDP model on grid *ys* (proposed automatically when None).

        NOTE(review): DDPModel is not imported in this file's visible header --
        presumably provided elsewhere; verify before relying on this method.
        """
        if ys is None:
            (limits, ys) = SplineModelConditional.propose_grid(self.conditionals)
        pp = np.ones((len(self.xx), len(ys)))
        for ii in range(len(self.xx)):
            pp[ii,] = self.to_points_at(self.xx[ii], ys)
        return DDPModel('ddp1', 'spline_model', self.xx_is_categorical, self.get_xx(), False, ys, pp, scaled=self.scaled)
    ### Memoizable
    def eval_pval_index(self, ii, p, threshold=1e-3):
        """Inverse CDF for the conditional at position *ii* (MemoizableUnivariate hook)."""
        return self.conditionals[ii].get_pval(p, threshold)
    ### Class Methods
    @staticmethod
    def create_single(xxs, y0s, y1s, coeffss, order=None, xx_is_categorical=True):
        """Build a model with one single-segment conditional per key of *xxs*."""
        conditionals = []
        xx = []
        for key in (xxs if order is None else order):
            xx.append(key)
            conditional = SplineModelConditional.make_single(y0s[key], y1s[key], coeffss[key])
            conditionals.append(conditional)
        return SplineModel(xx_is_categorical, xx, conditionals, True)
    @staticmethod
    def create_gaussian(xxs, order=None, xx_is_categorical=True):
        """xxs should be a dictionary of the form {x: (mean, variance)}."""
        conditionals = []
        xx = []
        for key in (xxs if order is None else order):
            xx.append(key)
            mean = float(xxs[key][0])
            var = float(xxs[key][1])
            conditional = SplineModelConditional.make_gaussian(SplineModel.neginf, SplineModel.posinf, mean, var)
            conditionals.append(conditional)
        return SplineModel(xx_is_categorical, xx, conditionals, True)
    @staticmethod
    def from_ddp(ddp_model, limits):
        """Fit a spline model to a discrete (DDP) model's log-probabilities.

        Zero-probability (-inf log-p) points are replaced by a finite ramp so
        the spline fit stays well-conditioned; x values whose fit fails are
        dropped (the error is printed).
        """
        lps = ddp_model.log_p()
        conditionals = []
        xx = []
        for ii in range(len(ddp_model.xx)):
            lp = lps[ii,]
            updown = np.concatenate((np.linspace(-1000, -900, np.floor(len(lp)/2)), np.linspace(-900, -1000, np.ceil(len(lp)/2))))
            lp[lp == SplineModel.neginf] = updown[lp == SplineModel.neginf]
            spline = UnivariateSpline(ddp_model.yy, lp, k=2)
            try:
                conditionals.append(SplineModelConditional.make_conditional_from_spline(spline, limits).rescale())
                xx.append(ddp_model.get_xx()[ii])
            except Exception as e:
                print e
                print traceback.print_exc()
        return SplineModel(ddp_model.xx_is_categorical, xx, conditionals, True)
    @staticmethod
    def merge(models):
        """Multiply several scaled models together (sum of log-densities),
        segment boundary by segment boundary, renormalizing each conditional."""
        for model in models:
            if not model.scaled:
                raise ValueError("Only scaled distributions can be merged.")
        (models, xx) = UnivariateModel.intersect_x_all(models)
        model = SplineModel()
        for ii in range(len(xx)):
            conditional = SplineModelConditional()
            y0 = SplineModel.neginf
            # Loop through each segment
            while y0 != SplineModel.posinf:
                # The next boundary is the nearest y1 among all covering segments.
                y1 = SplineModel.posinf
                coeffs = np.zeros(3)
                for jj in range(len(models)):
                    modcond = models[jj].get_conditional(xx[ii])
                    for kk in range(len(modcond.y0s)):
                        if modcond.y0s[kk] <= y0 and modcond.y1s[kk] > y0:
                            if modcond.y1s[kk] < y1:
                                y1 = modcond.y1s[kk]
                            if np.all(np.isfinite(modcond.coeffs[kk])): # Ignore NA and Inf
                                coeffs[0:len(modcond.coeffs[kk])] = np.array(coeffs[0:len(modcond.coeffs[kk])]) + np.array(modcond.coeffs[kk])
                # Trim trailing zero coefficients.
                while len(coeffs) > 0 and coeffs[-1] == 0:
                    coeffs = coeffs[0:-1]
                conditional.add_segment(y0, y1, coeffs)
                y0 = y1
            model.add_conditional(xx[ii], conditional.rescale())
        return model
    @staticmethod
    def combine(one, two):
        """Distribution of the sum of draws from *one* and *two* (convolution)."""
        if one.xx_is_categorical != two.xx_is_categorical:
            raise ValueError("Cannot combine models that do not agree on categoricity")
        if not one.scaled or not two.scaled:
            raise ValueError("Cannot combine unscaled models")
        (one, two, xx) = UnivariateModel.intersect_x(one, two)
        conditionals = []
        for ii in range(len(xx)):
            conditionals.append(one.get_conditional(xx[ii]).convolve(two.get_conditional(xx[ii])).rescale())
        return SplineModel(one.xx_is_categorical, xx, conditionals, True)
class SplineModelConditional():
    # coeffs is ordered low-order to high-order
    def __init__(self, y0s=None, y1s=None, coeffs=None):
        """Piecewise exp-polynomial density over segments [y0s[i], y1s[i]].

        coeffs[i] holds the log-density polynomial coefficients of segment i,
        constant term first.
        """
        if y0s is None:
            self.y0s = np.array([])
            self.y1s = np.array([])
            self.coeffs = []
        else:
            self.y0s = np.array(y0s)
            self.y1s = np.array(y1s)
            self.coeffs = coeffs
    def size(self):
        """Return the number of segments."""
        return len(self.y0s)
    def copy(self):
        """Return a copy; the outer lists are new but the per-segment
        coefficient lists are shared with the original."""
        return SplineModelConditional(list(self.y0s), list(self.y1s), list(self.coeffs))
    # Does not maintain scaling
    def scale_y(self, a):
        """Rescale the y axis by *a*, adjusting linear/quadratic coefficients to match."""
        for ii in range(self.size()):
            self.y0s[ii] *= a
            self.y1s[ii] *= a
            if len(self.coeffs[ii]) > 1:
                self.coeffs[ii][1] /= a
            if len(self.coeffs[ii]) > 2:
                self.coeffs[ii][2] /= a*a
    # Does not maintain scaling
    def scale_p(self, a):
        """Multiply the log-density by *a* (raise the density to the power *a*).

        NOTE(review): relies on Python 2 map() returning a list.
        """
        for ii in range(self.size()):
            self.coeffs[ii] = map(lambda c: a*c, self.coeffs[ii])
    # Does not check for overlapping segments
    def add_segment(self, y0, y1, coeffs):
        """Append segment [y0, y1] with log-density *coeffs*, keeping the
        parallel arrays sorted by y0."""
        self.y0s = np.append(self.y0s, [y0])
        self.y1s = np.append(self.y1s, [y1])
        self.coeffs.append(coeffs)
        indexes = np.argsort(self.y0s)
        if indexes[-1] != len(self.y0s) - 1:
            # New segment landed out of order: re-sort all three arrays.
            self.y0s = self.y0s[indexes]
            self.y1s = self.y1s[indexes]
            self.coeffs = [self.coeffs[index] for index in indexes]
    # Note: after calling, need to set scaled on SplineModel object
    def rescale(self):
        """Normalize the density to integrate to one (when the integral is
        finite and positive); returns self for chaining."""
        integral = self.cdf(SplineModel.posinf)
        if not np.isnan(integral) and integral > 0:
            self.scale(1 / integral)
        return self
    def scale(self, factor):
        """Multiply the density by *factor* by shifting each constant log term."""
        if factor == 0:
            # Zero density everywhere: collapse to one -inf segment.
            self.y0s = [SplineModel.neginf]
            self.y1s = [SplineModel.posinf]
            self.coeffs = [[SplineModel.neginf]]
        else:
            for ii in range(self.size()):
                self.coeffs[ii][0] = self.coeffs[ii][0] + math.log(factor)
    # Similar to to_points
    def evaluate(self, ii, y):
        """Density of segment *ii* at point *y* (0 at either infinity)."""
        if y == SplineModel.neginf or y == SplineModel.posinf:
            return 0
        # polyval expects high-order-first, so reverse the stored coefficients.
        return np.exp(np.polyval(self.coeffs[ii][::-1], y))
# Similar to evaluate
def to_points(self, ys):
result = np.array(ys) * 0
for ii in range(len(self.y0s)):
valid = np.logical_and(ys >= self.y0s[ii], ys <= self.y1s[ii])
result[valid] = np.exp(np.polyval(self.coeffs[ii][::-1], ys[valid]))
return result
    def partial_cdf(self, ii, y1):
        """Integral of the density of segment *ii* from its y0 up to *y1*.

        Closed forms for constant and exponential log-densities; quadratic
        segments use the Gaussian CDF when curvature is negative, and a
        Riemann sum when curvature is positive (no closed form).
        """
        if len(self.coeffs[ii]) == 0:
            return np.nan
        if len(self.coeffs[ii]) == 1:
            if self.coeffs[ii][0] == SplineModel.neginf:
                return 0
            return np.exp(self.coeffs[ii][0]) * (y1 - self.y0s[ii])
        elif len(self.coeffs[ii]) == 2:
            return (np.exp(self.coeffs[ii][0]) / self.coeffs[ii][1]) * (np.exp(self.coeffs[ii][1] * y1) - np.exp(self.coeffs[ii][1] * self.y0s[ii]))
        elif self.coeffs[ii][2] > 0:
            if self.y0s[ii] == SplineModel.neginf or self.y1s[ii] == SplineModel.posinf:
                raise ValueError("Improper area of spline")
            # Positive curvature: integrate numerically over `samples` points.
            myys = np.linspace(self.y0s[ii], y1, SplineModel.samples)
            return sum(np.exp(np.polyval(self.coeffs[ii][::-1], myys))) * (y1 - self.y0s[ii]) / SplineModel.samples
        else:
            # Negative curvature: rewrite as a scaled Gaussian and use norm.cdf.
            var = -.5 / self.coeffs[ii][2]
            mean = self.coeffs[ii][1] * var
            if np.isnan(mean) or np.isnan(var) or var <= 0:
                return 0
            exponent = self.coeffs[ii][0] - (-mean*mean / (2*var) + math.log(1 / math.sqrt(2*math.pi*var)))
            if exponent > 100:
                # math domain error!
                return 0
            rescale = math.exp(exponent)
            below = 0
            if float(self.y0s[ii]) != SplineModel.neginf:
                below = norm.cdf(float(self.y0s[ii]), loc=mean, scale=math.sqrt(var))
            if exponent > 20 and float(self.y0s[ii]) != SplineModel.neginf and float(self.y1s[ii]) != SplineModel.neginf and y1 != SplineModel.posinf:
                # approaching math domain error: assume constant
                total = rescale * (norm.cdf(self.y1s[ii], loc=mean, scale=math.sqrt(var)) - below)
                return total * (y1 - self.y0s[ii]) / (self.y1s[ii] - self.y0s[ii])
            return rescale * (norm.cdf(y1, loc=mean, scale=math.sqrt(var)) - below)
    def cdf(self, yy):
        """Cumulative probability up to *yy*, summed across segments."""
        integral = 0
        for ii in range(len(self.y0s)):
            # Truncate the final (containing) segment at yy.
            if self.y1s[ii] >= yy:
                y1 = yy
            else:
                y1 = self.y1s[ii]
            integral += self.partial_cdf(ii, y1)
            if self.y1s[ii] >= yy:
                break
        return integral
    def draw_sample(self):
        """Draw one random y by inverse-CDF sampling of a uniform draw."""
        value = random.random()
        return self.get_pval(value)
    def get_pval(self, p, threshold=1e-3):
        """Inverse CDF: the y at cumulative probability *p*.

        *threshold* sets the bisection tolerance and is tightened toward the
        tails; when inversion fails (NaN), a midpoint fallback is returned.
        """
        # Use finer thresholds later on
        if p < .1:
            threshold = threshold * p * 10
        elif p > .9:
            threshold = threshold * (1 - p) * 10
        # First figure out which spline p is in
        integral = 0
        for ii in range(len(self.y0s)):
            if ii == len(self.y0s) - 1:
                break # this will bring us to 1
            partial = self.partial_cdf(ii, self.y1s[ii])
            if integral + partial > p:
                break
            integral += partial
        y = SplineModelConditional.ascinv(p - integral, lambda y: self.partial_cdf(ii, y), self.y0s[ii], self.y1s[ii], threshold)
        if np.isnan(y):
            # Let's just give back some value
            if self.y0s[0] < 0 and self.y1s[len(self.y1s)-1] > 0:
                y = 0
            else:
                y = (self.y0s[0] + self.y1s[len(self.y1s)-1]) / 2
        return y
    # find the x for a given y of an ascending function
    # copied from math.js
    @staticmethod
    def ascinv(y, func, minx, maxx, threshold):
        """Invert the ascending function *func* by bisection on [minx, maxx].

        Infinite bounds are walked inward geometrically; returns NaN when
        func returns NaN or after 10000 iterations without converging.
        """
        tries = 0
        while tries < 10000:
            tries += 1
            if (minx == SplineModel.neginf and maxx == SplineModel.posinf) or (minx == SplineModel.neginf and maxx > 0) or (minx < 0 and maxx == SplineModel.posinf):
                midpoint = 0
            elif minx == SplineModel.neginf:
                midpoint = (maxx - 1.0) * 2
            elif maxx == SplineModel.posinf:
                midpoint = (minx + 1.0) * 2
            else:
                midpoint = (minx + maxx) / 2.0
            error = func(midpoint) - y
            if abs(error) < threshold:
                return midpoint
            elif np.isnan(error):
                return np.nan
            elif error > 0:
                maxx = midpoint
            elif error < 0:
                minx = midpoint
        return np.nan
    def approximate_mean(self, limits):
        """Approximate the mean by discretizing the density within *limits*
        (intersected with the conditional's own rough limits)."""
        rough_limits = self.rough_limits()
        limits = (max(float(limits[0]), rough_limits[0]), min(float(limits[1]), rough_limits[1]))
        ys = np.linspace(limits[0], limits[1], self.size() * SplineModel.samples)
        ps = self.to_points(ys)
        ps = ps / sum(ps)
        return sum(ps * ys)
    # Allow true gaussian or delta
    def is_gaussian(self):
        """True for a single segment that is Gaussian (3 coeffs) or a delta (0 coeffs)."""
        return len(self.y0s) == 1 and (len(self.coeffs[0]) == 3 or len(self.coeffs[0]) == 0)
def gaussian_sdev(self, ii):
if len(self.coeffs[ii]) == 0:
return 0
if self.coeffs[ii][2] == 0:
return np.inf
return 1/math.sqrt(-2*self.coeffs[ii][2])
def gaussian_mean(self, ii):
if len(self.coeffs[ii]) == 0:
return (self.y1s[ii] + self.y0s[ii]) / 2
if self.coeffs[ii][2] == 0:
return np.nan
return -self.coeffs[ii][1] / (2*self.coeffs[ii][2])
    def nongaussian_xpx(self, ii):
        """Integral of y p(y) over segment *ii* (its contribution to the mean).

        Closed forms derived symbolically; see the Matlab note below.
        """
        a = self.coeffs[ii][2] if len(self.coeffs[ii]) > 2 else 0
        b = self.coeffs[ii][1] if len(self.coeffs[ii]) > 1 else 0
        c = self.coeffs[ii][0]
        x0 = self.y0s[ii]
        x1 = self.y1s[ii]
        # From Matlab
        if a == 0:
            if x0 == SplineModel.neginf:
                return (math.exp(c + b*x1)*(b*x1 - 1))/b**2
            elif x1 == SplineModel.posinf:
                return -(math.exp(c + b*x0)*(b*x0 - 1))/b**2
            return (math.exp(c + b*x1)*(b*x1 - 1))/b**2 - (math.exp(c + b*x0)*(b*x0 - 1))/b**2
        sqrtpi = math.pi**.5
        na05 = ((-a)**.5)
        na15 = ((-a)**1.5)
        return (math.exp(a*x1**2 + b*x1)*math.exp(c))/(2*a) - (math.exp(a*x0**2 + b*x0)*math.exp(c))/(2*a) + (sqrtpi*b*math.exp(-b**2/(4*a))*math.exp(c)*erf((b + 2*a*x0)/(2*na05)))/(4*na15) - (sqrtpi*b*math.exp(-(b**2)/(4*a))*math.exp(c)*erf((b + 2*a*x1)/(2*na05)))/(4*na15)
    def nongaussian_x2px(self, ii):
        """Integral of y^2 p(y) over segment *ii* (contribution to E[y^2]).

        Closed forms derived symbolically; see the Matlab note below.
        """
        a = self.coeffs[ii][2] if len(self.coeffs[ii]) > 2 else 0
        b = self.coeffs[ii][1] if len(self.coeffs[ii]) > 1 else 0
        c = self.coeffs[ii][0]
        x0 = self.y0s[ii]
        x1 = self.y1s[ii]
        # From Matlab
        if a == 0:
            if x0 == SplineModel.neginf:
                return (math.exp(c + b*x1)*(b**2*x1**2 - 2*b*x1 + 2))/b**3
            elif x1 == SplineModel.posinf:
                return -(math.exp(c + b*x0)*(b**2*x0**2 - 2*b*x0 + 2))/b**3
            return (math.exp(c + b*x1)*(b**2*x1**2 - 2*b*x1 + 2))/b**3 - (math.exp(c + b*x0)*(b**2*x0**2 - 2*b*x0 + 2))/b**3
        sqrtpi = math.pi**.5
        na05 = ((-a)**.5)
        na25 = ((-a)**2.5)
        na35 = ((-a)**3.5)
        return (2*na25*b*math.exp(a*x0**2 + b*x0 + c) - 2*na25*b*math.exp(a*x1**2 + b*x1 + c) + 4*na35*x0*math.exp(a*x0**2 + b*x0 + c) - 4*na35*x1*math.exp(a*x1**2 + b*x1 + c) - 2*(sqrtpi)*a**3*math.exp((- b**2 + 4*a*c)/(4*a))*erf((b + 2*a*x0)/(2*na05)) + 2*(sqrtpi)*a**3*math.exp((- b**2 + 4*a*c)/(4*a))*erf((b + 2*a*x1)/(2*na05)) + (sqrtpi)*a**2*b**2*math.exp((- b**2 + 4*a*c)/(4*a))*erf((b + 2*a*x0)/(2*na05)) - (sqrtpi)*a**2*b**2*math.exp((- b**2 + 4*a*c)/(4*a))*erf((b + 2*a*x1)/(2*na05)))/(8*((-a)**4.5))
    # Duplicated in models.js
    def segment_max(self, jj):
        """Return (y, density) of the maximum of segment *jj*.

        Checks both endpoints and, for quadratic segments, the interior
        vertex when it falls inside the segment.
        """
        maxyy = self.y0s[jj]
        maxval = self.evaluate(jj, self.y0s[jj])
        val = self.evaluate(jj, self.y1s[jj])
        if (val > maxval):
            maxval = val
            maxyy = self.y1s[jj]
        coeffs = self.coeffs[jj]
        if len(coeffs) > 2:
            # Vertex of the log-quadratic: where the derivative vanishes.
            yy = -coeffs[1] / (2*coeffs[2])
            if yy > self.y0s[jj] and yy < self.y1s[jj]:
                val = self.evaluate(jj, yy)
                if val > maxval:
                    maxval = val
                    maxyy = yy
        return (maxyy, maxval)
    # Duplicated in models.js
    # returns (yy, val)
    def find_mode(self):
        """Return (y, density) of the global maximum across all segments."""
        maxmax = (None, SplineModel.neginf)
        for ii in range(self.size()):
            mymax = self.segment_max(ii)
            if mymax[1] > maxmax[1]:
                maxmax = mymax
        return maxmax
    # Duplicated in models.js
    def rough_span(self):
        """Heuristic total width of the distribution's support.

        Unbounded segments contribute a width inferred from their
        coefficients (e.g. a few standard deviations for quadratics).
        """
        span = 0
        for jj in range(self.size()):
            if self.y0s[jj] == SplineModel.neginf or self.y1s[jj] == SplineModel.posinf:
                if len(self.coeffs[jj]) == 3:
                    span += 3 / math.sqrt(abs(2*self.coeffs[jj][2]))
                elif len(self.coeffs[jj]) == 2:
                    span += 5 / abs(self.coeffs[jj][1])
                else:
                    span += 1 / abs(self.coeffs[jj][0]) # improper!
            else:
                span += self.y1s[jj] - self.y0s[jj]
        return span
    # Duplicated in models.js
    def rough_limits(self):
        """Heuristic finite (low, high) support bounds.

        Infinite endpoints are replaced by the mode plus/minus the rough span.
        """
        limits0 = float(self.y0s[0])
        limits1 = float(self.y1s[-1])
        if limits0 == SplineModel.neginf or limits1 == SplineModel.posinf:
            maxmax = self.find_mode()
            span = self.rough_span()
            if limits0 == SplineModel.neginf:
                limits0 = maxmax[0] - span
            if limits1 == SplineModel.posinf:
                limits1 = maxmax[0] + span
        return (limits0, limits1)
    def convolve(self, other):
        """Distribution of the sum of draws from *self* and *other*.

        Both densities are discretized, convolved numerically, and quadratic
        splines are refit to the log of the result; zero-probability runs
        become explicit -inf segments.
        """
        # NOTE: below is for a single segment...
        # int_s e^P(s) e^Q(t - s) = int_s e^[P(s) + Q(t - s)] = int_s e^[a1 ss + b1 s + c1 + a2 (tt - 2ts + ss) + b2 t - b2 s + c2]
        # int_s e^[(a1 + a2) ss + (b1 - 2t - b2) s] e^[a2 (tt) + b2 t + c1 + c2]
        # Have to do approximate sum later anyway, so let's just convert to ddp
        (limits, ys) = SplineModelConditional.propose_grid([self, other])
        pp_self = self.to_points(ys)
        pp_other = other.to_points(ys)
        newpp = np.convolve(pp_self, pp_other)
        newpp = newpp / sum(newpp) # Scale
        yy = np.linspace(2*min(ys), 2*max(ys), 2*len(ys) - 1)
        if np.any(newpp == 0):
            conditional = SplineModelConditional()
            # Break into many pieces
            ii = 0
            y0 = min(yy)
            while ii == 0 or (ii < len(newpp) and newpp[ii] == 0):
                if newpp[ii] == 0:
                    # Skip the zero run and record it as a -inf segment.
                    while ii < len(newpp) and newpp[ii] == 0:
                        ii += 1
                    if ii < len(newpp):
                        conditional.add_segment(y0, yy[ii], [SplineModel.neginf])
                    else:
                        conditional.add_segment(y0, yy[-1], [SplineModel.neginf])
                        break
                    y0 = yy[ii]
                i0 = ii
                # Fit a spline to the next positive run.
                while ii < len(newpp) and newpp[ii] > 0:
                    ii += 1
                spline = UnivariateSpline(yy[i0:ii], np.log(newpp[i0:ii]), k=2, s=(ii - i0) / 1000.0)
                if ii < len(newpp):
                    segments = SplineModelConditional.make_conditional_from_spline(spline, (y0, yy[ii]))
                else:
                    segments = SplineModelConditional.make_conditional_from_spline(spline, (y0, yy[-1]))
                for jj in range(segments.size()):
                    conditional.add_segment(segments.y0s[jj], segments.y1s[jj], segments.coeffs[jj])
                if ii < len(newpp):
                    y0 = yy[ii]
                else:
                    break
            return conditional
        else:
            spline = UnivariateSpline(yy, np.log(newpp), k=2)
            return SplineModelConditional.make_conditional_from_spline(spline, (2*limits[0], 2*limits[1]))
    @staticmethod
    def make_single(y0, y1, coeffs):
        """Build a conditional holding a single segment [y0, y1] with the
        given log-density polynomial coefficients."""
        return SplineModelConditional(y0s=[y0], y1s=[y1], coeffs=[coeffs])
@staticmethod
def make_gaussian(y0, y1, mean, var):
return SplineModelConditional.make_single(y0, y1, [-mean*mean/(2*var) - np.log(np.sqrt(2*math.pi*var)), mean/var, -1/(2*var)])
    @staticmethod
    def make_conditional_from_spline(spline, limits):
        """Convert a fitted quadratic UnivariateSpline into a piecewise
        exp-quadratic conditional over `limits`.

        Each inter-knot interval is represented by the spline's quadratic
        Taylor expansion about a point inside the interval.
        """
        conditional = SplineModelConditional()
        knots = spline.get_knots()
        # NOTE(review): uses knots[1] rather than knots[0]; if get_knots()
        # is already sorted this midpoint is off-center -- confirm intent.
        midpoint = (knots[-1] + knots[1]) / 2
        knots = sorted(knots)
        # Clamp the outer knots to the requested (possibly infinite) limits.
        knots[0] = float(limits[0])
        knots[-1] = float(limits[1])
        for ii in range(1, len(knots)):
            # Choose a finite expansion point within the interval.
            if knots[ii-1] == SplineModel.neginf and knots[ii] == SplineModel.posinf:
                y = midpoint
            elif knots[ii-1] == SplineModel.neginf:
                y = knots[ii]
            elif knots[ii] == SplineModel.posinf:
                y = knots[ii-1]
            else:
                y = (knots[ii-1] + knots[ii]) / 2
            derivs = spline.derivatives(y)
            # Quadratic Taylor coefficients about y: f(t) ~ c + b*t + a*t^2.
            a = derivs[2] / 2
            b = derivs[1] - derivs[2] * y
            c = derivs[0] - (a*y*y + b*y)
            # An upward-opening quadratic on an unbounded interval cannot
            # normalize; mark such segments as zero probability.
            if a > 0 and (knots[ii-1] == SplineModel.neginf or knots[ii] == SplineModel.posinf):
                conditional.add_segment(knots[ii-1], knots[ii], [SplineModel.neginf]) # This segment failed!
            else:
                conditional.add_segment(knots[ii-1], knots[ii], [c, b, a])
        return conditional
@staticmethod
def find_nearest(array, value, within):
if isinstance(value, str) or isinstance(value, unicode):
try:
value = int(value)
except:
raise ValueError("Cannot apply find_nearest to categorical values.")
idx = (np.abs(np.array(array)-value)).argmin()
return within[idx]
@staticmethod
def approximate_sum(conditionals):
if len(conditionals) == 1:
return conditionals[0]
(limits, ys) = SplineModelConditional.propose_grid(conditionals)
ps = np.zeros(len(ys))
for ii in range(len(conditionals)):
ps = ps + conditionals[ii].to_points(ys)
lps = np.log(ps)
spline = UnivariateSpline(ys, lps, k=2)
return SplineModelConditional.make_conditional_from_spline(spline, limits)
@staticmethod
def propose_grid(conditionals):
limits = (SplineModel.neginf, SplineModel.posinf)
rough_limits = (SplineModel.posinf, SplineModel.neginf)
max_segments = 0
for conditional in conditionals:
if conditional.y0s[0] == conditional.y1s[-1] or np.isnan(conditional.y0s[0]) or np.isnan(conditional.y1s[-1]):
continue
limits = (max(limits[0], conditional.y0s[0]), min(limits[1], conditional.y1s[-1]))
conditional_rough_limits = conditional.rough_limits()
rough_limits = (min(rough_limits[0], conditional_rough_limits[0]), max(rough_limits[1], conditional_rough_limits[1]))
max_segments = max(max_segments, sum(map(lambda cc: len(cc), conditional.coeffs)))
num_points = 100 * max_segments / (1 + np.log(len(conditionals)))
ys = np.linspace(rough_limits[0], rough_limits[1], num_points)
return (limits, ys)
# NOTE(review): import placed at the bottom of the module, presumably to
# avoid a circular import with ddp_model -- confirm.
from ddp_model import DDPModel

# Register merge/combine strategies so Model can dispatch by model kind.
# Mixed spline+ddp operations are handled by first converting the spline
# side to its discrete (ddp) representation.
Model.mergers["spline_model"] = SplineModel.merge
Model.mergers["spline_model+ddp_model"] = lambda models: DDPModel.merge(map(lambda m: m.to_ddp(), models))
Model.combiners['spline_model+spline_model'] = SplineModel.combine
Model.combiners["spline_model+ddp_model"] = lambda one, two: DDPModel.combine(one.to_ddp(), two)
|
CUTTACK: In an interim relief to IAS officer Vinod Kumar, against whom a vigilance court had issued a non-bailable warrant (NBW) after convicting him in a corruption case, the Orissa high court on Tuesday stayed the Vigilance Court’s order.
The high court directed authorities not to take any coercive action against Kumar till its further orders. The next hearing in the matter has been scheduled to July 19. “The Court will hear the matter in detail on July 19,” said Kumar’s counsel, Pitambar Acharya.
The petitioner has sought relief from high court pleading innocence in the matter. He has cited in his plea that the trial by the vigilance court was not carried in proper manner and hence its order should be quashed by the high court.
Vinod Kumar, who is currently serving as an Officer on Special Duty (OSD) in the higher education department, and five others were convicted by the Special Vigilance Court on July 3 in connection with a corruption case that took place when he was the managing director of the Odisha Rural Housing Development Corporation (ORHDC).
The six were convicted for their alleged involvement in financial irregularities to the tune of Rs 55 lakh in the construction of cyclone shelters after the Super Cyclone in October 1999. Vigilance said Kumar allegedly committed all the financial irregularities during his tenure as managing director of ORHDC between January 2000 and May 2001.
|
# -*- coding: utf-8 -*-
'''
Docer - backend - views
~~~~~~
A document viewing platform.
:copyright: (c) 2015 by Docer.Org.
:license: MIT, see LICENSE for more details.
'''
from . import backend
from flask import request, session, g, redirect, url_for, abort, render_template, flash
@backend.route('/')
def hello_world():
    """Admin index endpoint."""
    greeting = "Hello admin!"
    return greeting
@backend.route('/user')
def user():
    """Placeholder admin user page."""
    page = 'Userpage'
    return page
@backend.route('/user/reg')
def reg():
    """Render the admin registration form."""
    return render_template('admin/reg.html')
@backend.route('/user/do_reg', methods=['POST'])
def do_reg():
    """Handle the registration POST: validate field lengths, reject
    duplicate usernames, then create the user.

    Returns a (Chinese) status message string in every case.
    """
    from app.models import Users
    _uname = request.form['username'].strip()
    _pwd = request.form['password'].strip()
    # Validate field lengths.
    if len(_uname) > 20 or len(_uname) < 5:
        return '用户名要求长度5-20'
    elif len(_pwd) > 20 or len(_pwd) < 8:
        return '密码要求长度8-20'
    else:
        # Fix: check for duplicates using the stripped username so the
        # lookup matches the value actually stored below (previously the
        # raw, unstripped form value was used here).
        exists_users = Users.objects.filter(username = _uname)
        if exists_users.count() > 0:
            return '帐号已存在'
        # Create the account.
        # SECURITY: the password is stored in plain text; it should be
        # hashed (e.g. werkzeug.security.generate_password_hash) first.
        new_user = Users(
            username = _uname,
            password = _pwd
        )
        new_user.save()
        return '注册成功'
|
羊駝分布於南美祕魯、智利的高山地區,是日行性的群居動物,通常由一隻公羊駝帶領數頭母羊駝及小羊駝。 羊駝的毛具有非常強的保暖性,炎熱的夏天需要定時修剪以保持涼快。愛乾淨的羊駝會在固定的地點排泄,視覺、聽覺及嗅覺都很敏銳,奔跑時速可達55公里。性情溫馴的牠們很喜歡作沙浴保持身體乾淨。 The hair of the alpaca is very good at keeping warm, and it needs to be trimmed during the summer to keep the animal cool. Female and baby alpacas are normally led by one male. They always excrete at a regular place and take sand baths to keep themselves clean.
|
"""
Model Abstraction of e-economic.com API
"""
import copy
import re
import os
import base64
from collections import defaultdict
from suds.client import Client
class ObjectDoesNotExist(Exception):
    """Raised when a lookup matches no instance.

    Derives from Exception rather than BaseException so that generic
    ``except Exception`` handlers catch it; BaseException is reserved for
    interpreter-exit signals such as KeyboardInterrupt/SystemExit.
    """
    pass
class MultipleObjectsReturned(Exception):
    """Raised when a single-object lookup matches more than one instance.

    Derives from Exception rather than BaseException so that generic
    ``except Exception`` handlers catch it.
    """
    pass
class EConomicsService(object):
    """
    Interface for e-conomic WSDL service
    """
    def __init__(self, service, model_factory, soap_factory, codec):
        # suds service proxy -- every remote call goes through it.
        self.service = service
        # Identity map producing at most one instance per handle.
        self.model_factory = model_factory
        # suds type factory, used to build ArrayOf... SOAP types.
        self.soap_factory = soap_factory
        # Count of remote calls performed (diagnostics only).
        self.ncalls = 0
        # Translates between WSDL data objects and python dicts.
        self.codec = codec
    def fetch_list(self, name, expected_wsdltype, *args, **kw):
        """Call server method `name` and normalize the result to a list.

        suds returns a falsy value for no results, a bare object for a
        single result, and a one-element wrapper list otherwise; all
        three cases are flattened here.
        NOTE: `kw` is accepted but never forwarded to the server call.
        """
        result = getattr(self.service, name)(*args)
        self.ncalls += 1
        if not result:
            return []
        # A bare object not carrying the expected list key is a single
        # hit; wrap it so callers always receive a list.
        if expected_wsdltype and expected_wsdltype not in result.__keylist__:
            return [result]
        return result[0]
    def fetch(self, name, *args, **kw):
        # Raw server call; returns whatever suds produces, unchanged.
        # NOTE: `kw` is accepted but never forwarded.
        return getattr(self.service, name)(*args)
    def upgrade_to_order(self, handle, order_model):
        """Upgrade the quotation behind `handle` to an order instance."""
        hnd = self.fetch('Quotation_UpgradeToOrder', handle)
        return self.model_factory.get_or_create_instance(self, order_model, hnd)
    def upgrade_to_invoice(self, handle, current_invoice_model):
        """Upgrade the order behind `handle` to a current (draft) invoice."""
        hnd = self.fetch('Order_UpgradeToInvoice', handle)
        return self.model_factory.get_or_create_instance(self, current_invoice_model, hnd)
    def book_invoice(self, handle, invoice_model):
        """Book the current invoice behind `handle`, yielding the final invoice."""
        hnd = self.fetch('CurrentInvoice_Book', handle)
        return self.model_factory.get_or_create_instance(self, invoice_model, hnd)
    def next_available_number(self, model):
        """Return the next free document number for `model`."""
        return self.fetch('%s_GetNextAvailableNumber' % model.__name__)
    def delete(self, model, handle):
        """Delete the server-side object identified by `handle`."""
        self.fetch("%s_Delete" % model.__name__, handle)
    def create(self, model, **data):
        """Create a server-side object from keyword data and return the
        corresponding local model instance."""
        parsed_data = self.codec.encode_data_object(self, model, data)
        hnd = self.fetch("%s_CreateFromData" % model.__name__, parsed_data)
        return self.get_instance(model, hnd)
    def get_or_create(self, model, **spec):
        """Return the instance matching the filterable subset of `spec`,
        creating one from the full `spec` when no match exists."""
        filter_names = [f['name'] for f in model.__filters__]
        # Only fields with a server-side filter method can be searched on.
        get_data = dict((k, v,) for k, v in spec.items() if k in filter_names)
        try:
            return self.get(model, **get_data)
        except ObjectDoesNotExist:
            return self.create(model, **spec)
    def __find_handles(self, model, **spec):
        """ find model instances based on given filter (spec)
        The filter is based on available server-calls, so some values might not be available for filtering.
        Multiple filter-values is going to do multiple server-calls.
        For complex filters in small datasets, it might be faster to fetch all and do your own in-memory filter.
        Empty filter will fetch all.
        :param model: subclass of EConomicsModel
        :param spec: mapping of values to filter by
        :return: a list of EConomicsModel instances
        """
        server_calls = []
        filter_names = dict([(f['name'], f['method'],) for f in model.get_filters()])
        if not spec:
            server_calls.append({'method': "%s_GetAll" % model.__name__, 'args': []})
        else:
            for key, value in spec.items():
                if not key in filter_names:
                    raise ValueError("no server-method exists for filtering by '%s'" % key)
                args = []
                # Normalize scalars to lists so the handling below is uniform.
                if not hasattr(value, '__iter__'):
                    value = [value]
                if key.endswith('_list'):
                    vtype = type(value[0]).__name__
                    # TODO: this surely does not cover all cases of data types
                    array = self.soap_factory.create('ArrayOf%s' % vtype.capitalize())
                    getattr(array, "%s" % vtype).extend(value)
                    args.append(array)
                else:
                    args.extend(value)
                method = "%s_%s" % (model.__name__, filter_names[key])
                # GetAll* style filter methods take no arguments.
                if filter_names[key].startswith('GetAll'):
                    args = []
                server_calls.append({'method': method, 'args': args, 'expect': "%sHandle" % model.__name__})
        # Intersect the handle sets returned by each call (logical AND of
        # all filters); Handle supplies the hash/eq the sets rely on.
        # NOTE: `reduce` is a builtin on Python 2 only.
        handles = [
            map(Handle, self.fetch_list(scall['method'], scall.get('expect'), *scall['args']))
            for scall in server_calls
        ]
        return [h.wsdl for h in reduce(set.intersection, map(set, handles))]
    def find(self, model, **spec):
        """Return all instances of `model` matching `spec`."""
        handles = self.__find_handles(model, **spec)
        return [self.get_instance(model, hnd) for hnd in handles]
    def get(self, model, **spec):
        """Return exactly one instance of `model` matching `spec`.

        :param model: model class
        :param spec: filter values (see __find_handles)
        :raises MultipleObjectsReturned: when more than one instance matches
        :raises ObjectDoesNotExist: when nothing matches
        """
        handles = self.__find_handles(model, **spec)
        if len(handles) > 1:
            raise MultipleObjectsReturned()
        if not handles:
            raise ObjectDoesNotExist()
        return self.get_instance(model, handles[0])
    def get_instance(self, model, handle):
        # Delegates to the identity map: one instance per handle id.
        return self.model_factory.get_or_create_instance(self, model, handle)
    def load_instance_data(self, instance):
        """Fetch and decode the server data for a single instance."""
        model = instance.__class__
        modelname = model.__name__
        data = self.fetch("%s_GetData" % modelname, instance._handle)
        instance._data = self.codec.decode_data_object(self, instance._handle, model, data)
    def load_data(self, instance):
        """Batch-load data for every not-yet-loaded instance of this
        instance's model in one GetDataArray call (not just `instance`)."""
        model = instance.__class__
        modelname = model.__name__
        handles = [inst._handle for (m, inst,) in self.model_factory.instances_iter([model], loaded=False)]
        array = self.soap_factory.create('ArrayOf%sHandle' % modelname)
        getattr(array, "%sHandle" % modelname).extend(handles)
        for data in self.fetch_list("%s_GetDataArray" % modelname, None, array):
            handle = data.Handle
            inst = self.get_instance(model, handle)
            inst._data = self.codec.decode_data_object(self, handle, model, data)
            inst._loaded = True
    def get_all_changes(self):
        """Collect all uncommitted changes, grouped by model class."""
        changesets = defaultdict(list)
        for model, inst in self.model_factory.instances_iter(updated=True):
            changesets[model].append(ModelChange(model, inst))
        return changesets
    def commit(self):
        """Push all staged changes to the server (one UpdateFromDataArray
        call per model class), then fold them into the local caches."""
        changesets = self.get_all_changes()
        for model, changes in changesets.items():
            datalist = [self.codec.encode_data_object(self, model, changeset.get_data()) for changeset in changes]
            array = self.soap_factory.create('ArrayOf%sData' % model.__name__)
            getattr(array, '%sData' % model.__name__).extend(datalist)
            self.fetch("%s_UpdateFromDataArray" % model.__name__, array)
            # Only mark local state clean after the server call succeeded.
            [change.apply_and_clear() for change in changes]
    def __getattr__(self, name):
        # Fall through to the raw suds service for anything not wrapped.
        return getattr(self.service, name)
class ModelChange(object):
    """Pending change-set for a single model instance, used by
    EConomicsService.commit() to batch updates."""
    def __init__(self, model, instance):
        self.model = model
        self.instance = instance
    def __repr__(self):
        return "<Changes %r %r>" % (self.instance, self.clean_data(self.instance._changes))
    def apply_and_clear(self):
        # Fold the staged changes into the cached data and reset the stage.
        self.instance._data.update(self.instance._changes)
        self.instance._changes = {}
    def clean_data(self, data):
        """Normalize field names to python style, dropping the '_handle'
        suffix that reference fields carry on the wire."""
        result = {}
        for k, v in data.items():
            k = pythonize(k)
            if k.endswith('_handle'):
                k = k[:-7]
            result[k] = v
        return result
    def get_data(self):
        """Full outgoing data dict: cached data overlaid with the staged
        changes, plus the instance's handle."""
        if not self.instance._data:
            self.instance.fetch()
        data = self.clean_data(self.instance._data)
        data.update(self.clean_data(self.instance._changes))
        data['Handle'] = self.instance._handle
        return data
class PropertyCodec(object):
    """Translates between raw WSDL data objects and model property dicts."""
    def __init__(self, missing_value=None):
        # Reserved for properties absent from incoming data (currently unused).
        self.missing_value = missing_value
    def decode_data_object(self, service, handle, model, data):
        """Decode a server data object into {property_name: python_value}."""
        decoded_data = {}
        for prop in model.properties:
            name = prop.name
            # Reference fields arrive under '<Name>Handle' on the wire.
            if prop.name+'Handle' in data:
                name = prop.name + 'Handle'
            if not name in data:
                # Absent field: ask the property for its default (e.g. a
                # lazy QueryList or FileObject).
                value = prop.default_value(service, handle)
            else:
                value = prop.decode_value(service, handle, data[name])
            decoded_data[prop.name] = value
        return decoded_data
    def encode_data_object(self, service, model, data):
        """Encode a python property dict into server field values.

        Missing and None-valued properties are omitted entirely.
        """
        #print 'ENCODE', data
        encoded_data = {}
        if 'Handle' in data:
            encoded_data['Handle'] = data['Handle']
        for prop in model.properties:
            name = prop.pyname
            if not name in data:
                # encoded_data[prop.name] = self.missing_value
                continue
            value = data[name]
            if value is None:
                # encoded_data[prop.name] = value
                continue
            encoded_data[prop.name] = prop.encode_value(service, data[name])
        return encoded_data
class EConomicsModelFactory(object):
    """Identity map: at most one instance per (service, model, handle-id)."""
    def __init__(self):
        # {model_class: {hashkey: instance}}
        self.__models = {}
    def instances_iter(self, models=None, loaded=None, updated=None):
        """Iterate (model, instance) pairs, optionally filtered by loaded
        state and/or by the presence of uncommitted changes."""
        if models is None:
            models = self.__models.keys()
        for model in models:
            for inst in self.__models[model].values():
                if loaded is not None and bool(inst._loaded) != bool(loaded):
                    continue
                if updated is not None and bool(inst._changes) != bool(updated):
                    continue
                yield (model, inst,)
    def get_or_create_instance(self, service, model, handle):
        # Keyed by the handle's first field (its Id) -- see Handle.__hash__.
        hashkey = hash((service, model, handle[0],))
        modeldata = self.__models.setdefault(model, {})
        return modeldata.setdefault(hashkey, model(service, handle))
class Handle(object):
    """Hashable wrapper around a suds handle object.

    Identity is defined by the handle's first field (its Id), so handles
    can be deduplicated with sets and used as dict keys.
    """
    def __init__(self, wsdl):
        self.wsdl = wsdl
    def __hash__(self):
        return hash(self.wsdl[0])
    def __eq__(self, other):
        # Fix: hash both operands explicitly.  The original compared
        # `hash(self) == other`, which only yielded the right answer for
        # Handle operands through Python's reflected-operator fallback.
        return hash(self) == hash(other)
    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__; define it so
        # inequality stays consistent.
        return not self.__eq__(other)
    def __repr__(self):
        return "<Handle %r>" % self.wsdl.Id
class EConomicsMeta(type):
    """Metaclass that collects descriptor attributes into `properties`
    and registers every model class by name (see get_model)."""
    # Global name -> model-class registry shared by all models.
    registry = {}
    def __new__(mcs, name, bases, ns):
        properties = []
        for k, v in ns.items():
            # Anything implementing the descriptor protocol is treated as
            # a property (EConomicsBaseProperty subclasses in practice).
            if hasattr(v, '__get__'):
                properties.append(v)
        # NOTE(review): dict iteration order is arbitrary here (Python 2),
        # so `properties` has no stable ordering -- confirm nothing
        # depends on it.
        ns['properties'] = properties
        model = type.__new__(mcs, name, bases, ns)
        mcs.registry[name] = model
        return model
    def get_filters(self):
        # Class-level accessor for the declared __filters__ spec.
        return self.__filters__
class EConomicsBaseProperty(object):
    """Base descriptor for model fields: writes are staged locally and
    reads are served from cache, batch-loading on first access."""
    def encode_value(self, service, value):
        # Python value -> wire value (identity by default).
        return value
    def decode_value(self, service, handle, value):
        # Wire value -> python value (identity by default).
        return value
    def default_value(self, service, handle):
        # Value used when the field is absent from fetched data.
        return None
    def __get__(self, instance, owner):
        _ = owner
        if instance is None:
            # Class-level access returns the descriptor itself.
            return self
        changes = instance._changes
        # Uncommitted writes shadow the fetched data.
        if self.name in changes:
            return changes[self.name]
        if not instance._loaded:
            instance.load()
        value = instance._data[self.name]
        # Lazy containers (QueryList/FileObject) materialize on first read.
        if hasattr(value, 'fetched') and not value.fetched:
            value.fetch()
        return value
    def __set__(self, instance, value):
        # Writes are staged until EConomicsService.commit().
        instance._changes[self.name] = value
class EConomicsProperty(EConomicsBaseProperty):
    """Plain data field mapped one-to-one to a WSDL field."""
    def __init__(self, name):
        # Wire name (CamelCase) and derived python name (snake_case).
        self.name = name
        self.pyname = pythonize(name)
    def __repr__(self):
        return "<%s Data>" % pythonize(self.name)
class EConomicsReference(EConomicsBaseProperty):
    """To-one reference to another model, stored as '<Name>Handle' on the
    wire; the target model is resolved lazily by name via get_model()."""
    def __init__(self, name, model):
        self.name = name + 'Handle'
        self.model = model
        self.pyname = pythonize(name)
    def encode_value(self, service, value):
        # Outgoing: an instance is represented by its handle.
        return value._handle
    def decode_value(self, service, handle, value):
        # Incoming: a handle becomes the (identity-mapped) instance.
        return service.get_instance(get_model(self.model), value)
    def __repr__(self):
        return "<%s %s>" % (self.name, self.model)
class QueryList(list):
    """Lazily-populated list of model instances.

    Created empty; the first attribute/method access triggers a server
    fetch that fills the list in place (see EConomicsReferenceList).
    """
    def __init__(self, service, handle, model, method):
        self.service = service
        self.handle = handle
        self.model = model
        self.method = method
        self.fetched = False
    def __getattribute__(self, name):
        # Internal attributes must not trigger a fetch, otherwise fetch()
        # itself would recurse.
        if name in ['fetch', 'service', 'handle', 'model', 'method', 'fetched']:
            return list.__getattribute__(self, name)
        # BUG FIX: this originally read `if self.fetched: self.fetch()`,
        # which meant the list was never populated lazily and was
        # re-fetched on every access once fetch() had been called
        # manually.  Fetch exactly when not yet fetched.
        if not self.fetched:
            self.fetch()
        return list.__getattribute__(self, name)
    def fetch(self):
        """Populate the list from the server and mark it as fetched."""
        handles = self.service.fetch_list(self.method, None, self.handle)
        self[:] = [self.service.get_instance(self.model, hnd) for hnd in handles]
        self.fetched = True
        return self
class EConomicsReferenceList(EConomicsBaseProperty):
    """To-many reference, materialized lazily through a QueryList that
    calls the given server method."""
    def __init__(self, name, model, method):
        self.name = name
        self.model = model
        self.method = method
        self.pyname = pythonize(name)
    def __repr__(self):
        return "<%s [%s]>" % (self.name, self.model)
    def encode_value(self, service, value):
        # Outgoing: a list of instances becomes a list of handles.
        return [v._handle for v in value]
    def default_value(self, service, handle):
        # Incoming data never carries the list inline; return a lazy query.
        return QueryList(service, handle, get_model(self.model), self.method)
class EConomicsFileProperty(EConomicsBaseProperty):
    """Server-generated file field (e.g. PDF), exposed as a lazy FileObject."""
    def __init__(self, name, method, filetype):
        self.name = name
        self.filetype = filetype
        # Server method used to download the file content.
        self.method = method
        self.pyname = pythonize(name)
    def __repr__(self):
        return "<%s %s file>" % (self.name, self.filetype)
    def default_value(self, service, handle):
        return FileObject(service, self.method, handle, self.filetype)
class FileObject(object):
    """Lazily fetched binary attachment (e.g. a PDF) for a model.

    The remote payload is base64-encoded; save() decodes it to disk.
    """
    def __init__(self, service, method, handle, filetype):
        self.filedata = None
        self.method = method
        self.service = service
        self.handle = handle
        self.filetype = filetype
        self.fetched = False
        self.__last_location = None
    def fetch(self):
        """Download the (base64-encoded) file content from the server."""
        self.filedata = self.service.fetch(self.method, self.handle)
        self.fetched = True
        return self
    def save(self, location):
        """Decode and write the file to `location`, appending the file
        extension when missing.  Fetches the content first if needed."""
        if not self.fetched:
            # Robustness fix: save() before fetch() used to crash on
            # b64decode(None); fetch on demand instead.
            self.fetch()
        if not location.endswith(self.filetype):
            location += '.' + self.filetype
        with open(location, 'wb') as f:
            f.write(base64.b64decode(self.filedata))
        self.__last_location = location
    def show(self):
        """Open the file with the desktop's default viewer, saving it to a
        temporary location first if necessary."""
        if not self.__last_location:
            self.save('/tmp/economic_tmp')
        # NOTE(review): shell interpolation here is injection-prone if the
        # location ever contains metacharacters; prefer
        # subprocess.call(['xdg-open', path]).
        os.system('xdg-open %s' % self.__last_location)
class EConomicsModel(object):
    """Base class for all e-conomic models.

    Instances cache server state in `_data` and stage writes in
    `_changes` until EConomicsService.commit() pushes them.
    """
    __filters__ = []
    # Python 2 metaclass declaration (has no effect on Python 3).
    __metaclass__ = EConomicsMeta
    def __init__(self, service, handle):
        self._handle = handle
        self._loaded = False
        self._service = service
        # Last fetched server state.
        self._data = {}
        # Staged, uncommitted writes.
        self._changes = {}
    def __repr__(self):
        return "<%s %s>" % (self.__class__.__name__, self._handle[0])
    def fetch(self):
        """Load this instance's data with a single-object server call."""
        self._service.load_instance_data(self)
        return self
    def update(self, **data):
        """Stage several attribute writes at once."""
        for k, v in data.items():
            setattr(self, k, v)
    def load(self):
        # Batch-loads ALL not-yet-loaded instances of this model, not just
        # this one (see EConomicsService.load_data).
        self._service.load_data(self)
    def delete(self):
        """Delete the server-side object behind this instance."""
        self._service.delete(self.__class__, self._handle)
def get_model(name):
    """Look up a registered model class by name; raises KeyError when the
    model has not been defined yet."""
    return EConomicsMeta.registry[name]
def pythonize(name):
    """Convert a CamelCase name to snake_case
    (e.g. 'OurReference' -> 'our_reference')."""
    underscored = re.sub(r'([A-Z])([a-z])', r'_\1\2', name)
    return underscored.strip('_').lower()
def camelcase(name):
    """Inverse of pythonize: 'our_reference' -> 'OurReference'."""
    return ''.join(part.capitalize() for part in name.split('_'))
def build_model_code(client):
    """
    Generate source code for e-conomic models based on WSDL connection.
    This is based on the assumption that the API follows a specific method naming-convention.
    Not all models and attributes has been tested.
    The source-generation is mostly to help improve readability and IDE auto-completion.
    :param client: suds Client connected to the e-conomic WSDL endpoint
    :return: source code for models.py
    """
    models = {}
    references = {}
    # First pass: create one model entry per 'Model_Action' method prefix
    # and build a name -> model map (including naive plural forms) used
    # below to recognize reference-returning getters.
    for method in client.wsdl.services[0].ports[0].methods.values():
        if not '_' in method.name:
            continue
        model, action = method.name.split('_')
        models.setdefault(model, {'properties': [], 'filters': []})
        references[model] = model
        # Naive pluralization: 'Entry' -> 'Entries', otherwise append 's'.
        if model[-1] == 'y':
            references[model[:-1] + 'ies'] = model
        else:
            references[model+'s'] = model
    # Hand-maintained irregular reference names.
    references['OurReference'] = 'Employee'
    references['GetYourReference'] = 'DebtorContact'
    references['GetAttention'] = 'DebtorContact'
    references['Layout'] = 'TemplateCollection'
    # Methods that need a non-default property type (file downloads and
    # line collections).
    special = {
        'Order_GetPdf': {
            'type': 'EConomicsFileProperty',
            'args': ["'Order_GetPdf'", "'pdf'"]
        },
        'Invoice_GetPdf': {
            'type': 'EConomicsFileProperty',
            'args': ["'Invoice_GetPdf'", "'pdf'"]
        },
        'CurrentInvoice_GetPdf': {
            'type': 'EConomicsFileProperty',
            'args': ["'CurrentInvoice_GetPdf'", "'pdf'"]
        }
    }
    for line in ['Order', 'Invoice', 'CurrentInvoice', 'Quotation']:
        method = '%s_GetLines' % line
        special[method] = {
            'type': 'EConomicsReferenceList',
            'args': ["'%sLine'" % line, "'%s'" % method]
        }
    # Second pass: classify each action as a filter or a property.
    for method in client.wsdl.services[0].ports[0].methods.values():
        if not '_' in method.name:
            continue
        model, action = method.name.split('_')
        # Handled generically by the service layer, not generated.
        if action in ['GetData', 'GetAll', 'GetDataArray']:
            continue
        modeldata = models[model]
        if action == 'GetAllUpdated':
            camelname = action[3:]
            modeldata['filters'].append({'name': pythonize(camelname), 'method': action})
        # NOTE(review): 'GetAllUpdated' also matches the regex below, so it
        # is appended to filters twice -- confirm whether that is intended.
        if re.findall('GetAll[A-Z].+', action):
            camelname = action[3:]
            modeldata['filters'].append({'name': pythonize(camelname), 'method': action})
        elif action.startswith('FindBy'):
            camelname = action[6:]
            modeldata['filters'].append({'name': pythonize(camelname), 'method': action})
        elif action.startswith('Get'):
            propname = action[3:]
            pyname = pythonize(propname)
            if not propname:
                continue
            # Does this getter return a known model (reference) type?
            get_type = re.findall('Get(%s)[a-z0-9]*?$' % ('|'.join(references.keys())), action)
            if get_type and get_type[0] in references:
                refmodel = references[get_type[0]]
                # Trailing 's' implies a collection of references.
                if action[-1] == 's':
                    modeldata['properties'].append({
                        'type': 'EConomicsReferenceList',
                        'args': ["'%s'" % propname, "'%s'" % refmodel, "'%s'" % method.name],
                        'name': pyname
                    })
                else:
                    modeldata['properties'].append({
                        'type': 'EConomicsReference',
                        'args': ["'%s'" % propname, "'%s'" % refmodel],
                        'name': pyname
                    })
            elif method.name in special:
                spdata = special[method.name]
                modeldata['properties'].append({
                    'type': spdata['type'],
                    'args': ["'%s'" % propname] + spdata['args'],
                    'name': pyname
                })
            else:
                # Plain data property.
                modeldata['properties'].append({
                    'type': 'EConomicsProperty',
                    'args': ["'%s'" % propname],
                    'name': pyname
                })
    # Emit one class definition per model.
    classes = []
    for modelname, modeldata in models.items():
        propertycode = ["%s = %s(%s)" % (md['name'], md['type'], ', '.join(md['args']))
                        for md in modeldata['properties']]
        code = "class %s(%s):\n    __filters__ = %r\n    %s" % (modelname, 'EConomicsModel',
                                                                modeldata['filters'], '\n    '.join(propertycode))
        classes.append(code)
    return "from pyconomic.base import *\n\n\n" + "\n\n\n".join(classes)
|
In a saucepan over low heat, melt together the marshmallows, butter, vanilla, and food coloring. Mix in the cornflakes cereal.
Drop by spoonfuls on wax paper, and decorate with red hots. Set aside, and allow to cool.
|
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Writes dependency ordered list of native libraries.
The list excludes any Android system libraries, as those are not bundled with
the APK.
This list of libraries is used for several steps of building an APK.
In the component build, the --input-libraries only needs to be the top-level
library (i.e. libcontent_shell_content_view). This will then use readelf to
inspect the shared libraries and determine the full list of (non-system)
libraries that should be included in the APK.
"""
# TODO(cjhopman): See if we can expose the list of library dependencies from
# gyp, rather than calculating it ourselves.
# http://crbug.com/225558
import optparse
import os
import re
import sys
from util import build_utils
_readelf = None
_library_dirs = None
_library_re = re.compile(
'.*NEEDED.*Shared library: \[(?P<library_name>[\w/.]+)\]')
def SetReadelfPath(path):
  """Records the readelf binary path used by CallReadElf()."""
  global _readelf
  _readelf = path
def SetLibraryDirs(dirs):
  """Records the directories searched by FullLibraryPath()."""
  global _library_dirs
  _library_dirs = dirs
def FullLibraryPath(library_name):
  """Returns the path of |library_name| within the configured library
  directories, or the bare name unchanged when it is not found there
  (i.e. it is a system library)."""
  assert _library_dirs is not None
  for directory in _library_dirs:
    # os.path.join handles trailing separators in |directory| cleanly,
    # unlike the previous '%s/%s' string interpolation.
    path = os.path.join(directory, library_name)
    if os.path.exists(path):
      return path
  return library_name
def IsSystemLibrary(library_name):
  """Returns True if |library_name| is an Android system library."""
  # If the library doesn't exist in the libraries directory, assume that it is
  # an Android system library.
  return not os.path.exists(FullLibraryPath(library_name))
def CallReadElf(library_or_executable):
  """Runs `readelf -d` on the given ELF file and returns its stdout."""
  assert _readelf is not None
  readelf_cmd = [_readelf,
                 '-d',
                 FullLibraryPath(library_or_executable)]
  return build_utils.CheckOutput(readelf_cmd)
def GetDependencies(library_or_executable):
  """Returns the set of DT_NEEDED shared-library names of the given file."""
  elf_output = CallReadElf(library_or_executable)
  return set(_library_re.findall(elf_output))
def GetNonSystemDependencies(library_name):
  """Returns the non-system (bundled) dependencies of |library_name|."""
  return set(lib for lib in GetDependencies(FullLibraryPath(library_name))
             if not IsSystemLibrary(lib))
def GetSortedTransitiveDependencies(libraries):
  """Returns all transitive library dependencies in dependency order."""
  # build_utils performs the topological sort; we only supply the edge
  # function that yields each library's direct (non-system) dependencies.
  return build_utils.GetSortedTransitiveDependencies(
      libraries, GetNonSystemDependencies)
def GetSortedTransitiveDependenciesForBinaries(binaries):
  """Returns the dependency-ordered library list needed by |binaries|.

  Shared-library inputs are included in the walk themselves; otherwise a
  single executable is expected and only its non-system dependencies are
  considered.
  """
  if binaries[0].endswith('.so'):
    libraries = [os.path.basename(lib) for lib in binaries]
  else:
    assert len(binaries) == 1
    all_deps = GetDependencies(binaries[0])
    libraries = [lib for lib in all_deps if not IsSystemLibrary(lib)]
  return GetSortedTransitiveDependencies(libraries)
def main():
  """Parses options, computes the ordered library list and writes the
  .json output (plus optional stamp and depfile)."""
  parser = optparse.OptionParser()
  build_utils.AddDepfileOption(parser)
  parser.add_option('--input-libraries',
      help='A list of top-level input libraries.')
  parser.add_option('--libraries-dir',
      help='The directory which contains shared libraries.')
  parser.add_option('--readelf', help='Path to the readelf binary.')
  parser.add_option('--output', help='Path to the generated .json file.')
  parser.add_option('--stamp', help='Path to touch on success.')
  options, _ = parser.parse_args()
  SetReadelfPath(options.readelf)
  SetLibraryDirs(options.libraries_dir.split(','))
  libraries = build_utils.ParseGypList(options.input_libraries)
  if len(libraries):
    libraries = GetSortedTransitiveDependenciesForBinaries(libraries)
  # Convert to "base" library names: e.g. libfoo.so -> foo
  java_libraries_list = (
      '{%s}' % ','.join(['"%s"' % s[3:-3] for s in libraries]))
  build_utils.WriteJson(
      {'libraries': libraries, 'java_libraries_list': java_libraries_list},
      options.output,
      only_if_changed=True)
  if options.stamp:
    build_utils.Touch(options.stamp)
  if options.depfile:
    # Removed a stray debug `print libraries` that polluted build output.
    build_utils.WriteDepfile(
        options.depfile,
        libraries + build_utils.GetPythonDependencies())
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == '__main__':
  sys.exit(main())
|
Superior Switch Panel with Dual Switches and USB Flush mount plastic switch panel with 8 blue LED switches(with stickers), DC Voltmeter, Ciga Socket, Illuminated Lighter Socket inc Lighter and a dual USB plug(1A and 2.1A) The panel comes pre-wi..
Superior DC Main Control Panel Flush mount aluminium DC main control panel with 8 Blue LED switches on 2 circuits, Ciga Socket and a Volt/Amp Meter. All switches are protected by circuit breakers. The panel comes pre-wired for simple installat..
Superior Dual USB Charge with Voltage Meter Socket Suitable For Toyota Hilux Revo/Landcruiser 200 Series Simply remove one of the factory blanking plates and clip this in place and have a convenient way to power two USB devices. Comes pre-wired..
Superior Dual USB Charge/Audio Socket Suitable For Toyota Hilux Revo/Landcruiser 200 Series Simply remove one of the factory blanking plates and clip this in place and have a convenient way to power a USB device and convenient USB connection for U..
Superior Racing Starter/Kill Switch Flush mount aluminium starter switch panel with illuminated started button, illuminated shut off missile/rocket switch and ignition on light. Includes wiring and relay..
Superior Racing Starter/Kill Switch Flush mount aluminium starter switch panel with Illuminated shut off missile/rocket switch and ignition on light. Includes wiring and relay..
Superior Racing Starter/Kill Switch with Acc Flush mount aluminium starter switch panel with Illuminated shut off missile/rocket switch and 2 Accessory switches. Includes wiring and relay..
|
from rest_framework import generics
from bluebottle.geo.models import Location
from bluebottle.geo.serializers import LocationSerializer
from bluebottle.projects.models import Project
from .serializers import CountrySerializer
from .models import Country
class CountryList(generics.ListAPIView):
    """List all countries that have an ISO 3166-1 alpha-2 code, ordered
    by name."""
    serializer_class = CountrySerializer
    queryset = Country.objects.all()

    def get_queryset(self):
        # The trailing .all() was redundant: order_by() already returns a
        # fresh queryset clone.
        return self.queryset.filter(alpha2_code__isnull=False).order_by('name')
class CountryDetail(generics.RetrieveAPIView):
    """Retrieve a single country.

    The previous get_queryset() override returned the superclass queryset
    unchanged, so it has been removed; the inherited implementation is
    used directly.
    """
    serializer_class = CountrySerializer
    queryset = Country.objects.all()
class UsedCountryList(CountryList):
    """Countries referenced by at least one publicly viewable project."""
    def get_queryset(self):
        qs = super(UsedCountryList, self).get_queryset()
        # Distinct country ids of projects whose status is publicly viewable.
        project_country_ids = Project.objects.filter(
            status__viewable=True).values_list('country', flat=True).distinct()
        return qs.filter(id__in=project_country_ids)
class LocationList(generics.ListAPIView):
    """List all locations."""
    serializer_class = LocationSerializer
    queryset = Location.objects.all()
|
Helen Crawford made her mark in health sciences librarianship by literally building the medical school library at University of Wisconsin at Madison and through numerous publications and service to the Medical Library Association. Her professional accomplishments earned her the Presidential Citation of the State Medical Society of Wisconsin.
Helen was born in North Dakota to parents who were teachers and fully expected her to go to college, however uncommon it was at the time. She fulfilled that expectation by earning a Bachelor of Arts in German at the University of North Dakota in 1928. After working at the North Dakota Historical Society for a year, Helen went to Simmons College to earn a B.S. in library science and to get "a little Eastern polish." She returned to the Midwest in 1931 as a classifier for the Iowa State University and was active in the American Library Association. In 1944, Helen decided to pursue a master's degree in library science at the University of Chicago. The next year, before she completed her thesis, she was recruited for the director's position for the medical library at the University of Wisconsin at Madison.
Helen's biggest professional challenge at Wisconsin was coping with lack of space. After several plans for expansion failed, "...the alumni organized to build a building because Dr. Middleton wanted it. One of them said they thought he had holes in his head, but if he wanted a library, they'd get him a library. So they collected $250,000, which doesn't seem much by today's standards, but the legislature then gave $100,000." Just as the building plans were being realized in 1961, Helen underwent surgery for colon cancer and endured three-and-a-half years of experimental chemotherapy. From this personal experience, she was acutely aware of the delays in the dissemination of medical advances. "And I know for several years when I went to meetings, I'd have to have treatment at various centers, and the only doctor who knew anything about this cancer chemotherapy was out at Estes Park...it shows how slow these things are sometimes, in getting into general currency."
Related to the lack of space was lack of staff. Initially, Helen had a staff of three full-timers plus student assistants. Even with this configuration, the library continued its support for a long-standing extension service to the physicians in the entire state. This was a loan service from an unbound duplicate collection until the introduction of Xerox machines. Her passion for outreach to remote users would not be shared by most in the profession until the 1960's. Another of her passions was rare books and she was very much involved in the development of the history of medicine collection at Wisconsin.
Helen's involvement with the Medical Library Association, first as an institutional member, began with an annual meeting in New Haven in 1946. Within three years Helen was delivering papers and serving on committees. She recalled lively discussion and a close vote on certification, the evolution of the central office and regional groups and sections, the maturing of the organizational structure, and poor attendance at the business meetings. What she most valued about her affiliation with the Medical Library Association was "the public meetings and the writing. That's the ability to talk to the whole membership and to talk frankly about our problems and to try to persuade people that we were not being secretive about them--that we were members together of an organization that utilized whatever skills we could offer." In 1972, Helen assumed the role of president, by which time she was retired from Wisconsin. But she wondered about the timing of such a duty in relation to one's career. "It's almost a full-time job...there is also the question of energy, and I'm not entirely sure that people near the end of their professional careers are the best ones...They have a certain amount of wisdom, but they lack a certain amount of drive." On the other hand, she was quite certain about the role of history when quoting Guy Stanton Ford: "I do not think that any man fully grasps all that his profession means, the sense of all that is behind him and the importance of what he is and what he does unless he knows the long road traveled by all those on whose shoulders he stands today."
|
from __future__ import division, absolute_import, print_function
import warnings
import sys
import collections
import operator
import numpy as np
import numpy.core.numeric as _nx
from numpy.core import linspace, atleast_1d, atleast_2d
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
integer, isscalar
)
from numpy.core.umath import (
pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
mod, exp, log10
)
from numpy.core.fromnumeric import (
ravel, nonzero, sort, partition, mean, any, sum
)
from numpy.core.numerictypes import typecodes, number
from numpy.lib.twodim_base import diag
from .utils import deprecate
from numpy.core.multiarray import _insert, add_docstring
from numpy.core.multiarray import digitize, bincount, interp as compiled_interp
from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
from numpy.compat import long
from numpy.compat.py3k import basestring
# Force range to be a generator, for np.delete's usage.
# On Python 2 the built-in range() returns a list; rebinding it to xrange
# here gives this module the lazy-iterator semantics Python 3 has natively.
if sys.version_info[0] < 3:
    range = xrange
# Names exported by this module via ``from ... import *``.
__all__ = [
    'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
    'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp',
    'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
    'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
    'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
    'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
    'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
    ]
def iterable(y):
    """
    Check whether or not an object can be iterated over.

    Parameters
    ----------
    y : object
        Input object.

    Returns
    -------
    b : {0, 1}
        Return 1 if the object has an iterator method or is a sequence,
        and 0 otherwise.

    Examples
    --------
    >>> np.iterable([1, 2, 3])
    1
    >>> np.iterable(2)
    0

    """
    try:
        iter(y)
    # iter() raises TypeError for non-iterables; catching only that avoids
    # the old bare ``except:`` which also swallowed KeyboardInterrupt,
    # SystemExit and genuine errors raised by a broken __iter__.
    except TypeError:
        return 0
    return 1
def _hist_bin_sqrt(x):
"""
Square root histogram bin estimator.
Bin width is inversely proportional to the data size. Used by many
programs for its simplicity.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return x.ptp() / np.sqrt(x.size)
def _hist_bin_sturges(x):
"""
Sturges histogram bin estimator.
A very simplistic estimator based on the assumption of normality of
the data. This estimator has poor performance for non-normal data,
which becomes especially obvious for large data sets. The estimate
depends only on size of the data.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return x.ptp() / (np.log2(x.size) + 1.0)
def _hist_bin_rice(x):
"""
Rice histogram bin estimator.
Another simple estimator with no normality assumption. It has better
performance for large data than Sturges, but tends to overestimate
the number of bins. The number of bins is proportional to the cube
root of data size (asymptotically optimal). The estimate depends
only on size of the data.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return x.ptp() / (2.0 * x.size ** (1.0 / 3))
def _hist_bin_scott(x):
"""
Scott histogram bin estimator.
The binwidth is proportional to the standard deviation of the data
and inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)
def _hist_bin_doane(x):
"""
Doane's histogram bin estimator.
Improved version of Sturges' formula which works better for
non-normal data. See
http://stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
if x.size > 2:
sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
sigma = np.std(x)
if sigma > 0.0:
# These three operations add up to
# g1 = np.mean(((x - np.mean(x)) / sigma)**3)
# but use only one temp array instead of three
temp = x - np.mean(x)
np.true_divide(temp, sigma, temp)
np.power(temp, 3, temp)
g1 = np.mean(temp)
return x.ptp() / (1.0 + np.log2(x.size) +
np.log2(1.0 + np.absolute(g1) / sg1))
return 0.0
def _hist_bin_fd(x):
"""
The Freedman-Diaconis histogram bin estimator.
The Freedman-Diaconis rule uses interquartile range (IQR) to
estimate binwidth. It is considered a variation of the Scott rule
with more robustness as the IQR is less affected by outliers than
the standard deviation. However, the IQR depends on fewer points
than the standard deviation, so it is less accurate, especially for
long tailed distributions.
If the IQR is 0, this function returns 1 for the number of bins.
Binwidth is inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
iqr = np.subtract(*np.percentile(x, [75, 25]))
return 2.0 * iqr * x.size ** (-1.0 / 3.0)
def _hist_bin_auto(x):
    """
    Histogram bin estimator that uses the minimum width of the
    Freedman-Diaconis and Sturges estimators.

    The FD estimator is usually the most robust method, but its width
    estimate tends to be too large for small `x`. The Sturges estimator
    is quite good for small (<1000) datasets and is the default in the R
    language. This method gives good off the shelf behaviour.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.

    See Also
    --------
    _hist_bin_fd, _hist_bin_sturges
    """
    # No zero-width guard is needed here: histogram() falls back to a
    # single bin whenever the selected width comes back as zero.
    fd_width = _hist_bin_fd(x)
    sturges_width = _hist_bin_sturges(x)
    return min(fd_width, sturges_width)
# Private dict initialized at module load time.
# Maps each string accepted for the `bins` argument of `histogram` to the
# bin-width estimator implementing that rule; each value takes the trimmed,
# non-empty data and returns an estimated bin width.
_hist_bin_selectors = {'auto': _hist_bin_auto,
                       'doane': _hist_bin_doane,
                       'fd': _hist_bin_fd,
                       'rice': _hist_bin_rice,
                       'scott': _hist_bin_scott,
                       'sqrt': _hist_bin_sqrt,
                       'sturges': _hist_bin_sturges}
def histogram(a, bins=10, range=None, normed=False, weights=None,
              density=None):
    r"""
    Compute the histogram of a set of data.
    Parameters
    ----------
    a : array_like
        Input data. The histogram is computed over the flattened array.
    bins : int or sequence of scalars or str, optional
        If `bins` is an int, it defines the number of equal-width
        bins in the given range (10, by default). If `bins` is a
        sequence, it defines the bin edges, including the rightmost
        edge, allowing for non-uniform bin widths.
        .. versionadded:: 1.11.0
        If `bins` is a string from the list below, `histogram` will use
        the method chosen to calculate the optimal bin width and
        consequently the number of bins (see `Notes` for more detail on
        the estimators) from the data that falls within the requested
        range. While the bin width will be optimal for the actual data
        in the range, the number of bins will be computed to fill the
        entire range, including the empty portions. For visualisation,
        using the 'auto' option is suggested. Weighted data is not
        supported for automated bin size selection.
        'auto'
            Maximum of the 'sturges' and 'fd' estimators. Provides good
            all round performance
        'fd' (Freedman Diaconis Estimator)
            Robust (resilient to outliers) estimator that takes into
            account data variability and data size.
        'doane'
            An improved version of Sturges' estimator that works better
            with non-normal datasets.
        'scott'
            Less robust estimator that takes into account data
            variability and data size.
        'rice'
            Estimator does not take variability into account, only data
            size. Commonly overestimates number of bins required.
        'sturges'
            R's default method, only accounts for data size. Only
            optimal for gaussian data and underestimates number of bins
            for large non-gaussian datasets.
        'sqrt'
            Square root (of data size) estimator, used by Excel and
            other programs for its speed and simplicity.
    range : (float, float), optional
        The lower and upper range of the bins. If not provided, range
        is simply ``(a.min(), a.max())``. Values outside the range are
        ignored. The first element of the range must be less than or
        equal to the second. `range` affects the automatic bin
        computation as well. While bin width is computed to be optimal
        based on the actual data within `range`, the bin count will fill
        the entire range including portions containing no data.
    normed : bool, optional
        This keyword is deprecated in Numpy 1.6 due to confusing/buggy
        behavior. It will be removed in Numpy 2.0. Use the ``density``
        keyword instead. If ``False``, the result will contain the
        number of samples in each bin. If ``True``, the result is the
        value of the probability *density* function at the bin,
        normalized such that the *integral* over the range is 1. Note
        that this latter behavior is known to be buggy with unequal bin
        widths; use ``density`` instead.
    weights : array_like, optional
        An array of weights, of the same shape as `a`. Each value in
        `a` only contributes its associated weight towards the bin count
        (instead of 1). If `density` is True, the weights are
        normalized, so that the integral of the density over the range
        remains 1.
    density : bool, optional
        If ``False``, the result will contain the number of samples in
        each bin. If ``True``, the result is the value of the
        probability *density* function at the bin, normalized such that
        the *integral* over the range is 1. Note that the sum of the
        histogram values will not be equal to 1 unless bins of unity
        width are chosen; it is not a probability *mass* function.
        Overrides the ``normed`` keyword if given.
    Returns
    -------
    hist : array
        The values of the histogram. See `density` and `weights` for a
        description of the possible semantics.
    bin_edges : array of dtype float
        Return the bin edges ``(length(hist)+1)``.
    See Also
    --------
    histogramdd, bincount, searchsorted, digitize
    Notes
    -----
    All but the last (righthand-most) bin is half-open. In other words,
    if `bins` is::
      [1, 2, 3, 4]
    then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
    the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
    *includes* 4.
    .. versionadded:: 1.11.0
    The methods to estimate the optimal number of bins are well founded
    in literature, and are inspired by the choices R provides for
    histogram visualisation. Note that having the number of bins
    proportional to :math:`n^{1/3}` is asymptotically optimal, which is
    why it appears in most estimators. These are simply plug-in methods
    that give good starting points for number of bins. In the equations
    below, :math:`h` is the binwidth and :math:`n_h` is the number of
    bins. All estimators that compute bin counts are recast to bin width
    using the `ptp` of the data. The final bin count is obtained from
    ``np.round(np.ceil(range / h))``.
    'Auto' (maximum of the 'Sturges' and 'FD' estimators)
        A compromise to get a good value. For small datasets the Sturges
        value will usually be chosen, while larger datasets will usually
        default to FD. Avoids the overly conservative behaviour of FD
        and Sturges for small and large datasets respectively.
        Switchover point is usually :math:`a.size \approx 1000`.
    'FD' (Freedman Diaconis Estimator)
        .. math:: h = 2 \frac{IQR}{n^{1/3}}
        The binwidth is proportional to the interquartile range (IQR)
        and inversely proportional to cube root of a.size. Can be too
        conservative for small datasets, but is quite good for large
        datasets. The IQR is very robust to outliers.
    'Scott'
        .. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}}
        The binwidth is proportional to the standard deviation of the
        data and inversely proportional to cube root of ``x.size``. Can
        be too conservative for small datasets, but is quite good for
        large datasets. The standard deviation is not very robust to
        outliers. Values are very similar to the Freedman-Diaconis
        estimator in the absence of outliers.
    'Rice'
        .. math:: n_h = 2n^{1/3}
        The number of bins is only proportional to cube root of
        ``a.size``. It tends to overestimate the number of bins and it
        does not take into account data variability.
    'Sturges'
        .. math:: n_h = \log _{2}n+1
        The number of bins is the base 2 log of ``a.size``. This
        estimator assumes normality of data and is too conservative for
        larger, non-normal datasets. This is the default method in R's
        ``hist`` method.
    'Doane'
        .. math:: n_h = 1 + \log_{2}(n) +
                        \log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}})
            g_1 = mean[(\frac{x - \mu}{\sigma})^3]
            \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}
        An improved version of Sturges' formula that produces better
        estimates for non-normal datasets. This estimator attempts to
        account for the skew of the data.
    'Sqrt'
        .. math:: n_h = \sqrt n
        The simplest and fastest estimator. Only takes into account the
        data size.
    Examples
    --------
    >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
    (array([0, 2, 1]), array([0, 1, 2, 3]))
    >>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
    (array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
    >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
    (array([1, 4, 1]), array([0, 1, 2, 3]))
    >>> a = np.arange(5)
    >>> hist, bin_edges = np.histogram(a, density=True)
    >>> hist
    array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
    >>> hist.sum()
    2.4999999999999996
    >>> np.sum(hist*np.diff(bin_edges))
    1.0
    .. versionadded:: 1.11.0
    Automated Bin Selection Methods example, using 2 peak random data
    with 2000 points:
    >>> import matplotlib.pyplot as plt
    >>> rng = np.random.RandomState(10) # deterministic random data
    >>> a = np.hstack((rng.normal(size=1000),
    ... rng.normal(loc=5, scale=2, size=1000)))
    >>> plt.hist(a, bins='auto') # plt.hist passes its arguments to np.histogram
    >>> plt.title("Histogram with 'auto' bins")
    >>> plt.show()
    """
    a = asarray(a)
    if weights is not None:
        weights = asarray(weights)
        if np.any(weights.shape != a.shape):
            raise ValueError(
                'weights should have the same shape as a.')
        weights = weights.ravel()
    a = a.ravel()
    # Do not modify the original value of range so we can check for `None`
    if range is None:
        if a.size == 0:
            # handle empty arrays. Can't determine range, so use 0-1.
            mn, mx = 0.0, 1.0
        else:
            # the "+ 0.0" promotes integer array limits to Python floats
            mn, mx = a.min() + 0.0, a.max() + 0.0
    else:
        mn, mx = [mi + 0.0 for mi in range]
        if mn > mx:
            raise ValueError(
                'max must be larger than min in range parameter.')
        if not np.all(np.isfinite([mn, mx])):
            raise ValueError(
                'range parameter must be finite.')
    # Widen a degenerate (zero-width) range so values still land in a bin.
    if mn == mx:
        mn -= 0.5
        mx += 0.5
    if isinstance(bins, basestring):
        # if `bins` is a string for an automatic method,
        # this will replace it with the number of bins calculated
        if bins not in _hist_bin_selectors:
            raise ValueError("{0} not a valid estimator for bins".format(bins))
        if weights is not None:
            raise TypeError("Automated estimation of the number of "
                            "bins is not supported for weighted data")
        # Make a reference to `a`
        b = a
        # Update the reference if the range needs truncation
        if range is not None:
            keep = (a >= mn)
            keep &= (a <= mx)
            if not np.logical_and.reduce(keep):
                b = a[keep]
        if b.size == 0:
            bins = 1
        else:
            # Do not call selectors on empty arrays
            width = _hist_bin_selectors[bins](b)
            if width:
                bins = int(np.ceil((mx - mn) / width))
            else:
                # Width can be zero for some estimators, e.g. FD when
                # the IQR of the data is zero.
                bins = 1
    # Histogram is an integer or a float array depending on the weights.
    if weights is None:
        ntype = np.dtype(np.intp)
    else:
        ntype = weights.dtype
    # We set a block size, as this allows us to iterate over chunks when
    # computing histograms, to minimize memory usage.
    BLOCK = 65536
    if not iterable(bins):
        if np.isscalar(bins) and bins < 1:
            raise ValueError(
                '`bins` should be a positive integer.')
        # At this point, if the weights are not integer, floating point, or
        # complex, we have to use the slow algorithm.
        if weights is not None and not (np.can_cast(weights.dtype, np.double) or
                                        np.can_cast(weights.dtype, np.complex)):
            bins = linspace(mn, mx, bins + 1, endpoint=True)
    if not iterable(bins):
        # We now convert values of a to bin indices, under the assumption of
        # equal bin widths (which is valid here).
        # Initialize empty histogram
        n = np.zeros(bins, ntype)
        # Pre-compute histogram scaling factor
        norm = bins / (mx - mn)
        # Compute the bin edges for potential correction.
        bin_edges = linspace(mn, mx, bins + 1, endpoint=True)
        # We iterate over blocks here for two reasons: the first is that for
        # large arrays, it is actually faster (for example for a 10^8 array it
        # is 2x as fast) and it results in a memory footprint 3x lower in the
        # limit of large arrays.
        for i in arange(0, len(a), BLOCK):
            tmp_a = a[i:i+BLOCK]
            if weights is None:
                tmp_w = None
            else:
                tmp_w = weights[i:i + BLOCK]
            # Only include values in the right range
            keep = (tmp_a >= mn)
            keep &= (tmp_a <= mx)
            if not np.logical_and.reduce(keep):
                tmp_a = tmp_a[keep]
                if tmp_w is not None:
                    tmp_w = tmp_w[keep]
            tmp_a_data = tmp_a.astype(float)
            tmp_a = tmp_a_data - mn
            tmp_a *= norm
            # Compute the bin indices, and for values that lie exactly on mx we
            # need to subtract one
            indices = tmp_a.astype(np.intp)
            indices[indices == bins] -= 1
            # The index computation is not guaranteed to give exactly
            # consistent results within ~1 ULP of the bin edges.
            decrement = tmp_a_data < bin_edges[indices]
            indices[decrement] -= 1
            # The last bin includes the right edge. The other bins do not.
            increment = (tmp_a_data >= bin_edges[indices + 1]) & (indices != bins - 1)
            indices[increment] += 1
            # We now compute the histogram using bincount
            if ntype.kind == 'c':
                n.real += np.bincount(indices, weights=tmp_w.real, minlength=bins)
                n.imag += np.bincount(indices, weights=tmp_w.imag, minlength=bins)
            else:
                n += np.bincount(indices, weights=tmp_w, minlength=bins).astype(ntype)
        # Rename the bin edges for return.
        bins = bin_edges
    else:
        bins = asarray(bins)
        if (np.diff(bins) < 0).any():
            raise ValueError(
                'bins must increase monotonically.')
        # Initialize empty histogram
        n = np.zeros(bins.shape, ntype)
        if weights is None:
            for i in arange(0, len(a), BLOCK):
                sa = sort(a[i:i+BLOCK])
                n += np.r_[sa.searchsorted(bins[:-1], 'left'),
                           sa.searchsorted(bins[-1], 'right')]
        else:
            zero = array(0, dtype=ntype)
            for i in arange(0, len(a), BLOCK):
                tmp_a = a[i:i+BLOCK]
                tmp_w = weights[i:i+BLOCK]
                sorting_index = np.argsort(tmp_a)
                sa = tmp_a[sorting_index]
                sw = tmp_w[sorting_index]
                cw = np.concatenate(([zero, ], sw.cumsum()))
                bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'),
                                  sa.searchsorted(bins[-1], 'right')]
                n += cw[bin_index]
        # Cumulative counts per edge -> counts per bin.
        n = np.diff(n)
    if density is not None:
        if density:
            db = array(np.diff(bins), float)
            return n/db/n.sum(), bins
        else:
            return n, bins
    else:
        # deprecated, buggy behavior. Remove for Numpy 2.0
        if normed:
            db = array(np.diff(bins), float)
            return n/(n*db).sum(), bins
        else:
            return n, bins
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
    """
    Compute the multidimensional histogram of some data.
    Parameters
    ----------
    sample : array_like
        The data to be histogrammed. It must be an (N,D) array or data
        that can be converted to such. The rows of the resulting array
        are the coordinates of points in a D dimensional polytope.
    bins : sequence or int, optional
        The bin specification:
        * A sequence of arrays describing the bin edges along each dimension.
        * The number of bins for each dimension (nx, ny, ... =bins)
        * The number of bins for all dimensions (nx=ny=...=bins).
    range : sequence, optional
        A sequence of lower and upper bin edges to be used if the edges are
        not given explicitly in `bins`. Defaults to the minimum and maximum
        values along each dimension.
    normed : bool, optional
        If False, returns the number of samples in each bin. If True,
        returns the bin density ``bin_count / sample_count / bin_volume``.
    weights : (N,) array_like, optional
        An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
        Weights are normalized to 1 if normed is True. If normed is False,
        the values of the returned histogram are equal to the sum of the
        weights belonging to the samples falling into each bin.
    Returns
    -------
    H : ndarray
        The multidimensional histogram of sample x. See normed and weights
        for the different possible semantics.
    edges : list
        A list of D arrays describing the bin edges for each dimension.
    See Also
    --------
    histogram: 1-D histogram
    histogram2d: 2-D histogram
    Examples
    --------
    >>> r = np.random.randn(100,3)
    >>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
    >>> H.shape, edges[0].size, edges[1].size, edges[2].size
    ((5, 8, 4), 6, 9, 5)
    """
    try:
        # Sample is an ND-array.
        N, D = sample.shape
    except (AttributeError, ValueError):
        # Sample is a sequence of 1D arrays.
        sample = atleast_2d(sample).T
        N, D = sample.shape
    # nbin[i] will hold the bin count along axis i, including two
    # outlier bins (see the "+2 for outlier bins" note below).
    nbin = empty(D, int)
    edges = D*[None]
    dedges = D*[None]
    if weights is not None:
        weights = asarray(weights)
    try:
        M = len(bins)
        if M != D:
            raise ValueError(
                'The dimension of bins must be equal to the dimension of the '
                ' sample x.')
    except TypeError:
        # bins is an integer
        bins = D*[bins]
    # Select range for each dimension
    # Used only if number of bins is given.
    if range is None:
        # Handle empty input. Range can't be determined in that case, use 0-1.
        if N == 0:
            smin = zeros(D)
            smax = ones(D)
        else:
            smin = atleast_1d(array(sample.min(0), float))
            smax = atleast_1d(array(sample.max(0), float))
    else:
        if not np.all(np.isfinite(range)):
            raise ValueError(
                'range parameter must be finite.')
        smin = zeros(D)
        smax = zeros(D)
        for i in arange(D):
            smin[i], smax[i] = range[i]
    # Make sure the bins have a finite width.
    for i in arange(len(smin)):
        if smin[i] == smax[i]:
            smin[i] = smin[i] - .5
            smax[i] = smax[i] + .5
    # avoid rounding issues for comparisons when dealing with inexact types
    if np.issubdtype(sample.dtype, np.inexact):
        edge_dt = sample.dtype
    else:
        edge_dt = float
    # Create edge arrays
    for i in arange(D):
        if isscalar(bins[i]):
            if bins[i] < 1:
                raise ValueError(
                    "Element at index %s in `bins` should be a positive "
                    "integer." % i)
            nbin[i] = bins[i] + 2  # +2 for outlier bins
            edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt)
        else:
            edges[i] = asarray(bins[i], edge_dt)
            nbin[i] = len(edges[i]) + 1  # +1 for outlier bins
        dedges[i] = diff(edges[i])
        if np.any(np.asarray(dedges[i]) <= 0):
            raise ValueError(
                "Found bin edge of size <= 0. Did you specify `bins` with"
                "non-monotonic sequence?")
    nbin = asarray(nbin)
    # Handle empty input.
    if N == 0:
        return np.zeros(nbin-2), edges
    # Compute the bin number each sample falls into.
    Ncount = {}
    for i in arange(D):
        Ncount[i] = digitize(sample[:, i], edges[i])
    # Using digitize, values that fall on an edge are put in the right bin.
    # For the rightmost bin, we want values equal to the right edge to be
    # counted in the last bin, and not as an outlier.
    for i in arange(D):
        # Rounding precision
        mindiff = dedges[i].min()
        if not np.isinf(mindiff):
            decimal = int(-log10(mindiff)) + 6
            # Find which points are on the rightmost edge.
            not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
            on_edge = (around(sample[:, i], decimal) ==
                       around(edges[i][-1], decimal))
            # Shift these points one bin to the left.
            Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1
    # Flattened histogram matrix (1D)
    # Reshape is used so that overlarge arrays
    # will raise an error.
    hist = zeros(nbin, float).reshape(-1)
    # Compute the sample indices in the flattened histogram matrix.
    ni = nbin.argsort()
    xy = zeros(N, int)
    for i in arange(0, D-1):
        xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
    xy += Ncount[ni[-1]]
    # Compute the number of repetitions in xy and assign it to the
    # flattened histmat.
    if len(xy) == 0:
        return zeros(nbin-2, int), edges
    flatcount = bincount(xy, weights)
    a = arange(len(flatcount))
    hist[a] = flatcount
    # Shape into a proper matrix
    hist = hist.reshape(sort(nbin))
    # Undo the axis permutation introduced by sorting nbin above.
    for i in arange(nbin.size):
        j = ni.argsort()[i]
        hist = hist.swapaxes(i, j)
        ni[i], ni[j] = ni[j], ni[i]
    # Remove outliers (indices 0 and -1 for each dimension).
    core = D*[slice(1, -1)]
    hist = hist[core]
    # Normalize if normed is True
    if normed:
        s = hist.sum()
        for i in arange(D):
            shape = ones(D, int)
            shape[i] = nbin[i] - 2
            hist = hist / dedges[i].reshape(shape)
        hist /= s
    if (hist.shape != nbin - 2).any():
        raise RuntimeError(
            "Internal Shape Error")
    return hist, edges
def average(a, axis=None, weights=None, returned=False):
    """
    Compute the weighted average along the specified axis.
    Parameters
    ----------
    a : array_like
        Array containing data to be averaged. If `a` is not an array, a
        conversion is attempted.
    axis : int, optional
        Axis along which to average `a`. If `None`, averaging is done over
        the flattened array.
    weights : array_like, optional
        An array of weights associated with the values in `a`. Each value in
        `a` contributes to the average according to its associated weight.
        The weights array can either be 1-D (in which case its length must be
        the size of `a` along the given axis) or of the same shape as `a`.
        If `weights=None`, then all data in `a` are assumed to have a
        weight equal to one.
    returned : bool, optional
        Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
        is returned, otherwise only the average is returned.
        If `weights=None`, `sum_of_weights` is equivalent to the number of
        elements over which the average is taken.
    Returns
    -------
    average, [sum_of_weights] : array_type or double
        Return the average along the specified axis. When returned is `True`,
        return a tuple with the average as the first element and the sum
        of the weights as the second element. The return type is `Float`
        if `a` is of integer type, otherwise it is of the same type as `a`.
        `sum_of_weights` is of the same type as `average`.
    Raises
    ------
    ZeroDivisionError
        When all weights along axis are zero. See `numpy.ma.average` for a
        version robust to this type of error.
    TypeError
        When the length of 1D `weights` is not the same as the shape of `a`
        along axis.
    See Also
    --------
    mean
    ma.average : average for masked arrays -- useful if your data contains
                 "missing" values
    Examples
    --------
    >>> data = range(1,5)
    >>> data
    [1, 2, 3, 4]
    >>> np.average(data)
    2.5
    >>> np.average(range(1,11), weights=range(10,0,-1))
    4.0
    >>> data = np.arange(6).reshape((3,2))
    >>> data
    array([[0, 1],
           [2, 3],
           [4, 5]])
    >>> np.average(data, axis=1, weights=[1./4, 3./4])
    array([ 0.75, 2.75, 4.75])
    >>> np.average(data, weights=[1./4, 3./4])
    Traceback (most recent call last):
    ...
    TypeError: Axis must be specified when shapes of a and weights differ.
    """
    # Preserve np.matrix inputs; everything else is coerced to ndarray.
    if not isinstance(a, np.matrix):
        a = np.asarray(a)
    if weights is None:
        avg = a.mean(axis)
        # Unweighted case: the "sum of weights" is the number of elements
        # folded into each averaged slot.
        scl = avg.dtype.type(a.size/avg.size)
    else:
        # "+ 0.0" promotes `a` to an inexact dtype for true division below.
        a = a + 0.0
        wgt = np.asarray(weights)
        # Sanity checks
        if a.shape != wgt.shape:
            if axis is None:
                raise TypeError(
                    "Axis must be specified when shapes of a and weights "
                    "differ.")
            if wgt.ndim != 1:
                raise TypeError(
                    "1D weights expected when shapes of a and weights differ.")
            if wgt.shape[0] != a.shape[axis]:
                raise ValueError(
                    "Length of weights not compatible with specified axis.")
            # setup wgt to broadcast along axis
            wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)
        scl = wgt.sum(axis=axis, dtype=np.result_type(a.dtype, wgt.dtype))
        if (scl == 0.0).any():
            raise ZeroDivisionError(
                "Weights sum to zero, can't be normalized")
        avg = np.multiply(a, wgt).sum(axis)/scl
    if returned:
        # Broadcast scl to the shape of avg for the returned tuple.
        scl = np.multiply(avg, 0) + scl
        return avg, scl
    else:
        return avg
def asarray_chkfinite(a, dtype=None, order=None):
    """Convert the input to an array, checking for NaNs or Infs.

    Parameters
    ----------
    a : array_like
        Input data, in any form that can be converted to an array. This
        includes lists, lists of tuples, tuples, tuples of tuples, tuples
        of lists and ndarrays. Success requires no NaNs or Infs.
    dtype : data-type, optional
        By default, the data-type is inferred from the input data.
    order : {'C', 'F'}, optional
        Whether to use row-major (C-style) or column-major
        (Fortran-style) memory representation. Defaults to 'C'.

    Returns
    -------
    out : ndarray
        Array interpretation of `a`. No copy is performed if the input
        is already an ndarray. If `a` is a subclass of ndarray, a base
        class ndarray is returned.

    Raises
    ------
    ValueError
        Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).

    See Also
    --------
    asarray : Create an array.
    asanyarray : Similar function which passes through subclasses.
    ascontiguousarray : Convert input to a contiguous array.
    asfarray : Convert input to a floating point ndarray.
    asfortranarray : Convert input to an ndarray with column-major
                     memory order.
    fromiter : Create an array from an iterator.
    fromfunction : Construct an array by executing a function on grid
                   positions.

    Examples
    --------
    Convert a list into an array. If all elements are finite
    ``asarray_chkfinite`` is identical to ``asarray``.

    >>> a = [1, 2]
    >>> np.asarray_chkfinite(a, dtype=float)
    array([1., 2.])

    Raises ValueError if array_like contains Nans or Infs.

    >>> a = [1, 2, np.inf]
    >>> try:
    ...     np.asarray_chkfinite(a)
    ... except ValueError:
    ...     print('ValueError')
    ...
    ValueError

    """
    result = asarray(a, dtype=dtype, order=order)
    # Only floating/complex dtypes can hold NaN or Inf; other dtypes skip
    # the finiteness scan entirely.
    is_inexact = result.dtype.char in typecodes['AllFloat']
    if is_inexact and not np.isfinite(result).all():
        raise ValueError(
            "array must not contain infs or NaNs")
    return result
def piecewise(x, condlist, funclist, *args, **kw):
    """
    Evaluate a piecewise-defined function.
    Given a set of conditions and corresponding functions, evaluate each
    function on the input data wherever its condition is true.
    Parameters
    ----------
    x : ndarray
        The input domain.
    condlist : list of bool arrays
        Each boolean array corresponds to a function in `funclist`. Wherever
        `condlist[i]` is True, `funclist[i](x)` is used as the output value.
        Each boolean array in `condlist` selects a piece of `x`,
        and should therefore be of the same shape as `x`.
        The length of `condlist` must correspond to that of `funclist`.
        If one extra function is given, i.e. if
        ``len(funclist) - len(condlist) == 1``, then that extra function
        is the default value, used wherever all conditions are false.
    funclist : list of callables, f(x,*args,**kw), or scalars
        Each function is evaluated over `x` wherever its corresponding
        condition is True. It should take an array as input and give an array
        or a scalar value as output. If, instead of a callable,
        a scalar is provided then a constant function (``lambda x: scalar``) is
        assumed.
    args : tuple, optional
        Any further arguments given to `piecewise` are passed to the functions
        upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
        each function is called as ``f(x, 1, 'a')``.
    kw : dict, optional
        Keyword arguments used in calling `piecewise` are passed to the
        functions upon execution, i.e., if called
        ``piecewise(..., ..., lambda=1)``, then each function is called as
        ``f(x, lambda=1)``.
    Returns
    -------
    out : ndarray
        The output is the same shape and type as x and is found by
        calling the functions in `funclist` on the appropriate portions of `x`,
        as defined by the boolean arrays in `condlist`. Portions not covered
        by any condition have a default value of 0.
    See Also
    --------
    choose, select, where
    Notes
    -----
    This is similar to choose or select, except that functions are
    evaluated on elements of `x` that satisfy the corresponding condition from
    `condlist`.
    The result is::
            |--
            |funclist[0](x[condlist[0]])
      out = |funclist[1](x[condlist[1]])
            |...
            |funclist[n2](x[condlist[n2]])
            |--
    Examples
    --------
    Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
    >>> x = np.linspace(-2.5, 2.5, 6)
    >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
    array([-1., -1., -1., 1., 1., 1.])
    Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
    ``x >= 0``.
    >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
    array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
    """
    x = asanyarray(x)
    n2 = len(funclist)
    # A single bare condition (scalar or one boolean array) is wrapped so
    # it can be treated uniformly as a list of conditions below.
    if (isscalar(condlist) or not (isinstance(condlist[0], list) or
                                   isinstance(condlist[0], ndarray))):
        condlist = [condlist]
    condlist = array(condlist, dtype=bool)
    n = len(condlist)
    # This is a hack to work around problems with NumPy's
    #  handling of 0-d arrays and boolean indexing with
    #  numpy.bool_ scalars
    zerod = False
    if x.ndim == 0:
        x = x[None]
        zerod = True
        if condlist.shape[-1] != 1:
            condlist = condlist.T
    if n == n2 - 1:  # compute the "otherwise" condition.
        totlist = np.logical_or.reduce(condlist, axis=0)
        # Only able to stack vertically if the array is 1d or less
        if x.ndim <= 1:
            condlist = np.vstack([condlist, ~totlist])
        else:
            condlist = [asarray(c, dtype=bool) for c in condlist]
            totlist = condlist[0]
            for k in range(1, n):
                totlist |= condlist[k]
            condlist.append(~totlist)
        n += 1
    # Uncovered positions keep this zero default (documented behavior).
    y = zeros(x.shape, x.dtype)
    for k in range(n):
        item = funclist[k]
        # Scalar entries in funclist act as constant functions.
        if not isinstance(item, collections.Callable):
            y[condlist[k]] = item
        else:
            vals = x[condlist[k]]
            if vals.size > 0:
                y[condlist[k]] = item(vals, *args, **kw)
    if zerod:
        y = y.squeeze()
    return y
def select(condlist, choicelist, default=0):
    """
    Return an array drawn from elements in choicelist, depending on conditions.

    Parameters
    ----------
    condlist : list of bool ndarrays
        The list of conditions which determine from which array in
        `choicelist` the output elements are taken. When multiple
        conditions are satisfied, the first one encountered in
        `condlist` is used.
    choicelist : list of ndarrays
        The list of arrays from which the output elements are taken. It
        has to be of the same length as `condlist`.
    default : scalar, optional
        The element inserted in the output when all conditions evaluate
        to False.

    Returns
    -------
    output : ndarray
        The output at position m is the m-th element of the array in
        `choicelist` where the m-th element of the corresponding array
        in `condlist` is True.

    See Also
    --------
    where : Return elements from one of two arrays depending on condition.
    take, choose, compress, diag, diagonal

    Examples
    --------
    >>> x = np.arange(10)
    >>> condlist = [x<3, x>5]
    >>> choicelist = [x, x**2]
    >>> np.select(condlist, choicelist)
    array([ 0,  1,  2,  0,  0,  0, 36, 49, 64, 81])
    """
    # Check the size of condlist and choicelist are the same, or abort.
    if len(condlist) != len(choicelist):
        raise ValueError(
            'list of cases must be same length as list of conditions')

    # Now that the dtype is known, handle the deprecated select([], []) case
    if len(condlist) == 0:
        # 2014-02-24, 1.9
        warnings.warn("select with an empty condition list is not possible"
                      "and will be deprecated",
                      DeprecationWarning)
        return np.asarray(default)[()]

    choicelist = [np.asarray(choice) for choice in choicelist]
    choicelist.append(np.asarray(default))

    # need to get the result type before broadcasting for correct scalar
    # behaviour
    dtype = np.result_type(*choicelist)

    # Convert conditions to arrays and broadcast conditions and choices
    # as the shape is needed for the result. Doing it separately optimizes
    # for example when all choices are scalars.
    # NOTE: np.broadcast_arrays may return a tuple; wrap in list() because
    # the integer-condition compatibility path below assigns items in place.
    condlist = list(np.broadcast_arrays(*condlist))
    choicelist = list(np.broadcast_arrays(*choicelist))

    # If cond array is not an ndarray in boolean format or scalar bool, abort.
    deprecated_ints = False
    for i in range(len(condlist)):
        cond = condlist[i]
        if cond.dtype.type is not np.bool_:
            if np.issubdtype(cond.dtype, np.integer):
                # A previous implementation accepted int ndarrays accidentally.
                # Supported here deliberately, but deprecated.
                condlist[i] = condlist[i].astype(bool)
                deprecated_ints = True
            else:
                raise ValueError(
                    'invalid entry in choicelist: should be boolean ndarray')

    if deprecated_ints:
        # 2014-02-24, 1.9
        msg = "select condlists containing integer ndarrays is deprecated " \
            "and will be removed in the future. Use `.astype(bool)` to " \
            "convert to bools."
        warnings.warn(msg, DeprecationWarning)

    if choicelist[0].ndim == 0:
        # This may be common, so avoid the call.
        result_shape = condlist[0].shape
    else:
        result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape

    result = np.full(result_shape, choicelist[-1], dtype)

    # Use np.copyto to burn each choicelist array onto result, using the
    # corresponding condlist as a boolean mask. This is done in reverse
    # order since the first choice should take precedence.
    choicelist = choicelist[-2::-1]
    condlist = condlist[::-1]
    for choice, cond in zip(choicelist, condlist):
        np.copyto(result, choice, where=cond)

    return result
def copy(a, order='K'):
    """
    Return an array copy of the given object.

    Parameters
    ----------
    a : array_like
        Input data.
    order : {'C', 'F', 'A', 'K'}, optional
        Controls the memory layout of the copy. 'C' means C-order,
        'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
        'C' otherwise. 'K' means match the layout of `a` as closely
        as possible. (Note that this function and :meth:ndarray.copy
        are very similar, but have different default values for their
        order= arguments.)

    Returns
    -------
    arr : ndarray
        Array interpretation of `a`, independent of the original.

    Notes
    -----
    This is equivalent to ``np.array(a, copy=True)``.

    Examples
    --------
    >>> x = np.array([1, 2, 3])
    >>> z = np.copy(x)
    >>> x[0] = 10
    >>> x[0] == z[0]
    False
    """
    # Delegate to array(); copy=True guarantees the result never aliases `a`.
    return array(a, order=order, copy=True)
# Basic operations
def gradient(f, *varargs, **kwargs):
    """
    Return the gradient of an N-dimensional array.

    The gradient is computed using second order accurate central
    differences in the interior and either first differences or second
    order accurate one-sided (forward or backwards) differences at the
    boundaries. The returned gradient hence has the same shape as the
    input array.

    Parameters
    ----------
    f : array_like
        An N-dimensional array containing samples of a scalar function.
    varargs : scalar or list of scalar, optional
        N scalars specifying the sample distances for each dimension,
        i.e. `dx`, `dy`, `dz`, ... Default distance: 1.
        A single scalar specifies the sample distance for all dimensions.
        If `axis` is given, the number of varargs must equal the number
        of axes.
    edge_order : {1, 2}, optional
        Gradient is calculated using N-th order accurate differences
        at the boundaries. Default: 1.
    axis : None or int or tuple of ints, optional
        Gradient is calculated only along the given axis or axes.
        The default (axis = None) is to calculate the gradient for all
        the axes of the input array. axis may be negative, in which case
        it counts from the last to the first axis.

    Returns
    -------
    gradient : ndarray or list of ndarray
        One array per requested axis, each of the same shape as `f`,
        giving the derivative of `f` along that axis. A single ndarray
        is returned when only one axis is requested.

    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
    >>> np.gradient(x)
    array([ 1. ,  1.5,  2.5,  3.5,  4.5,  5. ])
    >>> np.gradient(x, 2)
    array([ 0.5 ,  0.75,  1.25,  1.75,  2.25,  2.5 ])
    """
    f = np.asanyarray(f)
    N = len(f.shape)  # number of dimensions

    axes = kwargs.pop('axis', None)
    if axes is None:
        axes = tuple(range(N))
    # check axes to have correct type and no duplicate entries
    if isinstance(axes, int):
        axes = (axes,)
    if not isinstance(axes, tuple):
        raise TypeError("A tuple of integers or a single integer is required")

    # normalize negative axis values, then validate range and uniqueness
    axes = tuple(x + N if x < 0 else x for x in axes)
    if max(axes) >= N or min(axes) < 0:
        raise ValueError("'axis' entry is out of bounds")
    if len(set(axes)) != len(axes):
        raise ValueError("duplicate value in 'axis'")

    n = len(varargs)
    if n == 0:
        dx = [1.0]*N
    elif n == 1:
        # a single scalar spacing applies to every axis
        dx = [varargs[0]]*N
    elif n == len(axes):
        # one spacing per requested axis, in the same order as `axes`
        dx = list(varargs)
    else:
        raise SyntaxError(
            "invalid number of arguments")

    edge_order = kwargs.pop('edge_order', 1)
    if kwargs:
        raise TypeError('"{}" are not valid keyword arguments.'.format(
            '", "'.join(kwargs.keys())))
    if edge_order > 2:
        raise ValueError("'edge_order' greater than 2 not supported")

    # use central differences on interior and one-sided differences on the
    # endpoints. This preserves second order-accuracy over the full domain.
    outvals = []

    # create slice objects --- initially all are [:, :, ..., :].
    # They are kept as mutable lists but converted with tuple() at every
    # indexing site: indexing an ndarray with a plain list of slices is
    # no longer supported by numpy (only tuples are valid multi-axis
    # indices).
    slice1 = [slice(None)]*N
    slice2 = [slice(None)]*N
    slice3 = [slice(None)]*N
    slice4 = [slice(None)]*N

    otype = f.dtype.char
    if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
        otype = 'd'

    # Difference of datetime64 elements results in timedelta64
    if otype == 'M':
        # Need to use the full dtype name because it contains unit information
        otype = f.dtype.name.replace('datetime', 'timedelta')
    elif otype == 'm':
        # Needs to keep the specific units, can't be a general unit
        otype = f.dtype

    # Convert datetime64 data into ints. Make dummy variable `y`
    # that is a view of ints if the data is datetime64, otherwise
    # just set y equal to the array `f`.
    if f.dtype.char in ["M", "m"]:
        y = f.view('int64')
    else:
        y = f

    for i, axis in enumerate(axes):

        if y.shape[axis] < 2:
            raise ValueError(
                "Shape of array too small to calculate a numerical gradient, "
                "at least two elements are required.")

        # Numerical differentiation: 1st order edges, 2nd order interior
        if y.shape[axis] == 2 or edge_order == 1:
            # Use first order differences for time data
            out = np.empty_like(y, dtype=otype)

            slice1[axis] = slice(1, -1)
            slice2[axis] = slice(2, None)
            slice3[axis] = slice(None, -2)
            # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
            out[tuple(slice1)] = (y[tuple(slice2)] - y[tuple(slice3)])/2.0

            slice1[axis] = 0
            slice2[axis] = 1
            slice3[axis] = 0
            # 1D equivalent -- out[0] = (y[1] - y[0])
            out[tuple(slice1)] = (y[tuple(slice2)] - y[tuple(slice3)])

            slice1[axis] = -1
            slice2[axis] = -1
            slice3[axis] = -2
            # 1D equivalent -- out[-1] = (y[-1] - y[-2])
            out[tuple(slice1)] = (y[tuple(slice2)] - y[tuple(slice3)])

        # Numerical differentiation: 2nd order edges, 2nd order interior
        else:
            # Use second order differences where possible
            out = np.empty_like(y, dtype=otype)

            slice1[axis] = slice(1, -1)
            slice2[axis] = slice(2, None)
            slice3[axis] = slice(None, -2)
            # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
            out[tuple(slice1)] = (y[tuple(slice2)] - y[tuple(slice3)])/2.0

            slice1[axis] = 0
            slice2[axis] = 0
            slice3[axis] = 1
            slice4[axis] = 2
            # 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0
            out[tuple(slice1)] = -(3.0*y[tuple(slice2)] - 4.0*y[tuple(slice3)] +
                                   y[tuple(slice4)])/2.0

            slice1[axis] = -1
            slice2[axis] = -1
            slice3[axis] = -2
            slice4[axis] = -3
            # 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3]) / 2.0
            out[tuple(slice1)] = (3.0*y[tuple(slice2)] - 4.0*y[tuple(slice3)] +
                                  y[tuple(slice4)])/2.0

        # divide by step size
        out /= dx[i]
        outvals.append(out)

        # reset the slice object in this dimension to ":"
        slice1[axis] = slice(None)
        slice2[axis] = slice(None)
        slice3[axis] = slice(None)
        slice4[axis] = slice(None)

    if len(axes) == 1:
        return outvals[0]
    else:
        return outvals
def diff(a, n=1, axis=-1):
    """
    Calculate the n-th discrete difference along the given axis.

    The first difference is ``out[i] = a[i+1] - a[i]`` along `axis`;
    higher differences are obtained by applying `diff` recursively.

    Parameters
    ----------
    a : array_like
        Input array.
    n : int, optional
        The number of times values are differenced.
    axis : int, optional
        The axis along which the difference is taken, default is the
        last axis.

    Returns
    -------
    diff : ndarray
        The n-th differences. The shape of the output is the same as
        `a` except along `axis` where the dimension is smaller by `n`.

    See Also
    --------
    gradient, ediff1d, cumsum

    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 0])
    >>> np.diff(x)
    array([ 1,  2,  3, -7])
    >>> np.diff(x, n=2)
    array([  1,   1, -10])
    """
    # n == 0 is a no-op: the input is returned unconverted.
    if n == 0:
        return a
    if n < 0:
        raise ValueError(
            "order must be non-negative but got " + repr(n))
    a = asanyarray(a)
    nd = a.ndim
    # Build index tuples selecting a[..., 1:, ...] and a[..., :-1, ...]
    # along the requested axis.
    later = [slice(None)] * nd
    former = [slice(None)] * nd
    later[axis] = slice(1, None)
    former[axis] = slice(None, -1)
    first_diff = a[tuple(later)] - a[tuple(former)]
    # Recurse for higher-order differences.
    return diff(first_diff, n - 1, axis=axis) if n > 1 else first_diff
def interp(x, xp, fp, left=None, right=None, period=None):
    """
    One-dimensional linear interpolation.

    Returns the one-dimensional piecewise linear interpolant to a
    function with given values at discrete data-points.

    Parameters
    ----------
    x : array_like
        The x-coordinates of the interpolated values.
    xp : 1-D sequence of floats
        The x-coordinates of the data points, must be increasing if
        argument `period` is not specified. Otherwise, `xp` is
        internally sorted after normalizing the periodic boundaries
        with ``xp = xp % period``.
    fp : 1-D sequence of floats
        The y-coordinates of the data points, same length as `xp`.
    left : float, optional
        Value to return for `x < xp[0]`, default is `fp[0]`.
    right : float, optional
        Value to return for `x > xp[-1]`, default is `fp[-1]`.
    period : None or float, optional
        A period for the x-coordinates, enabling proper interpolation
        of angular x-coordinates. `left` and `right` are ignored when
        `period` is specified.

    Returns
    -------
    y : float or ndarray
        The interpolated values, same shape as `x`.

    Raises
    ------
    ValueError
        If `xp` and `fp` have different length, if `xp` or `fp` are
        not 1-D sequences, or if `period == 0`.

    Notes
    -----
    Does not check that the x-coordinate sequence `xp` is increasing;
    if it is not, the results are nonsense.

    Examples
    --------
    >>> np.interp(2.5, [1, 2, 3], [3, 2, 0])
    1.0
    """
    if period is None:
        # Plain (non-periodic) mode: hand straight off to the compiled
        # routine, unwrapping scalar-like inputs to a scalar result.
        scalar_input = (isinstance(x, (float, int, number)) or
                        (isinstance(x, np.ndarray) and x.ndim == 0))
        if scalar_input:
            return compiled_interp([x], xp, fp, left, right).item()
        return compiled_interp(x, xp, fp, left, right)

    if period == 0:
        raise ValueError("period must be a non-zero value")
    period = abs(period)
    # `left` and `right` are deliberately ignored in periodic mode.
    left = None
    right = None

    scalar_input = isinstance(x, (float, int, number))
    if scalar_input:
        x = [x]
    x = np.asarray(x, dtype=np.float64)
    xp = np.asarray(xp, dtype=np.float64)
    fp = np.asarray(fp, dtype=np.float64)
    if xp.ndim != 1 or fp.ndim != 1:
        raise ValueError("Data points must be 1-D sequences")
    if xp.shape[0] != fp.shape[0]:
        raise ValueError("fp and xp are not of the same length")

    # Fold all x-coordinates into one period, then sort the data points.
    x = x % period
    xp = xp % period
    order = np.argsort(xp)
    xp = xp[order]
    fp = fp[order]
    # Duplicate the end points across the period boundary so the compiled
    # routine can interpolate across the wrap-around.
    xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period))
    fp = np.concatenate((fp[-1:], fp, fp[0:1]))

    result = compiled_interp(x, xp, fp, left, right)
    return result.item() if scalar_input else result
def angle(z, deg=0):
    """
    Return the angle of the complex argument.

    Parameters
    ----------
    z : array_like
        A complex number or sequence of complex numbers.
    deg : bool, optional
        Return angle in degrees if True, radians if False (default).

    Returns
    -------
    angle : ndarray or scalar
        The counterclockwise angle from the positive real axis on the
        complex plane, with dtype as numpy.float64.

    See Also
    --------
    arctan2
    absolute

    Examples
    --------
    >>> np.angle([1.0, 1.0j, 1+1j])               # in radians
    array([ 0.        ,  1.57079633,  0.78539816])
    >>> np.angle(1+1j, deg=True)                  # in degrees
    45.0
    """
    z = asarray(z)
    # Complex kind 'c' covers all complex floating dtypes; anything else
    # is treated as purely real (imaginary part zero).
    if z.dtype.kind == 'c':
        imag_part, real_part = z.imag, z.real
    else:
        imag_part, real_part = 0, z
    radians = arctan2(imag_part, real_part)
    return radians * (180/pi) if deg else radians * 1.0
def unwrap(p, discont=pi, axis=-1):
    """
    Unwrap by changing deltas between values to 2*pi complement.

    Unwrap radian phase `p` by changing absolute jumps greater than
    `discont` to their 2*pi complement along the given axis.

    Parameters
    ----------
    p : array_like
        Input array.
    discont : float, optional
        Maximum discontinuity between values, default is ``pi``.
    axis : int, optional
        Axis along which unwrap will operate, default is the last axis.

    Returns
    -------
    out : ndarray
        Output array.

    See Also
    --------
    rad2deg, deg2rad

    Notes
    -----
    If the discontinuity in `p` is smaller than ``pi``, but larger than
    `discont`, no unwrapping is done because taking the 2*pi complement
    would only make the discontinuity larger.

    Examples
    --------
    >>> phase = np.linspace(0, np.pi, num=5)
    >>> phase[3:] += np.pi
    >>> np.unwrap(phase)
    array([ 0.        ,  0.78539816,  1.57079633, -0.78539816,  0.        ])
    """
    p = asarray(p)
    nd = len(p.shape)
    dd = diff(p, axis=axis)
    # Index tuple selecting everything except the first element along `axis`.
    # Must be a tuple: indexing with a plain list of slices is no longer
    # supported by numpy.
    slice1 = [slice(None, None)]*nd     # full slices
    slice1[axis] = slice(1, None)
    slice1 = tuple(slice1)
    # Map each delta into (-pi, pi]; keep +pi (rather than -pi) for
    # positive jumps so the correction is symmetric.
    ddmod = mod(dd + pi, 2*pi) - pi
    _nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0))
    ph_correct = ddmod - dd
    # Jumps smaller than `discont` need no correction.
    _nx.copyto(ph_correct, 0, where=abs(dd) < discont)
    up = array(p, copy=True, dtype='d')
    # Accumulate corrections so each later sample absorbs all earlier jumps.
    up[slice1] = p[slice1] + ph_correct.cumsum(axis)
    return up
def sort_complex(a):
    """
    Sort a complex array using the real part first, then the imaginary part.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    out : complex ndarray
        Always returns a sorted complex array.

    Examples
    --------
    >>> np.sort_complex([5, 3, 6, 2, 1])
    array([ 1.+0.j,  2.+0.j,  3.+0.j,  5.+0.j,  6.+0.j])
    >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
    array([ 1.+2.j,  2.-1.j,  3.-3.j,  3.-2.j,  3.+5.j])
    """
    # Sort a copy in place; ndarray.sort orders complex values
    # lexicographically by (real, imag).
    result = array(a, copy=True)
    result.sort()
    if issubclass(result.dtype.type, _nx.complexfloating):
        return result
    # Real input: promote to a complex type wide enough for the data.
    char = result.dtype.char
    if char in 'bhBH':
        return result.astype('F')
    if char == 'g':
        return result.astype('G')
    return result.astype('D')
def trim_zeros(filt, trim='fb'):
    """
    Trim the leading and/or trailing zeros from a 1-D array or sequence.

    Parameters
    ----------
    filt : 1-D array or sequence
        Input array.
    trim : str, optional
        A string with 'f' representing trim from front and 'b' to trim
        from back. Default is 'fb', trim zeros from both front and back
        of the array.

    Returns
    -------
    trimmed : 1-D array or sequence
        The result of trimming the input. The input data type is
        preserved (list in means list out, tuple in means tuple out).

    Examples
    --------
    >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
    >>> np.trim_zeros(a)
    array([1, 2, 3, 0, 2, 1])
    >>> np.trim_zeros([0, 1, 2, 0])
    [1, 2]
    """
    mode = trim.upper()
    # Count leading zeros when trimming from the front.
    start = 0
    if 'F' in mode:
        for value in filt:
            if value != 0.:
                break
            start += 1
    # Count trailing zeros when trimming from the back.
    stop = len(filt)
    if 'B' in mode:
        for value in filt[::-1]:
            if value != 0.:
                break
            stop -= 1
    # Slicing preserves the input container type.
    return filt[start:stop]
@deprecate
def unique(x):
    """
    This function is deprecated. Use numpy.lib.arraysetops.unique()
    instead.
    """
    # Array path: flatten, sort, then keep each element that differs
    # from its predecessor. Fall back to set() semantics for objects
    # without ndarray attributes.
    try:
        flat = x.flatten()
        if flat.size == 0:
            return flat
        flat.sort()
        keep = concatenate(([True], flat[1:] != flat[:-1]))
        return flat[keep]
    except AttributeError:
        return asarray(sorted(set(x)))
def extract(condition, arr):
    """
    Return the elements of an array that satisfy some condition.

    This is equivalent to ``np.compress(ravel(condition), ravel(arr))``.
    If `condition` is boolean ``np.extract`` is equivalent to
    ``arr[condition]``. Note that `place` does the exact opposite of
    `extract`.

    Parameters
    ----------
    condition : array_like
        An array whose nonzero or True entries indicate the elements of
        `arr` to extract.
    arr : array_like
        Input array of the same size as `condition`.

    Returns
    -------
    extract : ndarray
        Rank 1 array of values from `arr` where `condition` is True.

    See Also
    --------
    take, put, copyto, compress, place

    Examples
    --------
    >>> arr = np.arange(12).reshape((3, 4))
    >>> condition = np.mod(arr, 3)==0
    >>> np.extract(condition, arr)
    array([0, 3, 6, 9])
    """
    # Flatten both inputs, then gather the positions where the condition
    # is nonzero.
    flat_condition = ravel(condition)
    flat_values = ravel(arr)
    return np.take(flat_values, nonzero(flat_condition)[0])
def place(arr, mask, vals):
    """
    Change elements of an array based on conditional and input values.

    Similar to ``np.copyto(arr, vals, where=mask)``, the difference is
    that `place` uses the first N elements of `vals`, where N is the
    number of True values in `mask`, while `copyto` uses the elements
    where `mask` is True. If `vals` is smaller than N it is repeated.
    Note that `extract` does the exact opposite of `place`.

    Parameters
    ----------
    arr : ndarray
        Array to put data into.
    mask : array_like
        Boolean mask array. Must have the same size as `arr`.
    vals : 1-D sequence
        Values to put into `arr`.

    See Also
    --------
    copyto, put, take, extract

    Examples
    --------
    >>> arr = np.arange(6).reshape(2, 3)
    >>> np.place(arr, arr>2, [44, 55])
    >>> arr
    array([[ 0,  1,  2],
           [44, 55, 44]])
    """
    # Only genuine ndarrays can be modified in place by the C helper.
    if isinstance(arr, np.ndarray):
        return _insert(arr, mask, vals)
    raise TypeError("argument 1 must be numpy.ndarray, "
                    "not {name}".format(name=type(arr).__name__))
def disp(mesg, device=None, linefeed=True):
    """
    Display a message on a device.

    Parameters
    ----------
    mesg : str
        Message to display.
    device : object
        Device to write message. If None, defaults to ``sys.stdout``
        which is very similar to ``print``. `device` needs to have
        ``write()`` and ``flush()`` methods.
    linefeed : bool, optional
        Option whether to print a line feed or not. Defaults to True.

    Raises
    ------
    AttributeError
        If `device` does not have a ``write()`` or ``flush()`` method.

    Examples
    --------
    >>> from StringIO import StringIO
    >>> buf = StringIO()
    >>> np.disp('"Display" in a file', device=buf)
    >>> buf.getvalue()
    '"Display" in a file\\n'
    """
    target = sys.stdout if device is None else device
    terminator = '\n' if linefeed else ''
    target.write('%s%s' % (mesg, terminator))
    # Flush so the message appears immediately even on buffered devices.
    target.flush()
class vectorize(object):
    """
    vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False)
    Generalized function class.
    Define a vectorized function which takes a nested sequence
    of objects or numpy arrays as inputs and returns a
    numpy array as output. The vectorized function evaluates `pyfunc` over
    successive tuples of the input arrays like the python map function,
    except it uses the broadcasting rules of numpy.
    The data type of the output of `vectorized` is determined by calling
    the function with the first element of the input. This can be avoided
    by specifying the `otypes` argument.
    Parameters
    ----------
    pyfunc : callable
        A python function or method.
    otypes : str or list of dtypes, optional
        The output data type. It must be specified as either a string of
        typecode characters or a list of data type specifiers. There should
        be one data type specifier for each output.
    doc : str, optional
        The docstring for the function. If `None`, the docstring will be the
        ``pyfunc.__doc__``.
    excluded : set, optional
        Set of strings or integers representing the positional or keyword
        arguments for which the function will not be vectorized. These will be
        passed directly to `pyfunc` unmodified.
        .. versionadded:: 1.7.0
    cache : bool, optional
        If `True`, then cache the first function call that determines the number
        of outputs if `otypes` is not provided.
        .. versionadded:: 1.7.0
    Returns
    -------
    vectorized : callable
        Vectorized function.
    Examples
    --------
    >>> def myfunc(a, b):
    ...     "Return a-b if a>b, otherwise return a+b"
    ...     if a > b:
    ...         return a - b
    ...     else:
    ...         return a + b
    >>> vfunc = np.vectorize(myfunc)
    >>> vfunc([1, 2, 3, 4], 2)
    array([3, 4, 1, 2])
    The docstring is taken from the input function to `vectorize` unless it
    is specified
    >>> vfunc.__doc__
    'Return a-b if a>b, otherwise return a+b'
    >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
    >>> vfunc.__doc__
    'Vectorized `myfunc`'
    The output type is determined by evaluating the first element of the input,
    unless it is specified
    >>> out = vfunc([1, 2, 3, 4], 2)
    >>> type(out[0])
    <type 'numpy.int32'>
    >>> vfunc = np.vectorize(myfunc, otypes=[np.float])
    >>> out = vfunc([1, 2, 3, 4], 2)
    >>> type(out[0])
    <type 'numpy.float64'>
    The `excluded` argument can be used to prevent vectorizing over certain
    arguments. This can be useful for array-like arguments of a fixed length
    such as the coefficients for a polynomial as in `polyval`:
    >>> def mypolyval(p, x):
    ...     _p = list(p)
    ...     res = _p.pop(0)
    ...     while _p:
    ...         res = res*x + _p.pop(0)
    ...     return res
    >>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
    >>> vpolyval(p=[1, 2, 3], x=[0, 1])
    array([3, 6])
    Positional arguments may also be excluded by specifying their position:
    >>> vpolyval.excluded.add(0)
    >>> vpolyval([1, 2, 3], x=[0, 1])
    array([3, 6])
    Notes
    -----
    The `vectorize` function is provided primarily for convenience, not for
    performance. The implementation is essentially a for loop.
    If `otypes` is not specified, then a call to the function with the
    first argument will be used to determine the number of outputs. The
    results of this call will be cached if `cache` is `True` to prevent
    calling the function twice. However, to implement the cache, the
    original function must be wrapped which will slow down subsequent
    calls, so only do this if your function is expensive.
    The new keyword argument interface and `excluded` argument support
    further degrades performance.
    """
    def __init__(self, pyfunc, otypes='', doc=None, excluded=None,
                 cache=False):
        """Store the python function and validate the output-type spec."""
        self.pyfunc = pyfunc
        self.cache = cache
        self._ufunc = None    # Caching to improve default performance
        if doc is None:
            self.__doc__ = pyfunc.__doc__
        else:
            self.__doc__ = doc
        # `otypes` may be a typecode string or an iterable of dtype
        # specifiers; either way it is normalized to a typecode string.
        if isinstance(otypes, str):
            self.otypes = otypes
            for char in self.otypes:
                if char not in typecodes['All']:
                    raise ValueError(
                        "Invalid otype specified: %s" % (char,))
        elif iterable(otypes):
            self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
        else:
            raise ValueError(
                "Invalid otype specification")
        # Excluded variable support
        if excluded is None:
            excluded = set()
        self.excluded = set(excluded)
    def __call__(self, *args, **kwargs):
        """
        Return arrays with the results of `pyfunc` broadcast (vectorized) over
        `args` and `kwargs` not in `excluded`.
        """
        excluded = self.excluded
        if not kwargs and not excluded:
            # Fast path: nothing to exclude and no keywords, so the raw
            # python function can be vectorized directly.
            func = self.pyfunc
            vargs = args
        else:
            # The wrapper accepts only positional arguments: we use `names` and
            # `inds` to mutate `the_args` and `kwargs` to pass to the original
            # function.
            nargs = len(args)
            names = [_n for _n in kwargs if _n not in excluded]
            inds = [_i for _i in range(nargs) if _i not in excluded]
            the_args = list(args)
            def func(*vargs):
                # Re-insert the vectorized values into their original
                # positional/keyword slots before calling pyfunc.
                for _n, _i in enumerate(inds):
                    the_args[_i] = vargs[_n]
                kwargs.update(zip(names, vargs[len(inds):]))
                return self.pyfunc(*the_args, **kwargs)
            vargs = [args[_i] for _i in inds]
            vargs.extend([kwargs[_n] for _n in names])
        return self._vectorize_call(func=func, args=vargs)
    def _get_ufunc_and_otypes(self, func, args):
        """Return (ufunc, otypes)."""
        # frompyfunc will fail if args is empty
        if not args:
            raise ValueError('args can not be empty')
        if self.otypes:
            otypes = self.otypes
            nout = len(otypes)
            # Note logic here: We only *use* self._ufunc if func is self.pyfunc
            # even though we set self._ufunc regardless.
            if func is self.pyfunc and self._ufunc is not None:
                ufunc = self._ufunc
            else:
                ufunc = self._ufunc = frompyfunc(func, len(args), nout)
        else:
            # Get number of outputs and output types by calling the function on
            # the first entries of args.  We also cache the result to prevent
            # the subsequent call when the ufunc is evaluated.
            # Assumes that ufunc first evaluates the 0th elements in the input
            # arrays (the input values are not checked to ensure this)
            inputs = [asarray(_a).flat[0] for _a in args]
            outputs = func(*inputs)
            # Performance note: profiling indicates that -- for simple
            # functions at least -- this wrapping can almost double the
            # execution time.
            # Hence we make it optional.
            if self.cache:
                _cache = [outputs]
                def _func(*vargs):
                    # Return the probe result once, then defer to func.
                    if _cache:
                        return _cache.pop()
                    else:
                        return func(*vargs)
            else:
                _func = func
            if isinstance(outputs, tuple):
                nout = len(outputs)
            else:
                nout = 1
                outputs = (outputs,)
            otypes = ''.join([asarray(outputs[_k]).dtype.char
                              for _k in range(nout)])
            # Performance note: profiling indicates that creating the ufunc is
            # not a significant cost compared with wrapping so it seems not
            # worth trying to cache this.
            ufunc = frompyfunc(_func, len(args), nout)
        return ufunc, otypes
    def _vectorize_call(self, func, args):
        """Vectorized call to `func` over positional `args`."""
        if not args:
            # No inputs to broadcast over: just call once.
            _res = func()
        else:
            ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
            # Convert args to object arrays first
            inputs = [array(_a, copy=False, subok=True, dtype=object)
                      for _a in args]
            outputs = ufunc(*inputs)
            # Cast the object-dtype ufunc output(s) to the determined otypes.
            if ufunc.nout == 1:
                _res = array(outputs,
                             copy=False, subok=True, dtype=otypes[0])
            else:
                _res = tuple([array(_x, copy=False, subok=True, dtype=_t)
                              for _x, _t in zip(outputs, otypes)])
        return _res
def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
        aweights=None):
    """
    Estimate a covariance matrix, given data and weights.

    Covariance indicates the level to which two variables vary together.
    If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
    then the covariance matrix element :math:`C_{ij}` is the covariance of
    :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
    of :math:`x_i`.  See the notes for an outline of the algorithm.

    Parameters
    ----------
    m : array_like
        A 1-D or 2-D array containing multiple variables and observations.
        Each row of `m` represents a variable, and each column a single
        observation of all those variables. Also see `rowvar` below.
    y : array_like, optional
        An additional set of variables and observations. `y` has the same form
        as that of `m`.
    rowvar : bool, optional
        If `rowvar` is True (default), then each row represents a
        variable, with observations in the columns. Otherwise, the relationship
        is transposed: each column represents a variable, while the rows
        contain observations.
    bias : bool, optional
        Default normalization (False) is by ``(N - 1)``, where ``N`` is the
        number of observations given (unbiased estimate). If `bias` is True,
        then normalization is by ``N``. These values can be overridden by using
        the keyword ``ddof``.
    ddof : int, optional
        If not ``None`` the default value implied by `bias` is overridden.
        Note that ``ddof=1`` will return the unbiased estimate, even if both
        `fweights` and `aweights` are specified, and ``ddof=0`` will return
        the simple average. See the notes for the details. The default value
        is ``None``.
    fweights : array_like, int, optional
        1-D array of integer frequency weights; the number of times each
        observation vector should be repeated.
    aweights : array_like, optional
        1-D array of observation vector weights. These relative weights are
        typically large for observations considered "important" and smaller
        for observations considered less "important". If ``ddof=0`` the array
        of weights can be used to assign probabilities to observation vectors.

    Returns
    -------
    out : ndarray
        The covariance matrix of the variables.

    See Also
    --------
    corrcoef : Normalized covariance matrix

    Notes
    -----
    Assume that the observations are in the columns of the observation
    array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The
    steps to compute the weighted covariance are as follows::

        >>> w = f * a
        >>> v1 = np.sum(w)
        >>> v2 = np.sum(w * a)
        >>> m -= np.sum(m * w, axis=1, keepdims=True) / v1
        >>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2)

    Note that when ``a == 1``, the normalization factor
    ``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)``
    as it should.

    Examples
    --------
    >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
    >>> np.cov(x)
    array([[ 1., -1.],
           [-1.,  1.]])
    """
    # Check inputs
    if ddof is not None and ddof != int(ddof):
        raise ValueError(
            "ddof must be integer")

    # Handles complex arrays too
    m = np.asarray(m)
    if y is None:
        dtype = np.result_type(m, np.float64)
    else:
        y = np.asarray(y)
        dtype = np.result_type(m, y, np.float64)
    X = array(m, ndmin=2, dtype=dtype)
    if rowvar == 0 and X.shape[0] != 1:
        X = X.T
    if X.shape[0] == 0:
        return np.array([]).reshape(0, 0)
    if y is not None:
        y = array(y, copy=False, ndmin=2, dtype=dtype)
        if rowvar == 0 and y.shape[0] != 1:
            y = y.T
        X = np.vstack((X, y))

    if ddof is None:
        if bias == 0:
            ddof = 1
        else:
            ddof = 0

    # Get the product of frequencies and weights
    w = None
    if fweights is not None:
        # BUG FIX: the original used dtype=np.float; np.float was a deprecated
        # alias for the builtin float and was removed in NumPy 1.20+, which
        # makes this line raise AttributeError on modern NumPy.
        fweights = np.asarray(fweights, dtype=float)
        if not np.all(fweights == np.around(fweights)):
            raise TypeError(
                "fweights must be integer")
        if fweights.ndim > 1:
            raise RuntimeError(
                "cannot handle multidimensional fweights")
        if fweights.shape[0] != X.shape[1]:
            raise RuntimeError(
                "incompatible numbers of samples and fweights")
        if any(fweights < 0):
            raise ValueError(
                "fweights cannot be negative")
        w = fweights
    if aweights is not None:
        # Same np.float -> float fix as for fweights above.
        aweights = np.asarray(aweights, dtype=float)
        if aweights.ndim > 1:
            raise RuntimeError(
                "cannot handle multidimensional aweights")
        if aweights.shape[0] != X.shape[1]:
            raise RuntimeError(
                "incompatible numbers of samples and aweights")
        if any(aweights < 0):
            raise ValueError(
                "aweights cannot be negative")
        if w is None:
            w = aweights
        else:
            w *= aweights

    avg, w_sum = average(X, axis=1, weights=w, returned=True)
    w_sum = w_sum[0]

    # Determine the normalization
    if w is None:
        fact = X.shape[1] - ddof
    elif ddof == 0:
        fact = w_sum
    elif aweights is None:
        fact = w_sum - ddof
    else:
        fact = w_sum - ddof*sum(w*aweights)/w_sum

    if fact <= 0:
        warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
        fact = 0.0

    X -= avg[:, None]
    if w is None:
        X_T = X.T
    else:
        X_T = (X*w).T
    c = dot(X, X_T.conj())
    c *= 1. / np.float64(fact)
    return c.squeeze()
def corrcoef(x, y=None, rowvar=1, bias=np._NoValue, ddof=np._NoValue):
    """
    Return Pearson product-moment correlation coefficients.

    The correlation matrix `R` is obtained from the covariance matrix `C`
    (see `cov`) as ``R[i, j] = C[i, j] / sqrt(C[i, i] * C[j, j])``, so every
    entry lies in the interval [-1, 1].

    Parameters
    ----------
    x : array_like
        A 1-D or 2-D array containing multiple variables and observations.
        Each row of `x` represents a variable, and each column a single
        observation of all those variables. Also see `rowvar` below.
    y : array_like, optional
        An additional set of variables and observations. `y` has the same
        shape as `x`.
    rowvar : int, optional
        If non-zero (default), each row represents a variable with
        observations in the columns; otherwise the relationship is
        transposed.
    bias : _NoValue, optional
        Has no effect, do not use.
        .. deprecated:: 1.10.0
    ddof : _NoValue, optional
        Has no effect, do not use.
        .. deprecated:: 1.10.0

    Returns
    -------
    R : ndarray
        The correlation coefficient matrix of the variables.

    See Also
    --------
    cov : Covariance matrix

    Notes
    -----
    Floating point rounding may leave the result slightly non-Hermitian,
    with diagonal elements off from 1; the real and imaginary parts are
    therefore clipped to [-1, 1], which still does not bound
    ``abs(R[i, j])`` in the complex case.  `bias` and `ddof` are accepted
    but discarded for backwards compatibility; they never affected the
    result.
    """
    if bias is not np._NoValue or ddof is not np._NoValue:
        # 2015-03-15, 1.10
        warnings.warn('bias and ddof have no effect and are deprecated',
                      DeprecationWarning)
    c = cov(x, y, rowvar)
    try:
        variances = diag(c)
    except ValueError:
        # Scalar covariance: c/c yields 1 for a valid value, nan for
        # an incorrect one (nan, inf, 0).
        return c / c
    stddev = sqrt(variances.real)
    # Normalize rows, then columns, by the standard deviations.
    c /= stddev[:, None]
    c /= stddev[None, :]
    # Clip real and imaginary parts to [-1, 1].  This does not guarantee
    # abs(c[i, j]) <= 1 for complex arrays, but is the best cheap option.
    np.clip(c.real, -1, 1, out=c.real)
    if np.iscomplexobj(c):
        np.clip(c.imag, -1, 1, out=c.imag)
    return c
def blackman(M):
    """
    Return the Blackman window.

    The window is the three-term cosine sum

    .. math::  w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)

    a taper designed to have close to the minimal leakage possible; it is
    only slightly worse than a Kaiser window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.

    Returns
    -------
    out : ndarray
        The window, with the maximum value normalized to one (the value one
        appears only if the number of samples is odd).

    See Also
    --------
    bartlett, hamming, hanning, kaiser

    References
    ----------
    Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
    Dover Publications, New York.

    Examples
    --------
    >>> np.blackman(12)
    array([ -1.38777878e-17,   3.26064346e-02,   1.59903635e-01,
             4.14397981e-01,   7.36045180e-01,   9.67046769e-01,
             9.67046769e-01,   7.36045180e-01,   4.14397981e-01,
             1.59903635e-01,   3.26064346e-02,  -1.38777878e-17])
    """
    # Degenerate sizes: empty window, or a single unit sample.
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    samples = arange(0, M)
    theta = 2.0*pi*samples/(M-1)
    return 0.42 - 0.5*cos(theta) + 0.08*cos(2.0*theta)
def bartlett(M):
    """
    Return the Bartlett window.

    A triangular window whose end points are zero:

    .. math:: w(n) = \\frac{2}{M-1} \\left(
              \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
              \\right)

    Often used for tapering a signal without generating too much ripple in
    the frequency domain; convolution with it performs linear
    interpolation, and its Fourier transform is the product of two sinc
    functions.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.

    Returns
    -------
    out : array
        The triangular window, with the maximum value normalized to one
        (the value one appears only if the number of samples is odd), with
        the first and last samples equal to zero.

    See Also
    --------
    blackman, hamming, hanning, kaiser

    References
    ----------
    .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
           Biometrika 37, 1-16, 1950.
    .. [2] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function

    Examples
    --------
    >>> np.bartlett(12)
    array([ 0.        ,  0.18181818,  0.36363636,  0.54545455,  0.72727273,
            0.90909091,  0.90909091,  0.72727273,  0.54545455,  0.36363636,
            0.18181818,  0.        ])
    """
    # Degenerate sizes: empty window, or a single unit sample.
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    samples = arange(0, M)
    half = (M-1)/2.0
    rising = 2.0*samples/(M-1)
    # Ramp up over the first half, mirror it on the way down.
    return where(less_equal(samples, half), rising, 2.0 - rising)
def hanning(M):
    """
    Return the Hanning window.

    A taper formed by a weighted cosine:

    .. math::  w(n) = 0.5 - 0.5\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
               \\qquad 0 \\leq n \\leq M-1

    Named for Julius von Hann; also known as the Cosine Bell or Hann
    window (the latter name helps avoid confusion with the very similar
    Hamming window).

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.

    Returns
    -------
    out : ndarray, shape(M,)
        The window, with the maximum value normalized to one (the value
        one appears only if `M` is odd).

    See Also
    --------
    bartlett, blackman, hamming, kaiser

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
           spectra, Dover Publications, New York.
    .. [2] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function

    Examples
    --------
    >>> np.hanning(12)
    array([ 0.        ,  0.07937323,  0.29229249,  0.57115742,  0.82743037,
            0.97974649,  0.97974649,  0.82743037,  0.57115742,  0.29229249,
            0.07937323,  0.        ])
    """
    # Degenerate sizes: empty window, or a single unit sample.
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    phase = 2.0*pi*arange(0, M)/(M-1)
    return 0.5 - 0.5*cos(phase)
def hamming(M):
    """
    Return the Hamming window.

    A taper formed by a weighted cosine:

    .. math::  w(n) = 0.54 - 0.46\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
               \\qquad 0 \\leq n \\leq M-1

    Named for R. W. Hamming, an associate of J. W. Tukey, and described in
    Blackman and Tukey; it was recommended for smoothing the truncated
    autocovariance function in the time domain.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.

    Returns
    -------
    out : ndarray
        The window, with the maximum value normalized to one (the value
        one appears only if the number of samples is odd).

    See Also
    --------
    bartlett, blackman, hanning, kaiser

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
           spectra, Dover Publications, New York.
    .. [2] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function

    Examples
    --------
    >>> np.hamming(12)
    array([ 0.08      ,  0.15302337,  0.34890909,  0.60546483,  0.84123594,
            0.98136677,  0.98136677,  0.84123594,  0.60546483,  0.34890909,
            0.15302337,  0.08      ])
    """
    # Degenerate sizes: empty window, or a single unit sample.
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    phase = 2.0*pi*arange(0, M)/(M-1)
    return 0.54 - 0.46*cos(phase)
## Code from cephes for i0
# Chebyshev series coefficients (from the Cephes i0 implementation, per the
# comment above) used by _i0_1 for the small-argument branch of i0
# (x <= 8); _i0_1 evaluates them via _chbevl at the mapped argument x/2 - 2.
_i0A = [
    -4.41534164647933937950E-18,
    3.33079451882223809783E-17,
    -2.43127984654795469359E-16,
    1.71539128555513303061E-15,
    -1.16853328779934516808E-14,
    7.67618549860493561688E-14,
    -4.85644678311192946090E-13,
    2.95505266312963983461E-12,
    -1.72682629144155570723E-11,
    9.67580903537323691224E-11,
    -5.18979560163526290666E-10,
    2.65982372468238665035E-9,
    -1.30002500998624804212E-8,
    6.04699502254191894932E-8,
    -2.67079385394061173391E-7,
    1.11738753912010371815E-6,
    -4.41673835845875056359E-6,
    1.64484480707288970893E-5,
    -5.75419501008210370398E-5,
    1.88502885095841655729E-4,
    -5.76375574538582365885E-4,
    1.63947561694133579842E-3,
    -4.32430999505057594430E-3,
    1.05464603945949983183E-2,
    -2.37374148058994688156E-2,
    4.93052842396707084878E-2,
    -9.49010970480476444210E-2,
    1.71620901522208775349E-1,
    -3.04682672343198398683E-1,
    6.76795274409476084995E-1
    ]
# Chebyshev series coefficients (from the Cephes i0 implementation) used by
# _i0_2 for the large-argument branch of i0 (x > 8); _i0_2 evaluates them
# via _chbevl at the mapped argument 32/x - 2.
_i0B = [
    -7.23318048787475395456E-18,
    -4.83050448594418207126E-18,
    4.46562142029675999901E-17,
    3.46122286769746109310E-17,
    -2.82762398051658348494E-16,
    -3.42548561967721913462E-16,
    1.77256013305652638360E-15,
    3.81168066935262242075E-15,
    -9.55484669882830764870E-15,
    -4.15056934728722208663E-14,
    1.54008621752140982691E-14,
    3.85277838274214270114E-13,
    7.18012445138366623367E-13,
    -1.79417853150680611778E-12,
    -1.32158118404477131188E-11,
    -3.14991652796324136454E-11,
    1.18891471078464383424E-11,
    4.94060238822496958910E-10,
    3.39623202570838634515E-9,
    2.26666899049817806459E-8,
    2.04891858946906374183E-7,
    2.89137052083475648297E-6,
    6.88975834691682398426E-5,
    3.36911647825569408990E-3,
    8.04490411014108831608E-1
    ]
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in range(1, len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
def _i0_1(x):
    # Small-argument branch of i0 (used for x <= 8); evaluates the
    # Cephes-derived _i0A series at the mapped argument and scales by exp(x).
    mapped = x/2.0 - 2
    return exp(x) * _chbevl(mapped, _i0A)
def _i0_2(x):
    # Large-argument branch of i0 (used for x > 8); evaluates the
    # Cephes-derived _i0B series and scales by exp(x)/sqrt(x).
    mapped = 32.0/x - 2.0
    return exp(x) * _chbevl(mapped, _i0B) / sqrt(x)
def i0(x):
    """
    Modified Bessel function of the first kind, order 0.

    Usually denoted :math:`I_0`.  Broadcasts over `x`, but will *not*
    "up-cast" int dtype arguments unless accompanied by at least one float
    or complex dtype argument (see Raises below).

    Parameters
    ----------
    x : array_like, dtype float or complex
        Argument of the Bessel function.

    Returns
    -------
    out : ndarray, shape = x.shape, dtype = x.dtype
        The modified Bessel function evaluated at each of the elements
        of `x`.

    Raises
    ------
    TypeError: array cannot be safely cast to required type
        If argument consists exclusively of int dtypes.

    See Also
    --------
    scipy.special.iv, scipy.special.ive

    Notes
    -----
    Uses the Chebyshev expansions published by Clenshaw [1]_ and referenced
    by Abramowitz and Stegun [2]_: the domain is split into [0, 8] and
    (8, inf) with a separate polynomial expansion on each interval.
    Relative error on [0, 30] using IEEE arithmetic is documented [3]_ as
    peaking at 5.8e-16 with an rms of 1.4e-16 (n = 30000).

    References
    ----------
    .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
           *National Physical Laboratory Mathematical Tables*, vol. 5, London:
           Her Majesty's Stationery Office, 1962.
    .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
           Functions*, 10th printing, New York: Dover, 1964, pp. 379.
           http://www.math.sfu.ca/~cbm/aands/page_379.htm
    .. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html

    Examples
    --------
    >>> np.i0([0.])
    array(1.0)
    """
    x = atleast_1d(x).copy()
    out = empty_like(x)
    # Fold negative arguments over to positive: the ensuing branch split
    # below handles non-negative x only.
    neg = (x < 0)
    x[neg] = -x[neg]
    # Small- and large-argument Chebyshev branches split at 8.
    small = (x <= 8.0)
    out[small] = _i0_1(x[small])
    out[~small] = _i0_2(x[~small])
    return out.squeeze()
## End of cephes code for i0
def kaiser(M, beta):
    """
    Return the Kaiser window.

    The Kaiser window is a taper formed by using a Bessel function:

    .. math::  w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
               \\right)/I_0(\\beta)

    with

    .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},

    where :math:`I_0` is the modified zeroth-order Bessel function.  It is
    a very good approximation to the Digital Prolate Spheroidal Sequence
    (Slepian) window, which maximizes the energy in the main lobe relative
    to total energy.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.
    beta : float
        Shape parameter for window.  ``beta = 0`` gives a rectangular
        window; roughly 5 resembles a Hamming, 6 a Hanning, and 8.6 a
        Blackman window; 14 is a good starting point.  As `beta` gets
        large the window narrows, so `M` must be large enough to sample
        the increasingly narrow spike, otherwise NaNs are returned.

    Returns
    -------
    out : array
        The window, with the maximum value normalized to one (the value
        one appears only if the number of samples is odd).

    See Also
    --------
    bartlett, blackman, hamming, hanning

    References
    ----------
    .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
           digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
           John Wiley and Sons, New York, (1966).
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
           University of Alberta Press, 1975, pp. 177-178.
    .. [3] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function

    Examples
    --------
    >>> np.kaiser(12, 14)
    array([  7.72686684e-06,   3.46009194e-03,   4.65200189e-02,
             2.29737120e-01,   5.99885316e-01,   9.45674898e-01,
             9.45674898e-01,   5.99885316e-01,   2.29737120e-01,
             4.65200189e-02,   3.46009194e-03,   7.72686684e-06])
    """
    # BUG FIX: this function previously did ``from numpy.dual import i0``
    # here.  numpy.dual was deprecated in NumPy 1.20 and later removed, so
    # that import fails at call time on modern NumPy; it also shadowed this
    # module's own ``i0``.  Call the local ``i0`` (defined above) directly.
    if M == 1:
        return np.array([1.])
    n = arange(0, M)
    alpha = (M-1)/2.0
    return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
def sinc(x):
    """
    Return the sinc function.

    The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`, with ``sinc(0)``
    taking its limit value 1.

    Parameters
    ----------
    x : ndarray
        Array (possibly multi-dimensional) of values for which to
        calculate ``sinc(x)``.

    Returns
    -------
    out : ndarray
        ``sinc(x)``, which has the same shape as the input.

    Notes
    -----
    The name sinc is short for "sine cardinal" or "sinus cardinalis".  The
    function appears throughout signal processing: anti-aliasing, Lanczos
    resampling, and interpolation (it is the ideal kernel for bandlimited
    interpolation of discrete-time signals).

    References
    ----------
    .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
           Resource. http://mathworld.wolfram.com/SincFunction.html
    .. [2] Wikipedia, "Sinc function",
           http://en.wikipedia.org/wiki/Sinc_function

    Examples
    --------
    >>> np.sinc(np.array([0.0]))
    array([ 1.])
    """
    x = np.asanyarray(x)
    # Substitute a tiny nonzero value at exact zeros so that sin(y)/y
    # evaluates to 1.0 there instead of dividing by zero.
    y = pi * where(x == 0, 1.0e-20, x)
    return sin(y)/y
def msort(a):
    """
    Return a copy of an array sorted along the first axis.

    Equivalent to ``np.sort(a, axis=0)``.

    Parameters
    ----------
    a : array_like
        Array to be sorted.

    Returns
    -------
    sorted_array : ndarray
        Array of the same type and shape as `a`.

    See Also
    --------
    sort
    """
    result = array(a, subok=True, copy=True)
    result.sort(axis=0)
    return result
def _ureduce(a, func, **kwargs):
    """
    Internal Function.

    Call `func` with `a` as first argument, swapping and merging axes so
    that an extended (multi-axis) `axis` argument works with functions
    that only support a single axis natively.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    func : callable
        Reduction function capable of receiving a single `axis` argument.
        It is called with `a` as first argument followed by `kwargs`.
    kwargs : keyword arguments
        additional keyword arguments to pass to `func`.

    Returns
    -------
    result : tuple
        Result of func(a, **kwargs) and a.shape with axis dims set to 1,
        which can be used to reshape the result to the same shape a ufunc
        with keepdims=True would produce.
    """
    a = np.asanyarray(a)
    axis = kwargs.get('axis', None)
    if axis is not None:
        # keepdim records the shape the result would have under
        # keepdims=True (reduced axes replaced by 1).
        keepdim = list(a.shape)
        nd = a.ndim
        try:
            # Single-integer axis; operator.index raises TypeError for
            # non-integers (e.g. a tuple of axes), handled below.
            axis = operator.index(axis)
            if axis >= nd or axis < -nd:
                raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim))
            keepdim[axis] = 1
        except TypeError:
            # Sequence of axes: validate each one, then collapse them all
            # into a single trailing axis so `func` sees only axis=-1.
            sax = set()
            for x in axis:
                if x >= nd or x < -nd:
                    raise IndexError("axis %d out of bounds (%d)" % (x, nd))
                # NOTE(review): this compares the raw x against the set of
                # normalized (x % nd) values, so a duplicate expressed in
                # mixed positive/negative form (e.g. 0 and -nd) is not
                # detected — confirm whether callers can hit this.
                if x in sax:
                    raise ValueError("duplicate value in axis")
                sax.add(x % nd)
                keepdim[x] = 1
            # Axes NOT being reduced: the complement of the reduced set.
            keep = sax.symmetric_difference(frozenset(range(nd)))
            nkeep = len(keep)
            # swap axis that should not be reduced to front
            for i, s in enumerate(sorted(keep)):
                a = a.swapaxes(i, s)
            # merge reduced axis
            a = a.reshape(a.shape[:nkeep] + (-1,))
            kwargs['axis'] = -1
    else:
        # No axis given: a full reduction, so every dim collapses to 1.
        keepdim = [1] * a.ndim
    r = func(a, **kwargs)
    return r, keepdim
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
    """
    Compute the median along the specified axis.

    Returns the median of the array elements.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : {int, sequence of int, None}, optional
        Axis or axes along which the medians are computed. The default
        (None) computes the median of the flattened array.
    out : ndarray, optional
        Alternative output array for the result; must have the same shape
        and buffer length as the expected output (dtype is cast if needed).
    overwrite_input : bool, optional
        If True, `a` may be used as scratch space and will be modified
        (probably partially sorted). Saves memory when the input need not
        be preserved. Default False.
    keepdims : bool, optional
        If True, the reduced axes are left in the result with size one so
        the result broadcasts correctly against the original array.

    Returns
    -------
    median : ndarray
        A new array holding the result, or `out` if it was given. Integer
        and small-float inputs produce ``np.float64`` output.

    See Also
    --------
    mean, percentile
    """
    # _ureduce handles multi-axis reduction and reports the keepdims shape.
    result, reduced_shape = _ureduce(a, func=_median, axis=axis, out=out,
                                     overwrite_input=overwrite_input)
    if keepdims:
        return result.reshape(reduced_shape)
    return result
def _median(a, axis=None, out=None, overwrite_input=False):
    """
    Worker for `median`: median of `a` along one (or a flattened) axis.

    Uses a partial sort (`partition`) around the middle element(s) rather
    than a full sort, then averages the one or two middle values.
    """
    # Can't reasonably be implemented in terms of percentile as we have to
    # call mean to not break astropy.
    a = np.asanyarray(a)
    # Set the partition indexes
    if axis is None:
        sz = a.size
    else:
        sz = a.shape[axis]
    if sz % 2 == 0:
        # Even count: both middle elements are needed for the average.
        szh = sz // 2
        kth = [szh - 1, szh]
    else:
        kth = [(sz - 1) // 2]
    # Check if the array contains any nan's
    if np.issubdtype(a.dtype, np.inexact):
        # NaNs sort to the end, so also partition the last element; it is
        # inspected below to detect the presence of NaNs.
        kth.append(-1)
    if overwrite_input:
        if axis is None:
            part = a.ravel()
            part.partition(kth)
        else:
            a.partition(kth, axis=axis)
            part = a
    else:
        part = partition(a, kth, axis=axis)
    if part.shape == ():
        # make 0-D arrays work
        return part.item()
    if axis is None:
        axis = 0
    indexer = [slice(None)] * part.ndim
    index = part.shape[axis] // 2
    if part.shape[axis] % 2 == 1:
        # index with slice to allow mean (below) to work
        indexer[axis] = slice(index, index+1)
    else:
        indexer[axis] = slice(index-1, index+1)
    # Check if the array contains any nan's
    if np.issubdtype(a.dtype, np.inexact) and sz > 0:
        # warn and return nans like mean would
        rout = mean(part[indexer], axis=axis, out=out)
        # Move the reduced axis last so part[..., -1] is the largest element
        # along it — NaN whenever any NaN is present on that line.
        part = np.rollaxis(part, axis, part.ndim)
        n = np.isnan(part[..., -1])
        if rout.ndim == 0:
            if n == True:
                warnings.warn("Invalid value encountered in median",
                              RuntimeWarning)
                if out is not None:
                    out[...] = a.dtype.type(np.nan)
                    rout = out
                else:
                    rout = a.dtype.type(np.nan)
        elif np.count_nonzero(n.ravel()) > 0:
            warnings.warn("Invalid value encountered in median for" +
                          " %d results" % np.count_nonzero(n.ravel()),
                          RuntimeWarning)
            rout[n] = np.nan
        return rout
    else:
        # if there are no nans
        # Use mean in odd and even case to coerce data type
        # and check, use out array.
        return mean(part[indexer], axis=axis, out=out)
def percentile(a, q, axis=None, out=None,
               overwrite_input=False, interpolation='linear', keepdims=False):
    """
    Compute the qth percentile of the data along the specified axis.

    Returns the qth percentile(s) of the array elements.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    q : float in range of [0,100] (or sequence of floats)
        Percentile to compute, between 0 and 100 inclusive.
    axis : {int, sequence of int, None}, optional
        Axis or axes along which the percentiles are computed; the default
        (None) uses the flattened array.
    out : ndarray, optional
        Alternative output array for the result; must have the expected
        shape and buffer length (dtype is cast if needed).
    overwrite_input : bool, optional
        If True, `a` may be used as scratch space and its contents are
        undefined afterwards. Default False.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        How to interpolate when the desired quantile falls between two data
        points ``i < j`` (e.g. 'linear' gives ``i + (j - i) * fraction``).
    keepdims : bool, optional
        If True, the reduced axes are kept with size one so the result
        broadcasts correctly against the original array.

    Returns
    -------
    percentile : scalar or ndarray
        Scalar for a single percentile with ``axis=None``; otherwise an
        array whose first axis corresponds to the percentiles. Integer and
        small-float inputs produce ``float64`` output. If `out` is given,
        that array is returned.

    See Also
    --------
    mean, median, nanpercentile
    """
    # Copy q: the worker normalizes it to [0, 1] in place.
    q = array(q, dtype=np.float64, copy=True)
    result, reduced_shape = _ureduce(a, func=_percentile, q=q, axis=axis,
                                     out=out,
                                     overwrite_input=overwrite_input,
                                     interpolation=interpolation)
    if not keepdims:
        return result
    # With keepdims, a vector q contributes a leading axis of len(q).
    if q.ndim == 0:
        return result.reshape(reduced_shape)
    return result.reshape([len(q)] + reduced_shape)
def _percentile(a, q, axis=None, out=None,
                overwrite_input=False, interpolation='linear', keepdims=False):
    """
    Worker for `percentile`: percentiles of `a` along a single axis.

    `q` must be a float64 array owned by the caller; it is normalized from
    [0, 100] to [0, 1] in place. `keepdims` is accepted for signature
    compatibility but unused here (keepdims is handled by `percentile`).
    """
    a = asarray(a)
    if q.ndim == 0:
        # Do not allow 0-d arrays because following code fails for scalar
        zerod = True
        q = q[None]
    else:
        zerod = False

    # avoid expensive reductions, relevant for arrays with < O(1000) elements
    if q.size < 10:
        for i in range(q.size):
            if q[i] < 0. or q[i] > 100.:
                raise ValueError("Percentiles must be in the range [0,100]")
            q[i] /= 100.
    else:
        # faster than any()
        if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.):
            raise ValueError("Percentiles must be in the range [0,100]")
        q /= 100.

    # prepare a for partioning
    if overwrite_input:
        if axis is None:
            ap = a.ravel()
        else:
            ap = a
    else:
        if axis is None:
            ap = a.flatten()
        else:
            ap = a.copy()

    if axis is None:
        axis = 0

    Nx = ap.shape[axis]
    # Fractional positions of the requested quantiles along the axis.
    indices = q * (Nx - 1)

    # round fractional indices according to interpolation method
    if interpolation == 'lower':
        indices = floor(indices).astype(intp)
    elif interpolation == 'higher':
        indices = ceil(indices).astype(intp)
    elif interpolation == 'midpoint':
        indices = 0.5 * (floor(indices) + ceil(indices))
    elif interpolation == 'nearest':
        indices = around(indices).astype(intp)
    elif interpolation == 'linear':
        pass  # keep index as fraction and interpolate
    else:
        raise ValueError(
            "interpolation can only be 'linear', 'lower' 'higher', "
            "'midpoint', or 'nearest'")

    n = np.array(False, dtype=bool)  # check for nan's flag
    if indices.dtype == intp:  # take the points along axis
        # Check if the array contains any nan's
        if np.issubdtype(a.dtype, np.inexact):
            # NaNs sort to the end; partition -1 too so the last element
            # reveals their presence after partitioning.
            indices = concatenate((indices, [-1]))

        ap.partition(indices, axis=axis)
        # ensure axis with qth is first
        ap = np.rollaxis(ap, axis, 0)
        axis = 0

        # Check if the array contains any nan's
        if np.issubdtype(a.dtype, np.inexact):
            indices = indices[:-1]
            n = np.isnan(ap[-1:, ...])

        if zerod:
            indices = indices[0]
        r = take(ap, indices, axis=axis, out=out)
    else:  # weight the points above and below the indices
        indices_below = floor(indices).astype(intp)
        indices_above = indices_below + 1
        # Clamp so q=100 does not index one past the end.
        indices_above[indices_above > Nx - 1] = Nx - 1

        # Check if the array contains any nan's
        if np.issubdtype(a.dtype, np.inexact):
            indices_above = concatenate((indices_above, [-1]))

        weights_above = indices - indices_below
        weights_below = 1.0 - weights_above

        weights_shape = [1, ] * ap.ndim
        weights_shape[axis] = len(indices)
        weights_below.shape = weights_shape
        weights_above.shape = weights_shape

        ap.partition(concatenate((indices_below, indices_above)), axis=axis)

        # ensure axis with qth is first
        ap = np.rollaxis(ap, axis, 0)
        weights_below = np.rollaxis(weights_below, axis, 0)
        weights_above = np.rollaxis(weights_above, axis, 0)
        axis = 0

        # Check if the array contains any nan's
        if np.issubdtype(a.dtype, np.inexact):
            indices_above = indices_above[:-1]
            n = np.isnan(ap[-1:, ...])

        # Linear interpolation between the two bracketing order statistics.
        x1 = take(ap, indices_below, axis=axis) * weights_below
        x2 = take(ap, indices_above, axis=axis) * weights_above

        # ensure axis with qth is first
        x1 = np.rollaxis(x1, axis, 0)
        x2 = np.rollaxis(x2, axis, 0)

        if zerod:
            x1 = x1.squeeze(0)
            x2 = x2.squeeze(0)

        if out is not None:
            r = add(x1, x2, out=out)
        else:
            r = add(x1, x2)

    if np.any(n):
        # warn and overwrite the affected results with NaN, like mean would
        warnings.warn("Invalid value encountered in percentile",
                      RuntimeWarning)
        if zerod:
            if ap.ndim == 1:
                if out is not None:
                    out[...] = a.dtype.type(np.nan)
                    r = out
                else:
                    r = a.dtype.type(np.nan)
            else:
                r[..., n.squeeze(0)] = a.dtype.type(np.nan)
        else:
            if r.ndim == 1:
                r[:] = a.dtype.type(np.nan)
            else:
                r[..., n.repeat(q.size, 0)] = a.dtype.type(np.nan)

    return r
def trapz(y, x=None, dx=1.0, axis=-1):
    """
    Integrate along the given axis using the composite trapezoidal rule.

    Integrate `y` (`x`) along given axis.

    Parameters
    ----------
    y : array_like
        Input array to integrate.
    x : array_like, optional
        The sample points corresponding to the `y` values. If `x` is None,
        the sample points are assumed to be evenly spaced `dx` apart. The
        default is None.
    dx : scalar, optional
        The spacing between sample points when `x` is None. The default is 1.
    axis : int, optional
        The axis along which to integrate.

    Returns
    -------
    trapz : float
        Definite integral as approximated by trapezoidal rule.

    See Also
    --------
    sum, cumsum

    References
    ----------
    .. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule

    Examples
    --------
    >>> np.trapz([1,2,3])
    4.0
    >>> np.trapz([1,2,3], x=[4,6,8])
    8.0
    >>> np.trapz([1,2,3], dx=2)
    8.0
    """
    y = np.asanyarray(y)
    if x is None:
        d = dx
    else:
        x = np.asanyarray(x)
        if x.ndim == 1:
            d = np.diff(x)
            # reshape to correct shape so d broadcasts against y along `axis`
            shape = [1] * y.ndim
            shape[axis] = d.shape[0]
            d = d.reshape(shape)
        else:
            d = np.diff(x, axis=axis)
    nd = y.ndim
    slice1 = [slice(None)] * nd
    slice2 = [slice(None)] * nd
    slice1[axis] = slice(1, None)      # y[..., 1:, ...]
    slice2[axis] = slice(None, -1)     # y[..., :-1, ...]
    # FIX: multidimensional indexing with a *list* of slices was deprecated
    # in NumPy 1.15 and later removed — indices must be a tuple.
    slice1 = tuple(slice1)
    slice2 = tuple(slice2)
    try:
        ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
    except ValueError:
        # Operations didn't work (e.g. object arrays); cast to ndarray
        d = np.asarray(d)
        y = np.asarray(y)
        ret = np.add.reduce(d * (y[slice1] + y[slice2]) / 2.0, axis)
    return ret
# always succeed
def add_newdoc(place, obj, doc):
    """
    Adds documentation to obj which is in module place.

    If doc is a string it is added to obj as a docstring. If doc is a
    tuple ``(method, docstring)``, the first element names an attribute
    of obj and the second is its docstring. If doc is a list, each
    element must be such a ``(method, docstring)`` pair.

    This routine never raises an error; it also cannot modify read-only
    docstrings (new-style classes, built-in functions), so callers must
    verify themselves that the docstrings were changed.
    """
    try:
        target = getattr(__import__(place, globals(), {}, [obj]), obj)
        if isinstance(doc, str):
            add_docstring(target, doc.strip())
        elif isinstance(doc, tuple):
            attr, text = doc
            add_docstring(getattr(target, attr), text.strip())
        elif isinstance(doc, list):
            for attr, text in doc:
                add_docstring(getattr(target, attr), text.strip())
    except:
        # Deliberately swallow everything: this helper must never raise.
        pass
# Based on scitools meshgrid
def meshgrid(*xi, **kwargs):
    """
    Return coordinate matrices from coordinate vectors.

    Make N-D coordinate arrays for vectorized evaluations of N-D
    scalar/vector fields over N-D grids, given one-dimensional coordinate
    arrays x1, x2,..., xn.

    Parameters
    ----------
    x1, x2,..., xn : array_like
        1-D arrays representing the coordinates of a grid.
    indexing : {'xy', 'ij'}, optional
        Cartesian ('xy', default) or matrix ('ij') indexing of output.
        With inputs of length M and N, 'xy' gives (N, M) shaped outputs
        and 'ij' gives (M, N).
    sparse : bool, optional
        If True return a sparse (broadcastable) grid to conserve memory.
        Default is False.
    copy : bool, optional
        If False, return views into the original arrays where possible;
        such outputs may be non-contiguous and share memory. Default True.

    Returns
    -------
    X1, X2,..., XN : ndarray
        Arrays with the elements of each `xi` repeated to fill the grid.

    Examples
    --------
    >>> xv, yv = meshgrid(np.linspace(0, 1, 3), np.linspace(0, 1, 2))
    >>> xv
    array([[ 0. ,  0.5,  1. ],
           [ 0. ,  0.5,  1. ]])
    >>> yv
    array([[ 0.,  0.,  0.],
           [ 1.,  1.,  1.]])
    """
    ndim = len(xi)
    copy_ = kwargs.pop('copy', True)
    sparse = kwargs.pop('sparse', False)
    indexing = kwargs.pop('indexing', 'xy')
    if kwargs:
        raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
                        % (list(kwargs)[0],))
    if indexing not in ['xy', 'ij']:
        raise ValueError(
            "Valid values for `indexing` are 'xy' and 'ij'.")

    # Give the i-th input length -1 on axis i and length 1 everywhere else.
    unit = (1,) * ndim
    output = []
    for pos, coords in enumerate(xi):
        grid = np.asanyarray(coords).reshape(
            unit[:pos] + (-1,) + unit[pos + 1:])
        output.append(grid)
    shape = [grid.size for grid in output]

    if indexing == 'xy' and ndim > 1:
        # Cartesian convention: switch first and second axis.
        output[0].shape = (1, -1) + (1,) * (ndim - 2)
        output[1].shape = (-1, 1) + (1,) * (ndim - 2)
        shape[0], shape[1] = shape[1], shape[0]

    if sparse:
        return [grid.copy() for grid in output] if copy_ else output
    # Return the full N-D matrix (not only the 1-D vector).
    if copy_:
        mult_fact = np.ones(shape, dtype=int)
        return [grid * mult_fact for grid in output]
    return np.broadcast_arrays(*output)
def delete(arr, obj, axis=None):
    """
    Return a new array with sub-arrays along an axis deleted. For a one
    dimensional array, this returns those entries not returned by
    `arr[obj]`.

    Parameters
    ----------
    arr : array_like
        Input array.
    obj : slice, int or array of ints
        Indicate which sub-arrays to remove.
    axis : int, optional
        The axis along which to delete the subarray defined by `obj`.
        If `axis` is None, `obj` is applied to the flattened array.

    Returns
    -------
    out : ndarray
        A copy of `arr` with the elements specified by `obj` removed. Note
        that `delete` does not occur in-place. If `axis` is None, `out` is
        a flattened array.

    See Also
    --------
    insert : Insert elements into an array.
    append : Append elements at the end of an array.

    Notes
    -----
    Often it is preferable to use a boolean mask. For example:

    >>> mask = np.ones(len(arr), dtype=bool)
    >>> mask[[0,2,4]] = False
    >>> result = arr[mask,...]

    Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further
    use of `mask`.

    Examples
    --------
    >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
    >>> np.delete(arr, 1, 0)
    array([[ 1,  2,  3,  4],
           [ 9, 10, 11, 12]])
    >>> np.delete(arr, np.s_[::2], 1)
    array([[ 2,  4],
           [ 6,  8],
           [10, 12]])
    >>> np.delete(arr, [1,3,5], None)
    array([ 1,  3,  5,  7,  8,  9, 10, 11, 12])
    """
    # Preserve ndarray subclasses (e.g. masked arrays) via __array_wrap__.
    wrap = None
    if type(arr) is not ndarray:
        try:
            wrap = arr.__array_wrap__
        except AttributeError:
            pass

    arr = asarray(arr)
    ndim = arr.ndim
    arrorder = 'F' if arr.flags.fnc else 'C'
    if axis is None:
        if ndim != 1:
            arr = arr.ravel()
        ndim = arr.ndim
        axis = ndim - 1
    if ndim == 0:
        # 2013-09-24, 1.9
        warnings.warn(
            "in the future the special handling of scalars will be removed "
            "from delete and raise an error", DeprecationWarning)
        if wrap:
            return wrap(arr)
        else:
            return arr.copy()

    slobj = [slice(None)]*ndim
    N = arr.shape[axis]
    newshape = list(arr.shape)

    if isinstance(obj, slice):
        start, stop, step = obj.indices(N)
        xr = range(start, stop, step)
        numtodel = len(xr)

        if numtodel <= 0:
            # Nothing selected for deletion: return an unchanged copy.
            if wrap:
                return wrap(arr.copy())
            else:
                return arr.copy()

        # Invert if step is negative:
        if step < 0:
            step = -step
            start = xr[-1]
            stop = xr[0] + 1

        newshape[axis] -= numtodel
        new = empty(newshape, arr.dtype, arrorder)
        # copy initial chunk
        if start == 0:
            pass
        else:
            slobj[axis] = slice(None, start)
            new[slobj] = arr[slobj]
        # copy end chunck
        if stop == N:
            pass
        else:
            slobj[axis] = slice(stop-numtodel, None)
            slobj2 = [slice(None)]*ndim
            slobj2[axis] = slice(stop, None)
            new[slobj] = arr[slobj2]
        # copy middle pieces
        if step == 1:
            pass
        else:  # use array indexing.
            keep = ones(stop-start, dtype=bool)
            keep[:stop-start:step] = False
            slobj[axis] = slice(start, stop-numtodel)
            slobj2 = [slice(None)]*ndim
            slobj2[axis] = slice(start, stop)
            arr = arr[slobj2]
            slobj2[axis] = keep
            new[slobj] = arr[slobj2]
        if wrap:
            return wrap(new)
        else:
            return new

    _obj = obj
    obj = np.asarray(obj)
    # After removing the special handling of booleans and out of
    # bounds values, the conversion to the array can be removed.
    if obj.dtype == bool:
        warnings.warn(
            "in the future insert will treat boolean arrays and array-likes "
            "as boolean index instead of casting it to integer", FutureWarning)
        obj = obj.astype(intp)
    if isinstance(_obj, (int, long, integer)):
        # optimization for a single value
        obj = obj.item()
        if (obj < -N or obj >= N):
            raise IndexError(
                "index %i is out of bounds for axis %i with "
                "size %i" % (obj, axis, N))
        if (obj < 0):
            obj += N
        newshape[axis] -= 1
        new = empty(newshape, arr.dtype, arrorder)
        # Copy everything before, then everything after, the removed index.
        slobj[axis] = slice(None, obj)
        new[slobj] = arr[slobj]
        slobj[axis] = slice(obj, None)
        slobj2 = [slice(None)]*ndim
        slobj2[axis] = slice(obj+1, None)
        new[slobj] = arr[slobj2]
    else:
        if obj.size == 0 and not isinstance(_obj, np.ndarray):
            obj = obj.astype(intp)
        if not np.can_cast(obj, intp, 'same_kind'):
            # obj.size = 1 special case always failed and would just
            # give superfluous warnings.
            # 2013-09-24, 1.9
            warnings.warn(
                "using a non-integer array as obj in delete will result in an "
                "error in the future", DeprecationWarning)
            obj = obj.astype(intp)
        keep = ones(N, dtype=bool)

        # Test if there are out of bound indices, this is deprecated
        inside_bounds = (obj < N) & (obj >= -N)
        if not inside_bounds.all():
            # 2013-09-24, 1.9
            warnings.warn(
                "in the future out of bounds indices will raise an error "
                "instead of being ignored by `numpy.delete`.",
                DeprecationWarning)
            obj = obj[inside_bounds]
        positive_indices = obj >= 0
        if not positive_indices.all():
            warnings.warn(
                "in the future negative indices will not be ignored by "
                "`numpy.delete`.", FutureWarning)
            obj = obj[positive_indices]

        keep[obj, ] = False
        slobj[axis] = keep
        new = arr[slobj]

    if wrap:
        return wrap(new)
    else:
        return new
def insert(arr, obj, values, axis=None):
    """
    Insert values along the given axis before the given indices.

    Parameters
    ----------
    arr : array_like
        Input array.
    obj : int, slice or sequence of ints
        Object that defines the index or indices before which `values` is
        inserted.

        .. versionadded:: 1.8.0

        Support for multiple insertions when `obj` is a single scalar or a
        sequence with one element (similar to calling insert multiple
        times).
    values : array_like
        Values to insert into `arr`. If the type of `values` is different
        from that of `arr`, `values` is converted to the type of `arr`.
        `values` should be shaped so that ``arr[...,obj,...] = values``
        is legal.
    axis : int, optional
        Axis along which to insert `values`. If `axis` is None then `arr`
        is flattened first.

    Returns
    -------
    out : ndarray
        A copy of `arr` with `values` inserted. Note that `insert`
        does not occur in-place: a new array is returned. If
        `axis` is None, `out` is a flattened array.

    See Also
    --------
    append : Append elements at the end of an array.
    concatenate : Join a sequence of arrays along an existing axis.
    delete : Delete elements from an array.

    Notes
    -----
    Note that for higher dimensional inserts `obj=0` behaves very different
    from `obj=[0]` just like `arr[:,0,:] = values` is different from
    `arr[:,[0],:] = values`.

    Examples
    --------
    >>> a = np.array([[1, 1], [2, 2], [3, 3]])
    >>> np.insert(a, 1, 5)
    array([1, 5, 1, 2, 2, 3, 3])
    >>> np.insert(a, 1, 5, axis=1)
    array([[1, 5, 1],
           [2, 5, 2],
           [3, 5, 3]])
    >>> b = a.flatten()
    >>> np.insert(b, [2, 2], [5, 6])
    array([1, 1, 5, 6, 2, 2, 3, 3])
    >>> np.insert(b, slice(2, 4), [5, 6])
    array([1, 1, 5, 2, 6, 2, 3, 3])
    """
    # Preserve ndarray subclasses via __array_wrap__.
    wrap = None
    if type(arr) is not ndarray:
        try:
            wrap = arr.__array_wrap__
        except AttributeError:
            pass

    arr = asarray(arr)
    ndim = arr.ndim
    arrorder = 'F' if arr.flags.fnc else 'C'
    if axis is None:
        if ndim != 1:
            arr = arr.ravel()
        ndim = arr.ndim
        axis = ndim - 1
    else:
        if ndim > 0 and (axis < -ndim or axis >= ndim):
            raise IndexError(
                "axis %i is out of bounds for an array of "
                "dimension %i" % (axis, ndim))
        if (axis < 0):
            axis += ndim
    if (ndim == 0):
        # 2013-09-24, 1.9
        warnings.warn(
            "in the future the special handling of scalars will be removed "
            "from insert and raise an error", DeprecationWarning)
        arr = arr.copy()
        arr[...] = values
        if wrap:
            return wrap(arr)
        else:
            return arr
    slobj = [slice(None)]*ndim
    N = arr.shape[axis]
    newshape = list(arr.shape)

    if isinstance(obj, slice):
        # turn it into a range object
        indices = arange(*obj.indices(N), **{'dtype': intp})
    else:
        # need to copy obj, because indices will be changed in-place
        indices = np.array(obj)
        if indices.dtype == bool:
            # See also delete
            warnings.warn(
                "in the future insert will treat boolean arrays and "
                "array-likes as a boolean index instead of casting it to "
                "integer", FutureWarning)
            indices = indices.astype(intp)
            # Code after warning period:
            #if obj.ndim != 1:
            #    raise ValueError('boolean array argument obj to insert '
            #                     'must be one dimensional')
            #indices = np.flatnonzero(obj)
        elif indices.ndim > 1:
            raise ValueError(
                "index array argument obj to insert must be one dimensional "
                "or scalar")
    if indices.size == 1:
        # Single index: `values` may itself contain several new elements.
        index = indices.item()
        if index < -N or index > N:
            raise IndexError(
                "index %i is out of bounds for axis %i with "
                "size %i" % (obj, axis, N))
        if (index < 0):
            index += N

        # There are some object array corner cases here, but we cannot avoid
        # that:
        values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype)
        if indices.ndim == 0:
            # broadcasting is very different here, since a[:,0,:] = ... behaves
            # very different from a[:,[0],:] = ...! This changes values so that
            # it works likes the second case. (here a[:,0:1,:])
            values = np.rollaxis(values, 0, (axis % values.ndim) + 1)
        numnew = values.shape[axis]
        newshape[axis] += numnew
        new = empty(newshape, arr.dtype, arrorder)
        # Copy the leading chunk, the new values, then the trailing chunk.
        slobj[axis] = slice(None, index)
        new[slobj] = arr[slobj]
        slobj[axis] = slice(index, index+numnew)
        new[slobj] = values
        slobj[axis] = slice(index+numnew, None)
        slobj2 = [slice(None)] * ndim
        slobj2[axis] = slice(index, None)
        new[slobj] = arr[slobj2]
        if wrap:
            return wrap(new)
        return new
    elif indices.size == 0 and not isinstance(obj, np.ndarray):
        # Can safely cast the empty list to intp
        indices = indices.astype(intp)

    if not np.can_cast(indices, intp, 'same_kind'):
        # 2013-09-24, 1.9
        warnings.warn(
            "using a non-integer array as obj in insert will result in an "
            "error in the future", DeprecationWarning)
        indices = indices.astype(intp)

    indices[indices < 0] += N

    numnew = len(indices)
    # Shift each requested position by the number of earlier insertions so
    # the semantics match repeated single-element inserts.
    order = indices.argsort(kind='mergesort')   # stable sort
    indices[order] += np.arange(numnew)

    newshape[axis] += numnew
    old_mask = ones(newshape[axis], dtype=bool)
    old_mask[indices] = False

    new = empty(newshape, arr.dtype, arrorder)
    slobj2 = [slice(None)]*ndim
    slobj[axis] = indices
    slobj2[axis] = old_mask
    new[slobj] = values
    new[slobj2] = arr

    if wrap:
        return wrap(new)
    return new
def append(arr, values, axis=None):
    """
    Append values to the end of an array.

    Parameters
    ----------
    arr : array_like
        Values are appended to a copy of this array.
    values : array_like
        These values are appended to a copy of `arr`. With an explicit
        `axis` they must match the shape of `arr` on all other axes;
        without one, any shape is accepted and flattened first.
    axis : int, optional
        The axis along which `values` are appended. If not given, both
        `arr` and `values` are flattened before use.

    Returns
    -------
    append : ndarray
        A copy of `arr` with `values` appended along `axis`. `append`
        never operates in place; if `axis` is None the result is 1-D.

    See Also
    --------
    insert : Insert elements into an array.
    delete : Delete elements from an array.

    Examples
    --------
    >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
    array([1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
    array([[1, 2, 3],
           [4, 5, 6],
           [7, 8, 9]])
    """
    base = np.asanyarray(arr)
    if axis is not None:
        return np.concatenate((base, values), axis=axis)
    # No axis: flatten both operands and join along the single axis.
    if base.ndim != 1:
        base = base.ravel()
    return np.concatenate((base, np.ravel(values)), axis=base.ndim - 1)
|
A little wavy or full of curls. Flexible, natural, and durable. Our range of wavy hair extensions come in a variety of lengths and colours.
With a selection of wavy hair extensions from the most recognised brands in hair extensions, we are sure you will find a product to suit your individual style. So turn up the volume and go seek that fresh new look that's all you.
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import datetime
import json
from django import http
from django.conf import settings
from django.contrib.auth.decorators import permission_required
from django.core.cache import cache
from django.shortcuts import redirect, render
from django.urls import reverse
from django.utils.http import urlquote
from csp.decorators import csp_update
from crashstats.crashstats import forms, models, utils
from crashstats.crashstats.decorators import pass_default_context
from crashstats.supersearch.models import SuperSearchFields
from socorro.external.crashstorage_base import CrashIDNotFound
# To prevent running in to a known Python bug
# (http://bugs.python.org/issue7980) — the first strptime call lazily
# imports the _strptime module, which is not thread-safe — we, here at
# "import time" (as opposed to run time), make use of time.strptime
# at least once so later concurrent calls are safe.
datetime.datetime.strptime('2013-07-15 10:00:00', '%Y-%m-%d %H:%M:%S')
def ratelimit_blocked(request, exception):
    """Respond to a request that was blocked by rate limiting.

    Uses HTTP 429 Too Many Requests (http://tools.ietf.org/html/rfc6585#page-3).
    """
    status = 429
    # Browsers advertise 'text/html' in their Accept header, e.g.
    # `Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8`.
    # AJAX calls and command-line clients like curl (which sends
    # `Accept: */*`) do not, so give those a terse plain-text body instead
    # of a rendered page.
    accepts_html = 'text/html' in request.META.get('HTTP_ACCEPT', '')
    if request.is_ajax() or not accepts_html:
        # Super spartan message; something like
        # `{"error": "Too Many Requests"}` would also work.
        return http.HttpResponse(
            'Too Many Requests',
            status=status,
            content_type='text/plain'
        )
    return render(request, 'crashstats/ratelimit_blocked.html', status=status)
def robots_txt(request):
    """Serve robots.txt; crawlers are allowed only when ENGAGE_ROBOTS is set."""
    directive = 'Allow' if settings.ENGAGE_ROBOTS else 'Disallow'
    body = 'User-agent: *\n%s: /' % directive
    return http.HttpResponse(body, content_type='text/plain')
def build_id_to_date(build_id):
    """Convert a build id whose first 8 digits are YYYYMMDD to 'YYYY-MM-DD'."""
    digits = str(build_id)[:8]
    return '-'.join([digits[0:4], digits[4:6], digits[6:8]])
@csp_update(CONNECT_SRC='analysis-output.telemetry.mozilla.org')
@pass_default_context
def report_index(request, crash_id, default_context=None):
    """Render the report page for a single crash.

    Fetches both the raw and the processed (unredacted) crash, prepares the
    stackwalker dump, bug associations, raw-crash keys and dump download
    URLs, then renders ``crashstats/report_index.html``. Access to PII and
    raw dumps is gated on the requesting user's permissions.
    """
    valid_crash_id = utils.find_crash_id(crash_id)
    if not valid_crash_id:
        return http.HttpResponseBadRequest('Invalid crash ID')

    # Sometimes, in Socorro we use a prefix on the crash ID. Usually it's
    # 'bp-' but this is configurable.
    # If you try to use this to reach the perma link for a crash, it should
    # redirect to the report index with the correct crash ID.
    if valid_crash_id != crash_id:
        return redirect(reverse('crashstats:report_index', args=(valid_crash_id,)))

    context = default_context or {}
    context['crash_id'] = crash_id

    # `?refresh=cache` lets a user force a cache bypass for this crash.
    refresh_cache = request.GET.get('refresh') == 'cache'

    raw_api = models.RawCrash()
    try:
        context['raw'] = raw_api.get(crash_id=crash_id, refresh_cache=refresh_cache)
    except CrashIDNotFound:
        # If the raw crash can't be found, we can't do much.
        return render(request, 'crashstats/report_index_not_found.html', context, status=404)
    utils.enhance_raw(context['raw'])

    # Whether the logged-in user is the person who submitted this crash.
    context['your_crash'] = (
        request.user.is_active and
        context['raw'].get('Email') == request.user.email
    )

    api = models.UnredactedCrash()
    try:
        context['report'] = api.get(crash_id=crash_id, refresh_cache=refresh_cache)
    except CrashIDNotFound:
        # The processed crash is missing, so ask for priority processing
        # ...if we haven't already done so.
        cache_key = 'priority_job:{}'.format(crash_id)
        if not cache.get(cache_key):
            priority_api = models.PriorityJob()
            priority_api.post(crash_ids=[crash_id])
            cache.set(cache_key, True, 60)
        return render(request, 'crashstats/report_index_pending.html', context)

    if 'json_dump' in context['report']:
        json_dump = context['report']['json_dump']
        # Strip PII-bearing dump sections for users without the permission.
        if 'sensitive' in json_dump and not request.user.has_perm('crashstats.view_pii'):
            del json_dump['sensitive']
        context['raw_stackwalker_output'] = json.dumps(
            json_dump,
            sort_keys=True,
            indent=4,
            separators=(',', ': ')
        )
        utils.enhance_json_dump(json_dump, settings.VCS_MAPPINGS)
        parsed_dump = json_dump
    else:
        context['raw_stackwalker_output'] = 'No dump available'
        parsed_dump = {}

    # NOTE(willkg): pull cpu count from parsed_dump if it's not in report;
    # remove in July 2019
    if 'cpu_count' not in context['report']:
        context['report']['cpu_count'] = parsed_dump.get('system_info', {}).get('cpu_count')

    # NOTE(willkg): "cpu_name" is deprecated, but populate "cpu_arch" if
    # cpu_arch is empty; remove in July 2019.
    if 'cpu_arch' not in context['report']:
        context['report']['cpu_arch'] = context['report']['cpu_name']

    context['crashing_thread'] = parsed_dump.get('crash_info', {}).get('crashing_thread')
    if context['report']['signature'].startswith('shutdownhang'):
        # For shutdownhang signatures, we want to use thread 0 as the
        # crashing thread, because that's the thread that actually contains
        # the useful data about what happened.
        context['crashing_thread'] = 0

    context['parsed_dump'] = parsed_dump
    context['bug_product_map'] = settings.BUG_PRODUCT_MAP

    # Bugs filed against the same signature, newest first.
    context['bug_associations'] = list(
        models.BugAssociation.objects
        .filter(signature=context['report']['signature'])
        .values('bug_id', 'signature')
        .order_by('-bug_id')
    )

    context['raw_keys'] = []
    if request.user.has_perm('crashstats.view_pii'):
        # hold nothing back
        context['raw_keys'] = context['raw'].keys()
    else:
        context['raw_keys'] = [
            x for x in context['raw']
            if x in models.RawCrash.API_ALLOWLIST()
        ]
    # Sort keys case-insensitively
    context['raw_keys'] = sorted(context['raw_keys'], key=lambda s: s.lower())

    if request.user.has_perm('crashstats.view_rawdump'):
        context['raw_dump_urls'] = [
            reverse('crashstats:raw_data', args=(crash_id, 'dmp')),
            reverse('crashstats:raw_data', args=(crash_id, 'json'))
        ]
        # Crashes can carry extra named minidumps, listed as a
        # comma-separated suffix string in the raw crash.
        if context['raw'].get('additional_minidumps'):
            suffixes = [
                x.strip()
                for x in context['raw']['additional_minidumps'].split(',')
                if x.strip()
            ]
            for suffix in suffixes:
                name = 'upload_file_minidump_%s' % (suffix,)
                context['raw_dump_urls'].append(
                    reverse('crashstats:raw_data_named', args=(crash_id, name, 'dmp'))
                )
        # Only link the memory report if it exists and processed cleanly.
        if (
            context['raw'].get('ContainsMemoryReport') and
            context['report'].get('memory_report') and
            not context['report'].get('memory_report_error')
        ):
            context['raw_dump_urls'].append(
                reverse('crashstats:raw_data_named', args=(crash_id, 'memory_report', 'json.gz'))
            )

    # Add descriptions to all fields.
    all_fields = SuperSearchFields().get()
    descriptions = {}
    for field in all_fields.values():
        key = '{}.{}'.format(field['namespace'], field['in_database_name'])
        descriptions[key] = '{} Search: {}'.format(
            field.get('description', '').strip() or 'No description for this field.',
            field['is_exposed'] and field['name'] or 'N/A',
        )

    def make_raw_crash_key(key):
        """In the report_index.html template we need to create a key
        that we can use to look up against the 'fields_desc' dict.

        Because you can't do something like this in jinja::

            {{ fields_desc.get(u'raw_crash.{}'.format(key), empty_desc) }}

        we do it here in the function instead.

        The trick is that the lookup key has to be a unicode object or
        else you get UnicodeEncodeErrors in the template rendering.
        """
        return u'raw_crash.{}'.format(key)
    context['make_raw_crash_key'] = make_raw_crash_key

    context['fields_desc'] = descriptions
    context['empty_desc'] = 'No description for this field. Search: unknown'

    context['BUG_PRODUCT_MAP'] = settings.BUG_PRODUCT_MAP

    # report.addons used to be a list of lists.
    # In https://bugzilla.mozilla.org/show_bug.cgi?id=1250132
    # we changed it from a list of lists to a list of strings, using
    # a ':' to split the name and version.
    # See https://bugzilla.mozilla.org/show_bug.cgi?id=1250132#c7
    # Considering legacy, let's tackle both.
    # In late 2017, this code is going to be useless and can be removed.
    if (
        context['report'].get('addons') and
        isinstance(context['report']['addons'][0], (list, tuple))
    ):
        # This is the old legacy format. This crash hasn't been processed
        # the new way.
        context['report']['addons'] = [
            ':'.join(x) for x in context['report']['addons']
        ]

    return render(request, 'crashstats/report_index.html', context)
@pass_default_context
def login(request, default_context=None):
    """Render the login page."""
    return render(request, 'crashstats/login.html', default_context or {})
def quick_search(request):
    """Redirect a free-form query to the most relevant search page.

    A query that looks like a crash id goes straight to its report page;
    any other non-empty text becomes a signature-contains super search;
    an empty query lands on the plain search page.
    """
    query = request.GET.get('query', '').strip()
    crash_id = utils.find_crash_id(query)
    if crash_id:
        return redirect(reverse(
            'crashstats:report_index',
            kwargs=dict(crash_id=crash_id)
        ))
    if query:
        return redirect('%s?signature=%s' % (
            reverse('supersearch:search'),
            urlquote('~%s' % query)
        ))
    return redirect(reverse('supersearch:search'))
@utils.json_view
def buginfo(request, signatures=None):
    """Return Bugzilla metadata for the bug ids given in the query string."""
    form = forms.BugInfoForm(request.GET)
    if not form.is_valid():
        return http.HttpResponseBadRequest(str(form.errors))
    return models.BugzillaBugInfo().get(form.cleaned_data['bug_ids'])
@permission_required('crashstats.view_rawdump')
def raw_data(request, crash_id, extension, name=None):
    """Stream a raw crash artifact: meta JSON, minidump, or memory report.

    :arg crash_id: the crash to fetch data for
    :arg extension: 'json' (crash metadata), 'dmp' (minidump) or 'json.gz'
        (memory report; only valid together with name='memory_report')
    :arg name: optional named dump, e.g. 'upload_file_minidump_browser'
    :raises NotImplementedError: for any unsupported extension
    """
    api = models.RawCrash()
    # `fetch_format` (not `format`) so we don't shadow the builtin.
    if extension == 'json':
        fetch_format = 'meta'
        content_type = 'application/json'
    elif extension == 'dmp':
        fetch_format = 'raw'
        content_type = 'application/octet-stream'
    elif extension == 'json.gz' and name == 'memory_report':
        # Note, if the name is 'memory_report' it will fetch a raw
        # crash with name and the files in the memory_report bucket
        # are already gzipped.
        # This is important because it means we don't need to gzip
        # the HttpResponse below.
        fetch_format = 'raw'
        content_type = 'application/octet-stream'
    else:
        raise NotImplementedError(extension)
    data = api.get(crash_id=crash_id, format=fetch_format, name=name)
    response = http.HttpResponse(content_type=content_type)
    if extension == 'json':
        response.write(json.dumps(data))
    else:
        response.write(data)
    return response
@pass_default_context
def about_throttling(request, default_context=None):
    """Render a simple page that explains how throttling works."""
    return render(request, 'crashstats/about_throttling.html',
                  default_context or {})
@pass_default_context
def home(request, default_context=None):
    """Render the site home page."""
    return render(request, 'crashstats/home.html', default_context or {})
@pass_default_context
def product_home(request, product, default_context=None):
    """Render the landing page for one product, choosing versions to show."""
    context = default_context or {}
    if product not in context['products']:
        raise http.Http404('Not a recognized product')

    if product in context['active_versions']:
        active = context['active_versions'][product]
        featured = [x['version'] for x in active if x['is_featured']]
        if not featured and active:
            # No featured versions, but there are active ones: fall back
            # to those, manually capped at NUMBER_OF_FEATURED_VERSIONS.
            featured = [
                x['version'] for x in active
            ][:settings.NUMBER_OF_FEATURED_VERSIONS]
        context['versions'] = featured
    else:
        context['versions'] = []

    return render(request, 'crashstats/product_home.html', context)
def handler500(request, template_name='500.html'):
    """Server-error handler that honors views marked as JSON endpoints."""
    # Every view with the `utils.json_view` decorator sets `_json_view`
    # on the request object to signal it wants a JSON response; re-use
    # that fact here.
    if getattr(request, '_json_view', False):
        payload = {
            'error': 'Internal Server Error',
            'path': request.path,
            'query_string': request.META.get('QUERY_STRING'),
        }
        return http.JsonResponse(payload, status=500)
    return render(request, '500.html', {}, status=500)
def handler404(request, exception, template_name='404.html'):
    """Not-found handler that honors views marked as JSON endpoints."""
    # Every view with the `utils.json_view` decorator sets `_json_view`
    # on the request object to signal it wants a JSON response; re-use
    # that fact here.
    if getattr(request, '_json_view', False):
        payload = {
            'error': 'Page not found',
            'path': request.path,
            'query_string': request.META.get('QUERY_STRING'),
        }
        return http.JsonResponse(payload, status=404)
    return render(request, '404.html', {}, status=404)
|
Rogan: Why Isn’t Kickboxing More Popular?
Lateral Spin/Drop in the Gi.
Will DC be able to impose his wrestling on the heavyweight champ?
|
from conans import ConanFile, CMake, tools
import shutil
class LibDisasmConan(ConanFile):
    """Conan recipe for libdisasm, a basic x86 disassembler library."""
    name = "libdisasm"
    version = "0.23"
    license = "Clarified Artistic License"
    # Fixed grammar: "An basic" -> "A basic".
    description = "A basic x86 disassembler in library form."
    topics = ("libdisasm", "disasm")
    settings = "os", "compiler", "build_type", "arch"
    options = {"shared": [True, False]}
    default_options = {"shared": False}
    generators = "cmake"
    exports_sources = ["CMakeLists.txt", "sizeofvoid.patch"]

    def source(self):
        """Download and unpack the release tarball, then patch it.

        The upstream tarball has no CMake support, so our exported
        CMakeLists.txt is moved into the source tree.
        """
        tools.download("https://sourceforge.net/projects/bastard/files/libdisasm/{0}/libdisasm-{0}.tar.gz/download".format(self.version),
                       "libdisasm-{}.tar.gz".format(self.version))
        tools.untargz("libdisasm-{}.tar.gz".format(self.version))
        tools.patch(patch_file="sizeofvoid.patch",
                    base_path="libdisasm-{}".format(self.version))
        shutil.move("CMakeLists.txt", "libdisasm-{}/".format(self.version))

    def get_env(self):
        """Return a CMake helper configured against the unpacked sources."""
        cmake = CMake(self)
        cmake.configure(source_folder="libdisasm-{}".format(self.version))
        return cmake

    def build(self):
        """Build the library with CMake."""
        self.get_env().build()

    def package(self):
        """Install build artifacts into the package folder."""
        self.get_env().install()

    def package_info(self):
        """Expose link and include information to consumers."""
        self.cpp_info.libs = ["disasm"]
        self.cpp_info.includedirs = ["include"]
|
50, 60, 100, 110 and 120 Micron.
Available in 25 or 50 Pound buckets.
Glass Beads are carried in 50 and 80 Micron sizes.
Dental Pumice is available in the following sizes: Very Coarse, Coarse, Medium, Fine, Very Fine and Flour.
Walnut Shells for polishing and blasting.
All shipments are made within 24 hours from our large local inventory. Shipments are made via UPS. For your convenience, all material is packaged in damage and moisture resistant, resealable plastic containers.
Copyright 2009 dental blast. All rights reserved.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# **************************************************************************
# Copyright © 2017 jianglin
# File Name: count.py
# Author: jianglin
# Email: xiyang0807@gmail.com
# Created: 2017-03-29 21:28:52 (CST)
# Last Update:星期日 2017-4-2 15:24:37 (CST)
# By:
# Description: 一些统计信息
# **************************************************************************
from flask import request
from .extension import redis_data
class Count(object):
    """Redis-backed counters for forum statistics.

    Counters live in Redis hashes under per-entity keys such as
    'count:board:<id>', 'count:topic:<id>', 'count:user:<id>' and the
    global 'count:forums' key. Each accessor optionally increments its
    counter (when ``value`` is given) and always returns the current
    value, defaulting to 0 when the field does not exist yet.
    """

    @staticmethod
    def _incr(key, field, value):
        # Single funnel for increments; HINCRBY creates the hash and the
        # field on first use, so no initialization step is needed.
        redis_data.hincrby(key, field, value)

    @staticmethod
    def _get(key, field, default=0):
        # HGET returns None/empty for missing fields; fall back to default.
        return redis_data.hget(key, field) or default

    @classmethod
    def board_topic_count(cls, boardId, value=None):
        """Topic count of one board; increment by ``value`` when given."""
        key = 'count:board:%s' % str(boardId)
        if value is not None:
            cls._incr(key, 'topic', value)
        return cls._get(key, 'topic')

    @classmethod
    def board_post_count(cls, boardId, value=None):
        """Post count of one board; increment by ``value`` when given."""
        key = 'count:board:%s' % str(boardId)
        if value is not None:
            cls._incr(key, 'post', value)
        return cls._get(key, 'post')

    @classmethod
    def topic_reply_count(cls, topicId, value=None):
        """Reply count of one topic; increment by ``value`` when given."""
        key = 'count:topic:%s' % str(topicId)
        if value is not None:
            cls._incr(key, 'replies', value)
        return cls._get(key, 'replies')

    @classmethod
    def topic_read_count(cls, topicId, value=None):
        """Read count of one topic, throttled per client address."""
        key = 'count:topic:%s' % str(topicId)
        expire_key = 'expire:topic:read:{}'.format(request.remote_addr)
        if not redis_data.exists(expire_key):
            # Throttle: the read count is not incremented again for the
            # same remote address within three minutes.
            redis_data.set(expire_key, '1')
            redis_data.expire(expire_key, 180)
            if value is not None:
                cls._incr(key, 'read', value)
        return cls._get(key, 'read')

    @classmethod
    def reply_liker_count(cls, replyId, value=None):
        """Liker count of one reply; increment by ``value`` when given."""
        key = 'count:reply:%s' % str(replyId)
        if value is not None:
            cls._incr(key, 'liker', value)
        return cls._get(key, 'liker')

    @classmethod
    def user_topic_count(cls, userId, value=None):
        """Topic count of one user; also bumps the forum-wide totals."""
        key = 'count:user:%s' % str(userId)
        if value is not None:
            cls._incr(key, 'topic', value)
            # A new topic also counts as a post for the whole forum.
            cls.forums_post_count(1)
            cls.forums_topic_count(1)
        return cls._get(key, 'topic')

    @classmethod
    def user_reply_count(cls, userId, value=None):
        """Reply count of one user; also bumps the forum post total."""
        key = 'count:user:%s' % str(userId)
        if value is not None:
            cls._incr(key, 'replies', value)
            cls.forums_post_count(1)
        return cls._get(key, 'replies')

    @classmethod
    def user_message_count(cls, userId, value=None, clear=False):
        """Unread message count of one user; ``clear`` resets it to 0."""
        key = 'count:user:%s' % str(userId)
        if value is not None:
            cls._incr(key, 'message', value)
        if clear:
            redis_data.hset(key, 'message', 0)
        return cls._get(key, 'message')

    @classmethod
    def user_email_time(cls, userId, value=None):
        """Timestamp of the user's last email; set when ``value`` given."""
        key = 'count:user:%s' % str(userId)
        if value is not None:
            redis_data.hset(key, 'email', value)
        # Epoch-like fallback so callers can always parse a timestamp.
        return cls._get(key, 'email', '2015-1-1 1:1:1')

    @classmethod
    def forums_user_count(cls, value=None):
        """Forum-wide user count; increment by ``value`` when given."""
        key = 'count:forums'
        if value is not None:
            cls._incr(key, 'user', value)
        return cls._get(key, 'user')

    @classmethod
    def forums_topic_count(cls, value=None):
        """Forum-wide topic count; increment by ``value`` when given."""
        key = 'count:forums'
        if value is not None:
            cls._incr(key, 'topic', value)
        return cls._get(key, 'topic')

    @classmethod
    def forums_post_count(cls, value=None):
        """Forum-wide post count; increment by ``value`` when given."""
        key = 'count:forums'
        if value is not None:
            cls._incr(key, 'post', value)
        return cls._get(key, 'post')
|
Lease a Surface Book 2 and All the Benefits of Our Service Wrapper Included with a 25% Discount!
Microsoft’s second generation in their iconic break into premium products is now on SALE for just £16.20 per week + VAT. So pick up this Box Open Microsoft Surface Book 2 with Core i7, 16GB RAM, 512GB SSD and 13.5″ PixelSense Touchscreen for this bargain price.
This product allows you to take full advantage of our Flexi-Lease solution providing you with our Three Year "No Quibble" Warranty, Technical Support throughout your lease and Guaranteed Ownership for just £1 at the end of your lease!
|
import sys
import time
from contextlib import contextmanager
from .main import EventReceiver
class Recorder(object):
    """Use this class to record the result of running python code as a xunit xml

    It allows you to record a series of steps into a single xunit.xml file.
    """

    def __init__(self, xunit_destination, name, package_name=None):
        self.name = name
        self.package_name = package_name
        self.destination = xunit_destination
        # Created lazily in __enter__ so a Recorder can be constructed
        # without starting a recording session.
        self.event_receiver = None

    def __enter__(self):
        self.event_receiver = EventReceiver()
        return self

    def now_seconds(self):
        """Return the current wall-clock time as seconds since the epoch."""
        return time.time()

    def step(self, step_name):
        """Start a new step. returns a context manager which allows you to
        report an error"""
        @contextmanager
        def step_context(step_name):
            if self.event_receiver.current_case is not None:
                raise Exception('cannot open a step within a step')
            self.event_receiver.begin_case(step_name, self.now_seconds(), self.name)
            try:
                yield self.event_receiver
            except BaseException:
                # Was a bare `except:` — same semantics, but explicit.
                # Record the failure before re-raising so the report still
                # contains the errored step.
                etype, evalue, tb = sys.exc_info()
                self.event_receiver.error('%r' % [etype, evalue, tb])
                raise
            finally:
                # Always close the case, even on failure.
                self.event_receiver.end_case(step_name, self.now_seconds())
        return step_context(step_name)

    def __exit__(self, *exc_info):
        results = self.event_receiver.results()
        if not results:
            # Don't mask an exception that is already unwinding through
            # the `with` block with our own "no steps" error.
            already_throwing = exc_info and exc_info[0] is not None
            if not already_throwing:
                raise ValueError('your hook must at least perform one step!')
        self.destination.write_reports(
            self.name, self.name, results, package_name=self.package_name,
        )
|
Sunidhi Chauhan’s Mohabbat Chords – Guitar | Presenting the Guitar chords of the latest song by Sunidhi Chauhan ji – Mohabbat, starring Aishwarya Rai Bachchan. The lyrics of this song are penned by Irshad Kamil and it is sung by none other than Sunidhi Chauhan. The scale of the Mohabbat song is Gb and it is a 4-chord song. The chords that we’ve used to play the song on Guitar are Gb, B, Db, D#m.
|
import pytest
from .. import mock
from .. import unittest
from compose.network import check_remote_network_config
from compose.network import Network
from compose.network import NetworkConfigChangedError
class NetworkTest(unittest.TestCase):
    """Unit tests for compose.network.check_remote_network_config.

    Each test builds a local Network definition and a dict shaped like the
    Docker Engine's network-inspect payload, then asserts that the config
    comparison passes, raises NetworkConfigChangedError, or only warns.
    """

    def test_check_remote_network_config_success(self):
        """A remote config matching the local definition passes.

        Note the IPAM config entries and aux addresses are deliberately
        listed in a different order remotely than locally.
        """
        options = {'com.docker.network.driver.foo': 'bar'}
        ipam_config = {
            'driver': 'default',
            'config': [
                {'subnet': '172.0.0.1/16', },
                {
                    'subnet': '156.0.0.1/25',
                    'gateway': '156.0.0.1',
                    'aux_addresses': ['11.0.0.1', '24.25.26.27'],
                    'ip_range': '156.0.0.1-254'
                }
            ],
            'options': {
                'iface': 'eth0',
            }
        }
        labels = {
            'com.project.tests.istest': 'true',
            'com.project.sound.track': 'way out of here',
        }
        # The engine adds compose-managed labels on top of user labels.
        remote_labels = labels.copy()
        remote_labels.update({
            'com.docker.compose.project': 'compose_test',
            'com.docker.compose.network': 'net1',
        })
        net = Network(
            None, 'compose_test', 'net1', 'bridge',
            options, enable_ipv6=True, ipam=ipam_config,
            labels=labels
        )
        check_remote_network_config(
            {
                'Driver': 'bridge',
                'Options': options,
                'EnableIPv6': True,
                'Internal': False,
                'Attachable': True,
                'IPAM': {
                    'Driver': 'default',
                    'Config': [{
                        'Subnet': '156.0.0.1/25',
                        'Gateway': '156.0.0.1',
                        'AuxiliaryAddresses': ['24.25.26.27', '11.0.0.1'],
                        'IPRange': '156.0.0.1-254'
                    }, {
                        'Subnet': '172.0.0.1/16',
                        'Gateway': '172.0.0.1'
                    }],
                    'Options': {
                        'iface': 'eth0',
                    },
                },
                'Labels': remote_labels
            },
            net
        )

    def test_check_remote_network_config_whitelist(self):
        """Engine-managed driver options present only remotely are ignored."""
        options = {'com.docker.network.driver.foo': 'bar'}
        remote_options = {
            'com.docker.network.driver.overlay.vxlanid_list': '257',
            'com.docker.network.driver.foo': 'bar',
            'com.docker.network.windowsshim.hnsid': 'aac3fd4887daaec1e3b',
        }
        net = Network(
            None, 'compose_test', 'net1', 'overlay',
            options
        )
        check_remote_network_config(
            {'Driver': 'overlay', 'Options': remote_options}, net
        )

    @mock.patch('compose.network.Network.true_name', lambda n: n.full_name)
    def test_check_remote_network_config_driver_mismatch(self):
        """A different remote driver raises NetworkConfigChangedError."""
        net = Network(None, 'compose_test', 'net1', 'overlay')
        with pytest.raises(NetworkConfigChangedError) as e:
            check_remote_network_config(
                {'Driver': 'bridge', 'Options': {}}, net
            )
        assert 'driver has changed' in str(e.value)

    @mock.patch('compose.network.Network.true_name', lambda n: n.full_name)
    def test_check_remote_network_config_options_mismatch(self):
        """A changed driver option value raises NetworkConfigChangedError."""
        net = Network(None, 'compose_test', 'net1', 'overlay')
        with pytest.raises(NetworkConfigChangedError) as e:
            check_remote_network_config({'Driver': 'overlay', 'Options': {
                'com.docker.network.driver.foo': 'baz'
            }}, net)
        assert 'option "com.docker.network.driver.foo" has changed' in str(e.value)

    def test_check_remote_network_config_null_remote(self):
        """A remote config with Options=None is accepted."""
        net = Network(None, 'compose_test', 'net1', 'overlay')
        check_remote_network_config(
            {'Driver': 'overlay', 'Options': None}, net
        )

    def test_check_remote_network_config_null_remote_ipam_options(self):
        """A remote IPAM section with Options=None is accepted."""
        ipam_config = {
            'driver': 'default',
            'config': [
                {'subnet': '172.0.0.1/16', },
                {
                    'subnet': '156.0.0.1/25',
                    'gateway': '156.0.0.1',
                    'aux_addresses': ['11.0.0.1', '24.25.26.27'],
                    'ip_range': '156.0.0.1-254'
                }
            ]
        }
        net = Network(
            None, 'compose_test', 'net1', 'bridge', ipam=ipam_config,
        )
        check_remote_network_config(
            {
                'Driver': 'bridge',
                'Attachable': True,
                'IPAM': {
                    'Driver': 'default',
                    'Config': [{
                        'Subnet': '156.0.0.1/25',
                        'Gateway': '156.0.0.1',
                        'AuxiliaryAddresses': ['24.25.26.27', '11.0.0.1'],
                        'IPRange': '156.0.0.1-254'
                    }, {
                        'Subnet': '172.0.0.1/16',
                        'Gateway': '172.0.0.1'
                    }],
                    'Options': None
                },
            },
            net
        )

    @mock.patch('compose.network.Network.true_name', lambda n: n.full_name)
    def test_check_remote_network_labels_mismatch(self):
        """A changed label only warns; it does not raise."""
        net = Network(None, 'compose_test', 'net1', 'overlay', labels={
            'com.project.touhou.character': 'sakuya.izayoi'
        })
        remote = {
            'Driver': 'overlay',
            'Options': None,
            'Labels': {
                'com.docker.compose.network': 'net1',
                'com.docker.compose.project': 'compose_test',
                'com.project.touhou.character': 'marisa.kirisame',
            }
        }
        with mock.patch('compose.network.log') as mock_log:
            check_remote_network_config(remote, net)
        mock_log.warning.assert_called_once_with(mock.ANY)
        # Inspect the actual warning text passed to log.warning.
        _, args, kwargs = mock_log.warning.mock_calls[0]
        assert 'label "com.project.touhou.character" has changed' in args[0]

    def test_remote_config_labels_none(self):
        """A remote config with Labels=None is accepted."""
        remote = {'Labels': None}
        local = Network(None, 'test_project', 'test_network')
        check_remote_network_config(remote, local)
|
Howdy peoples, this image is about The Guest Bath Had A Shower Area That Was Dated And Confining. A New Frameless (awesome Bath Shower #5). This photo is a image/jpeg and the resolution of this file is 662 x 993. This attachment's file size is only 75 KB. If You want to save This photo to Your PC, you could Click here. You also too download more attachments by clicking the following photo or read more at this article: Bath Shower.
Bath Shower on the veranda of the home will make your minimalist household symbol so that the style seems stylish, of the rooftop should really be perfect and lavish. This luxury will even supply the feeling of being about the front-porch minimalism that is relaxed and appears more lovely to look from your external.
All of that can be recognized by choosing the floor that was right with regards to hues and motifs. Colors are pure and vivid typically the most popular selection nowadays, color age, because these shades can provide magnificent atmosphere and an appropriate environment neat of elegance.
One of the pieces which make a comfortable house observed from the vision, looked excellent and lavish household is The Guest Bath Had A Shower Area That Was Dated And Confining. A New Frameless (awesome Bath Shower #5). With the choice and right sleeping of ceramic flooring, the bedrooms were ordinary may be developed into a room that seems huge and magnificent.
|
# -*- coding: utf-8 -*-
#
#
# Author: Alexandre Fayolle
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp.osv import orm, fields
class stock_move(orm.Model):
    """Extend stock moves with a manual ordering sequence."""
    _inherit = 'stock.move'
    _columns = {
        'sequence': fields.integer('Sequence',
                                   help="Gives the sequence of this line when "
                                   "displaying the picking."),
    }
    # Sort moves by expected date first, then by the manual sequence.
    _order = 'date_expected desc, sequence, id'
    _defaults = {'sequence': 10,
                 }
class stock_picking(orm.Model):
    """Propagate the stock move sequence onto generated invoice lines."""
    _inherit = 'stock.picking'

    def _prepare_invoice_line(
        self, cr, uid, group, picking, move_line, invoice_id,
        invoice_vals, context=None
    ):
        """Copy the move line's sequence into the invoice line values."""
        vals = super(stock_picking, self)._prepare_invoice_line(
            cr, uid, group, picking, move_line, invoice_id, invoice_vals,
            context)
        vals['sequence'] = move_line.sequence
        return vals
class sale_order(orm.Model):
    """Propagate the sale order line sequence onto generated stock moves."""
    _inherit = 'sale.order'

    def _prepare_order_line_move(
        self, cr, uid, order, line, picking_id, date_planned, context=None
    ):
        """Copy the order line's sequence into the stock move values."""
        vals = super(sale_order, self)._prepare_order_line_move(
            cr, uid, order, line, picking_id, date_planned, context)
        vals['sequence'] = line.sequence
        return vals
|
As part of “The Sound of Sax” exhibit, The Museum of Making Music has installed several Lilitab iPad kiosks to share videos and sound clips to entertain and educate visitors about the process of music creation. The Lilitab kiosks contain headphone mounts on the stands, providing enhanced sound and improved visitor interactions.
“It was a great solution”, says B.J. Morgan, Marketing Manager at The Museum. The iPads® remain fully charged when plugged into an outlet using the integrated charging cable in each stand. Kiosk set-up is quick and easy; just attach an iPad, secure the head unit, and plug in the power cord.
Installing different home button access face plates regulates visitor interactions; full web access, a single site or specific application can be configured for varying visitor experiences.
Temporary installations are easily managed with the lightweight and portable Lilitab kiosk. The modular design of the mounting solution provides for several types of visitor stations: wall mounted, surface mounted, tabletop or floor stand.
Full ADA compliance assures the Lilitab provides easy iPad® interactions for wheelchair visitors.
The Lilitab iPad enclosure head unit securely locks the tablet in place. Every mounting solution has the option to be bolted or cable-locked to the floor or other fixture. Integrated into the Lilitab design from the very beginning, museums can be sure their iPad® resources are safe in public venues.
The Lilitab iPad kiosk is versatile, modular and easy to customize. The Lilitab Tabletop turns any desk or table into a self-service station with quick installation; the Lilitab Surface is for a more permanent solution for a wall or counter. Keyboard and printer shelves are also available as added accessories. Optional banners to promote special events or seasonal activities enhance the visitor experience even more.
The Museum of Making Music, founded in 1998 by the National Association of Music Merchants (NAMM), was developed to showcase and celebrate the music products industry. The Museum provides opportunities for cultural enrichment while preserving our musical heritage through special exhibitions, innovative concerts and educational programs aimed toward a wide range of audiences.
Lilitab is a group of designers, engineers & business people passionate about the use of tablet computers in public-use environments. With their compelling interface, compact form-factor and economical price-point, there are all kinds of new and unique uses for putting interactivity into the public space. Additional applications from Lilitab include an iPad credit card reader and an iPad ID scanner.
Founded in 2011 by Adam Aronson, an award winning kiosk designer, Lilitab is committed to delivering the most secure, functional and stylish security enclosures for ANY tablet. Adam has been designing custom kiosk enclosures for public-use environments since 1994 in San Francisco.
Museums can engage visitors using the affordable and sleek Lilitab iPad kiosk.
|
# This Python file uses the following encoding: utf-8
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for vcfio module."""
import glob
import gzip
import logging
import os
import tempfile
import unittest
import apache_beam as beam
from apache_beam.io.filesystem import CompressionTypes
import apache_beam.io.source_test_utils as source_test_utils
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from gcp_variant_transforms.testing import asserts
from gcp_variant_transforms.beam_io import vcfio
from gcp_variant_transforms.beam_io.vcfio import _VcfSource as VcfSource
from gcp_variant_transforms.beam_io.vcfio import ReadAllFromVcf
from gcp_variant_transforms.beam_io.vcfio import ReadFromVcf
from gcp_variant_transforms.beam_io.vcfio import Variant
from gcp_variant_transforms.beam_io.vcfio import VariantCall
from gcp_variant_transforms.beam_io.vcfio import SampleNameEncoding
from gcp_variant_transforms.testing import testdata_util
from gcp_variant_transforms.testing.temp_dir import TempDir
# Note: mixing \n and \r\n to verify both behaviors.
# Minimal VCF header: file format, two INFO fields, two FORMAT fields and
# the column header naming two samples.
_SAMPLE_HEADER_LINES = [
    '##fileformat=VCFv4.2\n',
    '##INFO=<ID=NS,Number=1,Type=Integer,Description="Number samples">\n',
    '##INFO=<ID=AF,Number=A,Type=Float,Description="Allele Frequency">\n',
    '##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\r\n',
    '##FORMAT=<ID=GQ,Number=1,Type=Integer,Description="Genotype Quality">\n',
    '#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tSample1\tSample2\r'
    '\n',
]

# Body records covering common shapes: multi-allelic, filtered, missing
# values, phased/unphased calls and a symbolic alternate.
_SAMPLE_TEXT_LINES = [
    '20\t14370\t.\tG\tA\t29\tPASS\tAF=0.5\tGT:GQ\t0|0:48\t1|0:48\n',
    '20\t17330\t.\tT\tA\t3\tq10\tAF=0.017\tGT:GQ\t0|0:49\t0|1:3\n',
    '20\t1110696\t.\tA\tG,T\t67\tPASS\tAF=0.3,0.7\tGT:GQ\t1|2:21\t2|1:2\n',
    '20\t1230237\t.\tT\t.\t47\tPASS\t.\tGT:GQ\t0|0:54\t0|0:48\n',
    '19\t1234567\t.\tGTCT\tG,GTACT\t50\tPASS\t.\tGT:GQ\t0/1:35\t0/2:17\n',
    '20\t1234\trs123\tC\tA,T\t50\tPASS\tAF=0.5\tGT:GQ\t0/0:48\t1/0:20\n',
    '19\t123\trs1234\tGTC\t.\t40\tq10;s50\tNS=2\tGT:GQ\t1|0:48\t0/1:.\n',
    '19\t12\t.\tC\t<SYMBOLIC>\t49\tq10\tAF=0.5\tGT:GQ\t0|1:45\t.:.\n'
]

# Shorthand for hashing sample names the way the pipeline does.
hash_name = testdata_util.hash_name

# Individual records used by single tests.
# VCF_LINE_1: multiple names/alternates and a non-ASCII INFO value.
VCF_LINE_1 = ('20 1234 rs123;rs2 C A,T 50 '
              'PASS AF=0.5,0.1;NS=1;SVTYPE=BÑD GT:GQ 0/0:48 1/0:20\n')
# VCF_LINE_2: no alternates, multiple filters, missing genotype parts.
VCF_LINE_2 = '19 123 rs1234 GTC . 40 q10;s50 NS=2 GT:GQ .|0:48 0/.:.\n'
# VCF_LINE_3: symbolic alternate with a phase set.
VCF_LINE_3 = (
    '19 12 . C <SYMBOLIC> 49 q10 AF=0.5 GT:PS:GQ 0|1:1:45 .:.:.\n')
# GVCF_LINE: gVCF-style reference block record with an END position.
GVCF_LINE = '19 1234 . C <NON_REF> 50 . END=1236 GT:GQ 0/0:99\n'
def _get_hashing_function(file_name, use_hashing):
def _hash_name_method(sample_name):
return sample_name if not use_hashing else hash_name(sample_name, file_name)
return _hash_name_method
def _get_sample_variant_1(file_name='', use_1_based_coordinate=False,
                          use_hashing=True, move_hom_ref_calls=False):
  """Builds the `Variant` corresponding to VCF_LINE_1.

  Features:
    multiple alternates
    not phased
    multiple names
    utf-8 encoded
  """
  encode = _get_hashing_function(file_name, use_hashing)
  # Adding the bool shifts start by 1 for 1-based coordinates.
  start = 1233 + use_1_based_coordinate
  hom_ref_calls = None
  if move_hom_ref_calls:
    hom_ref_calls = [('Sample1', encode('Sample1'))]
  variant = vcfio.Variant(
      reference_name='20', start=start, end=1234, reference_bases='C',
      alternate_bases=['A', 'T'], names=['rs123', 'rs2'], quality=50,
      filters=['PASS'], hom_ref_calls=hom_ref_calls,
      info={'AF': [0.5, 0.1], 'NS': 1, 'SVTYPE': ['BÑD']})
  # Sample1 is 0/0, so it is omitted from `calls` when hom-ref calls are
  # moved into `hom_ref_calls` above.
  if not move_hom_ref_calls:
    variant.calls.append(
        vcfio.VariantCall(sample_id=encode('Sample1'), name='Sample1',
                          genotype=[0, 0], info={'GQ': 48}))
  variant.calls.append(
      vcfio.VariantCall(sample_id=encode('Sample2'), name='Sample2',
                        genotype=[1, 0], info={'GQ': 20}))
  return variant
def _get_sample_variant_2(file_name='', use_1_based_coordinate=False,
                          use_hashing=True, move_hom_ref_calls=False):
  """Builds the `Variant` corresponding to VCF_LINE_2.

  Features:
    multiple references
    no alternate
    phased
    multiple filters
    missing format field
  """
  encode = _get_hashing_function(file_name, use_hashing)
  variant = vcfio.Variant(
      reference_name='19',
      start=122 + use_1_based_coordinate, end=125, reference_bases='GTC',
      alternate_bases=[], names=['rs1234'], quality=40,
      filters=['q10', 's50'],
      hom_ref_calls=[] if move_hom_ref_calls else None,
      info={'NS': 2})
  # -1 genotype entries encode the '.' (missing) allele.
  variant.calls.extend([
      vcfio.VariantCall(sample_id=encode('Sample1'), name='Sample1',
                        genotype=[-1, 0],
                        phaseset=vcfio.DEFAULT_PHASESET_VALUE,
                        info={'GQ': 48}),
      vcfio.VariantCall(sample_id=encode('Sample2'), name='Sample2',
                        genotype=[0, -1], info={'GQ': None}),
  ])
  return variant
def _get_sample_variant_3(file_name='', use_1_based_coordinate=False,
                          use_hashing=True, move_hom_ref_calls=False):
  """Builds the `Variant` corresponding to VCF_LINE_3.

  Features:
    symbolic alternate
    no calls for sample 2
    alternate phaseset
  """
  encode = _get_hashing_function(file_name, use_hashing)
  variant = vcfio.Variant(
      reference_name='19', start=11 + use_1_based_coordinate, end=12,
      reference_bases='C', alternate_bases=['<SYMBOLIC>'], quality=49,
      filters=['q10'],
      hom_ref_calls=[] if move_hom_ref_calls else None,
      info={'AF': [0.5]})
  variant.calls.extend([
      # Sample1 uses a non-default phaseset ('1' from the PS field).
      vcfio.VariantCall(sample_id=encode('Sample1'), name='Sample1',
                        genotype=[0, 1], phaseset='1', info={'GQ': 45}),
      vcfio.VariantCall(sample_id=encode('Sample2'), name='Sample2',
                        genotype=[vcfio.MISSING_GENOTYPE_VALUE],
                        info={'GQ': None}),
  ])
  return variant
def _get_sample_non_variant(use_1_based_coordinate=False):
  """Builds the non-variant (gVCF <NON_REF> block) corresponding to GVCF_LINE."""
  non_variant = vcfio.Variant(
      reference_name='19', start=1233 + use_1_based_coordinate, end=1236,
      reference_bases='C', alternate_bases=['<NON_REF>'], quality=50)
  call = vcfio.VariantCall(sample_id=hash_name('Sample1'), name='Sample1',
                           genotype=[0, 0], info={'GQ': 99})
  non_variant.calls.append(call)
  return non_variant
class VcfSourceTest(unittest.TestCase):
  """Tests reading variants via `VcfSource`, `ReadFromVcf` and `ReadAllFromVcf`."""

  # Tests that rely on the large checked-in data files are skipped when the
  # test-data directory is not present.
  VCF_FILE_DIR_MISSING = not os.path.exists(testdata_util.get_full_dir())

  def _create_temp_vcf_file(
      self, lines, tempdir, compression_type=CompressionTypes.UNCOMPRESSED):
    """Writes `lines` to a temp file whose suffix matches `compression_type`.

    Returns:
      Path of the created file.
    """
    if compression_type in (CompressionTypes.UNCOMPRESSED,
                            CompressionTypes.AUTO):
      suffix = '.vcf'
    elif compression_type == CompressionTypes.GZIP:
      suffix = '.vcf.gz'
    elif compression_type == CompressionTypes.BZIP2:
      suffix = '.vcf.bz2'
    else:
      raise ValueError('Unrecognized compression type {}'.format(
          compression_type))
    return tempdir.create_temp_file(
        suffix=suffix, lines=lines, compression_type=compression_type)

  def _read_records(self, file_or_pattern, representative_header_lines=None,
                    sample_name_encoding=SampleNameEncoding.WITHOUT_FILE_PATH,
                    **kwargs):
    """Reads all records matching `file_or_pattern` through a `VcfSource`."""
    return source_test_utils.read_from_source(
        VcfSource(file_or_pattern,
                  representative_header_lines=representative_header_lines,
                  sample_name_encoding=sample_name_encoding,
                  **kwargs))

  def _create_temp_file_and_read_records(
      self, lines, representative_header_lines=None,
      sample_name_encoding=SampleNameEncoding.WITHOUT_FILE_PATH):
    """Writes `lines` to a temp VCF file and returns the parsed records."""
    return self._create_temp_file_and_return_records_with_file_name(
        lines, representative_header_lines, sample_name_encoding)[0]

  def _create_temp_file_and_return_records_with_file_name(
      self, lines, representative_header_lines=None,
      sample_name_encoding=SampleNameEncoding.WITHOUT_FILE_PATH):
    """Like `_create_temp_file_and_read_records`, but also returns the path."""
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file(suffix='.vcf', lines=lines)
      return (self._read_records(file_name, representative_header_lines,
                                 sample_name_encoding), file_name)

  def _assert_variants_equal(self, actual, expected):
    """Asserts two collections of variants are equal, ignoring order."""
    self.assertEqual(
        sorted(expected),
        sorted(actual))

  def _get_invalid_file_contents(self):
    """Gets sample invalid file contents.

    Returns:
      A list of file contents (each a list of lines) that are invalid
      because of malformed records.
    """
    malformed_vcf_records = [
        # POS should be an integer.
        [
            '##FILTER=<ID=PASS,Description="All filters passed">\n',
            '##FILTER=<ID=q10,Description="Quality is less than 10.">\n',
            '#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tSample\n',
            '19\tabc\trs12345\tT\tC\t9\tq10\tAF=0.2;NS=2\tGT:GQ\t1|0:48\n',
        ]
    ]
    return malformed_vcf_records

  def _assert_pipeline_read_files_record_count_equal(
      self, input_pattern, expected_count, use_read_all=False):
    """Helper method for verifying total records read.

    Args:
      input_pattern (str): Input file pattern to read.
      expected_count (int): Expected number of records that was read.
      use_read_all (bool): Whether to use the scalable ReadAllFromVcf transform
        instead of ReadFromVcf.
    """
    pipeline = TestPipeline()
    if use_read_all:
      pcoll = (pipeline
               | 'Create' >> beam.Create([input_pattern])
               | 'Read' >> ReadAllFromVcf())
    else:
      pcoll = pipeline | 'Read' >> ReadFromVcf(input_pattern)
    assert_that(pcoll, asserts.count_equals_to(expected_count))
    pipeline.run()

  @unittest.skipIf(VCF_FILE_DIR_MISSING, 'VCF test file directory is missing')
  def test_read_single_file_large(self):
    test_data_configs = [
        {'file': 'valid-4.0.vcf', 'num_records': 5},
        {'file': 'valid-4.0.vcf.gz', 'num_records': 5},
        {'file': 'valid-4.0.vcf.bz2', 'num_records': 5},
        {'file': 'valid-4.1-large.vcf', 'num_records': 9882},
        {'file': 'valid-4.2.vcf', 'num_records': 13},
    ]
    for config in test_data_configs:
      read_data = self._read_records(
          testdata_util.get_full_file_path(config['file']))
      self.assertEqual(config['num_records'], len(read_data))

  @unittest.skipIf(VCF_FILE_DIR_MISSING, 'VCF test file directory is missing')
  def test_read_file_pattern_large(self):
    read_data = self._read_records(
        os.path.join(testdata_util.get_full_dir(), 'valid-*.vcf'))
    self.assertEqual(9900, len(read_data))
    read_data_gz = self._read_records(
        os.path.join(testdata_util.get_full_dir(), 'valid-*.vcf.gz'))
    self.assertEqual(9900, len(read_data_gz))

  def test_single_file_no_records(self):
    # Files with only blank-ish content must parse to zero records.
    for content in [[''], [' '], ['', ' ', '\n'], ['\n', '\r\n', '\n']]:
      self.assertEqual([], self._create_temp_file_and_read_records(
          content, _SAMPLE_HEADER_LINES))

  def test_single_file_1_based_verify_details(self):
    variant = _get_sample_variant_1(use_1_based_coordinate=True)
    read_data = None
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file(
          suffix='.vcf', lines=_SAMPLE_HEADER_LINES + [VCF_LINE_1])
      read_data = source_test_utils.read_from_source(
          VcfSource(file_name,
                    representative_header_lines=None,
                    sample_name_encoding=SampleNameEncoding.WITHOUT_FILE_PATH,
                    use_1_based_coordinate=True))
    self.assertEqual(1, len(read_data))
    self.assertEqual(variant, read_data[0])

  def test_file_pattern_move_hom_ref_calls_verify_details(self):
    variant_1 = _get_sample_variant_1(move_hom_ref_calls=True)
    variant_2 = _get_sample_variant_2(move_hom_ref_calls=True)
    variant_3 = _get_sample_variant_3(move_hom_ref_calls=True)
    with TempDir() as tempdir:
      _ = tempdir.create_temp_file(
          suffix='.vcf', lines=_SAMPLE_HEADER_LINES + [VCF_LINE_1])
      _ = tempdir.create_temp_file(
          suffix='.vcf', lines=_SAMPLE_HEADER_LINES + [VCF_LINE_2, VCF_LINE_3])
      read_data = source_test_utils.read_from_source(
          VcfSource(os.path.join(tempdir.get_path(), '*.vcf'),
                    representative_header_lines=None,
                    sample_name_encoding=SampleNameEncoding.WITHOUT_FILE_PATH,
                    move_hom_ref_calls=True))
      self.assertEqual(3, len(read_data))
      self._assert_variants_equal([variant_1, variant_2, variant_3], read_data)

  def test_file_pattern_1_based_verify_details(self):
    variant_1 = _get_sample_variant_1(use_1_based_coordinate=True)
    variant_2 = _get_sample_variant_2(use_1_based_coordinate=True)
    variant_3 = _get_sample_variant_3(use_1_based_coordinate=True)
    with TempDir() as tempdir:
      _ = tempdir.create_temp_file(
          suffix='.vcf', lines=_SAMPLE_HEADER_LINES + [VCF_LINE_1])
      _ = tempdir.create_temp_file(
          suffix='.vcf', lines=_SAMPLE_HEADER_LINES + [VCF_LINE_2, VCF_LINE_3])
      read_data = source_test_utils.read_from_source(
          VcfSource(os.path.join(tempdir.get_path(), '*.vcf'),
                    representative_header_lines=None,
                    sample_name_encoding=SampleNameEncoding.WITHOUT_FILE_PATH,
                    use_1_based_coordinate=True))
      self.assertEqual(3, len(read_data))
      self._assert_variants_equal([variant_1, variant_2, variant_3], read_data)

  def test_single_file_verify_details(self):
    # Note: the original performed an extra, unused read before the first
    # assertion; that dead call has been removed.
    variant_1 = _get_sample_variant_1()
    read_data = self._create_temp_file_and_read_records(
        _SAMPLE_HEADER_LINES + [VCF_LINE_1])
    self.assertEqual(1, len(read_data))
    self.assertEqual(variant_1, read_data[0])
    variant_2 = _get_sample_variant_2()
    variant_3 = _get_sample_variant_3()
    read_data = self._create_temp_file_and_read_records(
        _SAMPLE_HEADER_LINES + [VCF_LINE_1, VCF_LINE_2, VCF_LINE_3])
    self.assertEqual(3, len(read_data))
    self._assert_variants_equal([variant_1, variant_2, variant_3], read_data)

  def test_file_pattern_verify_details(self):
    variant_1 = _get_sample_variant_1()
    variant_2 = _get_sample_variant_2()
    variant_3 = _get_sample_variant_3()
    with TempDir() as tempdir:
      self._create_temp_vcf_file(_SAMPLE_HEADER_LINES + [VCF_LINE_1], tempdir)
      self._create_temp_vcf_file((_SAMPLE_HEADER_LINES +
                                  [VCF_LINE_2, VCF_LINE_3]),
                                 tempdir)
      read_data = self._read_records(os.path.join(tempdir.get_path(), '*.vcf'))
      self.assertEqual(3, len(read_data))
      self._assert_variants_equal([variant_1, variant_2, variant_3], read_data)

  def test_single_file_verify_details_encoded_sample_name_without_file(self):
    variant_1 = _get_sample_variant_1()
    read_data = self._create_temp_file_and_read_records(
        _SAMPLE_HEADER_LINES + [VCF_LINE_1],
        sample_name_encoding=SampleNameEncoding.WITHOUT_FILE_PATH)
    self.assertEqual(1, len(read_data))
    self.assertEqual(variant_1, read_data[0])
    variant_2 = _get_sample_variant_2()
    variant_3 = _get_sample_variant_3()
    read_data = self._create_temp_file_and_read_records(
        _SAMPLE_HEADER_LINES + [VCF_LINE_1, VCF_LINE_2, VCF_LINE_3],
        sample_name_encoding=SampleNameEncoding.WITHOUT_FILE_PATH)
    self.assertEqual(3, len(read_data))
    self._assert_variants_equal([variant_1, variant_2, variant_3], read_data)

  def test_single_file_verify_details_encoded_sample_name_with_file(self):
    read_data, file_name = (
        self._create_temp_file_and_return_records_with_file_name(
            _SAMPLE_HEADER_LINES + [VCF_LINE_1],
            sample_name_encoding=SampleNameEncoding.WITH_FILE_PATH))
    variant_1 = _get_sample_variant_1(file_name)
    self.assertEqual(1, len(read_data))
    self.assertEqual(variant_1, read_data[0])
    read_data, file_name = (
        self._create_temp_file_and_return_records_with_file_name(
            _SAMPLE_HEADER_LINES + [VCF_LINE_1, VCF_LINE_2, VCF_LINE_3],
            sample_name_encoding=SampleNameEncoding.WITH_FILE_PATH))
    variant_1 = _get_sample_variant_1(file_name)
    variant_2 = _get_sample_variant_2(file_name)
    variant_3 = _get_sample_variant_3(file_name)
    self.assertEqual(3, len(read_data))
    self._assert_variants_equal([variant_1, variant_2, variant_3], read_data)

  def test_single_file_verify_details_without_encoding(self):
    read_data, file_name = (
        self._create_temp_file_and_return_records_with_file_name(
            _SAMPLE_HEADER_LINES + [VCF_LINE_1],
            sample_name_encoding=SampleNameEncoding.NONE))
    variant_1 = _get_sample_variant_1(file_name='', use_hashing=False)
    self.assertEqual(1, len(read_data))
    self.assertEqual(variant_1, read_data[0])
    read_data, file_name = (
        self._create_temp_file_and_return_records_with_file_name(
            _SAMPLE_HEADER_LINES + [VCF_LINE_1, VCF_LINE_2, VCF_LINE_3],
            sample_name_encoding=SampleNameEncoding.NONE))
    # With hashing disabled the file_name argument is ignored, so different
    # values here deliberately make no difference.
    variant_1 = _get_sample_variant_1(file_name='', use_hashing=False)
    variant_2 = _get_sample_variant_2(file_name='Name1', use_hashing=False)
    variant_3 = _get_sample_variant_3(file_name=file_name, use_hashing=False)
    self.assertEqual(3, len(read_data))
    self._assert_variants_equal([variant_1, variant_2, variant_3], read_data)

  @unittest.skipIf(VCF_FILE_DIR_MISSING, 'VCF test file directory is missing')
  def test_read_after_splitting(self):
    file_name = testdata_util.get_full_file_path('valid-4.1-large.vcf')
    source = VcfSource(file_name)
    splits = list(source.split(desired_bundle_size=500))
    self.assertGreater(len(splits), 1)
    sources_info = [(split.source, split.start_position, split.stop_position)
                    for split in splits]
    self.assertGreater(len(sources_info), 1)
    split_records = []
    for source_info in sources_info:
      split_records.extend(source_test_utils.read_from_source(*source_info))
    self.assertEqual(9882, len(split_records))

  def test_invalid_file(self):
    invalid_file_contents = self._get_invalid_file_contents()
    for content in invalid_file_contents:
      with TempDir() as tempdir, self.assertRaises(ValueError):
        self._read_records(self._create_temp_vcf_file(content, tempdir))
        self.fail('Invalid VCF file must throw an exception')
    # Try with multiple files (any one of them will throw an exception).
    with TempDir() as tempdir, self.assertRaises(ValueError):
      for content in invalid_file_contents:
        self._create_temp_vcf_file(content, tempdir)
      self._read_records(os.path.join(tempdir.get_path(), '*.vcf'))

  def test_allow_malformed_records(self):
    invalid_records = self._get_invalid_file_contents()
    # Invalid records should not raise errors
    for content in invalid_records:
      with TempDir() as tempdir:
        self._read_records(self._create_temp_vcf_file(content, tempdir),
                           allow_malformed_records=True)

  def test_no_samples(self):
    header_line = '#CHROM POS ID REF ALT QUAL FILTER INFO\n'
    record_line = '19 123 . G A . PASS AF=0.2'
    expected_variant = Variant(
        reference_name='19', start=122, end=123, reference_bases='G',
        alternate_bases=['A'], filters=['PASS'], info={'AF': [0.2]})
    read_data = self._create_temp_file_and_read_records(
        _SAMPLE_HEADER_LINES[:-1] + [header_line, record_line])
    self.assertEqual(1, len(read_data))
    self.assertEqual(expected_variant, read_data[0])

  def test_no_info(self):
    record_line = 'chr19 123 . . . . . . GT . .'
    expected_variant = Variant(reference_name='chr19', start=122, end=123)
    expected_variant.calls.append(
        VariantCall(sample_id=hash_name('Sample1'),
                    name='Sample1',
                    genotype=[vcfio.MISSING_GENOTYPE_VALUE]))
    expected_variant.calls.append(
        VariantCall(sample_id=hash_name('Sample2'),
                    name='Sample2',
                    genotype=[vcfio.MISSING_GENOTYPE_VALUE]))
    read_data = self._create_temp_file_and_read_records(
        _SAMPLE_HEADER_LINES + [record_line])
    self.assertEqual(1, len(read_data))
    self.assertEqual(expected_variant, read_data[0])

  def test_info_numbers_and_types(self):
    # Covers the A/G/R/0/. "Number" specifiers across INFO types.
    info_headers = [
        '##INFO=<ID=HA,Number=A,Type=String,Description="StringInfo_A">\n',
        '##INFO=<ID=HG,Number=G,Type=Integer,Description="IntInfo_G">\n',
        '##INFO=<ID=HR,Number=R,Type=Character,Description="ChrInfo_R">\n',
        '##INFO=<ID=HF,Number=0,Type=Flag,Description="FlagInfo">\n',
        '##INFO=<ID=HU,Number=.,Type=Float,Description="FloatInfo_variable">\n']
    record_lines = [
        '19 2 . A T,C . . HA=a1,a2;HG=1,2,3;HR=a,b,c;HF;HU=0.1 GT 1/0 0/1\n',
        '19 124 . A T . . HG=3,4,5;HR=d,e;HU=1.1,1.2 GT 0/0 0/1']
    variant_1 = Variant(
        reference_name='19', start=1, end=2, reference_bases='A',
        alternate_bases=['T', 'C'],
        info={'HA': ['a1', 'a2'], 'HG': [1, 2, 3], 'HR': ['a', 'b', 'c'],
              'HF': True, 'HU': [0.1]})
    variant_1.calls.append(VariantCall(sample_id=hash_name('Sample1'),
                                       name='Sample1',
                                       genotype=[1, 0]))
    variant_1.calls.append(VariantCall(sample_id=hash_name('Sample2'),
                                       name='Sample2',
                                       genotype=[0, 1]))
    variant_2 = Variant(
        reference_name='19', start=123, end=124, reference_bases='A',
        alternate_bases=['T'],
        info={'HG': [3, 4, 5], 'HR': ['d', 'e'], 'HU': [1.1, 1.2]})
    variant_2.calls.append(VariantCall(sample_id=hash_name('Sample1'),
                                       name='Sample1',
                                       genotype=[0, 0]))
    variant_2.calls.append(VariantCall(sample_id=hash_name('Sample2'),
                                       name='Sample2',
                                       genotype=[0, 1]))
    read_data = self._create_temp_file_and_read_records(
        info_headers + _SAMPLE_HEADER_LINES[1:] + record_lines)
    self.assertEqual(2, len(read_data))
    self._assert_variants_equal([variant_1, variant_2], read_data)

  def test_use_of_representative_header(self):
    # Info field `HU` is defined as Float in file header while data is String.
    # This results in parser failure. We test if parser completes successfully
    # when a representative headers with String definition for field `HU` is
    # given.
    file_content = [
        '##INFO=<ID=HU,Number=.,Type=Float,Description="Info">\n',
        '##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\r\n',
        '#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample1 Sample2\r\n',
        '19 2 . A T . . HU=a,b GT 0/0 0/1\n',]
    representative_header_lines = [
        '##INFO=<ID=HU,Number=.,Type=String,Description="Info">\n',
        '##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\r\n',]
    variant = Variant(
        reference_name='19', start=1, end=2, reference_bases='A',
        alternate_bases=['T'], info={'HU': ['a', 'b']})
    variant.calls.append(VariantCall(sample_id=hash_name('Sample1'),
                                     name='Sample1',
                                     genotype=[0, 0]))
    variant.calls.append(VariantCall(sample_id=hash_name('Sample2'),
                                     name='Sample2',
                                     genotype=[0, 1]))
    # `file_headers` is used.
    read_data = self._create_temp_file_and_read_records(file_content)
    # Pysam expects Float value for HU, and returns Nones when list is given.
    self.assertEqual([None, None], read_data[0].info['HU'])
    # `representative_header` is used.
    read_data = self._create_temp_file_and_read_records(
        file_content, representative_header_lines)
    self.assertEqual(1, len(read_data))
    self._assert_variants_equal([variant], read_data)

  def test_use_of_representative_header_two_files(self):
    # Info field `HU` is defined as Float in file header while data is String.
    # This results in parser failure. We test if parser completes successfully
    # when a representative headers with String definition for field `HU` is
    # given.
    file_content_1 = [
        '##INFO=<ID=HU,Number=.,Type=Float,Descri\n',
        '##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\r\n',
        '#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tSample1\r\n',
        '9\t2\t.\tA\tT\t.\t.\tHU=a,b\tGT\t0/0']
    file_content_2 = [
        '##INFO=<ID=HU,Number=.,Type=Float,Descri\n',
        '##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\r\n',
        '#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tSample2\r\n',
        '19\t2\t.\tA\tT\t.\t.\tHU=a,b\tGT\t0/1\n',]
    representative_header_lines = [
        '##INFO=<ID=HU,Number=.,Type=String,Description="Info">\n',
        '##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\r\n',]
    variant_1 = Variant(
        reference_name='9', start=1, end=2, reference_bases='A',
        alternate_bases=['T'], info={'HU': ['a', 'b']})
    variant_1.calls.append(VariantCall(sample_id=hash_name('Sample1'),
                                       name='Sample1', genotype=[0, 0]))
    variant_2 = Variant(
        reference_name='19', start=1, end=2, reference_bases='A',
        alternate_bases=['T'], info={'HU': ['a', 'b']})
    variant_2.calls.append(VariantCall(sample_id=hash_name('Sample2'),
                                       name='Sample2', genotype=[0, 1]))
    read_data_1 = self._create_temp_file_and_read_records(
        file_content_1, representative_header_lines)
    self.assertEqual(1, len(read_data_1))
    self._assert_variants_equal([variant_1], read_data_1)
    read_data_2 = self._create_temp_file_and_read_records(
        file_content_2, representative_header_lines)
    self.assertEqual(1, len(read_data_2))
    self._assert_variants_equal([variant_2], read_data_2)

  def test_end_info_key(self):
    # An END INFO field overrides the record's computed end position.
    end_info_header_line = (
        '##INFO=<ID=END,Number=1,Type=Integer,Description="End of record.">\n')
    record_lines = ['19 123 . A T . . END=1111 GT 1/0 0/1\n',
                    '19 123 . A T . . . GT 0/1 1/1\n']
    variant_1 = Variant(
        reference_name='19', start=122, end=1111, reference_bases='A',
        alternate_bases=['T'])
    variant_1.calls.append(VariantCall(sample_id=hash_name('Sample1'),
                                       name='Sample1',
                                       genotype=[1, 0]))
    variant_1.calls.append(VariantCall(sample_id=hash_name('Sample2'),
                                       name='Sample2',
                                       genotype=[0, 1]))
    variant_2 = Variant(
        reference_name='19', start=122, end=123, reference_bases='A',
        alternate_bases=['T'])
    variant_2.calls.append(VariantCall(sample_id=hash_name('Sample1'),
                                       name='Sample1',
                                       genotype=[0, 1]))
    variant_2.calls.append(VariantCall(sample_id=hash_name('Sample2'),
                                       name='Sample2',
                                       genotype=[1, 1]))
    read_data = self._create_temp_file_and_read_records(
        [end_info_header_line] + _SAMPLE_HEADER_LINES[1:] + record_lines)
    self.assertEqual(2, len(read_data))
    self._assert_variants_equal([variant_1, variant_2], read_data)

  def test_end_info_key_unknown_number(self):
    end_info_header_line = (
        '##INFO=<ID=END,Number=.,Type=Integer,Description="End of record.">\n')
    record_lines = ['19 123 . A T . . END=1111 GT 1/0 0/1\n']
    variant_1 = Variant(
        reference_name='19', start=122, end=1111, reference_bases='A',
        alternate_bases=['T'])
    variant_1.calls.append(VariantCall(sample_id=hash_name('Sample1'),
                                       name='Sample1',
                                       genotype=[1, 0]))
    variant_1.calls.append(VariantCall(sample_id=hash_name('Sample2'),
                                       name='Sample2',
                                       genotype=[0, 1]))
    read_data = self._create_temp_file_and_read_records(
        [end_info_header_line] + _SAMPLE_HEADER_LINES[1:] + record_lines)
    self.assertEqual(1, len(read_data))
    self._assert_variants_equal([variant_1], read_data)

  def test_end_info_key_unknown_number_invalid(self):
    end_info_header_line = (
        '##INFO=<ID=END,Number=.,Type=Integer,Description="End of record.">\n')
    # PySam should only take first END field.
    variant = Variant(
        reference_name='19', start=122, end=150, reference_bases='A',
        alternate_bases=['T'])
    variant.calls.append(VariantCall(sample_id=hash_name('Sample1'),
                                     name='Sample1',
                                     genotype=[1, 0]))
    variant.calls.append(VariantCall(sample_id=hash_name('Sample2'),
                                     name='Sample2',
                                     genotype=[0, 1]))
    read_data = self._create_temp_file_and_read_records(
        [end_info_header_line] + _SAMPLE_HEADER_LINES[1:] +
        ['19 123 . A T . . END=150,160 GT 1/0 0/1\n'])
    self.assertEqual(1, len(read_data))
    self._assert_variants_equal([variant], read_data)
    # END should be rounded down.
    read_data = self._create_temp_file_and_read_records(
        [end_info_header_line] + _SAMPLE_HEADER_LINES[1:] +
        ['19 123 . A T . . END=150.9 GT 1/0 0/1\n'])
    self.assertEqual(1, len(read_data))
    self._assert_variants_equal([variant], read_data)
    # END should not be a string.
    with self.assertRaises(ValueError):
      self._create_temp_file_and_read_records(
          [end_info_header_line] + _SAMPLE_HEADER_LINES[1:] +
          ['19 123 . A T . . END=text GT 1/0 0/1\n'])

  def test_custom_phaseset(self):
    phaseset_header_line = (
        '##FORMAT=<ID=PS,Number=1,Type=Integer,Description="Phaseset">\n')
    record_lines = ['19 123 . A T . . . GT:PS 1|0:1111 0/1:.\n',
                    '19 121 . A T . . . GT:PS 1|0:2222 0/1:2222\n']
    variant_1 = Variant(
        reference_name='19', start=122, end=123, reference_bases='A',
        alternate_bases=['T'])
    variant_1.calls.append(
        VariantCall(sample_id=hash_name('Sample1'), name='Sample1',
                    genotype=[1, 0], phaseset='1111'))
    variant_1.calls.append(VariantCall(sample_id=hash_name('Sample2'),
                                       name='Sample2', genotype=[0, 1]))
    variant_2 = Variant(
        reference_name='19', start=120, end=121, reference_bases='A',
        alternate_bases=['T'])
    variant_2.calls.append(
        VariantCall(sample_id=hash_name('Sample1'), name='Sample1',
                    genotype=[1, 0], phaseset='2222'))
    variant_2.calls.append(
        VariantCall(sample_id=hash_name('Sample2'), name='Sample2',
                    genotype=[0, 1], phaseset='2222'))
    read_data = self._create_temp_file_and_read_records(
        [phaseset_header_line] + _SAMPLE_HEADER_LINES[1:] + record_lines)
    self.assertEqual(2, len(read_data))
    self._assert_variants_equal([variant_1, variant_2], read_data)

  def test_format_numbers(self):
    format_headers = [
        '##FORMAT=<ID=FU,Number=.,Type=String,Description="Format_variable">\n',
        '##FORMAT=<ID=F1,Number=1,Type=Integer,Description="Format_1">\n',
        '##FORMAT=<ID=F2,Number=2,Type=Character,Description="Format_2">\n',
        '##FORMAT=<ID=AO,Number=A,Type=Integer,Description="Format_3">\n',
        '##FORMAT=<ID=AD,Number=G,Type=Integer,Description="Format_4">\n',]
    record_lines = [
        ('19 2 . A T,C . . . '
         'GT:FU:F1:F2:AO:AD 1/0:a1:3:a,b:1:3,4 '
         '0/1:a2,a3:4:b,c:1,2:3')]
    expected_variant = Variant(
        reference_name='19', start=1, end=2, reference_bases='A',
        alternate_bases=['T', 'C'])
    expected_variant.calls.append(VariantCall(
        sample_id=hash_name('Sample1'),
        name='Sample1',
        genotype=[1, 0],
        info={'FU': ['a1'], 'F1': 3, 'F2': ['a', 'b'], 'AO': [1],
              'AD': [3, 4]}))
    expected_variant.calls.append(VariantCall(
        sample_id=hash_name('Sample2'),
        name='Sample2',
        genotype=[0, 1],
        info={'FU': ['a2', 'a3'], 'F1': 4, 'F2': ['b', 'c'], 'AO': [1, 2],
              'AD': [3]}))
    read_data = self._create_temp_file_and_read_records(
        format_headers + _SAMPLE_HEADER_LINES[1:] + record_lines)
    self.assertEqual(1, len(read_data))
    self.assertEqual(expected_variant, read_data[0])

  def test_pipeline_read_single_file(self):
    with TempDir() as tempdir:
      file_name = self._create_temp_vcf_file(
          _SAMPLE_HEADER_LINES + _SAMPLE_TEXT_LINES, tempdir)
      self._assert_pipeline_read_files_record_count_equal(
          file_name, len(_SAMPLE_TEXT_LINES))

  def test_pipeline_read_all_single_file(self):
    with TempDir() as tempdir:
      file_name = self._create_temp_vcf_file(
          _SAMPLE_HEADER_LINES + _SAMPLE_TEXT_LINES, tempdir)
      self._assert_pipeline_read_files_record_count_equal(
          file_name, len(_SAMPLE_TEXT_LINES), use_read_all=True)

  @unittest.skipIf(VCF_FILE_DIR_MISSING, 'VCF test file directory is missing')
  def test_pipeline_read_single_file_large(self):
    self._assert_pipeline_read_files_record_count_equal(
        testdata_util.get_full_file_path('valid-4.1-large.vcf'), 9882)

  @unittest.skipIf(VCF_FILE_DIR_MISSING, 'VCF test file directory is missing')
  def test_pipeline_read_all_single_file_large(self):
    # Exercise the ReadAll transform; the non-read-all twin above covers
    # ReadFromVcf (the original omitted use_read_all=True here).
    self._assert_pipeline_read_files_record_count_equal(
        testdata_util.get_full_file_path('valid-4.1-large.vcf'), 9882,
        use_read_all=True)

  @unittest.skipIf(VCF_FILE_DIR_MISSING, 'VCF test file directory is missing')
  def test_pipeline_read_file_pattern_large(self):
    self._assert_pipeline_read_files_record_count_equal(
        os.path.join(testdata_util.get_full_dir(), 'valid-*.vcf'), 9900)

  @unittest.skipIf(VCF_FILE_DIR_MISSING, 'VCF test file directory is missing')
  def test_pipeline_read_all_file_pattern_large(self):
    # Exercise the ReadAll transform with a file pattern (the original
    # omitted use_read_all=True here).
    self._assert_pipeline_read_files_record_count_equal(
        os.path.join(testdata_util.get_full_dir(), 'valid-*.vcf'), 9900,
        use_read_all=True)

  @unittest.skipIf(VCF_FILE_DIR_MISSING, 'VCF test file directory is missing')
  def test_pipeline_read_all_gzip_large(self):
    self._assert_pipeline_read_files_record_count_equal(
        os.path.join(testdata_util.get_full_dir(), 'valid-*.vcf.gz'), 9900,
        use_read_all=True)

  @unittest.skipIf(VCF_FILE_DIR_MISSING, 'VCF test file directory is missing')
  def test_pipeline_read_all_multiple_files_large(self):
    pipeline = TestPipeline()
    pcoll = (pipeline
             | 'Create' >> beam.Create(
                 [testdata_util.get_full_file_path('valid-4.0.vcf'),
                  testdata_util.get_full_file_path('valid-4.1-large.vcf'),
                  testdata_util.get_full_file_path('valid-4.2.vcf')])
             | 'Read' >> ReadAllFromVcf())
    assert_that(pcoll, asserts.count_equals_to(9900))
    pipeline.run()

  def test_pipeline_read_all_gzip(self):
    with TempDir() as tempdir:
      file_name_1 = self._create_temp_vcf_file(
          _SAMPLE_HEADER_LINES + _SAMPLE_TEXT_LINES, tempdir,
          compression_type=CompressionTypes.GZIP)
      file_name_2 = self._create_temp_vcf_file(
          _SAMPLE_HEADER_LINES + _SAMPLE_TEXT_LINES, tempdir,
          compression_type=CompressionTypes.GZIP)
      pipeline = TestPipeline()
      pcoll = (pipeline
               | 'Create' >> beam.Create([file_name_1, file_name_2])
               | 'Read' >> ReadAllFromVcf())
      assert_that(pcoll, asserts.count_equals_to(2 * len(_SAMPLE_TEXT_LINES)))
      pipeline.run()

  def test_pipeline_read_all_bzip2(self):
    with TempDir() as tempdir:
      file_name_1 = self._create_temp_vcf_file(
          _SAMPLE_HEADER_LINES + _SAMPLE_TEXT_LINES, tempdir,
          compression_type=CompressionTypes.BZIP2)
      file_name_2 = self._create_temp_vcf_file(
          _SAMPLE_HEADER_LINES + _SAMPLE_TEXT_LINES, tempdir,
          compression_type=CompressionTypes.BZIP2)
      pipeline = TestPipeline()
      pcoll = (pipeline
               | 'Create' >> beam.Create([file_name_1, file_name_2])
               | 'Read' >> ReadAllFromVcf())
      assert_that(pcoll, asserts.count_equals_to(2 * len(_SAMPLE_TEXT_LINES)))
      pipeline.run()

  def test_pipeline_read_all_multiple_files(self):
    with TempDir() as tempdir:
      file_name_1 = self._create_temp_vcf_file(
          _SAMPLE_HEADER_LINES + _SAMPLE_TEXT_LINES, tempdir)
      file_name_2 = self._create_temp_vcf_file(
          _SAMPLE_HEADER_LINES + _SAMPLE_TEXT_LINES, tempdir)
      pipeline = TestPipeline()
      pcoll = (pipeline
               | 'Create' >> beam.Create([file_name_1, file_name_2])
               | 'Read' >> ReadAllFromVcf())
      assert_that(pcoll, asserts.count_equals_to(2 * len(_SAMPLE_TEXT_LINES)))
      pipeline.run()

  def test_read_reentrant_without_splitting(self):
    with TempDir() as tempdir:
      file_name = self._create_temp_vcf_file(
          _SAMPLE_HEADER_LINES + _SAMPLE_TEXT_LINES, tempdir)
      source = VcfSource(file_name)
      source_test_utils.assert_reentrant_reads_succeed((source, None, None))

  def test_read_reentrant_after_splitting(self):
    with TempDir() as tempdir:
      file_name = self._create_temp_vcf_file(
          _SAMPLE_HEADER_LINES + _SAMPLE_TEXT_LINES, tempdir)
      source = VcfSource(file_name)
      splits = list(source.split(desired_bundle_size=100000))
      # assertEqual (not a bare assert) so the check survives python -O.
      self.assertEqual(1, len(splits))
      source_test_utils.assert_reentrant_reads_succeed(
          (splits[0].source, splits[0].start_position, splits[0].stop_position))

  def test_dynamic_work_rebalancing(self):
    with TempDir() as tempdir:
      file_name = self._create_temp_vcf_file(
          _SAMPLE_HEADER_LINES + _SAMPLE_TEXT_LINES, tempdir)
      source = VcfSource(file_name)
      splits = list(source.split(desired_bundle_size=100000))
      self.assertEqual(1, len(splits))
      source_test_utils.assert_split_at_fraction_exhaustive(
          splits[0].source, splits[0].start_position, splits[0].stop_position)
class VcfSinkTest(unittest.TestCase):
  """Tests encoding `Variant` objects back into VCF data lines."""

  def setUp(self):
    super().setUp()
    # NamedTemporaryFile is used only to mint a unique path; the file object
    # itself is discarded immediately and only its name is kept.
    self.path = tempfile.NamedTemporaryFile(suffix='.vcf').name
    pairs = [(_get_sample_variant_1(), VCF_LINE_1),
             (_get_sample_variant_2(), VCF_LINE_2),
             (_get_sample_variant_3(), VCF_LINE_3),
             (_get_sample_non_variant(), GVCF_LINE)]
    # Transpose the (variant, line) pairs into parallel tuples.
    self.variants, self.variant_lines = zip(*pairs)
def _assert_variant_lines_equal(self, actual, expected):
actual_fields = actual.strip().split('\t')
expected_fields = expected.strip().split('\t')
self.assertEqual(len(actual_fields), len(expected_fields))
self.assertEqual(actual_fields[0], expected_fields[0])
self.assertEqual(actual_fields[1], expected_fields[1])
self.assertCountEqual(actual_fields[2].split(';'),
expected_fields[2].split(';'))
self.assertEqual(actual_fields[3], expected_fields[3])
self.assertCountEqual(actual_fields[4].split(','),
expected_fields[4].split(','))
self.assertEqual(actual_fields[5], actual_fields[5])
self.assertCountEqual(actual_fields[6].split(';'),
expected_fields[6].split(';'))
self.assertCountEqual(actual_fields[7].split(';'),
expected_fields[7].split(';'))
self.assertCountEqual(actual_fields[8].split(':'),
expected_fields[8].split(':'))
# Assert calls are the same
for call, expected_call in zip(actual_fields[9:], expected_fields[9:]):
actual_split = call.split(':')
expected_split = expected_call.split(':')
# Compare the first and third values of the GT field
self.assertEqual(actual_split[0], expected_split[0])
# Compare the rest of the items ignoring order
self.assertCountEqual(actual_split[1:], expected_split[1:])
def _get_coder(self, bq_uses_1_based_coordinate=False):
return vcfio._ToVcfRecordCoder(bq_uses_1_based_coordinate)
def test_to_vcf_line_0_based(self):
coder = self._get_coder()
for variant, line in zip(self.variants, self.variant_lines):
self._assert_variant_lines_equal(
coder.encode(variant).decode('utf-8'), line)
empty_variant = vcfio.Variant()
empty_line = '\t'.join(['.' for _ in range(9)])
self._assert_variant_lines_equal(
coder.encode(empty_variant).decode('utf-8'), empty_line)
def test_to_vcf_line_1_based(self):
coder = self._get_coder(bq_uses_1_based_coordinate=True)
variants = [
_get_sample_variant_1(use_1_based_coordinate=True),
_get_sample_variant_2(use_1_based_coordinate=True),
_get_sample_variant_3(use_1_based_coordinate=True),
_get_sample_non_variant(use_1_based_coordinate=True)]
for variant, line in zip(variants, self.variant_lines):
self._assert_variant_lines_equal(
coder.encode(variant).decode('utf-8'), line)
empty_variant = vcfio.Variant()
empty_line = '\t'.join(['.' for _ in range(9)])
self._assert_variant_lines_equal(
coder.encode(empty_variant).decode('utf-8'), empty_line)
def test_missing_info_key(self):
coder = self._get_coder()
variant = Variant()
variant.calls.append(VariantCall(sample_id=hash_name('Sample1'),
name='Sample1',
genotype=[0, 1],
info={'GQ': 10, 'AF': 20}))
variant.calls.append(VariantCall(sample_id=hash_name('Sample2'),
name='Sample2', genotype=[0, 1],
info={'AF': 20}))
expected = ('. . . . . . . . GT:AF:GQ 0/1:20:10 '
'0/1:20:.\n')
self._assert_variant_lines_equal(
coder.encode(variant).decode('utf-8'), expected)
def test_info_list(self):
coder = self._get_coder()
variant = Variant()
variant.calls.append(VariantCall(sample_id=hash_name('Sample'),
name='Sample',
genotype=[0, 1],
info={'LI': [1, None, 3]}))
expected = '. . . . . . . . GT:LI 0/1:1,.,3\n'
self._assert_variant_lines_equal(
coder.encode(variant).decode('utf-8'), expected)
def test_info_field_count(self):
coder = self._get_coder()
variant = Variant()
variant.info['NS'] = 3
variant.info['AF'] = [0.333, 0.667]
variant.info['DB'] = True
variant.info['CSQ'] = ['G|upstream_gene_variant||MODIFIER',
'T|||MODIFIER']
expected = ('. . . . . . . NS=3;AF=0.333,0.667;DB;'
'CSQ=G|upstream_gene_variant||MODIFIER,T|||MODIFIER .\n')
self._assert_variant_lines_equal(
coder.encode(variant).decode('utf-8'), expected)
def test_empty_sample_calls(self):
coder = self._get_coder()
variant = Variant()
variant.calls.append(
VariantCall(sample_id=hash_name('Sample2'), name='Sample2',
genotype=-1))
expected = '. . . . . . . . GT .\n'
self._assert_variant_lines_equal(
coder.encode(variant).decode('utf-8'), expected)
def test_missing_genotype(self):
coder = self._get_coder()
variant = Variant()
variant.calls.append(
VariantCall(sample_id=hash_name('Sample'), name='Sample',
genotype=[1, vcfio.MISSING_GENOTYPE_VALUE]))
expected = '. . . . . . . . GT 1/.\n'
self._assert_variant_lines_equal(
coder.encode(variant).decode('utf-8'), expected)
def test_triploid_genotype(self):
coder = self._get_coder()
variant = Variant()
variant.calls.append(VariantCall(
sample_id=hash_name('Sample'), name='Sample', genotype=[1, 0, 1]))
expected = '. . . . . . . . GT 1/0/1\n'
self._assert_variant_lines_equal(
coder.encode(variant).decode('utf-8'), expected)
def test_write_dataflow_0_based(self):
pipeline = TestPipeline()
pcoll = pipeline | beam.Create(self.variants, reshuffle=False)
_ = pcoll | 'Write' >> vcfio.WriteToVcf(
self.path, bq_uses_1_based_coordinate=False)
pipeline.run()
read_result = []
for file_name in glob.glob(self.path + '*'):
with open(file_name, 'r') as f:
read_result.extend(f.read().splitlines())
for actual, expected in zip(read_result, self.variant_lines):
self._assert_variant_lines_equal(actual, expected)
def test_write_dataflow_1_based(self):
variants = [
_get_sample_variant_1(use_1_based_coordinate=True),
_get_sample_variant_2(use_1_based_coordinate=True),
_get_sample_variant_3(use_1_based_coordinate=True),
_get_sample_non_variant(use_1_based_coordinate=True)]
pipeline = TestPipeline()
pcoll = pipeline | beam.Create(variants, reshuffle=False)
_ = pcoll | 'Write' >> vcfio.WriteToVcf(self.path)
pipeline.run()
read_result = []
for file_name in glob.glob(self.path + '*'):
with open(file_name, 'r') as f:
read_result.extend(f.read().splitlines())
for actual, expected in zip(read_result, self.variant_lines):
self._assert_variant_lines_equal(actual, expected)
def test_write_dataflow_auto_compression(self):
pipeline = TestPipeline()
pcoll = pipeline | beam.Create(self.variants, reshuffle=False)
_ = pcoll | 'Write' >> vcfio.WriteToVcf(
self.path + '.gz',
compression_type=CompressionTypes.AUTO,
bq_uses_1_based_coordinate=False)
pipeline.run()
read_result = []
for file_name in glob.glob(self.path + '*'):
with gzip.GzipFile(file_name, 'r') as f:
read_result.extend(f.read().splitlines())
for actual, expected in zip(read_result, self.variant_lines):
self._assert_variant_lines_equal(actual.decode('utf-8'), expected)
def test_write_dataflow_header(self):
pipeline = TestPipeline()
pcoll = pipeline | 'Create' >> beam.Create(self.variants, reshuffle=False)
headers = ['foo\n']
_ = pcoll | 'Write' >> vcfio.WriteToVcf(
self.path + '.gz',
compression_type=CompressionTypes.AUTO,
headers=headers,
bq_uses_1_based_coordinate=False)
pipeline.run()
read_result = []
for file_name in glob.glob(self.path + '*'):
with gzip.GzipFile(file_name, 'r') as f:
read_result.extend(f.read().splitlines())
self.assertEqual(read_result[0].decode('utf-8'), 'foo')
for actual, expected in zip(read_result[1:], self.variant_lines):
self._assert_variant_lines_equal(actual.decode('utf-8'), expected)
if __name__ == '__main__':
    # Run the test suite with INFO-level logging when executed directly.
    logging.getLogger().setLevel(logging.INFO)
    unittest.main()
|
A-1 Payday Loans in Vicksburg, Mississippi offers payday loans, cash advances, and installment loans. Payday loans, also known as cash advances, are short-term loans that are paid back in full on or near your next pay date. Payday loan amounts vary from as little as $50.00 to over $400.00. Installment loans are higher-amount loans that are paid back in monthly installments, usually over 6 to 10 months. Installment loan amounts vary from $500.00 to over $1,000.00. With lower payment amounts, installment loans are sometimes a more affordable option than payday loans.
Do you own A-1 Payday Loans?
Due to the unique way that the eBusinessPages Business Directory is structured, a Premium + Verified Listing is the most powerful way to get more clients to contact A-1 Payday Loans. Not only will your listing be more noticeable and more trustworthy, it will also be displayed more prominently in our category (Check Cashing Service and Parent Categories) and location (Vicksburg, MS and USA Wide) listings. A-1 Payday Loans will be displayed in your competitors' listings, while no ads will be shown in your own listing.
Starting at $2.78 for a Premium Verified Listing, there is every reason to give A-1 Payday Loans the eBusinessPages Premium treatment today.
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2004-2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
from __future__ import with_statement
import pkg_resources
import re
from genshi.builder import tag
from trac.attachment import AttachmentModule
from trac.config import IntOption
from trac.core import *
from trac.mimeview.api import IContentConverter, Mimeview
from trac.perm import IPermissionRequestor
from trac.resource import *
from trac.search import ISearchSource, search_to_sql, shorten_result
from trac.timeline.api import ITimelineEventProvider
from trac.util import get_reporter_id
from trac.util.datefmt import from_utimestamp, to_utimestamp
from trac.util.text import shorten_line
from trac.util.translation import _, tag_
from trac.versioncontrol.diff import get_diff_options, diff_blocks
from trac.web.api import IRequestHandler
from trac.web.chrome import (Chrome, INavigationContributor, ITemplateProvider,
add_ctxtnav, add_link, add_notice, add_script,
add_stylesheet, add_warning, prevnext_nav,
web_context)
from trac.wiki.api import IWikiPageManipulator, WikiSystem, validate_page_name
from trac.wiki.formatter import format_to, OneLinerFormatter
from trac.wiki.model import WikiPage
class InvalidWikiPage(TracError):
    """Raised when a Wiki page does not pass validation.

    :deprecated: Not used anymore since 0.11.
    """
class WikiModule(Component):
    """Trac component implementing the wiki web interface: page viewing,
    editing, history, diffs, renaming and deletion, plus wiki entries in
    the timeline and search results."""

    implements(IContentConverter, INavigationContributor, IPermissionRequestor,
               IRequestHandler, ITimelineEventProvider, ISearchSource,
               ITemplateProvider)

    page_manipulators = ExtensionPoint(IWikiPageManipulator)

    max_size = IntOption('wiki', 'max_size', 262144,
        """Maximum allowed wiki page size in bytes. (''since 0.11.2'')""")

    # Pages under this prefix can be used as templates for new pages.
    PAGE_TEMPLATES_PREFIX = 'PageTemplates/'
    DEFAULT_PAGE_TEMPLATE = 'DefaultPage'

    # IContentConverter methods

    def get_supported_conversions(self):
        yield ('txt', _('Plain Text'), 'txt', 'text/x-trac-wiki', 'text/plain',
               9)

    def convert_content(self, req, mimetype, content, key):
        return (content, 'text/plain;charset=utf-8')

    # INavigationContributor methods

    def get_active_navigation_item(self, req):
        return 'wiki'

    def get_navigation_items(self, req):
        if 'WIKI_VIEW' in req.perm('wiki'):
            yield ('mainnav', 'wiki',
                   tag.a(_('Wiki'), href=req.href.wiki(), accesskey=1))
            yield ('metanav', 'help',
                   tag.a(_('Help/Guide'), href=req.href.wiki('TracGuide'),
                         accesskey=6))

    # IPermissionRequestor methods

    def get_permission_actions(self):
        actions = ['WIKI_CREATE', 'WIKI_DELETE', 'WIKI_MODIFY', 'WIKI_RENAME',
                   'WIKI_VIEW']
        # WIKI_ADMIN is a meta-permission granting all of the above.
        return actions + [('WIKI_ADMIN', actions)]

    # IRequestHandler methods

    def match_request(self, req):
        match = re.match(r'/wiki(?:/(.+))?$', req.path_info)
        if match:
            if match.group(1):
                req.args['page'] = match.group(1)
            return 1

    def process_request(self, req):
        """Dispatch a /wiki request to the save/delete/rename/render helpers
        according to the `action` argument and the HTTP method."""
        action = req.args.get('action', 'view')
        pagename = req.args.get('page', 'WikiStart')
        version = req.args.get('version')
        old_version = req.args.get('old_version')

        # Normalize names with stray slashes by redirecting to the clean form.
        if pagename.startswith('/') or pagename.endswith('/') or \
                '//' in pagename:
            pagename = re.sub(r'/{2,}', '/', pagename.strip('/'))
            req.redirect(req.href.wiki(pagename))
        if not validate_page_name(pagename):
            raise TracError(_("Invalid Wiki page name '%(name)s'",
                              name=pagename))

        page = WikiPage(self.env, pagename)
        versioned_page = WikiPage(self.env, pagename, version=version)

        req.perm(page.resource).require('WIKI_VIEW')
        req.perm(versioned_page.resource).require('WIKI_VIEW')

        if version and versioned_page.version != int(version):
            raise ResourceNotFound(
                _('No version "%(num)s" for Wiki page "%(name)s"',
                  num=version, name=page.name))

        add_stylesheet(req, 'common/css/wiki.css')

        if req.method == 'POST':
            if action == 'edit':
                if 'cancel' in req.args:
                    req.redirect(req.href.wiki(page.name))

                # A collision means someone saved a new version while this
                # editor was open.
                has_collision = int(version) != page.version
                for a in ('preview', 'diff', 'merge'):
                    if a in req.args:
                        action = a
                        break
                versioned_page.text = req.args.get('text')
                valid = self._validate(req, versioned_page)

                if action == 'edit' and not has_collision and valid:
                    return self._do_save(req, versioned_page)
                else:
                    return self._render_editor(req, page, action,
                                               has_collision)
            elif action == 'delete':
                self._do_delete(req, versioned_page)
            elif action == 'rename':
                return self._do_rename(req, page)
            elif action == 'diff':
                style, options, diff_data = get_diff_options(req)
                contextall = diff_data['options']['contextall']
                req.redirect(req.href.wiki(versioned_page.name, action='diff',
                                           old_version=old_version,
                                           version=version,
                                           contextall=contextall or None))
        elif action == 'delete':
            return self._render_confirm_delete(req, page)
        elif action == 'rename':
            return self._render_confirm_rename(req, page)
        elif action == 'edit':
            return self._render_editor(req, page)
        elif action == 'diff':
            return self._render_diff(req, versioned_page)
        elif action == 'history':
            return self._render_history(req, versioned_page)
        else:
            format = req.args.get('format')
            if format:
                # send_converted() raises RequestDone after sending the data.
                Mimeview(self.env).send_converted(req, 'text/x-trac-wiki',
                                                  versioned_page.text,
                                                  format, versioned_page.name)
            return self._render_view(req, versioned_page)

    # ITemplateProvider methods

    def get_htdocs_dirs(self):
        return []

    def get_templates_dirs(self):
        return [pkg_resources.resource_filename('trac.wiki', 'templates')]

    # Internal methods

    def _validate(self, req, page):
        """Check the submitted page against size limits and all registered
        `IWikiPageManipulator`s; collect warnings and return validity."""
        valid = True

        # Validate page size
        if len(req.args.get('text', '')) > self.max_size:
            add_warning(req, _('The wiki page is too long (must be less '
                               'than %(num)s characters)',
                               num=self.max_size))
            valid = False

        # Give the manipulators a pass at post-processing the page
        for manipulator in self.page_manipulators:
            for field, message in manipulator.validate_wiki_page(req, page):
                valid = False
                if field:
                    add_warning(req, _("The Wiki page field '%(field)s' is "
                                       "invalid: %(message)s",
                                       field=field, message=message))
                else:
                    add_warning(req, _("Invalid Wiki page: %(message)s",
                                       message=message))
        return valid

    def _page_data(self, req, page, action=''):
        """Return the common template data dict for a page/action pair."""
        title = get_resource_summary(self.env, page.resource)
        if action:
            title += ' (%s)' % action
        return {'page': page, 'action': action, 'title': title}

    def _prepare_diff(self, req, page, old_text, new_text,
                      old_version, new_version):
        """Build the diff data structures shared by the diff and editor
        views; returns `(diff_data, changes)` for the templates."""
        diff_style, diff_options, diff_data = get_diff_options(req)
        diff_context = 3
        for option in diff_options:
            if option.startswith('-U'):
                diff_context = int(option[2:])
                break
        if diff_context < 0:
            # -U with a negative value means unlimited context.
            diff_context = None
        diffs = diff_blocks(old_text, new_text, context=diff_context,
                            ignore_blank_lines='-B' in diff_options,
                            ignore_case='-i' in diff_options,
                            ignore_space_changes='-b' in diff_options)

        def version_info(v, last=0):
            return {'path': get_resource_name(self.env, page.resource),
                    # TRANSLATOR: wiki page
                    'rev': v or _('currently edited'),
                    'shortrev': v or last + 1,
                    'href': req.href.wiki(page.name, version=v) if v else None}

        changes = [{'diffs': diffs, 'props': [],
                    'new': version_info(new_version, old_version),
                    'old': version_info(old_version)}]

        add_stylesheet(req, 'common/css/diff.css')
        add_script(req, 'common/js/diff.js')
        return diff_data, changes

    def _do_delete(self, req, page):
        """Delete a page, a single version or a range of versions, then
        redirect with an appropriate notice."""
        if page.readonly:
            req.perm(page.resource).require('WIKI_ADMIN')
        else:
            req.perm(page.resource).require('WIKI_DELETE')

        if 'cancel' in req.args:
            req.redirect(get_resource_url(self.env, page.resource, req.href))

        version = int(req.args.get('version', 0)) or None
        old_version = int(req.args.get('old_version', 0)) or version

        with self.env.db_transaction as db:
            if version and old_version and version > old_version:
                # delete from `old_version` exclusive to `version` inclusive:
                for v in range(old_version, version):
                    page.delete(v + 1, db)
            else:
                # only delete that `version`, or the whole page if `None`
                page.delete(version, db)

        if not page.exists:
            add_notice(req, _("The page %(name)s has been deleted.",
                              name=page.name))
            req.redirect(req.href.wiki())
        else:
            if version and old_version and version > old_version + 1:
                add_notice(req, _('The versions %(from_)d to %(to)d of the '
                                  'page %(name)s have been deleted.',
                                  from_=old_version + 1, to=version, name=page.name))
            else:
                add_notice(req, _('The version %(version)d of the page '
                                  '%(name)s has been deleted.',
                                  version=version, name=page.name))
            req.redirect(req.href.wiki(page.name))

    def _do_rename(self, req, page):
        """Rename a page after validating the new name; optionally leave a
        redirection page at the old name."""
        if page.readonly:
            req.perm(page.resource).require('WIKI_ADMIN')
        else:
            req.perm(page.resource).require('WIKI_RENAME')

        if 'cancel' in req.args:
            req.redirect(get_resource_url(self.env, page.resource, req.href))

        old_name, old_version = page.name, page.version
        new_name = req.args.get('new_name', '')
        new_name = re.sub(r'/{2,}', '/', new_name.strip('/'))
        redirect = req.args.get('redirect')

        # verify input parameters
        warn = None
        if not new_name:
            warn = _("A new name is mandatory for a rename.")
        elif not validate_page_name(new_name):
            warn = _("The new name is invalid (a name which is separated "
                     "with slashes cannot be '.' or '..').")
        elif new_name == old_name:
            warn = _("The new name must be different from the old name.")
        elif WikiPage(self.env, new_name).exists:
            warn = _("The page %(name)s already exists.", name=new_name)
        if warn:
            add_warning(req, warn)
            return self._render_confirm_rename(req, page, new_name)

        with self.env.db_transaction as db:
            page.rename(new_name)
            if redirect:
                # Leave a breadcrumb page pointing at the new location.
                redirection = WikiPage(self.env, old_name, db=db)
                redirection.text = _('See [wiki:"%(name)s"].', name=new_name)
                author = get_reporter_id(req)
                comment = u'[wiki:"%s@%d" %s] \u2192 [wiki:"%s"].' % (
                    new_name, old_version, old_name, new_name)
                redirection.save(author, comment, req.remote_addr)

        req.redirect(req.href.wiki(old_name if redirect else new_name))

    def _do_save(self, req, page):
        """Persist an edited page and redirect to its view; falls back to
        re-rendering when the page was not actually modified."""
        if page.readonly:
            req.perm(page.resource).require('WIKI_ADMIN')
        elif not page.exists:
            req.perm(page.resource).require('WIKI_CREATE')
        else:
            req.perm(page.resource).require('WIKI_MODIFY')

        if 'WIKI_ADMIN' in req.perm(page.resource):
            # Modify the read-only flag if it has been changed and the user is
            # WIKI_ADMIN
            page.readonly = int('readonly' in req.args)

        try:
            page.save(get_reporter_id(req, 'author'), req.args.get('comment'),
                      req.remote_addr)
            add_notice(req, _("Your changes have been saved in version "
                              "%(version)s.", version=page.version))
            req.redirect(get_resource_url(self.env, page.resource, req.href,
                                          version=None))
        except TracError:
            add_warning(req, _("Page not modified, showing latest version."))
            return self._render_view(req, page)

    def _render_confirm_delete(self, req, page):
        """Render the confirmation screen for deleting a page, a version or
        a version range."""
        if page.readonly:
            req.perm(page.resource).require('WIKI_ADMIN')
        else:
            req.perm(page.resource).require('WIKI_DELETE')

        version = None
        if 'delete_version' in req.args:
            version = int(req.args.get('version', 0))
        old_version = int(req.args.get('old_version') or 0) or version

        what = 'multiple' if version and old_version \
                             and version - old_version > 1 \
               else 'single' if version else 'page'

        # Walk the history (newest first) to find the dates bounding the
        # versions that are about to be deleted.
        num_versions = 0
        new_date = None
        old_date = None
        for v, t, author, comment, ipnr in page.get_history():
            if (v <= version or what == 'page') and new_date is None:
                new_date = t
            if (v <= old_version and what == 'multiple' or
                num_versions > 1 and what == 'single'):
                break
            num_versions += 1
            old_date = t

        data = self._page_data(req, page, 'delete')
        data.update({'what': what, 'new_version': None, 'old_version': None,
                     'num_versions': num_versions, 'new_date': new_date,
                     'old_date': old_date})
        if version is not None:
            data.update({'new_version': version, 'old_version': old_version})
        self._wiki_ctxtnav(req, page)
        return 'wiki_delete.html', data, None

    def _render_confirm_rename(self, req, page, new_name=None):
        """Render the confirmation screen for renaming a page."""
        if page.readonly:
            req.perm(page.resource).require('WIKI_ADMIN')
        else:
            req.perm(page.resource).require('WIKI_RENAME')

        data = self._page_data(req, page, 'rename')
        data['new_name'] = new_name if new_name is not None else page.name
        self._wiki_ctxtnav(req, page)
        return 'wiki_rename.html', data, None

    def _render_diff(self, req, page):
        """Render the changes between two versions of a page."""
        if not page.exists:
            raise TracError(_('Version %(num)s of page "%(name)s" does not '
                              'exist',
                              num=req.args.get('version'), name=page.name))

        old_version = req.args.get('old_version')
        if old_version:
            old_version = int(old_version)
            if old_version == page.version:
                old_version = None
            elif old_version > page.version:
                # FIXME: what about reverse diffs?
                old_version = page.resource.version
                page = WikiPage(self.env, page.name, version=old_version)
                req.perm(page.resource).require('WIKI_VIEW')

        latest_page = WikiPage(self.env, page.name, version=None)
        req.perm(latest_page.resource).require('WIKI_VIEW')
        new_version = int(page.version)

        date = author = comment = ipnr = None
        num_changes = 0
        prev_version = next_version = None
        for version, t, a, c, i in latest_page.get_history():
            if version == new_version:
                date = t
                author = a or 'anonymous'
                comment = c or '--'
                ipnr = i or ''
            else:
                if version < new_version:
                    num_changes += 1
                    if not prev_version:
                        prev_version = version
                    if old_version is None or version == old_version:
                        old_version = version
                        break
                else:
                    next_version = version
        if not old_version:
            old_version = 0
        old_page = WikiPage(self.env, page.name, old_version)
        req.perm(old_page.resource).require('WIKI_VIEW')

        # -- text diffs
        old_text = old_page.text.splitlines()
        new_text = page.text.splitlines()
        diff_data, changes = self._prepare_diff(req, page, old_text, new_text,
                                                old_version, new_version)

        # -- prev/up/next links
        if prev_version:
            add_link(req, 'prev', req.href.wiki(page.name, action='diff',
                                                version=prev_version),
                     _('Version %(num)s', num=prev_version))
        add_link(req, 'up', req.href.wiki(page.name, action='history'),
                 _('Page history'))
        if next_version:
            add_link(req, 'next', req.href.wiki(page.name, action='diff',
                                                version=next_version),
                     _('Version %(num)s', num=next_version))

        data = self._page_data(req, page, 'diff')
        data.update({
            'change': {'date': date, 'author': author, 'ipnr': ipnr,
                       'comment': comment},
            'new_version': new_version, 'old_version': old_version,
            'latest_version': latest_page.version,
            'num_changes': num_changes,
            'longcol': 'Version', 'shortcol': 'v',
            'changes': changes,
            'diff': diff_data,
        })
        prevnext_nav(req, _('Previous Change'), _('Next Change'),
                     _('Wiki History'))
        return 'wiki_diff.html', data, None

    def _render_editor(self, req, page, action='edit', has_collision=False):
        """Render the page editor, including preview, side-by-side, diff,
        merge and collision modes."""
        if has_collision:
            if action == 'merge':
                page = WikiPage(self.env, page.name, version=None)
                req.perm(page.resource).require('WIKI_VIEW')
            else:
                action = 'collision'

        if page.readonly:
            req.perm(page.resource).require('WIKI_ADMIN')
        else:
            req.perm(page.resource).require('WIKI_MODIFY')

        original_text = page.text
        comment = req.args.get('comment', '')
        if 'text' in req.args:
            page.text = req.args.get('text')
        elif 'template' in req.args:
            # Pre-fill a new page from one of the PageTemplates/ pages.
            template = self.PAGE_TEMPLATES_PREFIX + req.args.get('template')
            template_page = WikiPage(self.env, template)
            if template_page and template_page.exists and \
                    'WIKI_VIEW' in req.perm(template_page.resource):
                page.text = template_page.text
        elif 'version' in req.args:
            # Load an older version's text to allow reverting to it.
            old_page = WikiPage(self.env, page.name,
                                version=int(req.args['version']))
            req.perm(page.resource).require('WIKI_VIEW')
            page.text = old_page.text
            comment = _("Reverted to version %(version)s.",
                        version=req.args['version'])
        if action in ('preview', 'diff'):
            page.readonly = 'readonly' in req.args

        author = get_reporter_id(req, 'author')
        defaults = {'editrows': 20}
        prefs = dict((key, req.session.get('wiki_%s' % key, defaults.get(key)))
                     for key in ('editrows', 'sidebyside'))

        if 'from_editor' in req.args:
            sidebyside = req.args.get('sidebyside') or None
            if sidebyside != prefs['sidebyside']:
                req.session.set('wiki_sidebyside', int(bool(sidebyside)), 0)
        else:
            sidebyside = prefs['sidebyside']

        if sidebyside:
            editrows = max(int(prefs['editrows']),
                           len(page.text.splitlines()) + 1)
        else:
            editrows = req.args.get('editrows')
            if editrows:
                if editrows != prefs['editrows']:
                    req.session.set('wiki_editrows', editrows,
                                    defaults['editrows'])
            else:
                editrows = prefs['editrows']

        data = self._page_data(req, page, action)
        context = web_context(req, page.resource)
        data.update({
            'author': author,
            'comment': comment,
            'edit_rows': editrows, 'sidebyside': sidebyside,
            'scroll_bar_pos': req.args.get('scroll_bar_pos', ''),
            'diff': None,
            'attachments': AttachmentModule(self.env).attachment_data(context),
        })
        if action in ('diff', 'merge'):
            old_text = original_text.splitlines() if original_text else []
            new_text = page.text.splitlines() if page.text else []
            diff_data, changes = self._prepare_diff(
                req, page, old_text, new_text, page.version, '')
            data.update({'diff': diff_data, 'changes': changes,
                         'action': 'preview', 'merge': action == 'merge',
                         'longcol': 'Version', 'shortcol': 'v'})
        elif sidebyside and action != 'collision':
            data['action'] = 'preview'

        self._wiki_ctxtnav(req, page)
        Chrome(self.env).add_wiki_toolbars(req)
        Chrome(self.env).add_auto_preview(req)
        add_script(req, 'common/js/folding.js')
        return 'wiki_edit.html', data, None

    def _render_history(self, req, page):
        """Extract the complete history for a given page.

        This information is used to present a changelog/history for a given
        page.
        """
        if not page.exists:
            raise TracError(_("Page %(name)s does not exist", name=page.name))
        data = self._page_data(req, page, 'history')

        history = []
        for version, date, author, comment, ipnr in page.get_history():
            history.append({
                'version': version,
                'date': date,
                'author': author,
                'comment': comment,
                'ipnr': ipnr
            })
        data.update({'history': history, 'resource': page.resource})
        add_ctxtnav(req, _("Back to %(wikipage)s", wikipage=page.name),
                    req.href.wiki(page.name))
        return 'history_view.html', data, None

    def _render_view(self, req, page):
        """Render a wiki page (possibly an old version), including the
        alternate-format links, navigation links and template data."""
        version = page.resource.version

        # Add registered converters
        if page.exists:
            for conversion in Mimeview(self.env).get_supported_conversions(
                    'text/x-trac-wiki'):
                conversion_href = req.href.wiki(page.name, version=version,
                                                format=conversion[0])
                # or...
                conversion_href = get_resource_url(self.env, page.resource,
                                                   req.href, format=conversion[0])
                add_link(req, 'alternate', conversion_href, conversion[1],
                         conversion[3])

        data = self._page_data(req, page)
        if page.name == 'WikiStart':
            data['title'] = ''

        ws = WikiSystem(self.env)
        context = web_context(req, page.resource)
        higher, related = [], []
        if not page.exists:
            if 'WIKI_CREATE' not in req.perm(page.resource):
                raise ResourceNotFound(_('Page %(name)s not found',
                                         name=page.name))
            # Suggest existing pages with similar names for missing pages.
            formatter = OneLinerFormatter(self.env, context)
            if '/' in page.name:
                parts = page.name.split('/')
                for i in range(len(parts) - 2, -1, -1):
                    name = '/'.join(parts[:i] + [parts[-1]])
                    if not ws.has_page(name):
                        higher.append(ws._format_link(formatter, 'wiki',
                                                      '/' + name, name, False))
            else:
                name = page.name
            name = name.lower()
            related = [each for each in ws.pages
                       if name in each.lower()
                       and 'WIKI_VIEW' in req.perm('wiki', each)]
            related.sort()
            related = [ws._format_link(formatter, 'wiki', '/' + each, each,
                                       False)
                       for each in related]

        latest_page = WikiPage(self.env, page.name, version=None)
        req.perm(latest_page.resource).require('WIKI_VIEW')

        prev_version = next_version = None
        if version:
            try:
                version = int(version)
                for hist in latest_page.get_history():
                    v = hist[0]
                    if v != version:
                        if v < version:
                            if not prev_version:
                                prev_version = v
                                break
                        else:
                            next_version = v
            except ValueError:
                version = None

        prefix = self.PAGE_TEMPLATES_PREFIX
        templates = [template[len(prefix):]
                     for template in ws.get_pages(prefix)
                     if 'WIKI_VIEW' in req.perm('wiki', template)]

        # -- prev/up/next links
        if prev_version:
            add_link(req, 'prev',
                     req.href.wiki(page.name, version=prev_version),
                     _('Version %(num)s', num=prev_version))

        parent = None
        if version:
            add_link(req, 'up', req.href.wiki(page.name, version=None),
                     _('View latest version'))
        elif '/' in page.name:
            parent = page.name[:page.name.rindex('/')]
            add_link(req, 'up', req.href.wiki(parent, version=None),
                     _("View parent page"))

        if next_version:
            add_link(req, 'next',
                     req.href.wiki(page.name, version=next_version),
                     _('Version %(num)s', num=next_version))

        # Add ctxtnav entries
        if version:
            prevnext_nav(req, _('Previous Version'), _('Next Version'),
                         _('View Latest Version'))
        else:
            if parent:
                add_ctxtnav(req, _('Up'), req.href.wiki(parent))
            self._wiki_ctxtnav(req, page)

        # Plugin content validation
        fields = {'text': page.text}
        for manipulator in self.page_manipulators:
            manipulator.prepare_wiki_page(req, page, fields)
        text = fields.get('text', '')

        data.update({
            'context': context,
            'text': text,
            'latest_version': latest_page.version,
            'attachments': AttachmentModule(self.env).attachment_data(context),
            'default_template': self.DEFAULT_PAGE_TEMPLATE,
            'templates': templates,
            'version': version,
            'higher': higher, 'related': related,
            'resourcepath_template': 'wiki_page_path.html',
        })
        add_script(req, 'common/js/folding.js')
        return 'wiki_view.html', data, None

    def _wiki_ctxtnav(self, req, page):
        """Add the normal wiki ctxtnav entries."""
        add_ctxtnav(req, _('Start Page'), req.href.wiki('WikiStart'))
        add_ctxtnav(req, _('Index'), req.href.wiki('TitleIndex'))
        if page.exists:
            add_ctxtnav(req, _('History'), req.href.wiki(page.name,
                                                         action='history'))

    # ITimelineEventProvider methods

    def get_timeline_filters(self, req):
        if 'WIKI_VIEW' in req.perm:
            yield ('wiki', _('Wiki changes'))

    def get_timeline_events(self, req, start, stop, filters):
        """Yield wiki page changes (and their attachments) in [start, stop]
        for the timeline view."""
        if 'wiki' in filters:
            wiki_realm = Resource('wiki')
            for ts, name, comment, author, version in self.env.db_query("""
                    SELECT time, name, comment, author, version FROM wiki
                    WHERE time>=%s AND time<=%s
                    """, (to_utimestamp(start), to_utimestamp(stop))):
                wiki_page = wiki_realm(id=name, version=version)
                if 'WIKI_VIEW' not in req.perm(wiki_page):
                    continue
                yield ('wiki', from_utimestamp(ts), author,
                       (wiki_page, comment))

            # Attachments
            for event in AttachmentModule(self.env).get_timeline_events(
                    req, wiki_realm, start, stop):
                yield event

    def render_timeline_event(self, context, field, event):
        wiki_page, comment = event[3]
        if field == 'url':
            return context.href.wiki(wiki_page.id, version=wiki_page.version)
        elif field == 'title':
            name = tag.em(get_resource_name(self.env, wiki_page))
            if wiki_page.version > 1:
                return tag_('%(page)s edited', page=name)
            else:
                return tag_('%(page)s created', page=name)
        elif field == 'description':
            markup = format_to(self.env, None,
                               context.child(resource=wiki_page), comment)
            if wiki_page.version > 1:
                diff_href = context.href.wiki(
                    wiki_page.id, version=wiki_page.version, action='diff')
                markup = tag(markup,
                             ' (', tag.a(_('diff'), href=diff_href), ')')
            return markup

    # ISearchSource methods

    def get_search_filters(self, req):
        if 'WIKI_VIEW' in req.perm:
            yield ('wiki', _('Wiki'))

    def get_search_results(self, req, terms, filters):
        """Yield search results from the latest version of each wiki page,
        then from wiki attachments."""
        if not 'wiki' in filters:
            return
        with self.env.db_query as db:
            sql_query, args = search_to_sql(db, ['w1.name', 'w1.author',
                                                 'w1.text'], terms)
            wiki_realm = Resource('wiki')
            for name, ts, author, text in db("""
                    SELECT w1.name, w1.time, w1.author, w1.text
                    FROM wiki w1,(SELECT name, max(version) AS ver
                                  FROM wiki GROUP BY name) w2
                    WHERE w1.version = w2.ver AND w1.name = w2.name
                    AND """ + sql_query, args):
                page = wiki_realm(id=name)
                if 'WIKI_VIEW' in req.perm(page):
                    yield (get_resource_url(self.env, page, req.href),
                           '%s: %s' % (name, shorten_line(text)),
                           from_utimestamp(ts), author,
                           shorten_result(text, terms))

        # Attachments
        for result in AttachmentModule(self.env).get_search_results(
                req, wiki_realm, terms):
            yield result
|
Winners of our End of Year Giveaway!
Thank you all for participating in our End of Year Giveaway! Here are our lucky winners.
Congratulations to all four winners! We will be in touch shortly with instructions on how to claim your prize.
Thank you and welcome 2017!
|
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
"""Demonstration of TZAwareDateTime composite column for sqlalchemy"""
__author__ = 'Andrew Ittner <aji@rhymingpanda.com>'
__copyright__ = "Public Domain (CC0) <http://creativecommons.org/publicdomain/zero/1.0/>"
# stdlib
from datetime import datetime
# sqlalchemy
from sqlalchemy import MetaData, Table, Column, DateTime, Unicode, Integer
from sqlalchemy import create_engine
from sqlalchemy.orm import mapper, relation, composite, create_session
# timezone-aware composite column
from tzaware_datetime import TZAwareDateTime
# 3rd-party: dateutil <http://labix.org/python-dateutil>
from dateutil import tz
# demonstration parent table
class InfoMatic(object):
    """sqlalchemy main demonstration table: contains basic info, plus a
    composite TZAwareDateTime column.

    :param info: human-readable label for the row
    :param tzawaredate: TZAwareDateTime composite value (may be None)
    :param expectedoffset: expected UTC offset for the demo (may be None)
    """
    def __init__(self, info=None, tzawaredate=None, expectedoffset=None):
        self.info = info
        self.tzawaredate = tzawaredate
        self.expectedoffset = expectedoffset

    def __repr__(self):
        # Bug fix: the original format string was missing the closing '>'.
        return "<InfoMatic('%s', %s, %s)>" % (
            self.info, self.tzawaredate, self.expectedoffset)
def prep_database():
    """Create the in-memory SQLite engine, define the table metadata,
    map InfoMatic (including its TZAwareDateTime composite), and create
    all tables.

    Stores the engine in the module-level ``myengine`` global so that
    run_demo() can bind a session to it.
    """
    global myengine
    # Transient in-memory SQLite database; echo=False keeps output quiet.
    myengine = create_engine('sqlite:///:memory:', echo=False)
    # Table definition: three ordinary columns plus the three columns
    # that back the TZAwareDateTime composite.
    meta = MetaData()
    infomatic_table = Table(
        'infomatic', meta,
        Column('id', Integer, primary_key=True),
        Column('info', Unicode(255)),
        Column('expectedoffset', Integer),
        Column('utcdate', DateTime),  # for TZAwareDateTime
        Column('tzname', Unicode),    # for TZAwareDateTime
        Column('tzoffset', Integer))  # for TZAwareDateTime
    # ORM mapping: 'tzawaredate' is a composite assembled from the three
    # backing columns above.
    mapper(InfoMatic, infomatic_table, properties={
        'info': infomatic_table.c.info,
        'expectedoffset': infomatic_table.c.expectedoffset,
        'tzawaredate': composite(TZAwareDateTime,
                                 infomatic_table.c.utcdate,
                                 infomatic_table.c.tzname,
                                 infomatic_table.c.tzoffset),
    })
    # Emit the CREATE TABLE statements.
    meta.create_all(myengine)
def run_demo():
"""prep the database, create a session, run some example code"""
global myengine
prep_database()
# create session
session = create_session(bind=myengine, autocommit=True, autoflush=True) #autoflush=True: key!
# create & save info objects
lots_of_dates = [InfoMatic(u"first date", TZAwareDateTime(realdate=datetime.now(tz.tzutc())), 0)]
lots_of_dates.append(InfoMatic(u"null date", TZAwareDateTime(), None))
lots_of_dates.append(InfoMatic(u"PST date",
TZAwareDateTime(realdate=datetime.now(tz.gettz("PST"))),
28800))
lots_of_dates.append(InfoMatic(u"New Zealand date",
TZAwareDateTime(realdate=datetime.now(tz.gettz("Pacific/Auckland"),
))))
session.add_all(lots_of_dates)
# print all objects
info_count = session.query(InfoMatic).count()
print '\tAll infomatic objects (%s)' % info_count
for infomatic in session.query(InfoMatic):
assert isinstance(infomatic, InfoMatic)
if infomatic.tzawaredate is not None:
assert isinstance(infomatic.tzawaredate, TZAwareDateTime)
print infomatic
print '\t', infomatic.info
print '\ttzawaredate.realdate', infomatic.tzawaredate.realdate
print '\ttzawaredate.utcdt', infomatic.tzawaredate.utcdt
session.close()
# Run the demonstration only when executed as a script (not on import).
if __name__ == '__main__':
    run_demo()
|
If you want to enhance the character of your whole bedroom, you should seriously consider the following five aspects when choosing furniture. First, high-level design. The quality of furniture depends on a good design idea: it not only lets the furniture serve its original function perfectly, but also stops the furniture from being merely another article of daily use. Through its choice of materials, its human-centered design, and the expansion of its function, it becomes a living work of art that fully reflects the wisdom and aesthetic sense of its owner. Second, fine workmanship. Quality furniture needs not only first-class design but also first-class workmanship: when the finishing of every line, every corner, and every structural reinforcement, as well as the installation of every part, achieves excellence, it is truly good furniture. A poorly made piece of furniture, even amid elegant decoration and high-grade appliances, will struggle to show its beauty. Third, exquisite details. Details not only determine success or failure; they can also decide the quality of a piece of furniture. Detail is the ultimate goal pursued by many fashionable products, and in furniture it is reflected not only in the design but also in the selection of materials and the fineness of the workmanship — for example, a classical-style sofa with a tassel design on the armrest reveals luxurious quality in its details. Fourth, saving energy and protecting the environment. Energy conservation and environmental protection include, beyond the most basic green credentials, the reuse of resources. Solid-wood furniture, as well as some older, well-designed pieces, can satisfy the urban population's desire to be close to nature and can show a more natural quality and style. Fifth, cultural accumulation.
The culture of a piece of furniture reflects not only its quality but also its owner's literary and artistic cultivation. The cultural connotation of furniture is often shown through aspects such as its form, its materials, and its detail work: solid-wood furniture is exquisite in its use of many kinds of wood, with every timber embodying a meaning, while rattan furniture carries a kind of classic air. Furniture of this kind reveals a unique temperament of its own and adds luster to any bedroom.
Plywood display showcases and baked-paint display showcases.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.