text
stringlengths 29
850k
|
|---|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.template import TemplateSyntaxError, TemplateDoesNotExist
from django.template.loader import get_template
from django.utils.safestring import mark_safe
from itertools import ifilter
from ragendja.template import Library
import re
# Splits text on runs of whitespace, hyphens and quote characters, so that
# queries and document text are tokenized the same way throughout this module.
whitespace = re.compile(r'[\s\-\'\"]+')
# Tag/filter registry for this template-tag module (ragendja's Library).
register = Library()
@register.filter
def searchexcerpt(text, query, context_words=10, startswith=True):
    """
    Return an excerpt of ``text`` around each query match, keeping
    ``context_words`` words of context on either side of a match and
    collapsing the omitted middle of long gaps to "...".

    ``query`` may be a plain string (tokenized on whitespace/hyphens/quotes)
    or an already-tokenized list/tuple/set. With ``startswith`` True a term
    matches any word beginning with it; otherwise whole words only.

    NOTE(review): Python 2 code (``ifilter``, ``.next()``, ``itervalues``).
    """
    if not isinstance(query, (list, tuple, set)):
        query = set(whitespace.split(query))
    # Escape the terms so they can be embedded in a regular expression.
    query = [re.escape(q) for q in query if q]
    # One fully-anchored pattern per term, used below to map a matched piece
    # back to the term it matched.
    exprs = [re.compile(r"^%s$" % p, re.IGNORECASE) for p in query]
    # Prefix matching omits the trailing word boundary.
    re_template = startswith and r"\b(%s)" or r"\b(%s)\b"
    # re.split with a capturing group: odd indices hold the matched terms,
    # even indices hold the text between matches.
    pieces = re.compile(re_template % "|".join(query), re.IGNORECASE).split(text)
    matches = {}
    word_lists = []
    # NOTE(review): ``index`` is written but never read afterwards.
    index = {}
    for i, piece in enumerate(pieces):
        word_lists.append(whitespace.split(piece))
        if i % 2:
            # Find which term this piece matched and record the piece index
            # under that term's expression.
            index[i] = expr = ifilter(lambda e: e.match(piece), exprs).next()
            matches.setdefault(expr, []).append(i)
    def merge(lists):
        # Flatten a run of word lists, joining a fragment split mid-word by
        # the regex split above onto the preceding word.
        merged = []
        for words in lists:
            if not words:
                continue
            if merged:
                merged[-1] += words[0]
                del words[0]
            merged.extend(words)
        return merged
    # Keep the first occurrence of each term as its own chunk, merging the
    # stretches of plain text between occurrences.
    # NOTE(review): assumes the per-term minimum indices come out in
    # increasing order from the dict iteration — verify for py2 dicts.
    i = 0
    merged = []
    for j in map(min, matches.itervalues()):
        merged.append(merge(word_lists[i:j]))
        merged.append(word_lists[j])
        i = j + 1
    merged.append(merge(word_lists[i:]))
    # Trim each non-match chunk down to the context window, replacing the
    # omitted words with a single "..." marker.
    output = []
    for i, words in enumerate(merged):
        omit = None
        if i == len(merged) - 1:
            # Trailing chunk: keep only the leading context words.
            omit = slice(max(1, 2 - i) * context_words + 1, None)
        elif i == 0:
            # Leading chunk: keep only the trailing context words.
            omit = slice(-context_words - 1)
        elif not i % 2:
            # Chunk between two matches: keep context on both sides.
            omit = slice(context_words + 1, -context_words - 1)
        if omit and words[omit]:
            words[omit] = ["..."]
        output.append(" ".join(words))
    return ''.join(output)
@register.filter
def highlightedexcerpt(text, query, context_words=10, startswith=True, class_name='highlight'):
    """
    Like ``searchexcerpt`` but additionally wraps every query match in the
    resulting excerpt in a ``<span>`` whose CSS class is ``class_name``,
    and marks the result safe for template output.
    """
    if not isinstance(query, (list, tuple, set)):
        query = set(whitespace.split(query))
    # Build the trimmed excerpt first, then highlight within it.
    text = searchexcerpt(text, query, context_words=context_words, startswith=startswith)
    query = [re.escape(q) for q in query if q]
    # Same matching rule as searchexcerpt: prefix match unless whole words
    # are required.
    re_template = startswith and r"\b(%s)" or r"\b(%s)\b"
    expr = re.compile(re_template % "|".join(query), re.IGNORECASE)
    template = '<span class="%s">%%s</span>' % class_name
    def replace(match):
        # Fixed: the original also appended every match object to a local
        # ``matches`` list that was never read — dead code removed.
        return template % match.group(0)
    return mark_safe(expr.sub(replace, text))
@register.context_tag
def global_search_form(context, url, label='Search'):
    """
    Render a small GET search form posting to ``url``.

    The form class is taken from the ``GLOBAL_SEARCH_FORM`` setting (a
    dotted path, default ``search.forms.SearchForm``). When the current
    request is already on ``url`` the form is bound to ``request.GET`` so
    the submitted query stays visible in the input field.
    """
    request = context['request']
    # rpartition splits "pkg.module.Class" into module path and class name;
    # the separator itself is unused (the original bound it to ``xxx``).
    form_module, _, class_name = getattr(settings, 'GLOBAL_SEARCH_FORM',
        'search.forms.SearchForm').rpartition('.')
    # __import__ with a non-empty fromlist returns the leaf module rather
    # than the top-level package.
    form_class = getattr(__import__(form_module, {}, {}, ['']), class_name)
    html = '<form action="%(url)s" method="get">%(input)s<input type="submit" value="%(label)s" /></form>'
    if request.path == url:
        form = form_class(request.GET, auto_id='global_search_%s')
    else:
        form = form_class(auto_id='global_search_%s')
    return html % {'url': url, 'input': form['query'], 'label': label}
@register.context_tag
def load_object_list(context):
    """
    Loads search__object_list for iteration and applies the converter to it.
    """
    key = context['template_object_name'] + '_list'
    objects = context[key]
    convert = context.get('search__converter')
    # Apply the optional converter callback before exposing the list.
    objects = convert(objects) if convert else objects
    context['search__object_list'] = objects
    return ''
@register.context_tag
def display_in_list(context, item):
    """
    Render ``item`` with the template ``<app_label>/<model>_in_list.html``,
    exposing it under the context variable named by ``template_object_name``.
    Returns '' if the template is missing or broken, unless
    ``TEMPLATE_DEBUG`` is on, in which case template errors are re-raised.
    """
    template_name = '%s/%s_in_list.html' % (
        item._meta.app_label, item._meta.object_name.lower())
    context.push()
    context[ context['template_object_name'] ] = item
    try:
        output = get_template(template_name).render(context)
    except (TemplateSyntaxError, TemplateDoesNotExist), e:
        if settings.TEMPLATE_DEBUG:
            # NOTE(review): re-raising here skips context.pop(), leaving the
            # pushed scope on the context — only tolerable under DEBUG.
            raise
        output = ''
    except:
        output = '' # Fail silently for invalid included templates.
    context.pop()
    return output
@register.filter
def resultsformat(hits, results_format):
    """
    Pick a results-count message from ``results_format`` (a 4-item sequence
    for zero / one / few / many hits) and interpolate ``hits`` into it.
    For the "many" case the count is reported as hits - 1 (e.g. 301 raw
    hits -> "more than 300").
    """
    if not hits:
        fmt = results_format[0]
    elif hits == 1:
        fmt = results_format[1]
    elif hits <= 300:
        fmt = results_format[2]
    else:
        fmt = results_format[3]
        hits -= 1
    return fmt % {'hits': hits}
@register.inclusion_tag('search/pagenav.html', takes_context=True)
def pagenav(context, adjacent_pages=3):
    """
    To be used in conjunction with the object_list generic view.
    Adds pagination context variables for use in displaying first, adjacent and
    last page links in addition to those created by the object_list generic
    view.
    """
    current = context['page']
    total = context['pages']
    # Pick a raw window of page numbers around the current page, widened
    # near either end of the range.
    if current < adjacent_pages:
        window = range(1, 2 * adjacent_pages)
    elif total - current + 1 < adjacent_pages:
        window = range(total - 2 * adjacent_pages + 2, total + 1)
    else:
        window = range(current - adjacent_pages, current + adjacent_pages + 1)
    # Clamp the window to valid page numbers.
    window = [n for n in window if 1 <= n <= total]
    # Absorb the very last/first page into the window when it is adjacent,
    # so a one-page "..." gap is never shown.
    if total not in window and total - 1 in window:
        window.append(total)
    if 1 not in window and 2 in window:
        window.insert(0, 1)
    return {
        'hits': context['hits'],
        'results_per_page': context['results_per_page'],
        'page': current,
        'pages': total,
        'page_range': window,
        'next': context['next'],
        'previous': context['previous'],
        'has_next': context['has_next'],
        'has_previous': context['has_previous'],
        'show_first': 1 not in window,
        'show_last': total not in window,
        'base_url': context['base_url'],
    }
|
In the present era, most users use the MS Outlook email client to preserve their data for their business. But sometimes, due to corruption and many other reasons, the OST file becomes inaccessible, and the user then needs to recover data from the orphaned file. To do that, they need a trustworthy tool, such as an OST to PST Converter application. With the help of this tool, you can recover your data easily and convert it into the new format, PST. Before the conversion, it shows a preview of the recovered mailbox along with the metadata such as To, Cc, From, Date, Subject, Object and Bcc. You can store your data in PST, EML, EMLX, MBOX, HTML, vCal, vCard, and MSG file formats for future use.
Yes, absolutely you can convert OST file into PST file format by using PDS OST to PST Converter Software in few seconds. In additionally it convert all crashed, damaged and inaccessible OST file into readable PST file format and other additional file formats such as EML, MSG, MBOX, HTML, PDF and Office 365 file formats. With the help of this conversion you can safely and securely convert all OST mailbox items folder such as emails, contacts, calendars, zip attachments, journals, appointment, inbox, outbox etc. including all emails properties such as to, from, cc, bcc, subject, date and time.
Better application SysInspir Can OST file be converted PST file is helpful for repairing damaged and corrupted exchange OST File and migrate all recovered OST File into PST Outlook data. It is windows program included all feature ads multiple like: - calendar, contact, inbox items, outbox items, task, draft and appointments into emails option:- Outlook PST, MSG, EMLX, MBOX, HTML, EML and HTML including a properties attachments like:- to, cc, bcc, date, time, images and subject formats. Convert OST to PST Software supports all MS Outlook versions: - 97, 2000, 2002, 2003, 2008, 2009, 2010, 2013, 2016 and 2019.
Export OST to PST file with eSoftTools OST to PST export tool that free permits user to save 25 emails per folders of Exchange OST file into every provided format with free of cost and if you need to convert complete OST files then you have to purchase the full version of the software.
|
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
Written (W) 2013 Heiko Strathmann
Written (W) 2013 Dino Sejdinovic
"""
from numpy import zeros, cos, sin, sqrt
from numpy.lib.twodim_base import diag
from numpy.linalg import svd
from scipy.linalg.basic import solve_triangular
class MatrixTools(object):
    """Static linear-algebra helpers (rotations, low-rank factors,
    Cholesky-based solves)."""

    @staticmethod
    def rotation_matrix(theta):
        """
        Returns a 2d rotation matrix where theta is in radians.
        """
        R = zeros((2, 2))
        R[0, 0] = cos(theta)
        R[0, 1] = -sin(theta)
        R[1, 0] = sin(theta)
        R[1, 1] = cos(theta)
        return R

    @staticmethod
    def low_rank_approx(K, d):
        r"""
        Returns a low rank approximation factor L of the given psd matrix,
        using a given number of principal components.

        K - psd matrix to compute low-rank approximation of
        d - number of principal components to use

        returns (L, s, V) where
        L - (d x n) factor with L^T L \approx K.
            Fixed docstring: the original claimed LL^T \approx K, but with
            L = sqrt(S).dot(V) the product approximating K is L^T L
            (LL^T equals diag(s) instead). Raw strings are now used so
            "\approx" is not mangled by the "\a" (BEL) escape.
        s - 1D vector of the d largest singular values (Eigenvalues for
            psd K)
        V - matrix containing the corresponding Eigen-row-vectors
        """
        # Perform SVD and only use the first d components. Note that
        # U^T = V if K is psd, and the rows of V are Eigenvectors of K.
        U, s, V = svd(K)
        U = U[:, 0:d]
        V = V[0:d, :]
        s = s[0:d]
        S = diag(s)
        # K \approx U.dot(S.dot(V)); absorb sqrt(S) into V to obtain the
        # factor.
        L = sqrt(S).dot(V)
        return (L, s, V)

    @staticmethod
    def cholesky_solve(L, x):
        r"""
        Solves X^-1 x = (LL^T)^-1 x = L^-T L^-1 x for a given lower
        triangular Cholesky factor L of X = LL^T, via two triangular
        solves.
        """
        x = solve_triangular(L, x.T, lower=True)
        x = solve_triangular(L.T, x, lower=False)
        return x
|
Children's Bible; Family Devotional Bible; Florida and is the Executive Director of Sojourn Network. Gospel Shaped Worship Leader's Guide Wilson,... Children & Family Worship & Liturgy "Dollar Store Children Sermons": Here are ten points on the map, which will help guide you to your final destination.
Children; Family Worship Projects The Israelites Make a Golden Calf This guide can help you teach your children how to find joy in serving Jehovah.
Are you looking for content to help guide your family in weekly worship? Use this weekly resource to help guide your family in deepening the connection with God.
How to Do Family Worship with Young Kids: with the practice of family worship even when the children don't stay — a guide to use with the whole family.
|
import os
from os.path import join as pjoin
# Directory holding this module; all data files are addressed relative to it.
dirname = os.path.dirname(__file__)

# Number of q-points and their corresponding integration weights.
nqpt = 3
wtq = [0.125, 0.5, 0.375]

def _data_files(names):
    """Expand a whitespace-separated block of file names into full paths
    relative to this module's directory.

    Fixed: the original repeated the same list-comprehension boilerplate
    six times; it is factored into this helper.
    """
    return [pjoin(dirname, fname) for fname in names.split()]

# DDB files, one per q-point.
DDB_fnames = _data_files("""
    odat_calc_DS5_DDB.nc
    odat_calc_DS9_DDB.nc
    odat_calc_DS13_DDB.nc
    """)
# EIG files for the q-shifted datasets.
EIG_fnames = _data_files("""
    odat_calc_DS6_EIG.nc
    odat_calc_DS10_EIG.nc
    odat_calc_DS14_EIG.nc
    """)
# EIGR2D files for datasets 7/11/15.
EIGR2D_fnames = _data_files("""
    odat_calc_DS7_EIGR2D.nc
    odat_calc_DS11_EIGR2D.nc
    odat_calc_DS15_EIGR2D.nc
    """)
# EIGI2D files for datasets 7/11/15.
EIGI2D_fnames = _data_files("""
    odat_calc_DS7_EIGI2D.nc
    odat_calc_DS11_EIGI2D.nc
    odat_calc_DS15_EIGI2D.nc
    """)
# FAN files (kept for reference; commented out of the bundle below).
FAN_fnames = _data_files("""
    odat_calc_DS7_FAN.nc
    odat_calc_DS11_FAN.nc
    odat_calc_DS15_FAN.nc
    """)
# GKK files for datasets 7/11/15.
GKK_fnames = _data_files("""
    odat_calc_DS7_GKK.nc
    odat_calc_DS11_GKK.nc
    odat_calc_DS15_GKK.nc
    """)
# Unperturbed eigenvalues (dataset 3).
EIG0_fname = pjoin(dirname, 'odat_calc_DS3_EIG.nc')

# Keyword bundle consumed by the calculation driver.
fnames = dict(
    eigk_fname=EIG0_fname,
    eigq_fnames=EIG_fnames,
    ddb_fnames=DDB_fnames,
    eigr2d_fnames=EIGR2D_fnames,
    eigi2d_fnames=EIGI2D_fnames,
    #fan_fnames=FAN_fnames,
    gkk_fnames=GKK_fnames,
    )

# Reference outputs used for comparison.
refdir = pjoin(dirname, 'epc_outputs')
|
January 27, 2019 – RVFD and MCFRS units battled heavy fire and cold weather conditions on Sunday that damaged the Georgetown Hill Early School located at the Woodley Gardens Swim Club on Nelson St. E703B and AT723 were originally dispatched at 8:15pm for an automatic fire alarm. When E703 arrived, they found heavy fire extending through the roof of the day care school and requested to have Montgomery County ECC fill out the box alarm assignment. ECC also alerted the Rapid Intervention Dispatch. Due to the heavy fire conditions, incident command ordered that initial fire attack be conducted by exterior operations and master stream devices. AT723, AT703 used elevated master streams and E703jB used their deck gun to fight the fire. Other crews used large-size hand lines to attack the fire through the windows on the front and rear of the building. Other crews entered the connected swim club building to check for possible extension via the common attic.
The fire appeared to have originated in the office area of the school and extended to adjacent classrooms. Other classrooms were damaged by heavy smoke and heat. While no extension was found in the swim club, parts of that building suffered water, smoke and heat damage, as well as damage by firefighters pulling ceilings looking for possible extension. The cause of the fire is under investigation. No injuries were reported and damage is estimated at $750,000.
In a statement, school CEO Peter Cromwell advised that they plan to serve school age families at the Montgomery Square location at 12300 Falls Rd. in Potomac.
|
from django.core.management.base import BaseCommand, CommandError
from main.models import EventAction, PositionUncertainty, PositionSource
from django.conf import settings
from main import utils
# This file is part of https://github.com/cpina/science-cruise-data-management
#
# This project was programmed in a hurry without any prior Django experience,
# while circumnavigating the Antarctic on the ACE expedition, without proper
# Internet access, with 150 scientists using the system and doing at the same
# cruise other data management and system administration tasks.
#
# Sadly there aren't unit tests and we didn't have time to refactor the code
# during the cruise, which is really needed.
#
# Carles Pina (carles@pina.cat) and Jen Thomas (jenny_t152@yahoo.co.uk), 2016-2017.
class Command(BaseCommand):
    help = 'Updates locations of latitude longitude of Event Actions'

    def __init__(self):
        # Fixed: the original never chained to BaseCommand.__init__, which
        # sets up state (stdout/stderr wrappers, styling) the base class
        # relies on.
        super().__init__()
        self._dry_run = True
        self._position_source_object = None
        self._position_uncertainty_object = None
        self._force_update = False

    def add_arguments(self, parser):
        parser.add_argument('action', help="[update|dry-run|force-update]", type=str)

    def handle(self, *args, **options):
        """Entry point: choose the mode from 'action' and run the update."""
        if options['action'] == "dry-run":
            self._dry_run = True
        elif options['action'] == "update" or options['action'] == "force-update":
            self._dry_run = False
            self._force_update = (options['action'] == "force-update")
        else:
            # Fixed: the original printed a message and called exit(1);
            # raising CommandError (imported at the top of this file but
            # previously unused) is the idiomatic Django way to abort a
            # management command with a non-zero exit status.
            raise CommandError("Unknown action, should be dry-run, update or force-update")
        self._update_locations()

    def _update_locations(self):
        # Walk event actions in chronological order and update only those
        # whose position depends on time (or all of them under force-update).
        event_actions = EventAction.objects.order_by('time')
        for event_action in event_actions:
            if event_action.position_depends_on_time(self._force_update):
                self._update(event_action)

    def _update(self, event_action):
        """Update (or, in dry-run mode, report) one event action's position
        from the ship's location at the event action's time."""
        ship_location = utils.ship_location(event_action.time)
        action_text_before = ""
        if ship_location.latitude is not None and ship_location.longitude is not None:
            if self._dry_run:
                action_text = "Should update"
            else:
                action_text_before = "(Previously: Latitude: {} Longitude: {})".format(event_action.latitude, event_action.longitude)
                # Coordinates are stored rounded to 4 decimal places; skip
                # the save when the rounded value would not change.
                if event_action.latitude == float("{:.4f}".format(ship_location.latitude)) and event_action.longitude == float("{:.4f}".format(ship_location.longitude)):
                    print("Was going to update {} but it's the same than before, skips".format(event_action))
                    return
                event_action.latitude = "{:.4f}".format(ship_location.latitude)
                event_action.longitude = "{:.4f}".format(ship_location.longitude)
                event_action.position_source = self._position_source()
                event_action.position_uncertainty = self._position_uncertainty()
                action_text = "Updated"
                event_action.save()
            print("{} event_action: {}\t{} {:.4f} {:.4f} {}".format(action_text, event_action.id, event_action.time,
                                                                    ship_location.latitude, ship_location.longitude, action_text_before))
        elif not ship_location.is_valid:
            print("Event action {} location in the database is invalid. Date time: {}".format(event_action.id, event_action.time))
            print("In the event action is: Latitude: {} Longitude: {}".format(event_action.latitude, event_action.longitude))
        else:
            print("Missing information for event action ID: {} Time: {}".format(event_action.id, event_action.time))

    def _position_uncertainty(self):
        # Lazily fetched, cached PositionUncertainty row named in settings.
        if self._position_uncertainty_object is None:
            self._position_uncertainty_object = PositionUncertainty.objects.get(name=settings.UPDATE_LOCATION_POSITION_UNCERTAINTY_NAME)
        return self._position_uncertainty_object

    def _position_source(self):
        # Lazily fetched, cached PositionSource row named in settings.
        if self._position_source_object is None:
            self._position_source_object = PositionSource.objects.get(name=settings.UPDATE_LOCATION_POSITION_SOURCE_NAME)
        return self._position_source_object
|
This comfortable hotel enjoys a convenient location just outside Amsterdam's central train station. Many of the city's famous attractions such as the Anne Frank House, Dam Square and the old town are within easy walking distance, while the Van Gogh Museum, Rijksmuseum, and Leidseplein Square can be reached by train within 10 minutes. Art aficionados will not want to miss the Rembrandt House Museum and Stedelijk Museum, while history buffs can enjoy the Museum Van Loon and Bijbels Museum. The hotel's modern and functional rooms are decorated with bright accents and come with free Wi-Fi, air-conditioning and en suite bathrooms. Guests can wake up to a free buffet breakfast with fresh and healthy options for the perfect start to a day of sightseeing or business meetings. Whether travelling for business or leisure, this hotel provides hassle-free accommodation in a wholly central location.
|
# Copyright (C) 2016 SignalFx, Inc. All rights reserved.
class SignalFlowException(Exception):
    """A generic error encountered when interacting with the SignalFx
    SignalFlow API."""

    def __init__(self, code, message=None, error_type=None):
        self._code = code
        self._message = message
        self._error_type = error_type

    @property
    def code(self):
        """Returns the HTTP error code."""
        return self._code

    @property
    def message(self):
        """Returns an optional error message attached to this error."""
        return self._message

    @property
    def error_type(self):
        """Returns an optional error type attached to this error."""
        return self._error_type

    def __str__(self):
        # Label is the code, optionally annotated with the error type.
        if self._error_type:
            label = '{0} ({1})'.format(self._code, self._error_type)
        else:
            label = self._code
        if not self._message:
            return 'Error {0}'.format(label)
        return '{0}: {1}'.format(label, self._message)
class ComputationAborted(Exception):
    """Exception thrown if the computation is aborted during its execution."""

    def __init__(self, abort_info):
        # Extract the abort metadata from the job's abort-info payload.
        self._state = abort_info['sf_job_abortState']
        self._reason = abort_info['sf_job_abortReason']

    @property
    def state(self):
        """The state the job was in when it was aborted."""
        return self._state

    @property
    def reason(self):
        """The reason the job was aborted."""
        return self._reason

    def __str__(self):
        details = (self._state.lower(), self._reason)
        return 'Computation {0}: {1}'.format(*details)
class ComputationFailed(Exception):
    """Exception thrown when the computation failed after being started."""

    def __init__(self, errors):
        # Keep the raw error payload for callers to inspect.
        self._errors = errors

    @property
    def errors(self):
        """The errors reported by the failed computation."""
        return self._errors

    def __str__(self):
        return 'Computation failed ({0})'.format(self._errors)
|
The Quiqup service is designed to connect you to a third-party courier, vehicle operator or logistics provider (the “Carrier”) to carry out deliveries on your behalf (the “Service”). The Service may be provided to you via the Quiqdash (“Quiqdash Service”) or via the Integrated APIs (“Partner Service”), depending on what you have agreed with us.
The Service is provided by Quiqup Limited (“we”, “us” and “our”), company number 08903058 whose place of business is at Unit 0.2, Chandelier Building, 8 Scrubs Lane, London, NW10 6RB (email accountmanagers@quiqup.com). Our VAT number is 192 6023 14.
Use of the service will be governed by these Terms.
Subject to your compliance with your obligations set out in these Terms, we agree to use commercially reasonable efforts to provide and make available the Service to you.
In consideration of our provision of the Service, you shall pay to us all fees and charges set out or referred to in these Terms (collectively the “Fees”).
You shall pay interest on any late or unpaid Fees from the due date until the date of actual payment (whether before or after judgment) at the rate of eight per cent (8%) per annum above the base rate of the Bank of England from time to time, such interest to accrue on a day to day basis and to be compounded monthly.
The current in-force price list used to calculate the Fees as at the Effective Date is set out in Schedule 2.
To request a booking for a Carrier, you must submit your request via the Quiqdash (for the Quiqdash Service) or via our Integrated APIs (for the Partner Service) and input into those systems the information required by the Service.
if a Carrier has been assigned and has already arrived at your collection location and has been waiting at your collection location for more than 5 minutes, your cancellation will result in you being charged the full order value.
If a Carrier has already collected the items and due to cancellation these items must be subsequently returned to the collection location, you will be charged 200% of the full order value.
If a Carrier cannot complete a delivery due to the Collected Items exceeding the weight and dimension restrictions set out in Clause 6.6 for the mode of transport selected, the delivery will be cancelled and you will be charged £5.00 for the cancellation. In such an instance, if you would still like the delivery to be completed, you will be required to make a new booking with the appropriate mode of transport selected.
Where you are using the Service to deliver the Collected Items to a Recipient, the contract of sale in relation to your Collected Item is made between you and that Recipient and not with us or the Carrier.
any goods, products or items which you have been notified by us or a Carrier as being prohibited.
You are responsible for preparing and packaging the Collected Items in a form suitable for delivery by a Carrier.
You agree that you shall not use the Service to deliver fragile or delicate products, goods or items and, if you do, you only do so at your sole risk. We shall not be liable to you for any damage to delicate or fragile products, goods or items and you shall ensure that any such goods, products or items that you do send are appropriately packaged and protected.
We reserve the right in our sole discretion to reject a booking request placed by you. We may do this, for example, if we suspect that the booking request or the goods or products that you have requested to be collected using the Service are not in accordance with these Terms.
Delivery and collection times selected by you or allocated via the Service are approximate only, and the time for collection or delivery is not of the essence. We will use commercially reasonable efforts to meet the delivery and collection times set out in the Service Level Agreement. However, we shall not be liable for any failure to meet the delivery and collection times set out in the Service Level Agreement if such failure is caused by any of the circumstances listed in Clauses 7.2, 7.3, 7.4(a) or 7.4(b) below.
If you fail to provide adequate instructions about the collection of the Collected Items and such failure results in the Carrier being unable to collect the Collected Items within a reasonable amount of time, we may cancel the delivery and charge you £5.00.
If you (or someone on your behalf) are unavailable to hand over the Collected Items to the Carrier within 10 minutes following the Carrier’s arrival at your collection location, we may cancel the delivery and charge you £5.00, plus an additional £2.50 for every 10 minutes that the Carrier has been waiting at your collection location.
Recipient is unavailable to take possession of the Collected Items within a reasonable time following arrival at a delivery location by the Carrier, the Carrier shall return the Collected Item to where it was collected from and we will be entitled charge you (i) 200% of the full Fees we would have charged if the delivery had been completed; and/or (ii) our reasonable costs incurred as a result of the Carrier returning the Collected Item to where it was collected from.
We shall only be required to facilitate collection and delivery of goods, products or items through the Service from or to locations within the Delivery Area.
We (or our licensors) retain ownership of the Service and its associated Intellectual Property Rights (including those in our website and mobile application). We grant you a non-exclusive, non-transferable right to use the Service’s features solely for your own personal, non-commercial purposes, subject to these Terms. All rights we do not grant to you specifically in these Terms are reserved by us.
is not used to create any software or service that is substantially similar to the Service.
You further warrant that you will perform your obligations under these Terms, including preparation and packaging of Collected Items for delivery, with due care and skill and in accordance with industry standards.
We warrant that we will perform our obligations under these Terms with due care and skill and in accordance with industry standards.
If a Recipient makes a complaint to us about a Collected Item itself (such as a complaint about the quality of a Product), we shall be entitled to re-direct that complaint to you.
the Collected Items are inadequate, poor quality or otherwise not fit for purpose.
Neither party will be liable for a failure or delay in performing its obligations under these Terms to the extent that such failure or delay is caused by an event outside of the reasonable control of that party and which could not have been prevented or avoided by that party taking reasonable steps.
Subject to Clause 12.2, the Receiver shall keep the Discloser’s Confidential Information confidential and shall not use or disclose such information other than as is necessary to perform its obligations under these Terms.
Notwithstanding Clause 12.1, the Receiver may disclose the Discloser’s Confidential Information as required by law.
If the Receiver discloses the Discloser’s Confidential Information to its employees, directors, contractors or agents for the purposes of performing its obligations under these Terms, the Receiver shall ensure that the person(s) to whom such information is disclosed treat it as confidential and only use and disclose it for the purposes of performing the Receiver’s obligations under these Terms.
The parties acknowledge and agree that they are each data controllers of personal data where a Recipient is a data subject (“Recipient Data”).
Each party will ensure that it complies with Data Protection Law applicable to it in full whenever Recipient Data is processed by them or on their behalf by a data processor.
You will ensure that a clear and unambiguous notice which is compliant with Data Protection Law is displayed to Recipients or potential Recipients which states that you may use us to fulfil orders and that we may therefore receive and process Recipient Data.
This agreement shall commence on the Effective Date and shall remain in effect until terminated in accordance with the provisions of these Terms (the “Term”).
We may terminate these Terms immediately and close your account any time without notice if we cease to provide the Service in the Delivery Area.
We reserve the right to suspend, restrict or terminate your access to the Service at any time without notice if we have reasonable grounds to believe you have breached any of your obligations set out in these Terms.
Suspension, restriction or termination of your access to the Service will not limit our right to take any other legal action against you that may be appropriate.
Either party may terminate these Terms upon twenty (20) days’ advance written notice to the other party.
All outstanding payment obligations in relation to the Service shall survive termination of these Terms.
Subject to Clause 15.4, our total aggregate liability in relation to loss, destruction or damage to Collected Items (taken together) caused by any reason whatsoever including by our breach of contract, tort or negligence or that of a Carrier is limited to the cash value of the Collected Items but shall in no event exceed £100. If you consider that the potential loss or any other damage to you caused by the loss, destruction or damage of all (or part) of a single collection and delivery of Collected Items would exceed this amount then you must arrange separate cover or insurance to cover such potential loss.
Subject to Clause 15.4, our total aggregate liability under or in connection with these Terms, whether in contract, tort (including negligence), statute or otherwise for loss or damage not limited by Clause 15.1, is limited to the greater of (i) £500; and (ii) the total Fees paid or payable to us in the 2 months preceding the event giving rise to the liability.
Subject to Clause 15.4, neither party is liable to the other for any economic losses (including loss of revenues, profits, contracts, business or anticipated savings) or any special, indirect or consequential losses of any nature, whether or not such losses were known to the parties at the Effective Date.
Nothing in these Terms shall limit either party’s liability for death or personal injury caused by its negligence, fraud or any other statutory or other liability which cannot be excluded or limited by law.
any harm, loss or damage suffered by you or anyone else (including any Recipients) if the Service is interrupted, suspended or otherwise not provided to you or if we do not comply with these Terms because of events beyond our control (for example, industrial disputes, technical difficulties, failure of or delays in communications networks, acts of terrorism or power failure).
The Service is provided without express or implied warranty or condition of any kind, on an “as-is” basis and all implied warranties (whether by statute, custom or otherwise) are excluded to the maximum extent permitted by applicable law.
You acknowledge and accept that your access to the Service is dependent on connectivity over communications networks and facilities that are outside of our operation and/or control and that your use of the Service may be subject to limitations, delays and other problems inherent in the use of such networks and facilities.
We give no warranty that your use of the Service will be uninterrupted or error free. We will use our reasonable endeavours to provide a good, consistent service, however, you acknowledge and accept that we are not responsible for any loss or damage of any kind that you may suffer as a result of any interruption or delay to your access to the Service, or as a result of any failure or error in the transfer of data over those networks and facilities in connection with your use of the Service.
we solely provide information and enable you to book those third party courier services, but we do not provide courier services ourselves or act in any way as a courier.
You agree that we have no responsibility or liability for the acts or omissions of a Carrier unless expressly set out in these Terms.
You warrant that you will comply with any terms and conditions applicable to the use of a third party Carrier’s service and we will not be responsible for any act or omission by you or the Carrier in connection with your use of their service.
We do not endorse or take any responsibility for statements, advertisements or any content whatsoever transmitted through, or linked to from, the Service by other users or third party service providers (including Carriers).
For any Carrier-specific queries, please contact our live customer support team, who can contact the Carrier in the first instance.
Unless these Terms expressly state otherwise, all notices under these Terms shall (i) for any notices to be served on us by you, be given in writing and delivered, posted or faxed to Unit 0.2, Chandelier Building, 8 Scrubs Lane, London, NW10 6RB; or (ii) for notices to be served on you by us, be emailed or be given in writing and delivered or posted to the email address or postal address specified by you in your user account (as applicable).
We can assign, sub-contract and/or otherwise transfer any or all of our rights and/or obligations under these Terms to any company, firm or person. You may not subcontract, assign, novate or otherwise deal with your obligations or interests under these Terms without our prior written consent.
The parties shall be independent contractors and nothing in these Terms shall be deemed to constitute a partnership or any employment relationship between the parties.
Subject to Clause 3.1, a provision of these Terms or right, power or remedy created under it may not be varied or waived except in writing signed by the party to be bound.
No waiver of any breach of these Terms shall constitute a waiver of any other breach of the same or other provisions of these Terms.
If any part of these Terms is found by a court of competent jurisdiction or other competent authority to be invalid, unlawful or unenforceable then such part shall be severed from the remainder of these Terms which shall continue to be valid and enforceable to the fullest extent permitted by law.
These Terms are the entire agreement between the parties about its subject matter and supersede all previous agreements, understandings, representations, statements and negotiations on that subject matter whether written or oral.
Each party acknowledges and agrees that it does not rely on, and shall have no remedy in respect of, any promise, assurance, statement, warranty, undertaking or representation made (whether innocently or negligently) by any other party or any other person except as expressly set out in these Terms in respect of which its sole remedy shall be for breach of contract.
These Terms shall be governed by the law of England and Wales and each party agrees to submit to the exclusive jurisdiction of the courts of England and Wales.
“VAT” means the value added tax.
This Schedule 1 represents a Service Level Agreement (“SLA”) for the provisioning of the Service.
Our expectation is that the Carrier will arrive at the pickup location within approximately 20 minutes of assignment and thereafter complete the order as quickly as factors such as travel distance and traffic allow.
If you use the Partner Service, you acknowledge that the service levels set out above shall not apply in certain circumstances as set out in the Key Partner Agreement between us and you or us and your franchisor (as applicable).
Business live support line: +44(0) 20 3800 9946 from 12:00 – 22:30, Monday to Sunday (excluding bank holidays). Calls received out of these hours will be forwarded to a mobile phone and reasonable efforts will be made to answer/action the call.
We will notify you by email of any changes to the above contact information.
Email addresses are monitored 09:00 to 18:00, Monday to Friday (excluding bank holidays).
Emails received outside of these hours will be collected but may not be actioned until the next working day.
0-3 hours (during business hours) for issues classified as High priority (i.e. live operational issues/enquiries).
Within 24 hours for issues classified as Medium priority (i.e. payment issues require prompt attention).
Within 48 hours for issues classified as Lower priority (i.e. administrative issues/enquiries).
The above timeframes are measured from the time at which the incident or request is submitted to us.
The priority of an incident or request will be determined by us.
|
import os
from flask import request, jsonify, abort
from flask_app import create_app, db
from flask_app.models import User, Role, Document
# Build the Flask application from the configuration name in the
# FLASK_CONFIG environment variable (None falls back to whatever
# default create_app() applies -- presumably development; confirm).
config_name = os.getenv('FLASK_CONFIG')
app = create_app(config_name)
@app.route('/')
def index():
    """Root endpoint: greet API clients with a static welcome message."""
    payload = {"message": "Welcome to Document Manager"}
    return jsonify(payload)
@app.route('/users', methods=['POST'])
def create_user():
    """Persist a new user built from the POSTed form fields.

    Returns a 201 response on success.
    """
    form = request.form
    new_user = User(
        email=str(form.get('email')),
        full_name=str(form.get('full_name')),
        password=str(form.get('password')),
    )
    db.session.add(new_user)
    db.session.commit()
    response = jsonify({"success": "user details saved successfully"})
    response.status_code = 201
    return response
@app.route('/documents', methods=['POST'])
def create_document():
    """Persist a new document built from the POSTed form fields.

    Returns a 201 response on success.
    """
    form = request.form
    new_document = Document(
        title=str(form.get('title')),
        access=str(form.get('access')),
        content=str(form.get('content')),
        roleId=form.get('roleId'),
        ownerId=form.get('ownerId'),
    )
    db.session.add(new_document)
    db.session.commit()
    response = jsonify({"success": "document saved successfully"})
    response.status_code = 201
    return response
@app.route('/roles', methods=['POST'])
def create_role():
    """Create a role from the POSTed title and persist it.

    Returns a 201 response echoing the created role's title.
    """
    role = Role(
        title=str(request.form.get('title')))
    # Removed a leftover debug statement (`print "role"`); it used
    # Python-2-only syntax and served no purpose.
    db.session.add(role)
    db.session.commit()
    response = jsonify({
        'status': 'Role created successfully',
        'role': role.title
    })
    response.status_code = 201
    return response
# Run the built-in development server when executed directly.
if __name__ == '__main__':
    app.run()
|
The system also allows an applicant to attach in the prescribed format the specification and drawings and also, if desired, he may attach the priority document/s with its English translation, Special Power of Attorney, Deed of Assignment, and any other documents that the applicant wishes to provide to support the application.
If the applicant is not domiciled in the Philippines, he cannot file an online application unless he hires a resident agent or an authorized representative in the Philippines and indicate them in the application. Within 60 days from filing of the application, a Special Power of Attorney must be submitted.
The IPOPHL EIDFILE is accessible to anyone 24/7 and applicant can pay the official fees in any of the authorized banks. The filing date is the date when the full payment was made (either online or over-the-counter). The filing receipt will be received no later than 3 days from the date the application was filed.
Finally, the Intellectual Property Industrial Design Patent Code and the Regulations for industrial design will apply to all applications filed electronically.
|
# -*- coding: utf-8 -*-
"""
DrQueue render template for Mental Ray
Copyright (C) 2011 Andreas Schroeder
This file is part of DrQueue.
Licensed under GNU General Public License version 3. See LICENSE for details.
"""
import os
import DrQueue
from DrQueue import engine_helpers
def run_renderer(env_dict):
    """Render a block of frames with Mental Ray ("ray") for DrQueue.

    :param dict env_dict: job environment containing the DRQUEUE_* variables.
    :return: exit status of the render command, wrapped for IPython by
        the engine helper.
    """
    # define external variables as global
    globals().update(env_dict)
    global DRQUEUE_OS
    global DRQUEUE_ETC
    global DRQUEUE_SCENEFILE
    global DRQUEUE_FRAME
    global DRQUEUE_BLOCKSIZE
    global DRQUEUE_ENDFRAME
    global DRQUEUE_RENDERDIR
    global DRQUEUE_IMAGE
    global DRQUEUE_CAMERA
    global DRQUEUE_RESX
    global DRQUEUE_RESY
    global DRQUEUE_FILEFORMAT
    global DRQUEUE_RENDERTYPE
    global DRQUEUE_LOGFILE

    # initialize helper object
    helper = engine_helpers.Helper(env_dict['DRQUEUE_LOGFILE'])

    # range to render
    block = helper.calc_block(DRQUEUE_FRAME, DRQUEUE_ENDFRAME, DRQUEUE_BLOCKSIZE)

    # renderer path/executable
    engine_path = "ray"

    # replace paths with drive letter on Windows
    if DRQUEUE_OS in ["Windows", "Win32"]:
        DRQUEUE_SCENEFILE = helper.replace_stdpath_with_driveletter(DRQUEUE_SCENEFILE, 'n:')
        DRQUEUE_RENDERDIR = helper.replace_stdpath_with_driveletter(DRQUEUE_RENDERDIR, 'n:')

    # NOTE(review): the optional argument strings below are built but never
    # appended to the final command line; kept as-is for parity with the
    # original template -- confirm whether they should be used.
    if ("DRQUEUE_IMAGEFILE" in globals()) and (DRQUEUE_IMAGEFILE != ""):
        image_args = "-im " + DRQUEUE_IMAGEFILE
    else:
        image_args = ""

    if ("DRQUEUE_CAMERA" in globals()) and (DRQUEUE_CAMERA != ""):
        camera_args = "-cam " + DRQUEUE_CAMERA
    else:
        camera_args = ""

    # Bugfix: the second existence check used to test DRQUEUE_RESX twice
    # instead of DRQUEUE_RESY.
    if ("DRQUEUE_RESX" in globals()) and ("DRQUEUE_RESY" in globals()) and (int(DRQUEUE_RESX) > 0) and (int(DRQUEUE_RESY) > 0):
        res_args = "-x " + DRQUEUE_RESX + " -y " + DRQUEUE_RESY
    else:
        res_args = ""

    if ("DRQUEUE_FILEFORMAT" in globals()) and (DRQUEUE_FILEFORMAT != ""):
        format_args = "-of " + DRQUEUE_FILEFORMAT
    else:
        format_args = ""

    if ("DRQUEUE_RENDERDIR" in globals()) and (DRQUEUE_RENDERDIR != ""):
        os.chdir(DRQUEUE_RENDERDIR)

    # extra stuff for rendering single images in a couple of parts
    if DRQUEUE_RENDERTYPE == "single image":
        # Parse the render resolution from the scene file.  Values are
        # converted to int for the slice arithmetic below (the original
        # code divided strings, which raises TypeError).
        for line in open(DRQUEUE_SCENEFILE):
            if "resolution" in line:
                res_arr = line.split()
                if res_arr[0] == "resolution":
                    scene_height = int(res_arr[2])
                    scene_width = int(res_arr[1])
        # NOTE(review): assumes the scene file contains a "resolution"
        # statement; otherwise scene_height/height_low stay undefined,
        # exactly as in the original implementation.
        # Split the image height into (DRQUEUE_ENDFRAME + 1) horizontal
        # slices; this "frame" renders one slice.
        part_height = scene_height // (DRQUEUE_ENDFRAME + 1)
        height_high = scene_height - (DRQUEUE_FRAME * part_height)
        height_low = height_high - part_height
        print("rendering dimensions: 0 " + str(height_low) + " " + str(scene_width) + " " + str(height_high))
        # generate frame filename from the scene's output statement
        for line in open(DRQUEUE_SCENEFILE):
            if "resolution" in line:
                if "." in line:
                    res_arr = line.split()
                    # Strip surrounding quotes; str.replace/str.zfill are
                    # used because the old `string` module helpers were
                    # never imported here.
                    outputname = res_arr[3].replace("\"", "")
                    basename, extension = os.path.splitext(outputname)
                    # splitext keeps the leading dot in `extension`, so no
                    # extra "." must be inserted.
                    framename = basename + "_" + str(DRQUEUE_FRAME).zfill(4) + extension
        command = engine_path + " -window 0 " + str(height_low) + " " + str(scene_width) + " " + str(height_high) + " " + DRQUEUE_SCENEFILE + " -file_name " + framename
    else:
        command = engine_path + " " + DRQUEUE_SCENEFILE + " -render " + str(DRQUEUE_FRAME) + " " + str(block)

    # log command line
    helper.log_write(command + "\n")

    # check scenefile
    helper.check_scenefile(DRQUEUE_SCENEFILE)

    # run renderer and wait for finish
    ret = helper.run_command(command)

    # return exit status to IPython
    return helper.return_to_ipython(ret)
|
Please choose a different timestamp.
The Integrated Multi-satellitE Retrievals for GPM (IMERG) is the unified U.S. algorithm that provides the Day-1 multi-satellite precipitation product for the U.S. GPM team. The precipitation estimates from the various precipitation-relevant satellite passive microwave (PMW) sensors comprising the GPM constellation are computed using the 2014 version of the Goddard Profiling Algorithm (GPROF2014), then gridded, intercalibrated to the GPM Combined Instrument product, and combined into half-hourly 0.1x0.1 degree fields.
The IMERG system is run twice in near-real time: "Early" multi-satellite product ~6 hr after observation time (goal of 4) and "Late" multi-satellite product ~18 hr after observation time (goal of 12). Once the monthly gauge analysis is received, ~3 months after the observation month, a "Final" satellite-gauge product is produced.
The IMERG Early Run, which is the near-realtime product in the IMERG suite of products currently runs about 6 hours after observation time. Many of the qualitative comments about the Final Run are also true for the Early Run. One difference is that the Early Run necessarily uses calibrations based on trailing accumulations of match-ups, since these cannot be computed into the future. In addition, the Early Run only has forward propagation of the microwave data (unlike both the Late and Final Runs), and it has climatological calibration to the monthly gauge data (as does the Late, but unlike the Final, which uses actual monthly gauge analyses). The beginning of the Early Run record is being computed with "seed" calibrations based on October 2014 data. Accordingly, users should expect the start of the Early Run record to be less accurate than following months of data that will have fully populated recent calibrations.
One catalog. A world of data.
Create a free account to access our growing catalog of environmental data.
|
"""
quicksort.py -- sorts an array of integers using the Quicksort sorting algorithm. When executed, uses a randomized
version of quicksort to obtain a good average-case performance over all inputs.
"""
__author__ = 'Tom'
import random
def sort(data, p, r):
    """Quicksort the slice data[p..r] in place and return the list.

    Attributes:
        data -- an array of elements
        p -- a starting index
        r -- an end index
    """
    if p >= r:
        # Zero or one element: nothing to do.
        return data
    pivot_index = partition(data, p, r)
    sort(data, p, pivot_index - 1)
    sort(data, pivot_index + 1, r)
    return data
def partition(data, p, r):
    """Partition data[p..r] around the pivot data[r] (Lomuto scheme).

    Elements <= pivot end up left of it; the pivot's final index is
    returned.

    Attributes:
        data -- an array of elements
        p -- a starting index
        r -- an end index
    """
    pivot = data[r]
    # `boundary` marks the last slot known to hold a value <= pivot.
    boundary = p - 1
    for scan in range(p, r):
        if data[scan] <= pivot:
            boundary += 1
            data[boundary], data[scan] = data[scan], data[boundary]
    # Drop the pivot just right of the <=-pivot region.
    data[boundary + 1], data[r] = data[r], data[boundary + 1]
    return boundary + 1
def randomized_sort(data, p, r):
    """Quicksort data[p..r] in place using a random pivot; return the list."""
    if p >= r:
        return data
    split = randomized_partition(data, p, r)
    randomized_sort(data, p, split - 1)
    randomized_sort(data, split + 1, r)
    return data
def randomized_partition(data, p, r):
    """Swap a uniformly random element of data[p..r] into the pivot slot,
    then delegate to the deterministic partition."""
    chosen = random.randint(p, r)
    data[r], data[chosen] = data[chosen], data[r]
    return partition(data, p, r)
if __name__ == "__main__":
    import argparse
    # create the top-level parser
    parser = argparse.ArgumentParser(description='Sort integers using quicksort')
    # add arguments
    parser.add_argument('integers', metavar='N', type=int, nargs='+', help='an integer for the sort')
    parser.add_argument('--begin', metavar='P', type=int, default=0, help='an integer for the start index')
    parser.add_argument('--end', metavar='R', type=int, help='an integer for the end index')
    # parse arguments
    args = parser.parse_args()
    # populates end index if it is None (default: sort the whole array)
    if args.end is None:
        args.end = len(args.integers) - 1
    # print sorted array; parentheses keep this line valid under both
    # Python 2 and Python 3 (the bare print statement was Python-2-only)
    print(randomized_sort(args.integers, args.begin, args.end))
|
This beautiful soul has become one of my favorite artists to collaborate with. If you’re following along my photographer journey, Tori + I went to college together. Fun fact! We actually were in Women’s Choir together. BUT she had the voice of Adele’s angel cousin and I was really good at swaying on the risers.
Anywho, we started working together the summer after we graduated. I wanted to not just shoot weddings (still true!) and she needed some photo work. Our first collab was for a shoot for Angelic Magazine. That was fun.
Since then I’ve followed this woman through clubs and bars as she tore up Hollywood and beyond with her stunning talent + soulful lyrics.
Tori loves the Lord and His people. I will always love that she intentionally chose not to write Christian music so she could be used to reach people where the Church can’t always.
This was a very early San Diego morning where we met up, got some coffee, and strolled, caught up, and created some more social media content for Tori.
She really wanted to express more of her personality not just her rad modeling skills. But I’m pretty biased. She’s one of my favorite models. Very thankful for this friend. Enjoy our overcast morning together.
If you are in need of some lifestyle creative content, feel free to hit me up! I love collaborating with bloggers, musicians, artists, and small business owners. Whether you need more content for your gym, coffee shop, or jewelry business, or you want to tell the story behind your product, message me. I’m all about photographing people + what they are passionate about.
|
# -*- coding: utf-8 -*-
u"""
Copyright 2013-2014 Olivier Cortès <oc@1flow.io>.
This file is part of the 1flow project.
1flow is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
1flow is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with 1flow. If not, see http://www.gnu.org/licenses/
"""
import logging
from django.contrib import admin
# from django.utils.translation import ugettext_lazy as _
# from django_object_actions import DjangoObjectActions
# from ..models.common import DUPLICATE_STATUS
from ..models.reldb import ( # NOQA
Language,
abstract_replace_duplicate_task
)
LOGGER = logging.getLogger(__name__)
class LanguageAdmin(admin.ModelAdmin):
    """ Language admin class.

    Change-list configuration for Language records, exposing the three
    ISO 639 code variants and the duplicate-tracking fields.
    """

    # Columns shown in the change list.
    list_display = (
        'id', 'name', 'dj_code',
        'iso639_1', 'iso639_2', 'iso639_3',
        'parent',
        'duplicate_of',
        'duplicate_status',
    )
    # Both the id and the name link to the change form.
    list_display_links = ('id', 'name', )
    list_filter = ('parent', 'duplicate_status', )
    ordering = ('dj_code', )
    # Custom templates that move the filters into a sidebar.
    change_list_template = "admin/change_list_filter_sidebar.html"
    change_list_filter_template = "admin/filter_listing.html"
    search_fields = ('name', 'dj_code', 'iso639_1', 'iso639_2', 'iso639_3', )

    # NOTE(review): the object-action machinery below (re-running the
    # duplicate replacement task) is disabled; its imports are also
    # commented out at the top of the module.
    # def get_object_actions(self, request, context, **kwargs):
    #     """ Get object actions. """
    #     objectactions = []
    #     # Actions cannot be applied to new objects (i.e. Using "add" new obj)
    #     if 'original' in context:
    #         # The obj to perform checks against to determine
    #         # object actions you want to support.
    #         obj = context['original']
    #         LOGGER.warning('YALLAAA')
    #         if obj.duplicate_of:
    #             if obj.duplicate_status in (DUPLICATE_STATUS.NOT_REPLACED,
    #                                         DUPLICATE_STATUS.FAILED, ):
    #                 objectactions.append('replace_duplicate_again')
    #     return objectactions
    # def replace_duplicate_again(self, request, obj):
    #     """ Re-run the replace_duplicate() task. """
    #     abstract_replace_duplicate_task.delay(obj._meta.app_label,
    #                                           obj._meta.object_name,
    #                                           obj.id,
    #                                           obj.duplicate_of.id)
    #     replace_duplicate_again.label = _(u'Replace again')
    #     replace_duplicate_again.short_description = \
    #         _(u'Re-run the replace_duplicate() task.')
|
Priory Blandford CAMHS education unit is a satellite provision of Priory Lyndhurst. The unit provides education to young people aged 12-18 whilst they are inpatients of the Priory Blandford CAMHS provision. We offer quality education for young people who are unable to access their own school for medical or psychological reasons.
Young people join us following a referral supported by medical evidence. The Priory CAMHS education unit within Blandford Forum will work closely with young people's mainstream schools and is committed to creating links to promote educational inclusion. Our role is to support, nurture, educate and enable young people to return to their home school when, and if, it is appropriate.
Each young person has a personalised timetable which includes access to both core and foundation subjects. We also offer a range of accreditation and a therapeutic curriculum. Individual timetables are designed to meet young people's academic needs, whilst taking account of health and emotional factors.
We work in consultation with families and carers along with medical and nursing staff. The school operates within a multi-disciplinary framework; working closely with a range of professionals including consultant doctors, mental health professionals, occupational therapists, speech therapists, fitness instructors and other agencies.
|
"""
Django settings for foodbank project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import platform
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# -> this *MUST BE OVERRIDEN* with settings_local in production
SECRET_KEY = 'e6#83hi)*reeq2lk1v9y59u(z@i7(wto-ter#q&3ii8f6t8n2x'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Hosts this site may serve: local development hosts plus the public
# toidupank.ee deployment domains.
ALLOWED_HOSTS = [
    '127.0.0.1',
    'localhost',
    'osale.toidupank.ee',
    'test-osale.toidupank.ee',
    'uus-osale.toidupank.ee',
]
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # tinymce and nested_admin are third-party packages; auditlog is
    # presumably the django-auditlog package -- confirm; the remaining
    # entries are project apps.
    'tinymce',
    'nested_admin',
    'locations',
    'campaigns',
    'coordinators',
    'volunteers',
    'auditlog',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'foodbank.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        # Templates are discovered inside each installed app.
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'foodbank.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# SQLite file in the project root; suitable for development.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'et-ee'
TIME_ZONE = 'Europe/Tallinn'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
# Collected static files and uploads live under an htdocs directory
# that sits next to the project directory.
HTDOCS = os.path.join(BASE_DIR, '..', 'htdocs')
STATIC_URL = '/static/media/'
STATIC_ROOT = os.path.join(HTDOCS, 'static', 'media')
MEDIA_URL = '/static/uploads/'
MEDIA_ROOT = os.path.join(HTDOCS, 'static', 'uploads')
# Rich-text editor
TINYMCE_DEFAULT_CONFIG = {
    'plugins': 'link lists code',
    'menubar': 'edit format',
    'toolbar': 'undo redo | styleselect | bold italic | removeformat | link | bullist numlist | code',
    'width': 500,
    'height': 400,
}
# Fix Estonian date formatting
FORMAT_MODULE_PATH = 'foodbank.formats'
EMAIL_BACKEND = 'django_sendmail_backend.backends.EmailBackend'
# For testing email:
# EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
# EMAIL_FILE_PATH = '/tmp/toidupank-email-messages'
# Admin site is served under a non-default prefix.
ADMIN_URL_PREFIX = 'haldus/'
# Mandatory settings override in production environment
PRODUCTION_HOSTNAME = 'atria.elkdata.ee'
IS_PRODUCTION_ENV = False
# The live deployment is identified by both the server hostname and the
# checkout path containing 'live-osale'.
if platform.node() == PRODUCTION_HOSTNAME and 'live-osale' in BASE_DIR:
    IS_PRODUCTION_ENV = True
    from .settings_live import *
else:
    # Optional local settings override, especially useful during development
    try:
        from .settings_local import *
    except ImportError:
        pass
|
Show off your skills with Blade Mahjong! This Mahjong game may seem simple at first glance, but it can be tricky. The long line of mahjong tiles across the center of the blade can prove trouble, if you don't pay enough attention. Great news though, 24/7 Mahjong has now added a shuffle button on the no more moves menu, in case you need a little help along the way and want to play to a mahjong victory!
|
# -*- coding: utf-8 -*-
import datetime
# Number of records per generated page.
PAGINATION = 250
# SOA fields: primary master name server and zone-admin mail address.
PRIMARY_MASTER = "rvvmdns03pl.server.intra.rve."
EMAIL_ADMIN = "nsmaster.regione.veneto.it."
BASE_DIR = "/home/chiara/dinosaurus"
VERSION_FILE = "/home/chiara/dinosaurus/VERSION"
# Read the product version once at import time; the context manager
# guarantees the handle is closed even if reading fails (the original
# open/readlines/close sequence leaked the handle on error).
with open(VERSION_FILE, 'r') as fd:
    VERSION = fd.read().strip()
THEMES_DIR = BASE_DIR + "/share/themes"
TEMPLATES_HTML = BASE_DIR + "/var/templates/html"
TEMPLATES_CSS = BASE_DIR + "/var/templates/css"
TEMPLATES_IMG = BASE_DIR + "/var/templates/img"
TEMPLATES_JS = BASE_DIR + "/var/templates/js"
TEMPLATES_FONTS = BASE_DIR + "/var/templates/fonts"
t = datetime.datetime.today()
# Zone serial in YYYYMMDDnn form; the daily revision counter is fixed at 00.
SERIAL_NUMBER = "%4.4d%2.2d%2.2d%2.2d" % (t.year, t.month, t.day, 0)
HTML_BASE_VAR = {"VERSION": VERSION,
                 "TIMESTAMP": t.strftime("%A, %d %B %Y - %H:%M")}
# Default values for TTL/SOA fields.  Each entry is a list of accepted
# defaults, the first being the canonical one.
DEFAULTS = {
    "ttl": [86400],
    "nx_ttl": [3600],
    "refresh": [86400, 900],
    "retry": [1800, 600],
    "expiry": [2592000, 86400],
}
def get_non_default(data, defaults):
    """Return the elements that do not carry a default value, or the first default.

    :param data: Array of values. Elements equal to "_" are ignored.
    :param defaults: Array of default values.
    :return: * If no element of data has a non-default value, the first default.
             * If exactly one element is non-default, that element.
             * Otherwise the list of non-default elements.
    """
    default_strs = [str(d) for d in defaults]
    if type(data) is not list:
        return str(data)
    remaining = [item for item in data
                 if item != "_" and item not in default_strs]
    if not remaining:
        return default_strs[0]
    if len(remaining) == 1:
        return remaining[0]
    return remaining
def max_or_default(data, defaults):
    """Return the largest non-default element, or the first default value.

    :param data: Array of values. Elements equal to "_" are ignored.
    :param defaults: Array of default values.
    :return: * If no element of data has a non-default value, the first default.
             * Otherwise the largest non-default element, as a string.
    """
    value = get_non_default(data, defaults)
    if not value:
        return defaults[0]
    if type(value) is not list:
        return str(value)
    return str(max(int(v) for v in value))
def min_or_default(data, defaults):
    """Return the smallest non-default element, or the first default value.

    :param data: Array of values. Elements equal to "_" are ignored.
    :param defaults: Array of default values.
    :return: * If no element of data has a non-default value, the first default.
             * Otherwise the smallest non-default element, as a string.
    """
    value = get_non_default(data, defaults)
    if not value:
        return defaults[0]
    if type(value) is not list:
        return str(value)
    return str(min(int(v) for v in value))
class GetSequence(object):
    """Callable object yielding consecutive integers starting at 0."""

    def __init__(self):
        # Start below zero so the first call returns 0.
        self.val = -1

    def __call__(self):
        self.val += 1
        return self.val
def ip_cmp(x, y):
    """Compare two IP addresses (cmp-style result).

    :param x: ip
    :param y: ip
    :return: * 1 if x > y;
             * 0 if x == y;
             * -1 if x < y.

    Ordering: "::1" sorts first, then 127.x, then the 0.x/10.x group,
    then the 172.16-31.x, 192.168.x and 169.254.x private ranges, and
    finally everything else compared octet by octet.  An address that
    does not parse as dotted decimal sorts before one that does.

    Rewritten with list comprehensions and modern ``except`` syntax so
    the module also runs under Python 3 (``map`` objects are not
    subscriptable and ``except ValueError, e`` is a syntax error there);
    behaviour under Python 2 is unchanged.
    """
    if x == y:
        return 0
    if y == "::1":
        return 1
    if x == "::1":
        return -1
    # NOTE(review): assumes dotted-quad input; fewer than 4 octets would
    # raise IndexError below, exactly as in the original implementation.
    try:
        x_t = [int(part) for part in x.split(".")[:4]]
    except ValueError:
        return -1
    try:
        y_t = [int(part) for part in y.split(".")[:4]]
    except ValueError:
        return 1
    if (x_t[0] == 127) and (y_t[0] != 127):
        return -1
    if (x_t[0] != 127) and (y_t[0] == 127):
        return 1
    if (x_t[0] in [127, 0, 10]) and (y_t[0] not in [127, 0, 10]):
        return -1
    if (x_t[0] not in [127, 0, 10]) and (y_t[0] in [127, 0, 10]):
        return 1
    if (x_t[0] == 172) and (x_t[1] in range(16, 32)):
        if (y_t[0] != 172):
            return -1
        if (y_t[1] not in range(16, 32)):
            return -1
    if (y_t[0] == 172) and (y_t[1] in range(16, 32)):
        if (x_t[0] != 172):
            return 1
        if (x_t[1] not in range(16, 32)):
            return 1
    if (x_t[0] == 192) and (x_t[1] == 168):
        if (y_t[0] != 192):
            return -1
        if (y_t[1] != 168):
            return -1
    if (y_t[0] == 192) and (y_t[1] == 168):
        if (x_t[0] != 192):
            return 1
        if (x_t[1] != 168):
            return 1
    if (x_t[0] == 169) and (x_t[1] == 254):
        if (y_t[0] != 169):
            return -1
        if (y_t[1] != 254):
            return -1
    if (y_t[0] == 169) and (y_t[1] == 254):
        if (x_t[0] != 169):
            return 1
        if (x_t[1] != 254):
            return 1
    # Plain octet-by-octet comparison for everything else.
    for n in range(0, 4):
        if x_t[n] < y_t[n]:
            return -1
        if x_t[n] > y_t[n]:
            return 1
    return 0
|
Young ones can enjoy hours of awesome superhero adventures with the latest Jolly Kiddie Meal toys, the DC Justice League! Jolly kids can help unite the league by collecting Superman, Batman, Wonder Woman, The Flash, and Supergirl to fight evil forces and save the world.
With their super strengths and abilities, powerful gadgets, lightning fast speed, and special powers, the DC Justice League superheroes can truly make playtime more exciting for little heroes.
Let the Man of Steel float in the air as he watches over the city by assembling the base and placing Superman on top. Kids can let the Caped Crusader fight criminals by simply inserting discs inside the Batmobile and pressing down on Batman’s head to shoot the discs. The Flash can also run after escaped enemies while leaving a lightning trail behind. Simply hold the base firmly and push him forward to see the base light up as he runs.
Trust Wonder Woman to round up the bad guys without a sweat. Just insert the lasso into her hand and rotate the dial at her back to watch her spin the magical rope. Supergirl’s powerful punch is enough to knock out even the toughest criminals. Simply slide the lever at her back and release it to perform her powerful punch.
Collect the five DC Justice League superheroes with every purchase of a Yumburger (P80), Yumburger Meal with Drink (P100), Jolly Spaghetti (P95), Jolly Spaghetti Meal with Drink (P105), 1-piece Burger Steak (P95), 1-piece Burger Steak Meal with Drink (P105), 1-piece Chickenjoy with rice (P120), or 1-piece Chickenjoy Meal with Drink (P135), each with its own Jolly Joy Box.
Hurry and help unite the league with the DC Justice League toys, available until October 31. Visit your nearest Jollibee branch to collect all five superheroes!
|
# -*- coding: utf-8; -*-
"""Export methods for Inventory application."""
import tarfile
import time
import io
import hashlib
from pathlib import PurePath
from yaml import dump
try:
from yaml import CDumper as Dumper
except ImportError:
from yaml import Dumper
from django.core import serializers
from django.conf import settings
from django.utils.text import slugify
from rest_framework.renderers import JSONRenderer
import pharmaship.inventory.models as models
import pharmaship.inventory.serializers
from pharmaship.core.utils import remove_yaml_pk, get_content_types
from pharmaship.core.utils import log, query_count_all
def serialize_allowance(allowance, content_types):
    """Export an allowance using the YAML format.

    To have an usable export, the user needs:

    - the :mod:`pharmaship.inventory.models.Allowance` selected instance,

    And related to this instance:

    - the :mod:`pharmaship.inventory.models.Molecule` objects list,
    - the :mod:`pharmaship.inventory.models.Equipment` objects list,
    - the :mod:`pharmaship.inventory.models.MoleculeReqQty` objects list,
    - the :mod:`pharmaship.inventory.models.EquipmentReqQty` objects list,
    - the :mod:`pharmaship.inventory.models.RescueBagReqQty` objects list,
    - the :mod:`pharmaship.inventory.models.FirstAidKitReqQty` objects list,
    - the :mod:`pharmaship.inventory.models.TelemedicalReqQty` objects list,
    - the :mod:`pharmaship.inventory.models.LaboratoryReqQty` objects list.

    This function grabs all these together in a list of tuples::

        [('filename.yaml', <yaml content string>)]

    In addition, it returns the Equipment and Molecule lists.

    :param pharmaship.inventory.models.Allowance allowance: Allowance to \
    serialize.
    :param dict content_types: mapping with "molecule" and "equipment" \
    ContentType ids, used to split the generic FirstAidKit/RescueBag \
    requirement rows.

    :return: List of tuples filenames and streams
    :rtype: tuple(list(tuple(str, str)), django.db.models.query.QuerySet, \
    django.db.models.query.QuerySet)
    """
    log.debug("Start serialize")
    # Single DRF renderer reused for every *_reqqty JSON payload below.
    renderer = JSONRenderer()
    # Molecules used by the allowance
    # The two id lists accumulate the primary keys referenced by each
    # requirement category; they drive the final Molecule/Equipment queries.
    molecule_id_list = []
    equipment_id_list = []

    # Each category below follows the same pattern: query the requirement
    # rows for this allowance, record the referenced base ids, serialize
    # with the category's DRF serializer and render to indented JSON.
    # query_count_all() presumably logs DB query statistics after each
    # step -- see pharmaship.core.utils; confirm.

    # Required quantities for molecules
    molecule_reqqty_list = models.MoleculeReqQty.objects.filter(allowance__in=[allowance]).prefetch_related("base")

    molecule_id_list += molecule_reqqty_list.values_list("base_id", flat=True)

    serialized = pharmaship.inventory.serializers.MoleculeReqQtySerializer(molecule_reqqty_list, many=True)
    molecule_reqqty_data = renderer.render(
        data=serialized.data,
        accepted_media_type='application/json; indent=2'
    )
    query_count_all()

    # Required quantities for equipments
    equipment_reqqty_list = models.EquipmentReqQty.objects.filter(allowance__in=[allowance]).prefetch_related("base")

    equipment_id_list += equipment_reqqty_list.values_list("base_id", flat=True)

    serialized = pharmaship.inventory.serializers.EquipmentReqQtySerializer(equipment_reqqty_list, many=True)
    equipment_reqqty_data = renderer.render(
        data=serialized.data,
        accepted_media_type='application/json; indent=2'
    )
    query_count_all()

    # Required quantities for Laboratory
    laboratory_reqqty_list = models.LaboratoryReqQty.objects.filter(allowance__in=[allowance]).prefetch_related("base")

    equipment_id_list += laboratory_reqqty_list.values_list("base_id", flat=True)

    serialized = pharmaship.inventory.serializers.LaboratoryReqQtySerializer(laboratory_reqqty_list, many=True)
    laboratory_reqqty_data = renderer.render(
        data=serialized.data,
        accepted_media_type='application/json; indent=2'
    )
    query_count_all()

    # Required quantities for Telemedical
    telemedical_reqqty_list = models.TelemedicalReqQty.objects.filter(allowance__in=[allowance]).prefetch_related("base")

    equipment_id_list += telemedical_reqqty_list.values_list("base_id", flat=True)

    serialized = pharmaship.inventory.serializers.TelemedicalReqQtySerializer(telemedical_reqqty_list, many=True)
    telemedical_reqqty_data = renderer.render(
        data=serialized.data,
        accepted_media_type='application/json; indent=2'
    )
    query_count_all()

    # Required quantities for First Aid Kit
    # Generic relation: rows may point at either a Molecule or an
    # Equipment, distinguished by content_type_id.
    first_aid_kit_reqqty_list = models.FirstAidKitReqQty.objects.filter(allowance__in=[allowance]).prefetch_related("base")

    molecule_id_list += first_aid_kit_reqqty_list.filter(
        content_type_id=content_types["molecule"]
    ).values_list("object_id", flat=True)

    equipment_id_list += first_aid_kit_reqqty_list.filter(
        content_type_id=content_types["equipment"]
    ).values_list("object_id", flat=True)

    serialized = pharmaship.inventory.serializers.FirstAidKitReqQtySerializer(first_aid_kit_reqqty_list, many=True)
    first_aid_kit_reqqty_data = renderer.render(
        data=serialized.data,
        accepted_media_type='application/json; indent=2'
    )
    query_count_all()

    # Required quantities for Rescue Bag
    # Same generic-relation handling as the First Aid Kit above.
    rescue_bag_reqqty_list = models.RescueBagReqQty.objects.filter(allowance__in=[allowance]).prefetch_related("base")

    molecule_id_list += rescue_bag_reqqty_list.filter(
        content_type_id=content_types["molecule"]
    ).values_list("object_id", flat=True)

    equipment_id_list += rescue_bag_reqqty_list.filter(
        content_type_id=content_types["equipment"]
    ).values_list("object_id", flat=True)

    serialized = pharmaship.inventory.serializers.RescueBagReqQtySerializer(rescue_bag_reqqty_list, many=True)
    rescue_bag_reqqty_data = renderer.render(
        data=serialized.data,
        accepted_media_type='application/json; indent=2'
    )
    query_count_all()

    # Equipment used by the allowance
    # Referenced objects are exported as YAML with natural foreign keys
    # so they can be re-imported without depending on primary keys.
    equipment_list = models.Equipment.objects.filter(id__in=equipment_id_list).prefetch_related("group")

    equipment_data = serializers.serialize(
        "yaml",
        equipment_list,
        use_natural_foreign_keys=True,
        fields=(
            "name_en",
            "packaging_en",
            "remark_en",
            "consumable",
            "perishable",
            "picture",
            "group",
        )
    )
    log.debug("Equipment")
    query_count_all()

    # Molecule used by the allowance
    molecule_list = models.Molecule.objects.filter(id__in=molecule_id_list).prefetch_related("group")

    molecule_data = serializers.serialize(
        "yaml",
        molecule_list,
        use_natural_foreign_keys=True,
        fields=(
            "name_en",
            "composition_en",
            "remark_en",
            "roa",
            "dosage_form",
            "medicine_list",
            "group",
        )
    )
    log.debug("Molecule")
    query_count_all()

    # Allowance record
    allowance_data = serializers.serialize(
        "yaml",
        (allowance,),
        fields=('name', 'author', 'version', 'date', 'additional'),
        use_natural_foreign_keys=True
    )
    log.debug("Allowance")
    query_count_all()

    log.debug("End serialize")
    # Returning a list with tuples: (filename, data)
    return ([
        ('inventory/molecule_obj.yaml', remove_yaml_pk(molecule_data)),
        ('inventory/equipment_obj.yaml', remove_yaml_pk(equipment_data)),
        ('inventory/molecule_reqqty.json', molecule_reqqty_data),
        ('inventory/equipment_reqqty.json', equipment_reqqty_data),
        ('inventory/laboratory_reqqty.json', laboratory_reqqty_data),
        ('inventory/telemedical_reqqty.json', telemedical_reqqty_data),
        ('inventory/first_aid_kit_reqqty.json', first_aid_kit_reqqty_data),
        ('inventory/rescue_bag_reqqty.json', rescue_bag_reqqty_data),
        ('inventory/allowance.yaml', remove_yaml_pk(allowance_data)),
    ], equipment_list, molecule_list)
def get_pictures(equipment_list):
    """Collect the picture paths referenced by the given equipments.

    :param equipment_list: Equipment queryset from a serialized allowance.
    :type equipment_list: django.db.models.query.QuerySet
    :return: Picture filenames, one per equipment with a non-empty picture.
    :rtype: django.db.models.query.QuerySet
    """
    # Only equipments that actually have a picture attached.
    with_picture = equipment_list.exclude(picture='')
    return with_picture.values_list('picture', flat=True)
def get_hash(name, content=None, filename=None):
    """Return sha256 hash and filename for MANIFEST file.

    Exactly one of `content` or `filename` should be provided; when both
    are given, `content` takes precedence.

    :param str name: Name of the file to hash (as written in the MANIFEST).
    :param content: Content of the file to hash.
    :type content: bytes or str
    :param str filename: Path to the file to hash.
    :return: Name and file hash in hexadecimal string, or ``None`` when
        nothing to hash was provided or the file is unreadable.
    :rtype: tuple(str, str) or None
    """
    if content is None and filename is None:
        return None

    m = hashlib.sha256()
    # Compare against None (not truthiness) so that empty content ("" or
    # b"") is hashed as-is instead of silently falling back to `filename`.
    if content is not None:
        if isinstance(content, bytes):
            m.update(content)
        else:
            m.update(bytes(content, "utf-8"))
    elif filename:
        try:
            with open(filename, 'rb') as fdesc:
                m.update(fdesc.read())
        except OSError as error:
            # Unreadable file: log and signal failure to the caller.
            log.error("File %s not readable. %s", filename, error)
            return None

    return (name, m.hexdigest())
def create_tarinfo(name, content):
    """Build the :class:`tarfile.TarInfo` and payload for a virtual file.

    :param str name: Name of the file.
    :param content: Content of the file to add to the tar file.
    :type content: bytes or str
    :return: :class:`tarfile.TarInfo` and :class:`io.BytesIO` instance of
        the file content.
    :rtype: tuple
    """
    # Normalize the payload to bytes before wrapping it in a buffer.
    payload = content if isinstance(content, bytes) else bytes(content, "utf-8")
    buffer = io.BytesIO(payload)

    # Regular file owned by root, stamped with the current time.
    info = tarfile.TarInfo()
    info.name = name
    info.type = tarfile.REGTYPE
    info.uid = info.gid = 0
    info.uname = info.gname = "root"
    info.mtime = time.time()
    info.size = len(payload)

    return (info, buffer)
def create_manifest(items):
    """Create the data to write into the MANIFEST file.

    Each entry is rendered as ``<hash> <name>`` on its own line.

    :param list(tuple) items: list of (name, hash) pairs.
    :return: Formatted string.
    :rtype: str
    """
    return "".join(
        "{0} {1}\n".format(digest, name) for name, digest in items
    )
def create_package_yaml(allowance):
    """Export package info in YAML string.

    :param allowance: Allowance instance to export.
    :type allowance: pharmaship.inventory.models.Allowance
    :return: YAML string containing Allowance data.
    :rtype: str
    """
    # Package descriptor: allowance metadata plus per-module flags.
    package = {
        "info": {
            "author": allowance.author,
            "date": allowance.date,
            "version": allowance.version,
        },
        "modules": {
            "inventory": {
                "install_file": False,
            },
        },
    }
    return dump(package, Dumper=Dumper)
def create_pot(allowance):
    """Create a PO template file for Equipment & Molecule strings.

    Serializes the allowance, collects all translatable strings from the
    related equipments and molecules, and renders them as empty ``msgid``
    entries in gettext POT format.
    """
    # Get serialized Allowance data
    content_types = get_content_types()
    _data, equipment_list, molecule_list = serialize_allowance(allowance, content_types)

    # Collect the translatable source strings.
    # NOTE(review): this reads `name`/`packaging`/`remark` while the
    # serialization and create_po use the `*_en` fields -- confirm these
    # resolve to the same (English) values.
    strings = []
    for item in equipment_list:
        strings.append(item.name)
        strings.append(item.packaging)
        strings.append(item.remark)

    for item in molecule_list:
        strings.append(item.name)
        strings.append(item.remark)

    # Remove empty strings
    strings = list(filter(None, strings))
    # Remove duplicates
    strings = list(set(strings))
    # Sort for easier translation
    strings.sort()

    # Create POT file. The header lines are kept flush-left so the
    # generated template has no leading whitespace.
    result = """msgid ""
msgstr ""
"Project-Id-Version: Pharmaship export\\n"
"MIME-Version: 1.0\\n"
"Content-Type: text/plain; charset=UTF-8\\n"
"Content-Transfer-Encoding: 8bit\\n"\n
"""
    for item in strings:
        # Escape embedded double quotes for the PO format.
        result += "msgid \"{0}\"\n".format(item.replace("\"", "\\\""))
        result += "msgstr \"\"\n\n"

    return result
def create_po(allowance, lang_code):
    """Create a PO file of existing translations for an allowance.

    Maps each English source string of the allowance's equipments and
    molecules to its translation in `lang_code` and renders the result in
    gettext PO format.

    :param allowance: Allowance instance to export.
    :type allowance: pharmaship.inventory.models.Allowance
    :param str lang_code: Language code of the translated fields.
    :return: PO file content.
    :rtype: str
    """
    # Get serialized Allowance data
    content_types = get_content_types()
    _data, equipment_list, molecule_list = serialize_allowance(allowance, content_types)

    # Map each English source string to its translation.
    strings = {}
    for item in equipment_list:
        strings[item.name_en] = getattr(item, "name_{0}".format(lang_code))
        strings[item.packaging_en] = getattr(item, "packaging_{0}".format(lang_code))
        strings[item.remark_en] = getattr(item, "remark_{0}".format(lang_code))

    for item in molecule_list:
        strings[item.name_en] = getattr(item, "name_{0}".format(lang_code))
        strings[item.composition_en] = getattr(item, "composition_{0}".format(lang_code))
        strings[item.remark_en] = getattr(item, "remark_{0}".format(lang_code))

    # Create PO file. Header lines are flush-left so the generated file
    # has no leading whitespace.
    result = """msgid ""
msgstr ""
"Project-Id-Version: Pharmaship export\\n"
"MIME-Version: 1.0\\n"
"Language: {0}\\n"
"Plural-Forms: nplurals=2; plural=(n != 1)\\n"
"Content-Type: text/plain; charset=UTF-8\\n"
"Content-Transfer-Encoding: 8bit\\n"\n
""".format(lang_code.lower())

    for item in strings:
        if not item:
            continue
        # Untranslated fields may come back as None: emit an empty msgstr
        # instead of crashing on None.replace().
        translation = strings[item] or ""
        result += "msgid \"{0}\"\n".format(item.replace("\"", "\\\""))
        result += "msgstr \"{0}\"\n\n".format(translation.replace("\"", "\\\""))

    return result
def create_archive(allowance, file_obj):
    """Create an archive from the given `Allowance` instance.

    The response is a tar.gz file containing YAML files generated by the
    function `serialize_allowance`.

    Pictures are added if any.

    The package description file (``package.yaml``) and the ``MANIFEST`` file
    are created at the end.

    :param allowance: Allowance instance to export
    :type allowance: pharmaship.inventory.models.Allowance

    :param file_obj: Destination file object
    :type file_obj: argparse.FileType or any compatible file object

    :return: ``True`` if success
    :rtype: bool
    """
    # Creating a tar.gz archive
    # NOTE(review): mode='w' produces an *uncompressed* tar stream even
    # though the docstring mentions tar.gz -- confirm compression is
    # applied elsewhere (e.g. by the caller) or intended.
    hashes = []

    serialized_data, equipment_list, molecule_list = serialize_allowance(
        allowance=allowance,
        content_types=get_content_types()
    )

    with tarfile.open(fileobj=file_obj, mode='w') as tar:
        # Processing the database: one virtual file per serialized chunk.
        for item in serialized_data:
            info, f = create_tarinfo(item[0], item[1])
            tar.addfile(info, f)
            # NOTE(review): get_hash may return None on failure; a None
            # entry in `hashes` would break create_manifest -- confirm
            # inputs here can never trigger that path.
            hashes.append(get_hash(info.name, content=item[1]))

        # Adding the pictures of Equipment
        for item in get_pictures(equipment_list):
            picture_filename = settings.PICTURES_FOLDER / item
            log.debug(picture_filename)
            try:
                tar.add(picture_filename, arcname=PurePath("pictures", item))
            # TODO: Detail Exception
            except Exception as error:
                # Best effort: a missing picture is logged, not fatal.
                log.error("Error: %s", error)
            hashes.append(
                get_hash(PurePath("pictures", item), filename=picture_filename)
            )

        # Adding the translation files if any
        # TODO: Generate MO if only PO is found...
        mo_filename = "{0}.mo".format(slugify(allowance.name))
        for item in settings.TRANSLATIONS_FOLDER.glob("*/LC_MESSAGES/{0}".format(mo_filename)):
            log.debug(item)
            relative_path = PurePath("locale", item.relative_to(settings.TRANSLATIONS_FOLDER))
            tar.add(item, arcname=relative_path)
            hashes.append(get_hash(relative_path, filename=item))

            # Try to get also the PO file
            po_filename = item.with_suffix(".po")
            if po_filename.exists():
                log.debug(po_filename)
                relative_path = PurePath("locale", po_filename.relative_to(settings.TRANSLATIONS_FOLDER))
                tar.add(po_filename, arcname=relative_path)
                hashes.append(get_hash(relative_path, filename=po_filename))

        # Add the package description file
        package_content = create_package_yaml(allowance)
        info, f = create_tarinfo("package.yaml", package_content)
        tar.addfile(info, f)
        hashes.append(get_hash("package.yaml", content=package_content))

        # Add the MANIFEST (must be last: it covers every file above).
        manifest_content = create_manifest(hashes)
        info, f = create_tarinfo("MANIFEST", manifest_content)
        tar.addfile(info, f)

    return True
|
Yuri fans, rejoice! I have put together a list of yuri/shoujo-ai anime and manga. If I missed something, please let me know, but make sure that girls' romance is the main focus of the story or is very prevalent throughout the series.
This thread will also serve as a general discussion thread for anything yuri related.
If you need help finding where to download a series listed below, just ask and I or someone else here will help you out. I'll eventually work on providing download links for every completed series listed below.
|
"""
IPTables configuration
======================
Module for processing output of the ``iptables-save`` and ``ip6tables-save``
commands. Parsers included are:
IPTables - command ``iptables-save``
------------------------------------
IP6Tables - command ``ip6tables-save``
--------------------------------------
IPTabPermanent - file ``/etc/sysconfig/iptables``
-------------------------------------------------
IP6TabPermanent - file ``/etc/sysconfig/ip6tables``
---------------------------------------------------
Sample input data looks like::
# Generated by iptables-save v1.4.7 on Tue Aug 16 10:18:43 2016
*filter
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [769:196899]
:REJECT-LOG - [0:0]
-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
-A INPUT -s 192.168.0.0/24 -j ACCEPT
-A INPUT -p icmp -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 22 -j ACCEPT
-A INPUT -j REJECT --reject-with icmp-host-prohibited
-A REJECT-LOG -p tcp -j REJECT --reject-with tcp-reset
COMMIT
# Completed on Tue Aug 16 10:18:43 2016
# Generated by iptables-save v1.4.7 on Tue Aug 16 10:18:43 2016
*mangle
:PREROUTING ACCEPT [451:22060]
:INPUT ACCEPT [451:22060]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [594:47151]
:POSTROUTING ACCEPT [594:47151]
COMMIT
# Completed on Tue Aug 16 10:18:43 2016
# Generated by iptables-save v1.4.7 on Tue Aug 16 10:18:43 2016
*nat
:PREROUTING ACCEPT [0:0]
:POSTROUTING ACCEPT [3:450]
:OUTPUT ACCEPT [3:450]
COMMIT
# Completed on Tue Aug 16 10:18:43 2016
* Each table of iptables starts with a ``# Generated by ...`` line.
* Each table starts with ``*<table-name>``, for example ``*filter``.
* Each chain specifications starts with a ``:`` sign.
* A chain specification looks like ``:<chain-name> <chain-policy> [<packet-counter>:<byte-counter>]``
* The chain-name may be for example ``INPUT``.
* Each ``iptables`` rule starts with a `-` sign.
Examples:
>>> ipt.rules[0] == {'target': 'ACCEPT', 'chain': 'INPUT', 'rule': '-m state --state RELATED,ESTABLISHED -j ACCEPT', 'table': 'filter', 'target_options': None, 'target_action': 'jump', 'constraints': '-m state --state RELATED,ESTABLISHED'}
True
>>> ipt.get_chain('INPUT')[1] == {'target': 'ACCEPT', 'chain': 'INPUT', 'rule': '-s 192.168.0.0/24 -j ACCEPT', 'table': 'filter', 'target_options': None, 'target_action': 'jump', 'constraints': '-s 192.168.0.0/24'}
True
>>> ipt.table_chains('mangle') == {'FORWARD': [], 'INPUT': [], 'POSTROUTING': [], 'PREROUTING': [], 'OUTPUT': []}
True
>>> ipt.get_table('nat')[-1] == {'policy': 'ACCEPT', 'table': 'nat', 'byte_counter': 450, 'name': 'OUTPUT', 'packet_counter': 3}
True
"""
from .. import Parser, parser, get_active_lines, CommandParser
from insights.specs import Specs
class IPTablesConfiguration(Parser):
    """
    A general class for parsing iptables configuration in the
    ``iptables-save``-like format.

    Attributes:
        chains (list): one dict per chain declaration (lines starting with
            ``:``) with keys ``name``, ``policy``, ``table``,
            ``packet_counter`` and ``byte_counter``.
        rules (list): one dict per rule (lines starting with ``-``) with
            keys ``table``, ``chain`` and ``rule``; when a jump/goto
            target is present, also ``target_action``, ``constraints``,
            ``target`` and ``target_options``.
    """

    def parse_content(self, content):
        self.chains = []
        self.rules = []
        # Tables are introduced by a "*<name>" line and apply to every
        # chain/rule until the next table marker.
        current_table = None

        for line in get_active_lines(content):
            if line.startswith("*"):
                current_table = line[1:].strip()
            elif line.startswith(":"):
                # Chain spec: ":<name> <policy> [<packets>:<bytes>]"
                name, policy, counter = line[1:].split()
                packet_counter, byte_counter = counter.strip("[]").split(":")
                self.chains.append({
                    # User-defined chains carry "-" as placeholder policy.
                    "policy": policy if policy != "-" else None,
                    "table": current_table,
                    "name": name,
                    "packet_counter": int(packet_counter),
                    "byte_counter": int(byte_counter),
                })
            elif line.startswith("-"):
                # Rule line, e.g. "-A INPUT -p icmp -j ACCEPT".
                # NOTE(review): line[3:] assumes a two-character option
                # plus a space (e.g. "-A ") before the chain name --
                # confirm only such forms occur in saved rule sets.
                line_spl = line[3:].split(None, 1)
                if not line_spl:
                    continue
                chain_name = line_spl[0]
                rule = line_spl[1] if len(line_spl) == 2 else ''
                # Detect a jump ("-j") or goto ("-g") target by substring;
                # the last matching form is used as the separator.
                target_option = [i for i in (' -j', '-j ', ' -g', '-g ') if i in rule]
                if target_option:
                    constraints, target = [i.strip() for i in rule.split(target_option[-1])]
                    # Split target options off the target name, e.g.
                    # "REJECT --reject-with icmp-host-prohibited".
                    if " " in target:
                        target, target_options = target.split(None, 1)
                    else:
                        target_options = None
                    self.rules.append({
                        "table": current_table,
                        "chain": chain_name,
                        "rule": rule,
                        "target_action": "jump" if target_option[-1].strip() == "-j" else "goto",
                        "constraints": constraints,
                        "target": target,
                        "target_options": target_options
                    })
                else:
                    # Rule without a -j/-g target: store the raw pieces only.
                    self.rules.append({
                        "table": current_table,
                        "chain": chain_name,
                        "rule": rule
                    })

    def get_chain(self, name, table="filter"):
        """
        Get the list of rules for a particular chain. Chain order is kept intact.

        Args:
            name (str): chain name, e.g. ``INPUT``
            table (str): table name, defaults to ``filter``

        Returns:
            list: rules
        """
        return [r for r in self.rules if r["table"] == table and r["chain"] == name]

    def get_table(self, name="filter"):
        """
        Get the list of chains for a particular table.

        Args:
            name (str): table name, defaults to ``filter``

        Returns:
            list: chains
        """
        return [c for c in self.chains if c["table"] == name]

    def table_chains(self, table="filter"):
        """
        Get a dict where the keys are all the chains for the given table
        and each value is the list of rules defined for the given chain.

        Args:
            table (str): table name, defaults to ``filter``

        Returns:
            dict: chains with list of defined rules
        """
        return dict((c["name"], self.get_chain(c["name"], table)) for c in self.get_table(table))

    def get_rule(self, s):
        """
        Get the list of rules that contain the given string.

        Args:
            s (str): string to look for in iptables rules

        Returns:
            list: rules containing given string
        """
        return [r for r in self.rules if s in r["rule"]]

    def __contains__(self, s):
        # Substring membership against the raw rule text of every rule.
        return any(s in r["rule"] for r in self.rules)
@parser(Specs.iptables)
class IPTables(CommandParser, IPTablesConfiguration):
    """
    Process output of the ``iptables-save`` command.

    All parsing logic lives in the base class; this subclass only binds
    the parser to the ``iptables-save`` command spec.

    See the :py:class:`insights.parsers.iptables.IPTablesConfiguration` base
    class for additional information.
    """
    pass
@parser(Specs.ip6tables)
class IP6Tables(CommandParser, IPTablesConfiguration):
    """
    Process output of the ``ip6tables-save`` command.

    All parsing logic lives in the base class; this subclass only binds
    the parser to the ``ip6tables-save`` command spec.

    See the :py:class:`insights.parsers.iptables.IPTablesConfiguration` base
    class for additional information.
    """
    pass
@parser(Specs.iptables_permanent)
class IPTabPermanent(IPTablesConfiguration):
    """
    Process ``iptables`` configuration saved in file ``/etc/sysconfig/iptables``.

    The configuration in this file is loaded by the ``iptables`` service when the system boots.
    New configuration is saved by using the ``service iptables save`` command. This configuration
    file is not available on a system with ``firewalld`` service.

    All parsing logic lives in the base class; this subclass only binds
    the parser to the file spec.

    See the :py:class:`insights.parsers.iptables.IPTablesConfiguration` base
    class for additional information.
    """
    pass
@parser(Specs.ip6tables_permanent)
class IP6TabPermanent(IPTablesConfiguration):
    """
    Process ``ip6tables`` configuration saved in file ``/etc/sysconfig/ip6tables``.

    The configuration in this file is loaded by the ``ip6tables`` service when the system boots.
    New configuration is saved by using the ``service ip6tables save`` command. This configuration
    file is not available on a system with ``firewalld`` service.

    All parsing logic lives in the base class; this subclass only binds
    the parser to the file spec.

    See the :py:class:`insights.parsers.iptables.IPTablesConfiguration` base
    class for additional information.
    """
    pass
|
iconsDB.com currently has 4185 icons in the database that you can customize and download in any color and any size you want! 516,383,778 icon downloads and counting! 2709 icons can be used freely in both personal and commercial projects with no attribution required (though attribution is always appreciated), and 1476 icons require a link to be used. All logos and trademarks presented in some icons are copyright of their respective trademark owners.
|
#----------------------------------------------------------------------
# Copyright (c) 2013 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
from GenericInstaller import GenericInstaller
from gram.am.gram import config
class Keystone(GenericInstaller):
    """Installer for the OpenStack Keystone identity service.

    Queues shell commands (via the GenericInstaller helpers) that install,
    configure, or uninstall Keystone on the control node.
    """

    # Return a list of command strings for installing this component
    def installCommands(self):
        """Queue the shell commands that install and configure Keystone."""
        self.comment("*** Keystone Install ***")

        self.add("rm -f /var/lib/keystone/keystone.db")

        # Set the SQL connection in /etc/keystone/conf
        self.comment("Step 2. Edit /etc/keystone/keystone.conf")
        keystone_user = config.keystone_user
        keystone_password = config.keystone_password
        keystone_conf_filename = '/etc/keystone/keystone.conf'
        saved_keystone_conf_filename = '/home/gram/gram/juno/install/control_files/keystone.conf'
        backup_directory = config.backup_directory

        # Slashes are escaped for use inside a sed replacement expression.
        connection_command = "connection = mysql:\/\/" + \
            keystone_user + ":" + keystone_password + \
            "@" + config.control_host + "\/keystone"
        self.backup("/etc/keystone", backup_directory, "keystone.conf")
        # One-time random admin token shared between keystone.conf and the
        # data script below.
        self.add("TMPTOKEN=`openssl rand -hex 10`")
        self.add("cp " + saved_keystone_conf_filename + " " + keystone_conf_filename)
        self.sed("s/^connection =.*/"+connection_command+"/",
                 keystone_conf_filename)
        self.sed("s/^admin_token=.*/admin_token=${TMPTOKEN}/", keystone_conf_filename)

        # Restart keystone and create the database tables
        self.comment("Step 3. Restart Keystone and create DB tables")
        self.add("su -s /bin/sh -c \"keystone-manage db_sync\" keystone")
        self.add("service keystone restart")
        self.add("sleep 5")

        # Start a cron job that purges expired tokens hourly
        cron_cmd = "(crontab -l -u keystone 2>&1 | grep -q token_flush) || " + \
            "echo '@hourly /usr/bin/keystone-manage token_flush >/var/log/keystone/keystone-tokenflush.log 2>&1' >> /var/spool/cron/crontabs/keystone"
        self.add(cron_cmd)

        # Install data and endpoints
        self.comment("Step 4. Download data script")
        saved_data_script_filename = '/home/gram/gram/juno/install/control_files/keystone_basic.sh'
        data_script_filename = 'keystone_basic.sh'
        self.add("rm -f " + data_script_filename)
        self.add("cp " + saved_data_script_filename + " " + data_script_filename)
        self.sed("s/CONTROL_HOST=.*/CONTROL_HOST=" + config.control_host + "/", data_script_filename)
        self.sed("s/OS_SERVICE_TOKEN=.*/OS_SERVICE_TOKEN=${TMPTOKEN}/", data_script_filename)
        self.sed("s/OS_PASSWORD=.*/OS_PASSWORD=" + config.os_password + "/", data_script_filename)
        self.sed("s/OS_EMAIL=.*/OS_EMAIL=" + config.control_email_addr + "/", data_script_filename)
        self.sed("s/OS_SERVICE_PASSWORD=.*/OS_SERVICE_PASSWORD=" + config.service_password + "/", data_script_filename)
        self.add("chmod a+x ./" + data_script_filename)
        self.add("./" + data_script_filename)

        # Create the novarc file
        self.comment("Step 5. Create novarc file")
        novarc_file = "/etc/novarc"
        self.backup("/etc", backup_directory, "novarc")
        self.writeToFile("export OS_TENANT_NAME=admin", novarc_file)
        self.appendToFile("export OS_USERNAME=admin", novarc_file)
        self.appendToFile("export OS_PASSWORD=" + config.os_password, novarc_file)
        self.appendToFile("export OS_AUTH_URL=http://" + config.control_host + ":35357/v2.0", novarc_file)
        # Kept for reference: optional environment variables that are not
        # currently exported.
        #self.appendToFile("export OS_NO_CACHE=" + str(config.os_no_cache), novarc_file)
        #self.appendToFile("export OS_REGION_NAME=" + config.os_region_name, novarc_file)
        #self.appendToFile("export SERVICE_TOKEN=" + config.service_token, novarc_file)
        #self.appendToFile("export SERVICE_ENDPOINT=" + config.service_endpoint, novarc_file)
        self.add("sleep 5")
        self.add("source " + novarc_file)

    # Return a list of command strings for uninstalling this component
    def uninstallCommands(self):
        """Queue the shell commands that restore the pre-install configuration."""
        backup_directory = config.backup_directory
        self.comment("*** Keystone Uninstall ***")
        self.restore("/etc/keystone", backup_directory, "keystone.conf")
        self.restore("/etc/keystone", backup_directory, "logging.conf")
        self.restore("/etc", backup_directory, "novarc")
|
Are you tired of your regular routine and planning a getaway for the weekend? If so, what could be a better vacation than a visit to the Alton Towers theme park? This theme park and water park, located in Staffordshire, United Kingdom, has been ranked number one among the United Kingdom's top theme parks. And why shouldn't it be? With its mesmerizing attractions, Alton Towers totally deserves the first spot.
Alton Tower has been a legitimate source of entertainment for families since 1980. The theme park remains open from late March to early November, providing you with approximately 225 days to visit the exciting place whenever you want. This most visited theme park is home to many of the nation’s best, thrilling rides.
Nemesis, Oblivion, Air, Rita, and TH13TEEN – these are names of some of the appealing rides awaiting you at Alton Towers. That is not just it! In 2013, The Smiler was launched at this theme park, which is known to be the first 14 inversion roller coaster all around the globe. Isn’t that amazing? Also, Alton Towers not just offers a thrilling experience, but gives you an opportunity to calm your nerves and relax at the spas and Water Park situated here.
Alton Towers is not just a theme park, but is also a hotel complex. Thus, if you are planning to leave your home alone for the night, you can get yourself a room at the hotel and spend your night here for an enhanced experience. The interiors of the hotel quite creatively revolve around the exploits of explorer Sir Algernon. The space of the hotel is flooded with minute, quirky things, Sir Algernon’s very own flying ship placed in the middle of the hotel being one of them. If you are staying at the hotel then the park will be open to you an hour before the rest of the public can step in. Plus, the resort also offers some outstanding seasonal events, like Halloween scare fest, firework display, and so on.
Already packing your bags to leave for Alton Towers, but worried about how you are going to get there? Well, here is an option for you: you can hire our minibus. We can pick you up from your residence and take you to your destination, Alton Towers, in the shortest possible time and in a safe manner. Our drivers are professionals who excel at driving and know the routes accurately. If you hire our minibus, you will also free yourself of the headache of parking your own car and worrying about the traffic — that will be our responsibility. So pick up that phone now and hire our services for a vacation this weekend.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-21 19:42
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Batch, Comment, Image, Label and MyUser models."""

    initial = True

    dependencies = [
        # MyUser references the project's configured (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    # NOTE(review): several ForeignKey fields below use blank=True without
    # null=True; forms may accept an empty value while the database insert
    # fails -- confirm this is intended before altering the schema.
    operations = [
        migrations.CreateModel(
            name='Batch',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_time', models.DateTimeField(auto_now_add=True)),
                ('updated_time', models.DateTimeField(auto_now=True)),
                ('status', models.CharField(choices=[('1', 'To do'), ('2', 'To be labelled'), ('3', 'To be reviewed'), ('4', 'Done')], default='1', max_length=1)),
            ],
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_time', models.DateTimeField(auto_now_add=True)),
                ('message', models.CharField(max_length=20)),
                ('batch', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='app.Batch')),
            ],
        ),
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('src_path', models.CharField(max_length=50)),
                ('raw_path', models.CharField(max_length=20)),
                ('batch', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Batch')),
            ],
        ),
        migrations.CreateModel(
            name='Label',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Bounding box coordinates and size within the image.
                ('x', models.PositiveSmallIntegerField()),
                ('y', models.PositiveSmallIntegerField()),
                ('width', models.PositiveSmallIntegerField()),
                ('height', models.PositiveSmallIntegerField()),
                ('brand', models.CharField(max_length=20)),
                ('model', models.CharField(max_length=20)),
                ('color', models.CharField(max_length=10)),
                ('nickname', models.CharField(blank=True, max_length=20)),
                ('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Image')),
            ],
        ),
        migrations.CreateModel(
            name='MyUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('isreviewer', models.BooleanField(default=False)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AddField(
            model_name='comment',
            name='user',
            field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='app.MyUser'),
        ),
        # Separate related_name values so Batch can reference MyUser twice.
        migrations.AddField(
            model_name='batch',
            name='labeller',
            field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='labeller', to='app.MyUser'),
        ),
        migrations.AddField(
            model_name='batch',
            name='reviewer',
            field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='reviewer', to='app.MyUser'),
        ),
    ]
|
Michael Jackson's mother and temporary administrator of his estate is in possession of a will Jackson drew up three weeks before he died.
The will was handed over to Katherine Jackson by Michael's attorney John Branco today. Jackson had a net worth of $200 million when he died of a heart attack last Thursday.
In the will, Michael left everything to his mother and the three children he raised as his own. Jackson's father, who forced his children to call him "Joseph" all their lives, gets nothing.
Michael Jackson's older brother Jermaine Jackson was spotted outside a yogurt shop yesterday sporting a cool throwback vintage Jackson 5 jersey.
Meanwhile, Debbie Rowe's attorney is disputing reports that she is not the biological mother of the three children Michael Jackson raised as his own.
She says Rowe's eggs were fertilized by donor sperm in a process called in vitro fertilization (IVF). I believe her. The two oldest kids look just like her. She couldn't deny them if she tried.
And Michael's former producer Quincy Jones tells ETonline.com he won't be attending the funeral circus for Michael at Neverland Ranch on July 3. "I won't go to any more funerals as long as I live... I can't handle it," he says.
Vibe magazine, the urban-music magazine founded in 1993 by Quincy Jones, is the latest victim of the media recession. Multiple sources both within and outside the magazine confirmed that it is shutting down.
Reached for comment, chief financial officer Angela Zucconi said, "We will be making a statement by the end of the day. That's all I can say at this point." She referred further questions to CEO Steve Aaron, who was not immediately available. Messages left for editor in chief Danyel Smith and publishers Edgar Hernandez were not immediately returned.
I just heard from a source close to Tiny's camp who informed me that the appearance of Tiny's preteen group OMG onstage with rappers Lil Wayne and Drake at the BET Awards was neither planned nor rehearsed.
My source said the group's appearance onstage was a last minute decision by Wayne and no one else.
According to my source, what happened was Wayne's daughter Reginae, 9, was upset that she didn't get the chance to accompany her father, Lil Wayne, to the stage to accept his award because she was sitting too far back in the audience.
Once backstage, the youngster began to cry. And Wayne, who they say is a great dad, hated to see his little girl cry. So he told Reginae she could come out onstage during his performance with Drake. Reginae, being the team player that she is, asked the other members of her group to go out on stage with her.
Also, remember that the 2009 BET Awards was pre-recorded on Saturday afternoon. So the girls were not up past their bedtimes.
In an earlier post I confused Shingai Shoniwa, lead singer of London's Indie group the Noisettes, for a black Asian due to her name. A few readers emailed me to say Shingai is actually of Zimbabwean descent. At the time I meant to search YouTube for the group's music but I forgot.
Thanks to Universal Music's JD Anderson for sending this video link of the Noisettes new hit "Never Forget You." The Noisettes' music reminds me of the Motown sound of the 1960s, but updated! How exciting that good music and great singers are making a comeback -- and the British are leading the way!
Hopefully radio will play the Noisettes so today's kids can begin to develop an appreciation for real music.
You SIMPLY MUST check out this group the Noisettes from the UK in London. Their sound is soooo unique and the look of the group is very appealing. Lead singer - Shingai Shoniwa has an incredible look and style to her as well.
The current single is "Never Forget You" and their album comes out on September 22nd. The single "Never Forget You" also lands on iTunes July 7th.
I didn't watch the 2009 BET Awards on Sunday so I didn't know that Tiny's new group, OMG, were the underage girls who appeared onstage with mentally impaired rapper Lil Wayne and that gay rapper Drake.
According to upset parents, the youngsters gyrated onstage as Wayne rapped his tune "I Want To F*ck Every Girl in the World" -- or whatever the name of the track is.
As most of you know, the group's lead singer is Zonnique, 12, daughter of Tameka "Tiny" Cottle. The other three members are Reginae Carter, 9, Wayne's daughter with Antonia Carter who stars on Tiny & Toya (which premieres on BET tonight at 10pm); Bahjah Rodriguez, who turns 12 in August, and her sister Lourdes Rodriguez, who turns 11 in September.
Earlier this month, I spoke with Shamra Rodriguez, the proud mother of Bahjah and Lourdes, who told me that Bahjah's father, Jonathen “John-John” Rasboro (of the R&B group Silk), was not aware of his daughter's Atlanta audition with the group.
In this post I told you that Bahjah was in tears because her dad was a no-show at the audition. But Shamra did reveal that "John-John" is a deadbeat dad with at least 7 other kids spread around the U.S.
Shamra said "John John" was jailed earlier this year for a month for his refusal to pay child support for Bahjah.
A few loyal readers emailed me after the 2009 BET Awards with their shocked reactions to the Lil Wayne performance. In a post on Momlogic.com, Dr. Janet Taylor, who has two teenage daughters of her own, asked, "Why are we as women and mothers passively allowing our young girls to be utilized as sexual entertainment?"
Who is this Grammy Winning singer disguised under heavy makeup for a movie role? If you guessed Eminem, you're close!
See who it is after the break!
Y'all going to learn to listen to your auntie when I tell you something.
According to TMZ.com, the three children raised by Michael Jackson as his own are not his biologicals. It should have been obvious just by looking at them, but I guess some people need a lot more convincing.
Surprisingly, Debbie Rowe is not the biological mother of the children either. All three children were conceived in vitro -- outside the womb.
Michael Jackson was not the biological father of any of his children. And Debbie Rowe is not the biological mother of the two kids she bore for Michael. All three children were conceived in vitro -- outside the womb.
Multiple sources deeply connected to the births tell us Michael was not the sperm donor for any of his kids. Debbie's eggs were not used. She was merely the surrogate, and paid well for her services in the births of Michael Jr. and Paris.
In the case of Prince Michael II (the youngest), we're told the surrogate was never told of the identity of the "receiving parent" -- Michael Jackson. Three days after Prince was born at Grossmont Hospital in San Diego County, Jackson's lawyer came to the hospital to pick the baby up and deliver him to Michael.
Who Cares Why Chris Brown Didn’t Attend The BET Awards?
Now that the 2009 BET Awards is over, the biggest controversy is not the fact that mentally impaired rapper Lil Wayne cursed and performed a track containing inappropriate lyrics while underage children gyrated on stage.
The biggest controversy surrounds convicted woman beater Chris Brown, who is upset because he was disinvited to perform during the telecast.
Word is that Rihanna's mentor and former lover Jay Z blacklisted Chris from the BET Awards. Supposedly, BET's head honchos also considered CB's appearance a bit risky.
The "source" sounds like an idiot. Jay Z is not a big name anywhere -- Beyonce is. If it weren't for Beyonce no one would be checking for Jay's old azz.
|
#!/usr/bin/env python
from setuptools import setup, find_packages
from os import path
import codecs
import os
import re
import sys
# When creating the sdist, make sure the django.mo file also exists:
# compilemessages must run from inside the package directory, hence the chdir.
if 'sdist' in sys.argv or 'develop' in sys.argv:
    os.chdir('any_urlfield')
    try:
        from django.core import management
        management.call_command('compilemessages', stdout=sys.stderr, verbosity=1)
    except ImportError:
        # Django not installed: only fatal when actually building an sdist;
        # 'develop' installs may proceed without compiled translations.
        if 'sdist' in sys.argv:
            raise
    finally:
        # Always restore the working directory, even if compilemessages fails.
        os.chdir('..')
def read(*parts):
    """Return the UTF-8 decoded contents of the file at ``*parts``,
    resolved relative to this file's directory.
    """
    file_path = path.join(path.dirname(__file__), *parts)
    # Use a context manager so the file handle is closed deterministically
    # (the original left the handle to be closed by garbage collection).
    with codecs.open(file_path, encoding='utf-8') as handle:
        return handle.read()
def find_version(*parts):
    """Extract the package ``__version__`` string from the given file."""
    contents = read(*parts)
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M)
    if not match:
        raise RuntimeError("Unable to find version string.")
    return str(match.group(1))
setup(
    name='django-any-urlfield',
    version=find_version('any_urlfield', '__init__.py'),
    license='Apache 2.0',
    requires=[
        'Django (>=1.8)',
    ],
    description='An improved URL selector to choose between internal models and external URLs',
    long_description=read('README.rst'),
    author='Diederik van der Boor',
    author_email='opensource@edoburu.nl',
    url='https://github.com/edoburu/django-any-urlfield',
    download_url='https://github.com/edoburu/django-any-urlfield/zipball/master',
    packages=find_packages(exclude=('example*',)),
    include_package_data=True,
    test_suite='runtests',
    zip_safe=False,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        # 'Framework :: Django' was listed twice in the original; kept once,
        # with the version-specific classifiers grouped alongside it.
        'Framework :: Django',
        'Framework :: Django :: 1.8',
        'Framework :: Django :: 1.9',
        'Framework :: Django :: 1.10',
        'Framework :: Django :: 1.11',
        'Framework :: Django :: 2.0',
        'Framework :: Django :: 2.1',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Software Development :: Libraries :: Application Frameworks',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ]
)
|
Making art is a meditative practice for Gary. This is a collection of some of the things he has made, through various media including paintings, drawings, textiles, and sculpture.
Llamatism is a collection of things, a cabinet of curiosities, and reports from explorations on things, by Gary Llama.
|
#!/usr/bin/env python
#General Imports
import roslib; #roslib.load_manifest('smach_tutorials')
roslib.load_manifest('fsm')
import rospy
import time
import random
import math
import numpy
import smach
import smach_ros
#For dealing with msgs
from std_msgs.msg import Float32
from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion
# The Listener object used in the FSM
#from ListenerClass import ListenerClass
#from beginner_tutorials.srv import *
from Utility import *
from quadrotor_input.srv import *
# define state MANUAL
class MANUAL(smach.State):
    """Pilot-in-command state: watch the switches and hand control to the
    autopilot only when battery, switch, and stability all agree."""

    def __init__(self, flightStatus):
        smach.State.__init__(self, outcomes=['Finish',
                                             'Monitor',
                                             'TOAUTONOMOUS'])
        self.flightStatus = flightStatus

    def execute(self, userdata):
        status = self.flightStatus
        rospy.sleep(status.sleepTime)
        ready_for_auto = (status.IsBatteryOK()
                          and status.listener.AutoPilotSwitch == True
                          and status.getStable() == True)
        if ready_for_auto:
            print ("AutoPilot switch is ON, there is enough battery ---->>> Transfering control to PC ")
            return 'TOAUTONOMOUS'
        if status.IsTimeExceeded():
            print ("Mission Duration Exceeded - Finish")
            return 'Finish'
        return 'Monitor'
class AUTONOMOUS_INIT(smach.State):
    """Choose the first autonomous sub-state from the current altitude:
    above safe altitude -> HOVER, on the ground -> IDLE, otherwise LAND."""

    def __init__(self, flightStatus):
        smach.State.__init__(self, outcomes=['ToIdle',
                                             'ToHover',
                                             'ToLand',
                                             'Failure'])
        self.flightStatus = flightStatus

    def execute(self, userdata):
        status = self.flightStatus
        rospy.sleep(status.sleepTime)
        # Guard clause: without battery and the autopilot switch, bail out.
        if not (status.IsBatteryOK() and status.listener.AutoPilotSwitch == True):
            return 'Failure'
        altitude = status.getCurrentAltitude()
        if altitude - status.getSafeAltitude() > status.tolerance:
            # Already above the minimum safe altitude.
            print ("Vehicle above minimal safe altitude - goto HOVER")
            return 'ToHover'
        if altitude - status.getGroundLevel() < status.tolerance:
            # Still sitting on the ground.
            print ("Vehicle seems to be still on the ground - goto IDLE")
            return 'ToIdle'
        # Intermediate altitude: bring the vehicle down.
        print ("Vehicle in intermediate altitude - goto LAND")
        return 'ToLand'
# define state TAKEOFF
class TAKEOFF(smach.State):
    """Climb to the takeoff setpoint until the position error converges."""

    def __init__(self, flightStatus):
        smach.State.__init__(self, outcomes=['Success',
                                             'Aborted_NoBatt',
                                             'Aborted_Diverge',
                                             'Maintain'])
        self.flightStatus = flightStatus

    def execute(self, userdata):
        status = self.flightStatus
        rospy.sleep(status.sleepTime)
        pilot_override = (status.listener.AutoPilotSwitch == False
                          or status.PositionErrorDiverge() == True)
        if pilot_override:
            print ("Either pilot wants control back or vehicle is unstable - goto MANUAL")
            return 'Aborted_Diverge'
        elif status.listener.MissionGoSwitch == False or status.IsBatteryOK() == False:
            print ("Either pilot wants vehicle to come home or there is no Batt - goto LAND")#Later should be mapped to GOHOME state
            return 'Aborted_NoBatt'
        if status.PositionErrorConverge() == True:
            print ("Takeoff complete and succeful - goto HOVER")
            return 'Success'
        print ("TakingOff...")#Later should be mapped to GOHOME state
        return 'Maintain'
# define state HOVER
class HOVER(smach.State):
    """Hold position at the current target pose.

    The check order matters: divergence/pilot override first, then
    battery/mission-go/timeout (land at home or fly home), then distance
    to target (switch to trajectory following), otherwise keep hovering.
    """
    def __init__(self,flightStatus):
        smach.State.__init__(self, outcomes=['Aborted_NoBatt',
                                             'Aborted_Diverge',
                                             'Maintain',
                                             'GoHome',
                                             'FollowTraj'])
##                             input_keys = ['Hover_mission_stage_in'],
##                             output_keys = ['Hover_mission_stage_out'])
        # Shared flight status object: listener switches, pose helpers, timers.
        self.flightStatus = flightStatus
    def execute(self, userdata):
##        rospy.loginfo('Executing state HOVER')
        rospy.sleep(self.flightStatus.sleepTime)
        # Pilot flipped the autopilot switch off, or the position error is
        # diverging: give control back to the human immediately.
        if self.flightStatus.PositionErrorDiverge() or (self.flightStatus.listener.AutoPilotSwitch is False) :
            print ("Either pilot wants control back or vehicle is unstable - goto MANUAL")
            return 'Aborted_Diverge' #->Manual!
        # Low battery, mission aborted, or mission time exceeded: land if we
        # are already home, otherwise fly home first.
        if ( not self.flightStatus.IsBatteryOK() ) or (self.flightStatus.listener.MissionGoSwitch is False) or self.flightStatus.IsTimeExceeded() :
            print ("Either pilot wants vehicle to come home, duration exceeded or there is no Battery")#Later should be mapped to GOHOME state
            #print "Battery Voltage Level: " ,self.flightStatus.getCurrentBatteryVoltage()
            #print "MissionGo Switch: " ,self.flightStatus.listener.MissionGoSwitch
            if self.flightStatus.IsHome():
                return 'Aborted_NoBatt' #->Vehicle should LAND
            else:
                return 'GoHome' #->Vehicle should return home
        #print "self.flightStatus.DistanceToTarget(3)", self.flightStatus.DistanceToTarget(3)
        # Far from the target (3-D distance beyond 15x tolerance): hand off to
        # trajectory following instead of hovering toward it.
        if self.flightStatus.DistanceToTarget(3) > 15 * self.flightStatus.tolerance:
            print("Far away from target, should generate a trajectry to go there")
            return 'FollowTraj'
        print("Hovering....")
        return 'Maintain'
# define state LAND
class LAND(smach.State):
    """Descend to ground level; succeed when converged, fail when diverging."""

    def __init__(self, flightStatus):
        smach.State.__init__(self, outcomes=['Success',
                                             'Failure',
                                             'Maintain'])
        self.flightStatus = flightStatus

    def execute(self, userdata):
        status = self.flightStatus
        rospy.sleep(status.sleepTime)
        if status.PositionErrorDiverge() or status.listener.AutoPilotSwitch == False:
            # Controller cannot stabilise the descent: hand back to the pilot.
            print ("Vehicle is unstable - goto MANUAL")
            return 'Failure'
        if status.PositionErrorConverge():
            print ("Vehicle has landed - goto IDLE")
            return 'Success'
        print ("Landing...")
        return 'Maintain'
# define state IDLE
class IDLE(smach.State):
    """Wait on the ground until the pilot throttles up or shuts everything off."""

    def __init__(self, flightStatus):
        smach.State.__init__(self, outcomes=['Finish',
                                             'Start',
                                             'Maintain'])
        self.flightStatus = flightStatus

    def execute(self, userdata):
        status = self.flightStatus
        rospy.sleep(status.sleepTime)
        shutdown_requested = (status.listener.AutoPilotSwitch == False
                              or not status.IsBatteryOK())
        if shutdown_requested:
            print ('All Controllers turned off - we are DONE')
            print ("AutoPilot is OFF --->>> goto MANUAL")
            return 'Finish'
        takeoff_requested = (status.IsThrottleUp()
                             and status.IsBatteryOK()
                             and status.listener.MissionGoSwitch == True)
        if takeoff_requested:
            # Throttle is up and there is enough battery to start the mission.
            print ("Seems like pilot wants to take off and there's enough battery --->>> goto TAKEOFF")
            return 'Start'
        print("Idle...")
        return 'Maintain'
# define state FOLLOWTRAJECTORY (template for GO_HOME or any other trajectory-following
# state; only the trajectory being followed differs between parents)
class FOLLOW_TRAJECTORY(smach.State):
    """Track a pre-generated trajectory until arrival, abort, or pilot override.

    str_ParentStateName names the enclosing container ('GO_HOME' or
    'FOLLOW_TRAJ') and selects the extra exit conditions applied below.
    """
    def __init__(self,flightStatus,str_ParentStateName):
        smach.State.__init__(self, outcomes=['Arrived',
                                             'Aborted_Diverge',
                                             'Maintain'])
##                             input_keys = ['TrajFol_mission_stage_in'],
##                             output_keys = ['TrajFol_mission_stage_out'])
        self.flightStatus = flightStatus
        # Parent container name; drives the switch/case dispatch in execute().
        self.str_ParentStateName = str_ParentStateName
    def execute(self, userdata):
##        rospy.loginfo('Executing state FOLLOW_TRAJECTORY inside %s', self.str_ParentStateName )
        rospy.sleep(self.flightStatus.sleepTime)
        #Should add that is in follow trajectory and target pose is homepose, then maintain...
        # Pilot override or diverging error: abort to MANUAL regardless of parent.
        if self.flightStatus.PositionErrorDiverge() or self.flightStatus.listener.AutoPilotSwitch == False:
            print ("Either pilot wants control back or vehicle is unstable --->>> goto MANUAL")
            return 'Aborted_Diverge' #--->>>Manual!
        if self.flightStatus.PositionErrorConverge() : #Regardless of parent container, if arrived at destination, should goto HOVER
            print ("Seems like vehicle arrived at destination --->>> goto HOVER")
            return 'Arrived'
        # switch/case helper comes from Utility (imported at file top).
        for case in switch(self.str_ParentStateName):
            if case('GO_HOME'):
                # While going home: if the abort condition clears again, hand
                # back to HOVER; otherwise keep flying home.
                if self.flightStatus.IsBatteryOK() and not self.flightStatus.IsTimeExceeded() and self.flightStatus.listener.MissionGoSwitch == True :
                    return 'Arrived' #->Vehicle should go to HOVER
                else:
                    print ("Vehicle returning home...")
                    return 'Maintain' #->Vehicle should continue going home
                break
            if case('FOLLOW_TRAJ'):
                # While following a mission trajectory: abort conditions send
                # the vehicle back to HOVER (which then decides to go home).
                if self.flightStatus.listener.MissionGoSwitch == False or not self.flightStatus.IsBatteryOK() or self.flightStatus.IsTimeExceeded() :
                    return 'Arrived' #->Vehicle should go to HOVER
                break
        print("Following Trajectory...")
        return 'Maintain'
# define state InitContoller
class CONTROLLER_INIT(smach.State):
    """Configure and (re)start the position controller for the parent state.

    Builds a CommandControllerRequest, sets the target pose according to the
    parent state name (IDLE / HOVER / LAND / TAKEOFF / GO_HOME), generates a
    reference trajectory, and sends it all to the controller service.
    """
    def __init__(self, flightStatus, controlManagement, str_ParentStateName):
        smach.State.__init__(self, outcomes=['Success','Failure'])
        self.flightStatus = flightStatus
        self.controlManagement = controlManagement
        # Indicates which controller/target setup should be performed.
        self.str_ParentStateName = str_ParentStateName
        # Maps str(Service_in.running) to a word for the log messages below.
        self.dict = {'True': 'ON', 'False':'OFF'};

    def execute(self,userdata):
        rospy.sleep(self.flightStatus.sleepTime)
        # Build the controller service request. By default the controller
        # is asked to turn ON.
        Service_in = CommandControllerRequest()
        Service_in.running = True
        # BUG FIX: the original used "is 'IDLE'" — identity comparison on a
        # string literal, which only works by CPython interning accident.
        if self.str_ParentStateName == 'IDLE':
            if self.flightStatus.listener.AutoPilotSwitch == False or self.flightStatus.listener.MissionGoSwitch == False or not self.flightStatus.IsBatteryOK():
                print ('All Controllers should be turned off...')
                # Ask the controller to turn OFF instead.
                Service_in.running = False
            else:
                print("Getting ready to start mission...")
                print ("Creating a TargetPose to be used as a constant ref signal for the controller")
                self.flightStatus.setTargetPose(self.flightStatus.getCurrentPose().position)
                self.flightStatus._targetPose.position.z = self.flightStatus.getCurrentAltitude()
        else:
            # switch/case helper comes from Utility (imported at file top).
            for case in switch(self.str_ParentStateName):
                if case('HOVER'):
                    # Hold the current pose as a constant reference.
                    print ("Creating a StampedPose to be used as a constant ref signal for the controller")
                    self.flightStatus.setTargetPose(self.flightStatus.getCurrentPose().position)
                    break
                if case('LAND'):
                    # Target: current x/y at ground level.
                    self.flightStatus.setTargetPose(self.flightStatus.getCurrentPose().position)
                    self.flightStatus._targetPose.position.z = self.flightStatus.getGroundLevel()
                    # Converted from a Python-2-only print statement for
                    # consistency with the rest of the file.
                    print('Generating trajectory for LAND')
                    break
                if case('TAKEOFF'):
                    # Target: current x/y just above the minimum safe altitude.
                    self.flightStatus.setTargetPose(self.flightStatus.getCurrentPose().position)
                    self.flightStatus._targetPose.position.z = self.flightStatus.getSafeAltitude() + 0.1 #MOdify the z value of the private targetPose attribute
                    print('Generating trajectory for TAKEOFF')
                    break
                if case('GO_HOME'):
                    # Target: the stored home pose.
                    self.flightStatus.setTargetPose(self.flightStatus.getHomePose().position)
                    break
        # Generate a trajectory for the controller to follow.
        # NOTE(review): per the original comment this should become a service
        # provider rather than a local helper call.
        Service_in.path.poses = getTrajectory(self.flightStatus.getCurrentPose(),
                                              self.flightStatus.getTargetPose())
        if self.controlManagement.ControllerClient(Service_in):
            print("Controller SUCCEEDED to turn " + self.dict[str(Service_in.running)] )
            return 'Success'
        else:
            print("Controller FAILED to turn " + self.dict[str(Service_in.running)])
            return 'Failure'
|
I've read the relevant HP manual sections over and over & still cannot make links or anchors that work. The examples in the manual don't answer any of my puzzlements.
1. I gather that the target text must be in the main window in order to set it as a target, and that one creates the Special Link in that text FIRST, then one goes to the Table of Contents, highlights the relevant item, and creates another Special Link, returning to the target text, in each case highlighting the relevant frame for the text display. So it is a back-and-forth procedure, starting with the target, then adding the pointer, then returning to the target to confirm. Is that correct?
2. My manual text is in several *.rtf files in the same directory as the *.aeh file. Importing a file into the main frame results in only a highlighted title being displayed, not the text itself. Why?
3. The only way I can get a file into the frame is by loading it into Word Perfect and doing a copy+paste. Then I want to do ANOTHER file with a DIFFERENT link from the Table of Contents. How do I REPLACE the text in the main frame instead of adding it to the existing text at the cursor point?
4. I would join several files into one PROVIDED I could get anchors to work such that a click on a Table of Contents item would display the text and the place where the anchor has been placed turns up at the TOP of the main frame. I have not been able to do this. Apparently one must FIRST insert an anchor in the target text (page 48 in manual), THEN highlight the pointer text & click special link. When I do this, the OK button is inoperable, however. Why?
5. However, if I highlight the target text element next to the anchor I have inserted, THEN click Special Link, the OK button works. But that is not the pointer, it is the target. If I insert an anchor next to the Table of Contents item, then highlight that, when I select "target page", neither the anchor I have placed nor the relevant text displays in the dialog window. I have tried all the procedures described on page 48 without success.
Please describe, step by step, with NOTHING left out, how to create an anchor in my main frame text, pointed to by an item in the table of contents frame, such that the main frame text displays ONLY the text below the anchor in the main frame text.
Thank you for your attention. This has been a major exercise in frustration for me. I sense the brilliant qualities of HP software, but I am afraid your manual has been more confusing than helpful to me in trying to make links work the way I need them to.
1) To learn how to create a left menu, hit the F1 key (or use ?-Help) so you will see the help, then click on Tutorials. Everything is explained step by step.
Please open a new file and try the steps provided or you will not understand. It is very simple but you must actually do it, to explain frames with words is a very long exercise, while to actually do the action is very fast, especially when you realize how simple, effective, and mathematical is the mechanism.
There are two ways. Please try both and use the one you like more; please understand that the one that uses Special Links will show only in "preview" mode.
The target of a link: is not mandatory the target is in the current page, the target can be everywhere.
2) when you import a RTF file, it is automatically divided into multiple pages. Better to copy and paste with the MSWindows clipboard if you want to do different things.
3) I don't understand so much your question, sorry. You may want to reword it. If you mean you want to create another frame/content/page that will replace the main frame, create a new page, then paste the text within that page, then use Special links in order to replace a frame when the user clicks on a link.
Please another time try to limit yourself to ONE QUESTION a time as there is a limited length for a post so to reply to 5 questions we must be extremely concise.
also, please understand that special links work only in PREVIEW mode.
within the editor, the editor does not respect inter/frames links (otherwise everything will be messed up) and will not go to anchors.
Thank you for your efforts, but you have clearly not understood my difficulties. You explain the obvious, like how to create a left menu, which I have already done and did NOT ask about. And you did not provide the step-by-step procedure for inserting an anchor into a text pointed to by an item in the left menu, which I DID ask for. I am disappointed in your technical support.
First: this is the ** user's ** forum, this is not the technical support.
Second, we did provide the instructions.
In order to better help you, I repeat them with a number, and by expanding the instructions as maybe they are not sufficiently detailed for you.
Most people would be bored with this; if we replied with such a long answer first, most people would insult us: "I am not a kid," "please don't waste my time with so much detail."
So here the long list.
Please tell us what point (what number) is not clear.
3) highlight the text that you want to be clicked (when the user clicks on that text, he/she will jump to the anchor, either with the whole page or within a frame, depending on your following selections) and use "Insert > Special link"
5) now you see a preview that shows all available pages. Select the target page from the left list (or select it by browsing the preview - you can navigate link in the preview). Within the preview that is on the rightside of the dialog, click on the anchor. If you correctly followed the instructions at point (1) the page preview must show an anchor (please notice that this anchor won't be shown to the end user). Then click the OK button.
If this is not clear, or we did not understand you, please try to explain yourself better. Please kindly consider the possibility that you were not able to explain yourself, and that we are all humans like you — no wizards here; we cannot read your mind.
You may send us an email with images - create some images that show what you would like to obtain, and what you actually obtain, usually this helps. Thank you for helping us to better help you.
Thank you for the courtesy and detail in your reply. I apologize for being grumpy, but my frustration level with this is out of sight. This morning I uninstalled and re-installed the software, set up the frames, inserted content in the left hand table of contents, and imported two files which now show up as pages in the *.aeh file. One file special linked perfectly to the master frame and displays as it should. The other file special link only displays the file name and a dialog asking whether to save, execute, or cancel, none of which is appropriate. Both files are *.rtf, both are text only, and they are in the same directory. I have prepared a sequence of 4 jpgs to show what happens as I try to set the special link. You suggest I email you pictures, but I can find no email address on this or any other page in the forum. Please provide an email address that works. Thank you.
We already replied by email (email address on the HyperPublish website and within the internal help file, I don't copy it here to avoid spammers) but I copy the reply here as it may be helpful for other users.
I see you used links to file (4th tab of the main link dialog). When you place a link to file, a click on the link does normally show the dialog save / execute; it is not an error - the link to file is supposed to do so, so what you are experiencing is the normal behavior. This is intended for people that want to put on the website files that they want to make available for download. For example, a zip archive, a program executable file (like HyperPublish install file, see for example the page www.visualvision.com/software/hyperpublish_e.html ), a PDF file, a document, an ebook.
I believe that you expected another behavior because, sometimes, when publishing in the Internet, the browser (Firefox, Internet Explorer, Opera whatever you use...) is configured in order to display the file when a link to file is clicked, instead of starting downloading it - you can test this by using Publish >Browser preview. Maybe your browser will show the file. This behavior (if set) is a property/setting of the browser (Firefox, Internet Explorer, Opera whatever you use...) and is decided by the end user (ok, you, on your computer, but, on another computer, you don't know, and no, you can not override the end user setting....).
In case you want to be sure that the content of the file (a RTF file in your situation) will be always shown in a frame (I understand that you don't want it to prompt for the download, in any case), the only way is to create a new page in HyperPublish, then copy and paste the content of the RTF file on that new page (eventually in more than one page in case the file is very long), then you have to create a link or a special link to this page.
Thanks to some emails, I understood what was not clear for the user, and I believe this could be interesting for the other readers of the forum.
It was the difference from an imported page (imported from a RTF file) and a link to an external file (a direct RTF file, not imported).
- internal pages (that you can manage visually, with active previews where you can also browse, all without using files and having to remember file names); you can write internal pages within the built-in editor, or you can import them (or sections of them) with the import menu, or with copy and paste, etc.
When you create a CD or a website, normally you use only internal pages (as explained on Chapter 1 of the PDF manual) that are very easy to manage and to link.
The external links are provided in order to not limit you and to allow you to link to any kind of file.
some files can not be "imported" and the only chance is to link them as embedded external files.
|
#!/usr/bin/python
from datagrama import Datagrama
class Porta(object):
    """A router port: an attached link, an IP address, and a bounded FIFO
    buffer of datagrams (tail-drop when full)."""

    def __init__(self):
        # Fields of a port:
        self.enlace = None          # attached link object
        self.ip = None              # IP address assigned to this port
        self.tamanhoBuffer = None   # maximum number of queued datagrams
        self.buffer = []            # FIFO queue of datagrams
        self.modoVerboso = False    # verbose/debug logging flag
        super(Porta, self).__init__()

    def setEnlace(self, enlace):
        self.enlace = enlace

    def getEnlace(self):
        return self.enlace

    def setIp(self, ip):
        self.ip = ip

    def getIp(self):
        return self.ip

    def setTamanhoBuffer(self, tamanhoBuffer):
        self.tamanhoBuffer = tamanhoBuffer

    def getTamanhoBuffer(self):
        # BUG FIX: this accessor was previously (mis)named getIp, which
        # silently overrode the real getIp above and made the port's IP
        # address unreachable through the getter.
        return self.tamanhoBuffer

    def addNoBuffer(self, pacote):
        # Tail drop: silently discard the packet when the buffer is full.
        if len(self.buffer) < self.tamanhoBuffer:
            self.buffer.append(pacote)

    def bufferEstaVazio(self):
        """Return True when the buffer holds no datagrams."""
        # BUG FIX: the original tested the truthiness of buffer[0], so a
        # falsy datagram (None, 0, "") at the head gave a wrong answer.
        return len(self.buffer) == 0

    def getDoBuffer(self):
        """Pop and return the oldest datagram, or None when the buffer is empty."""
        # BUG FIX: the original raised IndexError on an empty buffer and
        # skipped falsy head elements; now empty simply yields None.
        if self.buffer:
            return self.buffer.pop(0)
        return None

    def printBuffer(self):
        # print(...) with a single argument works in both Python 2 and 3.
        print(self.buffer)
        print(str(self))

    def receber(self, datagrama):
        if self.modoVerboso:
            print("PORTA: vai receber " + str(self))
        self.addNoBuffer(datagrama)
        if self.modoVerboso:
            print(self.buffer)

    def enviar(self, router, datagrama):
        if self.modoVerboso:
            print(str(self) + ": " + str(datagrama))
        self.enlace.enviar(self, datagrama)
|
rocklobster (post: 1444473) wrote: Hey, Animegirl. You should review Peach Fuzz and Bone. I've read both and I think they're great.
Just posted a review for "Moonlit Dead Love" by Marshmellow Muffin, a bonus one-shot story that was included with "The World Stops!"
Posted a review for "Heroic Age".
Hey! I saw a review up for Rozen Maiden and the first volume of Shelter of Wings, but when I clicked on them, it said they didn't exist. Is there something wrong with your blog or the links?
Atria35 (post: 1444634) wrote: Hey! I saw a review up for Rozen Maiden and the first volume of Shelter of Wings, but when I clicked on them, it said they didn't exist. Is there something wrong with your blog or the links?
^ Okay! Thats a bit of a relief- I thought there was something wrong with my browser!
Atria35 (post: 1444643) wrote: ^ Okay! Thats a bit of a relief- I thought there was something wrong with my browser!
Don't worry, nothing is wrong ^.^ I save alot of the reviews I plan to do as drafts and sometimes I do mistakes and publish them XD!! So I corrected it so they're drafts again. I won't be able to do Shelter Of Wings 'till all of it is up at her website or until I have the money to buy it, but I can't wait to review that one!!!
Also, I added a new page to my blog called "Library" where I have links to every single review I have done so far!! Everytime I have a new review, the list will be updated.
Now added a page list of items I have seen or own that I will be reviewing in the future!
I LOVE the Library tab! It's a fantastic, easy way to sort through the different types of reviews you've done!
Atria35 (post: 1444868) wrote: I LOVE the Library tab! It's a fantastic, easy way to sort through the different types of reviews you've done!
Glad you like it ^.^ I wanted to make an easier way for people to find stuff, and I thought I better do it now before I get too many reviews on the site where it'd take forever to make that list XD!!!
Added a review for "The Silver Kiss" a fan-made manga adaption of Anette Curtis Klause's novel of the same title.
I think I'll move this under "doujinshi" in the Library tab, since even though it's just an adaption of the novel, it's still a fancomic none-the-less.
Posted a review for "Angelic Layer: Complete Series"
Posted a review of "Avatar The Last Airbender: Book 1: Water" volume 1. Yes, anime purists, I am aware it's not a Japanese anime, but if there were such a thing as American anime, this would be it.
WOW! You're a reviewing fiend today!
besides, animegirl, we've had American Animation that was pretty close to anime before: Thundercats, Silverhawks, even Transformers!
I LOVE Thundercats! My dad introduced me to that show! Hmmm...never thought of it as anime looking. Than again, it was an 80s show and 80s anime probably looked more like that. XD!!!
Just posted a review for "Rozen Maiden: Overture" OVA.
Just posted a "Merry Christmas" post, with some reviews to come, and stating it's been a full year of reviewing! Sooo...Happy Birthday "Breaking Metal Windows" XD!!!
My first review of the year! A 7-page doujinshi (9 if you count the beginning and end repeated cover) of Tsubasa Reservoir Chronicles entitled "Fai and Mokona: Partners In Crime" PLUS! All reviews for read-online-only comics now have links above the comments (except The Anti-Squee which has the links at the end of the comments). Enjoy!
^ I think that would be a great idea! Finding song lyrics that fit the anime or the review? I think that sounds pretty cool!
Updated the review for Tsubasa Tokyo Revelations and bumped it up as most recent. This now includes the synopsis, dub analysis, and DVD extra listing. I will update Spring Thunder later XD!!!
You should review Rahxephon. It has one of the best soundtracks ever! Oh and Azumanga Daioh. Chiyo was so adorable!
What's Rahxephone about? And also, I've been wanting to see/read Azumanga Daioh.
Just posted a review for Sarah Ellerton's "Inverloch, Vol.1" and updated my review for Tsubasa Spring Thunder to include dub analysis, so it's been bumped up as a more recent review.
RaphXephon: In the year 2012 Japan was invaded by the Mu. Human-like beings from another dimension with blue blood.
In the year 2015 Tokyo is attacked by invaders, who are repelled by a humanoid weapon called a Dolem. During the chaos, Ayato Kamina meets Reika Mishima, a girl whom he had never met but had done a painting of the morning before.
The Mu thing reminds me of To Terra (a manga that came out in the 70's- which, by the way, I happen to be highly recommending!). I haven't seen it myself, but it gets some decent praise.
It's got one of the best soundtracks I ever listened to. The voices on both the dub and the original are excellent, particularly the voices used for Quon.
Studio Gibli's "Whisper Of The Heart"
Heh heh, I am just getting Whisper of the heart through Netflix today so thank you for posting the review.
|
from pymongo import MongoClient

# Step 1: Connect to MongoDB - Note: Change connection string as needed
# SECURITY NOTE(review): credentials are hard-coded in this connection string;
# move them to an environment variable or config file, and rotate the password.
client = MongoClient("mongodb://jun_ishikawa:EUgx3hId7TuJnZeZ@car-e-mainframe-shard-00-00-tppcz.mongodb.net:27017,car-e-mainframe-shard-00-01-tppcz.mongodb.net:27017,car-e-mainframe-shard-00-02-tppcz.mongodb.net:27017/<DATABASE>?ssl=true&replicaSet=Car-E-Mainframe-shard-0&authSource=admin")
db = client.database

# Step 2: Create sample data (parallel lists; index x describes one user record)
names = ['Avijoy Haldar', 'Rajat Saxena', 'Divyansh Agrawal']
car_number = ['WB069102', 'DL011303', 'TN021011']
passkey = ['010111', '100101', '111001']

for x in xrange(0, 3):
    record = {
        'name': names[x], 'car number': car_number[x], 'pass code': passkey[x]
    }
    # Step 3: Insert the record directly into MongoDB via insert_one
    result = db.database.insert_one(record)
    # Step 4: Print to the console the ObjectID of the new document.
    # BUG FIX: x is 0-based, so report x + 1 (the original printed "0 of 3").
    print('Created {0} of 3 as {1}'.format(x + 1, result.inserted_id))

# Step 5: Insert one extra record and report completion.
# NOTE(review): this makes 4 documents total even though the message says 3 --
# confirm whether the extra record is intentional.
insert_result = db.database.insert_one({'name': 'Zayn Malik', 'car number': 'DL012201', 'pass code': '110011'})
print('finished creating 3 user records')
|
TCNS is an all-inclusive feature-rich provider of domains, websites and email. Pick a menu from the left and order service. Or, let us help you upgrade, bundle and save, call: 1-800-827-8267.
Domain registration, dns, web site hosting, and email over one of the largest, fastest and most reliable internet pipelines in the world. Service excellence for thousands of customers since 1994.
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import os
import cmath
import math
import sys
import numpy as np
import glob
import subprocess
import re
from matplotlib_venn import venn2
import pandas as pd
from collections import defaultdict
from operator import itemgetter
import matplotlib as mpl
import matplotlib.pyplot as plt
import shutil
from optparse import OptionParser
mpl.rcParams['savefig.dpi'] = 2 * mpl.rcParams['savefig.dpi']  # Double the resolution of saved figures.
# <codecell>
# These names are shared by the helper functions below (file was exported from a notebook,
# so the 'global' statements at module level are no-ops kept for documentation).
global sampleName
global outfilepath
global logFile
global logOpen
### File name ###
sampleName=sys.argv[1]  # Sample name is the single command-line argument.
infilepath=os.getcwd() + '/' + 'rawdata/'  # Raw fastq input directory; expects <sample>_R1.fastq / _R2.fastq.
outfilepath=os.getcwd() + '/results/%s/'%sampleName  # Per-sample results directory; assumed to already exist.
# <codecell>
# Create log and start pipeline
logFile=outfilepath + "runLog"
logOpen=open(logFile, 'w')  # NOTE(review): stays open for the whole run and is never explicitly closed.
# <codecell>
### Parameters ###
# --- Read trimming / filtering ---
iCLIP3pBarcode='AGATCGGAAGAGCGGTTCAGCAGGAATGCCGAGACCGATCTCGTATGCCGTCTTCTGCTTG' # Barcode sequence to trim from reads.
q=25 # Minimum quality score to keep during filtering.
p=80 # Percentage of bases that must have quality > q during filtering.
iCLIP5pBasesToTrim=13 # Number of reads to trim from 5' end of clip reads.
k='1' # k=N distinct, valid alignments for each read in bt2 mapping.
threshold=3 # Sum of RT stops (for both replicates) required to keep file.
expand=15 # Bases to expand around RT position after RT stops are merged.
# --- Repeat-RNA index ---
repeat_index=os.getcwd() + '/docs/repeat/rep' # bt2 index for repeat RNA.
repeatGenomeBuild=os.getcwd()+'/docs/repeat/repeatRNA.fa' # Sequence of repeat index.
repeatAnnotation=os.getcwd()+'/docs/repeat/Hs_repeatIndex_positions.txt' # Repeat annotation file.
# Coordinates (bp) of the rRNA species within the repeat index sequence.
start18s=3657
end18s=5527
start5s=6623
end5s=6779
start28s=7935
end28s=12969
rRNAend=13314
threshold_rep=1 # RT stop threshold for repeat index.
# --- Genome index and annotation/masking files (hg19) ---
index=os.getcwd() + '/docs/hg19/hg19' # bt2 index for mapping.
index_tag='hg19' # Name of bt2 index.
genomeFile=os.getcwd()+'/docs/human.hg19.genome' # Genome file for bedGraph, etc.
genomeForCLIPper='-shg19' # Parameter for CLIPper.
blacklistregions=os.getcwd()+'/docs/wgEncodeDukeMapabilityRegionsExcludable.bed' # Blacklist masker.
repeatregions=os.getcwd()+'/docs/repeat_masker.bed' # Repeat masker.
geneAnnot=glob.glob(os.getcwd()+'/docs/genes_types/*') # List of genes by type.
snoRNAmasker=os.getcwd()+'/docs/snoRNA_reference/snoRNAmasker_formatted_5pExtend.bed' # snoRNA masker file.
miRNAmasker=os.getcwd()+'/docs/miR_sort_clean.bed' # miRNA masker file.
fivePUTRBed=os.getcwd()+'/docs/5pUTRs_Ensbl_sort_clean_uniq.bed' # UTR annotation file.
threePUTRBed=os.getcwd()+'/docs/3pUTRs_Ensbl_sort_clean_uniq.bed' # UTR annotation file.
cdsBed=os.getcwd()+'/docs/Exons_Ensbl_sort_clean_uniq.bed' # UTR annotation file.
utrFile=os.getcwd()+'/docs/hg19_ensembl_UTR_annotation.txt' # UTR annotation file.
genesFile=os.getcwd()+'/docs/hg19_ensembl_genes.txt' # Gene annotation file.
sizesFile=os.getcwd()+'/docs/hg19.sizes' # Genome sizes file.
snoRNAindex=os.getcwd()+'/docs/snoRNA_reference/sno_coordinates_hg19_formatted.bed' # snoRNA coordinate file.
CLIPPERoutNameDelim='_' # Delimiter that for splitting gene name in the CLIPper windows file.
# <codecell>
import datetime
now=datetime.datetime.now()
logOpen.write("Timestamp:%s\n"%str(now))
logOpen.write("\n###Parameters used###\n")
logOpen.write("3' barcode:%s\n'"%iCLIP3pBarcode)
logOpen.write("Minimum quality score (q):%s\n"%q)
logOpen.write("Percentage of bases with > q:%s\n"%p)
logOpen.write("5' bases to trim:%s\n'"%iCLIP5pBasesToTrim)
logOpen.write("k distinct, valid alignments for each read in bt2 mapping:%s\n"%k)
logOpen.write("Threshold for minimum number of RT stops:%s\n"%threshold)
logOpen.write("Bases for expansion around conserved RT stops:%s\n"%expand)
logOpen.write("\n\n\n")
# <codecell>
print "Processing sample %s" %(sampleName)
logOpen.write("Processing sample: "+sampleName+'\n')
read1=infilepath+sampleName+'_R1.fastq'
read2=infilepath+sampleName+'_R2.fastq'
unzippedreads=[read1,read2]
# <codecell>
def trimReads3p(unzippedreads,adapter3p):
    # Usage: Trims a specified adapter sequence from the 3p end of the reads.
    # Input: List of fastq files.
    # Output: List of 3p trimmed files.
    trimparam='-a'+adapter3p # Adapter string
    trimmedReads=[]
    try:
        for inread in unzippedreads:
            # Trimmed output is redirected into the per-sample results directory.
            outread=inread.replace("rawdata/", "results/%s/"%sampleName)
            outread=outread.replace(".fastq", "_3ptrimmed.fastq")
            # fastx_clipper: -n keeps reads with Ns, -l33 drops reads shorter than 33 nt, -Q33 = Sanger quality.
            process=subprocess.Popen(['fastx_clipper',trimparam,'-n','-l33','-Q33','-i',inread,'-o',outread],stderr=subprocess.STDOUT,stdout=subprocess.PIPE)
            stdout, stderr = process.communicate()
            logOpen.write("Trim 3p end of reads.\n")
            logOpen.write("Stdout: %s.\n"%stdout)
            logOpen.write("Stderr: %s.\n"%stderr)
            trimmedReads=trimmedReads+[outread]
        return trimmedReads
    except:
        # NOTE(review): bare except swallows all errors; the function then
        # implicitly returns None, which breaks the next pipeline stage.
        logOpen.write("Problem with 3p trimming.\n")
        print "Problem with 3p trimming."
print "Trim 3p adapter from reads."
trimmedReads3p=trimReads3p(unzippedreads,iCLIP3pBarcode)
# <codecell>
def qualityFilter(trim3pReads,q,p):
    # Usage: Filters reads based upon quality score.
    # Input: List of fastq file names as well as the quality parameters p and q.
    # Output: List of modified fastq file names.
    qualityparam='-q'+str(q)
    percentrageparam='-p'+str(p)
    filteredReads=[]
    try:
        for inread in trim3pReads:
            outread=inread.replace(".fastq", "_filter.fastq")
            # Keep reads where at least p% of bases have quality > q (-Q33 = Sanger quality).
            process=subprocess.Popen(['fastq_quality_filter',qualityparam,percentrageparam,'-Q33','-i',inread,'-o',outread],stderr=subprocess.STDOUT,stdout=subprocess.PIPE)
            stdout, stderr=process.communicate()
            logOpen.write("Perform quality filtering.\n")
            logOpen.write("Stdout: %s.\n"%stdout)
            logOpen.write("Stderr: %s.\n"%stderr)
            filteredReads=filteredReads+[outread]
        return filteredReads
    except:
        # NOTE(review): bare except; on failure the function returns None.
        logOpen.write("Problem with quality filter.\n")
        print "Problem with quality filter."
print "Perform quality filtering."
filteredReads=qualityFilter(trimmedReads3p,q,p)
# <codecell>
def dupRemoval(filteredReads):
    # Usage: Removes duplicate reads.
    # Input: List of fastq file names.
    # Output: List of reads in FASTA format.
    program=os.getcwd() + '/bin/fasta_to_fastq.pl'
    noDupes=[]
    try:
        for inread in filteredReads:
            outread=inread.replace(".fastq","_nodupe.fasta")
            # fastx_collapser merges identical sequences into single FASTA records.
            process=subprocess.Popen(['fastx_collapser','-Q33','-i',inread,'-o',outread],stderr=subprocess.STDOUT,stdout=subprocess.PIPE)
            stdout, stderr=process.communicate()
            logOpen.write("Perform duplicate removal.\n")
            logOpen.write("Stdout: %s.\n"%stdout)
            logOpen.write("Stderr: %s.\n"%stderr)
            fastqOut=outread.replace('.fasta', '.fastq') # fastx_collapser returns fasta files, which are then converted to fastq.
            outfh=open(fastqOut, 'w')
            process=subprocess.Popen(['perl',program,outread],stdout=outfh)
            process.communicate() # Wait for the process to complete.
            os.remove(outread) # Remove the remaining .fasta file.
            noDupes=noDupes+[fastqOut]
        return noDupes
    except:
        # NOTE(review): bare except; on failure the function returns None.
        logOpen.write("Problem with duplicate removal.\n")
        print "Problem with duplicate removal."
print "Perform duplicate removal."
nodupReads=dupRemoval(filteredReads)
# <codecell>
def trimReads5p(nodupes,n):
    # Usage: Trims a specified number of bases from the 5' end of each read.
    # Input: List of fastq files.
    # Output: List of 5p trimmed files.
    trimparam='-f'+str(n)  # fastx_trimmer -f: first base to keep (1-based), i.e. trims n-1 bases.
    trimmedReads=[]
    try:
        for inread in nodupes:
            outread=inread.replace(".fastq", "_5ptrimmed.fastq")
            process=subprocess.Popen(['fastx_trimmer', trimparam, '-Q33', '-i', inread,'-o',outread],stderr=subprocess.STDOUT,stdout=subprocess.PIPE)
            stdout, stderr=process.communicate()
            logOpen.write("Perform 5' barcode trimming.\n")
            logOpen.write("Stdout: %s.\n"%stdout)
            logOpen.write("Stderr: %s.\n"%stderr)
            trimmedReads=trimmedReads+[outread]
        return trimmedReads
    except:
        # NOTE(review): bare except; on failure the function returns None.
        logOpen.write("Problem with 5' barcode trimming.\n")
        print "Problem with 5' barcode trimming."
print "Perform 5' barcode trimming."
trimmedReads5p=trimReads5p(nodupReads,iCLIP5pBasesToTrim)
# <codecell>
def runBowtie(fastqFiles,index,index_tag):
    # Usage: Read mapping to reference.
    # Input: Fastq files of replicate trimmed read files.
    # Output: Path to samfile for each read.
    program='bowtie2'
    mappedReads=[]
    unMappedReads=[]
    try:
        for infastq in fastqFiles:
            outfile=infastq.replace(".fastq","_mappedTo%s.sam"%index_tag)
            unmapped=infastq.replace(".fastq","_notMappedTo%s.fastq"%index_tag)
            # Note: 'k' here is the module-level parameter (number of alignments to report),
            # not a local; --un collects reads that fail to map for the next mapping stage.
            process=subprocess.Popen([program,'-x',index,'-k',k,'-U',infastq,'--un',unmapped,'-S',outfile],stderr=subprocess.STDOUT,stdout=subprocess.PIPE)
            stdout,stderr=process.communicate()
            logOpen.write("Perform mapping to %s index.\n"%index_tag)
            logOpen.write("Stdout: %s.\n"%stdout)
            logOpen.write("Stderr: %s.\n"%stderr)
            mappedReads = mappedReads + [outfile]
            unMappedReads = unMappedReads + [unmapped]
        return (mappedReads,unMappedReads)
    except:
        # NOTE(review): bare except; on failure the function returns None.
        logOpen.write("Problem with mapping.\n")
        print "Problem with mapping."
print "Run mapping to repeat index."
trimmedReads5p is mapped first against the repeat index; genome mapping happens later.
mappedReads_rep,unmappedReads_rep=runBowtie(trimmedReads5p,repeat_index,'repeat')
# <codecell>
def runSamtools(samfiles):
    # Usage: Samfile processing.
    # Input: Sam files from Bowtie mapping.
    # Output: Sorted bedFiles.
    program = 'samtools'
    program2 = 'bamToBed'
    outBedFiles=[]
    try:
        for samfile in samfiles:
            # SAM -> BAM.
            bamfile = samfile.replace('.sam','.bam')
            proc = subprocess.Popen( [program,'view','-bS','-o', bamfile, samfile])
            proc.communicate()
            # Sort the BAM. NOTE(review): this is the pre-1.0 samtools CLI
            # ("samtools sort in.bam out_prefix"); newer samtools requires -o — confirm version.
            bamfile_sort = bamfile.replace('.bam','_sorted')
            proc2 = subprocess.Popen([program,'sort',bamfile, bamfile_sort])
            proc2.communicate()
            # BAM -> BED; duplicates are still present at this stage (hence _withDupes).
            bedFile = bamfile_sort.replace('_sorted', '_withDupes.bed')
            outfh = open(bedFile,'w')
            proc3 = subprocess.Popen( [program2,'-i', bamfile_sort+'.bam'],stdout=outfh)
            proc3.communicate()
            outBedFiles=outBedFiles+[bedFile]
        return outBedFiles
    except:
        # NOTE(review): bare except; on failure the function returns None.
        logOpen.write("Problem with samtools.\n")
        print "Problem with samtools."
print "Run samtools."
logOpen.write("Run samtools.\n")
mappedBedFiles_rep=runSamtools(mappedReads_rep)
# <codecell>
def seperateStrands(mappedReads):
    # Usage: Separate positive- and negative-strand reads.
    # Input: Paths to bed files from Samtools.
    # Output: Tuple (negative-strand paths, positive-strand paths).
    negativeStrand=[]
    positiveStrand=[]
    for mapFile in mappedReads:
        neg_strand=mapFile.replace('.bed','_neg.bed')
        pos_strand=mapFile.replace('.bed','_pos.bed')
        negativeStrand=negativeStrand+[neg_strand]
        positiveStrand=positiveStrand+[pos_strand]
        # Fixed: the output handles were never closed, so the strand files could be
        # left unflushed when downstream steps read them. Context managers close both.
        with open(mapFile, 'r') as infile:
            with open(neg_strand, 'w') as neg:
                with open(pos_strand, 'w') as pos:
                    for line in infile:
                        # Bed column 6 holds the strand.
                        if str(line.strip().split('\t')[5]) == '-':
                            neg.write(line)
                        elif str(line.strip().split('\t')[5]) == '+':
                            pos.write(line)
    return (negativeStrand,positiveStrand)
def modifyNegativeStrand(negativeStrandReads):
    # Usage: For negative stranded reads, ensure 5' position (RT stop) is listed first.
    # Input: Bed file paths to all negative stranded.
    # Output: Paths to modified bed files.
    negativeStrandEdit=[]
    for negativeRead in negativeStrandReads:
        neg_strand_edited=negativeRead.replace('_neg.bed','_negEdit.bed')
        negativeStrandEdit=negativeStrandEdit+[neg_strand_edited]
        # Fixed: the output handle was never closed; use a context manager so the
        # edited file is flushed before downstream steps read it.
        with open(neg_strand_edited, 'w') as neg_edit:
            with open(negativeRead, 'r') as infile:
                for line in infile:
                    chrom,start,end,name,quality,strand=line.strip().split('\t')
                    # On '-' strand the 5' end is the bed 'end'; emit a 30-bp interval from it.
                    neg_edit.write('\t'.join((chrom,end,str(int(end)+30),name,quality,strand))+'\n')
    return negativeStrandEdit
def isolate5prime(strandedReads):
    # Usage: Isolate only the Chr, 5' position (RT stop), and strand.
    # Input: Bed file paths to strand separated reads.
    # Output: Paths to RT stop files.
    RTstops=[]
    for reads in strandedReads:
        RTstop=reads.replace('.bed','_RTstop.bed')
        RTstops=RTstops+[RTstop]
        # Fixed: the output handle was never closed; context managers guarantee the
        # RT-stop file is flushed before mergeRT() reads it back.
        with open(RTstop, 'w') as f:
            with open(reads, 'r') as infile:
                for line in infile:
                    chrom,start,end,name,quality,strand=line.strip().split('\t')
                    f.write('\t'.join((chrom,start,strand))+'\n')
    return RTstops
print "RT stop isolation (repeat)."
logOpen.write("RT stop isolation (repeat).\n")
# seperateStrands returns (negative-strand files, positive-strand files);
# negative-strand reads are re-oriented so the 5' RT stop is listed first.
readsByStrand_rep=seperateStrands(mappedBedFiles_rep)
negativeRTstop_rep=isolate5prime(modifyNegativeStrand(readsByStrand_rep[0]))
positiveRTstop_rep=isolate5prime(readsByStrand_rep[1])
# <codecell>
def fileCat(destinationFile,fileList):
    # Concatenate every file in fileList, in order, into destinationFile.
    with open(destinationFile, "w") as sink:
        for sourcePath in fileList:
            with open(sourcePath, "r") as source:
                sink.write(source.read())
def RTcounts(RTfile):
    # Usage: Count RT stops per (Chr, Start) position.
    # Input: 3-column (Chr, Start, Strand) RT stop file.
    # Output: pandas Series of counts indexed by (Chr, Start).
    posRT_R1=pd.DataFrame(pd.read_table(RTfile,index_col=None,header=None,sep='\t'))
    posRT_R1.columns=['Chr','Start','Strand']
    cts=posRT_R1.groupby(['Chr','Start']).size()
    return cts
def mergeRT(RTstopFiles,outfilename,threshold,expand,strand):
    # Usage: Merge RT stops between replicates and keep only those positions that exceed threshold.
    # Input: Files with RT stops for each replicate, outfile, threshold, strand, and bases to expand around RT stop.
    # Output: None. Writes merged RT stop file.
    cts_R1=RTcounts(RTstopFiles[0])
    cts_R2=RTcounts(RTstopFiles[1])
    # Inner join keeps only positions observed in BOTH replicates.
    m=pd.concat([cts_R1,cts_R2],axis=1,join='inner')
    m.columns=['Rep_1','Rep_2']
    m['Sum']=m['Rep_1']+m['Rep_2']
    m_filter=m[m['Sum']>threshold]
    # Fixed: the output handle was never closed, so the merged file could be left
    # unflushed when fileCat() reads it back immediately afterwards.
    f = open(outfilename, 'w')
    try:
        for i in m_filter.index:
            chrom=i[0]
            RT=i[1]
            count=m_filter.loc[i,'Sum']
            # Expand around the RT stop, clamping the left edge at the sequence start.
            if RT > expand:
                read='\t'.join((chrom,str(int(RT)-expand),str(int(RT)+expand),'CLIPread','255',strand))+'\n'
            else:
                read='\t'.join((chrom,str(int(RT)),str(int(RT)+expand),'CLIPread','255',strand))+'\n'
            # The interval is written once per supporting RT stop to preserve read depth.
            f.write(read*(count))
    finally:
        f.close()
print "Merge RT stops."
logOpen.write("Merge RT stops.\n")
# Merge repeat-index RT stops per strand, then concatenate both strands into one bed.
posMerged=outfilepath+sampleName+'_repeat_positivereads.mergedRT'
strand='+'
mergeRT(positiveRTstop_rep,posMerged,threshold_rep,expand,strand)
negMerged=outfilepath+sampleName+'_repeat_negativereads.mergedRT'
strand='-'
mergeRT(negativeRTstop_rep,negMerged,threshold_rep,expand,strand)
negAndPosMerged=outfilepath+sampleName+'_threshold=%s'%threshold_rep+'_repeat_allreads.mergedRT.bed'
fileCat(negAndPosMerged,[posMerged,negMerged])
# <codecell>
print "Run mapping to %s."%index_tag
# Reads that failed to map to the repeat index are now mapped to the genome.
mappedReads,unmappedReads=runBowtie(unmappedReads_rep,index,index_tag)
# <codecell>
print "Run samtools."
logOpen.write("Run samtools.\n")
mappedBedFiles=runSamtools(mappedReads)
# <codecell>
def runRepeatMask(mappedReads,repeatregions):
    # Usage: Remove repeat regions from bedfile following mapping.
    # Input: .bed file after mapping (duplicates removed by samtools) and blacklist regions removed.
    # Output: Bedfile with repeat regions removed.
    program='intersectBed'
    masked=[]
    try:
        for bedIn in mappedReads:
            noRepeat=bedIn.replace('.bed','_noRepeat.bed')
            outfh=open(noRepeat, 'w')
            # -v reports reads with NO overlap; -s requires same-strand overlap.
            proc=subprocess.Popen([program,'-a',bedIn,'-b',repeatregions,'-v','-s'],stdout=outfh)
            proc.communicate()
            outfh.close()
            masked=masked+[noRepeat]
        return (masked)
    except:
        # NOTE(review): bare except; on failure the function returns None.
        print "Problem with repeat masking."
        logOpen.write("Problem with repeat masking.\n")
def runBlacklistRegions(mappedReads,blacklistregions):
    # Usage: Remove blacklisted regions from bedfile following mapping.
    # Input: .bed file after mapping (duplicates removed by samtools).
    # Output: Bedfile with blacklisted regions removed.
    program='intersectBed'
    blackListed=[]
    try:
        for bedIn in mappedReads:
            noBlacklist=bedIn.replace('.bed','_noBlacklist.bed')
            outfh=open(noBlacklist, 'w')
            # -v reports reads with NO overlap; blacklist masking is strand-agnostic (no -s).
            proc=subprocess.Popen([program,'-a',bedIn,'-b',blacklistregions,'-v'],stdout=outfh)
            proc.communicate()
            outfh.close()
            blackListed=blackListed+[noBlacklist]
        return (blackListed)
    except:
        # NOTE(review): bare except; on failure the function returns None.
        print "Problem with blacklist."
        logOpen.write("Problem with blacklist.\n")
print "Run repeat and blacklist region masker."
logOpen.write("Run repeat and blacklist masker.\n")
# Blacklist masking first, then repeat masking on the blacklist-filtered files.
blacklistedBedFiles=runBlacklistRegions(mappedBedFiles,blacklistregions)
maskedBedFiles=runRepeatMask(blacklistedBedFiles,repeatregions)
# <codecell>
print "RT stop isolation."
logOpen.write("RT stop isolation.\n")
# Same strand-separation / RT-stop / merge procedure as for the repeat index,
# now applied to the genome-mapped, masked reads with the genome threshold.
readsByStrand=seperateStrands(maskedBedFiles)
negativeRTstop=isolate5prime(modifyNegativeStrand(readsByStrand[0]))
positiveRTstop=isolate5prime(readsByStrand[1])
print "Merge RT stops."
logOpen.write("Merge RT stops.\n")
posMerged=outfilepath+sampleName+'_%s_positivereads.mergedRT'%index_tag
strand='+'
mergeRT(positiveRTstop,posMerged,threshold,expand,strand)
negMerged=outfilepath+sampleName+'_%s_negativereads.mergedRT'%index_tag
strand='-'
mergeRT(negativeRTstop,negMerged,threshold,expand,strand)
negAndPosMerged=outfilepath+sampleName+'_threshold=%s'%threshold+'_%s_allreads.mergedRT.bed'%index_tag
fileCat(negAndPosMerged,[posMerged,negMerged])
# <codecell>
def runCLIPPER(RTclusterfile,genome,genomeFile):
    # Usage: Process the mergedRT file and pass through CLIPper FDR script.
    # Input: Merged RT file.
    # Output: CLIPper input (.bed) file and output file.
    program='bedToBam'
    program2='samtools'
    program3='bamToBed'
    program4='clipper'
    # bed -> BAM (bedToBam writes the BAM to stdout, captured via outfh).
    bamfile=RTclusterfile.replace('.bed','.bam')
    outfh=open(bamfile, 'w')
    proc=subprocess.Popen([program,'-i',RTclusterfile,'-g',genomeFile],stdout=outfh)
    proc.communicate()
    # Sort the BAM. NOTE(review): pre-1.0 samtools sort syntax (prefix argument).
    bamfile_sort=bamfile.replace('.bam','.srt')
    proc2=subprocess.Popen([program2,'sort',bamfile,bamfile_sort])
    proc2.communicate()
    bamfile_sorted=bamfile_sort+'.bam'
    # Mapping statistics for the sorted BAM.
    mapStats=bamfile_sorted.replace('.srt.bam','.mapStats.txt')
    outfh=open(mapStats, 'w')
    proc3=subprocess.Popen([program2,'flagstat',bamfile_sorted],stdout=outfh)
    proc3.communicate()
    proc4=subprocess.Popen([program2,'index',bamfile_sorted])
    proc4.communicate()
    # The sorted BAM converted back to bed is what is fed to CLIPper.
    CLIPPERin=bamfile_sorted.replace('.srt.bam','_CLIPPERin.bed')
    outfh=open(CLIPPERin, 'w')
    proc5=subprocess.Popen([program3,'-i',bamfile_sorted],stdout=outfh)
    proc5.communicate()
    CLIPPERout=CLIPPERin.replace('_CLIPPERin.bed','_CLIP_clusters')
    proc6=subprocess.Popen([program4,'--bam',bamfile_sorted,genome,'--outfile=%s'%CLIPPERout],)
    proc6.communicate()
    # NOTE(review): only the last outfh (CLIPPERin) is closed here; earlier handles
    # are closed implicitly when rebound.
    outfh.close()
    return (CLIPPERin,CLIPPERout)
def makeGeneNameDict(fi):
    # Usage: Make a dictionary that maps RT stop to gene name.
    # Input: File path to intersected CLIPper windows and input RT stop coordinates.
    # Output: Dictionary keyed by 'chrom_start_end_strand' RT-stop ids.
    nameDict={}
    with open(fi, 'r') as infile:
        for read in infile:
            fields=read.strip().split('\t')
            rt_key='_'.join((fields[0],fields[1],fields[2],fields[5]))
            # First window wins: only record a gene for an RT stop we have not seen yet.
            if rt_key not in nameDict:
                nameDict[rt_key]=fields[9].strip().split(CLIPPERoutNameDelim)[0]
    return nameDict
def modCLIPPERout(CLIPPERin,CLIPPERout):
    # Usage: Process the CLIPper output and isolate lowFDR reads based upon CLIPper windows.
    # Input: .bed file passed into CLIPper and the CLIPper windows file.
    # Output: Low FDR reads recovered using the CLIPper windows file, genes per cluster, gene list of CLIPper clusters, and CLIPper windows as .bed.
    program='intersectBed'
    CLIPperOutBed=CLIPPERout+'.bed'
    CLIPpeReadsPerCluster=CLIPPERout+'.readsPerCluster'
    CLIPpeGeneList=CLIPPERout+'.geneNames'
    f = open(CLIPperOutBed,'w')
    g = open(CLIPpeReadsPerCluster,'w')
    h = open(CLIPpeGeneList,'w')
    with open(CLIPPERout,'r') as infile:
        for line in infile:
            try:
                # Note that different versions on CLIPper will report the gene name differently. So, we must handle this.
                chrom,start,end,name,stats,strand,start_2,end_2 = line.strip().split('\t')
                if CLIPPERoutNameDelim=='_':
                    readPerCluster=name.strip().split(CLIPPERoutNameDelim)[2]
                else:
                    readPerCluster=(name.strip().split(CLIPPERoutNameDelim)[1]).split('_')[2]
                geneName=name.strip().split(CLIPPERoutNameDelim)[0]
                f.write('\t'.join((chrom,start,end,name,stats,strand))+'\n')
                g.write((readPerCluster+'\n'))
                h.write((geneName+'\n'))
            except:
                # NOTE(review): malformed cluster lines are silently skipped (prints an empty string).
                print ""
    f.close()
    g.close()
    h.close()
    # Intersect input reads with the CLIPper windows, report full result for both, include strand, do not duplicate reads from -a if they interset with multiple windows.
    clusterWindowInt=CLIPperOutBed.replace('.bed','_fullClusterWindow.bed')
    outfh=open(clusterWindowInt,'w')
    proc=subprocess.Popen([program,'-a',CLIPPERin,'-b',CLIPperOutBed,'-wa','-wb','-s'],stdout=outfh)
    proc.communicate()
    outfh.close()
    # Use the full window intersection to make a dictionary mapping RTstop to gene name.
    nameDict=makeGeneNameDict(clusterWindowInt)
    # Intersect input reads with CLIPper windows, but only report one intersection per read (as reads can overlap with multiple windows).
    clusterWindowIntUniq=CLIPperOutBed.replace('.bed','_oneIntPerRead.bed')
    outfh=open(clusterWindowIntUniq,'w')
    proc=subprocess.Popen([program,'-a',CLIPPERin,'-b',CLIPperOutBed,'-wa','-s','-u'],stdout=outfh)
    proc.communicate()
    outfh.close()
    # Process the uniquly intersected RT stops by adding gene name.
    CLIPPERlowFDR=CLIPperOutBed.replace('.bed','_lowFDRreads.bed')
    outfh=open(CLIPPERlowFDR,'w')
    with open(clusterWindowIntUniq, 'r') as infile:
        for read in infile:
            bed=read.strip().split('\t')
            RT_id='_'.join((bed[0],bed[1],bed[2],bed[5]))
            geneName=nameDict[RT_id]
            # NOTE(review): '\n' is passed as the last join element, so each record
            # ends in '<strand>\t\n' (tab before newline) — downstream code tolerates this.
            outfh.write('\t'.join((bed[0],bed[1],bed[2],geneName,bed[4],bed[5],'\n')))
    outfh.close()
    infile.close()
    return (CLIPPERlowFDR,CLIPpeReadsPerCluster,CLIPpeGeneList,CLIPperOutBed)
print "Run CLIPper."
logOpen.write("Run CLIPper.\n")
CLIPPERio=runCLIPPER(negAndPosMerged,genomeForCLIPper,genomeFile)
CLIPPERin=CLIPPERio[0]
CLIPPERout=CLIPPERio[1]
clipperStats=modCLIPPERout(CLIPPERin,CLIPPERout)
CLIPPERlowFDR=clipperStats[0] # Low FDR reads returned filtred through CLIPper windows
CLIPpeReadsPerCluster=clipperStats[1] # Number of reads per CLIPper cluster
CLIPpeGeneList=clipperStats[2] # Gene names returned from the CLIPper file
CLIPperOutBed=clipperStats[3] # CLIPper windows as a bed file
# <codecell>
def getBedCenterPoints(inBed):
    # Usage: Obtain center coordinates of bedFile.
    # Input: BedFile.
    # Output: Center coordinates returned (1-bp interval at Start+expand).
    # Relies on the module-level 'expand' parameter: records were previously
    # expanded by 'expand' bases, so Start+expand recovers the original RT stop.
    outBed=inBed.replace('.bed','_centerCoord.bed')
    f=open(outBed, 'w')
    with open(inBed, 'r') as infile:
        for line in infile:
            elementList=line.strip().split('\t')
            # NOTE(review): '\n' as the final join element leaves a tab before the newline.
            f.write('\t'.join((elementList[0],str(int(elementList[1])+expand),str(int(elementList[1])+expand+1),elementList[3],elementList[4],elementList[5],'\n')))
    f.close()
    return outBed
def cleanBedFile(inBed):
    # Usage: Sort and recover only first 6 fields from a bed file.
    # Input: BedFile.
    # Output: Sorted bedFile with correct number of fields.
    program='sortBed'
    CLIPperOutBed=inBed.replace('.bed','_cleaned.bed')
    sortedBed=CLIPperOutBed.replace('_cleaned.bed','_cleaned_sorted.bed')
    f=open(CLIPperOutBed, 'w')
    with open(inBed, 'r') as infile:
        for line in infile:
            elementList=line.strip().split('\t')
            # Keep only the standard 6 bed columns; '\n' as last join element leaves a trailing tab.
            f.write('\t'.join((elementList[0],elementList[1],elementList[2],elementList[3],elementList[4],elementList[5],'\n')))
    f.close()
    # Coordinate-sort the cleaned file with bedtools sortBed.
    outfh=open(sortedBed, 'w')
    proc=subprocess.Popen([program, '-i', CLIPperOutBed],stdout=outfh)
    proc.communicate()
    outfh.close()
    return sortedBed
def makeBedGraph(lowFDRreads,sizesFile):
    # Usage: From a bedFile, generate a bedGraph and bigWig.
    # Input: BedFile.
    # Output: BedGraph file path (the .bw is written as a side effect).
    program='genomeCoverageBed'
    program2=os.getcwd() + '/bin/bedGraphToBigWig'
    cleanBed=cleanBedFile(lowFDRreads)
    outname=cleanBed.replace('.bed','.bedgraph')
    outname2=cleanBed.replace('.bed','.bw')
    outfh=open(outname,'w')
    proc=subprocess.Popen([program,'-bg','-split','-i',cleanBed,'-g',sizesFile],stdout=outfh)
    proc.communicate()
    # NOTE(review): outfh2 is opened but never used or closed — bedGraphToBigWig writes
    # outname2 itself; the open() merely truncates any pre-existing file.
    outfh2=open(outname2,'w')
    proc2=subprocess.Popen([program2,outname,sizesFile,outname2],stdout=subprocess.PIPE)
    proc2.communicate()
    return outname
print "Make bedGraph"
logOpen.write("Make bedGraph.\n")
bedGraphCLIPout=makeBedGraph(CLIPPERlowFDR,genomeFile)
CLIPPERlowFDRcenters=getBedCenterPoints(CLIPPERlowFDR)
allLowFDRCentersBedGraph=makeBedGraph(CLIPPERlowFDRcenters,genomeFile)
# <codecell>
def filterSnoRNAs(proteinCodingReads,snoRNAmasker,miRNAmasker):
    # Usage: Filter snoRNA and miRNAs from protein coding reads.
    # Input: .bed file with protein coding reads.
    # Output: snoRNA and miR filtered .bed file.
    program='intersectBed'
    proteinWithoutsnoRNAs=proteinCodingReads.replace('.bed','_snoRNAremoved.bed')
    proteinWithoutmiRNAs=proteinWithoutsnoRNAs.replace('.bed','_miRNAremoved.bed')
    # First pass: drop reads overlapping snoRNAs (-v = no overlap, -s = same strand).
    outfh=open(proteinWithoutsnoRNAs, 'w')
    proc=subprocess.Popen([program,'-a',proteinCodingReads,'-b',snoRNAmasker,'-v','-s'],stdout=outfh)
    proc.communicate()
    outfh.close()
    # Second pass: drop reads overlapping miRNAs from the snoRNA-filtered file.
    outfh=open(proteinWithoutmiRNAs, 'w')
    proc=subprocess.Popen([program,'-a',proteinWithoutsnoRNAs,'-b',miRNAmasker,'-v','-s'],stdout=outfh)
    proc.communicate()
    outfh.close()
    return (proteinWithoutmiRNAs)
def getLowFDRReadTypes(CLIPPERlowFDR,pathToGeneLists):
    # Usage: Given a list of genes, return all reads for the associated genes.
    # Input: Gene list and the path to lowFDR read file.
    # Output: List of reads associated with the given genes.
    lowFDRgenelist=[]
    for path in pathToGeneLists:
        outfile=path+'_LowFDRreads.bed'
        # NOTE(review): shell=True with interpolated paths — safe only while the
        # file paths contain no shell metacharacters; consider shlex quoting.
        proc=subprocess.Popen('grep -F -f %s %s > %s'%(path,CLIPPERlowFDR,outfile),shell=True)
        proc.communicate()
        return_code=proc.wait() # *** Remove later. *** (redundant after communicate())
        lowFDRgenelist=lowFDRgenelist+[outfile]
    return lowFDRgenelist
def compareLists(list1,list2,outname):
    # Usage: Compare gene lists and output matches to the file.
    # Input: Two gene list file paths; outname supplies the gene category (second dot-field).
    # Output: Path of the file containing the matching genes.
    # Fixed: all three handles were leaked; context managers close them, ensuring the
    # output is flushed before getLowFDRReadTypes() greps against it.
    with open(list1,'r') as f:
        with open(list2,'r') as g:
            commonGenes=set(f.readlines()) & set(g.readlines())
    geneCategory=outname.split('.')[1]
    outputName=outfilepath+'clipGenes_'+geneCategory
    with open(outputName,'w') as outfh:
        for gene in commonGenes:
            outfh.write(gene)
    return outputName
def getLowFDRGeneTypes(CLIPpeGeneList,geneAnnot):
    # Usage: Get all genes listed under each type, compare to CLIPper targets.
    # Input: CLIPper gene-name file and per-type gene annotation paths.
    # Output: Paths to files containing all CLIPper genes of each type.
    return [
        compareLists(CLIPpeGeneList, genepath, os.path.split(genepath)[1])
        for genepath in geneAnnot
    ]
print "Partition reads by type."
logOpen.write("Partition reads by type.\n")
# Split low-FDR reads by gene category, then build filtered/center-point bedGraphs
# for the protein-coding and lincRNA subsets.
pathToGeneLists=getLowFDRGeneTypes(CLIPpeGeneList,geneAnnot)
pathToReadLists=getLowFDRReadTypes(CLIPPERlowFDR,pathToGeneLists)
proteinCodingReads=outfilepath+'clipGenes_proteinCoding_LowFDRreads.bed'
proteinBedGraph=makeBedGraph(proteinCodingReads,genomeFile)
filteredProteinCodingCenters=filterSnoRNAs(getBedCenterPoints(proteinCodingReads),snoRNAmasker,miRNAmasker)
filteredProteinCentersBedGraph=makeBedGraph(filteredProteinCodingCenters,genomeFile)
lincRNAReads=outfilepath+'clipGenes_lincRNA_LowFDRreads.bed'
filteredLincRNACenters=filterSnoRNAs(getBedCenterPoints(lincRNAReads),snoRNAmasker,miRNAmasker)
# <codecell>
# --- #
# <codecell>
def sortFilteredBed(bedFile):
    # Usage: Load a filtered 6-column bed file and return per-gene read counts, highest first.
    # Input: Bed file path.
    # Output: pandas Series indexed by gene name, sorted descending by count.
    bf=pd.DataFrame(pd.read_table(bedFile,header=None))
    bf.columns=['Chr','Start','Stop','CLIPper_name','Q','Strand']
    geneCounts=countHitsPerGene(bf)
    return geneCounts
def countHitsPerGene(bf):
    # *** THIS MAY DEPEND UPON THE VERSION OF CLIPPER USED ***
    # CLIPper names clusters '<gene>_<n>_<reads>'; the gene is the first '_' field.
    bf['geneName']=bf['CLIPper_name'].apply(lambda x: x.split('_')[0])
    geneCounts=bf.groupby('geneName').size()
    # Fixed: Series.sort() was removed from pandas; sort_values(ascending=False)
    # yields the same descending-by-count ordering on both old and new pandas.
    geneCounts=geneCounts.sort_values(ascending=False)
    return geneCounts
def getSnoRNAreads(CLIPPERlowFDRcenters,snoRNAindex):
    # Usage: Intersect low-FDR read centers with snoRNA coordinates.
    # Input: Center-point bed file and snoRNA coordinate bed.
    # Output: Path to bed of reads overlapping snoRNAs (-wa -wb: both records reported).
    program='intersectBed'
    bedFile=outfilepath+'clipGenes_snoRNA_LowFDRreads.bed'
    outfh=open(bedFile, 'w')
    proc=subprocess.Popen([program,'-a',CLIPPERlowFDRcenters,'-b',snoRNAindex,'-s','-wa','-wb'],stdout=outfh)
    proc.communicate()
    outfh.close()
    return bedFile
def countSnoRNAs(bedFile_sno):
    # Usage: Count reads per snoRNA from the read/snoRNA intersection file.
    # Input: 12-column bed (6 read columns + 6 snoRNA columns) from getSnoRNAreads.
    # Output: pandas Series indexed by snoRNA name, sorted descending by count.
    bf=pd.DataFrame(pd.read_table(bedFile_sno,header=None))
    bf.columns=['Chr','Start','End','CLIPper_name','Q','Strand','Chr_snoRNA','Start_snoRNA','Stop_snoRNA','name_snoRNA','Type','strand_snoRNA']
    geneCounts=bf.groupby('name_snoRNA').size()
    # Fixed: Series.sort() was removed from pandas; sort_values gives the same ordering.
    geneCounts=geneCounts.sort_values(ascending=False)
    return geneCounts
def countRemainingGeneTypes(remaining):
    # Usage: For each remaining gene-type read file, write per-gene counts to PlotData files.
    # Input: List of 12-column bed files (read + CLIPper-window columns).
    # Output: None; writes one CSV per gene type under outfilepath.
    for bedFile in remaining:
        try:
            bf=pd.DataFrame(pd.read_table(bedFile,header=None))
            bf.columns=['Chr','Start','End','ReadName','Q','Strand','CLIPper_winChr','CLIPper_winStart','CLIPper_winEmd','CLIPper_winaName','CLIPper_winP','CLIPper_winStrand']
            # *** THIS MAY DEPEND UPON THE VERSION OF CLIPPER USED ***
            bf['geneName']=bf['CLIPper_winaName'].apply(lambda x: x.split('_')[0])
            geneCounts=bf.groupby('geneName').size()
            # NOTE(review): Series.sort() is removed in modern pandas — confirm installed version.
            geneCounts.sort(ascending=False)
            # Gene type is the second '_' field of the file name (clipGenes_<type>_...).
            head,fname=os.path.split(bedFile)
            geneType=fname.split("_")[1]
            outfilepathToSave=outfilepath+'/PlotData_ReadsPerGene_%s'%geneType
            geneCounts.to_csv(outfilepathToSave)
        except ValueError:
            # Empty files raise ValueError inside read_table; skip them.
            print "No reads in %s"%bedFile
print "Generate sorted gene lists by gene type."
logOpen.write("Generate sorted gene lists by gene type.\n")
# Per-gene counts for the snoRNA/miRNA-filtered protein-coding and lincRNA center files.
bedFile_pc=outfilepath+"clipGenes_proteinCoding_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved.bed"
geneCounts_pc=sortFilteredBed(bedFile_pc)
outfilepathToSave=outfilepath + '/PlotData_ReadsPerGene_proteinCoding'
geneCounts_pc.to_csv(outfilepathToSave)
bedFile_linc=outfilepath+"clipGenes_lincRNA_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved.bed"
geneCounts_linc=sortFilteredBed(bedFile_linc)
outfilepathToSave=outfilepath + '/PlotData_ReadsPerGene_lincRNA'
geneCounts_linc.to_csv(outfilepathToSave)
# NOTE(review): the two lines below recompute files already produced earlier in the script.
CLIPPERlowFDRcenters=getBedCenterPoints(CLIPPERlowFDR)
allLowFDRCentersBedGraph=makeBedGraph(CLIPPERlowFDRcenters,genomeFile)
bedFile_sno=getSnoRNAreads(CLIPPERlowFDRcenters,snoRNAindex)
geneCounts_sno=countSnoRNAs(bedFile_sno)
outfilepathToSave=outfilepath + '/PlotData_ReadsPerGene_snoRNA'
geneCounts_sno.to_csv(outfilepathToSave)
# Everything except the three categories handled above.
remaining=[f for f in glob.glob(outfilepath+"*_LowFDRreads.bed") if 'lincRNA' not in f and 'proteinCoding' not in f and 'snoRNA' not in f]
countRemainingGeneTypes(remaining)
# <codecell>
def makeClusterCenter(windowsFile):
    # Usage: Generate a file of cluster centers.
    # Input: Raw CLIPper output file.
    # Output: File with coordinates for the center of each CLIPper cluster.
    cleanBed = cleanBedFile(windowsFile)
    centers=cleanBed.replace('.bed','.clusterCenter')
    f = open(centers, 'w')
    with open(cleanBed, 'r') as infile:
        for line in infile:
            elementList = line.strip().split('\t')
            # Half the cluster width; abs() guards against reversed coordinates.
            diff=abs(int((int(elementList[1])-int(elementList[2]))/2))
            # Emit a 1-bp interval at the cluster midpoint.
            f.write(elementList[0]+'\t'+str(int(elementList[1])+diff)+'\t'+str(int(elementList[1])+diff+1)+'\n')
    f.close()
    return centers
def getClusterIntensity(bedGraph,centerCoordinates):
    # Usage: Generate a matrix of read intensity values around CLIPper cluster center.
    # Input: BedGraph and cluster center file.
    # Output: Generates a matrix, which is passed into R.
    program=os.getcwd() + '/bin/grep_chip-seq_intensity.pl'
    program2='wait'
    proc=subprocess.Popen(['perl',program, centerCoordinates, bedGraph],)
    proc.communicate()
    logOpen.write("Waiting for Cluster Intensity file completion...\n")
    # NOTE(review): shelling out to 'wait' is a no-op here — the perl process was
    # already awaited by communicate() above.
    proc2=subprocess.Popen(program2,shell=True)
    proc2.communicate()
print "Get binding intensity around cluster centers."
logOpen.write("Get binding intensity around cluster centers.\n")
bedGraphCLIPin=makeBedGraph(CLIPPERin,genomeFile)
centerCoordinates=makeClusterCenter(CLIPperOutBed)
getClusterIntensity(bedGraphCLIPin,centerCoordinates)
# <codecell>
def partitionReadsByUTR(infile,UTRmask,utrReads,notutrReads):
    # Usage: Split reads into those overlapping a UTR mask (-u) and those that do not (-v).
    # Input: Read bed file, mask bed file, and the two output paths.
    # Output: None; writes utrReads and notutrReads (strand-aware via -s).
    program = 'intersectBed'
    outfh = open(utrReads,'w')
    proc = subprocess.Popen([program,'-a',infile,'-b',UTRmask,'-u','-s'],stdout=outfh)
    proc.communicate()
    outfh.close()
    outfh = open(notutrReads,'w')
    proc = subprocess.Popen([program,'-a',infile,'-b',UTRmask,'-v','-s'],stdout=outfh)
    proc.communicate()
    outfh.close()
def extractUTRs(bedIn,fivePUTRBed,threePUTRBed,cdsBed):
    # Usage: Extract all UTR specific reads from the input file.
    # Input: .bed file
    # Output: Mutually exclusive partitions of the input file, produced by
    # successively partitioning the reads that did NOT match the previous mask.
    def _pair(tag):
        # Output path pair (<matching>, <not-matching>) for one partition step.
        return (bedIn.replace('.bed', '_%s.bed' % tag),
                bedIn.replace('.bed', '_NOT%s.bed' % tag))
    fivePreads, notFivePreads = _pair('5p')
    partitionReadsByUTR(bedIn,fivePUTRBed,fivePreads,notFivePreads)
    threePreads, notThreePreads = _pair('3p')
    partitionReadsByUTR(notFivePreads,threePUTRBed,threePreads,notThreePreads)
    CDSreads, notCDSreads = _pair('cds')
    partitionReadsByUTR(notThreePreads,cdsBed,CDSreads,notCDSreads)
    return (fivePreads,notFivePreads,CDSreads,notCDSreads,threePreads,notThreePreads)
print "Intron and UTR analysis."
logOpen.write("Intron and UTR analysis.\n")
# Partition filtered protein-coding centers into 5'UTR / 3'UTR / CDS and save per-gene counts.
fivePreads,notFivePreads,CDSreads,notCDSreads,threePreads,notThreePreads=extractUTRs(filteredProteinCodingCenters,fivePUTRBed,threePUTRBed,cdsBed)
geneCounts_5p=sortFilteredBed(fivePreads)
geneCounts_3p=sortFilteredBed(threePreads)
geneCounts_cds=sortFilteredBed(CDSreads)
outfilepathToSave=outfilepath+'/PlotData_ReadsPerGene_5pUTR'
geneCounts_5p.to_csv(outfilepathToSave)
outfilepathToSave=outfilepath+'/PlotData_ReadsPerGene_3pUTR'
geneCounts_3p.to_csv(outfilepathToSave)
outfilepathToSave=outfilepath+'/PlotData_ReadsPerGene_CDS'
geneCounts_cds.to_csv(outfilepathToSave)
# <codecell>
def makeTab(bedGraph,genesFile,sizesFile):
    # Usage: Convert a bedGraph to per-gene .tab format via the helper perl script.
    # Input: BedGraph path plus gene annotation and genome sizes files.
    # Output: Path to the .tab file.
    program = os.getcwd() + '/bin/bedGraph2tab.pl'
    program2 = 'wait'
    outfile=bedGraph.replace('.bedgraph','.tab')
    proc = subprocess.Popen(['perl',program,genesFile,sizesFile,bedGraph,outfile],)
    proc.communicate()
    # NOTE(review): shelling out to 'wait' is a no-op; communicate() already waited.
    proc2 = subprocess.Popen(program2,shell=True)
    proc2.communicate()
    return outfile
def makeAvgGraph(bedGraph,utrFile,genesFile,sizesFile):
    # Usage: Generate a matrix of read itensity values across gene body.
    # Input: BedGraph.
    # Output: Generates two matricies.
    # Runs the bundled averageGraph Perl script on the tab file produced by
    # makeTab; outputs use the '<tab>_UTRs' handle.  Returns None.
    program= os.getcwd() + '/bin/averageGraph_scaled_tab.pl'
    # 'wait' is run through the shell after the Perl job, presumably as a
    # synchronization barrier -- TODO confirm it is still needed.
    program2 = 'wait'
    tabFile=makeTab(bedGraph,genesFile,sizesFile)
    outhandle=tabFile.replace('.tab','_UTRs')
    # tabFile is passed twice; the Perl script's argument semantics are not
    # visible from this file -- TODO confirm which slots these fill.
    proc = subprocess.Popen(['perl',program,utrFile,tabFile,tabFile,outhandle],)
    proc.communicate()
    proc2 = subprocess.Popen(program2,shell=True)
    proc2.communicate()
# Metagene (gene-body) analysis over protein-coding reads.
print "Gene body analysis."
logOpen.write("Gene body analysis.\n")
bedGraphProtein=makeBedGraph(bedFile_pc,genomeFile)
makeAvgGraph(bedGraphProtein,utrFile,genesFile,sizesFile)
# <codecell>
def getGeneStartStop(bedFile,geneRef):
    # Join each low-FDR ncRNA read with its gene's start/stop coordinates
    # (from geneRef) and write a .geneStartStop table next to the input bed
    # file.  Returns None; the output file is the only effect.
    try:
        bf=pd.DataFrame(pd.read_table(bedFile,header=None))
        bf.columns=['Chr','Start','End','ReadName','Q','Strand','CLIPper_winChr','CLIPper_winStart','CLIPper_winEmd','CLIPper_winaName','CLIPper_winP','CLIPper_winStrand']
        # CLIPper window names presumably look like '<ENSG>_<n>'; keep the
        # gene id portion.
        bf['geneName']=bf['CLIPper_winaName'].apply(lambda x: x.split('_')[0])
        merge=pd.merge(geneRef,bf,left_on='Ensembl Gene ID',right_on='geneName')
        ncRNA_startStop=merge[['Ensembl Gene ID','Gene Start (bp)','Gene End (bp)','Start','End','Strand']]
        outfilepathToSave=bedFile.replace(".bed",".geneStartStop")
        ncRNA_startStop.to_csv(outfilepathToSave)
    except ValueError:
        # Presumably raised by the column assignment when the bed file is
        # empty or malformed -- TODO confirm.
        print "No reads in %s"%bedFile
print "ncRNA gene body anaysis."
geneStartStopRepo=os.getcwd()+'/docs/all_genes.txt'
geneRef=pd.DataFrame(pd.read_table(geneStartStopRepo))
# All remaining low-FDR bed files except classes handled elsewhere.
remaining=[f for f in glob.glob(outfilepath+"*_LowFDRreads.bed") if 'lincRNA' not in f and 'proteinCoding' not in f and 'snoRNA' not in f]
for bedFile in remaining:
    st_stop=getGeneStartStop(bedFile,geneRef)  # st_stop is always None
# lincRNA file processing
# Same join as getGeneStartStop, but the lincRNA file has only 6 columns.
bedFile_linc=outfilepath+"clipGenes_lincRNA_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved.bed"
bf=pd.DataFrame(pd.read_table(bedFile_linc,header=None))
bf.columns=['Chr','Start','Stop','CLIPper_name','Q','Strand']
bf['geneName']=bf['CLIPper_name'].apply(lambda x: x.split('_')[0])
merge=pd.merge(geneRef,bf,left_on='Ensembl Gene ID',right_on='geneName')
ncRNA_startStop=merge[['Ensembl Gene ID','Gene Start (bp)','Gene End (bp)','Start','Stop','Strand']]
outfilepathToSave=bedFile_linc.replace(".bed",".geneStartStop")
ncRNA_startStop.to_csv(outfilepathToSave)
# <codecell>
def makeRepeatAnnotation(repeatGenomeBuild,repeatAnnotation):
    # Load the repeat pseudo-genome (row [1] holds the base string -- the
    # file presumably has a header row above it; TODO confirm) and the
    # annotation table mapping each repeat name to its index range.
    repeat_genome=np.genfromtxt(repeatGenomeBuild,dtype='string')
    repeat_genome_bases=repeat_genome[1]
    repeat_genome_size=len(repeat_genome[1])  # computed but never used
    repeatAnnotDF=pd.DataFrame(pd.read_table(repeatAnnotation,header=None))
    repeatAnnotDF.columns=['Name','Length','IndexStart','IndexEnd']
    repeatAnnotDF['End_for_extraction']=repeatAnnotDF['IndexEnd']+1 # Python list extraction is not end index inclusive; to extract sequence, use end + 1.
    return (repeat_genome_bases,repeatAnnotDF)
def readBed(path):
    # Load a 6-column BED file.  Everything is read as str, then the Start
    # column is coerced back to int (other columns stay as strings).
    frame = pd.read_table(path,dtype=str,header=None)
    frame.columns = ['Index','Start','Stop','Name','QS','Strand']
    frame['Start'] = frame['Start'].astype(int)
    return frame
# Assign repeat-mapped reads to individual repeat elements by RT-stop position
# and save one PlotData file per repeat.
print "Record repeat RNA."
repeat_genome_bases,repeatAnnotDF=makeRepeatAnnotation(repeatGenomeBuild,repeatAnnotation)
repeatAnnotDF.set_index('Name',inplace=True,drop=False)
# Get merged data for repeat index.
repeatMerged=glob.glob(outfilepath+"*repeat_allreads.mergedRT.bed")
rep=pd.read_table(repeatMerged[0],dtype=str,header=None)
rep.columns=['Rep_index','Start','Stop','Read_name','Q','Strand']
# 'expand' is a module-level offset; adding it to Start presumably recovers
# the RT-stop coordinate written earlier in the pipeline -- TODO confirm.
rep['RT_stop']=rep['Start'].astype(int)+expand
for ix in repeatAnnotDF.index:
    end=repeatAnnotDF.loc[ix,'IndexEnd']
    repName=repeatAnnotDF.loc[ix,'Name']
    # Reads whose RT stop falls strictly inside this repeat's index range.
    gene_hits=rep[(rep['RT_stop']<int(repeatAnnotDF.loc[ix,'IndexEnd']))&(rep['RT_stop']>int(repeatAnnotDF.loc[ix,'IndexStart']))]
    # NOTE(review): assigning columns on this filtered slice triggers pandas
    # SettingWithCopyWarning; the values land on the copy, which is what is
    # written below, so the saved output is still complete.
    gene_hits['Repeat_End']=repeatAnnotDF.loc[ix,'IndexEnd']
    gene_hits['Repeat_Start']=repeatAnnotDF.loc[ix,'IndexStart']
    outfilepathToSave=outfilepath + '/PlotData_RepeatRNAreads_%s'%repName
    gene_hits.to_csv(outfilepathToSave)
# <codecell>
# NOTE(review): this is a verbatim re-definition of makeRepeatAnnotation from
# the previous cell (a notebook-export artifact); harmless but redundant --
# candidate for removal.
def makeRepeatAnnotation(repeatGenomeBuild,repeatAnnotation):
    repeat_genome=np.genfromtxt(repeatGenomeBuild,dtype='string')
    repeat_genome_bases=repeat_genome[1]
    repeat_genome_size=len(repeat_genome[1])  # computed but never used
    repeatAnnotDF=pd.DataFrame(pd.read_table(repeatAnnotation,header=None))
    repeatAnnotDF.columns=['Name','Length','IndexStart','IndexEnd']
    repeatAnnotDF['End_for_extraction']=repeatAnnotDF['IndexEnd']+1 # Python list extraction is not end index inclusive; to extract sequence, use end + 1.
    return (repeat_genome_bases,repeatAnnotDF)
repeat_genome_bases,repeatAnnotDF=makeRepeatAnnotation(repeatGenomeBuild,repeatAnnotation)
# <codecell>
def lineCount(filename):
    """Return the number of lines in *filename*.

    Fixes the empty-file case: the original initialized the counter to 0 and
    unconditionally returned ``i + 1``, so a file with no lines reported 1.
    ``enumerate(f, 1)`` leaves the counter at 0 when the file is empty and
    at the true line count otherwise.
    """
    count = 0
    with open(filename) as f:
        for count, _ in enumerate(f, 1):
            pass
    return count
def plot_ReadAccounting(outfilepath,sampleName):
    # Count the reads surviving each pipeline stage (raw fastq through
    # CLIPper output), plot them as a horizontal log10 bar chart, and save
    # the counts to PlotData_ReadsPerPipeFile.
    # Relies on module-level globals: infilepath, index_tag, threshold.
    rawRead1=infilepath+sampleName+'_R1.fastq'
    rawRead2=infilepath+sampleName+'_R2.fastq'
    reads3pTrim=[outfilepath+sampleName+'_R1_3ptrimmed.fastq',outfilepath+sampleName+'_R2_3ptrimmed.fastq']
    readsFilter=[outfilepath+sampleName+'_R1_3ptrimmed_filter.fastq',outfilepath+sampleName+'_R2_3ptrimmed_filter.fastq']
    readsNoDupes=[outfilepath+sampleName+'_R1_3ptrimmed_filter_nodupe.fastq',outfilepath+sampleName+'_R2_3ptrimmed_filter_nodupe.fastq']
    readsMappedReapeat=[outfilepath+sampleName+'_R1_3ptrimmed_filter_nodupe_5ptrimmed_mappedTorepeat_withDupes.bed',outfilepath+sampleName+'_R2_3ptrimmed_filter_nodupe_5ptrimmed_mappedTorepeat_withDupes.bed']
    readsMappedHg19=[outfilepath+sampleName+'_R1_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes.bed'%index_tag,outfilepath+sampleName+'_R2_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes.bed'%index_tag]
    # NOTE(review): the blacklist path list below is byte-identical to
    # readsMappedHg19, so the 'Blacklist' rows duplicate the Hg19Mapped
    # counts -- looks like a copy-paste placeholder; TODO confirm intended
    # filenames.
    readsMappedBlacklist=[outfilepath+sampleName+'_R1_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes.bed'%index_tag,outfilepath+sampleName+'_R2_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes.bed'%index_tag]
    readsMappedRepeatMask=[outfilepath+sampleName+'_R1_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes_noBlacklist_noRepeat.bed'%index_tag,outfilepath+sampleName+'_R2_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes_noBlacklist_noRepeat.bed'%index_tag]
    clipperIN=outfilepath+sampleName+'_threshold=%s_%s_allreads.mergedRT_CLIPPERin.bed'%(threshold,index_tag)
    clipperOUT=outfilepath+sampleName+'_threshold=%s_%s_allreads.mergedRT_CLIP_clusters_lowFDRreads.bed'%(threshold,index_tag)
    fileNames=['Raw (R1)','Raw (R2)','3p Trim (R1)','3p Trim (R2)','Filter (R1)','Filter (R2)','No dupes (R1)','No dupes (R2)','RepeatMapped (R1)','RepeatMapped (R2)','Hg19Mapped (R1)','Hg19Mapped (R2)','Blacklist (R1)','Blacklist (R2)','RepeatMask (R1)','RepeatMask (R2)','ClipperIn','ClipperOut']
    filesToCount=[rawRead1,rawRead2,reads3pTrim[0],reads3pTrim[1],readsFilter[0],readsFilter[1],readsNoDupes[0],readsNoDupes[1],readsMappedReapeat[0],readsMappedReapeat[1],readsMappedHg19[0],readsMappedHg19[1],readsMappedBlacklist[0],readsMappedBlacklist[1],readsMappedRepeatMask[0],readsMappedRepeatMask[1],clipperIN,clipperOUT]
    counts=[]
    counter=0
    for fileString in filesToCount:
        temp=lineCount(fileString)
        if counter < 8:
            temp=temp/4 # Fastq files
            # First 8 entries are fastq (4 lines per read).  Under Python 2
            # this is integer (floor) division.
        counts=counts+[temp]
        counter += 1
    ind = np.arange(len(counts)) + 0.5
    # Reverse so the first pipeline stage appears at the top of the chart.
    plt.barh(ind,list(reversed(np.log10(np.array(counts)))),align='center',color='blue')
    plt.xlabel('log10(Counts per file)',fontsize=5)
    locs,pltlabels = plt.xticks(fontsize=5)
    plt.setp(pltlabels, rotation=90, fontsize=5)
    plt.yticks(ind,list(reversed(fileNames)),fontsize=5)
    # NOTE(review): 'yticks' is not a documented value for tick_params' axis
    # argument ('x'/'y'/'both') -- TODO confirm this call has any effect.
    plt.tick_params(axis='yticks',labelsize=5)
    ax=plt.gca()
    for line in ax.get_yticklines():
        line.set_markersize(0)  # hide tick marks
    plt.title('Read counts',fontsize=5)
    # Persist the raw counts alongside the figure.
    readDF=pd.DataFrame()
    readDF['File_name']=fileNames
    readDF['Reads_per_file']=counts
    outfilepathToSave=outfilepath + '/PlotData_ReadsPerPipeFile'
    readDF.to_csv(outfilepathToSave)
plt.subplot(2,3,1)
plot_ReadAccounting(outfilepath,sampleName)
# <codecell>
def plot_BoundGeneTypes(outfilepath,sampleName):
    # Bar chart of the number of bound genes per RNA class, plus a saved
    # table of gene and read counts per class.
    record=pd.DataFrame()
    # Exclude specific files (e.g., UTR-specific reads).
    geneListToPlot=[f for f in glob.glob(outfilepath+'PlotData_ReadsPerGene_*') if '5pUTR' not in f and '3pUTR' not in f and 'CDS' not in f]
    for boundGenes in geneListToPlot:
        glist=pd.read_csv(boundGenes,header=None)
        glist.columns=['GeneName','Count']
        # Class name is the last underscore-delimited token of the filename.
        gName=boundGenes.split('_')[-1]
        record.loc[gName,'genesBound']=glist.shape[0]
        record.loc[gName,'totalReads']=glist['Count'].sum()
    # NOTE(review): DataFrame.sort was removed in pandas 0.20+ (use
    # sort_values); left as-is for the pandas version this pipeline pins.
    record.sort('genesBound',inplace=True)
    outfilepathToSave=outfilepath + '/PlotData_ReadAndGeneCountsPerGenetype'
    record.to_csv(outfilepathToSave)
    ind = np.arange(record.shape[0]) + 0.5
    plt.bar(ind,record['genesBound'],align='center',color='blue')
    locs,pltlabels = plt.yticks(fontsize=5)
    locs,pltlabels = plt.xticks(ind,record.index,fontsize=5)
    plt.setp(pltlabels, rotation=90, fontsize=5)
    # NOTE(review): 'xticks'/'yticks' are not documented axis values for
    # tick_params ('x'/'y'/'both') -- TODO confirm these calls do anything.
    plt.tick_params(axis='xticks',labelsize=5)
    ax=plt.gca()
    for line in ax.get_xticklines():
        line.set_markersize(0)  # hide tick marks
    plt.ylabel('Number of genes bound',fontsize=5)
    plt.tick_params(axis='yticks',labelsize=5)
    plt.title('Bound genes by class',fontsize=5)
plt.subplot(2,3,6)
plot_BoundGeneTypes(outfilepath,sampleName)
# <codecell>
def plot_ReadsPerCluster(outfilepath,sampleName):
    # Histogram of reads-per-cluster from the CLIPper output.
    # Relies on module-level globals: threshold, index_tag.
    readPerCluster=outfilepath+sampleName+'_threshold=%s_%s_allreads.mergedRT_CLIP_clusters.readsPerCluster'%(threshold,index_tag)
    clust=pd.DataFrame(pd.read_table(readPerCluster,header=None))
    clust.columns=['ReadsPerCluster']
    clust=clust['ReadsPerCluster']
    interval=10
    # Bin edges padded by one interval on each side of the observed range.
    bins=range(min(clust)-10,max(clust)+10,interval)
    hist,bins=np.histogram(clust,bins=bins)
    width=0.7*(bins[1]-bins[0])
    center=(bins[:-1] + bins[1:])/2  # bin midpoints (integer division on py2)
    plt.bar(center, hist,align='center',width=width)
    locs,pltlabels = plt.yticks(fontsize=5)
    locs,pltlabels = plt.xticks(center,center,fontsize=5)
    plt.setp(pltlabels, rotation=90, fontsize=3.5)
    plt.tick_params(axis='yticks',labelsize=5)
    plt.xlabel('Reads per cluster (bin=%s)'%interval,fontsize=5)
    plt.ylabel('Frequency (RT stop count)',fontsize=5)
    plt.title('Reads per cluster',fontsize=5)
    plt.xlim(0,100) # Make the histogram easy to view.
    # plt.xlim(-interval,np.max(center)+interval)
plt.subplot(2,3,2)
plot_ReadsPerCluster(outfilepath,sampleName)
# <codecell>
def plot_ClusterSizes(outfilepath,sampleName):
    # Boxplot of CLIPper cluster lengths, computed as |start - end|.
    clipClusters=outfilepath+sampleName+'_threshold=%s_%s_allreads.mergedRT_CLIP_clusters'%(threshold,index_tag)
    clust=pd.DataFrame(pd.read_table(clipClusters,header=None,skiprows=1))
    clust.columns=['chr','start','end','name','score','strand','m1','m2']
    # start - end is negative for properly ordered intervals; fabs fixes the
    # sign so clusterSize is the interval length.
    clust['clusterSize']=clust['start']-clust['end']
    clust['clusterSize']=clust['clusterSize'].apply(lambda x: math.fabs(x))
    plt.boxplot(clust['clusterSize'])
    plt.tick_params(axis='x',labelbottom='off')
    ax=plt.gca()
    for line in ax.get_xticklines():
        line.set_markersize(0)  # hide tick marks
    plt.ylabel('Cluster length (bases)',fontsize=5)
    locs,pltlabels = plt.yticks(fontsize=5)
    plt.title('Cluster size',fontsize=5)
plt.subplot(2,3,3)
plot_ClusterSizes(outfilepath,sampleName)
# <codecell>
def plot_clusterBindingIntensity(outfilepath,sampleName):
    # Heatmap of read intensity across cluster centers, rows ordered by
    # total signal.
    clusterCenterHeatmap=outfilepath+sampleName+'_threshold=%s_%s_allreads.mergedRT_CLIP_clusters_cleaned_sorted.clusterCenter_heatmap.txt'%(threshold,index_tag)
    hmap=pd.DataFrame(pd.read_table(clusterCenterHeatmap,header=None,skiprows=1))
    hmap_vals=hmap.ix[:,1:]  # drop the first (identifier) column
    sums=hmap_vals.sum(axis=1)
    # Sort rows by total signal.  np.argsort yields positional indices; .loc
    # works here because the default RangeIndex makes labels == positions.
    hmap_vals=hmap_vals.loc[np.argsort(sums),:]
    plt.ylim(0,hmap_vals.shape[0])
    p=plt.pcolormesh(np.array(hmap_vals),cmap='Blues')
    plt.tick_params(axis='x',labelbottom='off')
    plt.xlabel('Cluster position',fontsize=5)
    locs,pltlabels = plt.yticks(fontsize=5)
    plt.ylabel('Cluster number',fontsize=5)
    plt.title('Read distribution',fontsize=5)
plt.subplot(2,3,4)
plot_clusterBindingIntensity(outfilepath,sampleName)
# <codecell>
def readUTRfile(path):
    # Load a two-column (gene name, read count) CSV written earlier in the
    # pipeline and return it with named columns.
    counts = pd.read_csv(path,header=None)
    counts.columns = ['Gene_name','Count']
    return counts
def plot_readsBymRNAregion(outfilepath,sampleName):
    # Pie chart of protein-coding reads by mRNA region
    # (intronic / 5'UTR / CDS / 3'UTR).
    # NOTE(review): this function draws on the module-level 'ax' created at
    # the call site below (ax=plt.subplot(2,3,5)); it defines no local axes,
    # so it must only be called after such an 'ax' exists.
    pc_5pReads=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_5pUTR')['Count'].sum()
    pc_3pReads=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_3pUTR')['Count'].sum()
    pc_CDSReads=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_CDS')['Count'].sum()
    non_intronic=pc_5pReads+pc_3pReads+pc_CDSReads
    allProteinCoding=outfilepath +'clipGenes_proteinCoding_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved.bed'
    all_pc=pd.DataFrame(pd.read_table(allProteinCoding,header=None))
    pc_allReads=all_pc.shape[0]
    # Reads not assigned to any UTR/CDS region are treated as intronic.
    v=[float(pc_allReads-non_intronic)/pc_allReads,float(pc_5pReads)/pc_allReads,float(pc_CDSReads)/pc_allReads,float(pc_3pReads)/pc_allReads]
    pie_wedges=ax.pie(v,labels=["Intronic","5p UTR","CDS","3pUTR"],labeldistance=1.1,autopct='%1.1f%%')
    plt.rcParams['font.size']=5
    for wedge in pie_wedges[0]:
        wedge.set_edgecolor('black')
        wedge.set_lw(1)
ax=plt.subplot(2,3,5)
plot_readsBymRNAregion(outfilepath,sampleName)
# <codecell>
# Assemble Figure 1 (2x3 grid of the QC panels defined above) and save it as
# PNG and PDF.  Each plot_* function was already invoked once while being
# defined; this re-invokes them all onto a single figure.
fig1=plt.figure(1)
plt.subplot(2,3,1)
plot_ReadAccounting(outfilepath,sampleName)
plt.subplot(2,3,2)
plot_ReadsPerCluster(outfilepath,sampleName)
plt.subplot(2,3,3)
plot_ClusterSizes(outfilepath,sampleName)
plt.subplot(2,3,4)
plot_clusterBindingIntensity(outfilepath,sampleName)
ax=plt.subplot(2,3,5)  # module-level 'ax' consumed by plot_readsBymRNAregion
plot_readsBymRNAregion(outfilepath,sampleName)
plt.subplot(2,3,6)
plot_BoundGeneTypes(outfilepath,sampleName)
fig1.tight_layout()
fig1.savefig(outfilepath+'Figure1.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig1.savefig(outfilepath+'Figure1.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
def plot_mRNAgeneBodyDist(outfilepath,sampleName):
    # Average CLIP signal across the scaled mRNA gene body (the dashed lines
    # at x=200 and x=400 mark the 5'UTR/CDS and CDS/3'UTR boundaries of the
    # cds200-scaled matrix).
    averageGraph=outfilepath+'clipGenes_proteinCoding_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved_cleaned_sorted_UTRs_scaled_cds200_abt0_averageGraph.txt'
    hmap=pd.DataFrame(pd.read_table(averageGraph,header=None,skiprows=1))
    hmap=hmap.set_index(0)
    avgTrace=hmap.loc['treat',:]
    # NOTE(review): linewidth is passed as the string '2'; recent matplotlib
    # expects a number -- TODO confirm against the pinned matplotlib version.
    plt.plot(avgTrace,color='blue',linewidth='2')
    plt.vlines(200,0,np.max(avgTrace),linestyles='dashed')
    plt.vlines(400,0,np.max(avgTrace),linestyles='dashed')
    plt.ylim(0,np.max(avgTrace))
    plt.tick_params(axis='x',labelbottom='off')
    plt.xlabel('mRNA gene body (5pUTR, CDS, 3pUTR)')
    plt.ylabel('Read density')
    plt.tick_params(axis='y',labelsize=5)
    plt.title('CLIP signal across average mRNA transcript.',fontsize=5)
plt.subplot2grid((2,3),(0,0),colspan=3)
plot_mRNAgeneBodyDist(outfilepath,sampleName)
# <codecell>
def convertENBLids(enst_name):
    # Map an ENST transcript id to its ENSG gene id via the module-level
    # ensemblGeneAnnot table (indexed by transcript name; the 'name2' column
    # holds the gene id).
    return ensemblGeneAnnot.loc[enst_name, 'name2']
def getUTRbindingProfile(utr,hmap_m):
    # Select genes whose signal in the 600-column scaled gene-body matrix is
    # exclusive to one region.  Columns appear to map to 1-200 = 5'UTR,
    # 201-400 = CDS, 401-600 = 3'UTR (consistent with the dashed lines drawn
    # at 200/400 in plot_geneBodyPartition) -- TODO confirm.
    if utr=='5p':
        # Signal only in columns 1-200.
        ix=(hmap_m[range(201,601)].sum(axis=1)==0)&(hmap_m[range(1,201)].sum(axis=1)>0)
        screen=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_5pUTR')
    elif utr=='3p':
        # Signal only in columns 401-600.
        ix=(hmap_m[range(1,401)].sum(axis=1)==0)&(hmap_m[range(401,601)].sum(axis=1)>0)
        screen=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_3pUTR')
    else:
        # Signal only in columns 201-400 (CDS).
        ix=(hmap_m[range(1,201)].sum(axis=1)==0)&(hmap_m[range(401,601)].sum(axis=1)==0)&(hmap_m[range(201,401)].sum(axis=1)>0)
        screen=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_CDS')
    # Ensure all genes are also identified in pre-allocated gene lists.
    hmap_m_utrSpec=hmap_m.ix[ix,:]
    hmap_m_utrSpec_filter=pd.merge(hmap_m_utrSpec,screen,left_on='ENSG_ID',right_on='Gene_name',how='inner')
    # Order rows by total signal so downstream heatmaps stack low-to-high.
    sums=hmap_m_utrSpec_filter[range(1,601)].sum(axis=1)
    hmap_m_utrSpec_filter=hmap_m_utrSpec_filter.loc[np.argsort(sums),:]
    return hmap_m_utrSpec_filter
def plot_geneBodyPartition(outfilepath,sampleName):
    # Partition bound protein-coding genes into intronic / 5'UTR / CDS /
    # 3'UTR exclusive sets, save the gene lists, and draw one heatmap per
    # UTR class (top 'depth' genes by signal).
    treatMatrix=outfilepath+'clipGenes_proteinCoding_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved_cleaned_sorted_UTRs_scaled_cds200_abt0_treatmatrix.txt'
    hmap=pd.DataFrame(pd.read_table(treatMatrix,header=None,skiprows=1))
    # Ensure genes recoverd from this analysis are indepdently indentified using partitioning of CLIPper cluster data.
    # Column 0 holds ENST ids; translate them to ENSG via the module-level
    # ensemblGeneAnnot table (set up below, before this function is called).
    hmap['ENSG_ID']=hmap.ix[:,0].apply(convertENBLids)
    bound_pc = outfilepath+'clipGenes_proteinCoding'
    pc_genes=pd.DataFrame(pd.read_table(bound_pc,header=None,))
    pc_genes.columns=['ENSG_ID']
    hmap_m=pd.merge(hmap,pc_genes,left_on='ENSG_ID',right_on='ENSG_ID',how='inner')
    # Isolate intronic bound genes.
    # Genes bound per CLIPper but absent from the gene-body matrix are
    # presumed intronic-only.
    tosave=outfilepath+'PlotData_ExclusiveBound_Intronic'
    intronicBoundGenes=list(set(pc_genes['ENSG_ID'])-set(hmap_m['ENSG_ID']))
    np.savetxt(tosave,np.array(intronicBoundGenes),fmt="%s")
    # UTR specific genes.
    geneTypes=['5p','cds','3p']
    depth=50  # number of top-signal genes to display per heatmap
    for i in range(0,3):
        utrMatrix=getUTRbindingProfile(geneTypes[i],hmap_m)
        tosave=outfilepath+'PlotData_ExclusiveBound_%s'%geneTypes[i]
        np.savetxt(tosave,utrMatrix['ENSG_ID'],fmt="%s")
        plt.subplot2grid((2,3),(1,i),colspan=1)
        dataToPlot=utrMatrix[range(1,601)]
        # Rows are sorted ascending by signal, so the slice [-depth:-1]
        # shows the highest-signal genes (the single top row is dropped by
        # the -1 bound -- TODO confirm that is intentional).
        p=plt.pcolormesh(np.array(dataToPlot)[-depth:-1,:],cmap='Blues')
        plt.title(geneTypes[i],fontsize=5)
        plt.vlines(200,0,depth,linestyles='dashed')
        plt.vlines(400,0,depth,linestyles='dashed')
        plt.tick_params(axis='x',labelbottom='off')
        plt.tick_params(axis='y',labelleft='off')
        plt.ylim(0,depth)
        plt.ylabel('Ranked genes (highest on bottom)',fontsize=5)
        plt.xticks(visible=False)
        plt.yticks(visible=False)
        # This second title call overwrites the one set a few lines above.
        plt.title('%s specific genes: %s'%(geneTypes[i],np.unique(utrMatrix['ENSG_ID']).shape[0]),fontsize=5)
ensemblGeneAnnot=pd.DataFrame(pd.read_table(genesFile))
ensemblGeneAnnot=ensemblGeneAnnot.set_index('name') # Make ENST the index
plot_geneBodyPartition(outfilepath,sampleName)
# <codecell>
# Assemble Figure 2 (gene-body trace on top, per-region heatmaps below).
fig2=plt.figure(2)
plt.subplot2grid((2,3),(0,0),colspan=3)
plot_mRNAgeneBodyDist(outfilepath,sampleName)
plot_geneBodyPartition(outfilepath,sampleName)
fig2.tight_layout()
fig2.savefig(outfilepath+'Figure2.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig2.savefig(outfilepath+'Figure2.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
def plot_repeatRNA(outfilepath,sampleName):
    # One RT-stop histogram subplot per repeat element (rDNA excluded; it
    # gets its own figure), plus a per-base data table for each repeat.
    # Relies on module-level repeatGenomeBuild.
    repeat_genome=np.genfromtxt(repeatGenomeBuild,dtype='string')
    repeat_genome_bases=repeat_genome[1]
    repFiles=glob.glob(outfilepath + '/PlotData_RepeatRNAreads_*')
    repFiles=[repFile for repFile in repFiles if 'rDNA' not in repFile]
    # Square-ish subplot grid large enough for all repeat files.
    plotDim=math.ceil(math.sqrt(len(repFiles)))
    i=0
    for path in repFiles:
        name=path.split('RepeatRNAreads_')[-1]
        # NOTE(review): bare except hides every error (not just empty
        # files); consider narrowing and logging the exception.
        try:
            # Read in each RT stop file
            hits_per_rep=pd.read_csv(path)
            RTpositions=hits_per_rep['RT_stop']
            start=hits_per_rep.loc[0,'Repeat_Start']
            end=hits_per_rep.loc[0,'Repeat_End']
            # Histogram of RT stops across gene body
            bins=range(start,end+2,1)
            hist,bins=np.histogram(RTpositions,bins=bins)
            width=0.7*(bins[1]-bins[0])
            center=(bins[:-1] + bins[1:])/2
            # Normalize
            histPlot=np.array(hist,dtype=float)
            histPlot=np.array(histPlot/float(len(RTpositions)),dtype=float)
            # Subplot
            plt.subplot(plotDim,plotDim,i+1)
            plt.bar(center,histPlot,align='center',width=width,color='blue',alpha=0.45)
            plt.tick_params(axis='x',labelsize=2.5)
            plt.tick_params(axis='y',labelsize=2.5)
            plt.title('RT stops for %s: %s'%(name,len(RTpositions)),fontsize=5)
            plt.xlim(start,end)
            # Record data
            storageDF=pd.DataFrame()
            sequence=repeat_genome_bases[start:end+1]
            storageDF['Sequence']=pd.Series(list(sequence))
            readsPerBase=np.array(list(hist))
            readsPerBaseNorm=np.array(list(histPlot))
            storageDF['RT_stops']=readsPerBase
            storageDF['RT_stops_norm']=readsPerBaseNorm
            outfilepathToSave=outfilepath +'/PlotData_RepeatRNAHist_%s'%name
            storageDF.to_csv(outfilepathToSave)
            i+=1
        except:
            print "No reads for repeatRNA %s"%name
    plt.tight_layout()
# Assemble and save Figure 3.
fig3=plt.figure(3)
plot_repeatRNA(outfilepath,sampleName)
fig3.tight_layout()
fig3.savefig(outfilepath+'Figure3.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig3.savefig(outfilepath+'Figure3.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
def plot_rDNA(outfilepath,sampleName):
    # Dedicated rDNA figure: full-locus RT-stop histogram, a transcribed-
    # region view, and zoomed panels for the 18s / 5.8s / 28s regions.
    # Relies on module-level globals repeat_genome_bases and the rRNA
    # coordinates start18s/end18s, start5s/end5s, start28s/end28s, rRNAend
    # (defined elsewhere in the file -- not visible in this chunk).
    plt.subplot2grid((3,3),(0,0),colspan=3)
    name='rDNA'
    rDNA=glob.glob(outfilepath + 'PlotData_RepeatRNAreads_rDNA')
    hits_per_rep=pd.read_csv(rDNA[0])
    RTpositions=hits_per_rep['RT_stop']
    start=hits_per_rep.loc[0,'Repeat_Start']
    end=hits_per_rep.loc[0,'Repeat_End']
    # Per-base histogram across the full rDNA index range, normalized by
    # total read count.
    bins=range(start,end+2,1)
    hist,bins=np.histogram(RTpositions,bins=bins)
    width=0.7*(bins[1]-bins[0])
    center=(bins[:-1]+bins[1:])/2
    histPlot=np.array(hist,dtype=float)
    histPlot=np.array(histPlot/float(len(RTpositions)),dtype=float)
    plt.bar(center,histPlot,align='center',width=width,color='blue',alpha=0.45)
    plt.tick_params(axis='x',labelsize=2.5)
    plt.tick_params(axis='y',labelsize=2.5)
    plt.title('RT stops for %s: %s'%(name,len(RTpositions)),fontsize=5)
    plt.xlim(start,end)
    # Record data
    storageDF=pd.DataFrame()
    sequence=repeat_genome_bases[start:end+1]
    storageDF['Sequence']=pd.Series(list(sequence))
    readsPerBase=np.array(list(hist))
    readsPerBaseNorm=np.array(list(histPlot))
    storageDF['RT_stops']=readsPerBase
    storageDF['RT_stops_norm']=readsPerBaseNorm
    outfilepathToSave=outfilepath +'/PlotData_RepeatRNAHist_%s'%name
    storageDF.to_csv(outfilepathToSave)
    # Features of rDNA with respect to start of the bowtie index (index=0)
    rRNAstart=start
    plt.axvspan(start18s+rRNAstart,end18s+rRNAstart,facecolor='g',alpha=0.5)
    plt.axvspan(start5s+rRNAstart,end5s+rRNAstart,facecolor='r',alpha=0.5)
    plt.axvspan(start28s+rRNAstart,end28s+rRNAstart,facecolor='b',alpha=0.5)
    # Generate histogram for transcribed region
    plt.subplot2grid((3,3),(1,0),colspan=3)
    # Shift positions so the locus starts at 0.
    datarDNAOnly=RTpositions-start
    bins=range((start-start),(end-start+2),1)
    hist,bins=np.histogram(datarDNAOnly,bins=bins)
    width=0.7*(bins[1]-bins[0])
    center=(bins[:-1] + bins[1:])/2
    histPlot=np.array(hist,dtype=float)
    histPlot=np.array(histPlot/float(len(RTpositions)),dtype=float)
    plt.bar(center,histPlot,align='center',width=width,color='blue',alpha=0.45)
    plt.tick_params(axis='x',labelsize=2.5)
    plt.tick_params(axis='y',labelsize=2.5)
    plt.xlabel('rRNA locus position (bin=1 base)',fontsize=5)
    plt.ylabel('Normalized RT stop / bin',fontsize=2.5)
    plt.axvspan(start18s,end18s,facecolor='g',alpha=0.5)
    plt.axvspan(start5s,end5s,facecolor='r',alpha=0.5)
    plt.axvspan(start28s,end28s,facecolor='b',alpha=0.5)
    plt.xlim(0,rRNAend)
    # Individual regions
    # The three zoomed panels re-plot the same normalized histogram and just
    # change the x-limits to each subunit's coordinates.
    plt.subplot2grid((3,3),(2,0),colspan=1)
    plt.bar(center,histPlot,align='center',width=width,color='green',alpha=0.75)
    plt.xlim(start18s,end18s)
    plt.xlabel('rRNA locus position (bin=1 base)',fontsize=5)
    plt.ylabel('Normalized RT stop / bin',fontsize=2.5)
    plt.tick_params(axis='x',labelsize=5)
    plt.tick_params(axis='y',labelsize=5)
    plt.title('18s Region',fontsize=5)
    plt.subplot2grid((3,3),(2,1),colspan=1)
    plt.bar(center,histPlot,align='center',width=width,color='red',alpha=0.75)
    plt.xlim(start5s,end5s)
    plt.xlabel('rRNA locus position (bin=1 base)',fontsize=5)
    plt.tick_params(axis='x',labelsize=5)
    plt.tick_params(axis='y',labelsize=5)
    plt.title('5.8s Region',fontsize=5)
    plt.subplot2grid((3,3),(2,2),colspan=1)
    plt.bar(center,histPlot,align='center',width=width,color='blue',alpha=0.75)
    plt.xlim(start28s,end28s)
    plt.xlabel('rRNA locus position (bin=1 base)',fontsize=5)
    plt.tick_params(axis='x',labelsize=5)
    plt.tick_params(axis='y',labelsize=5)
    plt.title('28s Region',fontsize=5)
    plt.tight_layout()
# Assemble and save Figure 4.
fig4=plt.figure(4)
plot_rDNA(outfilepath,sampleName)
fig4.tight_layout()
fig4.savefig(outfilepath+'Figure4.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig4.savefig(outfilepath+'Figure4.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
def getBindingFrac(type_specific):
    # Express each read's distance from the snoRNA 5' end as a fraction of
    # the gene-body length, handling the two strands separately.
    def _with_frac(rows, five_prime_col):
        rows = rows.copy()
        rows['diff'] = np.abs(rows[five_prime_col] - rows['Start'])
        rows['frac'] = rows['diff'] / (rows['Stop_snoRNA'] - rows['Start_snoRNA'])
        return rows
    # 5' position on the negative strand is the snoRNA stop coordinate.
    minus = _with_frac(type_specific[type_specific['strand_snoRNA'] == '-'], 'Stop_snoRNA')
    # 5' position on the positive strand is the snoRNA start coordinate.
    plus = _with_frac(type_specific[type_specific['strand_snoRNA'] == '+'], 'Start_snoRNA')
    # Same row order as before: negative-strand reads first.
    return pd.concat([minus, plus])
# snoRNA analysis: read-type pie chart plus per-type gene-body binding
# histograms, assembled into Figure 5.
print "snoRNA gene body anaysis."
# logOpen.write("Gene body analysis.\n")
bf_sno=pd.read_table(outfilepath+"clipGenes_snoRNA_LowFDRreads.bed",header=None)
bf_sno.columns=['Chr','Start','End','CLIPper_name','Q','Strand','Chr_snoRNA','Start_snoRNA','Stop_snoRNA','name_snoRNA','Type','strand_snoRNA']
# Reads per snoRNA type and each type's share of the total.
snoTypes=pd.DataFrame(bf_sno.groupby('Type').size())
snoTypes.columns=['Reads']
# NOTE(review): .sum(axis=1) on a Series; recent pandas rejects axis=1 here
# -- presumably tolerated by the pinned version; TODO confirm.
snoTypes['Fraction']=snoTypes['Reads']/snoTypes['Reads'].sum(axis=1)
outfilepathToSave=outfilepath+'/PlotData_readsPerSnoRNAType'
snoTypes.to_csv(outfilepathToSave)
snoTypesAndGenes=pd.DataFrame(bf_sno.groupby(['Type','name_snoRNA']).size())
snoTypesAndGenes.columns=['Count_per_gene']
outfilepathToSave=outfilepath+'/PlotData_geneStatsPerSnoRNAType'
snoTypesAndGenes.to_csv(outfilepathToSave)
# Panel 1: pie chart of read fractions by snoRNA type.
fig5=plt.figure(5)
ax=plt.subplot(2,2,1)
pie_wedges=ax.pie(snoTypes['Fraction'],labels=snoTypes.index,labeldistance=1.1,autopct='%1.1f%%')
plt.rcParams['font.size']=5
for wedge in pie_wedges[0]:
    wedge.set_edgecolor('black')
    wedge.set_lw(1)
# Panels 2+: per-type histograms of binding position along the gene body.
i=2
for sType in set(bf_sno['Type']):
    type_specific=bf_sno[bf_sno['Type']==sType]
    sno_profile=getBindingFrac(type_specific)
    if sType=='C':
        title="C/D_box"
    elif sType=='H':
        title="H/ACA_box"
    else:
        title="scaRNA"
    outfilepathToSave=outfilepath+'/PlotData_snoRNAReadDist_%s'%sType
    sno_profile.to_csv(outfilepathToSave)
    plt.subplot(2,2,i)
    bins=np.arange(0,1,0.01)
    hist,bins=np.histogram(sno_profile['frac'],bins=bins)
    hist=np.array(hist/float(sno_profile['frac'].shape[0]),dtype=float)  # normalize
    width=0.7*(bins[1]-bins[0])
    center=(bins[:-1] + bins[1:])/2
    plt.bar(center,hist,align='center',width=width,color='blue',alpha=0.75)
    plt.tick_params(axis='x',labelsize=5)
    plt.tick_params(axis='y',labelsize=5)
    plt.xlabel('Fraction of gene body (5p - 3p)',fontsize=5)
    plt.title('Binding profile for %s'%title,fontsize=5)
    plt.xlim([0,1])
    # Record data
    storageDF=pd.DataFrame()
    storageDF['bins']=pd.Series(bins)
    storageDF['hist']=pd.Series(hist)
    outfilepathToSave=outfilepath+'/PlotData_snoRNAhistogram_%s'%sType
    storageDF.to_csv(outfilepathToSave)
    i+=1
fig5.tight_layout()
fig5.savefig(outfilepath+'Figure5.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig5.savefig(outfilepath+'Figure5.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
def getncRNABindingFrac(type_specific):
    # Express each RT stop's distance from the gene's 5' end as a fraction
    # of the gene-body length, handling the two strands separately.
    def _with_frac(rows, five_prime_col):
        rows = rows.copy()
        rows['diff'] = np.abs(rows[five_prime_col] - rows['RT_stop'])
        rows['frac'] = rows['diff'] / (rows['Gene End (bp)'] - rows['Gene Start (bp)'])
        return rows
    # 5' position on the negative strand is the gene end coordinate.
    minus = _with_frac(type_specific[type_specific['Strand'] == '-'], 'Gene End (bp)')
    # 5' position on the positive strand is the gene start coordinate.
    plus = _with_frac(type_specific[type_specific['Strand'] == '+'], 'Gene Start (bp)')
    # Same row order as before: negative-strand reads first.
    return pd.concat([minus, plus])
# ncRNA gene-body binding histograms (one panel per remaining ncRNA class),
# assembled into Figure 6; then close the pipeline log.
print "ncRNA gene body anaysis."
st_stopFiles=glob.glob(outfilepath+"*.geneStartStop")
st_stopFiles=[f for f in st_stopFiles if 'rRNA' not in f]
fig6=plt.figure(6)
# Square-ish subplot grid large enough for all files.
plotDim=math.ceil(math.sqrt(len(st_stopFiles)))
i=1
for st_file in st_stopFiles:
    name=st_file.split('clipGenes_')[1].split('_LowFDRreads')[0]
    tmp=pd.read_csv(st_file)
    # 'expand' is the same module-level offset used in the repeat-RNA
    # section; Start + expand presumably recovers the RT-stop coordinate --
    # TODO confirm.
    tmp['RT_stop']=tmp['Start']+expand
    tmp_profile=getncRNABindingFrac(tmp)
    plt.subplot(plotDim,plotDim,i)
    bins=np.arange(0,1,0.01)
    hist,bins=np.histogram(tmp_profile['frac'],bins=bins)
    hist=np.array(hist/float(tmp_profile['frac'].shape[0]),dtype=float)  # normalize
    width=0.7*(bins[1]-bins[0])
    center=(bins[:-1] + bins[1:])/2
    plt.bar(center,hist,align='center',width=width,color='blue',alpha=0.75)
    plt.tick_params(axis='x',labelsize=5)
    plt.tick_params(axis='y',labelsize=5)
    plt.xlabel('Fraction of gene body (5p - 3p)',fontsize=5)
    plt.title('Binding profile for %s'%name,fontsize=5)
    i+=1
fig6.tight_layout()
fig6.savefig(outfilepath+'Figure6.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig6.savefig(outfilepath+'Figure6.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
logOpen.close()
# <codecell>
|
Can I exchange or add tickets to my current order?
What if I cannot attend or no longer want to go to my event?
What if my event is rescheduled or postponed?
What if my event is cancelled?
The vast majority of the time, the original tickets will still be valid for the rescheduled event, so always hold onto them, just in case. If the venue is reprinting new tickets, you will be notified by TickPick. To be fair to our sellers and per our User Agreement, we cannot offer refunds or exchanges for postponed or rescheduled events. If you can no longer attend the event, you can list your tickets for sale on TickPick here.
|
from songmodel import SongModel
from extractor import Extractor
from sklearn import svm, multiclass, neighbors
import subprocess, math
class AI:
    """Genre classifier for a single song.

    Pulls audio features (tempo, spectral rolloff mean/std, zero-crossing
    rate mean/std, duration) through an Extractor and classifies the song
    against the training data held by SongModel, printing the predicted
    genre label.

    Improvements over the previous revision: the genres file is closed via a
    context manager, the duplicated feature-vector construction in the two
    classify methods is factored into ``_feature_vector``, and the k-NN path
    now applies the same ``int()`` cast as the SVM path (a no-op for the
    integer class ids both classifiers return).
    """

    def __init__(self, song):
        self.song = song
        self.model = SongModel()
        self.extractor = Extractor()
        # Feature slots, populated by get_song_datas().
        self.tempo = 0
        self.rolloffmoy = 0.0
        self.rolloffect = 0.0
        self.zcrmoy = 0.0
        self.zcrect = 0.0
        self.duration = 0.0
        # One genre label per line; the classifiers return an index into
        # this list.
        self.genre = []
        with open("training/Tracks/genres.txt") as genres_file:
            for l in genres_file:
                self.genre.append(l.replace('\n', ''))

    def get_song_datas(self):
        """Refresh all feature attributes from the extractor."""
        self.extractor.set_song(self.song)
        self.tempo = self.extractor.get_tempo()
        self.rolloffmoy = self.extractor.get_rolloff_moy()
        self.rolloffect = self.extractor.get_rolloff_ect()
        self.zcrmoy = self.extractor.get_zcr_moy()
        self.zcrect = self.extractor.get_zcr_ect()
        self.duration = self.extractor.get_duration()

    def _feature_vector(self):
        """Refresh the features and return them as the single-sample 2D
        list shape that sklearn's predict() expects."""
        self.get_song_datas()
        return [[self.tempo, self.rolloffmoy, self.rolloffect,
                 self.zcrmoy, self.zcrect, self.duration]]

    def classify_with_knn(self):
        """Fit a k-NN classifier on the training set and print the
        predicted genre for this song."""
        vect, mat = self.model.get_datas()
        clf = neighbors.KNeighborsClassifier()
        clf.fit(mat, vect)
        ret = clf.predict(self._feature_vector())
        print(self.genre[int(ret[0])])

    def classify_with_svm(self):
        """Fit a linear SVM on the training set and print the predicted
        genre for this song."""
        vect, mat = self.model.get_datas()
        # NOTE(review): class_weight='auto' was removed in scikit-learn 0.17
        # (replaced by 'balanced'); left unchanged for the pinned version.
        clf = svm.SVC(class_weight='auto', kernel='linear')
        clf.fit(mat, vect)
        ret = clf.predict(self._feature_vector())
        print(self.genre[int(ret[0])])
|
Best-in-class Engine Horsepower: Kubota's high-powered engines provide 92 HP for performance that surpasses the competition.
New CRS Diesel Engine: The SVL90-2 features Kubota's renowned CRS (Common Rail System) diesel engines which deliver a powerful 92 HP. The CRS electronically controls the fuel injection timing and amount for high torque at a low rpm to improve work efficiency.
Electronic Travel Torque Management: Electronic Travel Torque Management System lets you work faster with more power. The ECU constantly monitors the load of the machine to optimally control hydraulic pump output according to the load, preventing engine stalls for smoother operation even under severe conditions.
Diesel Particulate Filter (DPF): The Diesel Particulate Filter meets the latest Interim Tier 4 diesel emissions regulation by collecting particulate matter (PM) from exhaust gas for cleaner exhaust emissions.
Long Reach for Maximum Lifting and Dumping, High Bucket Hinge Pin Height: Kubota's unique vertical lift is designed to deliver an exceptionally long reach of 40.7 inches. In addition, the tip of the arm is optimally shaped for a tall hinge pin height of 128.6 inches for easy dumping into trucks.
Standard Self-leveling Function: This feature helps keep the bucket or pallet forks in a horizontal position without the need to manually adjust the angle during lifting.
Standard Two-speed Travel: Shift from low gear at 5.0 mph to high gear at 7.3 mph to get the job done faster.
Pilot Valve with Shuttle Piston: This ensures consistent oil flow which improves directional stability.
High-flow Hydraulics (Optional): This option increases the hydraulic capacity for attachments requiring a higher flow. Both 1/2" and 3/4" couplers are part of this optional package.
Higher Ground Clearance: A high ground clearance of 11.8 inches lets you travel over large obstacles with ease.
Strong Traction Force: Kubota's original track lug design gives you more grip and a stronger traction force of 12178 lbs.
Wider Cab Entrance: A wider entrance makes it easier to get in and out.
Easy-clean Floor Mat and Drain Outlet: The soft rubber floor mat is easy to remove and clean. And with the built-in drain outlet, the mat can be washed in the cab.
Hand and Foot Throttle: The hand and foot throttle allows you to match the engine speed to the task. The hand throttle is ideal for jobs requiring constant engine speed such as cold planing, trenching or snow blowing. The foot throttle lets the operator vary engine speed for other applications.
Pressurized Cab (Optional): The cab is pressurized to keep dust, flying debris, and insects away to enhance comfort on the job. Furthermore, noise is kept down to let you work for long hours with minimal fatigue.
Optimized Air Conditioning: The climate control system offers outstanding cooling and heating performance with optimized airflow for a high level of comfort all year round.
Enhanced Defrosting and Demisting Functions: Ample airflow prevents the windows from fogging up or freezing during the cold months of the year.
Easy-open Sliding Windows: The sliding front door and double-sliding side windows can be opened regardless of the position of the front attachment, allowing you to get in or out safely even during emergencies.
Tilt-up Cab: The cab can be tilted 72° without having to lift the arm, allowing easy access to the hydraulic pumps, valves, lines and hydraulic tank.
Routine Checks: Simply open the rear bonnet to access all components for daily routine inspections.
Slide-and-tilt Radiator and Oil Cooler: The radiator can be tilted for easy cleaning.
Large Space for Tool Box: The SVL90-2 offers a convenient and large tool box for storing a grease gun and other necessities.
Welded, Integrated Mainframe and Undercarriage: The undercarriage forms an integral part of the main frame for maximum durability.
Kubota Original Rubber Tracks: Kubota's original rubber tracks offer outstanding durability and long-life performance. Furthermore, Kubota's original lug pattern prevents soil from accumulating and improves traction force.
Triple Flange Lower Track Rollers: A highly reliable triple flange design not only minimizes the risk of detracking, but improves stability as well.
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import api, fields, models
import logging
import shutil
_logger = logging.getLogger(__name__)
class SurveyFileArquiveWizard(models.TransientModel):
    """Transient wizard that moves already-imported survey files from the
    input directory to the archive ("arquive") directory and flags the
    corresponding records as arquived."""
    _name = 'myo.survey.file.arquive.wizard'

    def _default_survey_file_ids(self):
        # Pre-select the survey-file records the wizard was launched on.
        return self._context.get('active_ids')

    survey_file_ids = fields.Many2many(
        'myo.survey.file',
        string='Survey Files',
        default=_default_survey_file_ids)
    dir_path = fields.Char(
        'Directory Path',
        required=True,
        help="Directory Path",
        default='/opt/openerp/mostlyopen_clvhealth_jcafb/survey_files/input'
    )
    arquive_dir_path = fields.Char(
        'Arquive Directory Path',
        required=True,
        help="Arquive Directory Path",
        default='/opt/openerp/mostlyopen_clvhealth_jcafb/survey_files/arquive'
    )

    @api.multi
    def do_survey_file_arquive(self):
        """Move each selected file whose state is 'imported' into the
        archive directory and mark its record as 'arquived'.

        :return: True (standard wizard-action convention)
        """
        self.ensure_one()
        for survey_file_reg in self.survey_file_ids:
            filepath = self.dir_path + '/' + survey_file_reg.name
            arquive_filepath = self.arquive_dir_path + '/' + survey_file_reg.name
            # Log through the module logger (set up at module level) instead
            # of a bare Python 2 print statement.
            _logger.info('>>>>> %s %s', filepath, survey_file_reg.state)
            if survey_file_reg.state == 'imported':
                shutil.move(filepath, arquive_filepath)
                survey_file_reg.state = 'arquived'
        return True
|
Marvelous Wardrobe Sliding Door Track For Diy Youtube Sliding Wardrobe Rails - The image above with the title Marvelous Wardrobe Sliding Door Track For Diy Youtube Sliding Wardrobe Rails, is part of Sliding Wardrobe Rails picture gallery. Size for this image is 630 × 354, a part of Others category and tagged with sliding wardrobe rail clothes rack, sliding wardrobe rail hook, ikea sliding wardrobe rails, sliding wardrobe tracks uk, sliding wardrobe door rails uk published September 1st, 2018 07:20:29 AM by Saige Orn. Find or search for images related to "Marvelous Wardrobe Sliding Door Track For Diy Youtube Sliding Wardrobe Rails" in another posts. Back to: Sliding Wardrobe Rails.
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# http://cuiqingcai.com/3179.html
__author__ = 'Demi Yu'
from bs4 import BeautifulSoup
import os
from Download import request
class mzitu():
    """Crawler for mzitu.com: walks the full album index, creates one
    directory per album and downloads each album's images into it."""
    def all_url(self, url):
        # Fetch the album index page; request() (from Download) returns a
        # requests-style response object.
        html = request.get(url,3)
        all_a = BeautifulSoup(html.text, 'lxml').find('div', class_='all').find_all('a')
        retval = os.getcwd()  # remember the cwd so it can be restored after each album
        for a in all_a:
            title = a.get_text()
            print(u'开始保存:', title)  # progress message ("start saving: <title>")
            path = str(title).replace("?", '_')  # '?' is illegal in Windows folder names
            self.mkdir(path)  # create (and chdir into) the folder named after the title
            href = a['href']
            self.html(href)  # crawl this album; href is the album URL
            os.chdir(retval)  # restore the original working directory
    def html(self, href):  # resolves an album URL into its per-image page URLs
        html = request.get(href,3)
        max_span = BeautifulSoup(html.text, 'lxml').find('div', class_='pagenavi').find_all('span')[-2].get_text()
        # for page in range(1, int(max_span) + 1):
        # NOTE(review): only the first page is crawled; the full-range loop
        # above is commented out -- confirm this limitation is intentional.
        for page in range(1, 2):
            page_url = href + '/' + str(page)
            self.img(page_url)  # fetch the actual image URL from this page
    def img(self, page_url):  # resolves an image page into the real image URL
        img_html = request.get(page_url,3)
        img_url = BeautifulSoup(img_html.text, 'lxml').find('div', class_='main-image').find('img')['src']
        self.save(img_url)
    def save(self, img_url):  # downloads one image into the current directory
        name = img_url[-9:-4]  # file name derived from the tail of the URL
        img = request.get(img_url,3)
        f = open(name + '.jpg', 'ab')
        f.write(img.content)
        f.close()
    def mkdir(self, path):  # creates the album folder and chdirs into it
        path = path.strip()
        macPath="Pic/"
        isExists = os.path.exists(os.path.join(macPath, path))
        if not isExists:
            print(u'建了一个名字叫做', path, u'的文件夹!')
            os.makedirs(os.path.join(macPath, path))
            os.chdir(os.path.join(macPath, path))  # switch into the new folder
            return True
        else:
            # Folder already exists; note that in this branch the cwd is
            # NOT changed, unlike the creation branch above.
            print(u'名字叫做', path, u'的文件夹已经存在了!')
            return False
    # def request(self, url): ## fetches a page and returns the response
    #     headers = {
    #         'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1"}
    #     content = requests.get(url, headers=headers)
    #     return content
Mzitu = mzitu()  # instantiate the crawler
Mzitu.all_url('http://www.mzitu.com/all')  # entry point: start crawling from the full album index
|
So with trooper Daph heading to Vancouver for a few days last weekend, Jeff went to EMEA for a London and then Berlin work event. We did manage to get some long overdue time with the Clarkes, as well as some Vesta old skool friends.
And check out this welcome video from the Clarkes over on Facebook.
|
from typing import Union
from brown import constants
from brown.core import brown
from brown.core.font import Font
from brown.core.music_font import MusicFont
from brown.core.object_group import ObjectGroup
from brown.core.path import Path
from brown.core.pen import Pen
from brown.core.pen_pattern import PenPattern
from brown.core.staff import Staff
from brown.utils.units import GraphicUnit
from examples.feldman_projections_2.glyph_name import GlyphName
from examples.feldman_projections_2.grid_unit import GridUnit
from examples.feldman_projections_2.instrument_data import InstrumentData
from examples.feldman_projections_2.measure import Measure
from examples.feldman_projections_2.music_text_event import MusicTextEvent
from examples.feldman_projections_2.text_event import TextEvent
class Score(ObjectGroup):
    """Graphical score: one row of grid cells per instrument holding text
    or music-glyph events, with dotted bar lines between measures and
    solid divider lines between instrument rows."""
    # Font sizes derive from the grid unit so notation scales with the grid.
    _TEXT_FONT_SIZE = GraphicUnit(GridUnit(0.6)).value
    _MUSIC_FONT_SIZE = Staff._make_unit_class(GridUnit(0.5))
    _bar_line_pen = Pen(thickness=GridUnit(0.05), pattern=PenPattern.DOT)
    _instrument_divider_pen = Pen(thickness=GridUnit(0.05))
    def __init__(self, pos, instruments, parent):
        """Create every event and draw the dividers and bar lines.

        Args:
            pos: position of the score origin.
            instruments: sequence of InstrumentData.
            parent: the parent graphical object.
        """
        super().__init__(pos, parent)
        self.events = []
        self.text_font = Font.deriving(
            brown.default_font,
            size=Score._TEXT_FONT_SIZE,
            weight=60)
        self.music_font = MusicFont(
            constants.DEFAULT_MUSIC_FONT_NAME,
            Score._MUSIC_FONT_SIZE)
        self.instruments = instruments
        for i, instrument in enumerate(instruments):
            for event_data in instrument.event_data:
                self.events.append(self._create_event(i, event_data))
        self.draw_instrument_dividers()
        self.draw_bar_lines()
    def _create_event(self, instrument_index, event_data):
        # GlyphName text means a music glyph; anything else is plain text.
        if isinstance(event_data.text, GlyphName):
            return self._create_music_text_event(instrument_index, event_data)
        return self._create_text_event(instrument_index, event_data)
    def _create_text_event(self, instrument_index, event_data):
        # The y position is the instrument row's offset plus the event's
        # register offset within that row.
        return TextEvent(
            (event_data.pos_x, (Score._instrument_pos_y(instrument_index)
                                + event_data.register.value)),
            self,
            event_data.length,
            event_data.text,
            self.text_font)
    def _create_music_text_event(self, instrument_index, event_data):
        # Same placement as a text event, but rendered with the music font.
        return MusicTextEvent(
            (event_data.pos_x, (Score._instrument_pos_y(instrument_index)
                                + event_data.register.value)),
            self,
            event_data.length,
            event_data.text,
            self.music_font)
    @property
    def measure_count(self):
        # Highest measure index reached by any event in any instrument,
        # plus one (count, not index).
        return max(max(int(Measure(e.pos_x).value)
                       for e in i.event_data)
                   for i in self.instruments) + 1
    @staticmethod
    def _instrument_pos_y(instrument_index):
        # Each instrument row is 3 grid units tall.
        return GridUnit(3 * instrument_index)
    @staticmethod
    def _divider_pos_y(divider_index):
        # Dividers sit on the row boundaries, every 3 grid units.
        return GridUnit(3 * divider_index)
    @staticmethod
    def _divider_visible(instrument_above: Union[InstrumentData, None],
                         instrument_below: Union[InstrumentData, None],
                         measure_num: int) -> bool:
        # A divider segment is drawn wherever the adjacent row above or
        # below has events in this measure.
        return ((instrument_above is not None
                 and instrument_above.measure_has_events(measure_num))
                or (instrument_below is not None
                    and instrument_below.measure_has_events(measure_num)))
    def _bar_line_extends_below(self,
                                measure_num: int,
                                divider_num: int) -> bool:
        # The bar-line segment below this divider is drawn when the
        # instrument under it has events in either adjacent measure.
        if divider_num >= len(self.instruments):
            return False
        instrument = self.instruments[divider_num]
        return (instrument.measure_has_events(measure_num - 1)
                or instrument.measure_has_events(measure_num))
    def draw_instrument_dividers(self):
        # Walk measures left to right, opening a path segment (move_to)
        # when the divider becomes visible and closing it (line_to) when
        # it stops being visible.
        # NOTE(review): if 'drawing' is still True after the final
        # iteration, no closing line_to is emitted -- presumably the
        # measure past the last one never has events; confirm.
        for divider in range(len(self.instruments) + 1):
            current_path = Path((Measure(0), Score._divider_pos_y(divider)),
                                pen=Score._instrument_divider_pen,
                                parent=self)
            instrument_above = (self.instruments[divider - 1]
                                if divider > 0 else None)
            instrument_below = (self.instruments[divider]
                                if divider < len(self.instruments) else None)
            drawing = False
            for measure_num in range(self.measure_count + 1):
                if Score._divider_visible(
                        instrument_above, instrument_below, measure_num):
                    if not drawing:
                        current_path.move_to(Measure(measure_num), GridUnit(0))
                        drawing = True
                else:
                    if drawing:
                        current_path.line_to(Measure(measure_num), GridUnit(0))
                        drawing = False
    def draw_bar_lines(self):
        # Same open/close segment walk as the dividers, but vertical:
        # one dotted path per measure boundary, spanning only the rows
        # whose instruments have events around that boundary.
        for measure_num in range(self.measure_count + 1):
            current_path = Path((Measure(measure_num), GridUnit(0)),
                                pen=Score._bar_line_pen,
                                parent=self)
            drawing = False
            for divider_num in range(len(self.instruments) + 1):
                if self._bar_line_extends_below(measure_num, divider_num):
                    if not drawing:
                        current_path.move_to(
                            GridUnit(0),
                            Score._instrument_pos_y(divider_num))
                        drawing = True
                else:
                    if drawing:
                        current_path.line_to(
                            GridUnit(0),
                            Score._instrument_pos_y(divider_num))
                        drawing = False
|
Designed and handmade right here at Saltwater by Saltwater! Wrap and stack multiple times to achieve the perfect arm party. Or wear as a long layered necklace!
Embroidered sleeveless tank in flowy cotton blend.
|
#!/usr/bin/env python
import h5py
import numpy
import argparse
import cPickle
from fuel.datasets.hdf5 import H5PYDataset
def pack(f, name, dataset_pathes):
    """Concatenate pickled example lists into one variable-length dataset.

    :param f: an open, writable h5py.File
    :param name: name of the HDF5 dataset to create
    :param dataset_pathes: paths of pickle files, each a list of int32 arrays
    :return: numpy array with the length of each input dataset
    """
    datasets = []
    for path in dataset_pathes:
        # 'with' guarantees each pickle file is closed (the original left
        # every handle dangling).
        with open(path) as pickle_file:
            datasets.append(cPickle.load(pickle_file))
    data = sum(datasets, [])
    # Variable-length rows: each example is an int32 vector of its own size.
    dtype = h5py.special_dtype(vlen=numpy.dtype('int32'))
    table = f.create_dataset(name, (len(data),), dtype=dtype)
    for i, example in enumerate(data):
        table[i] = example
    return numpy.array([len(d) for d in datasets])
if __name__ == '__main__':
    parser = argparse.ArgumentParser("Pack data to HDF5")
    parser.add_argument('-s', dest='sources', nargs='*', help="Source datasets")
    parser.add_argument('-t', dest='targets', nargs='*', help="Target datasets")
    parser.add_argument('-n', dest='names', nargs='*', help="Dataset names")
    parser.add_argument('-i', dest='add_ids',
                        action='store_true', default=False,
                        help="Add integer IDs")
    parser.add_argument('dest', help="Destination")
    args = parser.parse_args()
    # Each named split needs exactly one source and one target file.
    assert len(args.sources) == len(args.targets)
    assert len(args.sources) == len(args.names)
    with h5py.File(args.dest, mode='w') as f:
        lengths = pack(f, "sources", args.sources)
        # Source and target splits must be the same size, pairwise.
        assert numpy.all(lengths == pack(f, "targets", args.targets))
        # Cumulative offsets delimit each named split inside the
        # concatenated tables.
        offsets = [0] + list(lengths.cumsum())
        total_len = offsets[-1]
        if args.add_ids:
            # Optional sequential integer ID per example.
            id_table = f.create_dataset('ids',
                                        data=numpy.arange(total_len,
                                                          dtype='int32'))
            split_dict = {
                args.names[i]:
                    {'sources': (offsets[i], offsets[i + 1]),
                     'targets': (offsets[i], offsets[i + 1]),
                     'ids': (offsets[i], offsets[i + 1])}
                for i in range(len(args.names))}
        else:
            split_dict = {
                args.names[i]:
                    {'sources': (offsets[i], offsets[i + 1]),
                     'targets': (offsets[i], offsets[i + 1])}
                for i in range(len(args.names))}
        # Record the split layout in the Fuel-standard attribute.
        f.attrs['split'] = H5PYDataset.create_split_array(split_dict)
|
One of the most important care, when you stay at the seaside: the scrub to prepare the skin and keep a nice tan long. I kept my two favorite scrubs I discovered aerlier this year. For the face : the Masque-gommage Réconfortant “Rose blanche” (the “White Rose” comforting face mask and exfoliator) of Doux Me, for fragile and sensitized skins. It’s bio: Doux Me cosmetics are among the few bio cosmetics that do not cause me allergies or reactions. And I love their textute! I prefer appling my scrub at night, to avoid problems with the sun. For the body: a touch of argan and Essaouira, with the Gommage soyeux au savon noir (the silky Black Soap Scrub) of Galénic. It’s not organic, but it makes the skinin credibly soft! I like slightly minty smell and texture like a jelly, a real treat! The super natural alternative (and extra light for the vanity case) dedicated to the beach babes: do a light body scrub with the sand, before plunging into the sea. To complete my body scrub: I apply the Haki balm with essential oils, on my feet. I use it only at night: as I don’t know its exact composition, I fear it might be photo-sensitizer (because of essential oils). This balm soothes, nourishes and softens the feet driest and the most stressed. Essential to have”happy” pretty feet (yes!) in flip flops and extra-flat sandals.
(very interesting website, read the excellent ELLE Deco South Africa too!) : Jackie Burger showed her beauty “routine”, including a product of Africology. Never heard about this South Africa cosmetics luxury brand, ethic and environment-friendly.
Renchia Droganis, holistic therapist and Africology founder, created biodegradable natural products with South African essential oils and plants. After a local success story, Africology is now famous in England. I’m more and more puzzled and I want to test those products. I like discovering luxury cosmetics brands from all over the world (like Panpuri in Thailand).
For a month and a half, I've tested the slimming products of Africology. Fortunately, my skin didn't react to the essential oils! I do like the 4 products in the picture above! The textures are nourishing, not greasy, with a matte finish: great in damp climates. Cellulite Detox (2) is one of my two favourite products: you need to massage this oil into the skin. Nice texture (marula oil, rooibos and hypoxis) and the perfume is really strong.
Same for the scent of the 3 other products: a strong citronella scent (I'm addicted). I blend this oil with the Slimming Detox Cream, a super moisturiser that nourishes my very dry skin. I also blend the Slimming Detox Cream with the Conditioning Body Oil (4). This oil with a matte finish makes my skin extremely soft and my feet like a baby's!!! Twice a week, I use the Body Scrub: the colour and texture of the scrub look like grains of sand that gently polish the skin.
|
import os
import numpy
import word2toke
def getCommonVecs(num=10000, d=100, path="C:/Users/Grant/PycharmProjects/Machine-Learning/Embedding/"):
    """Load the first *num* GloVe vectors of dimension *d*.

    :param num: maximum number of word vectors to read
    :param d: embedding dimension (selects the glove.6B.<d>d.txt file)
    :param path: directory containing the GloVe text files
    :return: dict mapping word -> numpy column vector of shape (d, 1)
    """
    fname = os.path.join(path, "glove.6B." + str(d) + "d.txt")
    dic = {}
    # 'with' guarantees the file handle is closed (the original leaked it);
    # zip() stops after *num* lines without reading the whole file.
    with open(fname, 'r', encoding="utf-8") as f:
        for step, line in zip(range(num), f):
            entry = line.split()
            word, vec = entry[0], numpy.array(entry[1:], dtype=float).reshape((d, 1))
            dic[word] = vec
    return dic
def getVecsInVocab(vocab, path="C:/Users/Grant/PycharmProjects/Machine-Learning/Embedding/", d=100, steps=100000):
    """Map each vocabulary word to its GloVe vector, if one exists.

    Words found in the GloVe file get their pretrained vector of shape
    (d,); words not found keep a small random init of shape (d, 1)
    (shapes kept as in the original interface).

    :param vocab: iterable of words to look up
    :param path: directory containing the GloVe text files
    :param d: embedding dimension (selects the glove.6B.<d>d.txt file)
    :param steps: maximum number of GloVe lines to scan
    :return: dict mapping word -> numpy vector
    """
    fname = os.path.join(path, "glove.6B." + str(d) + "d.txt")
    out = {}
    for word in vocab:
        out[word] = (numpy.random.rand(d, 1) - 0.5) / float(int(d) + 1)
    dic = {}
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open(fname, 'r', encoding="utf-8") as f:
        for step, line in zip(range(steps), f):
            entry = line.split()
            dic[entry[0]] = numpy.array(entry[1:], dtype=float).reshape((d,))
    # BUG FIX: the original zipped out.keys()/values() against
    # dic.keys()/values(), pairing unrelated words and assigning a vector
    # that belonged to a DIFFERENT word. Look each word up directly.
    for word in out:
        if word in dic:
            out[word] = dic[word]
    return out
def getWeights(vocab, w2int, dic, d=100):
    """Assemble the embedding weight matrix for a vocabulary.

    :param vocab: iterable of words
    :param w2int: dict mapping each word to its row index
    :param dic: dict mapping each word to its embedding vector
    :param d: embedding dimension
    :return: numpy array of shape (len(vocab), d), row i = vector of the
             word whose w2int index is i
    """
    weights = numpy.zeros((len(vocab), d))
    for token in vocab:
        row = w2int[token]
        # reshape flattens (d, 1) column vectors and passes (d,) through.
        weights[row] = dic[token].reshape(d)
    return weights
#def vec2embedding
|
Tokina FiRIN 20mm f/2 FE MF Lens now Available for Pre-order !
The new announced Tokina FiRIN 20mm f/2 FE MF Lens now finally available for pre-order at B&H Photo Video and Adorama. The US price is $799.00. According to B&H Photo and Adorama, this lens will be shipping at the end of January, 2017.
|
#!/usr/bin/python2
# requires install python-httplib2
# apt-get install python-httplib2
# Run using python2
# python2 get_counts.py
# The sha-bang is set for ubuntu 16.04
# Windows
# pip isntall httplib2
import httplib2
import sys
import json
# Hosts of the Marathon "source" and "sink" app tasks, as "host:port".
source_hosts = []
sink_hosts = []
args = sys.argv
numargs = len(args)
if numargs != 3:
    raise Exception("reset_counts2.py source-name sink-name")
source_name = args[1]
sink_name = args[2]
try:
    # Ask Marathon for the running tasks of the source app.
    conn = httplib2.Http(timeout=1)
    resp, resp_body = conn.request("http://master.mesos:8080/v2/apps/" + source_name)
    data = json.loads(resp_body)
    tasks = data['app']['tasks']
    for task in tasks:
        source_hosts.append(task['host'] + ":" + str(task['ports'][0]))
except Exception as e:
    print("Failed to connect")
try:
    # Ask Marathon for the running tasks of the sink app.
    conn = httplib2.Http(timeout=1)
    resp, resp_body = conn.request("http://master.mesos:8080/v2/apps/" + sink_name)
    data = json.loads(resp_body)
    tasks = data['app']['tasks']
    for task in tasks:
        sink_hosts.append(task['host'] + ":" + str(task['ports'][0]))
except Exception as e:
    print("Failed to connect")
print("Sources")
for host in source_hosts:
    print(host)
    try:
        # Hit each source task's /reset endpoint to zero its counters.
        conn = httplib2.Http(timeout=1)
        resp, resp_body = conn.request("http://" + host + "/reset")
        data = json.loads(resp_body)
        print(data)
    except Exception as e:
        #print(e.message)
        print("Failed to connect")
print  # Python 2 print statement: emits a blank separator line
print("Sinks")
for host in sink_hosts:
    print(host)
    try:
        # Hit each sink task's /reset endpoint to zero its counters.
        conn = httplib2.Http(timeout=1)
        resp, resp_body = conn.request("http://" + host + "/reset")
        data = json.loads(resp_body)
        print(data)
    except Exception as e:
        #print(e.message)
        print("Failed to connect")
|
How much pressure do you cut with?
Thank you for the quick answer's, as I am getting my computer fixed and the guy is going to put 7 on it for me.
Hi, Will Sign Blazer work with the new Windows 7?
|
from Move import Move
class MoveGenerator:
    """Generates moves for the side on move using directional scans.

    scan() walks one direction from a square; symscan() applies scan()
    in four symmetric directions.
    NOTE(review): the bishop also gets a short, non-capturing orthogonal
    step and board size comes from board.row_count/col_count -- this
    looks like a chess variant (e.g. minichess) rather than standard
    chess; confirm against the intended rules.
    """
    def __init__(self, board):
        """
        makes all legal moves for the side on move
        :param board: the board to make moves for
        """
        self.board = board
        self.pieces = self.get_all_pieces()
        self.moves = []
        # Populate self.moves immediately on construction.
        self.get_moves(self.pieces)
    def get_all_pieces(self):
        """
        :return: the pieces of the side on move
        """
        if self.board.whites_turn:
            return self.board.white_piece_list.get_pieces()
        else:
            return self.board.black_piece_list.get_pieces()
    def get_all_moves(self):
        """
        makes all moves and adds them to self.moves
        :return: None
        """
        self.get_moves(self.get_all_pieces())
    def get_moves(self, pieces):
        """
        makes all moves for the pieces specified and adds the moves to self.move
        :param pieces: the pieces specified
        :return: None
        """
        for piece in pieces:
            self.find_pieces_moves(piece)
    def find_pieces_moves(self, src):
        """
        makes all moves for the piece specified and adds the moves to self.move
        :param src: the piece
        :return: None
        """
        # Uppercase letter identifies the piece kind regardless of color.
        piece_type = src.piece.upper()
        # add the moves based on what kind of src it is
        if piece_type == "P":
            # set the forward direction
            if self.board.whites_turn:
                f = -1
            else:
                f = 1
            # Diagonal steps must capture; the straight step never captures.
            self.scan(src, src, f, -1, True, True, True)
            self.scan(src, src, f, 0, True, False, False)
            self.scan(src, src, f, 1, True, True, True)
        elif piece_type == "N":
            self.symscan(src, src, 2, 1, True, True)
            self.symscan(src, src, 1, 2, True, True)
        elif piece_type == "B":
            # Long diagonal slides plus a short non-capturing orthogonal
            # step (variant rule -- see class note).
            self.symscan(src, src, 1, 1, False, True)
            self.symscan(src, src, 0, 1, True, False)
        elif piece_type == "R":
            self.symscan(src, src, 0, 1, False, True)
        elif piece_type == "Q":
            self.symscan(src, src, 0, 1, False, True)
            self.symscan(src, src, 1, 1, False, True)
        elif piece_type == "K":
            self.symscan(src, src, 0, 1, True, True)
            self.symscan(src, src, 1, 1, True, True)
    def scan(self, src, intermediate, row_change, col_change, short, can_capture, must_capture=False):
        """
        looks at all the squares projected in the direction to search for valid moves
        valid moves are added to self.move
        :param src: the starting square, never changes because moves produced need a valid starting square
        :param intermediate: a square used to walk the board
        :param row_change: the change in rows
        :param col_change: the change in cols
        :param short: if the scan should continue in the direction
        :param can_capture: if the scan allows capturing moves
        :param must_capture: if the scan requires capturing moves
        :return: None
        """
        # make sure that the intermediate square is on the board
        if not self.check_bounds(intermediate, row_change, col_change):
            return
        dest_cords = (intermediate.row + row_change, intermediate.col + col_change)
        dest = self.board.dict_board[dest_cords]
        if dest.is_empty() and not must_capture:
            self.moves.append(Move(src, dest))
        else:
            # if the square is occupied the scan can stop
            short = True
        if not dest.is_empty():
            # if the dest has a enemy piece
            # (case of the piece letter encodes its color)
            if src.piece.isupper() != dest.piece.isupper():
                if can_capture:
                    # if this scan allows capuring, add this move
                    self.moves.append(Move(src, dest))
        if not short:
            # recurse if scan not over
            self.scan(src, dest, row_change, col_change, short, can_capture, must_capture)
    def check_bounds(self, src, row_change, col_change):
        """
        checks if a square is on the board
        :param src: the starting square
        :param row_change: the change in rows
        :param col_change: the change in columns
        :return: True if square on board, False otherwise
        """
        r = src.row + row_change
        c = src.col + col_change
        if r < 0 or r >= self.board.row_count:
            return False
        if c < 0 or c >= self.board.col_count:
            return False
        return True
    def symscan(self, src, intermediate, row_change, col_change, short, can_capture, must_capture=False):
        """
        looks at all the squares projected in 4 directions to search for valid moves
        valid moves are added to self.move
        :param src: the starting square, never changes because moves produced need a valid starting square
        :param intermediate: a square used to walk the board
        :param row_change: the change in rows
        :param col_change: the change in cols
        :param short: if the scan should continue in the direction
        :param can_capture: if the scan allows capturing moves
        :param must_capture: if the scan requires capturing moves
        :return: None
        """
        # row_change and col_change are swapped and negated to get 4 directions
        self.scan(src, intermediate, row_change, col_change, short, can_capture, must_capture)
        self.scan(src, intermediate, -col_change, row_change, short, can_capture, must_capture)
        self.scan(src, intermediate, -row_change, -col_change, short, can_capture, must_capture)
        self.scan(src, intermediate, col_change, -row_change, short, can_capture, must_capture)
|
Wallace Mobile Notary provides an important trust service in witnessing the execution of legal and commercial documents. I verify the identity of the signing party, witness the signature and vouch for their authenticity. I offer confidential, personalized notary public services in your home, office, coffee shop or care facility. You chose the time and location most convenient to you!
Estate Planning Documents, Living Wills,etc.
The State of California has determined the fees for Notary Services as $15.00 (maximum) per signature notarized.
Please note: if you are hiring a mobile notary public, you are paying an additional fee for the convenience of mobile service to your location. Rates are variable depending upon location and time of day.
Standard Signing… $10 per signature.
Airport Emergency Notarization: $50.00 travel + parking + $10.00 per document per signature.
Excessive Waiting Times: $25.00 per hour after the first 20 minutes.
Ask about discounts for Senior Citizens, active military, first responders, and more!
American Vets are ALWAYS FREE!
For your convenience, I accept cash and process credit card payments on site, via PayPal. Personal checks only accepted through PayPal.
All PayPal transactions subject to additional 3% processing fee; (minimum $2).
The primary function of a Notary Public is to examine identification, to assure that you are the person listed on the document, and to act as an impartial witness when you sign your documents. This is to deter fraudulent activity.
If it is impossible for you to produce the I.D. noted above, call me to discuss your options: 310-933-9474.
Please be sure your document is fully completed. If sections are not applicable, please indicate that on the document. It is a violation of California law for a Notary Public to notarize an incomplete document. This law is to protect the signer from someone fraudulently entering additional information on your document after it is out of your possession. Please wait to sign your document until your appointment.
Although I receive many calls asking me to provide legal forms; however, according to the Secretary of State, this is considered an illegal practice of law. Please consult an attorney for the best option for you.
The individual or company that is requesting that your signature be notarized will usually determine which notarial act is used. The Notary Public may provide an acknowledgement or jurat, but cannot choose which act you should use.
Sworn statement and requires an oath or affirmation. This notarial act, must be signed in front of the Notary under oath of affirmation.
It’s always best to sign in Notary’s presence, although the document can be signed prior to the appointment and acknowledged to the Notary Public that it is your signature on the document. The signer is still required to appear before the Notary and provide CA state approved identification before the document can be notarized.
These are just Local Service Areas. I will go anywhere in the Greater Los Angeles area!
|
# Copyright (c) 2012 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.openstack.common import log as logging
from webob import Request, Response
import json
import copy
from cimibase import Controller, Consts
from cimibase import make_response_data
from cimiutils import concat, match_up, remove_member
from cimiutils import map_image_state
LOG = logging.getLogger(__name__)
class MachineImageCtrler(Controller):
    """
    Handles machine image request.

    Translates a CIMI MachineImage GET into an OpenStack Nova image
    lookup and reshapes the Nova response into the CIMI body format.
    """
    def __init__(self, conf, app, req, tenant_id, *args):
        super(MachineImageCtrler, self).__init__(conf, app, req, tenant_id,
                                                 *args)
        # Nova path for this tenant's images; first extra path segment
        # (if any) is the image id.
        self.os_path = '/%s/images' % (tenant_id)
        self.image_id = args[0] if len(args) > 0 else ''
        self.entity_uri = 'MachineImage'
        self.metadata = Consts.MACHINEIMAGE_METADATA
    # Use GET to handle all container read related operations.
    def GET(self, req, *parts):
        """
        Handle a GET for a single MachineImage by forwarding the request
        to Nova and converting the image record into a CIMI response.
        """
        # Re-issue the request against Nova with a rewritten path.
        env = self._fresh_env(req)
        env['PATH_INFO'] = '/'.join([self.os_path, self.image_id])
        new_req = Request(env)
        res = new_req.get_response(self.app)
        if res.status_int == 200:
            image = json.loads(res.body).get('image')
            if image:
                # Map the Nova image fields onto the CIMI MachineImage shape.
                body = {}
                body['type'] = 'IMAGE'
                body['id'] = '/'.join([self.tenant_id, self.entity_uri,
                                       self.image_id])
                match_up(body, image, 'name', 'name')
                match_up(body, image, 'created', 'created')
                match_up(body, image, 'updated', 'updated')
                body['state'] = map_image_state(image['status'])
                body['imageLocation'] = body['id']
                if self.res_content_type == 'application/xml':
                    response_data = {self.entity_uri: body}
                else:
                    # JSON responses carry the resourceURI inline.
                    body['resourceURI'] = '/'.join([self.uri_prefix,
                                                    self.entity_uri])
                    response_data = body
                new_content = make_response_data(response_data,
                                                 self.res_content_type,
                                                 self.metadata,
                                                 self.uri_prefix)
                resp = Response()
                self._fixup_cimi_header(resp)
                resp.headers['Content-Type'] = self.res_content_type
                resp.status = 200
                resp.body = new_content
                return resp
            else:
                # 200 from Nova but no 'image' payload: pass through as-is.
                return res
        # Non-200 from Nova: pass the error through unchanged.
        return res
class MachineImageColCtrler(Controller):
    """
    Handles machine image collection request.
    """
    def __init__(self, conf, app, req, tenant_id, *args):
        super(MachineImageColCtrler, self).__init__(conf, app, req, tenant_id,
                                                    *args)
        # OpenStack path that lists all images with full detail.
        self.os_path = '/%s/images/detail' % (tenant_id)
        self.entity_uri = 'MachineImageCollection'
        self.metadata = Consts.MACHINEIMAGE_COL_METADATA
    # Use GET to handle all container read related operations.
    def GET(self, req, *parts):
        """
        Handle GET Container (List Objects) request
        """
        env = copy.copy(req.environ)
        env['SCRIPT_NAME'] = self.os_version
        env['PATH_INFO'] = self.os_path
        # we will always use json format to get Nova information
        env['HTTP_ACCEPT'] = 'application/json'
        # need to remove this header, otherwise, it will always take the
        # original request accept content type
        if env.has_key('nova.best_content_type'):
            env.pop('nova.best_content_type')
        new_req = Request(env)
        res = new_req.get_response(self.app)
        if res.status_int == 200:
            content = json.loads(res.body)
            body = {}
            body['id'] = '/'.join([self.tenant_id, self.entity_uri])
            body['machineImages'] = []
            images = content.get('images', [])
            # Reshape each OpenStack image into a CIMI MachineImage entry.
            for image in images:
                entry = {}
                entry['resourceURI'] = '/'.join([self.uri_prefix,
                                                 'MachineImage'])
                entry['id'] = '/'.join([self.tenant_id,
                                        'MachineImage',
                                        image['id']])
                entry['type'] = 'IMAGE'
                entry['name'] = image['name']
                entry['created'] = image['created']
                entry['updated'] = image['updated']
                entry['state'] = map_image_state(image['status'])
                entry['imageLocation'] = entry['id']
                body['machineImages'].append(entry)
            body['count'] = len(body['machineImages'])
            if self.res_content_type == 'application/xml':
                # remove_member presumably strips nested 'resourceURI'
                # members for the XML rendering — confirm against cimiutils.
                remove_member(body, 'resourceURI')
                body['resourceURI'] = '/'.join([self.uri_prefix,
                                                self.entity_uri])
                response_data = {'Collection': body}
            else:
                body['resourceURI'] = '/'.join([self.uri_prefix,
                                                self.entity_uri])
                response_data = body
            new_content = make_response_data(response_data,
                                             self.res_content_type,
                                             self.metadata,
                                             self.uri_prefix)
            resp = Response()
            self._fixup_cimi_header(resp)
            resp.headers['Content-Type'] = self.res_content_type
            resp.status = 200
            resp.body = new_content
            return resp
        else:
            # Any non-200 from OpenStack is returned unchanged.
            return res
|
You can also join the discussion in the comments section if you have any questions, concerns, or requests. Thank you.
|
"""Tokenization help for Python programs.
generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens. It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF). It generates
5-tuples with these members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators
Older entry points
tokenize_loop(readline, tokeneater)
tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
from __future__ import generators
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = \
'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
import string, re
from token import *
import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["COMMENT", "tokenize", "NL"]
del token
# Register two pseudo-token types that the C tokenizer does not expose:
# COMMENT for comment text and NL for non-logical newlines.
COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
N_TOKENS += 2
def group(*choices):
    """Return a regex group matching any one of *choices*."""
    return '(%s)' % '|'.join(choices)
def any(*choices):
    # NOTE: shadows the builtin any(); the name is kept because the
    # module-level patterns below depend on it.
    # apply() is deprecated — use direct star-call instead.
    return group(*choices) + '*'
def maybe(*choices):
    """Return a regex matching zero or one of the alternative choices."""
    # apply() is deprecated — use direct star-call instead.
    return group(*choices) + '?'
# Regular-expression fragments for each lexical class; combined into the
# master patterns (Token, PseudoToken) below.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'
Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
Octnumber = r'0[0-7]*[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[uU]?[rR]?'''", '[uU]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
                 r"//=?",
                 r"[+\-*/%&|^=<>]=?",
                 r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
# Compiled forms: tokenprog matches a full token, pseudoprog additionally
# matches comments/continuations, single3prog/double3prog match the tail of
# a triple-quoted string.
tokenprog, pseudoprog, single3prog, double3prog = map(
    re.compile, (Token, PseudoToken, Single3, Double3))
# Maps an opening quote (with optional u/r prefix) to the regex that finds
# the matching closing quote; bare prefixes map to None.
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
            "'''": single3prog, '"""': double3prog,
            "r'''": single3prog, 'r"""': double3prog,
            "u'''": single3prog, 'u"""': double3prog,
            "ur'''": single3prog, 'ur"""': double3prog,
            "R'''": single3prog, 'R"""': double3prog,
            "U'''": single3prog, 'U"""': double3prog,
            "uR'''": single3prog, 'uR"""': double3prog,
            "Ur'''": single3prog, 'Ur"""': double3prog,
            "UR'''": single3prog, 'UR"""': double3prog,
            'r': None, 'R': None, 'u': None, 'U': None}
# Number of columns a tab advances to (next multiple of tabsize).
tabsize = 8
class TokenError(Exception):
    """Raised when input ends in the middle of a multi-line token."""
    pass
class StopTokenizing(Exception):
    """Raised by a tokeneater callback to end tokenize() early."""
    pass
# Default tokeneater: prints one "srow,scol-erow,ecol: NAME repr" line per
# token (Python 2 only — tuple parameters and the print statement).
def printtoken(type, token, (srow, scol), (erow, ecol), line): # for testing
    print "%d,%d-%d,%d:\t%s\t%s" % \
        (srow, scol, erow, ecol, tok_name[type], repr(token))
def tokenize(readline, tokeneater=printtoken):
    """Tokenize the stream read via readline, calling tokeneater per token.

    A tokeneater may raise StopTokenizing to abort quietly.
    """
    try:
        tokenize_loop(readline, tokeneater)
    except StopTokenizing:
        pass
# backwards compatible interface
def tokenize_loop(readline, tokeneater):
    """Call tokeneater(*token_info) for each token from generate_tokens."""
    for token_info in generate_tokens(readline):
        # apply() is deprecated — use argument unpacking instead.
        tokeneater(*token_info)
def generate_tokens(readline):
    """Generator yielding 5-tuples (type, string, (srow, scol), (erow, ecol),
    line) for each token read via readline (returns '' at EOF)."""
    lnum = parenlev = continued = 0
    namechars, numchars = string.ascii_letters + '_', '0123456789'
    # contstr/contline accumulate a multi-line string under construction.
    contstr, needcont = '', 0
    contline = None
    indents = [0]
    while 1: # loop over lines in stream
        line = readline()
        lnum = lnum + 1
        pos, max = 0, len(line)
        if contstr: # continued string
            if not line:
                raise TokenError, ("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield (STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                yield (ERRORTOKEN, contstr + line,
                       strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue
        elif parenlev == 0 and not continued: # new statement
            if not line: break
            column = 0
            while pos < max: # measure leading whitespace
                if line[pos] == ' ': column = column + 1
                elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize
                elif line[pos] == '\f': column = 0
                else: break
                pos = pos + 1
            if pos == max: break
            if line[pos] in '#\r\n': # skip comments or blank lines
                yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
                       (lnum, pos), (lnum, len(line)), line)
                continue
            if column > indents[-1]: # count indents or dedents
                indents.append(column)
                yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                indents = indents[:-1]
                yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
        else: # continued statement
            if not line:
                raise TokenError, ("EOF in multi-line statement", (lnum, 0))
            continued = 0
        # Scan the remainder of the line for tokens.
        while pos < max:
            pseudomatch = pseudoprog.match(line, pos)
            if pseudomatch: # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                token, initial = line[start:end], line[start]
                if initial in numchars or \
                   (initial == '.' and token != '.'): # ordinary number
                    yield (NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    yield (parenlev > 0 and NL or NEWLINE,
                           token, spos, epos, line)
                elif initial == '#':
                    yield (COMMENT, token, spos, epos, line)
                elif token in ("'''", '"""', # triple-quoted
                               "r'''", 'r"""', "R'''", 'R"""',
                               "u'''", 'u"""', "U'''", 'U"""',
                               "ur'''", 'ur"""', "Ur'''", 'Ur"""',
                               "uR'''", 'uR"""', "UR'''", 'UR"""'):
                    endprog = endprogs[token]
                    endmatch = endprog.match(line, pos)
                    if endmatch: # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield (STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start) # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in ("'", '"') or \
                    token[:2] in ("r'", 'r"', "R'", 'R"',
                                  "u'", 'u"', "U'", 'U"') or \
                    token[:3] in ("ur'", 'ur"', "Ur'", 'Ur"',
                                  "uR'", 'uR"', "UR'", 'UR"' ):
                    if token[-1] == '\n': # continued string
                        strstart = (lnum, start)
                        endprog = (endprogs[initial] or endprogs[token[1]] or
                                   endprogs[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else: # ordinary string
                        yield (STRING, token, spos, epos, line)
                elif initial in namechars: # ordinary name
                    yield (NAME, token, spos, epos, line)
                elif initial == '\\': # continued stmt
                    continued = 1
                else:
                    if initial in '([{': parenlev = parenlev + 1
                    elif initial in ')]}': parenlev = parenlev - 1
                    yield (OP, token, spos, epos, line)
            else:
                yield (ERRORTOKEN, line[pos],
                       (lnum, pos), (lnum, pos+1), line)
                pos = pos + 1
    for indent in indents[1:]: # pop remaining indent levels
        yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
if __name__ == '__main__': # testing
    import sys
    # Tokenize the named file, or stdin when no argument is given.
    if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
    else: tokenize(sys.stdin.readline)
|
How Do You Enter Services Rendered in an Accounting Journal?
For businesses that don’t sell a physical product, income is usually from services rendered. A service is an intangible product, and income derived from selling your services must be recorded in the accounting ledger. A journal entry in an accounting journal is a business transaction. A double-entry journal entry takes the form of a ledger with two columns; a debit column, and a credit column. It’s called double-entry because every entry into one column requires a corresponding entry into the other column.
For example, if a company borrows $1,000 from the bank, the company's asset account Cash is increased with a debit entry of $1,000. That’s one entry. The second entry requires that the company's liability account Loans Payable is increased with a credit entry of $1,000. If the company repays $500 of the loan, the company will decrease the amount in its Cash account with a credit entry of $500 and will also reduce the balance in its Loan Payable account with a debit entry of $500.
When Does Services Rendered Count as Income?
Service revenue counts as income when the services are rendered. Often, services are provided, and payment is received at a later time after an invoice is sent. The payment at that time may be in full, or it may be a partial payment. As soon as services are rendered, even if they haven't been paid for yet, that is considered to be service revenue.
It is important to note that advanced collections before services are rendered are not treated as service revenue yet. They become part of service revenue only when the services are rendered.
Is Income Considered a Debit or Credit?
To enter services rendered in an accounting journal, first create the format of the entry. Entries should include information such as the date of service, an explanation of the entry and the option to place it into a debit or credit column. The first column includes the account number and account name into which the entry is recorded. The second column contains the debit amount to be entered. The third column contains the credit amount to be entered. Indent the account name and number line if it is a credit.
When a customer owes you cash for services rendered, the business has an asset for the amount due. In the double-entry method, the Cash account will receive a debit in the amount of the service to be paid for. The Service Revenue column will receive a credit in the same amount.
Let’s look at an example of services that are rendered for $500. The journal entry would show a debit to Cash of $500 and a credit to Service Revenue for $500. If the customer pays only $100, then Cash receives a debit of $100, Accounts Receivable receives a debit of $400, and Service Revenue receives a credit of $500. In this example, the portion collected is Cash while the remaining balance is debited to Accounts Receivable.
Note that the total credit to Service Revenue is equal to the debits entered for the cash received and the amount still due. The double-entry method requires that the debit amounts always equal the credit amounts.
Since 2006, Vanessa has written for a variety of website development agencies and private clients on topics related to growth for new and underperforming businesses. Her work can be found in print publications and on websites such as Outpost.co, the blog of the email tool of Palo Alto Software and business accelerators and Chambers of Commerce in her state.
|
#!/usr/bin/env python
import sys
from xml.etree import ElementTree as ET
import json
import re
import datetime
class DestFileCreator:
    """Factory that opens sequentially numbered per-trip output files."""
    # Number used for the next trip file created (zero-padded to 3 digits).
    nextTrip = 1
    def __init__(self, firstTrip):
        self.nextTrip = firstTrip
    def next_dest(self, data_file):
        """Open (for writing) the next trip file, named after data_file."""
        newName = "Trip" + str(self.nextTrip).zfill(3) + "-" + data_file
        self.nextTrip = self.nextTrip + 1
        print "Starting ",
        print newName
        return open(newName, "w")
def get_next_file(trace_file):
    """Return the name of the hourly trace file one hour after trace_file.

    Expects the filename to contain YYYY-MM-DD-HH; rolls over day/month/year
    boundaries via datetime arithmetic.
    """
    fields = [int(tok) for tok in re.findall(r'[0-9]+', trace_file)]
    stamp = datetime.datetime(fields[0], fields[1], fields[2], fields[3])
    stamp += datetime.timedelta(hours=1)
    # str(date) gives YYYY-MM-DD; pad the hour to two digits.
    return "%s-%02d.json" % (stamp.date(), stamp.hour)
def compile_trip(trace_file, tripNum):
dataFileValid = True
lastTimeStamp = 0.0
currentTimeStamp = 0
destFileGen = DestFileCreator(tripNum)
errorCount = 0
lineCount = 0
destinationFile = destFileGen.next_dest(trace_file)
while dataFileValid is True:
try:
currentTraceFile = open(trace_file, "r")
except IOError, e:
print e
dataFileValid = False
destinationFile.close()
break
else:
print 'Opened %s' % trace_file
for line in currentTraceFile:
try:
lineCount = lineCount + 1
timestamp, data = line.split(':', 1)
record = json.loads(data)
except ValueError:
sys.stderr.write("Skipping line: %s" % data)
print " "
errorCount = errorCount + 1
continue
if lastTimeStamp is not 0.0:
if (float(timestamp) - lastTimeStamp) > 600.00: # Time is in seconds
print "Found a gap of ",
print (float(timestamp) - lastTimeStamp),
print " seconds. Creating new Trip file."
destinationFile.close()
lastTimeStamp = 0.0
destinationFile = destFileGen.next_dest(trace_file)
elif (float(timestamp) - lastTimeStamp) > 1.00: # Time is in seconds
print "Momentary dropout of ",
print (float(timestamp) - lastTimeStamp),
print " seconds. Ignoring."
lastTimeStamp = float(timestamp)
destinationFile.write(line)
if dataFileValid is True:
currentTraceFile.close()
trace_file = get_next_file(trace_file)
percentBad = 100.0 * errorCount / lineCount
print "Parsed",
print lineCount,
print "lines."
print "Detected",
print errorCount,
print "errors."
print percentBad,
print "% bad data."
if __name__ == '__main__':
if len(sys.argv) is not 3:
print "Must provide the path to the first trace file in a trip and the trip number."
sys.exit(1)
compile_trip(sys.argv[1], int(sys.argv[2]))
|
Centrally located in Canmore, just minutes from Kananaskis Country and Banff National Park, The Rocky Mountain Ski Lodge offers a mix of hotel rooms, apartments and lofts. With the variety of accommodations, this property is an affordable choice for both individual travelers as well as groups. The rooms feature spectacular mountain views, air conditioning, free Wi-Fi, as well as mini fridges and coffeemakers. Amenities include a hot pool, a sauna, and BBQ and picnic area with a kids’ playground, plus laundry facilities and a meeting room.
There is limited availability at this property for the dates you have selected. To confirm, call 866-535-6051 for immediate assistance or request a quote. One of our Mountain Vacation Specialists may still be able to secure an available unit for you here. Otherwise, we will recommend another option that is similar and available.
There is limited availability at this property for the room type you are trying to book for the dates you have selected. To confirm, call 866-535-6051 for immediate assistance or request a quote. One of our Mountain Vacation Specialists may still be able to secure an available unit for you at this property or know of other similar properties at this resort that may have availability.
|
"""
This module tests the interaction of Kafka with Zookeeper with authentication enabled
"""
import logging
import uuid
import pytest
import sdk_cmd
import sdk_install
import sdk_marathon
import sdk_utils
from tests import active_directory
from tests import auth
from tests import config
from tests import test_utils
# Skip every test in this module unless an Active Directory KDC is configured.
pytestmark = pytest.mark.skipif(
    not active_directory.is_active_directory_enabled(),
    reason="This test requires TESTING_ACTIVE_DIRECTORY_SERVER to be set",
)
# Module-level logger.
log = logging.getLogger(__name__)
@pytest.fixture(scope="module", autouse=True)
def kerberos(configure_security):
    """Provision an Active Directory Kerberos environment for the module.

    The environment is created before the try block: previously a failing
    constructor left kerberos_env unbound, so the finally clause raised an
    unrelated NameError that masked the real failure.
    """
    kerberos_env = active_directory.ActiveDirectoryKerberos()
    try:
        yield kerberos_env
    finally:
        kerberos_env.cleanup()
@pytest.fixture(scope="module")
def zookeeper_server(kerberos):
    """Install a kerberized ZooKeeper service; uninstall on teardown.

    Yields the install options plus the package name so dependent fixtures
    can look up service endpoints.
    """
    service_kerberos_options = {
        "service": {
            "name": config.ZOOKEEPER_SERVICE_NAME,
            "security": {
                "kerberos": {
                    "enabled": True,
                    "kdc": {"hostname": kerberos.get_host(), "port": int(kerberos.get_port())},
                    "realm": kerberos.get_realm(),
                    "keytab_secret": kerberos.get_keytab_path(),
                }
            },
        }
    }
    try:
        # Ensure a clean slate before installing.
        sdk_install.uninstall(config.ZOOKEEPER_PACKAGE_NAME, config.ZOOKEEPER_SERVICE_NAME)
        sdk_install.install(
            config.ZOOKEEPER_PACKAGE_NAME,
            config.ZOOKEEPER_SERVICE_NAME,
            config.ZOOKEEPER_TASK_COUNT,
            package_version=config.ZOOKEEPER_PACKAGE_VERSION,
            additional_options=service_kerberos_options,
            timeout_seconds=30 * 60,
        )
        yield {**service_kerberos_options, **{"package_name": config.ZOOKEEPER_PACKAGE_NAME}}
    finally:
        sdk_install.uninstall(config.ZOOKEEPER_PACKAGE_NAME, config.ZOOKEEPER_SERVICE_NAME)
@pytest.fixture(scope="module", autouse=True)
def kafka_server(kerberos, zookeeper_server):
    """Install a kerberized Kafka wired to the kerberized ZooKeeper."""
    # Get the zookeeper DNS values
    zookeeper_dns = sdk_cmd.svc_cli(
        zookeeper_server["package_name"],
        zookeeper_server["service"]["name"],
        "endpoint clientport",
        parse_json=True,
    )[1]["dns"]
    service_kerberos_options = {
        "service": {
            "name": config.SERVICE_NAME,
            "security": {
                "kerberos": {
                    "enabled": True,
                    "enabled_for_zookeeper": True,
                    "kdc": {"hostname": kerberos.get_host(), "port": int(kerberos.get_port())},
                    "realm": kerberos.get_realm(),
                    "keytab_secret": kerberos.get_keytab_path(),
                }
            },
        },
        # Point Kafka at the kerberized ZooKeeper ensemble.
        "kafka": {"kafka_zookeeper_uri": ",".join(zookeeper_dns)},
    }
    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
    try:
        sdk_install.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_BROKER_COUNT,
            additional_options=service_kerberos_options,
            timeout_seconds=30 * 60,
        )
        yield {**service_kerberos_options, **{"package_name": config.PACKAGE_NAME}}
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
@pytest.fixture(scope="module", autouse=True)
def kafka_client(kerberos, kafka_server):
    """Deploy a Marathon app running a kerberized Kafka client container.

    Yields the app definition plus the broker hostnames (ports stripped).
    """
    brokers = sdk_cmd.svc_cli(
        kafka_server["package_name"], kafka_server["service"]["name"], "endpoint broker", parse_json=True
    )[1]["dns"]
    try:
        client_id = "kafka-client"
        client = {
            "id": client_id,
            "mem": 512,
            "container": {
                "type": "MESOS",
                "docker": {"image": "elezar/kafka-client:4b9c060", "forcePullImage": True},
                "volumes": [
                    {
                        "containerPath": "/tmp/kafkaconfig/kafka-client.keytab",
                        "secret": "kafka_keytab",
                    }
                ],
            },
            "secrets": {"kafka_keytab": {"source": kerberos.get_keytab_path()}},
            "networks": [{"mode": "host"}],
            "env": {
                "JVM_MaxHeapSize": "512",
                "KAFKA_CLIENT_MODE": "test",
                "KAFKA_TOPIC": "securetest",
                "KAFKA_BROKER_LIST": ",".join(brokers),
            },
        }
        sdk_marathon.install_app(client)
        # Strip ':port' so callers get bare broker hostnames.
        yield {**client, **{"brokers": list(map(lambda x: x.split(":")[0], brokers))}}
    finally:
        sdk_marathon.destroy_app(client_id)
@pytest.mark.dcos_min_version("1.10")
@sdk_utils.dcos_ee_only
@pytest.mark.sanity
def test_client_can_read_and_write(kafka_client, kafka_server, kerberos):
    """End-to-end: the kerberized client can produce and consume a message."""
    client_id = kafka_client["id"]
    auth.wait_for_brokers(kafka_client["id"], kafka_client["brokers"])
    topic_name = "authn.test"
    sdk_cmd.svc_cli(
        kafka_server["package_name"],
        kafka_server["service"]["name"],
        "topic create {}".format(topic_name),
        parse_json=True,
    )
    test_utils.wait_for_topic(
        kafka_server["package_name"], kafka_server["service"]["name"], topic_name
    )
    # Round-trip a unique payload so stale topic data cannot cause a false pass.
    message = str(uuid.uuid4())
    assert write_to_topic("client", client_id, topic_name, message, kerberos)
    assert message in read_from_topic("client", client_id, topic_name, 1, kerberos)
def write_to_topic(cn: str, task: str, topic: str, message: str, krb5: object) -> bool:
    """Produce message to topic from inside task, authenticating as cn."""
    client_properties = auth.get_kerberos_client_properties(ssl_enabled=False)
    krb5_env = auth.setup_krb5_env(cn, task, krb5)
    return auth.write_to_topic(cn, task, topic, message, client_properties, krb5_env)
def read_from_topic(cn: str, task: str, topic: str, message: int, krb5: object) -> str:
    """Consume from topic inside task's container, authenticating as cn.

    NOTE(review): despite its name, callers pass a message *count* here (the
    test above passes 1), so the annotation is int — confirm against
    auth.read_from_topic before renaming the parameter.
    """
    return auth.read_from_topic(
        cn,
        task,
        topic,
        message,
        auth.get_kerberos_client_properties(ssl_enabled=False),
        auth.setup_krb5_env(cn, task, krb5),
    )
|
To install two internal doors/handles, one external door/lock, one side kitchen larder panel and a stair rail.
We require a stair rail to be fitted to the staircase. Materials are also required to complete the work.
We would like to remove our current shower, as it does not work anymore and replace it with a new one.
Small 2 bed end Terrace house, I require 4 rows of roof tiles to be lifted for insulation to be placed and tiles to be put back. Easy access to the roof as there is an extension underneath.
To assemble a Schreiber corner wardrobe, a single wardrobe and a narrow five-drawer chest.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""grid module: responsible for building up grid widgets"""
# Copyright 2002, 2003 St James Software
#
# This file is part of jToolkit.
#
# jToolkit is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# jToolkit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with jToolkit; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from jToolkit.widgets import widgets
from jToolkit.widgets import table
from jToolkit import cidict
from jToolkit.data import dates
import types
class GridCategory:
    """Describes one column of a Grid: formatting, width and cell rendering.

    Fix: attribs previously defaulted to a shared mutable {} which
    getcellwidget then mutated, leaking cell attributes across every
    category created without an explicit attribs argument.
    """
    def __init__(self, name, title, display, storageformat, displayformat, col, pctwidth, mergecells, mainarea, attribs = None):
        self.name = name
        self.title = title
        self.display = display
        self.mainarea = mainarea
        self.storageformat = storageformat
        self.displayformat = displayformat
        self.col = col
        self.pctwidth = pctwidth
        # Actual percentage width; filled in later by Grid.balancewidths.
        self.width = None
        self.mergecells = mergecells
        # Copy so instances never share (or mutate) a caller's dict.
        self.attribs = dict(attribs) if attribs else {}
    def gettitlecellwidget(self):
        """returns the title cell for this category, using the self.width"""
        titlestyle = {'font-weight':'bold'}
        return table.TableCell(self.title, newattribs={'width':'%f%%' % self.width,'style':titlestyle})
    def valuetostring(self, value):
        """based on the category, returns value as a string that can be displayed"""
        # TODO: look at combining this and other methods with FormCategory...
        if self.storageformat in ['TEXT','STRING']:
            if value is None:
                return ''
            else:
                return value
        elif self.storageformat in ['INTEGER', 'DECIMAL']:
            return str(value)
        elif self.storageformat == 'DATETIME':
            return dates.formatdate(value, self.displayformat)
        elif value is None:
            return ''
        else:
            return value
    def getcellwidget(self, obj, style, href, hreftarget):
        """returns a widget for a cell, optionally wrapped in a link"""
        text = self.valuetostring(obj)
        if href is None:
            contents = text
        else:
            contents = widgets.Link(href, text, {'target':hreftarget,'style':style})
        # Merge per-cell attributes into a copy so self.attribs stays pristine
        # (the old code mutated self.attribs on every call).
        cellattribs = dict(self.attribs)
        cellattribs.update({'valign':'top','style':style})
        return table.TableCell(contents, newattribs=cellattribs)
    def canmerge(self, widget1, widget2):
        """checks whether we can merge these cells..."""
        if not self.mergecells:
            return False
        if type(widget1) == type(widget2):
            if type(widget1) == types.InstanceType:
                if widget1.__class__ == widget2.__class__:
                    # now we can compare
                    if isinstance(widget1, table.TableCell):
                        # match cells if the contents match and the attributes match
                        return self.canmerge(widget1.contents, widget2.contents) and widget1.attribs == widget2.attribs
                    elif isinstance(widget1, widgets.Link):
                        # merge if contents are the same, even if links are different
                        # the links are usable from another row
                        return self.canmerge(widget1.contents, widget2.contents)
                    else:
                        # unknown class...
                        return 0
                else:
                    # mismatched class
                    return 0
            elif isinstance(widget1, basestring):
                return widget1 == widget2
            else:
                # unknown type
                return 0
        else:
            # mismatched types
            return 0
    def gethtmlstyle(self, textcolor, backcolor, font):
        """builds a CSS style dict from colors and a space-separated font spec"""
        style = {}
        style['color'] = widgets.getrgb(textcolor, '&H000000')
        style['background-color'] = widgets.getrgb(backcolor, '&HFFFFFF')
        if font is not None:
            fontstyles = font.lower().split()
            for fontstyle in fontstyles:
                if fontstyle == 'bold': style['font-weight'] = 'bold'
                if fontstyle == 'italic': style['font-style'] = 'italic'
        return style
class GridDivision(widgets.Division):
    """this division class is used to do layout tricks to be wider than the screen"""
    def __init__(self, gridwidget):
        # Size the division from the grid's computed percentage width.
        width = '%d%%' % gridwidget.pctwidth
        widgets.Division.__init__(self, newattribs={'style': {'width': width}},
                                  contents=gridwidget)
class Grid(table.TableLayout):
    """A bordered table whose columns are described by GridCategory objects."""
    def __init__(self, categories, fillincolumn=None):
        table.TableLayout.__init__(self, newattribs = {'width':'100%','border':'1','cellspacing':'0'})
        # Total percentage width of the grid (may exceed 100 for wide grids).
        self.pctwidth = 100
        self.categories = categories
        # Name of the column that absorbs any leftover width, or None.
        self.fillincolumn = fillincolumn
        self.balancewidths()
    def addtitlerow(self):
        """Add the title row cells"""
        for category in self.categories:
            if category.display:
                titlecell = category.gettitlecellwidget()
                self.addcell(0, category, titlecell)
    def balancewidths(self):
        """calculate the width of each of the columns"""
        # summarize widths...
        mainfixedpctwidth = 0
        suppfixedpctwidth = 0
        for category in self.categories:
            if category.display:
                # default any unspecified width to 10%
                if category.pctwidth == 0: category.pctwidth = 10
                if category.mainarea:
                    mainfixedpctwidth += category.pctwidth
                else:
                    suppfixedpctwidth += category.pctwidth
        extrapctwidth = 100 - mainfixedpctwidth
        if extrapctwidth >= 0:
            totalpctwidth = 100 + suppfixedpctwidth
        else:
            # main area overflows 100%: widen the whole grid instead
            totalpctwidth = 100 + suppfixedpctwidth - extrapctwidth
            extrapctwidth = 0
        self.pctwidth = totalpctwidth
        # add the title cells...
        for category in self.categories:
            if category.display:
                if category.name == self.fillincolumn:
                    # the fill-in column gets the leftover width
                    category.width = (category.pctwidth + extrapctwidth) * 100.0 / totalpctwidth
                else:
                    category.width = category.pctwidth * 100.0 / totalpctwidth
    def addcell(self, rownum, category, value):
        """adds a cell to the grid"""
        # see if this should be merged with the above cell...
        if self.hascell(rownum-1, category.col):
            cellabove = self.getcell(rownum-1, category.col)
            if category.canmerge(value, cellabove):
                # at the moment, duplicate objects, later, replace them...
                value = cellabove
        table.TableLayout.setcell(self, rownum, category.col, value)
class SimpleGridCategory(GridCategory):
    """a GridCategory that handles coloring..."""
    def __init__(self, name, title, tooltip, colnum, colordict, isheading, display, storageformat='TEXT'):
        # DATETIME columns get a fixed display format; everything else is raw.
        if storageformat == 'DATETIME':
            displayformat = '%y-%m-%d %H:%M:%S'
        else:
            displayformat = ''
        GridCategory.__init__(self, name, title, display=display, storageformat=storageformat, displayformat=displayformat,
                              col=colnum, pctwidth=10, mergecells=0, mainarea=1)
        self.tooltip = tooltip
        # Category whose value decides this column's text color; set later
        # via setcolorcategory.  None means use the default color.
        self.colorcategory = None
        # Maps cell value (stringified) -> text color.
        self.colordict = colordict
        if isheading:
            self.backcolor = '&HD0D0D0'
            self.font = 'bold'
        else:
            self.backcolor = '&HFFFFFF'
            self.font = ''
    def setcolorcategory(self, colorcategory):
        """sets which category the color of this one will be based on"""
        self.colorcategory = colorcategory
    def gettitlecellwidget(self):
        """returns the title cell for this category, using the given width"""
        titlestyle = self.gethtmlstyle('&H660000', self.backcolor, 'bold')
        titlelink=widgets.Tooltip(self.tooltip, self.title)
        attribs={'width': '%f%%' % self.width, 'style': titlestyle,'valign': 'top'}
        return table.TableCell(titlelink, newattribs=attribs)
    def gettextcolor(self, row):
        """returns textcolor based on the value of this category in given row"""
        obj = row[self.name]
        return self.colordict.get(str(obj),None)
    def getwidget(self, row, href, hreftarget):
        """simply returns a widget for this category"""
        obj = row[self.name]
        obj = self.valuetostring(obj)
        if obj is None:
            text = ''
        elif isinstance(obj, unicode):
            # Python 2: encode unicode values for the HTML byte stream.
            text = obj.encode('utf8')
        else:
            text = str(obj)
        if text == '':
            text = ' '
        if self.colorcategory is not None:
            textcolor = self.colorcategory.gettextcolor(row)
        else:
            textcolor = '&H000000'
        style = self.gethtmlstyle(textcolor, self.backcolor, self.font)
        style['text-decoration'] = 'none'
        # we need to set the style in both objects otherwise the link style can override it
        if href is None:
            contents = text
        else:
            contents = widgets.Link(href, text, {'target':hreftarget,'style':style})
        return table.TableCell(contents, newattribs={'valign':'top','style':style})
class SimpleGrid(Grid):
    """a grid with common methods for config pages"""
    def __init__(self, gridtable, columnlist, hrefbase=None, hreftarget='', colordefs={}, colordeps={}, \
        headingcolumns=(), hidecolumns=(), filter=None, gridcategory=SimpleGridCategory,newattribs={}):
        # NOTE(review): the {} and () defaults above are shared mutable/immutable
        # objects; they are only read here, but callers must not mutate them.
        self.hrefbase = hrefbase
        self.hreftarget = hreftarget
        self.gridtable = gridtable
        self.columnlist = columnlist
        self.colordefs = colordefs
        self.colordeps = colordeps
        self.headingcolumns = headingcolumns
        self.hidecolumns = hidecolumns
        self.filter = filter
        self.gridcategory = gridcategory
        Grid.__init__(self, self.getcolumns())
        self.overrideattribs(newattribs)
        self.makegrid()
    # Text colors used for boolean columns: black when true, grey when false.
    enabled, disabled = '&H000000', '&H808080'
    booleancolors = cidict.cidict({'false':disabled, 'true':enabled})
    def getcolumns(self):
        """gets the columns for the grid (columns of categoryconf)..."""
        columns = []
        columndict = {}
        colnum = 0
        for name, title, tooltip in self.columnlist:
            colnum += 1
            colordict = self.colordefs.get(name,{})
            isheading = name in self.headingcolumns
            display = name not in self.hidecolumns
            # the table schema's column type drives value formatting
            storageformat = self.gridtable.columntypes.get(name, 'TEXT').upper()
            column = self.gridcategory(name, title, tooltip, colnum, colordict, isheading, display, storageformat)
            columns.append(column)
            columndict[name] = column
        # wire up color dependencies between columns
        for colorcolumnname, depcolumnnames in self.colordeps.iteritems():
            colorcolumn = columndict[colorcolumnname]
            for depcolumnname in depcolumnnames:
                depcolumn = columndict[depcolumnname]
                depcolumn.setcolorcategory(colorcolumn)
        return columns
def makegrid(self):
"""makes up the grid - retrieves rows, adds them, and adjusts the grid"""
self.addtitlerow()
self.addrows()
self.shrinkrange()
# in case we missed any out...
self.fillemptycells()
self.mergeduplicates([0])
self.hidecoveredcells()
self.calcweights()
def addrows(self):
"""gets all the database rows from the table and adds cells for each one to the grid"""
self.getrows()
for row in self.tablerows:
self.addrow(row)
def addrow(self, row):
"""adds all the cells for a row to the grid"""
href = None
if self.hrefbase is not None:
rowid = self.gridtable.getrowid(row)
href = self.hrefbase
if '?' in href:
href += "&"
else:
href += "?"
href += 'action=view&' + self.gridtable.rowidparamstring(rowid)
if self.page:
href += '&page=%d' % self.page
rownum = self.maxrownum() + 1
for category in self.categories:
if not category.display: continue
widgetcell = category.getwidget(row, href, self.hreftarget)
self.addcell(rownum, category, widgetcell)
def handlepageargs(self):
"""handle arguments that select the current page"""
page = self.attribs.get('page', 0)
try:
page = int(page)
except:
if page.lower() == 'all':
page = 0
else:
page = 1
self.page = page
self.numrowsinpage = self.attribs.get('numrowsinpage',20)
def getrows(self):
"""retrieves the appropriate rows from the database"""
self.handlepageargs()
if self.page == 0:
minrow, maxrow = None, None
else:
minrow, maxrow = (self.page-1)*self.numrowsinpage, self.page*self.numrowsinpage
if minrow < 0: minrow = 0
# if maxrow > len(alllogrows): maxrow = len(alllogrows)
if minrow != None and maxrow != None:
self.tablerows = self.gridtable.getsometablerows(minrow,maxrow,self.filter)
if len(self.tablerows) < maxrow-minrow: #End of table
self.tableEnded = 1
else:
self.tableEnded = 0
else:
self.tablerows = self.gridtable.gettablerows(self.filter)
def getpagelink(self, pagehref, pagenum, pagetext=None):
"""returns a widget that links to a particular page"""
if pagetext is None:
pagetext = "Page %d" % pagenum
if pagenum == self.page:
pagetext = "<font color='red'><b>%s</b></font>" % pagetext
return widgets.Link(pagehref+"&page=%d" % pagenum, pagetext)
def getpagelinks(self):
"""returns a widget that links to other pages of this grid"""
self.numpages = (self.gridtable.countrows(self.filter) + self.numrowsinpage - 1) / self.numrowsinpage
currentpage = "Page %d of %d. " % (self.page, self.numpages)
pagehref = "?"
if self.filter is not None:
pagehref += self.filter.getfilterparams()
first, last, next, previous = "First", "Last", "Next", "Previous"
if self.page == 0:
currentpage = "Showing all records"
pages = [(1, first), (self.numpages, last)]
elif self.numpages > 1:
pages = []
if 1 == self.page-1:
pages.append((1, first+"/"+previous))
else:
pages.append((1, first))
if self.page-1 > 1:
pages.append((self.page-1, previous))
if self.page+1 == self.numpages:
pages.append((self.page+1, next+"/"+last))
else:
if self.page+1 < self.numpages:
pages.append((self.page+1, next))
pages.append((self.numpages, last))
else:
pages = []
pagelinks = [self.getpagelink(pagehref, pagenum, pagetext) for pagenum, pagetext in pages]
return widgets.Division(contents=widgets.Paragraph(contents=[currentpage, pagelinks]), id="toolbar")
class NumberGridCategory(SimpleGridCategory):
    """A grid category that formats floating-point columns to three decimals."""
    def valuetostring(self, value):
        """Format numeric DECIMAL/DOUBLE values as '%.3f'; otherwise defer to the base class."""
        is_float_column = self.storageformat in ('DECIMAL', 'DOUBLE')
        if is_float_column and isinstance(value, (int, float)):
            return "%.3f" % value
        return SimpleGridCategory.valuetostring(self, value)
|
Discussion in 'Parts: For Sale/Trade/Wanted' started by vern0013, Oct 1, 2002.
No need to double post. If I see you advertising in the swap forums again I will start deleting all your posts.
That, and we don't allow this.
|
from setuptools import setup, find_packages
import os
# Distribution metadata for the zenodio package.
packagename = 'zenodio'
description = 'I/O with Zenodo.'
author = 'Jonathan Sick'
author_email = 'jsick@lsst.org'
license = 'MIT'  # shadows the builtin license(); conventional in setup.py scripts
url = 'https://github.com/lsst-sqre/zenodio'
version = '0.1.1.dev0'
def read(filename):
    """Return the contents of *filename*, relative to this file, as a UTF-8 string."""
    full_filename = os.path.join(
        os.path.abspath(os.path.dirname(__file__)),
        filename)
    # Use a context manager so the file handle is closed promptly; the
    # original leaked the handle until garbage collection.
    with open(full_filename, mode='r', encoding='utf8') as f:
        return f.read()
# Read the long description from the README at build time so PyPI renders it.
long_description = read('README.rst')
setup(
    name=packagename,
    version=version,
    description=description,
    long_description=long_description,
    url=url,
    author=author,
    author_email=author_email,
    license=license,
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    keywords='aas',
    packages=find_packages(exclude=['docs', 'tests*', 'data', 'notebooks']),
    install_requires=['future', 'requests', 'xmltodict'],
    tests_require=['pytest'],
    # package_data={},
)
|
Play-Doh Cuisine: modelling clay kitchen playsets, including the Meal Makin' Kitchen and pastry-chef sets, with play food such as fruit, vegetables, pizza, burgers, hot dogs and fries.
|
from django.contrib.auth import logout, login, authenticate
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.http import HttpResponse
from django.shortcuts import render
from django.utils import timezone
from django.contrib.gis.geos import Point
from .models import TopPost, Following, Post, FollowingForm, PostForm, MyUserCreationForm
# Anonymous views
#################
def index(request):
    """Site root; currently every visitor gets the shared home stream."""
    # if request.user.is_authenticated():
    return home(request)
    # else:
    #     return anon_home(request)
def anon_home(request):
    """Landing page shown to anonymous visitors."""
    return render(request, 'micro/public.html')
def search(request):
    """Render the (client-side) search page."""
    return render(request, 'micro/search.html')
def stream(request, user_id):
    """Show a paginated stream of one user's posts.

    Authenticated visitors who are not already following this user (and are
    not looking at their own stream) also get a follow form.
    """
    # See if to present a 'follow' button
    form = None
    if request.user.is_authenticated() and request.user.id != int(user_id):
        try:
            # Existence probe only; the fetched row itself is not needed.
            Following.objects.get(follower_id=request.user.id,
                                  followee_id=user_id)
        except Following.DoesNotExist:
            form = FollowingForm
    user = User.objects.get(pk=user_id)
    post_list = Post.objects.filter(user_id=user_id).order_by('-pub_date')
    paginator = Paginator(post_list, 10)
    page = request.GET.get('page')
    try:
        posts = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        posts = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        posts = paginator.page(paginator.num_pages)
    context = {
        'posts': posts,
        'stream_user': user,
        'form': form,
    }
    return render(request, 'micro/stream.html', context)
@login_required
def getpost(request, post_id):
    """Display a single post.

    Uses @login_required (consistent with post()/follow() below) instead of
    the original inline is_authenticated() check, which silently returned
    None -- an invalid HTTP response -- for anonymous users.
    """
    post = Post.objects.get(id=post_id)
    context = {
        'post': post,
    }
    return render(request, 'micro/postindiv.html', context)
def register(request):
    """Create a new account and log the new user in.

    The original called form.save() without form.is_valid(), which raises on
    any invalid submission; invalid forms are now re-rendered with errors.
    """
    if request.method == 'POST':
        form = MyUserCreationForm(request.POST)
        if form.is_valid():
            new_user = form.save(commit=True)
            # Log in that user (cleaned_data is populated by is_valid()).
            user = authenticate(username=new_user.username,
                                password=form.cleaned_data['password2'])
            if user is not None:
                login(request, user)
            else:
                # The account was just created, so this should be impossible.
                raise RuntimeError('newly registered user failed to authenticate')
            return home(request)
        # invalid form: fall through and re-render it with its errors
    else:
        form = MyUserCreationForm()
    return render(request, 'micro/register.html', {'form': form})
# Authenticated views
#####################
# @login_required
def home(request):
    """List all posts, plus whether the visitor is authenticated.

    The original kept the flag in a module-level ``global logedIn`` and
    printed it on every request; it is now a plain local and the debug
    print is gone.
    """
    logged_in = request.user.is_authenticated()
    post_list = Post.objects.all()
    context = {
        'post_list': post_list,
        # context key kept as 'logedIn' (sic) -- the templates depend on it
        'logedIn': logged_in,
    }
    return render(request, 'micro/home.html', context)
def toppost(request):
    """List top posts (same template as home, different queryset).

    As with home(), the module-level ``global logedIn`` and the debug print
    have been replaced by a local variable.
    """
    logged_in = request.user.is_authenticated()
    post_list = TopPost.objects.all()
    context = {
        'post_list': post_list,
        # context key kept as 'logedIn' (sic) -- the templates depend on it
        'logedIn': logged_in,
    }
    return render(request, 'micro/home.html', context)
# Allows to post something and shows my most recent posts.
@login_required
def post(request):
    """Create a new post at the submitted location.

    Validates the form before saving (the original saved unconditionally
    and crashed on invalid input); invalid forms are re-rendered with
    their errors.
    """
    if request.method == 'POST':
        form = PostForm(request.POST)
        if form.is_valid():
            new_post = form.save(commit=False)
            new_post.user = request.user
            new_post.pub_date = timezone.now()
            # GEOS Point takes (x, y) == (longitude, latitude); 'longtitude'
            # (sic) is the model field name.
            new_post.mpoint = Point(new_post.longtitude, new_post.latitude)
            new_post.save()
            return home(request)
        # invalid form: fall through and re-render it with its errors
    else:
        form = PostForm()
    return render(request, 'micro/post.html', {'form': form})
@login_required
def follow(request):
    """Start following another user.

    Validates the form before saving (the original saved unconditionally
    and crashed on invalid input); invalid forms are re-rendered with
    their errors.
    """
    if request.method == 'POST':
        form = FollowingForm(request.POST)
        if form.is_valid():
            new_follow = form.save(commit=False)
            new_follow.follower = request.user
            new_follow.follow_date = timezone.now()
            new_follow.save()
            return home(request)
        # invalid form: fall through and re-render it with its errors
    else:
        form = FollowingForm()
    return render(request, 'micro/follow.html', {'form': form})
|
The Department's home is the modern, purpose-built Mathematical Sciences Building (shared with Mathematics and Computer Science) on the University of Warwick central campus. Statistics academics and PhD students have offices on the four floors of the building, which opened in 2018. Its facilities include superb lecture rooms and student work areas, and well-equipped computer labs.
|
import logging
import logging.config
import os
import sys
import sentry_sdk
from mtp_transaction_uploader import settings
from mtp_transaction_uploader.upload import main as transaction_uploader
def setup_monitoring():
    """
    Setup logging and exception reporting

    Returns a (logger, sentry_enabled) pair: the 'mtp' logger plus a flag
    saying whether Sentry exception reporting was initialised.
    """
    # dictConfig-style setup: human-readable output locally, ELK-formatted
    # output everywhere else; 'mtp' logs at INFO, everything else at WARNING
    logging_conf = {
        'version': 1,
        'disable_existing_loggers': True,
        'formatters': {
            'simple': {
                'format': '%(asctime)s [%(levelname)s] %(message)s',
                'datefmt': '%Y-%m-%dT%H:%M:%S',
            },
            'elk': {
                '()': 'mtp_common.logging.ELKFormatter'
            }
        },
        'handlers': {
            'console': {
                'level': 'DEBUG',
                'class': 'logging.StreamHandler',
                'formatter': 'simple' if settings.ENVIRONMENT == 'local' else 'elk',
            },
        },
        'root': {
            'level': 'WARNING',
            'handlers': ['console'],
        },
        'loggers': {
            'mtp': {
                'level': 'INFO',
                'handlers': ['console'],
                'propagate': False,
            },
        },
    }
    sentry_enabled = False
    # NOTE(review): enablement is keyed off the SENTRY_DSN environment
    # variable but the DSN actually used comes from settings -- presumably
    # settings mirrors the env var; confirm they cannot diverge.
    if os.environ.get('SENTRY_DSN'):
        sentry_sdk.init(
            dsn=settings.SENTRY_DSN,
            environment=settings.ENVIRONMENT,
            release=settings.APP_GIT_COMMIT,
            send_default_pii=False,
            request_bodies='never',
        )
        sentry_enabled = True
    logging.config.dictConfig(logging_conf)
    logger = logging.getLogger('mtp')
    return logger, sentry_enabled
def main():
    """Entry point: configure monitoring, validate settings, run the uploader.

    Exit codes: 0 when disabled, 1 for missing configuration, 2 on an
    unhandled uploader error.
    """
    logger, sentry_enabled = setup_monitoring()
    if settings.UPLOADER_DISABLED:
        logger.info('Transaction uploader is disabled')
        sys.exit(0)
    # ensure all required parameters are set: iterate the required set itself
    # so that parameters missing from the settings module entirely are also
    # reported (the original scanned dir(settings), silently skipping any
    # required name that was never defined there)
    required_params = {'SFTP_HOST', 'SFTP_USER', 'SFTP_PRIVATE_KEY', 'ACCOUNT_CODE',
                       'API_URL', 'API_CLIENT_ID', 'API_CLIENT_SECRET',
                       'API_USERNAME', 'API_PASSWORD'}
    missing_params = sorted(
        param for param in required_params
        if not getattr(settings, param, None)
    )
    if missing_params:
        logger.error('Missing environment variables: ' +
                     ', '.join(missing_params))
        sys.exit(1)
    try:
        # run the transaction uploader
        transaction_uploader()
    except Exception as e:
        # report to Sentry when available, otherwise log the traceback
        if sentry_enabled:
            sentry_sdk.capture_exception(e)
        else:
            logger.exception('Unhandled error')
        sys.exit(2)
# Script entry point: run the uploader when executed directly.
if __name__ == '__main__':
    main()
|
If you ask Jay Leonhardy how to improve his city, he stresses a simple focus for a complex problem: Provide quality employment opportunities to Richmond’s youth.
“We see a very direct correlation between reducing youth involved in crime and participation in our work program,” Leonhardy said, pointing to graduation and high school exit exam rates among program participants as indicating they’re less likely to get involved in crime.
Last year, 94 percent of eligible participants graduated or passed exit exams, Leonhardy said. That compares with a statewide graduation rate of less than 60 percent for students from low-income families.
As Division Program Manager for Richmond YouthWORKS, the city’s youth-employment program, Leonhardy oversees an operation that employed 705 local teens and young adults ages 16-21 this year at 140 Bay Area public and private work sites.
The program has paid out about $648,000 in wages this year, Leonhardy said.
Alia Anderson went from wayward teen to polished young professional riding the fast-track to a stable city job, thanks in part to YouthWORKS.
Anderson, 21, first enrolled in the program 5 years ago, when her mother, a bus driver who raised five children alone, took her to the YouthWORKS offices because she was concerned about her daughter’s poor grades and increasingly disobedient behavior.
Alia Anderson, one of 705 local youths who earned paychecks with YouthWORKS last summer, worked at the City Attorney's office.
Anderson said the change was immediate.
“I went from mostly F’s to nothing worse than C’s,” said Anderson, who graduated from De Anza High School in 2006.
With her mother working long hours, Anderson said she leaned heavily on mentors, tutors and case managers at YouthWORKS, who helped her transfer from John F. Kennedy High School and encouraged her to enroll in life skills, interviewing, hygiene and other after-school courses through the program.
This summer, Anderson worked as an administrative intern in the city’s City Attorney’s Office for $8.25 per hour. She did so well that she was retained by the office after the summer program was complete, and currently earns $10 per hour, 24 hours per week.
In six months, if she continues to impress, Anderson said she hopes to be retained as a full-time employee in the office.
“I would love to work here long term, and I know I can if I keep doing what’s expected of me,” she said.
The program was restarted in 2004, when RichmondWORKS director Sal Vaca, former mayor Irma Anderson and Leonhardy, who was Anderson’s chief of staff, decided to work toward fundraising for youth employment. A similar program went defunct in 2000 when federal funding dried up, Leonhardy said.
“We got about $200,000 from the city’s general fund and we raised about $100,000 from the community to start,” Leonhardy said.
One of the original private donors was Chevron Corp., which helped the program launch with a $50,000 donation.
Chevron’s sponsorship has continued and grown. This year, the multinational energy company donated $106,000.
Leonhardy noted that with 705 enrollees in a city of 100,000, Richmond has the highest per capita summer youth employment program in the state.
The youths enrolled are paid $8.25 per hour for a maximum of 121 hours of summer employment, Leonhardy said. Other benefits include access to self-improvement courses and a computer lab. Anderson said she still routinely goes to the program’s 25th Street offices to use the Internet.
Other successful enrollees include three of Anderson’s brothers and sisters.
Anderson’s older sister has worked at Chevron Corp. for two years, a job she got through a YouthWORKS internship.
And the program also offers a respite to youths in more dire straits than Anderson. Leonhardy points to Gustavo Ponce as a telling example.
Ponce, 17, was running with a north Richmond sect of the notorious Surenos street gang less than two years ago. Today, he works as an office assistant in a North Richmond insurance agency, employment he got through YouthWORKS last summer.
Leonhardy said he expects local, federal and private support to continue.
To correct a statement that was quoted wrong: I never said my mother couldn't afford internet. However, I do regularly go to the office on 25th Street to use the computers.
|
import os
import re
import cache
from fields import RequiresField, ModulesField, PackagesField
from template import template, show_field
import jinja2
import simplejson as json
from wtforms import form, fields
import logging
from uuid import uuid4 as uuid
log = logging.getLogger(__name__)
safe_string = jinja2.Markup  # marks a string as safe for template output
SETUP_PY_TEMPLATE = 'setup_py.tpl'
# match *.py/pyc/pyo files, capturing the module path without the extension
python_file_pattern = re.compile(r'(.*)\.(py|pyc|pyo)$', re.I)
readme_file_pattern = re.compile(r'readme(\..*)?$', re.I)
from trove import all_classifiers
# build (value, label) choice pairs: licenses get their short name, with an
# empty first choice; all other trove classifiers are offered verbatim
license_choices = \
    [('', '')] + \
    [tuple([c.split(' :: ')[-1]] * 2) for c in all_classifiers
     if c.startswith('License :: ')]
classifier_choices = [tuple([c] * 2) for c in all_classifiers
                      if not c.startswith('License :: ')]
def create_setup(client=None):
    """
    Use the file list from the source control client to
    instantiate a new Setup object.

    Packages are directories containing an __init__ file; loose modules not
    inside a package are collected for py_modules; the first README.* file
    found wins.
    """
    setup = SetupDistutils()
    packages = []
    modules = []  # was initialised twice in the original
    readme = None
    if client:
        packages = [os.path.dirname(f)
                    for f in client.files if '__init__.' in f]
        for filename in client.files:
            # look for files not in a package to add to py_modules in setup
            match = re.match(python_file_pattern, filename)
            if match:
                package = os.path.dirname(filename)
                module = match.groups()[0]
                if not module.endswith('setup') and package not in packages:
                    modules.append(module.replace('/', '.'))
            # find README.* files, first one wins
            if not readme:
                match = re.match(readme_file_pattern, filename)
                if match:
                    readme = filename
        setup.process(None, **client.discovered)
        setup.readme.data = readme
        setup.py_modules.data = ' '.join(modules)
        setup.packages.data = ' '.join(packages)
    return setup
class Setup(form.Form):
    """WTForms form describing the metadata arguments of a setup() call."""
    # plain text metadata fields
    author = fields.TextField()
    author_email = fields.TextField()
    name = fields.TextField()
    description = fields.TextField()
    version = fields.TextField()
    long_description = fields.TextAreaField()
    url = fields.TextField()
    license = fields.SelectField(choices=license_choices)
    classifiers = fields.SelectMultipleField(choices=classifier_choices)
    readme = fields.HiddenField()
    # lists
    py_modules = ModulesField()
    packages = PackagesField()
    requires = RequiresField()
    def __init__(self, *args, **kwargs):
        """Create the form and assign it a fresh cache key."""
        super(Setup, self).__init__(*args, **kwargs)
        # random hex token used as the cache slot for this form's data
        self.cache_key = str(uuid()).replace('-', '')
        # select fields may deserialise the string 'None'; normalise to None
        for field in [self.license, self.classifiers]:
            if field.data == 'None':
                field.data = None
    def process(self, formdata=None, obj=None, **kwargs):
        # no extra behaviour over the base class; kept as an override point
        super(Setup, self).process(formdata=formdata, obj=obj, **kwargs)
    def cache(self):
        """Persist the form data (plus its cache key) to the cache as JSON."""
        data = dict(self.data)
        data['cache_key'] = self.cache_key
        cache.set(self.cache_key, json.dumps(data))
    def visible_fields(self):
        """Return all fields except hidden ones."""
        return [f for f in self if not isinstance(f, fields.HiddenField)]
class SetupDistutils(Setup):
    """Setup form that can render itself as a setup.py file."""
    def generate(self, executable=False, under_test=False):
        """Render the setup.py template from the form's visible fields.

        executable: when true, fields with no data are omitted entirely so
            the output can run as-is.
        under_test: passed through to the template.
        Returns safe markup, or a plain error string if rendering fails.
        """
        try:
            indent = ' '
            args = ''
            for field in self.visible_fields():
                # don't show field at all if executable is on
                if not field.data and executable:
                    continue
                args += u'{}{}\n'.format(
                    indent,
                    show_field(field, self, executable))
            return safe_string(template(SETUP_PY_TEMPLATE,
                                        setup=self,
                                        executable=executable,
                                        setup_arguments=args,
                                        under_test=under_test))
        except Exception:
            # rendering errors are logged; callers get a readable placeholder
            log.exception('Failed to generate setup.py')
            return 'Error generating setup.py'
|
Sometimes a full bottle is a little too much wine, so it's nice to have the option to drink less - but drink better. And when it comes to Pinot Noir, nobody does it better in New Zealand than Ata Rangi. Few estates in New Zealand (or anywhere for that matter) can match Ata Rangi for sheer ambition in Pinot Noir. These lofty goals are reflected in this Crimson Pinot Noir - where other producers would be happy to produce something soft, fruity and accessible, Ata Rangi seek to make a genuine 5-Star wine that will develop with time although still be more accessible than their famous estate Pinot. This 2017 vintage is a cracker from winemaker Helen Masters, a serious, herb infused take on the Martinborough terroir.
Freshly mown summer hay, red and black cherry and plum with a kiss of French Oak.
|
# Copyright 2016 rpaas authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import unittest
import mock
from rpaas.nginx import Nginx, NginxError
class NginxTestCase(unittest.TestCase):
    """Tests for rpaas.nginx.Nginx; all HTTP traffic goes through mocked requests."""
    def setUp(self):
        # purge requests must be issued once per Accept-Encoding variant
        self.cache_headers = [{'Accept-Encoding': 'gzip'}, {'Accept-Encoding': 'identity'}]
    def test_init_default(self):
        """Default construction exposes the stock manage port and paths."""
        nginx = Nginx()
        self.assertEqual(nginx.nginx_manage_port, '8089')
        self.assertEqual(nginx.nginx_purge_path, '/purge')
        self.assertEqual(nginx.nginx_healthcheck_path, '/healthcheck')
    def test_init_config(self):
        """Constructor config overrides ports, paths and location templates."""
        nginx = Nginx({
            'NGINX_PURGE_PATH': '/2',
            'NGINX_MANAGE_PORT': '4',
            'NGINX_LOCATION_TEMPLATE_DEFAULT_TXT': '5',
            'NGINX_LOCATION_TEMPLATE_ROUTER_TXT': '6',
            'NGINX_HEALTHCHECK_PATH': '7',
        })
        self.assertEqual(nginx.nginx_purge_path, '/2')
        self.assertEqual(nginx.nginx_manage_port, '4')
        self.assertEqual(nginx.config_manager.location_template_default, '5')
        self.assertEqual(nginx.config_manager.location_template_router, '6')
        self.assertEqual(nginx.nginx_healthcheck_path, '7')
    @mock.patch('rpaas.nginx.requests')
    def test_init_config_location_url(self, requests):
        """Location templates given as URLs are fetched over HTTP at init."""
        def mocked_requests_get(*args, **kwargs):
            # minimal stand-in for requests.Response
            class MockResponse:
                def __init__(self, text, status_code):
                    self.text = text
                    self.status_code = status_code
            if args[0] == 'http://my.com/default':
                return MockResponse("my result default", 200)
            elif args[0] == 'http://my.com/router':
                return MockResponse("my result router", 200)
        with mock.patch('rpaas.nginx.requests.get', side_effect=mocked_requests_get) as requests_get:
            nginx = Nginx({
                'NGINX_LOCATION_TEMPLATE_DEFAULT_URL': 'http://my.com/default',
                'NGINX_LOCATION_TEMPLATE_ROUTER_URL': 'http://my.com/router',
            })
            self.assertEqual(nginx.config_manager.location_template_default, 'my result default')
            self.assertEqual(nginx.config_manager.location_template_router, 'my result router')
            expected_calls = [mock.call('http://my.com/default'),
                              mock.call('http://my.com/router')]
            requests_get.assert_has_calls(expected_calls)
    @mock.patch('rpaas.nginx.requests')
    def test_purge_location_successfully(self, requests):
        """A purge counts as successful if at least one request succeeds;
        both schemes and both cache headers are tried (4 requests)."""
        nginx = Nginx()
        response = mock.Mock()
        response.status_code = 200
        response.text = 'purged'
        side_effect = mock.Mock()
        side_effect.status_code = 404
        side_effect.text = "Not Found"
        requests.request.side_effect = [response, side_effect, response, side_effect]
        purged = nginx.purge_location('myhost', '/foo/bar')
        self.assertTrue(purged)
        self.assertEqual(requests.request.call_count, 4)
        expec_responses = []
        for scheme in ['http', 'https']:
            for header in self.cache_headers:
                expec_responses.append(mock.call('get', 'http://myhost:8089/purge/{}/foo/bar'.format(scheme),
                                                 headers=header, timeout=2))
        requests.request.assert_has_calls(expec_responses)
    @mock.patch('rpaas.nginx.requests')
    def test_purge_location_preserve_path_successfully(self, requests):
        """With preserve_path the URL is purged verbatim, once per header."""
        nginx = Nginx()
        response = mock.Mock()
        response.status_code = 200
        response.text = 'purged'
        requests.request.side_effect = [response]
        purged = nginx.purge_location('myhost', 'http://example.com/foo/bar', True)
        self.assertTrue(purged)
        self.assertEqual(requests.request.call_count, 2)
        expected_responses = []
        for header in self.cache_headers:
            expected_responses.append(mock.call('get', 'http://myhost:8089/purge/http://example.com/foo/bar',
                                                headers=header, timeout=2))
        requests.request.assert_has_calls(expected_responses)
    @mock.patch('rpaas.nginx.requests')
    def test_purge_location_not_found(self, requests):
        """All four purge attempts returning 404 means nothing was purged."""
        nginx = Nginx()
        response = mock.Mock()
        response.status_code = 404
        response.text = 'Not Found'
        requests.request.side_effect = [response, response, response, response]
        purged = nginx.purge_location('myhost', '/foo/bar')
        self.assertFalse(purged)
        self.assertEqual(requests.request.call_count, 4)
        expec_responses = []
        for scheme in ['http', 'https']:
            for header in self.cache_headers:
                expec_responses.append(mock.call('get', 'http://myhost:8089/purge/{}/foo/bar'.format(scheme),
                                                 headers=header, timeout=2))
        requests.request.assert_has_calls(expec_responses)
    @mock.patch('rpaas.nginx.requests')
    def test_wait_healthcheck(self, requests):
        """wait_healthcheck retries until the manage-port healthcheck passes."""
        nginx = Nginx()
        count = [0]
        response = mock.Mock()
        response.status_code = 200
        response.text = 'WORKING'
        def side_effect(method, url, timeout, **params):
            # first call fails, second succeeds
            count[0] += 1
            if count[0] < 2:
                raise Exception('some error')
            return response
        requests.request.side_effect = side_effect
        nginx.wait_healthcheck('myhost.com', timeout=5)
        self.assertEqual(requests.request.call_count, 2)
        requests.request.assert_called_with('get', 'http://myhost.com:8089/healthcheck', timeout=2)
    @mock.patch('rpaas.nginx.requests')
    def test_wait_app_healthcheck(self, requests):
        """manage_healthcheck=False targets the app-port healthcheck URL."""
        nginx = Nginx()
        count = [0]
        response = mock.Mock()
        response.status_code = 200
        response.text = '\n\nWORKING'
        def side_effect(method, url, timeout, **params):
            # first call fails, second succeeds
            count[0] += 1
            if count[0] < 2:
                raise Exception('some error')
            return response
        requests.request.side_effect = side_effect
        nginx.wait_healthcheck('myhost.com', timeout=5, manage_healthcheck=False)
        self.assertEqual(requests.request.call_count, 2)
        requests.request.assert_called_with('get', 'http://myhost.com:8080/_nginx_healthcheck/', timeout=2)
    @mock.patch('rpaas.nginx.requests')
    def test_wait_app_healthcheck_invalid_response(self, requests):
        """A 200 response whose body is not WORKING keeps failing until timeout."""
        nginx = Nginx()
        count = [0]
        response = mock.Mock()
        response.status_code = 200
        response.text = '\nFAIL\n'
        def side_effect(method, url, timeout, **params):
            count[0] += 1
            if count[0] < 2:
                raise Exception('some error')
            return response
        requests.request.side_effect = side_effect
        with self.assertRaises(NginxError):
            nginx.wait_healthcheck('myhost.com', timeout=5, manage_healthcheck=False)
        self.assertEqual(requests.request.call_count, 6)
        requests.request.assert_called_with('get', 'http://myhost.com:8080/_nginx_healthcheck/', timeout=2)
    @mock.patch('rpaas.nginx.requests')
    def test_wait_healthcheck_timeout(self, requests):
        """Persistent request errors propagate after the timeout elapses."""
        nginx = Nginx()
        def side_effect(method, url, timeout, **params):
            raise Exception('some error')
        requests.request.side_effect = side_effect
        with self.assertRaises(Exception):
            nginx.wait_healthcheck('myhost.com', timeout=2)
        self.assertGreaterEqual(requests.request.call_count, 2)
        requests.request.assert_called_with('get', 'http://myhost.com:8089/healthcheck', timeout=2)
    @mock.patch('os.path')
    @mock.patch('rpaas.nginx.requests')
    def test_add_session_ticket_success(self, requests, os_path):
        """Session tickets are POSTed over TLS, verified against the CA cert."""
        nginx = Nginx({'CA_CERT': 'cert data'})
        os_path.exists.return_value = True
        response = mock.Mock()
        response.status_code = 200
        response.text = '\n\nticket was succsessfully added'
        requests.request.return_value = response
        nginx.add_session_ticket('host-1', 'random data', timeout=2)
        requests.request.assert_called_once_with('post', 'https://host-1:8090/session_ticket', timeout=2,
                                                 data='random data', verify='/tmp/rpaas_ca.pem')
    @mock.patch('rpaas.nginx.requests')
    def test_missing_ca_cert(self, requests):
        """Adding a session ticket without a configured CA cert raises."""
        nginx = Nginx()
        with self.assertRaises(NginxError):
            nginx.add_session_ticket('host-1', 'random data', timeout=2)
|
Flat 2.31 acre parcel that will be split into 2- acre+ lots as per The City and R1-43 ESL requirements. Lot split in progress (2-MD-2019). Current list price is for acreage as is. When lot split is complete then each lot will be priced higher. Make your offer now.Dynamic views of the McDowell Mountains and a developed secluded luxury homes neighborhood far from the heavy traffic of Shea Blvd.This property has not been on the market since 1946. The Mayo Clinic is to the NE and the 101 Freeway and shopping to the west. Call for more information including aerials, survey info and drainage. Buyer to confirm all information that is pertinent to buyer.
Direction: Shea Blvd east to 124th St then south to Gold Dust Ave. East to 126th St and south to property on west side of road.
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
"""Starts a service for running portable beam pipelines.
The basic usage is simply
python -m apache_beam.runners.portability.local_job_service_main
Many other options are also supported, such as starting in the background or
passing in a lockfile to ensure that only one copy of the service is running
at a time. Pass --help to see them all.
"""
import argparse
import logging
import os
import pathlib
import signal
import subprocess
import sys
import time
from apache_beam.runners.portability import local_job_service
_LOGGER = logging.getLogger(__name__)
def run(argv):
  """Parse command-line options and run (or stop) the local job service.

  Modes:
    * default: start the service in the foreground and serve until killed;
    * --stop: kill the process recorded in --pid_file;
    * --background: re-invoke this module as a detached subprocess and wait
      for it to publish its port;
    * --stop --background: restart (stop the old service, start a new one).
  """
  if argv[0] == __file__:
    argv = argv[1:]
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '-p',
      '--port',
      '--job_port',
      type=int,
      default=0,
      help='port on which to serve the job api')
  parser.add_argument('--staging_dir')
  parser.add_argument(
      '--pid_file', help='File in which to store the process id of the server.')
  parser.add_argument(
      '--port_file', help='File in which to store the port of the server.')
  parser.add_argument(
      '--background',
      action='store_true',
      help='Start the server up as a background process.'
      ' Will fail if pid_file already exists, unless --stop is also specified.')
  parser.add_argument(
      '--stderr_file',
      help='Where to write stderr (if not specified, merged with stdout).')
  parser.add_argument(
      '--stdout_file', help='Where to write stdout for background job service.')
  parser.add_argument(
      '--stop',
      action='store_true',
      help='Stop the existing process, if any, specified in pid_file.'
      ' Will not start up a new service unless --background is specified.')
  options = parser.parse_args(argv)
  if options.stop:
    if not options.pid_file:
      raise RuntimeError('--pid_file must be specified with --stop')
    if os.path.exists(options.pid_file):
      with open(options.pid_file) as fin:
        pid = int(fin.read())
      print('Killing process at', pid)
      try:
        os.kill(pid, signal.SIGTERM)
      except Exception:
        print('Process', pid, 'already killed.')
      os.unlink(options.pid_file)
    else:
      print('Process id file', options.pid_file, 'already removed.')
    if not options.background:
      return
  if options.background:
    if not options.pid_file:
      # fixed: this error message previously referred to a nonexistent
      # --start flag
      raise RuntimeError('--pid_file must be specified with --background')
    if options.stop:
      argv.remove('--stop')
    # the child process must not fork again: strip --background from its argv
    argv.remove('--background')
    if not options.port_file:
      # derive a port file next to the pid file and forward it to the child
      options.port_file = os.path.splitext(options.pid_file)[0] + '.port'
      argv.append('--port_file')
      argv.append(options.port_file)
    if not options.stdout_file:
      raise RuntimeError('--stdout_file must be specified with --background')
    os.makedirs(pathlib.PurePath(options.stdout_file).parent, exist_ok=True)
    stdout_dest = open(options.stdout_file, mode='w')
    if options.stderr_file:
      os.makedirs(pathlib.PurePath(options.stderr_file).parent, exist_ok=True)
      stderr_dest = open(options.stderr_file, mode='w')
    else:
      stderr_dest = subprocess.STDOUT
    subprocess.Popen([
        sys.executable,
        '-m',
        'apache_beam.runners.portability.local_job_service_main'
    ] + argv,
                     stderr=stderr_dest,
                     stdout=stdout_dest)
    print('Waiting for server to start up...')
    # the child writes its port file once its gRPC server is listening
    while not os.path.exists(options.port_file):
      time.sleep(.1)
    with open(options.port_file) as fin:
      port = fin.read()
    print('Server started at port', port)
    return
  if options.pid_file:
    print('Writing process id to', options.pid_file)
    os.makedirs(pathlib.PurePath(options.pid_file).parent, exist_ok=True)
    # O_EXCL makes startup fail if another instance already holds the pid file
    fd = os.open(options.pid_file, os.O_CREAT | os.O_EXCL | os.O_RDWR)
    with os.fdopen(fd, 'w') as fout:
      fout.write(str(os.getpid()))
  try:
    job_servicer = local_job_service.LocalJobServicer(options.staging_dir)
    port = job_servicer.start_grpc_server(options.port)
    try:
      if options.port_file:
        print('Writing port to', options.port_file)
        os.makedirs(pathlib.PurePath(options.port_file).parent, exist_ok=True)
        # write-then-rename so readers never observe a partially written file
        with open(options.port_file + '.tmp', 'w') as fout:
          fout.write(str(port))
        os.rename(options.port_file + '.tmp', options.port_file)
      serve("Listening for beam jobs on port %d." % port, job_servicer)
    finally:
      job_servicer.stop()
  finally:
    # best-effort cleanup of the pid/port files on the way out
    if options.pid_file and os.path.exists(options.pid_file):
      os.unlink(options.pid_file)
    if options.port_file and os.path.exists(options.port_file):
      os.unlink(options.port_file)
def serve(msg, job_servicer):
    """Block forever, periodically re-logging *msg* as a liveness heartbeat.

    The delay between log lines grows geometrically (x1.25) so long-running
    servers do not flood the log. Never returns; the process is expected to
    be terminated externally (see the SIGTERM handler in __main__).
    """
    delay_seconds = 30
    while True:
        _LOGGER.info(msg)
        time.sleep(delay_seconds)
        delay_seconds *= 1.25
if __name__ == '__main__':
    # Exit cleanly on SIGTERM so run()'s finally-block cleanup (removing the
    # pid/port files) still executes when the server is stopped externally.
    signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
    logging.basicConfig()
    logging.getLogger().setLevel(logging.INFO)
    run(sys.argv)
|
Judy Marek was described as a strong and giving person and avid Clemson fan.
Judy D. Marek, an engineering lecturer in Clemson's College of Engineering, Computing and Applied Sciences, died of brain cancer on Jan. 26, a university press release said.
Marek was 54 and a graduate of Clemson. The South Carolina native was a member of the Chi Omega sorority at Clemson.
Jonna Mcgrath Rowe, a close friend of Marek's from the church she attended in Georgia, said Marek spoke about Clemson "with a gleam in her eye and a smile on her face."
Rowe said Marek was the "glue" that kept their church group together and said she was a strong Christian.
"She was an amazing person and amazing friend," Rowe said. "We are going to have to adjust to a new normal without her."
Marek had always dreamed of going back to Clemson to teach. She started at the university in the fall and was diagnosed with brain cancer in October, Rowe said.
"It brought her joy and fulfilled her to give back to the school," Rowe said.
Before becoming a lecturer at Clemson, Marek worked as an adjunct instructor at Tri-County Technical College. Her career also involved experience at TailorMade Executive, Lucent Technologies, and AT&T Laboratories, according to her Clemson profile.
"A loss in the Clemson Family is felt by us all," Anand Gramopadhye, the dean of the College of Engineering, Computing and Applied Sciences, said in a statement. "On behalf of the college, I would like to extend our deepest sympathies to her family and friends."
Marek is survived by her mom, two children and her husband of 31 years, Todd Marek, who serves as executive director of the Watt Family Innovation Center.
Services for Marek are scheduled for 4 p.m. on Jan. 31 at East Cobb Presbyterian Church in Marietta, Georgia. In lieu of flowers, the family asked for donations to Clemson’s Women in Science and Engineering (WISE) program.
"I am in awe that they would choose our program," said Serita Acker, director for Programs for Educational Enrichment and Retention and WISE at Clemson. "I am really honored. She believed in the cause of women progressing in the STEM field and this is a testament to that."
|
# Copyright 2016 - Alcatel-Lucent
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Defines interface for Graph access and manipulation
Functions in this module are imported into the vitrage.graph namespace.
Call these functions from vitrage.graph namespace and not the
vitrage.graph.driver namespace.
"""
import abc
import copy
from vitrage.graph.driver.elements import Edge
from vitrage.graph.driver.elements import Vertex
from vitrage.graph.driver.notifier import Notifier
class Direction(object):
    # Edge-direction selector used by Graph.get_edges() and Graph.neighbors().
    OUT = 1   # only edges leaving the given vertex
    IN = 2    # only edges entering the given vertex
    BOTH = 3  # edges in either direction
class Graph(object, metaclass=abc.ABCMeta):
    """Abstract base class defining the graph access and manipulation API.

    Concrete drivers (e.g. an NetworkX-backed graph) implement the abstract
    methods; change notification is delegated to a Notifier instance.
    """

    def __init__(self, name, graph_type, vertices=None, edges=None):
        """Create a Graph instance
        :type name: str
        :type graph_type: str
        :type vertices: list of Vertex
        :type edges: list of Edge
        :rtype: Graph
        """
        # NOTE(review): vertices/edges are accepted but not stored or added
        # here — presumably concrete subclasses consume them; confirm.
        self.name = name
        self.graph_type = graph_type
        self.notifier = Notifier()

    def subscribe(self, function, finalization=False):
        """Subscribe to graph changes
        :param function: function will be called after each graph change
        :param finalization: function will be called after all non finalization
        Usage Example:
        graph = NXGraph()
        graph.subscribe(foo1, finalization=True)
        graph.subscribe(foo2, finalization=False)
        graph.subscribe(foo3, finalization=False)
        The order of the calls in this example will be:
        1. foo2
        2. foo3
        3. foo1
        foo1 is called last because it subscribed as a finalization function
        """
        self.notifier.subscribe(function, finalization)

    def is_subscribed(self):
        """Return True if any function is subscribed to graph changes."""
        return self.notifier.is_subscribed()

    def get_item(self, item):
        """Fetch the graph's own copy of the given Edge or Vertex.

        Returns None implicitly for any other item type.
        """
        if isinstance(item, Edge):
            return self.get_edge(item.source_id, item.target_id, item.label)
        if isinstance(item, Vertex):
            return self.get_vertex(item.vertex_id)

    @property
    def algo(self):
        """Get graph algorithms
        :rtype: GraphAlgorithm
        """
        # Base class exposes no algorithms; drivers may override.
        return None

    @abc.abstractmethod
    def copy(self):
        """Create a copy of the graph
        :return: A copy of the graph
        :rtype: Graph
        """
        pass

    @abc.abstractmethod
    def num_vertices(self):
        """Number of vertices in the graph
        :return:
        :rtype: int
        """
        pass

    @abc.abstractmethod
    def num_edges(self):
        """Number of edges in the graph
        :return:
        :rtype: int
        """
        pass

    @abc.abstractmethod
    def add_vertex(self, v):
        """Add a vertex to the graph
        A copy of Vertex v will be added to the graph.
        Example:
        --------
        graph = Graph()
        v = Vertex(vertex_id=1, properties={prop_key:prop_value})
        graph.add_vertex(v)
        :param v: the vertex to add
        :type v: Vertex
        """
        pass

    def add_vertices(self, vertices):
        """Add a list of vertices to the graph
        Uses add_vertex to add each vertex
        :param vertices:
        :type vertices:list of Vertex
        """
        if not vertices:
            return
        for v in vertices:
            self.add_vertex(v)

    @abc.abstractmethod
    def add_edge(self, e):
        """Add an edge to the graph
        A copy of Edge e will be added to the graph.
        Example:
        --------
        graph = Graph()
        v1_prop = {'prop_key':'some value for my first vertex'}
        v2_prop = {'prop_key':'another value for my second vertex'}
        v1 = Vertex(vertex_id=1, properties=v1_prop)
        v2 = Vertex(vertex_id=2, properties=v2_prop)
        graph.add_vertex(v1)
        graph.add_vertex(v2)
        e_prop = {'edge_prop':'and here is my edge property value'}
        e = Edge(source_id=v1.vertex_id, target_id=v2.vertex_id,
                 label='BELONGS', properties=e_prop)
        graph.add_edge(e)
        :param e: the edge to add
        :type e: Edge
        """
        pass

    def add_edges(self, edges):
        """Add a list of edges to the graph
        Uses add_edge to add each edge
        :param edges:
        :type edges:list of Edge
        """
        if not edges:
            return
        for e in edges:
            self.add_edge(e)

    @abc.abstractmethod
    def get_vertex(self, v_id):
        """Fetch a vertex from the graph
        :param v_id: vertex id
        :type v_id: str
        :return: the vertex or None if it does not exist
        :rtype: Vertex
        """
        pass

    @abc.abstractmethod
    def get_edge(self, source_id, target_id, label):
        """Fetch an edge from the graph,
        Fetch an edge from the graph, according to its two vertices and label
        :param source_id: vertex id of the source vertex
        :type source_id: str or None
        :param target_id: vertex id of the target vertex
        :type target_id: str
        :param label: the label property of the edge
        :type label: str or None
        :return: The edge between the two vertices or None
        :rtype: Edge
        """
        pass

    @abc.abstractmethod
    def get_edges(self,
                  v1_id,
                  v2_id=None,
                  direction=Direction.BOTH,
                  attr_filter=None):
        """Fetch multiple edges from the graph,
        Fetch all edges from the graph, according to its two vertices.
        If only one vertex id is given it finds all the edges from this vertex
        to all other vertices.
        If two vertices ids are given it finds all the edges between those two
        vertices.
        EXAMPLE
        -------
        v2_edges1 = g.get_edges(
            v_id=v2.vertex_id,
            attr_filter={'LABEL': 'ON'})
        v2_edges2 = g.get_edges(
            v_id=v2.vertex_id,
            attr_filter={'LABEL': ['ON', 'WITH']})
        :param v1_id: first vertex id of vertex
        :type v1_id: str
        :param v2_id: second vertex id of vertex
        :type v2_id: str
        :param direction: specify In/Out/Both for edge direction
        :type direction: int
        :param attr_filter: expected keys and values
        :type attr_filter: dict
        :return: All edges matching the requirements
        :rtype: set of Edge
        """
        pass

    @abc.abstractmethod
    def update_vertex(self, v, overwrite=True):
        """Update the vertex properties
        Update an existing vertex and create it if non existing.
        :param v: the vertex with the new data
        :type v: Vertex
        :param overwrite: whether to overwrite existing properties
        :type overwrite: Boolean
        """
        pass

    def update_vertices(self, vertices):
        """For each vertex, update its properties
        For each existing vertex, update its properties and create it if
        non existing.
        :param vertices: the vertex with the new data
        :type vertices: List
        """
        for v in vertices:
            self.update_vertex(v)

    @abc.abstractmethod
    def update_edge(self, e):
        """Update the edge properties
        Update an existing edge and create it if non existing.
        :param e: the edge with the new data
        :type e: Edge
        """
        pass

    @staticmethod
    def _merged_properties(base_props, updated_props, overwrite):
        # Helper for update_*: decides which of updated_props survive a merge.
        if base_props is None:
            return copy.copy(updated_props)
        else:
            # Return all updated properties if overwrite is true, or only the
            # new properties otherwise
            return {k: v for k, v in updated_props.items()
                    if overwrite or k not in base_props}

    @abc.abstractmethod
    def remove_vertex(self, v):
        """Remove Vertex v and its edges from the graph
        :type v: Vertex
        """
        pass

    @abc.abstractmethod
    def remove_edge(self, e):
        """Remove an edge from the graph
        :type e: Edge
        """
        pass

    @abc.abstractmethod
    def get_vertices(self,
                     vertex_attr_filter=None,
                     query_dict=None):
        """Get vertices list with an optional match filter
        To filter the vertices, specify property values for
        the vertices
        Example:
        --------
        graph = Graph()
        v1_prop = {'prop_key':'some value for my first vertex'}
        v2_prop = {'prop_key':'another value for my second vertex'}
        v3_prop = {'prop_key':'YES'}
        v1 = Vertex(vertex_id=1, properties=v1_prop)
        v2 = Vertex(vertex_id=2, properties=v2_prop)
        v3 = Vertex(vertex_id=3, properties=v3_prop)
        graph.add_vertex(v1)
        graph.add_vertex(v2)
        graph.add_vertex(v3)
        all_vertices = graph.get_vertices()
        for v in all_vertices:
            do something with v
        filtered_vertices_list = graph.get_vertices(
            vertex_attr_filter={'prop_key':['YES']})
        :param vertex_attr_filter: expected keys and values
        :type vertex_attr_filter dict
        :param query_dict: expected query
        :type query_dict dict
        :return: A list of vertices that match the requested query
        :rtype: list of Vertex
        """
        pass

    @abc.abstractmethod
    def get_vertices_by_key(self,
                            key_values_hash):
        """Get vertices list according to their hash key
        The hash key is derived from their properties :
        See processor_utils - get_defining_properties
        :param key_values_hash: hash key
        :type key_values_hash str
        """
        pass

    @abc.abstractmethod
    def neighbors(self, v_id, vertex_attr_filter=None,
                  edge_attr_filter=None, direction=Direction.BOTH):
        """Get vertices that are neighboring to v_id vertex
        To filter the neighboring vertices, specify property values for
        the vertices or for the edges connecting them.
        Example:
        --------
        graph = Graph()
        v1_prop = {'prop_key':'some value for my first vertex'}
        v2_prop = {'prop_key':'another value for my second vertex'}
        v3_prop = {'prop_key':'YES'}
        v1 = Vertex(vertex_id=1, properties=v1_prop)
        v2 = Vertex(vertex_id=2, properties=v2_prop)
        v3 = Vertex(vertex_id=3, properties=v3_prop)
        graph.add_vertex(v1)
        graph.add_vertex(v2)
        graph.add_vertex(v3)
        e_prop = {'edge_prop':'and here is my edge property value'}
        e1 = Edge(source_id=v1.vertex_id, target_id=v2.vertex_id,
                  label='BELONGS', properties=e_prop)
        e2 = Edge(source_id=v1.vertex_id, target_id=v3.vertex_id,
                  label='ON', properties=e_prop)
        graph.add_edge(e1)
        graph.add_edge(e2)
        vertices_list1 = graph.neighbors(v_id=v1.vertex_id,
                               vertex_attr_filter={'prop_key':'YES'},
                               edge_attr_filter={'LABEL':'ON'})
        vertices_list2 = graph.neighbors(v_id=v1.vertex_id,
                               vertex_attr_filter={'prop_key':['YES', 'CAT']},
                               edge_attr_filter={'LABEL':['ON', 'WITH']})
        :param direction:
        :param v_id: vertex id
        :type v_id: str
        :param vertex_attr_filter: expected keys and values
        :type vertex_attr_filter dict
        :param edge_attr_filter: expected keys and values
        :type edge_attr_filter: dict
        :return: A list of vertices that match the requested query
        :rtype: list of Vertex
        """
        pass

    @abc.abstractmethod
    def json_output_graph(self, **kwargs):
        """Serialize the graph to a JSON representation (driver-specific)."""
        pass

    @abc.abstractmethod
    def union(self, other_graph):
        """Merge other_graph's vertices and edges into this graph."""
        pass
|
There was a time when African female writers were discriminated against. I remember reading once that a few publishers rejected Tsitsi Dangarembga’s Nervous Conditions because she portrayed the lives of black women too negatively; and Buchi Emecheta was said to have been too bold in her portrayal of women in her novels. Things are obviously changing now because there are so many amazing female writers out there - of the 12 shortlisted Commonwealth’s regional Best Book and Best First Book awards in 2011, 9 were African women.
There are quite a few out there but here are some amazing female African writers.
I need to read Neighbours and Migritude. Thanks for this post. Love seeing my people like this.
Some great authors listed here. I've loved the ones that I've read, and have yet to read Shailja Patel, Wambui Mwagani, and Lilia Momple. Given how much I loved the others, I really ought to try these ones soon!
You're welcome Kinna. I'm yet to read Migritude as well. It's on my ever-growing wish list.
I know Amy. They are all amazing authors.
|
from peachpy.x86_64 import *
from peachpy import *
import common.YepStatus as YepStatus
from common.pipeline import software_pipelined_loop
from common.instruction_selection import *
def binop_IVV_IV(arg_x, arg_y, arg_n, op, isa_ext):
    """Emit a PeachPy kernel computing x[i] = op(x[i], y[i]) in place.

    Layout: a scalar prologue aligns the x pointer to the SIMD register size,
    a software-pipelined SIMD body (unrolled 5x) processes the bulk, and a
    scalar epilogue handles the remainder.

    :param arg_x: in/out array argument (also determines element type)
    :param arg_y: second input array argument
    :param arg_n: element count argument
    :param op: abstract operation selector passed to instruction selection
    :param isa_ext: one of "SSE", "AVX", "AVX2"

    NOTE(review): size arithmetic below uses '/', which is float true-division
    on Python 3; this generator presumably targets Python 2 (int division) —
    confirm before porting.
    """
    # First we set some constants based on the input/output types
    # so that we can use the same code for any input/output
    # type combination
    input_type = arg_x.c_type.base
    output_type = arg_x.c_type.base
    input_type_size = arg_x.c_type.base.size
    output_type_size = arg_x.c_type.base.size
    unroll_factor = 5
    simd_register_size = { "AVX2": YMMRegister.size,
                           "AVX" : YMMRegister.size,
                           "SSE" : XMMRegister.size }[isa_ext]
    # Pick concrete load/op/store instructions for scalar and SIMD paths.
    SCALAR_LOAD, SCALAR_OP, SCALAR_STORE = scalar_instruction_select(input_type, output_type, op, isa_ext)
    SIMD_LOAD, SIMD_OP, SIMD_STORE = vector_instruction_select(input_type, output_type, op, isa_ext)
    reg_x_scalar, reg_y_scalar = scalar_reg_select(output_type, isa_ext)
    simd_accs, simd_ops = vector_reg_select(isa_ext, unroll_factor)
    ret_ok = Label()
    ret_null_pointer = Label()
    ret_misaligned_pointer = Label()
    # Load args and test for null pointers and invalid arguments
    reg_length = GeneralPurposeRegister64() # Keeps track of how many elements are left to process
    LOAD.ARGUMENT(reg_length, arg_n)
    TEST(reg_length, reg_length)
    JZ(ret_ok) # Check there is at least 1 element to process
    reg_x_addr = GeneralPurposeRegister64()
    LOAD.ARGUMENT(reg_x_addr, arg_x)
    TEST(reg_x_addr, reg_x_addr) # Make sure arg_x is not null
    JZ(ret_null_pointer)
    TEST(reg_x_addr, output_type_size - 1) # Check that our output arr is aligned
    JNZ(ret_misaligned_pointer)
    reg_y_addr = GeneralPurposeRegister64()
    LOAD.ARGUMENT(reg_y_addr, arg_y)
    TEST(reg_y_addr, reg_y_addr) # Make sure arg_y is not null
    JZ(ret_null_pointer)
    align_loop = Loop() # Loop to align one of the addresses
    scalar_loop = Loop() # Processes remainder elements (if n % 8 != 0)
    # Aligning on X addr
    # Process elements 1 at a time until z is aligned on YMMRegister.size boundary
    TEST(reg_x_addr, simd_register_size - 1) # Check if already aligned
    JZ(align_loop.end) # If so, skip this loop entirely
    with align_loop:
        SCALAR_LOAD(reg_x_scalar, [reg_x_addr])
        SCALAR_LOAD(reg_y_scalar, [reg_y_addr])
        SCALAR_OP(reg_x_scalar, reg_x_scalar, reg_y_scalar)
        SCALAR_STORE([reg_x_addr], reg_x_scalar)
        ADD(reg_x_addr, output_type_size)
        ADD(reg_y_addr, output_type_size)
        SUB(reg_length, 1)
        JZ(ret_ok)
        TEST(reg_x_addr, simd_register_size - 1)
        JNZ(align_loop.begin)
    reg_x_addr_out = GeneralPurposeRegister64()
    MOV(reg_x_addr_out, reg_x_addr)
    # Batch loop for processing the rest of the array in a pipelined loop
    # Four columns: load-x, load-y, compute, store — offset by one stage each
    # so independent iterations overlap in the pipeline.
    instruction_columns = [InstructionStream(), InstructionStream(), InstructionStream(), InstructionStream()]
    instruction_offsets = (0, 1, 2, 3)
    for i in range(unroll_factor):
        with instruction_columns[0]:
            SIMD_LOAD(simd_accs[i], [reg_x_addr + i * simd_register_size * input_type_size / output_type_size])
        with instruction_columns[1]:
            SIMD_LOAD(simd_ops[i], [reg_y_addr + i * simd_register_size * input_type_size / output_type_size])
        with instruction_columns[2]:
            SIMD_OP(simd_accs[i], simd_accs[i], simd_ops[i])
        with instruction_columns[3]:
            SIMD_STORE([reg_x_addr_out + i * simd_register_size], simd_accs[i])
    with instruction_columns[0]:
        ADD(reg_x_addr, simd_register_size * unroll_factor * input_type_size / output_type_size)
    with instruction_columns[1]:
        ADD(reg_y_addr, simd_register_size * unroll_factor * input_type_size / output_type_size)
    with instruction_columns[3]:
        ADD(reg_x_addr_out, simd_register_size * unroll_factor * input_type_size / output_type_size)
    software_pipelined_loop(reg_length, unroll_factor * simd_register_size / output_type_size, instruction_columns, instruction_offsets)
    # Check if there are leftover elements that were not processed in the pipelined loop
    # This loop should iterate at most #(elems processed per iteration in the batch loop) - 1 times
    TEST(reg_length, reg_length)
    JZ(scalar_loop.end)
    with scalar_loop: # Process the remaining elements
        SCALAR_LOAD(reg_x_scalar, [reg_x_addr])
        SCALAR_LOAD(reg_y_scalar, [reg_y_addr])
        SCALAR_OP(reg_x_scalar, reg_x_scalar, reg_y_scalar)
        SCALAR_STORE([reg_x_addr], reg_x_scalar)
        ADD(reg_x_addr, output_type_size)
        ADD(reg_y_addr, output_type_size)
        SUB(reg_length, 1)
        JNZ(scalar_loop.begin)
    with LABEL(ret_ok):
        RETURN(YepStatus.YepStatusOk)
    with LABEL(ret_null_pointer):
        RETURN(YepStatus.YepStatusNullPointer)
    with LABEL(ret_misaligned_pointer):
        RETURN(YepStatus.YepStatusMisalignedPointer)
|
We stock a great range of diarrhoea treatment products ranging from our best sellers, Imodium Capsules which can stop diarrhoea quickly and effectively to Dioralyte Sachets which contain rehydration salts consisting of sugars and salts to replace body fluids lost as a result of acute diarrhoea. Check out our full range of products below.
Loperamide is a medication that is used for the relief of acute diarrhoea and the management of chronic diarrhoea in patients with inflammatory bowel disease. Maximum order per customer for this product is 4 packs. Larger orders will be reduced to a maximum of 4.
IMODIUM® Original capsules for the symptomatic treatment of acute diarrhoea in adults and children aged 12 years and over. Imodium works by slowing down your bowel and returning it to its normal rhythm.
They’re specially designed to give quick relief in under one hour, when you're on the go, as they dissolve instantly on your tongue, so you don’t need to take them with water.
Imodium Instants are used for the symptomatic relief of diarrhoea only and are not suitable for rehydration therapy. If symptoms persist for more than 24 hours, consult your doctor.
Imodium oral syrup solution is used to treat sudden short-lived (acute) attacks of diarrhoea in adults and children over 4 years of age and to treat long-lasting (chronic) diarrhoea in adults only.
|
import agents as ag
#testsdjhfakdjfhds
def HW2Agent() -> object:
    """Build a reflex vacuum agent that sweeps a rectangular room.

    The agent sucks whenever the current square is dirty; otherwise a small
    state machine (program.counter) drives a zig-zag wall-following sweep,
    using bump percepts to detect walls and switch direction.

    :return: an agents.Agent wrapping the program function
    """
    def program(percept):
        # Percepts arrive as (bump, status) with bump in {'Bump', 'None'}
        # and status in {'Dirty', 'Clean'}.
        bump, status = percept
        if status == 'Dirty':
            action = 'Suck'
        else:
            if program.counter == 0:
                if bump == 'Bump':
                    program.counter += 1
                    action = 'Right'
                else:
                    action = 'Down'
            elif program.counter == 1:
                if bump == 'Bump':
                    program.counter = 6
                    action = 'Up'
                else:
                    program.counter += 1
                    action = 'Up'
            elif program.counter == 2:
                # BUG FIX: percepts report 'Bump'/'None', never 'Up'. The
                # original compared bump against 'Up', so this state could
                # never advance to state 3 and the sweep stalled here.
                if bump == 'Bump':
                    program.counter += 1
                    action = 'Up'
                else:
                    action = 'Up'
            elif program.counter == 3:
                if bump == 'Bump':
                    program.counter = 7
                    action = 'Left'
                else:
                    program.counter = 0
                    action = 'Down'
            # Skipping 4 and 5 because it's similar to 1 and 3
            elif program.counter == 6:
                if bump == 'Bump':
                    program.counter += 1
                    action = 'Left'
                else:
                    action = 'Up'
            elif program.counter == 7:
                if bump == 'Bump':
                    program.counter = 3
                    action = 'Right'
                else:
                    program.counter += 1
                    action = 'Down'
            elif program.counter == 8:
                if bump == 'Bump':
                    program.counter += 1
                    action = 'Left'
                else:
                    action = 'Down'
            elif program.counter == 9:
                if bump == 'Bump':
                    program.counter = 1
                    action = 'Right'
                else:
                    program.counter = 6
                    action = 'Up'
        # Record history so future decisions could consult past behavior.
        program.oldPercepts.append(percept)
        program.oldActions.append(action)
        return action
    # assign static variables here
    program.oldPercepts = [('None', 'Clean')]
    program.oldActions = ['Left', 'Right']
    program.counter = 0
    agt = ag.Agent(program)
    # assign class attributes here:
    # agt.direction = ag.Direction('left')
    return agt
|
YARD SALE Saturday April 20th, 8:00 a.m. – noon. 614 Road 1. Variety of items, including kid stuff, pet stuff, some furniture, and a kayak! No early birds please.
|
# -*- coding: utf-8 -*-
"""
Lower all conversions between native values <-> objects to calls to a
runtime.conversion module set in the environment.
"""
from __future__ import print_function, division, absolute_import
from pykit import types
from pykit.ir import transform, GlobalValue, Module, Builder
def build_conversion_table(convertable=types.scalar_set):
    """Returns { (from_type, to_type) -> funcname }

    For every convertible native type T, two entries are produced:
    (T, Object) -> "object_from_<t>" and (Object, T) -> "<t>_from_object".
    """
    table = {}
    for native_type in convertable:
        name = types.typename(native_type).lower()
        table[(native_type, types.Object)] = "object_from_%s" % name
        table[(types.Object, native_type)] = "%s_from_object" % name
    return table
def conversion_runtime(convertable=types.scalar_set):
    """Returns a Module with declared external runtime conversion functions

    Each (from_type, to_type) pair from build_conversion_table() becomes an
    external GlobalValue with signature to_type(from_type).
    """
    table = build_conversion_table(convertable)
    mod = Module()
    # BUG FIX: dict.iteritems() is Python-2-only; .items() works on both and
    # matches this module's __future__-based py2/py3 compatibility intent.
    for (from_type, to_type), funcname in table.items():
        signature = types.Function(to_type, [from_type])
        gv = GlobalValue(funcname, signature, external=True, address=0)
        mod.add_global(gv)
    return mod
class LowerConversions(object):
    """Transform visitor that rewrites 'convert' ops into runtime calls.

    Applied via pykit.ir.transform: for each convert op whose (to, from)
    type pair appears in the conversion table, the op is replaced with an
    external call to the corresponding runtime conversion function.
    """
    def __init__(self, func, conversion_table):
        # conversion_table: {(from_type, to_type) -> funcname}, see
        # build_conversion_table().
        self.conversion_table = conversion_table
        self.builder = Builder(func)
    def op_convert(self, op):
        arg = op.args[0]
        if (op.type, arg.type) in self.conversion_table:
            funcname = self.conversion_table[op.type, arg.type]
            # Returning a new value makes transform() replace the op.
            return self.builder.gen_call_external(funcname, [arg])
def run(func, env):
    """Lower all convert ops in *func* to runtime conversion calls.

    Lazily installs the conversion runtime module into the environment
    (and links it into func's module) on first use.
    """
    runtime = env.get("runtime.conversion")
    if not runtime:
        runtime = conversion_runtime()
        env["runtime.conversion"] = runtime
        func.module.link(runtime)
    transform(LowerConversions(func, build_conversion_table()), func)
|
I have given over 30 talks on the future of food and eating, on stages such as The Royal Society with Prof. Brian Cox, The Royal Institution, Tech conferences, TEDxHackney and TEDxMogadishu.
I’m passionate about making a positive impact in the world and interested in speaking to a broad variety of audiences.
For public speaking deals, please contact Laurinci or send me your request here.
|
#!/usr/bin/env python
# --------------------------------------------------------------------
from mpi4py import MPI
# Ignore SIGPIPE so piping this script's output (e.g. into `head`) does not
# kill it with a traceback; guarded because SIGPIPE is platform-dependent.
try:
    from signal import signal, SIGPIPE, SIG_IGN
    signal(SIGPIPE, SIG_IGN)
except ImportError:
    pass
# --------------------------------------------------------------------
# Make docutils visitors silently skip unknown node types instead of raising,
# so epydoc can process reST trees produced by newer docutils versions.
try:
    from docutils.nodes import NodeVisitor
    NodeVisitor.unknown_visit = lambda self, node: None
    NodeVisitor.unknown_departure = lambda self, node: None
except ImportError:
    pass
try: # epydoc 3.0.1 + docutils 0.6
    from docutils.nodes import Text
    try:
        from collections import UserString
    except ImportError:
        from UserString import UserString
    # docutils 0.6 changed Text so it no longer exposes a mutable .data
    # attribute; re-expose it as a property backed by astext().
    # NOTE(review): isinstance(Text, UserString) is always False because Text
    # is a class object, not an instance — issubclass() was probably intended.
    # Confirm before changing; as written the patch is applied unconditionally.
    if not isinstance(Text, UserString):
        def Text_get_data(s):
            try:
                return s._data
            except AttributeError:
                return s.astext()
        def Text_set_data(s, d):
            s.astext = lambda: d
            s._data = d
        Text.data = property(Text_get_data, Text_set_data)
except ImportError:
    pass
# --------------------------------------------------------------------
from epydoc.docwriter import dotgraph
import re
# Accept graphviz version banners both with and without "- Graphviz ".
dotgraph._DOT_VERSION_RE = \
    re.compile(r'dot (?:- Graphviz )version ([\d\.]+)')
try:
    # Newer epydoc: just flip the default image format from GIF to PNG.
    dotgraph.DotGraph.DEFAULT_HTML_IMAGE_FORMAT
    dotgraph.DotGraph.DEFAULT_HTML_IMAGE_FORMAT = 'png'
except AttributeError:
    # Older epydoc without that attribute: wrap the two methods that hard-code
    # GIF and rewrite file names / dot options to PNG before delegating.
    DotGraph_to_html = dotgraph.DotGraph.to_html
    DotGraph_run_dot = dotgraph.DotGraph._run_dot
    def to_html(self, image_file, image_url, center=True):
        if image_file[-4:] == '.gif':
            image_file = image_file[:-4] + '.png'
        if image_url[-4:] == '.gif':
            image_url = image_url[:-4] + '.png'
        return DotGraph_to_html(self, image_file, image_url)
    def _run_dot(self, *options):
        if '-Tgif' in options:
            opts = list(options)
            for i, o in enumerate(opts):
                if o == '-Tgif': opts[i] = '-Tpng'
            options = type(options)(opts)
        return DotGraph_run_dot(self, *options)
    dotgraph.DotGraph.to_html = to_html
    dotgraph.DotGraph._run_dot = _run_dot
# --------------------------------------------------------------------
import re
# Replacement for epydoc's docstring signature regex: additionally accepts
# "cls"/"mcs" as the implicit first parameter and an optional "->" return.
_SIGNATURE_RE = re.compile(
    # Class name (for builtin methods)
    r'^\s*((?P<class>\w+)\.)?' +
    # The function name
    r'(?P<func>\w+)' +
    # The parameters
    r'\(((?P<self>(?:self|cls|mcs)),?)?(?P<params>.*)\)' +
    # The return value (optional)
    r'(\s*(->)\s*(?P<return>\S.*?))?'+
    # The end marker
    r'\s*(\n|\s+(--|<=+>)\s+|$|\.\s+|\.\n)')
from epydoc import docstringparser as dsp
dsp._SIGNATURE_RE = _SIGNATURE_RE
# --------------------------------------------------------------------
import sys, os
import epydoc.cli
def epydocify():
    """Run epydoc's CLI using the epydoc.cfg located next to this script."""
    config_path = os.path.join(os.path.dirname(__file__), 'epydoc.cfg')
    sys.argv.append('--config=' + config_path)
    epydoc.cli.cli()
if __name__ == '__main__':
    epydocify()
# --------------------------------------------------------------------
|
I shared my word of the year on Monday, and with that theme in mind, I've been busy plotting out goals that I can accomplish in smaller chunks. I'm trying to think of 2018 in months and weeks rather than seasons, or the whole thing, because I need to be better about fixing the little to produce results in the big. It just seems like a "thing" I need to iron out, ya know?
I don't know about you, but I love seeing what everyone is hoping to accomplish each month. It's an inspiring thing to me - to see what individuals strive for, and how their goals might ignite some of my own! So first, here's what I really want out of 2018.
My biggest goal with this one is to switch from a mindset of "getting by"...of existing any way we possibly can, to preparing our assets to afford us the big life things we want to accomplish. We'd love to be financially equipped to get a bigger house, to take more vacations, and even adopt...and making this shift in how we spend and save will be huge for us, I think.
I technically know how to be healthy, and specifically what my body prefers and needs, but I often choose not to do it. Because I'm tired. And then I'm tired because I treated my body poorly. And around and around it goes. No more! Long term health is at stake, here...I feel as though our 30's and 40's really set the stage for our longevity. I also want to eliminate as many toxins as I can from my cleansing and beauty products.
I want to simplify, organize, and declutter my home, but I also want to organize my daily life and family functions. I'm just so sick of the chaotic nature of how we go through our days, and I need some more calm and preparedness in the pace of motherhood. Even though pre-planning goes against my nature, I think I need to accept that I need it for now, and as everything, it's just a phase.
In the past year, I've gotten some striking clarity in what I believe and how I use my beliefs in my every day life. I've learned how to use my faith and certain practices to navigate life in a new, connected sort of way...and I'm constantly reminding my heart to be more intuitive and less analytical. Not to the point that I change who I am - but that I'm not always getting in my own way. I want to study things that have come to mean more to me, and I want to be more intentional, knowledgeable, and refined in my practices.
I have been enamored with this idea of purpose - for a long, long time. My senior quote in high school was about purpose, and 15 years later, I felt like I didn't have any more of a grasp on it. I'm coming into more understanding about it now, and I know what I want and how I want it. And this year? I'm setting myself up to let go of the things that aren't serving this purpose, as well as making time to work on it. It's kind of a thrilling feeling, really.
And what would a new year be without a motto?
"You know what? Who cares."
I'm trying to apply it to all the places where I'd normally get tightly wound, or disappointed that something didn't go the way I planned, or facing the negative way someone might view me. Because you know what? We are always exactly where we are meant to be, and every single thing that happens to us in life crafts who we are meant to become. We go through it all to learn the specific things that cause our personal growth and expansion, and that's the whole reason we are here!
What's the point in letting our nervous system get all firey over it?
Here is how all of this translates into my January goals.
1. Start using our family budget and keeping careful details about it.
2. Start honoring my daily morning routine and not tossing it aside because "I don't feel like it."
3. Drop dairy. Much much more on this soon.
4. Read a book on money, a book on spirituality, and a book on astrology.
5. Percolate & Meditate every weekday. More on this coming soon, too!
6. Wrap up my MJ business details & prepare for 2018 business.
7. Blog goals: post every MWF, keep my planned work hours, and get my new series' going.
9. Use crystals every day.
Bonus: Practice yoga 2x-3x a week & start using my diastasis recti dvd.
I know number ten is vague, but it's a reminder to be all in on my tasks at hand. Just do what I'm doing, not worrying about what comes next, or what messes or projects are waiting for me. Tunnel vision, baby.
I also have an overall goal to be diligent with our family calendar, but this is going to be a year-long project of checking it and updating it everyday, and sitting down to review it with Matt at the start of each week (probably Sunday nights.) This will also involve actually sticking to the times we'd like to do things...like starting dinner at 4:45, and starting the kids' bedtime routine at 7:45.
Oooh I am so ready for this year!
How about you - what are your goals this month? Or this year?
|
from django.conf.urls import patterns, url
from crowdataapp import views
# URL routes for crowdataapp. NOTE(review): patterns() with string view names
# was deprecated in Django 1.8 and removed in 1.10 — this file targets an
# older Django; confirm the project's Django version before modernizing.
# Order matters: the generic '^(?P<document_set>[\w-]+)$' route must come
# after the literal routes above it or it would shadow them.
urlpatterns = patterns('crowdataapp.views',
                       url(r'^$',
                           'document_set_index',
                           name='document_set_index'),
                       url(r'^pleaselogin$',
                           'login',
                           name='login_page'),
                       url(r'^logout$',
                           'logout',
                           name='logout_page'),
                       url(r'^afterlogin$',
                           'after_login',
                           name='after_login'),
                       url(r'^profile$',
                           'edit_profile',
                           name='edit_profile'),
                       url(r'^(?P<document_set>[\w-]+)$',
                           'document_set_view',
                           name='document_set_view'),
                       url(r'^(?P<document_set>[\w-]+)/new_transcription$',
                           'transcription_new',
                           name='new_transcription'),
                       url(r'^(?P<document_set>[\w-]+)/(?P<document_id>[\w-]+)$',
                           'show_document',
                           name='show_document'),
                       url(r'^(?P<document_set>[\w-]+)/autocomplete/(?P<field_name>[\w-]+)$',
                           'autocomplete_field',
                           name='autocomplete_field'),
                       url(r'crowdata/form/(?P<slug>[\w-]+)',
                           'form_detail',
                           name='crowdata_form_detail'),
                       url(r'^(?P<document_set>[\w-]+)/ranking/(?P<ranking_id>[\w-]+)$',
                           'ranking_all',
                           name='ranking_all'),
                       url(r'^(?P<document_set>[\w-]+)/users/(?P<username>[\w-]+)$',
                           'user_profile',
                           name='user_profile'),
                       url(r'^(?P<document_set>[\w-]+)/all/users$',
                           'users_all',
                           name='users_all'),
                       url(r'^(?P<document_set>[\w-]+)/(?P<field_id>[\w-]+)/(?P<canon_id>[\w-]+)$',
                           'documents_by_entry_value',
                           name='documents_by_entry_value'),
                       )
|
Have you ever called your lead generation company only to have the representative not know why you have called? Or discover that several other agents in your area have been sold the exact same lead as you? Are your leads generated using bait and switch tactics that fool the potential client into a sales attempt? Or are they simply names and addresses that all require door knocking in order to sell? Maybe they are of such poor quality that you are only able to close one or two out of twenty.
Why do so many insurance companies who get into the lead business try and fail? It’s because of the philosophy that it is only a simple process of downloading data electronically from some source. The problem is that the data they get represents only a small fraction of the possible population of lead data that exists.
These are the very things that NAA has eliminated from our lead generation process. Other companies use multiple subcontractors for the process. Not being in control of their lead program allows them to lay the blame on those companies if you experience issues with closing or the sales process. Since National Agents Alliance owns the entire process – from directly hiring court researchers to owning the printing machines and mail house facilities – we are totally accountable for every aspect of the lead generation process.
Our leads are only available to our agents/agency partners that are members of The Alliance and are not sold on the open market. We do not sell our leads or data to any other company to generate additional revenue. We are not in the business of creating profit from selling leads – we are in the business of making profits from working those leads. We have the most comprehensive lead generation program in the industry. Much of what we do is proprietary and cutting edge in getting our agents in front of buying customers.
This is why our leads are the highest quality, freshest most qualified leads in the industry – NO ONE ELSE IN THIS BUSINESS CAN MAKE THAT CLAIM! Click here to check out one of our leads. While it may look like others out there, the process used to generate it, as described above, is completely different. This particular lead format has proven extremely effective in getting the best responses, which is why it will be familiar, but the key is not the appearance, it’s the work behind it. The key is in how fresh that name on the lead is; it’s in how many other agents got that same lead that you are competing against; it’s what kind of quantities that need to be worked to make one sale?
The key to having quality leads is the company and process behind each and every one: National Agents Alliance – the largest and most successful association of agents and agencies in the country!
|
from config import permitted_id
import discord
import inspect
async def evaluate(cmd, message, args):
    """Owner-only debug command: eval() an expression and report the result.

    SECURITY: eval() executes arbitrary code with the bot's privileges.
    Access is deliberately gated on ``permitted_id`` (bot owners); never
    widen that check.

    :param cmd: the command object (provides .help() and .log).
    :param message: the triggering discord message.
    :param args: tokenized expression to evaluate; empty shows help.
    """
    if message.author.id not in permitted_id:
        # Unauthorized caller: refuse with an explanatory embed.
        status = discord.Embed(type='rich', color=0xDB0000,
                               title='⛔ Insufficient Permissions. Bot Owner or Server Admin Only.')
        await message.channel.send(None, embed=status)
        return
    if not args:
        await message.channel.send(cmd.help())
        return
    try:
        execution = " ".join(args)
        output = eval(execution)
        # Await coroutines/futures so we show their result, not the object.
        if inspect.isawaitable(output):
            output = await output
        status = discord.Embed(title='✅ Executed', color=0x66CC66)
        if output:
            # Discord caps embed field values at 1024 characters; truncate
            # (leaving headroom for the code fence) so add_field cannot fail.
            text = str(output)
            if len(text) > 1000:
                text = text[:1000] + '…'
            status.add_field(name='Results', value='\n```\n' + text + '\n```')
    except Exception as e:
        # Narrowed from a bare except so cancellation/interrupts propagate.
        cmd.log.error(e)
        status = discord.Embed(color=0xDB0000, title='❗ Error')
        status.add_field(name='Execution Failed', value=str(e))
    await message.channel.send(None, embed=status)
|
The Receptor BUG offers award winning design and safety thanks to the patented VDSAP system which utilizes two overlapping shells, adding enhanced structural stability with added protection against penetration or sharp objects. The advanced outer ABS shell creates a tough and robust barrier whilst the in-mold polycarbonate/EPS inner liner provides test-winning impact absorption. The Receptor BUG also features a flexible ventilation system that can be closed for colder days and works seamlessly with goggles. The ear and neck pads are removable to suit different styles of riding or weather conditions.
|
#!/usr/bin/env python
import sys
import getopt
import os
import subprocess as sp
import shutil as sh
import xmlrpclib
class MakeTest(object):
    """Run a named piernik regression test.

    NOTE(review): __init__ does real work — it chdirs into runs/<test>,
    launches the simulation under mpiexec, exits the whole process on a
    non-zero return code, then dispatches to the matching verification
    routine. Python 2 script (print statements, xmlrpclib).
    """

    def __init__(self,test):
        self.initpath = os.getcwd()                            # where we were started from
        self.runpath = os.path.join(self.initpath,'runs',test) # per-test run directory
        self.test = test
        os.chdir(self.runpath)
        # Run the simulation binary; abort the whole script on failure so
        # a broken run is not "verified".
        retcode = sp.call(["mpiexec","./piernik"])
        if retcode != 0:
            sys.exit(retcode)
        self.runtest(test)
        os.chdir(self.initpath)

    def put_png(self):
        """Upload every PNG from the run directory as a Trac wiki attachment
        (credentials are embedded in the XML-RPC URL)."""
        server = xmlrpclib.ServerProxy("http://piernik:p1ern1k@hum/piernik/login/xmlrpc")
        for file in os.listdir(self.runpath):
            # Matches 'png' anywhere in the name, not just the extension.
            if file.find('png') != -1:
                server.wiki.putAttachment(self.test+'/'+file, xmlrpclib.Binary(open(self.runpath+'/'+file).read()))

    def testJeans (self):
        # Produce verification plots with gnuplot, then publish them.
        sp.call(["gnuplot","verify.gpl"])
        self.put_png()

    def testMaclaurin (self):
        # Local import: the maclaurin module lives on the path appended in main().
        from maclaurin import Maclaurin_test
        Maclaurin_test(self.runpath+'/maclaurin_sph_0001.h5')
        self.put_png()

    def testSedov (self):
        print "test not implemented"

    def output(self):
        # Debug helper: show where the script started and where the run lives.
        print self.initpath
        print self.runpath

    def runtest(self,test):
        # Dispatch table: look up the verification method by test name and
        # call it immediately (KeyError on an unknown test name).
        tests = { "jeans": self.testJeans,
                  "maclaurin": self.testMaclaurin,
                  "sedov": self.testSedov}[test]()
        #tests.get(test)
#tests.get(test)
def usage():
    # Print the module docstring as help text (Python 2 print statement).
    print __doc__
def main(argv):
    """Parse command-line options and run the selected piernik test.

    Options:
        -h, --help        show usage and exit
        -t, --test NAME   name of the test to run (jeans/maclaurin/sedov)
    """
    try:
        # BUG FIX: '-t' takes an argument, so the short-option spec needs a
        # trailing colon ("ht:"); with plain "ht" the test name was left in
        # the positional args and 'arg' was always empty.
        opts, args = getopt.getopt(argv, "ht:", ["help", "test="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    test = None
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage()
            sys.exit()
        elif opt in ("-t", "--test"):
            test = arg
    if test is None:
        # BUG FIX: previously 'test' was never assigned when -t was omitted,
        # so the MakeTest call below raised NameError instead of showing help.
        usage()
        sys.exit(2)
    # add piernik modules
    sys.path.append(sys.path[0] + '/python')
    t = MakeTest(test)
    t.output()
# Script entry point: forward CLI arguments (minus the program name).
if __name__ == "__main__":
    main(sys.argv[1:])
|
Two of the four members of indie band Remnose, Marlon and Carson Morton, join us on the show to discuss writing music in Colombia and their favorite Detroit venues — and to embarrass Michael for mixing them up. Also, Sgt. Surefire joins us on the show to discuss how he plans to eradicate America’s enemies.
|
# -*- coding: utf-8 -*-
#
# pystruct documentation build configuration file, created by
# sphinx-quickstart on Fri May 3 17:14:50 2013.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_bootstrap_theme
import pystruct
#import sphinx_gallery
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('sphinxext'))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',
              'sphinx.ext.doctest', 'sphinx.ext.pngmath',
              'sphinx.ext.viewcode', 'numpy_ext.numpydoc', 'sphinx_gallery.gen_gallery']

# Generate autosummary stub pages even if nothing references them.
# (FIX: this flag was previously assigned twice; one assignment suffices.)
autosummary_generate = True

# Document members and inherited members by default in autodoc directives.
autodoc_default_flags = ['members', 'inherited-members']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pystruct'
copyright = u'2013, Andreas Mueller'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pystruct.__version__
# The full version, including alpha/beta/rc tags.
release = pystruct.__version__

# sphinx-gallery: map module names to the root URL of their published API
# docs so gallery examples can cross-link to the objects they use.
sphinx_gallery_conf = {
    'reference_url': {
        # The locally documented module uses None (links resolve locally).
        'pystruct': None,
        # External python modules use their documentation websites
        'sklearn': 'http://scikit-learn.org/stable',
        'matplotlib': 'http://matplotlib.org',
        'numpy': 'http://docs.scipy.org/doc/numpy-1.9.1'}}
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# (Build output, template sources and theme sources are not doc input.)
exclude_patterns = ['_build', '_templates', '_themes']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'pystruct'
html_theme = 'bootstrap'

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = ['_themes']
# 'bootstrap' is shipped by the sphinx_bootstrap_theme package, which
# exposes its install location through this helper.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pystructdoc'

# -- Options for LaTeX output -------------------------------------------------

# All entries below are left at their defaults (commented out).
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'pystruct.tex', u'pystruct Documentation',
                    u'Andreas Mueller', 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'pystruct', u'pystruct Documentation',
     [u'Andreas Mueller'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# NOTE(review): the description below is still the sphinx-quickstart
# placeholder text.
texinfo_documents = [
    ('index', 'pystruct', u'pystruct Documentation', u'Andreas Mueller',
     'pystruct', 'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Theme options are theme-specific and customize the look and feel of a
# theme further.
html_theme_options = {
    # Navigation bar title. (Default: ``project`` value)
    'navbar_title': "PyStruct",

    # Tab name for entire site. (Default: "Site")
    #'navbar_site_name': "Site",

    # A list of tuples containing pages to link to. The value should
    # be in the form [(name, page), ..]
    'navbar_links': [
        ('Start', 'index'),
        ('Installation', 'installation'),
        ('Introduction', 'intro'),
        ('User Guide', 'user_guide'),
        ('Examples', 'auto_examples/index'),
        ('API', 'references'),
    ],

    # Render the next and previous page links in navbar. (Default: true)
    'navbar_sidebarrel': False,

    # Render the current page's TOC in the navbar. (Default: true)
    'navbar_pagenav': False,

    # Tab name for the current page's TOC. (Default: "Page")
    'navbar_pagenav_name': "Page",

    # Global TOC depth for "site" navbar tab. (Default: 1)
    # Switching to -1 shows all levels.
    'globaltoc_depth': 0,

    # Include hidden TOCs in Site navbar?
    #
    # Note: If this is "false", you cannot have mixed ``:hidden:`` and
    # non-hidden ``toctree`` directives in the same page, or else the build
    # will break.
    #
    # Values: "true" (default) or "false"
    'globaltoc_includehidden': "false",

    # HTML navbar class (Default: "navbar") to attach to <div> element.
    # For black navbar, do "navbar navbar-inverse"
    'navbar_class': "navbar",

    # Fix navigation bar to top of page?
    # Values: "true" (default) or "false"
    'navbar_fixed_top': "true",

    # Location of link to source.
    # Options are "nav" (default), "footer" or anything else to exclude.
    # NOTE(review): "None" here is a plain string, i.e. "anything else",
    # which hides the source link — presumably intentional; confirm.
    'source_link_position': "None",

    # Bootswatch (http://bootswatch.com/) theme.
    #
    # Options are nothing with "" (default) or the name of a valid theme
    # such as "amelia" or "cosmo".
    'bootswatch_theme': "cerulean",

    # Choose Bootstrap version.
    # Values: "3" (default) or "2" (in quotes)
    'bootstrap_version': "3",
}
|
We have been having fun playing with our new lens! And we found this pretty green spot, those kind of spots are RARE in Arizona.
I am all about keeping it simple when it comes to accessories, I love a good statement necklace but sometimes I just like to be simple. That's why my favorite sweet is arm candy! It's not a huge distraction, it's just a perfect little addition to any outfit. PLUS arm candy has no calories. win!
I love my mnmlst watch because it is super simple & adds a nice pop of color to my outfit :) Another perk is you can change out the strap to any of their strap options, so I can share my watch with Jason! He is a total watch lover, so he loves that it's an option! Plus, I borrow enough of his shirts, I figure I owe him this ;) haha! They are WAY less expensive than other designer watches and the quality is the same as the big guys. Check them out & thank me later.
That watch is really nice. Your hair looks SO beautiful!
Love your style and your blog!!
thank YOU! I am going to hop on over to yours and check it out!
Im in love with your hairstyle!
Ooh, I really like that watch! $70 is totally not bad for a watch...I might need to pick this one up!
Stunning photos! I love your watch a lot and your hair... TOO pretty!
|
#: vim set encoding=utf-8 :
##
# PixelBot Plugin API
# Tools for creating plugins for the bot
#
# version 0.2
# author William F.
# copyright MIT
##
# Imports
import asyncio
import logging
# Main class
class Plugin(object):
    """Base class for PixelBot plugins.

    Subclasses declare name/description/version/author (typically as class
    attributes, consumed by ``init``) and register commands/tasks via the
    ``command``/``task`` decorators; the decorated methods register
    themselves when invoked once during construction below.
    """

    def __init__(self, bot, name, description, version, author, data):
        self.bot = bot
        self.name = name
        self.desc = description
        self.version = version
        self.author = author
        self.data = {}
        self.default_data = data
        self.tasks = {}      # name -> Task
        self.cmds = {}       # name -> command entry (or namespace dict)
        self.mod_cmds = {}
        # Fetch plugin data, seeding the bot's store with defaults on first run.
        if self.name not in self.bot.data['plugins']:
            self.bot.data['plugins'][self.name] = self.default_data
        self.data = self.bot.data['plugins'][self.name]
        # Register tasks and commands: decorated cmd_*/task_* methods stash
        # themselves on the instance when called, so invoke each once with
        # placeholder arguments.
        logging.info('Registering plugin "{}".'.format(self.name))
        for method in dir(self):
            if callable(getattr(self, method)):
                call = getattr(self, method)
                if method.startswith('cmd_'):
                    call(None, None, None)
                elif method.startswith('task_'):
                    call(None)

    def init(plugin, bot):
        """Used to inherit from PluginAPI without calling the
        'bloated' super method"""
        default_data = {}
        if 'default_data' in plugin.__dir__():
            default_data = plugin.default_data
        super(type(plugin), plugin).__init__(bot,
                                             name=plugin.name,
                                             description=plugin.description,
                                             version=plugin.version,
                                             author=plugin.author,
                                             data=default_data)

    # Utils
    def saveData(self):
        """Persist this plugin's data back into the bot's store."""
        self.bot.data['plugins'][self.name] = self.data
        self.bot.saveData()

    def getConfig(self, key):
        """Read a value from this plugin's section of the bot config."""
        return self.bot.cfg.get(key, section=self.name)

    def log(self, message):
        logging.info('[{}][INFO] {}'.format(self.name, message))

    def warning(self, message):
        # BUG FIX: this previously logged at CRITICAL level; warnings now
        # use the matching logging.warning level.
        logging.warning('[{}][WARN] {}'.format(self.name, message))

    def critical(self, message):
        logging.critical('[{}][FAIL] {}'.format(self.name, message))

    def generateHelp(self, mod=False):
        """Build the markdown help blurb for this plugin."""
        # BUG FIX: use self.desc (always set in __init__) instead of
        # self.description, which only exists as a subclass class attribute
        # and raised AttributeError for directly-constructed plugins.
        info = (
            '**{name}**\n'
            '*{desc}*\n\n'
            'Version: {version}\n'
            'Commands:```{cmds}```'
        ).format(
            name=self.name,
            desc=self.desc,
            version=self.version,
            cmds='...'
        )
        return info

    # Methods — event hooks; no-ops here, overridden by concrete plugins.
    async def on_ready(self, client):
        pass

    async def on_message(self, client, msg):
        pass

    async def on_member_join(self, client, user):
        pass

    async def on_member_remove(self, client, user):
        pass

    async def on_member_update(self, client, before, after):
        pass
# User API
class _UserAPI(object):
    """Helpers for inspecting Discord users."""

    def __init__(self):
        pass

    def is_mod(self, plugin, user):
        """Returns True if the user is a mod (has any 'mod_roles' role)."""
        try:
            mod_roles = plugin.bot.settings['discord']['mod_roles']
            return any(role.name in mod_roles for role in user.roles)
        except Exception:
            # Best-effort: missing attributes/keys (e.g. DM users have no
            # roles) mean "not a mod". Narrowed from a bare 'except' so
            # KeyboardInterrupt/SystemExit are no longer swallowed.
            return False


# Module-level singleton used by plugins.
User = _UserAPI()
# Task class
class Task(object):
    """A named periodic task owned by a plugin.

    The coroutine ``func`` is invoked repeatedly, every ``interval``
    seconds, for as long as ``alive`` is True.
    """

    def __init__(self, owner, name, func, interval, alive=True):
        self.owner = owner        # plugin instance that owns this task
        self.name = name
        self.func = func          # coroutine: func(owner, client)
        self.interval = interval  # seconds between invocations
        self.alive = alive        # loop condition for run()

    async def run(self, client):
        # Execute-then-sleep loop; setting self.alive to False stops it
        # after the current iteration completes.
        while self.alive:
            await self.func(self.owner, client)
            await asyncio.sleep(self.interval)

    def kill(self):
        """Stop the loop (takes effect after the current iteration)."""
        logging.info('[{}] Task "{}" killed.'.format(self.owner.name,
                                                     self.name))
        self.alive = False

    def revive(self):
        """Restart a killed task by scheduling a fresh run() loop on the
        bot's event loop."""
        logging.info('[{}] Task "{}" revived.'.format(self.owner.name,
                                                      self.name))
        self.alive = True
        self.owner.bot.client.loop.create_task(self.run(self.owner.bot.client))
# Task decorator
def task(name, interval, alive=True):
    """Make the function a bot task."""
    def wrapper(func):
        def wrapped(*args):
            owner = args[0]
            # Build the Task, apply the requested liveness, and register it
            # on the owning plugin under its name.
            new_task = Task(owner, name, func, interval)
            new_task.alive = alive
            owner.tasks[name] = new_task
            # A truthy second argument means "also run the body now".
            if args[1]:
                func(*args)
        return wrapped
    return wrapper
# Command decorator
def command(name, mod=False, ns=None):
    """Register the decorated method as a bot command, optionally grouped
    under the namespace ``ns``. ``mod=True`` marks it moderator-only."""
    def wrapper(func):
        def wrapped(*args):
            owner = args[0]
            entry = {
                'type': 'cmd',
                'func': func,
                'mod': mod
            }
            if ns is None:
                # Plain top-level command.
                owner.cmds[name] = entry
            elif ns in owner.cmds:
                # Namespace already exists: add this command to it.
                owner.cmds[ns][name] = entry
            else:
                # First command in this namespace: create the group.
                owner.cmds[ns] = {
                    'type': 'ns',
                    name: entry
                }
            # A truthy second argument means "also run the body now".
            if args[1]:
                func(*args)
        return wrapped
    return wrapper
|
O thou of God and man the Son!
Thou art my glory, joy, and crown.
Than all the angels heaven can boast.
Now and forevermore be thine.
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewayFrontendIPConfiguration(SubResource):
    """Frontend IP configuration of an application gateway.

    :param id: Resource ID.
    :type id: str
    :param private_ip_address: PrivateIPAddress of the network interface IP
     Configuration.
    :type private_ip_address: str
    :param private_ip_allocation_method: PrivateIP allocation method. Possible
     values include: 'Static', 'Dynamic'
    :type private_ip_allocation_method: str or
     ~azure.mgmt.network.v2017_09_01.models.IPAllocationMethod
    :param subnet: Reference of the subnet resource.
    :type subnet: ~azure.mgmt.network.v2017_09_01.models.SubResource
    :param public_ip_address: Reference of the PublicIP resource.
    :type public_ip_address:
     ~azure.mgmt.network.v2017_09_01.models.SubResource
    :param provisioning_state: Provisioning state of the public IP resource.
     Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param name: Name of the resource that is unique within a resource group.
     This name can be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    """

    # Maps Python attribute names to their (possibly nested) keys in the
    # serialized REST payload, plus the msrest type used for (de)serialization.
    # NOTE(review): AutoRest-generated — manual edits will be lost on
    # regeneration.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'},
        'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'SubResource'},
        'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, *, id: str=None, private_ip_address: str=None, private_ip_allocation_method=None, subnet=None, public_ip_address=None, provisioning_state: str=None, name: str=None, etag: str=None, type: str=None, **kwargs) -> None:
        # 'id' is handled by the SubResource base; everything else is stored
        # verbatim on the model instance.
        super(ApplicationGatewayFrontendIPConfiguration, self).__init__(id=id, **kwargs)
        self.private_ip_address = private_ip_address
        self.private_ip_allocation_method = private_ip_allocation_method
        self.subnet = subnet
        self.public_ip_address = public_ip_address
        self.provisioning_state = provisioning_state
        self.name = name
        self.etag = etag
        self.type = type
|
This paper applies the ‘hypothetical monopolist’ test of market definition to a retail market with products differentiated by means of location and other dimensions. The test for defining the relevant product and geographic market follows the conditions required by European Union competition law, and so it takes into account both demand- and supply-side substitution. The empirical model, using sales data from a set of movie theatres in the north of Spain and incorporating the observed locations of consumers vis-à-vis the stores, shows that empirical tests of market definition may lead to an implausible definition of the relevant market if supply-side substitution is not accounted for.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.