text
stringlengths 29
850k
|
|---|
from tensortools import optimize
from tensortools.diagnostics import kruskal_align
from tqdm import trange
from collections.abc import Iterable
import numpy as np
class Ensemble(object):
    """
    Represents an ensemble of fitted tensor decompositions.

    Multiple replicate models are fit at each requested rank; results are
    sorted by objective value and aligned across ranks and replicates.
    """

    def __init__(self, nonneg=False, fit_method=None, fit_options=None):
        """Initializes Ensemble.

        Parameters
        ----------
        nonneg : bool
            If True, constrains low-rank factor matrices to be nonnegative.
        fit_method : None, str, callable, optional (default: None)
            Method for fitting a tensor decomposition. If input is callable,
            it is used directly. If input is a string then method is taken
            from tensortools.optimize using ``getattr``. If None, a reasonable
            default method is chosen.
        fit_options : dict, optional
            Holds optional arguments for fitting method. The dict is copied,
            so the caller's object is never mutated.

        Raises
        ------
        ValueError
            If ``fit_method`` is a string not found in tensortools.optimize,
            or is neither None, a string, nor callable.
        """
        # Model parameters
        self._nonneg = nonneg

        # Determine optimization method. If user input is None, try to use a
        # reasonable default. Otherwise check that it is callable.
        if fit_method is None:
            self._fit_method = optimize.ncp_bcd if nonneg else optimize.cp_als
        elif isinstance(fit_method, str):
            try:
                self._fit_method = getattr(optimize, fit_method)
            except AttributeError:
                raise ValueError("Did not recognize fit_method "
                                 "'{}'".format(fit_method))
        elif callable(fit_method):
            self._fit_method = fit_method
        else:
            raise ValueError("Expected 'fit_method' to be a string or "
                             "callable.")

        # Copy the options before filling in defaults. (A shared mutable
        # default argument would leak state across Ensemble instances and
        # mutate dicts passed in by callers.)
        fit_options = dict(fit_options) if fit_options is not None else {}
        fit_options.setdefault('tol', 1e-5)
        fit_options.setdefault('max_iter', 500)
        fit_options.setdefault('verbose', False)
        self._fit_options = fit_options

        # TODO - better way to hold all results...
        # Maps rank (int) -> list of result objects, sorted by objective.
        self.results = dict()

    def fit(self, X, ranks, replicates=1, verbose=True):
        """
        Fits CP tensor decompositions for different choices of rank.

        Parameters
        ----------
        X : array_like
            Real tensor
        ranks : int, or iterable
            iterable specifying number of components in each model
        replicates: int
            number of models to fit at each rank
        verbose : bool
            If True, prints summaries and optimization progress.
        """
        # Make ranks iterable if necessary. Materialize into a tuple so the
        # sequence survives being traversed several times below (a generator
        # would be exhausted after the first pass and breaks indexing).
        if not isinstance(ranks, Iterable):
            ranks = (ranks,)
        else:
            ranks = tuple(ranks)

        # Iterate over model ranks, optimize multiple replicates at each rank.
        for r in ranks:
            # Initialize storage
            if r not in self.results:
                self.results[r] = []

            # Display fitting progress.
            if verbose:
                itr = trange(replicates,
                             desc='Fitting rank-{} models'.format(r),
                             leave=False)
            else:
                itr = range(replicates)

            # Fit replicates.
            for i in itr:
                model_fit = self._fit_method(X, r, **self._fit_options)
                self.results[r].append(model_fit)

            # Print summary of results.
            if verbose:
                itr.close()
                itr.refresh()
                min_obj = min([res.obj for res in self.results[r]])
                max_obj = max([res.obj for res in self.results[r]])
                elapsed = sum([res.total_time for res in self.results[r]])
                print('Rank-{} models: min obj, {:.2f}; '
                      'max obj, {:.2f}; time to fit, '
                      '{:.1f}s'.format(r, min_obj, max_obj, elapsed),
                      flush=True)

        # Sort results from lowest to largest loss.
        for r in ranks:
            idx = np.argsort([result.obj for result in self.results[r]])
            self.results[r] = [self.results[r][i] for i in idx]

        # Align best model within each rank to best model of next larger rank.
        # Here r0 is the rank of the lower-dimensional model and r1 is the
        # rank of the high-dimensional model.
        for i in reversed(range(1, len(ranks))):
            r0, r1 = ranks[i-1], ranks[i]
            U = self.results[r0][0].factors
            V = self.results[r1][0].factors
            # NOTE: return value ignored here; alignment is presumably done
            # in place by kruskal_align -- confirm against its docs.
            kruskal_align(U, V, permute_U=True)

        # For each rank, align everything to the best model
        for r in ranks:
            # store best factors
            U = self.results[r][0].factors  # best model factors
            self.results[r][0].similarity = 1.0  # similarity to itself

            # align lesser fit models to best models
            for res in self.results[r][1:]:
                res.similarity = kruskal_align(U, res.factors, permute_V=True)

    def objectives(self, rank):
        """Returns objective values of models with specified rank."""
        self._check_rank(rank)
        return [result.obj for result in self.results[rank]]

    def similarities(self, rank):
        """Returns similarity scores for models with specified rank."""
        self._check_rank(rank)
        return [result.similarity for result in self.results[rank]]

    def factors(self, rank):
        """Returns KTensor factors for models with specified rank."""
        self._check_rank(rank)
        return [result.factors for result in self.results[rank]]

    def _check_rank(self, rank):
        """Checks if specified rank has been fit.

        Parameters
        ----------
        rank : int
            Rank of the models that were queried.

        Raises
        ------
        ValueError: If no models of rank ``rank`` have been fit yet.
        """
        if rank not in self.results:
            # Both placeholders must be filled: formatting with a single
            # argument would raise IndexError instead of this ValueError.
            raise ValueError('No models of rank-{} have been fit. '
                             'Call Ensemble.fit(tensor, rank={}, ...) '
                             'to fit these models.'.format(rank, rank))
|
My beehive starter kit arrived today via UPS: included are the frames, hives, smoker, gloves, hive tool, etc. Now I just need the live bees to be delivered later this week or early next week. I'm sure the post office in East Hampton will be looking forward to my package.
I'll be blogging about my experience as a first-time beekeeper. During my childhood my father kept many hives, and I remember harvesting the honey. I'm looking forward to producing some honey of my own.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.utils.translation import ugettext as _
from misago.core.utils import slugify
def create_default_forums_tree(apps, schema_editor):
    """Seed the default forums tree.

    Creates the special private-threads forum, the root category, and a
    starter category containing one forum plus a redirect to the Misago
    support site. MPTT fields (lft/rght/tree_id/level) are set by hand.
    """
    Forum = apps.get_model('misago_forums', 'Forum')

    # Private threads live in their own tree (tree_id=0).
    Forum.objects.create(
        special_role='private_threads',
        role='forum',
        name='Private',
        slug='private',
        lft=1,
        rght=2,
        tree_id=0,
        level=0,
    )

    # Root of the visible forums tree (tree_id=1).
    root_category = Forum.objects.create(
        special_role='root_category',
        role='category',
        name='Root',
        slug='root',
        lft=3,
        rght=10,
        tree_id=1,
        level=0,
    )

    # Translatable display names for the seeded nodes.
    first_category_name = _("First category")
    first_forum_name = _("First forum")
    support_redirect_name = _("Misago support forums")
    support_redirect_url = _("http://misago-project.org")

    first_category = Forum.objects.create(
        parent=root_category,
        lft=4,
        rght=9,
        tree_id=1,
        level=1,
        role='category',
        name=first_category_name,
        slug=slugify(first_category_name),
        css_class='accent')

    Forum.objects.create(
        parent=first_category,
        lft=5,
        rght=6,
        tree_id=1,
        level=2,
        role='forum',
        name=first_forum_name,
        slug=slugify(first_forum_name))

    Forum.objects.create(
        parent=first_category,
        lft=7,
        rght=8,
        tree_id=1,
        level=2,
        role='redirect',
        name=support_redirect_name,
        slug=slugify(support_redirect_name),
        redirect_url=support_redirect_url)
class Migration(migrations.Migration):
    # Must run after the initial schema migration that creates the
    # misago_forums Forum table.
    dependencies = [
        ('misago_forums', '0001_initial'),
    ]

    # Data migration: populate the default forums tree on install.
    operations = [
        migrations.RunPython(create_default_forums_tree),
    ]
|
Coventry University Scarborough Campus is a world-class university providing higher education for students from 18 years old. The courses provided range from science to business and marketing. The facility has a super lab, debating theatre and first-class IT facilities. EMCE were contracted to carry out the full Mechanical and Electrical installation for the project. A design brief was provided by the consultants, with the final design the responsibility of EMCE. The design was carried out using AutoCAD Revit software to BIM Level 1.
The project was required to meet an overall Energy Performance Rating Assessment rating of ‘B’. This required detailed attention to the mechanical and electrical services installed to the building. To assist in achieving the required EPC rating REHAU under floor heating and LED lighting with smart lighting controls were installed to the building. The emergency lighting is monitored and tested by a Tridonic system. The system schedules the testing of the emergency lighting and sends alerts to the site team of any failures. All mechanical and electrical services to the building were metered and monitored by the central BMS system. The M + E installation was completed in 26 weeks.
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019 SubDownloader Developers - See COPYING - GPLv3
import logging
import socket
from ssl import SSLError
from urllib.error import HTTPError, URLError
from urllib.request import urlopen, urlretrieve
log = logging.getLogger('subdownloader.http')
DEFAULT_TIMEOUT = 300
# FIXME: allow download and unzip in one step?
def test_connection(url, timeout=DEFAULT_TIMEOUT):
    """
    Open a connection to the url.

    :param url: Url to test
    :param timeout: Timeout in seconds (number, or string convertible to float)
    :return: True if connection could be made.
    """
    log.debug('testConnection: url={}, timeout={}'.format(url, timeout))
    # FIXME: For Python3 ==> use urlopen(timeout=...) and get rid of socket
    defTimeOut = socket.getdefaulttimeout()
    try:
        timeout = float(timeout)
    except ValueError:
        log.debug('Illegal timeout argument. {} ({})'.format(
            timeout, type(timeout)))
        # Fall back to the module default instead of handing a non-numeric
        # value to socket.setdefaulttimeout(), which would raise TypeError.
        timeout = DEFAULT_TIMEOUT
    socket.setdefaulttimeout(timeout)
    connectable = False
    log.debug('Test connection "{}", timeout={}'.format(url, timeout))
    try:
        urlopen(url)
        log.debug('urlopen succeeded')
        connectable = True
    except (HTTPError, URLError, SSLError, socket.error):
        log.exception('url failed')
    finally:
        # Always restore the process-wide default timeout, even if urlopen
        # raised something outside the caught exception types.
        socket.setdefaulttimeout(defTimeOut)
    return connectable
def download_raw(url, local_path, callback):
    """
    Download an url to a local file.

    :param url: url of the file to download
    :param local_path: path where the downloaded file should be saved
    :param callback: instance of ProgressCallback
    :return: True is succeeded
    """
    log.debug('download_raw(url={url}, local_path={local_path})'.format(url=url, local_path=local_path))
    hook = RawDownloadProgress(callback).get_report_hook()
    log.debug('urlretrieve(url={url}, local_path={local_path}) ...'.format(url=url, local_path=local_path))
    try:
        urlretrieve(url=url, filename=local_path, reporthook=hook)
    except URLError:
        log.exception('... FAILED')
        callback.finish(False)
        return False
    log.debug('... SUCCEEDED')
    callback.finish(True)
    return True
class RawDownloadProgress(object):
    """
    Subclass of ProgressCallback purposed for reporting back download progress.
    """

    def __init__(self, callback):
        """
        Create a new RawDownloadProgress that encapsulates a ProgressCallback
        to record download progress.

        :param callback: ProgressCallback to encapsulate
        """
        self._callback = callback
        self._chunkNumber = 0  # index of the most recently reported chunk
        self._total = 0  # running count of bytes downloaded so far

    def get_report_hook(self):
        """
        Return a callback function suitable for using reporthook argument of
        urllib(.request).urlretrieve

        :return: function object
        """
        def report_hook(chunkNumber, chunkSize, totalSize):
            # totalSize is -1 when the server sends no Content-Length.
            if totalSize != -1 and not self._callback.range_initialized():
                logging.getLogger('subdownloader.http').debug(
                    'Initializing range: [{},{}]'.format(0, totalSize))
                self._callback.set_range(0, totalSize)
            self._chunkNumber = chunkNumber
            self._total += chunkSize
            # Clamp only when the total size is known: the final chunk can
            # overshoot the file size. (Previously an unknown size of -1
            # clamped the running total down to -1 on every call.)
            if 0 <= totalSize < self._total:
                self._total = totalSize
            self._callback.update(self._total)

        return report_hook
|
My five favorite books to bring you sunshine!
There are a few books I have read in my life that I find myself recommending time and again to people. These are the books that I think can make a real difference to how to see and live life. With summer coming and time to read and think I hope you enjoy my selection of 5 books for greater happiness in life.
Jonathan Haidt draws on ancient wisdom, philosophy and modern psychology to describe what they all agree are the things most likely to make us happy in life. This provides a well-written and engaging manual on how to improve your experience of living. Best of all his style is easy to read and understand.
The Consolations of Philosophy: Alain de Botton.
De Botton is a philosopher for modern times who has turned his attention to many of the difficulties of our lives; mood, love, work, religion, status anxiety and even architecture. In this book he explores the ancient philosophers and draws out wisdom which holds true across the centuries in an accessible and engaging style.
Dr Germer is an American clinical psychologist who, alongside Dr Kristin Neff, has pioneered much mindfulness work in the area of developing self-compassion. I use this book to help me with overly self-critical thoughts, thoughts that I am not good enough, and with just allowing myself to be human and vulnerable! It helped me understand what self-compassion looks and sounds like.
Rick Hanson is a clinical psychologist who draws on personal and professional experience and his research in neuroscience. This book provides practical strategies to overcome the tendency of the mind to focus on the negative in life and build up our capacity to “take in the good”, however tiny in our day to day lives. I love it!
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
''' Lyndor runs from here - contains the main functions '''
import sys, time, os
import module.message as message
import module.save as save
import module.cookies as cookies
import module.read as read
import install
import module.move as move
import module.draw as draw
import module.rename as rename
import module.exercise_file as exercise_file
from colorama import Fore, init
def main():
    ''' Main function: prompt for a course url (or bulk mode) and download. '''
    init()
    message.animate_characters(Fore.LIGHTYELLOW_EX, draw.ROCKET, 0.02)
    message.spinning_cursor()
    message.print_line('\r1. Paste course url or\n' +
                       '2. Press enter for Bulk Download')
    course_url = input()
    print('')
    started_at = time.time()  # start time counter begins
    if course_url:
        # begin regular download
        schedule_download(course_url)
    else:
        # Empty input: fall back to the urls listed in Bulk Download.txt
        bulk_urls = read.bulk_download()
        if not bulk_urls:
            sys.exit(message.colored_message(Fore.LIGHTRED_EX, 'Please paste urls in Bulk Download.txt\n'))
        for bulk_url in bulk_urls:
            schedule_download(bulk_url)
    try:
        elapsed = time.time() - started_at
        message.animate_characters(Fore.LIGHTGREEN_EX, draw.COW, 0.02)
        message.colored_message(Fore.LIGHTGREEN_EX, "\nThe whole process took {}\n".format(move.hms_string(elapsed)))
    except KeyboardInterrupt:
        sys.exit(message.colored_message(Fore.LIGHTRED_EX, "\n- Program Interrupted!!\n"))
def schedule_download(url):
    ''' Look for the scheduled time in settings.json '''
    # NOTE(review): read.aria2_installed is used as a plain attribute here;
    # if it is actually a function object this condition is always falsy --
    # confirm against module.read.
    if not read.aria2_installed:
        tip = '☝🏻 Tip: Install aria2c for faster downloads, read README.md to learn more.'
        message.carriage_return_animate(tip)
    if read.download_time == '':
        # If download time not set, begin download
        download_course(url)
        return
    else:
        counter = True
        message.colored_message(Fore.LIGHTGREEN_EX, 'Download time set to: ' + read.download_time + '\
in settings.json, you can change or remove this time in settings.json\n')
        try:
            # Poll once per minute until the wall clock matches the
            # scheduled HH:MM, then start the download.
            while counter:
                if time.strftime("%H:%M") == read.download_time:
                    download_course(url)
                    return
                print(f'Download will start at: {read.download_time} leave this window open.')
                time.sleep(60)
        except KeyboardInterrupt:
            sys.exit(message.colored_message(Fore.LIGHTRED_EX, "\n- Program Interrupted!!\n"))
def download_course(url):
    ''' Download a single course: folders, videos, subtitles, exercise files. '''
    # Check for a valid url
    if url.find('.html') == -1:
        # Not a course page url: show the "anonymous" art and exit.
        sys.exit(message.animate_characters(Fore.LIGHTRED_EX, draw.ANONYMOUS, 0.02))
    url = url[:url.find(".html")+5]  # strip any extra text after .html in the url

    # Folder/File paths
    lynda_folder_path = read.location + '/'
    course_folder_path = save.course_path(url, lynda_folder_path)
    desktop_folder_path = install.get_path("Desktop")
    download_folder_path = install.get_path("Downloads")

    # Read preferences
    use_cookie_for_download = read.course_download_pref
    if use_cookie_for_download in ['cookies', 'cookie'] or read.exfile_download_method == 'aria2':
        # Cookie-based download: locate cookies.txt on Desktop or Downloads.
        cookie_path = cookies.find_cookie(desktop_folder_path, download_folder_path)
        downloading_from_cookie = message.return_colored_message(Fore.LIGHTBLUE_EX, '🍪 Downloading videos using cookies.txt')
        message.carriage_return_animate(downloading_from_cookie)
    else:
        # Username/password download: no cookie file needed.
        cookie_path = ''
        usr_pass_message = message.return_colored_message(Fore.LIGHTGREEN_EX, '⛺ Using username and password combination for download\n')
        message.carriage_return_animate(usr_pass_message)

    try:
        # main operations ->
        save.course(url, lynda_folder_path)  # Create course folder
        save.info_file(url, course_folder_path)  # Gather information
        save.chapters(url, course_folder_path)  # Create chapter folders
        save.contentmd(url)  # Create content.md
        save.videos(url, cookie_path, course_folder_path)  # Download videos
        rename.videos(course_folder_path)  # rename videos
        rename.subtitles(course_folder_path)  # rename subtitles
        move.vid_srt_to_chapter(url, course_folder_path)  # Move videos and subtitles to chapter folders

        # Download exercise files
        if save.check_exercise_file(url):
            print('\nExercise file is available to download')
            if not read.download_exercise_file:
                # if user do not want to download ex-file
                print("settings.json says you do not want to download ex-file -> 'download_exercise_file': false")
            else:
                # if user wants to download ex-file
                # NOTE(review): the first branch tests course_download_pref but
                # the elif tests exfile_download_pref -- confirm this mix of
                # preference keys is intentional.
                if read.course_download_pref == 'regular-login':
                    exercise_file.download(url, course_folder_path, cookie_path)
                elif read.exfile_download_pref == 'library-login':
                    if read.card_number == '':
                        print('\nTo download ex-file via library login -> Please save library card details in settings.json')
                    else:
                        exercise_file.download(url, course_folder_path, cookie_path)
                else:
                    print('\nThe exercise file can only be downloaded through one of the below combinations:')
                    print('~ Regular login: username + password or')
                    print('~ Library login: card number, pin and org. url\n')
        else:  # if exercise file not present
            print('This course does not include Exercise files.')
    except KeyboardInterrupt:
        sys.exit(message.colored_message(Fore.LIGHTRED_EX, "\n- Program Interrupted!!\n"))
# Script entry point: run main() and exit gracefully on Ctrl+C.
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        sys.exit(message.colored_message(Fore.LIGHTRED_EX, "\n- Program Interrupted!!\n"))
|
The U.S. Attorney’s Office announced that the following persons were arraigned today before U.S. Magistrate Judge Timothy J. Cavan in Billings. Indictments handed down by the Grand Jury were unsealed. Indictments are merely accusations and defendants are presumed innocent until proven guilty.
|
"""
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.db import models
from storageadmin.models import Appliance
class UpdateSubscription(models.Model):
    """A subscription of an appliance to a RockStor update channel.

    Previously the field notes were bare string literals; the first one
    silently became a misleading class docstring and the rest were no-op
    statements, so they are plain comments now.
    """

    # Name of the channel. eg: stable
    name = models.CharField(max_length=64, unique=True)
    # Detailed description or a longer name
    description = models.CharField(max_length=128)
    # Url of the repo
    url = models.CharField(max_length=512)
    # Appliance this subscription belongs to
    appliance = models.ForeignKey(Appliance)
    # Optional password; null when the repo needs no authentication
    password = models.CharField(max_length=64, null=True)
    # Status of subscription: active, inactive, expired etc..
    status = models.CharField(max_length=64)

    class Meta:
        app_label = 'storageadmin'
|
Attractive space solutions with two floors. Unparalleled Double decker panache!
Losberger’s maxiflex Emporium structure features a side height of 6.8m to accommodate two stories (ground level and mezzanine level) that both have an overhead clearance of more than 10’ of spacious, unencumbered useable floor space for freedom of event design and people movement. Even an outside, open-air mezzanine extending off the sides of the maxiflex Emporium structure is available to further enhance the views from upstairs in the structure.
The mezzanine may cover all or part of the tent’s interior and it serves well as a balcony or as an atrium gallery.
Many floor plans are possible with this maxiflex Emporium large tent. System parts from the maxiflex modular building system can be used, including large tent system cassette floor and system side wall cladding elements made of hard PVC and tempered safety glass. Ask us about the possibilities.
Discover the different models and variants of the maxiflex Emporium and find the right solution for your needs. Or begin to design your space right now with our Space Planner Tool.
|
import urllib.request
from cloudbot import hook
class vdm:
    """Scraper for viedemerde.fr (VDM) stories."""

    def __init__(self):
        # Fetch the RSS feed once up front; on network failure fall back to
        # an empty page so new_story() degrades to returning ''.
        try:
            self.page = urllib.request.urlopen("http://feeds.feedburner.com/viedemerde").read().decode('utf-8')
        except IOError:
            self.page = ''

    def new_story(self):
        """The fastest and the recomended option"""
        # Feed stories start with "Aujourd'hui, " and end with ". VDM";
        # the +5 keeps the 5-character ". VDM" suffix inside the slice.
        start_quote = self.page.find("Aujourd'hui, ")
        end_quote = self.page.find(". VDM") + 5
        vdm = self.page[start_quote:end_quote]
        # Consume the page past this story so the next call yields a new one.
        self.page = self.page[end_quote:]
        if len(vdm) >= 310:
            # Oversized extract (likely a bad slice): skip to the next story.
            return self.new_story()
        return vdm

    def random_story(self):
        """Get a story from vdm"""
        # Markup fragments and digits stripped from the scraped HTML.
        # NOTE(review): "/sante/'" carries a trailing apostrophe -- possibly
        # a typo for "/sante/"; confirm against the site's markup.
        chars_to_delete = ['</a><a href="', 'class="fmllink">', "/sante/'", "/sexe/", "/travail/", "/animaux/",
                           "</a>", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "/inclassable/", "/amour/",
                           "/enfants/", "/argent/", '"', "?quot;"]
        page = urllib.request.urlopen("http://www.viedemerde.fr/aleatoire").read().decode('utf-8')
        # Slice between the first fmllink anchor and the closing VDM anchor;
        # +16 / +26 are the lengths of the two search markers.
        story = (page[page.find('class="fmllink">') + 16:page.find('" class="fmllink"> VDM</a>') + 26])
        del page
        for x in chars_to_delete:
            story = story.replace(x, "")
        if 310 >= len(story):
            return story
        # Oversized extract: retry with a fresh random page.
        return self.random_story()
@hook.command("vdm", "viedemerde")
def main(reply):
    """Reply with a random VDM story, falling back to the feed's latest."""
    scraper = vdm()
    try:
        story = scraper.random_story()
        if story == '':
            story = scraper.new_story()
        reply("%s\n" % story)
    except IOError:
        reply("Erreur de connection :(")
    return None
|
Compact, affordable family All-In-One with smart connectivity for simple home printing, copying and scanning. Fast, high-quality printing; images can also be printed or scanned easily via Cloud Link. Available in white and black.
|
"""Support for controlling Sisyphus Kinetic Art Tables."""
import asyncio
import logging
import voluptuous as vol
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
EVENT_HOMEASSISTANT_STOP
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
_LOGGER = logging.getLogger(__name__)

# Key under hass.data where connected Table objects are stored.
DATA_SISYPHUS = 'sisyphus'
DOMAIN = 'sisyphus'

# An empty mapping in configuration means "autodetect tables".
AUTODETECT_SCHEMA = vol.Schema({})

# Explicit per-table configuration: a friendly name plus a hostname/IP.
TABLE_SCHEMA = vol.Schema({
    vol.Required(CONF_NAME): cv.string,
    vol.Required(CONF_HOST): cv.string,
})

TABLES_SCHEMA = vol.Schema([TABLE_SCHEMA])

# Component config is either the autodetect form or a list of tables.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Any(AUTODETECT_SCHEMA, TABLES_SCHEMA),
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
    """Set up the sisyphus component.

    Connects to each configured (or autodetected) table and spawns the
    light and media_player platforms for it. Returns True on success.
    """
    from sisyphus_control import Table
    tables = hass.data.setdefault(DATA_SISYPHUS, {})
    table_configs = config.get(DOMAIN)
    session = async_get_clientsession(hass)

    async def add_table(host, name=None):
        """Add platforms for a single table with the given hostname."""
        table = await Table.connect(host, session)
        if name is None:
            # Fall back to the name the table reports for itself.
            name = table.name
        tables[name] = table
        _LOGGER.debug("Connected to %s at %s", name, host)

        hass.async_create_task(async_load_platform(
            hass, 'light', DOMAIN, {
                CONF_NAME: name,
            }, config
        ))
        hass.async_create_task(async_load_platform(
            hass, 'media_player', DOMAIN, {
                CONF_NAME: name,
                CONF_HOST: host,
            }, config
        ))

    if isinstance(table_configs, dict):  # AUTODETECT_SCHEMA
        for ip_address in await Table.find_table_ips(session):
            await add_table(ip_address)
    else:  # TABLES_SCHEMA
        for conf in table_configs:
            await add_table(conf[CONF_HOST], conf[CONF_NAME])

    async def close_tables(*args):
        """Close all table objects."""
        # asyncio.wait() on bare coroutines is deprecated (and removed in
        # Python 3.11); gather awaits them directly, and return_exceptions
        # keeps wait()'s don't-raise-during-shutdown semantics.
        coros = [table.close() for table in tables.values()]
        if coros:
            await asyncio.gather(*coros, return_exceptions=True)

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, close_tables)

    return True
|
Nordic Industries Development (NID), a Finland-based business development consulting firm, recently announced Blue Ash will be the headquarters of its North American operations.
NID was established in 1994 and specializes in increasing the efficiency and effectiveness of international sales channels. NID accomplishes this by working directly with sales management and providing factual information to properly estimate channel potential. In addition, NID develops market-specific sales strategies and works with local channels to maximize sales performance and enhance control for its clients.
"Our strength is in the operational and regional knowledge of the sectors and clients we serve," explained Magalie Racaud, NID's Administrative Director. "We already have offices in Singapore, the United Arab Emirates and Finland. Having a North American presence will enable us to be accessible to our clients and stakeholders who are located here and it will also help us obtain first-hand market information that would not be possible otherwise."
NID is the first Finland-based business to open an office in Blue Ash. According to Mrs. Racaud, the company narrowed its search to Greater Cincinnati due to the region's convenient access to a large percentage of the population base and manufacturing facilities.
"After visiting Blue Ash and meeting the people, we decided it is the right place for us. Blue Ash has a great selection of facilities and its prime location will give our employees the ability to work in the heart of a very dynamic and active business community," she said.
The company will move into its new offices in January 2015 and plans to open additional offices in other states as well as Mexico and Canada to support clients in those locations. NID currently operates in North America, Europe, Scandinavia, the Middle East, and Southeast Asia. NID specializes in several U.S. industry sectors, including power engineering, automation, aviation, automotive, manufacturing technologies, construction, and offshore and marine.
On November 4th, the company will host a seminar in Helsinki at the U.S. Embassy which will focus on trends and prospects in the USA for manufacturing and process automation. Featured speakers will include Neil Hensley, Economic Development Director for the City of Blue Ash, Anne Cappel, Executive Director, European American Chamber of Commerce in Cincinnati, Joe Dehner, Partner, Frost Brown Todd, Cincinnati, and Juha Seppanen, CEO of Nordic Industries North America, Blue Ash. The seminar will also be held in several other cities in Finland.
Photo: NID's new North American Headquarters at 4555 Lake Forest Drive, Suite 650.
|
import argparse
import logging
import os
from decimal import Decimal
import colorama
from bdgt import get_data_dir, get_version
from bdgt.commands.factory import CommandFactory
from bdgt.storage.database import open_database
_log = logging.getLogger(__name__)
def process_cmd(args):
    """Build the command for the parsed CLI args and execute it.

    Errors are reported on stdout rather than raised: this is the CLI's
    top-level boundary. Uses print() call syntax, which behaves the same
    for a single argument on Python 2 and 3 (the old print statements
    were Python-2-only).
    """
    try:
        command = CommandFactory.create(args)
        output = command()
    except Exception as e:
        print("Error: {}".format(str(e)))
    else:
        print(output)
def _add_transaction_ids_arg(parser):
    """Attach the shared 'transaction_ids' positional argument to parser.

    Six sub-commands accept the same comma-separated id list; defining it
    once keeps the help text and parsing behaviour consistent.
    """
    parser.add_argument(
        'transaction_ids', type=unicode,
        help="A comma-separated list of transaction id's. A range of id's " +
             "can be specified using '-'; e.g: 1,4,6-10,12"
    )


def main():
    """CLI entry point: parse arguments, open the database, run the command."""
    colorama.init()

    # Parse command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--database',
                        help="The absolute path to the bdgt database. " +
                             "If not specified, ~/.bdgt/bdgt.db is used.")
    parser.add_argument('--version', action='version',
                        version='%(prog)s {}'.format(get_version()))
    subparsers = parser.add_subparsers(dest='command')

    # Account
    account_parser = subparsers.add_parser(
        'account',
        help="Manage accounts"
    )
    account_subparsers = account_parser.add_subparsers(dest='sub_command')
    account_add_parser = account_subparsers.add_parser(
        'add',
        help='Add an account'
    )
    account_add_parser.add_argument(
        'name', type=unicode,
        help="The name of the account, e.g: personal, savings."
    )
    account_add_parser.add_argument(
        'number', type=unicode,
        help="The account number for the account."
    )
    account_subparsers.add_parser(
        'list',
        help="List accounts"
    )
    account_delete_parser = account_subparsers.add_parser(
        'delete',
        help="Delete an account"
    )
    account_delete_parser.add_argument(
        'name', type=unicode,
        help="The name of the account, e.g: personal, savings."
    )

    # Import
    import_parser = subparsers.add_parser(
        'import',
        help="Import transactions"
    )
    import_subparsers = import_parser.add_subparsers(dest='sub_command')
    import_file_parser = import_subparsers.add_parser(
        'file',
        help="Import transactions from a file"
    )
    import_file_parser.add_argument(
        'type_', type=unicode, choices=["mt940", "ofx"],
        help="The type of the file being imported."
    )
    import_file_parser.add_argument(
        'file_',
        help="The path of the file to import."
    )
    import_subparsers.add_parser(
        'status',
        help="View the status of an import that's in progress"
    )
    import_add_parser = import_subparsers.add_parser(
        'add',
        help="Add parsed transactions to the staging area"
    )
    _add_transaction_ids_arg(import_add_parser)
    import_remove_parser = import_subparsers.add_parser(
        'remove',
        help="Remove parsed transactions from the staging area"
    )
    _add_transaction_ids_arg(import_remove_parser)
    import_subparsers.add_parser(
        'reset',
        help="Resets the import process."
    )
    import_subparsers.add_parser(
        'commit',
        help="Commit parsed transactions to the database."
    )
    import_set_parser = import_subparsers.add_parser(
        'set',
        help="Set the value of a field in a parsed transaction"
    )
    import_set_parser.add_argument(
        'field', type=unicode, choices=["account", "category"],
        help="The field of which the value is to be set."
    )
    import_set_parser.add_argument(
        'value', type=unicode,
        help="The value to set the field to."
    )
    _add_transaction_ids_arg(import_set_parser)

    # TX
    tx_parser = subparsers.add_parser(
        'tx',
        help="Manage transactions"
    )
    tx_subparsers = tx_parser.add_subparsers(dest='sub_command')
    tx_list_parser = tx_subparsers.add_parser(
        'list',
        help="List transactions"
    )
    tx_list_parser.add_argument(
        'account_name', type=unicode,
        help="The name of the account, e.g: personal, savings."
    )
    tx_assign_parser = tx_subparsers.add_parser(
        'assign',
        help="Assign transactions to a category."
    )
    tx_assign_parser.add_argument(
        'category_name', type=unicode,
        help="The name of the category"
    )
    _add_transaction_ids_arg(tx_assign_parser)
    tx_unassign_parser = tx_subparsers.add_parser(
        'unassign',
        help="Unassign a transaction from a category."
    )
    _add_transaction_ids_arg(tx_unassign_parser)
    tx_reconcile_parser = tx_subparsers.add_parser(
        'reconcile',
        help="Mark transactions as reconciled."
    )
    _add_transaction_ids_arg(tx_reconcile_parser)

    # Set
    set_parser = subparsers.add_parser(
        'set',
        help="Set a budget for a category."
    )
    set_parser.add_argument(
        'category_name', type=unicode,
        help="The name of the category"
    )
    set_parser.add_argument(
        'period', type=unicode, choices=["week", "month", "quarter", "year"],
        help="The period the spending limit applies to."
    )
    set_parser.add_argument(
        'amount', type=Decimal,
        help="The spending limit amount."
    )

    # Status
    # TODO: Month must be between 1 and 12
    # TODO: Year must be 4 digits
    status_parser = subparsers.add_parser(
        'status',
        help="View the status of a budget for the given month and year."
    )
    status_parser.add_argument(
        'month', type=int,
    )
    status_parser.add_argument(
        'year', type=int
    )

    args = parser.parse_args()

    # Open database
    if args.database:
        open_database(args.database)
    else:
        # Default location: ~/.bdgt/bdgt.db, created on first run.
        bdgt_dir = get_data_dir()
        if not os.path.exists(bdgt_dir):
            os.makedirs(bdgt_dir)
        bdgt_db = os.path.join(bdgt_dir, "bdgt.db")
        open_database("sqlite:///{}".format(bdgt_db))

    # Process command
    process_cmd(args)
|
Your new house will eventually reveal its darker spots, meaning you’re gonna need more light to get the job done. But it’s not a problem because The Dunfee Group can introduce you to the best electricians in your area. Before you know it, your home will be the most luminous thing in the neighborhood — and you’ll be saving bundles because of all the LED bulbs you chose to install.
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 01 13:52:49 2014
@author: paulinkenbrandt
http://gis.stackexchange.com/questions/108113/loop-with-arcpy-listfiles

Batch post-processing of monthly PRISM precipitation grids.  Stages, each
gated by a boolean tool checkbox:
  1. convert .asc files to integer rasters,
  2. define a projection and clip to a mask,
  3. reproject and downsample,
  4. convert mm to inches (multiply by 0.0393701),
  5. stamp metadata onto the converted rasters.
Only files whose name embeds a date (characters [1:-1]) on/after the
requested start year/month are processed.
"""
import arcpy
import os
from arcpy import env
from arcpy.sa import *
import arcpy_metadata as md
import datetime
# Example workspace: r'C:\GIS\PRISM\S\MAY_OCT_14'
env.workspace = arcpy.GetParameterAsText(0)  # input workspace
outplace = arcpy.GetParameterAsText(1)       # output folder
# Checkbox parameters arrive from the tool dialog as the strings 'true'/'false'
ischecked1 = arcpy.GetParameterAsText(2)  # convert .asc -> raster
ischecked2 = arcpy.GetParameterAsText(3)  # define projection and clip
# Date cutoff = start_year*100 + start_month.  Hoisted out of every loop:
# the original recomputed int(GetParameterAsText(8))*100 +
# int(GetParameterAsText(9)) for every single file in all five stages.
start_cutoff = int(arcpy.GetParameterAsText(8)) * 100 + int(arcpy.GetParameterAsText(9))
ascFileList = arcpy.ListFiles("*.asc")
if str(ischecked1) == 'true':
    for ascFile in ascFileList:
        if int(ascFile[1:-1]) >= start_cutoff:
            # get the file name without extension (replaces the %Name% variable from ModelBuilder)
            ascFileName = os.path.splitext(ascFile)[0]
            # define the output file; 'o' suffix marks the converted raster
            rastFile = env.workspace + '/' + ascFileName + 'o'
            ascinFile = env.workspace + '/' + ascFile
            arcpy.ASCIIToRaster_conversion(ascinFile, rastFile, 'INTEGER')
if str(ischecked2) == 'true':
    # the following defines projections and clips the PRISM raster files
    imgFileList = arcpy.ListRasters()
    for imgFile in imgFileList:
        if int(imgFile[1:-1]) >= start_cutoff:
            imgFileName = os.path.splitext(imgFile)[0]
            imgFile1 = env.workspace + '/' + imgFileName + 'p'
            incoords = arcpy.GetParameterAsText(4)
            # Define the input projection
            arcpy.DefineProjection_management(imgFile, incoords)
            # Clip to the mask supplied as parameter 5
            outExtractByMask = ExtractByMask(imgFile, arcpy.GetParameterAsText(5))
            outExtractByMask.save(imgFile1)
            arcpy.AddMessage("Clipped " + imgFileName)
    arcpy.AddMessage("Finished Clipping Data!")
outcoord = arcpy.GetParameterAsText(6)
ischecked3 = arcpy.GetParameterAsText(7)  # reproject + downsample
# the following projects the rasters and downsamples them
if str(ischecked3) == 'true':
    prjFileList = arcpy.ListRasters()
    for prjFile in prjFileList:
        if int(prjFile[1:-1]) >= start_cutoff:
            prjFileName = os.path.splitext(prjFile)[0]
            prjFile1 = outplace + '/' + prjFileName
            arcpy.ProjectRaster_management(prjFile, prjFile1, outcoord, "CUBIC", arcpy.GetParameterAsText(10))
            arcpy.AddMessage("Projected and downsampled " + prjFileName)
    arcpy.AddMessage("Finished Downsampling Data!")
# convert from mm to inches of ppt
ischecked4 = arcpy.GetParameterAsText(11)
if str(ischecked4) == 'true':
    env.workspace = outplace
    # Overwrite pre-existing files.  Hoisted above the loop: the setting is
    # process-wide, so it only needs to be applied once, not per file.
    arcpy.env.overwriteOutput = True
    calcFileList = arcpy.ListRasters()
    for calcFile in calcFileList:
        if int(calcFile[1:-1]) >= start_cutoff:
            calcFileName = os.path.splitext(calcFile)[0]
            # output name: 'a' prefix + the date portion of the input name
            calcFile1 = outplace + '/' + 'a' + calcFileName[1:-1]
            arcpy.Times_3d(calcFile, 0.0393701, calcFile1)  # mm -> inches
            arcpy.AddMessage("Converted " + calcFileName + ' to inches')
# Add Metadata to the inch-converted ('a'-prefixed) rasters
ischecked5 = arcpy.GetParameterAsText(12)
if str(ischecked5) == 'true':
    env.workspace = outplace
    metaFileList = arcpy.ListRasters('a*')
    for metafile in metaFileList:
        if int(metafile[1:-1]) >= start_cutoff:
            metaplace = outplace + '/' + metafile
            metadata = md.MetadataEditor(metaplace)
            metadata.title.set('PRISM precipitation data (inches) ' + metafile[-3:-1] + ' ' + metafile[1:-3])
            metadata.purpose.set('PRISM Raster File in Inches ' + metafile[-3:-1] + ' ' + metafile[1:-3])
            metadata.abstract.append('PRISM Raster File in Inches ' + metafile[-3:-1] + ' ' + metafile[1:-3])
            metadata.tags.add(["PRISM", "Precipitation", "Inches", metafile[-3:-1], metafile[1:-3]])  # tags.extend is equivalent to maintain list semantics
            metadata.finish()  # save the metadata back to the original source and cleanup. Without calling finish(), edits are NOT saved!
            arcpy.AddMessage("Added Metadata to " + metafile + ' to inches')
|
Will Nesbitt Of Nesbitt Realty Is An Expert On Houses In McLean, Virginia And Can Save You Big On Your Next Purchase.
Nesbitt Realty offers a buyer’s rebate. What is it and how does it work? When a seller puts a property on the market, they offer a commission to agents.
Will Nesbitt can save you money on your purchase at Downscrest or anywhere in Northern VA. You could work with a big chain, or you can work with a small-business like ours. We appreciate you choosing us. Will Nesbitt knows real estate in McLean Virginia.
We believe there is something spiritual about finding and buying a home and we love being a part of that process. We know where to look in Fairfax County to find the best possible houses for around $2,795,000.
|
'''
------------------------------------------------------------------------
Last updated 7/17/2015
Functions for created the matrix of ability levels, e. This can
only be used for looking at the 25, 50, 70, 80, 90, 99, and 100th
percentiles, as it uses fitted polynomials to those percentiles.
For a more generic version, see income_nopoly.py.
This file calls the following files:
utils.py
This py-file creates the following other file(s):
(make sure that an OUTPUT folder exists)
OUTPUT/Demographics/ability_log
OUTPUT/Demographics/ability
------------------------------------------------------------------------
'''
'''
------------------------------------------------------------------------
Packages
------------------------------------------------------------------------
'''
import numpy as np
import scipy.optimize as opt
import utils
'''
------------------------------------------------------------------------
Generate Polynomials
------------------------------------------------------------------------
The following coefficients are for polynomials which fit ability data
for the 25, 50, 70, 80, 90, 99, and 100 percentiles. The data comes from
the following file:
data/ability/FR_wage_profile_tables.xlsx
the polynomials are of the form
log(ability) = constant + (one)(age) + (two)(age)^2 + (three)(age)^3
------------------------------------------------------------------------
'''
# Cubic-polynomial coefficients (in age) for log ability -- one column per
# ability percentile group.  Population share of each group:
# Vals for:  .25   .25   .2    .1    .1    .09   .01
one = np.array([-0.09720122, 0.05995294, 0.17654618, 0.21168263, 0.21638731, 0.04500235, 0.09229392])  # linear term
two = np.array([0.00247639, -0.00004086, -0.00240656, -0.00306555, -0.00321041, 0.00094253, 0.00012902])  # quadratic term
three = np.array([-0.00001842, -0.00000521, 0.00001039, 0.00001438, 0.00001579, -0.00001470, -0.00001169])  # cubic term
constant = np.array([3.41e+00, 0.69689692, -0.78761958, -1.11e+00, -0.93939272, 1.60e+00, 1.89e+00])  # intercept
# Evaluate all 7 polynomials on ages 21..80 at once by broadcasting:
ages = np.linspace(21, 80, 60)
ages = np.tile(ages.reshape(60, 1), (1, 7))  # shape (60 ages, 7 groups)
income_profiles = constant + one * ages + two * ages ** 2 + three * ages ** 3
income_profiles = np.exp(income_profiles)  # undo the log to get ability levels
'''
------------------------------------------------------------------------
Generate ability type matrix
------------------------------------------------------------------------
Given desired starting and stopping ages, as well as the values for S
and J, the ability matrix is created. An arctan function is used
to extrapolate ability for ages 80-100.
------------------------------------------------------------------------
'''
def graph_income(S, J, e, starting_age, ending_age, bin_weights):
    '''
    Graphs the ability matrix (and its log) and saves the figures to disk.
    Inputs:
        S = number of age groups (scalar)
        J = number of ability types (scalar)
        e = ability matrix (SxJ array)
        starting_age = initial age (scalar)
        ending_age = end age (scalar)
        bin_weights = ability weights (Jx1 array)
    Outputs (files written; nothing returned):
        OUTPUT/Demographics/ability_log.png
        OUTPUT/Demographics/ability_log_2D.png (only when J > 1)
        OUTPUT/Demographics/ability.png
    NOTE(review): the 2D branch indexes columns 0..6 and so assumes J == 7
    with the percentile bins shown in the labels -- confirm with callers.
    '''
    # Imported lazily so the module can be loaded without matplotlib/display.
    import matplotlib
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D
    domain = np.linspace(starting_age, ending_age, S)
    # Cumulative population share per ability bin -> y-axis positions.
    Jgrid = np.zeros(J)
    for j in xrange(J):
        Jgrid[j:] += bin_weights[j]
    X, Y = np.meshgrid(domain, Jgrid)
    cmap2 = matplotlib.cm.get_cmap('winter')
    if J == 1:
        # Single ability type: a simple 2D line plot suffices.
        plt.figure()
        plt.plot(domain, np.log(e))
        plt.savefig('OUTPUT/Demographics/ability_log')
    else:
        # 3D surface of log ability over (age, ability type).
        fig10 = plt.figure()
        ax10 = fig10.gca(projection='3d')
        ax10.plot_surface(X, Y, np.log(e).T, rstride=1, cstride=2, cmap=cmap2)
        ax10.set_xlabel(r'age-$s$')
        ax10.set_ylabel(r'ability type -$j$')
        ax10.set_zlabel(r'log ability $log(e_j(s))$')
        # plt.show()
        plt.savefig('OUTPUT/Demographics/ability_log')
        # 2D Version: one labelled black line per percentile group.
        fig112 = plt.figure()
        ax = plt.subplot(111)
        ax.plot(domain, np.log(e[:, 0]), label='0 - 24%', linestyle='-', color='black')
        ax.plot(domain, np.log(e[:, 1]), label='25 - 49%', linestyle='--', color='black')
        ax.plot(domain, np.log(e[:, 2]), label='50 - 69%', linestyle='-.', color='black')
        ax.plot(domain, np.log(e[:, 3]), label='70 - 79%', linestyle=':', color='black')
        ax.plot(domain, np.log(e[:, 4]), label='80 - 89%', marker='x', color='black')
        ax.plot(domain, np.log(e[:, 5]), label='90 - 99%', marker='v', color='black')
        ax.plot(domain, np.log(e[:, 6]), label='99 - 100%', marker='1', color='black')
        # Vertical marker at age 80, where the arctan extrapolation begins.
        ax.axvline(x=80, color='black', linestyle='--')
        # Shrink the axes to make room for the legend outside the plot.
        box = ax.get_position()
        ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
        ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
        ax.set_xlabel(r'age-$s$')
        ax.set_ylabel(r'log ability $log(e_j(s))$')
        plt.savefig('OUTPUT/Demographics/ability_log_2D')
    if J == 1:
        # Level (non-log) plot, single type.
        plt.figure()
        plt.plot(domain, e)
        plt.savefig('OUTPUT/Demographics/ability')
    else:
        # Level (non-log) 3D surface.
        fig10 = plt.figure()
        ax10 = fig10.gca(projection='3d')
        ax10.plot_surface(X, Y, e.T, rstride=1, cstride=2, cmap=cmap2)
        ax10.set_xlabel(r'age-$s$')
        ax10.set_ylabel(r'ability type -$j$')
        ax10.set_zlabel(r'ability $e_j(s)$')
        plt.savefig('OUTPUT/Demographics/ability')
def arc_tan_func(points, a, b, c):
    '''
    Evaluate a generic arctan curve at the given points:
        a/2 - (a/pi) * arctan(b*points + c)
    Accepts scalars or numpy arrays for points.
    '''
    scaled = np.arctan(b * points + c)
    return a / 2 - (a / np.pi) * scaled
def arc_tan_deriv_func(points, a, b, c):
    '''
    Derivative of arc_tan_func with respect to points:
        -a*b / (pi * (1 + (b*points + c)^2))
    Accepts scalars or numpy arrays for points.
    '''
    u = b * points + c
    return -(a * b) / (np.pi * (1 + u ** 2))
def arc_error(guesses, params):
    '''
    Residuals measuring how well an arctan tail fits the ability profile:
    the level at age 80, the slope at age 80 (replaced by a mild -2% decay
    when the fitted polynomial slope is nonnegative), and the level at
    age 100 relative to the target depreciation.  Returns a list of three
    absolute errors.
    '''
    a, b, c = guesses
    first_point, coef1, coef2, coef3, ability_depreciation = params
    level_err = first_point - arc_tan_func(80, a, b, c)
    poly_slope = 3 * coef3 * 80 ** 2 + 2 * coef2 * 80 + coef1
    if poly_slope < 0:
        slope_err = poly_slope * first_point - arc_tan_deriv_func(80, a, b, c)
    else:
        slope_err = -.02 * first_point - arc_tan_deriv_func(80, a, b, c)
    tail_err = ability_depreciation * first_point - arc_tan_func(100, a, b, c)
    return [np.abs(level_err), np.abs(slope_err), np.abs(tail_err)]
def arc_tan_fit(first_point, coef1, coef2, coef3, ability_depreciation, init_guesses):
    '''
    Fit an arctan tail to extrapolate ability over ages 81-100, matching the
    level and slope at age 80 and the depreciated level at age 100.
    Returns the fitted curve evaluated at the 20 integer ages 81..100.
    '''
    fit_params = [first_point, coef1, coef2, coef3, ability_depreciation]
    a, b, c = opt.fsolve(arc_error, init_guesses, fit_params)
    old_ages = np.linspace(81, 100, 20)
    return arc_tan_func(old_ages, a, b, c)
def get_e(S, J, starting_age, ending_age, bin_weights, omega_SS, flag_graphs):
    '''
    Build the SxJ ability matrix: fitted polynomial profiles for ages
    21-80, an arctan extrapolation for ages 81-100, then a normalization
    so the population-weighted sum of ability is one.
    Inputs:
        S = Number of age cohorts (scalar)
        J = Number of ability levels by age (scalar)
        starting_age = age of first age cohort (scalar)
        ending_age = age of last age cohort (scalar)
        bin_weights = ability weights (Jx1 array)
        omega_SS = population weights (Sx1 array)
        flag_graphs = Graph flags or not (bool)
    Output:
        e = ability levels for each age cohort, normalized so
            the weighted sum is one (SxJ array)
    NOTE(review): assumes S >= 61 and J == 7 -- the first 60 rows are filled
    from the 60x7 income_profiles table and the hard-coded guesses below
    cover exactly 7 ability groups.  Confirm with callers.
    '''
    # Ages 21-80 come straight from the module-level fitted profiles.
    e_short = income_profiles
    e_final = np.ones((S, J))
    e_final[:60, :] = e_short
    e_final[60:, :] = 0.0
    # This following variable is what percentage of ability at age 80 ability falls to at age 100.
    # In general, we wanted people to lose half of their ability over a 20 year period.  The first
    # entry is .47, though, because nothing higher would converge.  The second to last is .7 because this group
    # actually has a slightly higher ability at age 80 then the last group, so this makes it decrease more so it
    # ends monotonic.
    ability_depreciation = np.array([.47, .5, .5, .5, .5, .7, .5])
    # Initial guesses for the arctan.  They're pretty sensitive.
    init_guesses = np.array([[58, 0.0756438545595, -5.6940142786],
                             [27, 0.069, -5],
                             [35, .06, -5],
                             [37, 0.339936555352, -33.5987329144],
                             [70.5229181668, 0.0701993896947, -6.37746859905],
                             [35, .06, -5],
                             [35, .06, -5]])
    # Fit the arctan tail (ages 81-100) separately for each ability group.
    for j in xrange(J):
        e_final[60:, j] = arc_tan_fit(e_final[59, j], one[j], two[j], three[j], ability_depreciation[j], init_guesses[j])
    if flag_graphs:
        graph_income(S, J, e_final, starting_age, ending_age, bin_weights)
    # Normalize so the population-weighted sum of ability equals one.
    e_final /= (e_final * omega_SS.reshape(S, 1) * bin_weights.reshape(1, J)).sum()
    return e_final
|
This Walking Holiday explores the 4th section of the Via de la Plata, the longest of all Camino routes to Santiago de Compostela! It runs through the whole country from South to North, from the hot Andalucian plains to the green mountains of Galicia, from Moorish influenced buildings to traditional stone farmhouses and much more! This Camino in its whole offers most of Spain’s rich and mixed culture.
The well-marked Camino runs through the heart of Extremadura all the way to Carcaboso, yet this is where we observe the most-changing landscape, day after day. At the beginning, there is very little shade along the Way but as the landscape becomes a bit more undulating, woodland also starts to appear. Attractive towns with interesting heritage are also part of this section, with the likes of Caceres, Galisteo and Aldeanueva del Camino. As this section varies between very short walks and longer ones, this walking holiday is accessible to anybody with an average level of fitness.
Caceres still holds its old medieval town and walls, as if nothing has changed since the ancient times. It is a charming town where it is enjoyable to stroll around. To add to the relaxing atmosphere of the city, Caceres is also famous for being the home of numerous storks!
Leaving Caceres, we spend the first few kilometres walking along a main road, with flat crop fields on both side of the road, but with no shelter from the sun. Fortunately, this is a short walking day so you will have all afternoon to relax at your hotel in Casar de Caceres!
Soon after Casar de Caceres, we find ourselves walking along wide dirt tracks, through a flat landscape with endless skies and views. Along the Camino, we will also come across many “miliarios”, stone posts remaining from the old Roman road which the Via de la Plata follows.
Today can be seen as challenging as this is a long walk and there is also a steep climb a few kilometres after leaving Canaveral. However, this is a very enjoyable walk, as we spend most of the day walking through both woods and farmland.
Leave Galisteo by passing through the town’s beautiful fourteenth-century walls. It is then an easy and short stroll to Carcaboso, through a greener landscape than the rest of the region, thanks to several canals cutting through the area.
*Once in Carcaboso, a short transfer to your hotel in Plasencia.
Virtually every company flies to Madrid (can book return flights).
Sevilla airport with Aer Lingus, Iberia, Vueling, Ryanair or Easyjet.
1 – Madrid Airport, bus to Madrid bus station (Approx. 20min). Then bus to Merida (Approx. 4h30), 7 departures a day.
From Placensia, Cevasa bus to Madrid (Approx. 3hrs), 2-3 departures a day.
|
from PolyMesh import *
from Primitives3D import *
from OpenGL.GL import *
import sys
import re
import math
import time
import numpy as np
from scipy import sparse
import scipy.io as sio
from scipy.sparse.linalg import lsqr, cg, eigsh
import matplotlib.pyplot as plt
import os
# Locate a locally built libigl (optional dependency): if both the compiled
# extension directory and the libigl python bindings exist next to this file,
# put them on sys.path and import igl.  Functions below that use igl will
# raise NameError when this block is skipped.
# NOTE(review): the print-statement syntax means this module targets Python 2.
this_path = os.path.dirname(os.path.abspath(__file__))
if os.path.exists(this_path + '/ext/lib') and os.path.exists(this_path + '/ext/libigl/python'):
    sys.path.insert(0, this_path + '/ext/libigl/python/')
    sys.path.insert(0, this_path + '/ext/lib/')
    print "Importing IGL"
    import igl
#Quickly compute sparse Laplacian matrix with cotangent weights and Voronoi areas
#by doing many operations in parallel using NumPy
#VPos: N x 3 array of vertex positions
#ITris: M x 3 array of triangle indices
#anchorsIdx: List of anchor positions
def makeLaplacianMatrixCotWeights(VPos, ITris, anchorsIdx, anchorWeights = 1):
    """
    Quickly compute the sparse Laplacian matrix with cotangent weights and
    Voronoi areas, doing the per-triangle work vectorized in NumPy.

    Parameters
    ----------
    VPos : ndarray (N, 3)
        Vertex positions.
    ITris : ndarray (M, 3)
        Triangle vertex indices.
    anchorsIdx : list of int
        Anchor vertex indices.
    anchorWeights : float
        Weight placed on each appended anchor row (default 1).

    Returns
    -------
    scipy.sparse.csr_matrix, shape (N + len(anchorsIdx), N)
        Area-scaled cotangent Laplacian with one extra row per anchor.
    """
    N = VPos.shape[0]
    M = ITris.shape[0]
    # Two entries per directed edge per triangle (6 per triangle); an edge
    # shared by two triangles contributes twice and coo_matrix sums the
    # duplicates.  Fix: index arrays are integer-typed -- SciPy rejects
    # float-typed row/column index arrays.
    I = np.zeros(M*6, dtype=np.int64)
    J = np.zeros(M*6, dtype=np.int64)
    V = np.zeros(M*6)
    # Per-vertex accumulators: incident triangle areas and incident counts
    IA = np.zeros(M*3, dtype=np.int64)
    VA = np.zeros(M*3)  # Incident areas
    VC = 1.0*np.ones(M*3)  # Number of incident triangles
    # Step 1: Compute cotangent weights
    for shift in range(3):
        # For all 3 shifts of the roles of triangle vertices
        # to compute different cotangent weights
        [i, j, k] = [shift, (shift+1)%3, (shift+2)%3]
        dV1 = VPos[ITris[:, i], :] - VPos[ITris[:, k], :]
        dV2 = VPos[ITris[:, j], :] - VPos[ITris[:, k], :]
        Normal = np.cross(dV1, dV2)
        # Cotangent is dot product / mag cross product
        NMag = np.sqrt(np.sum(Normal**2, 1))
        cotAlpha = np.sum(dV1*dV2, 1)/NMag
        I[shift*M*2:shift*M*2+M] = ITris[:, i]
        J[shift*M*2:shift*M*2+M] = ITris[:, j]
        V[shift*M*2:shift*M*2+M] = cotAlpha
        I[shift*M*2+M:shift*M*2+2*M] = ITris[:, j]
        J[shift*M*2+M:shift*M*2+2*M] = ITris[:, i]
        V[shift*M*2+M:shift*M*2+2*M] = cotAlpha
        if shift == 0:
            # Accumulate this triangle's area contribution to each corner
            for corner in range(3):
                IA[corner*M:(corner+1)*M] = ITris[:, corner]
                VA[corner*M:(corner+1)*M] = 0.5*NMag
    # Step 2: Create laplacian matrix
    L = sparse.coo_matrix((V, (I, J)), shape=(N, N)).tocsr()
    # Create the diagonal by summing the rows and subtracting off the nondiagonal entries
    L = sparse.dia_matrix((L.sum(1).flatten(), 0), L.shape) - L
    # Scale each row by the incident areas (Voronoi area ~ total area / 3)
    Areas = sparse.coo_matrix((VA, (IA, IA)), shape=(N, N)).tocsr()
    Areas = Areas.todia().data.flatten()
    Counts = sparse.coo_matrix((VC, (IA, IA)), shape=(N, N)).tocsr()
    Counts = Counts.todia().data.flatten()
    RowScale = sparse.dia_matrix((3*Counts/Areas, 0), L.shape)
    L = L.T.dot(RowScale).T
    # Step 3: Append one row per anchor, weighted by anchorWeights
    L = L.tocoo()
    I = L.row.tolist()
    J = L.col.tolist()
    V = L.data.tolist()
    # Fix: wrap range() in list() so the concatenation also works on
    # Python 3, where range() no longer returns a list.
    I = I + list(range(N, N+len(anchorsIdx)))
    J = J + list(anchorsIdx)
    V = V + [anchorWeights]*len(anchorsIdx)
    L = sparse.coo_matrix((V, (I, J)), shape=(N+len(anchorsIdx), N)).tocsr()
    return L
#Use simple umbrella weights instead of cotangent weights
#VPos: N x 3 array of vertex positions
#ITris: M x 3 array of triangle indices
#anchorsIdx: List of anchor positions
def makeLaplacianMatrixUmbrellaWeights(VPos, ITris, anchorsIdx, anchorWeights = 1):
    """
    Compute the sparse graph Laplacian with uniform (umbrella) weights:
    every neighbor contributes weight 1 and the diagonal holds the vertex
    degree.  VPos is accepted for interface symmetry with the cotangent
    version; only the connectivity in ITris is used.

    Parameters
    ----------
    VPos : ndarray (N, 3)
        Vertex positions (unused except for N).
    ITris : ndarray (M, 3)
        Triangle vertex indices.
    anchorsIdx : list of int
        Anchor vertex indices.
    anchorWeights : float
        Weight placed on each appended anchor row (default 1).

    Returns
    -------
    scipy.sparse.csr_matrix, shape (N + len(anchorsIdx), N)
    """
    N = VPos.shape[0]
    M = ITris.shape[0]
    # Fix: integer dtype for index arrays -- SciPy rejects float indices.
    I = np.zeros(M*6, dtype=np.int64)
    J = np.zeros(M*6, dtype=np.int64)
    V = np.ones(M*6)
    # Step 1: One entry per directed edge of every triangle
    for shift in range(3):
        # For all 3 shifts of the roles of triangle vertices
        [i, j, k] = [shift, (shift+1)%3, (shift+2)%3]
        I[shift*M*2:shift*M*2+M] = ITris[:, i]
        J[shift*M*2:shift*M*2+M] = ITris[:, j]
        I[shift*M*2+M:shift*M*2+2*M] = ITris[:, j]
        J[shift*M*2+M:shift*M*2+2*M] = ITris[:, i]
    # Step 2: Create laplacian matrix
    L = sparse.coo_matrix((V, (I, J)), shape=(N, N)).tocsr()
    # Clamp duplicate-edge sums down to 1 so every neighbor has unit weight
    L[L > 0] = 1
    # Create the diagonal by summing the rows and subtracting off the nondiagonal entries
    L = sparse.dia_matrix((L.sum(1).flatten(), 0), L.shape) - L
    # Step 3: Append one row per anchor, weighted by anchorWeights
    L = L.tocoo()
    I = L.row.tolist()
    J = L.col.tolist()
    V = L.data.tolist()
    # Fix: list(range(...)) keeps the concatenation valid on Python 3.
    I = I + list(range(N, N+len(anchorsIdx)))
    J = J + list(anchorsIdx)
    V = V + [anchorWeights]*len(anchorsIdx)
    L = sparse.coo_matrix((V, (I, J)), shape=(N+len(anchorsIdx), N)).tocsr()
    return L
def solveLaplacianMatrix(L, deltaCoords, anchors, anchorWeights = 1):
    """
    Solve the augmented Laplacian system L*X ~= [deltaCoords; w*anchors]
    via libigl's Cholesky factorization of the normal-equation matrix.
    Inputs:
        L: scipy sparse (N+numAnchors) x N Laplacian with anchor rows
        deltaCoords: N x 3 delta coordinates
        anchors: numAnchors x 3 anchor positions
        anchorWeights: scalar weight applied to the anchor rows of the RHS
    Returns the solved positions as a numpy array.
    NOTE(review): Q = L^T L but solve() is handed y, not L^T y -- verify
    this matches the libigl solver's expected right-hand side.
    """
    y = np.concatenate((deltaCoords, anchorWeights*anchors), 0)
    y = np.array(y, np.float64)
    # Round-trip the scipy matrix through COO triplets into an Eigen sparse matrix
    coo = L.tocoo()
    coo = np.vstack((coo.row, coo.col, coo.data)).T
    coo = igl.eigen.MatrixXd(np.array(coo, dtype=np.float64))
    LE = igl.eigen.SparseMatrixd()
    LE.fromCOO(coo)
    # Normal equations matrix
    Q = LE.transpose()*LE
    start_time = time.time()
    #solver = igl.eigen.SimplicialLLTsparse(Q)
    solver = igl.eigen.CholmodSupernodalLLT(Q)
    ret = solver.solve(igl.eigen.MatrixXd(y))
    end_time = time.time()
    print 'factorization elapsed time:',end_time-start_time,'seconds'
    return np.array(ret)
#Make a QP solver with hard constraints
def makeLaplacianMatrixSolverIGLHard(VPos, ITris, anchorsIdx):
    """
    (Unfinished) Build a hard-constrained QP solver for Laplacian mesh
    editing using libigl's cotangent and Voronoi mass matrices.
    NOTE(review): the function body stops at the TODO and implicitly
    returns None; anchorsIdx is currently unused.
    """
    VPosE = igl.eigen.MatrixXd(VPos)
    ITrisE = igl.eigen.MatrixXi(ITris)
    L = igl.eigen.SparseMatrixd()
    M = igl.eigen.SparseMatrixd()
    M_inv = igl.eigen.SparseMatrixd()
    igl.cotmatrix(VPosE,ITrisE,L)
    igl.massmatrix(VPosE,ITrisE,igl.MASSMATRIX_TYPE_VORONOI,M)
    igl.invert_diag(M,M_inv)
    # Area-normalized Laplace-Beltrami operator
    L = M_inv*L
    deltaCoords = L*VPosE
    deltaCoords = np.array(deltaCoords)
    #Bi-laplacian
    Q = L.transpose()*L
    #Linear term with delta coordinates
    #TODO: Finish this
    #return (L, solver, deltaCoords)
def makeLaplacianMatrixSolverIGLSoft(VPos, ITris, anchorsIdx, anchorWeights, makeSolver = True):
    """
    Build the soft-constrained Laplacian editing system with libigl:
    the cotangent Laplacian L, the inverse Voronoi mass matrix M_inv, the
    mesh's delta coordinates, and (optionally) a prefactored Cholesky
    solver for the bi-Laplacian plus diagonal anchor penalty terms.
    Inputs:
        VPos: N x 3 vertex positions
        ITris: M x 3 triangle indices
        anchorsIdx: list of anchor vertex indices
        anchorWeights: scalar penalty weight per anchor
        makeSolver: whether to factor the system now (default True)
    Returns (L, M_inv, solver_or_None, deltaCoords as numpy array).
    """
    VPosE = igl.eigen.MatrixXd(VPos)
    ITrisE = igl.eigen.MatrixXi(ITris)
    '''
    #Doing this check slows things down by more than a factor of 2 (convert to numpy to make faster?)
    for f in range(ITrisE.rows()):
        v_list = ITrisE.row(f)
        v1 = VPosE.row(v_list[0])
        v2 = VPosE.row(v_list[1])
        v3 = VPosE.row(v_list[2])
        if (v1-v2).norm() < 1e-10 and (v1-v3).norm() < 1e-10 and (v2-v3).norm() < 1e-10:
            print 'zero area triangle!',f
    '''
    L = igl.eigen.SparseMatrixd()
    M = igl.eigen.SparseMatrixd()
    M_inv = igl.eigen.SparseMatrixd()
    igl.cotmatrix(VPosE,ITrisE,L)
    igl.massmatrix(VPosE,ITrisE,igl.MASSMATRIX_TYPE_VORONOI,M)
    #np.set_printoptions(threshold='nan')
    #print 'what is M?',M.diagonal()
    igl.invert_diag(M,M_inv)
    #L = M_inv*L
    # Delta coordinates: area-normalized Laplacian applied to positions
    deltaCoords = (M_inv*L)*VPosE
    #TODO: What to do with decaying_anchor_weights?
    '''
    anchor_dists = []
    for i in range(VPosE.rows()):
        anchor_dists.append(min([ (VPosE.row(i)-VPosE.row(j)).norm() for j in anchorsIdx ]))
    max_anchor_dist = max(anchor_dists)
    # assume linear weighting for anchor weights -> we are 0 at the anchors, anchorWeights at max_anchor_dist
    decaying_anchor_weights = []
    for anchor_dist in anchor_dists:
        decaying_anchor_weights.append(anchorWeights*(anchor_dist/max_anchor_dist))
    '''
    solver = None
    if makeSolver:
        # Bi-Laplacian quadratic form: L * M^-2 * L
        Q = L*(M_inv*M_inv)*L
        #Now add in sparse constraints
        diagTerms = igl.eigen.SparseMatrixd(VPos.shape[0], VPos.shape[0])
        # anchor points: penalty of anchorWeights on each anchored vertex
        for a in anchorsIdx:
            diagTerms.insert(a, a, anchorWeights)
        # off points
        '''
        for adx,decay_weight in enumerate(decaying_anchor_weights):
            if decay_weight == 0:
                diagTerms.insert(adx, adx, anchorWeights)
            else:
                diagTerms.insert(adx, adx, decay_weight)
        '''
        Q = Q + diagTerms
        Q.makeCompressed()
        start_time = time.time()
        solver = igl.eigen.SimplicialLLTsparse(Q)
        #solver = igl.eigen.CholmodSupernodalLLT(Q)
        end_time = time.time()
        print 'factorization elapsed time:',end_time-start_time,'seconds'
    return (L, M_inv, solver, np.array(deltaCoords))
#solver: Eigen simplicialLLT solver that has Laplace Beltrami + anchors
#deltaCoords: numpy array of delta coordinates
#anchors: numpy array of anchor positions
#anchorWeights: weight of anchors
def solveLaplacianMatrixIGLSoft(solver, L, M_inv, deltaCoords, anchorsIdx, anchors, anchorWeights):
    """
    Back-substitute through the prefactored soft-constraint system built by
    makeLaplacianMatrixSolverIGLSoft.
    Inputs:
        solver: Eigen SimplicialLLT solver of (bi-Laplacian + anchor terms)
        L, M_inv: sparse operators from the factorization step
        deltaCoords: numpy array of delta coordinates
        anchorsIdx: indices of the anchored vertices
        anchors: numpy array of anchor positions
        anchorWeights: weight of anchors
    Returns the solved vertex positions as a numpy array.
    """
    print "solveLaplacianMatrixIGLSoft: anchorWeights = %g"%anchorWeights
    # Right-hand side: L * M^-1 * delta, plus the weighted anchor positions
    # added into the anchored rows.
    y = np.array(L*M_inv*igl.eigen.MatrixXd(np.array(deltaCoords, dtype=np.float64)))
    y[anchorsIdx] += anchorWeights*anchors
    y = igl.eigen.MatrixXd(y)
    ret = solver.solve(y)
    return np.array(ret)
# Demo: reload precomputed anchors/delta coordinates from a MATLAB file and
# solve for the deformed cow mesh.  Deliberately disabled -- rename
# '__main__2' to '__main__' to run.
if __name__ == '__main__2':
    anchorWeights = 10000
    m = PolyMesh()
    m.loadOffFile("cow.off")
    m.performDisplayUpdate()
    X = sio.loadmat("anchors.mat")
    anchors = X['anchors']
    anchorsIdx = X['anchorsIdx'].flatten().tolist()
    deltaCoords = X['deltaCoords']
    L = makeLaplacianMatrixCotWeights(m.VPos, m.ITris, anchorsIdx, anchorWeights)
    m.VPos = solveLaplacianMatrix(L, deltaCoords, anchors, anchorWeights)
    m.saveOffFile("LapCow.off")

# Demo: pick 30 random anchors on a sphere, scale them 5x, and solve.
# Also disabled; rename '__main__3' to '__main__' to run.
if __name__ == '__main__3':
    anchorWeights = 100
    m = getSphereMesh(1, 2)
    print "BBox Before: ", m.getBBox()
    m.performDisplayUpdate()
    anchorsIdx = np.random.randint(0, len(m.vertices), 30).tolist()
    L = makeLaplacianMatrixCotWeights(m.VPos, m.ITris, anchorsIdx, anchorWeights)
    sio.savemat("L.mat", {"L":L})
    # Delta coordinates from the non-anchor rows of L
    deltaCoords = L.dot(m.VPos)[0:len(m.vertices), :]
    anchors = m.VPos[anchorsIdx, :]
    anchors = anchors*5
    m.VPos = solveLaplacianMatrix(L, deltaCoords, anchors, anchorWeights)
    print "BBox After:", m.getBBox()
    m.saveOffFile("LapSphere.off")
|
This Used to be Fields explored the history of the Becontree Estate in Dagenham, east London, through a new archive created by residents and a mural by artist Chad McCail.
The Becontree Estate was built in the 1920s and 1930s to re-house people during the slum clearances in east London and to house soldiers returning from the First World War. It is one of the most striking examples of the scale of social change occurring in London at that time and was the largest housing estate in the world at the time of construction.
A programme of events was led by Historypin and supported by the London Borough of Barking and Dagenham Archives to enable local people and organisations to contribute photographs and stories for an online archive about Becontree.
This collaborative process of discovery, sharing and documentation culminated in a new permanent public work of art on the Becontree Estate by Lanarkshire-based artist Chad McCail. McCail was selected by local residents from a shortlist of three artists.
McCail’s first ever mural presented a multi-layered history of the Becontree Estate from its inception in 1921 to the current day, informed by his many conversations with local people. The internal structure of the work was a winding Becontree street. In the foreground he represented the changing minutiae of day-to-day life, for example the first inhabitants with the clear signs of the ravages of poverty and WW1, the arrival of Ford Motors followed by its rapid departure, the impact of Right to Buy in the 1980s on the facades of houses, and other significant cultural and political events such as Gandhi’s visit to Kingsley Hall. Also included in the mural were the personal stories of early and current residents of the estate and depictions of local people McCail met throughout his time in the borough.
The mural is now on permanent public display at Valence House Museum, Archives and Local Studies Centre. Admission is free and opening hours are Monday to Saturday 10am to 4pm (except public holidays).
Visit the Historypin website to explore the archive and find out how to share and contribute your memories of the area.
This Used to be Fields was commissioned in partnership with the Barbican with funding from the Arts Council of England and additional support from Creative Barking and Dagenham.
Chad McCail (born 1961 in Manchester) studied English at the University of Kent and obtained a BA in Fine Art from Goldsmiths, University of London in 1989. His solo exhibitions include: Systemic, Northern Gallery of Contemporary Art, Sunderland (2010); We are not dead, Gallery of Modern Art, Glasgow (2006); Food, Shelter, Clothing, Fuel, Baltimore Museum of Art, Baltimore (2004); and Life is driven by the desire for pleasure, The Fruitmarket Gallery, Edinburgh (2003). His group exhibitions include Eye on Europe: Prints, Books and Multiples, 1960 to Now, MoMA, New York (2006); British Art Show 5, UK touring exhibition (2000); and Becks Futures, ICA, London (2000). He lives and works in Thankerton, South Lanarkshire.
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Classes for interacting with Kubernetes API
"""
import copy
import kubernetes.client.models as k8s
from airflow.kubernetes.k8s_model import K8SModel
class VolumeMount(K8SModel):
    """
    Initialize a Kubernetes Volume Mount. Used to mount pod level volumes to
    running container.

    :param name: the name of the volume mount
    :type name: str
    :param mount_path: path inside the container at which the volume is mounted
    :type mount_path: str
    :param sub_path: subpath within the volume mount
    :type sub_path: Optional[str]
    :param read_only: whether to access pod with read-only mode
    :type read_only: bool
    """
    def __init__(self, name, mount_path, sub_path, read_only):
        self.name = name
        self.mount_path = mount_path
        self.sub_path = sub_path
        self.read_only = read_only

    def to_k8s_client_obj(self) -> k8s.V1VolumeMount:
        """
        Converts to k8s object.

        :return: Volume Mount k8s object
        """
        return k8s.V1VolumeMount(
            name=self.name,
            mount_path=self.mount_path,
            sub_path=self.sub_path,
            read_only=self.read_only
        )

    def attach_to_pod(self, pod: k8s.V1Pod) -> k8s.V1Pod:
        """
        Attaches this volume mount to the first container of a copy of the
        pod; the input pod is left unmodified.

        :param pod: the pod to attach to
        :return: Copy of the Pod object with the volume mount appended
        """
        cp_pod = copy.deepcopy(pod)
        volume_mount = self.to_k8s_client_obj()
        # Bug fix: read the list from the deep copy, not from the source pod.
        # The previous code assigned pod.spec.containers[0].volume_mounts (the
        # ORIGINAL pod's list) into the copy, so the append below mutated the
        # caller's pod as a side effect.
        cp_pod.spec.containers[0].volume_mounts = cp_pod.spec.containers[0].volume_mounts or []
        cp_pod.spec.containers[0].volume_mounts.append(volume_mount)
        return cp_pod
|
Cheap Jerseys Wholesale - 美容保養 - 維ni熊~時尚交流區 - Powered by Discuz!
Xie said China supports the UN in playing a leading mediation role and is willing to make positive contributions to the talks to achieve concrete results as soon as possible.Still, he offered few details about how that approach would differ substantively from what the U."Paul LeBlanc, president of the university, said: "Amy is the epitome of a lifelong learner, and my hope is that her story will remind others that it's never too late to follow their dreams or learn something new. woman has graduated from college with top marks and is pursuing a master's degree. should quickly pull out, but he also campaigned on a vow to start winning wars. Members of the cast of Lipstick Under My Burkha arrive at the The 29th Tokyo International Film Festival on October 25, 2016 at Roppongi Hills Arena in Tokyo, Japan. No one was remotely upset or distressed by it."Of course I would have loved no cuts, but the FCAT has been very fair and clear."We had met at Cheap NFL Jerseys China Wholesale the restaurant to gather lawyers like Zhou and Li, 'citizen movement' leaders like Hu and 'doers' like me to discuss how to topple the current regime, we wanted to overthrow the CPC," Zhai Cheap Jerseys From China confessed, adding that they reviewed the subversive activities of the previous year, and formulated plans for 2015. 6 (Xinhua)France and the United States came out the only two taking six points after two rounds of robin at the Algarve Cup, while "Group of Death" underdogs China down to bottom with a 20 defeat to Germany here on Friday.It is also compulsory to notice the player’s injuries, medical conditions and difficulties within the team.Zhai was first exposed to Wholesale NFL Jerseys China concepts such as the "color revolutions" and "peaceful transition" online, the court said in a statement.
GMT+8, 2019-4-24 12:39, Processed in 0.039753 second(s), 6 queries.
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import setuptools

# Release version of this checker package; bump when publishing.
version = '1.0.0'

# Package metadata. The entry points below register each checker class
# with flake8 (via the 'flake8.extension' group) so the checks run as
# part of a normal lint pass over rpc-openstack.
setuptools.setup(
    name='rpco-hacking-checks',
    author='Rackspace Private Cloud',
    description='Hacking/Flake8 checks for rpc-openstack',
    version=version,
    install_requires=['hacking'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
    ],
    # All checks live in the single rpco_checks module.
    py_modules=['rpco_checks'],
    provides=['rpco_checks'],
    entry_points={
        'flake8.extension': [
            'rpco.git_title_bug = rpco_checks:OnceGitCheckCommitTitleBug',
            ('rpco.git_title_length = '
             'rpco_checks:OnceGitCheckCommitTitleLength'),
            ('rpco.git_title_period = '
             'rpco_checks:OnceGitCheckCommitTitlePeriodEnding'),
        ]
    },
)
|
Oakwood Properties is adding value to its historic properties. Photo by Gerard O'Brien.
While changes to earthquake-strengthening rules caused controversy, Oakwood Properties director David Marsh sees them as an opportunity.
It was a good chance to renew Dunedin's heritage building stock and give buildings a new lease of life, Mr Marsh said.
''We've got a positive view of it. It's all pretty exciting. Some people are running from it; I think you've got to embrace it,'' he said.
Oakwood Properties was spending ''millions'' upgrading some of its buildings, including the Robert Burns Hotel and adjacent properties in George St, including the former Johnson's Fish Shop.
The Robert Burns building needed ''a bit of work'' and the upshot of both the renovation and the earthquake strengthening would be a safe, historic building that would be ''set for another 100 years'', he said.
The city's historic buildings were important and Oakwood Properties would ''do our bit'' to add value to them. Business owners had a responsibility to ''get it right''.
The former Johnson's Fish Shop was noted in a Dunedin City Council scheduled heritage place report as having national significance on the basis of its unusual architecture.
It has been described as a rare example of neo-Byzantine architecture, a style from the mid-19th century.
The chequered history of the buildings - home to everything from a billiard saloon to a fruiterer - was rich. Foxys nightclub was ''the late-night place to go [in the 1980s]'', Mr Marsh said.
But now the mix of students accommodated upstairs and hospitality businesses below was a good combination, with students breathing life into the area, he said.
|
from __future__ import absolute_import, unicode_literals
from ..event.row_wrapper import InsertEventRow, UpdateEventRow, DeleteEventRow
class IEventHandler(object):
    """Base class for MySQL binlog row-event handlers.

    The ``on_*_raw`` methods receive raw replication events, wrap each
    affected row in the matching ``*EventRow`` class and dispatch to the
    corresponding ``on_insert``/``on_update``/``on_delete`` hook.
    Subclasses override those hooks (and optionally ``close``).
    """

    def on_insert_raw(self, ev_id, ev):
        """Unpack a raw insert event and forward it to ``on_insert``."""
        affected_rows = [
            InsertEventRow(ev_id=ev_id, ev=ev, new_values=row['values'])
            for row in ev.rows
        ]
        self.on_insert(ev_id, ev.packet.timestamp, ev.schema, ev.table,
                       affected_rows)

    def on_update_raw(self, ev_id, ev):
        """Unpack a raw update event and forward it to ``on_update``."""
        affected_rows = [
            UpdateEventRow(
                ev_id=ev_id,
                ev=ev,
                old_values=row['before_values'],
                new_values=row['after_values'],
            )
            for row in ev.rows
        ]
        self.on_update(ev_id, ev.packet.timestamp, ev.schema, ev.table,
                       affected_rows)

    def on_delete_raw(self, ev_id, ev):
        """Unpack a raw delete event and forward it to ``on_delete``."""
        # NOTE: a stray module-level ``pass`` that followed this method has
        # been removed; it had no effect.
        affected_rows = [
            DeleteEventRow(ev_id=ev_id, ev=ev, old_values=row['values'])
            for row in ev.rows
        ]
        self.on_delete(ev_id, ev.packet.timestamp, ev.schema, ev.table,
                       affected_rows)

    def on_insert(self, ev_id, ev_timestamp, schema, table, affected_rows):
        """Hook called for INSERT events; override in subclasses.

        :param ev_id: unique string id for each event
        :param ev_timestamp: unix epoch for the time the event happened
        :param schema: affected database name
        :param table: affected table name
        :param affected_rows: list of
            mysqlevp.event.row_wrapper.InsertEventRow; each instance has a
            ``new_values`` attribute, a dict (keyed by MySQL column name)
            of the newly inserted row::

                for row in affected_rows:
                    do_something(row.new_values)
        """
        pass

    def on_update(self, ev_id, ev_timestamp, schema, table, affected_rows):
        """Hook called for UPDATE events; override in subclasses.

        :param ev_id: unique string id for each event
        :param ev_timestamp: unix epoch for the time the event happened
        :param schema: affected database name
        :param table: affected table name
        :param affected_rows: list of
            mysqlevp.event.row_wrapper.UpdateEventRow; each instance has
            ``new_values`` and ``old_values`` attributes, dicts (keyed by
            MySQL column name) of the new row and the replaced row::

                for row in affected_rows:
                    do_something(row.new_values, row.old_values)
        """
        pass

    def on_delete(self, ev_id, ev_timestamp, schema, table, affected_rows):
        """Hook called for DELETE events; override in subclasses.

        :param ev_id: unique string id for each event
        :param ev_timestamp: unix epoch for the time the event happened
        :param schema: affected database name
        :param table: affected table name
        :param affected_rows: list of
            mysqlevp.event.row_wrapper.DeleteEventRow; each instance has an
            ``old_values`` attribute, a dict (keyed by MySQL column name)
            of the deleted row::

                for row in affected_rows:
                    do_something(row.old_values)
        """
        pass

    def close(self):
        """Allow the handler to release resources; override if needed."""
        pass
|
Apartment Florence near San Lorenzo, Duomo, Central Station.
Full Description: Studio apartment in the center of Florence, in a beautiful area on a street full of typical little Italian shops. Near San Lorenzo, the Duomo and Santa Maria Novella Station.
This listing has been viewed 10668 times.
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Ole Krause-Sparmann
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function
import numpy
import scipy
import time
from scipy.spatial.distance import cdist
from nearpy.utils import numpy_array_from_list_or_numpy_array
from nearpy.utils.utils import unitvec
class RecallPrecisionExperiment(object):
    """
    Performs nearest neighbour recall experiments with custom vector data
    for all engines in the specified list.

    perform_experiment() returns a list of (recall, precision, search_time)
    tuples. These are the averaged values over all request vectors.
    search_time is the average retrieval/search time relative to the
    average exact search time.

    coverage_ratio determines how many of the vectors are used as query
    vectors for exact and approximated search. Because the search comparison
    overhead is quite large, it is best with large data sets (>10000) to
    use a low coverage_ratio (like 0.1) to make the experiment fast. A
    coverage_ratio of 0.1 makes the experiment use 10% of all the vectors
    as queries.
    """

    def __init__(self, N, vectors, coverage_ratio=0.2):
        """
        Performs exact nearest neighbour search on the data set.

        :param N: number of neighbours to retrieve per query vector
        :param vectors: numpy matrix with all the vectors as columns OR a
            python list containing the individual numpy vectors
        :param coverage_ratio: fraction of vectors used as query vectors
        """
        # We need a dict from vector string representation to index.
        self.vector_dict = {}
        self.N = N
        self.coverage_ratio = coverage_ratio

        # Get numpy array representation of input; normalise every vector
        # and store them as rows.
        numpy_vectors = numpy_array_from_list_or_numpy_array(vectors)
        self.vectors = numpy.vstack([unitvec(v) for v in numpy_vectors.T])

        # Build map from vector string representation to vector index.
        for index, v in enumerate(self.vectors):
            self.vector_dict[self.__vector_to_string(v)] = index

        # Determine the indices of query vectors used for comparison with
        # approximated search (spread evenly over the data set).
        query_count = int(numpy.floor(self.coverage_ratio *
                                      len(self.vectors)))
        self.query_indices = []
        for k in range(query_count):
            index = numpy.floor(k * (float(len(self.vectors)) / query_count))
            index = min(index, len(self.vectors) - 1)
            self.query_indices.append(int(index))

        print('\nStarting exact search (query set size=%d)...\n'
              % query_count)

        # For each query vector get the closest N neighbours.
        self.closest = {}
        self.exact_search_time_per_vector = 0.0

        for index in self.query_indices:
            v = self.vectors[index, numpy.newaxis]
            exact_search_start_time = time.time()
            D = cdist(v, self.vectors, 'euclidean')

            # BUGFIX: scipy.argsort (a re-export of numpy.argsort) was
            # deprecated and removed from SciPy; call numpy directly.
            # Column 0 is the query vector itself, so skip it.
            self.closest[index] = numpy.argsort(D)[0, 1:N + 1]

            # Save time needed for exact search.
            exact_search_time = time.time() - exact_search_start_time
            self.exact_search_time_per_vector += exact_search_time

        print('Done with exact search...\n')

        # Normalize search time.
        self.exact_search_time_per_vector /= float(len(self.query_indices))

    def perform_experiment(self, engine_list):
        """
        Performs nearest neighbour recall experiments with custom vector
        data for all engines in the specified list.

        Returns a list of (recall, precision, search_time) tuples. All are
        the averaged values over all request vectors. search_time is the
        average retrieval/search time relative to the average exact search
        time.
        """
        # We will fill this list with measures for all the engines.
        result = []

        # For each engine, first index vectors and then retrieve neighbours.
        for engine_idx, engine in enumerate(engine_list):
            print('Engine %d / %d' % (engine_idx, len(engine_list)))

            # Clean storage before indexing.
            engine.clean_all_buckets()

            avg_recall = 0.0
            avg_precision = 0.0
            avg_search_time = 0.0

            # Index all vectors and store them.
            for index, v in enumerate(self.vectors):
                engine.store_vector(v, 'data_%d' % index)

            # Look for N nearest neighbours for query vectors.
            for index in self.query_indices:
                # Get indices of the real nearest as set.
                real_nearest = set(self.closest[index])

                # We have to time the search.
                search_time_start = time.time()
                nearest = engine.neighbours(self.vectors[index])
                search_time = time.time() - search_time_start

                # For comparison we need their indices (as a set).
                nearest = set([self.__index_of_vector(x[0]) for x in nearest])

                # Remove query index from search result so that recall and
                # precision make sense in terms of "neighbours". If ONLY the
                # query vector is retrieved, we want recall to be zero.
                # BUGFIX: use discard() instead of remove() so an engine
                # that does not return the query vector itself no longer
                # raises KeyError.
                nearest.discard(index)

                # If the result list is empty, recall and precision are 0.0.
                if len(nearest) == 0:
                    recall = 0.0
                    precision = 0.0
                else:
                    # Intersection size normalised two ways gives the two
                    # measures for this query vector.
                    inter_count = float(len(real_nearest & nearest))
                    recall = inter_count / float(len(real_nearest))
                    precision = inter_count / float(len(nearest))

                avg_recall += recall
                avg_precision += precision
                avg_search_time += search_time

            # Normalize measures over the query set.
            avg_recall /= float(len(self.query_indices))
            avg_precision /= float(len(self.query_indices))
            avg_search_time /= float(len(self.query_indices))

            # Normalize search time with respect to exact search.
            avg_search_time /= self.exact_search_time_per_vector

            print(' recall=%f, precision=%f, time=%f' % (avg_recall,
                                                         avg_precision,
                                                         avg_search_time))
            result.append((avg_recall, avg_precision, avg_search_time))

        # Return list of (recall, precision, search_time) tuples.
        return result

    def __vector_to_string(self, vector):
        """ Returns string representation of vector. """
        return numpy.array_str(numpy.round(unitvec(vector), decimals=3))

    def __index_of_vector(self, vector):
        """ Returns index of specified vector from test data set. """
        return self.vector_dict[self.__vector_to_string(vector)]
|
Original sport-artistic performances and slackline workshops. | SLACKSHOW SLACKSHOW – Original sport-artistic performances and slackline workshops.
Original sport-artistic performances and slackline workshops. We can give you a breath-taking highline show high above your head or a shocking trickline show full of flips, rotations and combos. Our slackshow is perfect for all parties, corporate events, team buildings, conferences or gala evenings. Simply a perfect programme for your event.
We offer several performance options, so you can choose whichever best suits your event.
We already have a solid record of very successful events. Take a look at what they were like.
|
import calibre.ebooks.markdown.markdown as markdown
from calibre.ebooks.markdown.markdown import etree
# Fallback feed metadata used when the extension is not configured.
DEFAULT_URL = "http://www.freewisdom.org/projects/python-markdown/"
DEFAULT_CREATOR = "Yuri Takhteyev"
DEFAULT_TITLE = "Markdown in Python"
GENERATOR = "http://www.freewisdom.org/projects/python-markdown/markdown2rss"

# Maps month names as they appear in headings to two-digit month numbers.
# NOTE(review): the keys mix abbreviated ("Jan", "Feb") and full month
# names; a heading using "January" or "February" would raise KeyError in
# get_time() — confirm the source headings really use this mixed style.
month_map = { "Jan" : "01",
              "Feb" : "02",
              "March" : "03",
              "April" : "04",
              "May" : "05",
              "June" : "06",
              "July" : "07",
              "August" : "08",
              "September" : "09",
              "October" : "10",
              "November" : "11",
              "December" : "12" }
def get_time(heading):
    """Convert a "<Month> <day>, <year> - ..." heading into an RDF timestamp.

    Only the part before the first "-" is parsed; the time of day is fixed
    at 12:00:00 AM.
    """
    date_text = heading.split("-")[0]
    date_text = date_text.strip().replace(",", " ").replace(".", " ")
    month, day, year = date_text.split()
    return rdftime(" ".join((month_map[month], day, year, "12:00:00 AM")))
def rdftime(time):
    """Normalise a date-time string into ISO-8601 form with a -08:00 offset.

    Both ":" and "/" are treated as field separators; the first six fields
    are interpreted as year/month/day hour/minute/second.
    """
    fields = time.replace(":", " ").replace("/", " ").split()
    return "%s-%s-%sT%s:%s:%s-08:00" % (fields[0], fields[1], fields[2],
                                        fields[3], fields[4], fields[5])
def get_date(text):
    # Stub: date extraction is not implemented; always returns the
    # placeholder string "date".
    return "date"
class RssExtension (markdown.Extension):
    """Markdown extension that renders the parsed document as an RSS feed."""

    def extendMarkdown(self, md, md_globals):
        # Feed-level settings, overridable through the extension config.
        self.config = {'URL': [DEFAULT_URL, "Main URL"],
                       'CREATOR': [DEFAULT_CREATOR, "Feed creator's name"],
                       'TITLE': [DEFAULT_TITLE, "Feed title"]}

        md.xml_mode = True

        # Register the tree processor that rewrites the document tree
        # into an <rss> element.
        rss_processor = RssTreeProcessor(md)
        rss_processor.ext = self
        md.treeprocessors['rss'] = rss_processor

        # Emit a full XML document rather than an HTML fragment.
        md.stripTopLevelTags = 0
        md.docType = '<?xml version="1.0" encoding="utf-8"?>\n'
class RssTreeProcessor(markdown.treeprocessors.Treeprocessor):
    # Rewrites the parsed markdown tree into an RSS 2.0 tree: each h1-h5
    # heading starts a new <item>, and subsequent <p> elements become that
    # item's <description>.
    def run (self, root):
        # Build the feed skeleton: <rss version="2.0"><channel> plus the
        # configured feed-level metadata.
        rss = etree.Element("rss")
        rss.set("version", "2.0")
        channel = etree.SubElement(rss, "channel")
        for tag, text in (("title", self.ext.getConfig("TITLE")),
                          ("link", self.ext.getConfig("URL")),
                          ("description", None)):
            element = etree.SubElement(channel, tag)
            element.text = text
        for child in root:
            if child.tag in ["h1", "h2", "h3", "h4", "h5"]:
                # A heading opens a new item; its text is used both as the
                # title and (alphanumeric characters only) as the GUID.
                heading = child.text.strip()
                item = etree.SubElement(channel, "item")
                link = etree.SubElement(item, "link")
                link.text = self.ext.getConfig("URL")
                title = etree.SubElement(item, "title")
                title.text = heading
                guid = ''.join([x for x in heading if x.isalnum()])
                guidElem = etree.SubElement(item, "guid")
                guidElem.text = guid
                guidElem.set("isPermaLink", "false")
            elif child.tag in ["p"]:
                try:
                    # Relies on 'item' being bound by a preceding heading;
                    # a <p> before any heading raises UnboundLocalError
                    # and is deliberately skipped.
                    description = etree.SubElement(item, "description")
                except UnboundLocalError:
                    # Item not defined - moving on
                    pass
                else:
                    if len(child):
                        content = "\n".join([etree.tostring(node)
                                             for node in child])
                    else:
                        content = child.text
                    # Stash the content as raw CDATA so later serialisation
                    # does not escape the embedded HTML.
                    pholder = self.markdown.htmlStash.store(
                        "<![CDATA[ %s]]>" % content)
                    description.text = pholder
        return rss
def makeExtension(configs):
    # Standard python-markdown extension entry point.
    return RssExtension(configs)
|
Be in for the snow! This property SOLD on April 5, 2019 for $240,000. This condo has been well cared for and LOOKS BRAND NEW! EXTRA-large rooms! North Summit Chair lift around back of building, super location for all snow lovers! Kitchen has a breakfast nook with round table and extended counter top for EXTRA seating. Living room has gas fireplace and sliding glass door to balcony overlooking chair lift and the Laurel Highlands! All 3 bedrooms have a bath. Master bedroom has jetted soaking tub with separate toilet and double ceramic shower room. Ski side guest bedroom has double set of bunk beds & front guest bedroom has a double bed & a trundle bed! Tons of storage space in entry (large enough for bikes, golf clubs). GAS heat with central air conditioning! Property is being sold with furniture, accessories, and appliances including a stack washer and dryer. Seller is offering a one year home warranty. Subscribe to our monthly newsletter for area news. Contact us to see this and other properties like it. Map is approximate.
|
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import KickstartCommand
from pykickstart.errors import KickstartValueError, formatErrorMsg
from pykickstart.options import KSOptionParser
from pykickstart.i18n import _
class FC3_Firewall(KickstartCommand):
    """Handler for the FC3-era ``firewall`` kickstart command."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()

        # None means "not seen in the input"; True/False mirror
        # --enabled/--disabled.
        self.enabled = kwargs.get("enabled", None)
        self.ports = kwargs.get("ports", [])
        self.trusts = kwargs.get("trusts", [])

    def __str__(self):
        """Render the command back into kickstart syntax."""
        retval = KickstartCommand.__str__(self)
        if self.enabled is None:
            return retval
        if not self.enabled:
            return retval + "# Firewall configuration\nfirewall --disabled\n"

        # Well-known service words (s-c-kickstart may put these in the
        # ports list) get their dedicated switch; everything else is kept
        # as a plain port:proto entry.
        switch_for = {"ssh": " --ssh", "telnet": " --telnet",
                      "smtp": " --smtp", "http": " --http", "ftp": " --ftp"}
        extra = []
        plain_ports = []
        for port in self.ports:
            if port in switch_for:
                extra.append(switch_for[port])
            else:
                plain_ports.append(port)

        # All the port:proto strings go into one comma-separated list.
        portstr = ",".join(plain_ports)
        if portstr:
            portstr = " --port=" + portstr

        truststr = ",".join(self.trusts)
        if truststr:
            truststr = " --trust=" + truststr

        retval += "# Firewall configuration\nfirewall --enabled%s%s%s\n" \
                  % ("".join(extra), portstr, truststr)
        return retval

    def _getParser(self):
        def firewall_port_cb(option, opt_str, value, parser):
            # Normalise every comma-separated entry to port:proto form,
            # defaulting the protocol to tcp.
            for entry in value.split(","):
                entry = entry.strip()
                if ":" not in entry:
                    entry = "%s:tcp" % entry
                parser.values.ensure_value(option.dest, []).append(entry)

        op = KSOptionParser(mapping={"ssh": ["22:tcp"], "telnet": ["23:tcp"],
                                     "smtp": ["25:tcp"],
                                     "http": ["80:tcp", "443:tcp"],
                                     "ftp": ["21:tcp"]})
        op.add_option("--disable", "--disabled", dest="enabled",
                      action="store_false")
        op.add_option("--enable", "--enabled", dest="enabled",
                      action="store_true", default=True)
        op.add_option("--ftp", "--http", "--smtp", "--ssh", "--telnet",
                      dest="ports", action="map_extend")
        op.add_option("--high", deprecated=1)
        op.add_option("--medium", deprecated=1)
        op.add_option("--port", dest="ports", action="callback",
                      callback=firewall_port_cb, nargs=1, type="string")
        op.add_option("--trust", dest="trusts", action="append")
        return op

    def parse(self, args):
        """Parse the firewall line, rejecting any positional arguments."""
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
        if extra:
            mapping = {"command": "firewall", "options": extra}
            raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("Unexpected arguments to %(command)s command: %(options)s") % mapping))
        self._setToSelf(self.op, opts)
        return self
class F9_Firewall(FC3_Firewall):
    """F9 firewall command: drops the deprecated --high/--medium levels."""
    removedKeywords = FC3_Firewall.removedKeywords
    removedAttrs = FC3_Firewall.removedAttrs

    def _getParser(self):
        parser = FC3_Firewall._getParser(self)
        # The security-level options disappeared in Fedora 9.
        for removed in ("--high", "--medium"):
            parser.remove_option(removed)
        return parser
class F10_Firewall(F9_Firewall):
    """F10 firewall command: adds --service style options."""
    removedKeywords = F9_Firewall.removedKeywords
    removedAttrs = F9_Firewall.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        F9_Firewall.__init__(self, writePriority, *args, **kwargs)
        self.services = kwargs.get("services", [])

    def __str__(self):
        """Render the command, appending any --service entries."""
        if self.enabled is None:
            return ""

        retval = F9_Firewall.__str__(self)
        if not self.enabled:
            return retval

        svcstr = ",".join(self.services)
        if svcstr:
            svcstr = " --service=" + svcstr
        return retval.strip() + "%s\n" % svcstr

    def _getParser(self):
        def service_cb(option, opt_str, value, parser):
            # python2.4's optparse lacked action="append_const"; emulate it
            # by appending the bare option name when no value was given.
            dest_list = parser.values.ensure_value(option.dest, [])
            if not value:
                dest_list.append(opt_str[2:])
                return
            for svc in value.split(","):
                dest_list.append(svc.strip())

        op = F9_Firewall._getParser(self)
        op.add_option("--service", dest="services", action="callback",
                      callback=service_cb, nargs=1, type="string")
        # The legacy per-service switches now feed the services list too.
        for legacy in ("--ftp", "--http", "--smtp", "--ssh"):
            op.add_option(legacy, dest="services", action="callback",
                          callback=service_cb)
        op.add_option("--telnet", deprecated=1)
        return op
class F14_Firewall(F10_Firewall):
    # F14 removes the --telnet option (deprecated since F10) entirely.
    removedKeywords = F10_Firewall.removedKeywords + ["telnet"]
    removedAttrs = F10_Firewall.removedAttrs + ["telnet"]

    def _getParser(self):
        # Same parser as F10, minus the now-removed --telnet option.
        op = F10_Firewall._getParser(self)
        op.remove_option("--telnet")
        return op
class F20_Firewall(F14_Firewall):
    """F20 firewall command: adds --remove-service."""

    def __init__(self, writePriority=0, *args, **kwargs):
        F14_Firewall.__init__(self, writePriority, *args, **kwargs)
        self.remove_services = kwargs.get("remove_services", [])

    def _getParser(self):
        def remove_service_cb(option, opt_str, value, parser):
            # python2.4 does not support action="append_const" that we were
            # using for these options. Instead, we have to fake it by
            # appending whatever the option string is to the service list.
            if not value:
                parser.values.ensure_value(option.dest, []).append(opt_str[2:])
                return

            for p in value.split(","):
                p = p.strip()
                parser.values.ensure_value(option.dest, []).append(p)

        op = F14_Firewall._getParser(self)
        op.add_option("--remove-service", dest="remove_services",
                      action="callback", callback=remove_service_cb,
                      nargs=1, type="string")
        return op

    def __str__(self):
        """Render the command, appending any --remove-service entries."""
        if self.enabled is None:
            return ""

        # FIX (consistency): call the immediate parent class instead of
        # reaching past F14 to F10. F14 does not override __str__, so the
        # output is identical, but this no longer silently skips a level
        # of the class hierarchy.
        retval = F14_Firewall.__str__(self)
        if self.enabled:
            retval = retval.strip()
            svcstr = ",".join(self.remove_services)
            if len(svcstr) > 0:
                svcstr = " --remove-service=" + svcstr
            else:
                svcstr = ""
            return retval + "%s\n" % svcstr
        else:
            return retval
|
Check out our latest emote designs for the Destiny streamer Luminosity48. I’ve had a few inquiries on if emote design is something I enjoy doing. The short answer, is yes! The long answer, is that I’m constantly looking into better methods of design to improve my skills in emotes. It’s one of the most important staples of Twitch – and I like to treat it as such. Hopefully I’ll have a lot more examples coming in the near future.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2010 Derrick Moser <derrick_moser@yahoo.com>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the licence, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. You may also obtain a copy of the GNU General Public License
# from the Free Software Foundation by visiting their web site
# (http://www.fsf.org/) or by writing to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import glob
import os
import stat
import subprocess
import sys
# Name this script was invoked as; used to prefix diagnostic output.
app_path = sys.argv[0]

def logError(s):
    """Write a message, prefixed with the program name, to stderr."""
    sys.stderr.write(app_path + ': ' + s + '\n')
# this install script should not be used on Windows
if os.name == 'nt':
    logError('Wrong platform. Use scripts from the "windows-installer" directory instead.')
    sys.exit(1)

# reset the umask so files we create will have the expected permissions
os.umask(stat.S_IWGRP | stat.S_IWOTH)

# option defaults; '${...}' placeholders are expanded against the other
# options after command-line parsing
options = { 'destdir': '/',
            'prefix': '/usr/local/',
            'sysconfdir': '/etc/',
            'examplesdir': '${sysconfdir}',
            'mandir': '${prefix}/share/man/',
            'pythonbin': '/usr/bin/env python' }
# install by default; '--remove' switches to uninstall mode
install = True
# when True, skip the post-install/removal tasks (desktop/icon/omf updates)
files_only = False
# process --help option (python 2 print statement; this script targets
# python 2)
if len(sys.argv) == 2 and sys.argv[1] == '--help':
    print """Usage: %s [OPTION...]
Install or remove Diffuse.
Options:
  --help
    print this help text and quit
  --remove
    remove the program
  --destdir=PATH
    path to the installation's root directory
    default: %s
  --prefix=PATH
    common installation prefix for files
    default: %s
  --sysconfdir=PATH
    directory for installing read-only single-machine data
    default: %s
  --examplesdir=PATH
    directory for example configuration files
    default: %s
  --mandir=PATH
    directory for man pages
    default: %s
  --pythonbin=PATH
    command for python interpreter
    default: %s
  --files-only
    only install/remove files; skip the post install/removal tasks""" % (app_path, options['destdir'], options['prefix'], options['sysconfdir'], options['examplesdir'], options['mandir'], options['pythonbin'])
    sys.exit(0)
def components(s):
    """Return the non-empty path components of *s*."""
    # filter(None, ...) drops the empty strings produced by leading,
    # trailing or doubled separators.
    return list(filter(None, s.split(os.sep)))
def relpath(src, dst):
    """Return a relative path leading from directory *src* to *dst*."""
    src_parts = components(src)
    dst_parts = components(dst)

    # Length of the common prefix of both component lists.
    common = 0
    limit = min(len(src_parts), len(dst_parts))
    while common < limit and src_parts[common] == dst_parts[common]:
        common += 1

    # Climb out of the unshared part of src, then descend into dst.
    pieces = [os.pardir] * (len(src_parts) - common) + dst_parts[common:]
    return os.sep.join(pieces)
def replace(s, rules, i=0):
    """Apply ordered (pattern, replacement) text rules to *s*.

    Each rule is applied recursively to the fragments produced by earlier
    rules, so a replacement is never re-matched by its own pattern.
    """
    if i >= len(rules):
        return s
    pattern, repl = rules[i]
    pieces = [replace(piece, rules, i + 1) for piece in s.split(pattern)]
    return repl.join(pieces)
def createDirs(d):
    """Create directory *d* and any missing ancestors (like mkdir -p)."""
    path = os.sep
    for part in components(d):
        path = os.path.join(path, part)
        # Only create what does not exist yet; existing dirs are kept.
        if not os.path.isdir(path):
            os.mkdir(path)
def removeFile(f):
    """Delete file *f*, logging a warning instead of raising on failure."""
    try:
        os.remove(f)
    except OSError:
        logError('Error removing "%s".' % (f, ))
# install/remove sets of files
def processFiles(install, dst, src, template):
    # template maps glob patterns (relative to src) to either None (copy
    # the file verbatim) or a list of (old, new) substitution rules applied
    # to the file contents before writing.
    for k, v in template.items():
        for s in glob.glob(os.path.join(src, k)):
            # destination path mirrors the source tree under dst
            d = s.replace(src, dst, 1)
            if install:
                createDirs(os.path.dirname(d))
                # install file
                f = open(s, 'rb')
                c = f.read()
                f.close()
                if v is not None:
                    c = replace(c, v)
                print 'Installing %s' % (d, )
                f = open(d, 'wb')
                f.write(c)
                f.close()
                if k == 'bin/diffuse':
                    # turn on the execute bits
                    os.chmod(d, 0755)
            else:
                # remove file
                removeFile(d)
# compile .po files and install
def processTranslations(install, dst):
    # Each translations/<lang>.po is compiled with msgfmt and installed as
    # share/locale/<lang>/LC_MESSAGES/diffuse.mo (or removed on uninstall).
    for s in glob.glob('translations/*.po'):
        # language code is the filename between 'translations/' and '.po'
        lang = s[13:-3]
        d = os.path.join(dst, 'share/locale/%s/LC_MESSAGES/diffuse.mo' % (lang, ))
        if install:
            # install file
            try:
                print 'Installing %s' % (d, )
                createDirs(os.path.dirname(d))
                # a failed msgfmt run is reported but not fatal
                if subprocess.Popen(['msgfmt', '-o', d, s]).wait() != 0:
                    raise OSError()
            except OSError:
                logError('WARNING: Failed to compile "%s" localisation.' % (lang, ))
        else:
            # remove file
            removeFile(d)
# parse command line arguments
for arg in sys.argv[1:]:
    if arg == '--remove':
        install = False
    elif arg == '--files-only':
        files_only = True
    else:
        # try to match a '--key=value' option; the for/else reports
        # anything unrecognised and aborts
        for opt in options.keys():
            key = '--%s=' % (opt, )
            if arg.startswith(key):
                options[opt] = arg[len(key):]
                break
        else:
            logError('Unknown option "%s".' % (arg, ))
            sys.exit(1)
# expand ${prefix}/${sysconfdir} placeholders in the dependent options
for s in 'sysconfdir', 'examplesdir', 'mandir':
    for k in 'prefix', 'sysconfdir':
        if s != k:
            options[s] = options[s].replace('${%s}' % (k, ), options[k])

# validate inputs
if options['destdir'] == '':
    options['destdir'] = '/'
for opt in 'prefix', 'sysconfdir', 'examplesdir', 'mandir':
    p = options[opt]
    c = components(p)
    # reject '.' and '..' components so nothing is written outside destdir
    if os.pardir in c or os.curdir in c:
        logError('Bad value for option "%s".' % (opt, ))
        sys.exit(1)
    # normalise to an absolute path with a trailing separator
    c.insert(0, '')
    c.append('')
    options[opt] = os.sep.join(c)

# convenience aliases for the validated option values
destdir = options['destdir']
prefix = options['prefix']
sysconfdir = options['sysconfdir']
examplesdir = options['examplesdir']
mandir = options['mandir']
pythonbin = options['pythonbin']
# tell the user what we are about to do
if install:
    stage = 'install'
else:
    stage = 'removal'
print '''Performing %s with:
  destdir=%s
  prefix=%s
  sysconfdir=%s
  examplesdir=%s
  mandir=%s
  pythonbin=%s''' % (stage, destdir, prefix, sysconfdir, examplesdir, mandir, pythonbin)

# install files to prefix; the substitution rules rewrite the hard-coded
# /usr and /etc paths in the sources to the configured locations
processFiles(install, os.path.join(destdir, prefix[1:]), 'src/usr/', {
    'bin/diffuse': [ ("'../../etc/diffuserc'", repr(relpath(os.path.join(prefix, 'bin'), os.path.join(sysconfdir, 'diffuserc')))), ('/usr/bin/env python', pythonbin) ],
    'share/applications/diffuse.desktop': None,
    'share/diffuse/syntax/*.syntax': None,
    'share/gnome/help/diffuse/*/diffuse.xml': [ ('/usr/', prefix), ('/etc/', sysconfdir) ],
    'share/omf/diffuse/diffuse-*.omf': [ ('/usr/', prefix) ],
    'share/icons/hicolor/*/apps/diffuse.png': None
})
# install manual
processFiles(install, os.path.join(destdir, mandir[1:]), 'src/usr/share/man/', {
    'man1/diffuse.1': [ ('/usr/', prefix), ('/etc/', sysconfdir) ],
    '*/man1/diffuse.1': [ ('/usr/', prefix), ('/etc/', sysconfdir) ]
})
# install files to sysconfdir
processFiles(install, os.path.join(destdir, examplesdir[1:]), 'src/etc/', { 'diffuserc': [ ('/etc/', sysconfdir), ('../usr', relpath(sysconfdir, prefix)) ] })
# install translations
processTranslations(install, os.path.join(destdir, prefix[1:]))

if not install:
    # remove directories we own (leaf directories first)
    for s in 'share/omf/diffuse', 'share/gnome/help/diffuse/C', 'share/gnome/help/diffuse/ru', 'share/gnome/help/diffuse', 'share/diffuse/syntax', 'share/diffuse':
        d = os.path.join(destdir, os.path.join(prefix, s)[1:])
        try:
            os.rmdir(d)
        except OSError:
            logError('Error removing "%s".' % (d, ))

# do post install/removal tasks
if not files_only:
    print 'Performing post %s tasks.' % (stage, )
    cmds = [ [ 'update-desktop-database' ],
             [ 'gtk-update-icon-cache', os.path.join(destdir, os.path.join(prefix, 'icons/hicolor')[1:]) ] ]
    if install:
        cmds.append([ 'scrollkeeper-update', '-q', '-o', os.path.join(destdir, os.path.join(prefix, 'share/omf/diffuse')[1:]) ])
    else:
        cmds.append([ 'scrollkeeper-update', '-q' ])
    for c in cmds:
        # run each tool only if it can be found somewhere on PATH
        for p in os.environ['PATH'].split(os.pathsep):
            if os.path.exists(os.path.join(p, c[0])):
                print ' '.join(c)
                try:
                    if subprocess.Popen(c).wait() != 0:
                        raise OSError()
                except OSError:
                    logError('WARNING: Failed to update documentation database with %s.' % (c[0], ))
                break
        else:
            print 'WARNING: %s is not installed' % (c[0], )
|
Bradley Cooper Might Be the Next Indiana Jones?
Even though Indiana Jones and the Kingdom of the Crystal Skull is a movie that should never exist and should definitely never be discussed in life, EVER, Disney is really looking to move forward with another Indiana Jones movie. Considering the fact that Harrison Ford is 71 years old right now, they should get the gigantic boulder rolling stat unless they want the movie to be Indiana Jones and Tales of Assisted Living.
However, it looks like they may not have to: According to Latino Review, it's possible the studio has a man in mind to replace Ford if need be.
Now, the most obvious of choices is Shia LaBeouf as the successor to the role considering his part in the last Indy movie that shall not be named—but remember, that guy is not famous anymore. Cooper, on the other hand, has a few David O. Russell films under his belt, not to mention The Hangover franchise, so he's pretty famous.
|
# -*- coding: utf-8 -*-
# This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web)
# Copyright (C) 2018-2020 OzzieIsaacs
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from flask import render_template
from flask_babel import gettext as _
from flask import g
from werkzeug.local import LocalProxy
from flask_login import current_user
from . import config, constants, ub, logger, db, calibre_db
from .ub import User
log = logger.create()
def get_sidebar_config(kwargs=None):
    """Build the list of sidebar entries rendered by the base template.

    Each entry is a dict describing one sidebar link: its glyph, translated
    label, endpoint name, visibility bitmask, anonymous-access flag, page
    identifier and whether it appears on the configuration page.

    Args:
        kwargs: keyword arguments of the calling view. If a 'content' entry
            holds a user object, per-user sections are made configurable.
    """
    # Use an empty dict (not a list) as the default so the 'content'
    # membership test and subscript below stay valid for any input.
    kwargs = kwargs or {}
    if 'content' in kwargs:
        content = kwargs['content']
        content = isinstance(content, (User, LocalProxy)) and not content.role_anonymous()
    else:
        content = 'conf' in kwargs
    sidebar = []
    sidebar.append({"glyph": "glyphicon-book", "text": _('Books'), "link": 'web.index', "id": "new",
                    "visibility": constants.SIDEBAR_RECENT, 'public': True, "page": "root",
                    "show_text": _('Show recent books'), "config_show": False})
    sidebar.append({"glyph": "glyphicon-fire", "text": _('Hot Books'), "link": 'web.books_list', "id": "hot",
                    "visibility": constants.SIDEBAR_HOT, 'public': True, "page": "hot",
                    "show_text": _('Show Hot Books'), "config_show": True})
    # Admins get the dedicated download-list view; everyone else gets the
    # generic filtered books list. Everything else about the entry is equal,
    # so only the link differs (previously two identical branches).
    download_link = 'web.download_list' if current_user.role_admin() else 'web.books_list'
    sidebar.append({"glyph": "glyphicon-download", "text": _('Downloaded Books'), "link": download_link,
                    "id": "download", "visibility": constants.SIDEBAR_DOWNLOAD, 'public': (not g.user.is_anonymous),
                    "page": "download", "show_text": _('Show Downloaded Books'),
                    "config_show": content})
    sidebar.append(
        {"glyph": "glyphicon-star", "text": _('Top Rated Books'), "link": 'web.books_list', "id": "rated",
         "visibility": constants.SIDEBAR_BEST_RATED, 'public': True, "page": "rated",
         "show_text": _('Show Top Rated Books'), "config_show": True})
    sidebar.append({"glyph": "glyphicon-eye-open", "text": _('Read Books'), "link": 'web.books_list', "id": "read",
                    "visibility": constants.SIDEBAR_READ_AND_UNREAD, 'public': (not g.user.is_anonymous),
                    "page": "read", "show_text": _('Show read and unread'), "config_show": content})
    sidebar.append(
        {"glyph": "glyphicon-eye-close", "text": _('Unread Books'), "link": 'web.books_list', "id": "unread",
         "visibility": constants.SIDEBAR_READ_AND_UNREAD, 'public': (not g.user.is_anonymous), "page": "unread",
         "show_text": _('Show unread'), "config_show": False})
    sidebar.append({"glyph": "glyphicon-random", "text": _('Discover'), "link": 'web.books_list', "id": "rand",
                    "visibility": constants.SIDEBAR_RANDOM, 'public': True, "page": "discover",
                    "show_text": _('Show Random Books'), "config_show": True})
    sidebar.append({"glyph": "glyphicon-inbox", "text": _('Categories'), "link": 'web.category_list', "id": "cat",
                    "visibility": constants.SIDEBAR_CATEGORY, 'public': True, "page": "category",
                    "show_text": _('Show category selection'), "config_show": True})
    sidebar.append({"glyph": "glyphicon-bookmark", "text": _('Series'), "link": 'web.series_list', "id": "serie",
                    "visibility": constants.SIDEBAR_SERIES, 'public': True, "page": "series",
                    "show_text": _('Show series selection'), "config_show": True})
    sidebar.append({"glyph": "glyphicon-user", "text": _('Authors'), "link": 'web.author_list', "id": "author",
                    "visibility": constants.SIDEBAR_AUTHOR, 'public': True, "page": "author",
                    "show_text": _('Show author selection'), "config_show": True})
    sidebar.append(
        {"glyph": "glyphicon-text-size", "text": _('Publishers'), "link": 'web.publisher_list', "id": "publisher",
         "visibility": constants.SIDEBAR_PUBLISHER, 'public': True, "page": "publisher",
         "show_text": _('Show publisher selection'), "config_show": True})
    sidebar.append({"glyph": "glyphicon-flag", "text": _('Languages'), "link": 'web.language_overview', "id": "lang",
                    "visibility": constants.SIDEBAR_LANGUAGE, 'public': (g.user.filter_language() == 'all'),
                    "page": "language",
                    "show_text": _('Show language selection'), "config_show": True})
    sidebar.append({"glyph": "glyphicon-star-empty", "text": _('Ratings'), "link": 'web.ratings_list', "id": "rate",
                    "visibility": constants.SIDEBAR_RATING, 'public': True,
                    "page": "rating", "show_text": _('Show ratings selection'), "config_show": True})
    sidebar.append({"glyph": "glyphicon-file", "text": _('File formats'), "link": 'web.formats_list', "id": "format",
                    "visibility": constants.SIDEBAR_FORMAT, 'public': True,
                    "page": "format", "show_text": _('Show file formats selection'), "config_show": True})
    sidebar.append(
        {"glyph": "glyphicon-trash", "text": _('Archived Books'), "link": 'web.books_list', "id": "archived",
         "visibility": constants.SIDEBAR_ARCHIVED, 'public': (not g.user.is_anonymous), "page": "archived",
         "show_text": _('Show archived books'), "config_show": content})
    sidebar.append(
        {"glyph": "glyphicon-th-list", "text": _('Books List'), "link": 'web.books_table', "id": "list",
         "visibility": constants.SIDEBAR_LIST, 'public': (not g.user.is_anonymous), "page": "list",
         "show_text": _('Show Books List'), "config_show": content})
    return sidebar
def get_readbooks_ids():
    """Return the set of book ids the current user has marked as read.

    When no custom read-status column is configured, the ids come from the
    app database (ub.ReadBook); otherwise they are read from the configured
    Calibre custom column.

    Returns:
        frozenset of book ids (empty on lookup errors).
    """
    if not config.config_read_column:
        readBooks = ub.session.query(ub.ReadBook).filter(ub.ReadBook.user_id == int(current_user.id))\
            .filter(ub.ReadBook.read_status == ub.ReadBook.STATUS_FINISHED).all()
        return frozenset(x.book_id for x in readBooks)
    else:
        try:
            # `== True` is a SQLAlchemy column expression, not a Python
            # comparison; it must not be rewritten to `is True`.
            readBooks = calibre_db.session.query(db.cc_classes[config.config_read_column])\
                .filter(db.cc_classes[config.config_read_column].value == True).all()
            return frozenset(x.book for x in readBooks)
        except (KeyError, AttributeError):
            log.error("Custom Column No.%d is not existing in calibre database", config.config_read_column)
            # Return an empty frozenset (not a list) so every code path
            # yields the same type to callers.
            return frozenset()
# Render a template through Flask while injecting the instance name,
# sidebar configuration, upload extensions and the user's read-book ids.
def render_title_template(*args, **kwargs):
    sidebar = get_sidebar_config(kwargs)
    injected = {
        'instance': config.config_calibre_web_title,
        'sidebar': sidebar,
        'accept': constants.EXTENSIONS_UPLOAD,
        'read_book_ids': get_readbooks_ids(),
    }
    return render_template(*args, **injected, **kwargs)
|
A contemporary art exhibition inspired by the surroundings of Hestercombe will showcase the work of three artists this autumn/winter.
Sarah Bennett, Megan Calver and Philippa Lawrence have been directly engaged with Hestercombe House and Gardens over three years, and the resulting exhibition has developed from these experiences.
Materiality: provisional states runs from November 10 until February 24, 2019, at Hestercombe Gallery and features exhibits in a range of media, from photography and sculpture to drawing and text.
Sarah Bennett’s practice investigates institutional sites, both historical and contemporary in UK and international settings, employing a range of artistic research methods and material processes including: digital recording, facsimile object making, observational drawing, and embodied actions. She is Head of School of Art and Architecture at Kingston University, and chairs educational events in art schools, universities and arts organisations in Europe and the USA.
After a deep study of the Hestercombe landscape, Philippa Lawrence offers works that ask us to perceive things we may have overlooked or not considered, tracing humankind’s activity in managing an estate and nature. Philippa has exhibited widely both in the UK and internationally, including America, Japan, Czech Republic, Canada, Iceland and Australia.
Find out more about Hestercombe Art Gallery, including how to get here.
|
# /html/body/div[1]/div[3]/table/tbody/tr/td/div/center/table/tbody/tr[1]
import scraps
import textparser
import itertools
class NumberParser(textparser.TextParser):
    """Parse plain integers and Brazilian-Portuguese formatted numbers.

    The raw-string docstrings on each method are the regex patterns the
    TextParser base class dispatches on.
    """

    def parse_int(self, text, match):
        r'^\d+$'
        # int() is safe on untrusted scraped text, unlike eval(), and also
        # accepts leading zeros ("007"), which eval() rejects in Python 3.
        return int(text)

    def parse_number_ptBR_with_percent(self, text, match):
        r'^-?\s*((\d+[\.])+)?\d+[,]\d+%$'
        # Strip the percent sign, pt-BR thousands separators, whitespace
        # (the regex allows "- 5,5%") and convert the decimal comma.
        text = ''.join(text.split())
        text = text.replace('%', '').replace('.', '').replace(',', '.')
        # NOTE(review): multiplying by 100 looks inverted for a percentage
        # ("5,5%" -> 550.0); kept as-is to preserve existing behaviour.
        return float(text) * 100

    def parse_number_ptBR_with_thousands(self, text, match):
        r'^-?\s*((\d+[\.])+)?\d+[,]\d+?$'
        # Same normalisation as above, without the percent handling.
        text = ''.join(text.split())
        text = text.replace('.', '').replace(',', '.')
        return float(text)

    def parseText(self, text):
        # Fallback for cells that match no numeric pattern.
        return 'NA'
def month_pt2en(mes):
    """Translate a Portuguese 3-letter month abbreviation to English.

    Abbreviations that are identical in both languages (jan, mar, ...)
    are returned unchanged; lookup is case-insensitive.
    """
    translations = {
        'fev': 'feb', 'abr': 'apr', 'mai': 'may',
        'ago': 'aug', 'set': 'sep', 'out': 'oct',
    }
    key = mes.lower()
    return translations.get(key, key)
def month_pt2number(mes):
    """Map a Portuguese 3-letter month abbreviation to its number (1-12).

    Returns None for unrecognised input; lookup is case-insensitive.
    """
    names = ('jan', 'fev', 'mar', 'abr', 'mai', 'jun',
             'jul', 'ago', 'set', 'out', 'nov', 'dez')
    lookup = {name: index for index, name in enumerate(names, start=1)}
    return lookup.get(mes.lower())
# Shared parser instance used by the scrap attribute pipelines below.
number_parser = NumberParser()
# NOTE(review): this class is immediately shadowed by the second
# IGPMScrap definition further down; only the later one is effective.
class IGPMScrap(scraps.Scrap):
    # Month header row of the third table, translated to English abbreviations.
    colnames = scraps.Attribute(xpath='//table[3]/tr[1]/td', apply=[month_pt2en])
    # First cell of each data row (row labels -- presumably years; confirm).
    rownames = scraps.Attribute(xpath='//table[3]/tr[position()>1]/td[1]')
    # Remaining cells, run through the number parser.
    data = scraps.Attribute(xpath='//table[3]/tr[position()>1]/td[position()>1]', apply=[number_parser.parse])
# ----------------
# Second (effective) definition targeting the fourth table on the page.
class IGPMScrap(scraps.Scrap):
    # Month header cells, converted to month numbers (1-12).
    colnames = scraps.Attribute(xpath='//table[4]/*/tr[1]/td[position()>1]', apply=[month_pt2number])
    # Row labels, parsed as numbers (presumably years; confirm against the page).
    rownames = scraps.Attribute(xpath='//table[4]/*/tr[position()>1]/td[1]', apply=[number_parser.parse])
    # Raw data cells; numeric parsing is deliberately left disabled here.
    data = scraps.Attribute(xpath='//table[4]/*/tr[position()>1]/td[position()>1]')#, apply=[number_parser.parse])
class IGPMFetcher(scraps.Fetcher):
    # Scrap class and source URL for the IGP-M index table.
    scrapclass = IGPMScrap
    url = 'http://www.portalbrasil.net/igpm.htm'
# NOTE(review): this performs a network fetch at import time; consider
# guarding it with `if __name__ == '__main__':`.
fetcher = IGPMFetcher()
res = fetcher.fetch()
# print(res.colnames)
# print(res.rownames)
# print(res.data)
# for month, rate in zip(list(itertools.product(res.rownames, res.colnames)), res.data):
#     print(month + (rate,))
|
You are here: Whanganui District Council » Subdivision file Sub 12/053, Donald wickham, 1195 Whanganui River Road.
Record: Subdivision file Sub 12/053, Donald wickham, 1195 Whanganui River Road.
Subdivision file Sub 12/053, Donald wickham, 1195 Whanganui River Road.
created Subdivision file Sub 12/053, Donald wickham, 1195 Whanganui River Road.
|
#!/usr/bin/env python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# NOTE: This requires PyTorch! We do not provide installation scripts to install PyTorch.
# It is up to you to install this dependency if you want to execute this example.
# PyTorch's website should give you clear instructions on this: http://pytorch.org/
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from torch.nn.modules.sparse import EmbeddingBag
import numpy as np
import torch
import random
import string
import time
from fasttext import load_model
from torch.autograd import Variable
class FastTextEmbeddingBag(EmbeddingBag):
    """EmbeddingBag initialised from a trained fastText binary model.

    Looking up a word averages the embeddings of all of its subword
    (n-gram) indices, mirroring fastText's own word-vector computation.
    """

    def __init__(self, model_path):
        """Load the fastText model at *model_path* and copy its input matrix
        into the bag's weight."""
        self.model = load_model(model_path)
        input_matrix = self.model.get_input_matrix()
        num_rows, dim = input_matrix.shape
        super().__init__(num_rows, dim)
        self.weight.data.copy_(torch.FloatTensor(input_matrix))

    def forward(self, words):
        """Return one embedding per entry of *words* (a list of strings)."""
        # Accumulate the per-word subword index arrays and concatenate once
        # at the end; np.concatenate inside the loop is quadratic.
        subind_chunks = []
        offsets = [0]
        for word in words:
            _, subinds = self.model.get_subwords(word)
            subind_chunks.append(subinds)
            offsets.append(offsets[-1] + len(subinds))
        if subind_chunks:
            word_subinds = np.concatenate(subind_chunks).astype(np.int64)
        else:
            word_subinds = np.empty([0], dtype=np.int64)
        ind = Variable(torch.LongTensor(word_subinds))
        # EmbeddingBag wants one *starting* offset per bag, so the final
        # (total length) entry is dropped.
        bag_offsets = Variable(torch.LongTensor(offsets[:-1]))
        return super().forward(ind, bag_offsets)
def random_word(N):
    """Return a random alphanumeric string of length *N*."""
    alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits
    picked = random.choices(alphabet, k=N)
    return ''.join(picked)
if __name__ == "__main__":
ft_emb = FastTextEmbeddingBag("fil9.bin")
model = load_model("fil9.bin")
num_lines = 200
total_seconds = 0.0
total_words = 0
for _ in range(num_lines):
words = [
random_word(random.randint(1, 10))
for _ in range(random.randint(15, 25))
]
total_words += len(words)
words_average_length = sum([len(word) for word in words]) / len(words)
start = time.clock()
words_emb = ft_emb(words)
total_seconds += (time.clock() - start)
for i in range(len(words)):
word = words[i]
ft_word_emb = model.get_word_vector(word)
py_emb = np.array(words_emb[i].data)
assert (np.isclose(ft_word_emb, py_emb).all())
print(
"Avg. {:2.5f} seconds to build embeddings for {} lines with a total of {} words.".
format(total_seconds, num_lines, total_words)
)
|
What exactly was happening to your trading during those "pullback" periods? There should be some exact activity that cost you money in the end.
Thanks for your feedback. What was happening during these 'pullback' periods is best described as follows: once the goal is in sight and I have traded for so long successfully, naturally I am full of confidence and perhaps overconfident thinking now is the time. Just one more trading day and I have reached my goal and I can step up the trading size/go back to even. I start to trade and lose/win. At that point I think due to the level of confidence I have reached, I feel complacent about the loss/win and think I can easily turn it around/make some more. I think at this point I am not clearly seeing the market anymore and start to overtrade, take suboptimal trades, leading to (more) losses instead. Then, still being confident because I've come so far, I persist in stubbornly thinking I can easily turn it around, taking even more trades leading to more losses and so on, until the point the loss becomes way bigger than the predefined maximum loss. But usually my confidence is still there at this point, so I start to break that rule. I think a lot of traders recognize this. Also, because the goal was so close, I feel the urge to continue trading, neglecting all rules I diligently kept to while steadily building the account. sometimes this coincides with some big trend day or news day, where you think you can make all the losses back with one big move, but this usually doesn't come to a good end or makes things even worse. So this can go on to the point where the losses have piled up to a point where you suddenly lose your confidence, and get a clear head again, but at this point the damage is already done. I think people who do not stop and go on through this process eventually blow up their account this way. I did so before.
So, what it comes down to, is that up to this point I was trading the process, but from this point on I started to trade PnL. I guess that sums it up. However, as so many traders will agree when reading this, when you're in the moment, this somehow just happens. That's why I stress self-discpline and self control. And it's one thing to read this in books on forums etc, but its another thing to actually put it in practice. It's like Federer telling you how to win at tennis vs actually playing tennis yourself and putting his words into practice.
Hope this clarifies. Writing this down, actually made it clearer for myself.
Good discussion pertinent to me.
Rank every trade you take according to the degree to which you followed your plan/process ...say 1 is not at all and 10 is exactly.
If your process is well enough objectively articulated, this should be possible.
The goal is to score 10 on each trade regardless of trade outcome.
To average 10 for a trading session is optimal.
If you score 5 or less on 2 consecutive trades, stop for the day as your mindset is off.
This a bit like the 'tiltmeter' in the edgewonk journaling software.
And i think to get relaxed before trading is important as we want our executive brain function accessible....not our old reactive crocodile brains.
And remember...the next trade is just the first of the next thousand.....but execute it well.
Thanks for your feedback @Linds! A ranking system sounds like a good idea to keep myself in check.
Also, in the next post I logged some relaxation technique. It really works!
Last edited by rdaytrader; September 29th, 2017 at 10:03 AM.
The subject you have touched is very deep, widespread and various in its development according to me. So don't expect simple answers.
Do you know, what is the most dangerous time for newbies in driving? Approximately 2 years after they start driving. Why? Because they feel they are driving gods. Overconfidence, less attention. And this is when problems come.
Your subconsciousness is saying: "You are so damn good at driving/trading! You now have a higher rank in society!". Your subconsciousness is satisfied, job is done.
The thing is, your consciousness doesn't realize the above. It's being controlled by subconsciousness. In fact most of our action patterns come from subconscious level.
Let me go up one level to describe those things in trying to answer your initial question "Why is this happening".
In fact it lies very deep at subconsciousness level. Our subconsciousness study all the time, and it has "knowledge" it collected during our life (and was taught by other subconsciousness of other people (like your parents) and so on and on, its experience in the end was being collected for centuries). And that is why it's so difficult to identify our action patterns, because most of the things you do is done automatically not having a single thought about that (like eye blinking, breathing, running away from danger, wanting to be liked by society etc..).
Trading is the field where all our "weak" points (from subconsciousness level) are enlightened in its purest forms. In usual social activities we are hiding all our "drawbacks" behind other people, situations, society in general where we are used (and were taught) to survive. And our subconsciousness acts accordingly.
From our childhood we have learned (from situations, reactions of people, other people's teachings) how to behave, hence we have our habits (already at subconscious level) from those ancient times. And it's very difficult to even identify all those things we were taught.
The point is, we all have same pattern of our reaction in everyday situations. Again, it's all on subconsciousness level. And you have same pattern in same situations (stress, risk, uncertainty, learning new things etc.). So my proposition is to identify your trading issues through other situations and your reaction to those you experience in your everyday life. Think about your life, what you did and why you did.
For me trading was a push towards thinking who I am real.
Let me give you an example. Some time back I surprisingly discovered I had some sort of "victim complex", i.e. I was doing sometimes something intentionally in order not to succeed (in trading as well). That could be anything. When I made some investigation, I learned that was coming from my childhood when it was so comfortable when your parents show compassion when you fail. And this pattern was sitting so deep inside me (looking for compassion instead of being successful) that it took a huge effort to get rid of such habit (how - that is a long story). First step was of course identification.
Another example: why do most people fail in trading? They follow the crowd. But this is what our subconsciousness was taught for centuries: in order to survive you have to be with the crowd! That is why it's so uncomfortable to sell when everybody is buying (and every cell in your body is screaming to buy as well), "you are breaking the rule, you are going against the crowd"!!
Also big issue for most people is Ego. I find it one of the most difficult feelings we experience and which tremendously effects or lives and inevitably - trading. In fact it's also the reaction of subconsciousness in order to survive and be at higher level in society (your subconsciousness is telling you: "OK, now you are trading great, you are very smart, you are now ranked higher", so you feel it's great and can't accept the fact that you are still trading "poor" and "ranked" the same level). Most of the time people don't even realize that feeling though. You may find this ego fights "for ranks" of people around you (and you participating as well) every day (when somebody horns behind when you are delayed a bit in moving on a traffic signal; and that guy was tremendously slow when he just started to drive, but now he thinks he is such a great driver and ranked higher than you). Normally it won't have direct impact in life (you are a boss, you are doing stupid things and you are stubborn, but you have other people around who will cover your wrong judgement and you won't suffer right now), but in trading it will have immediate effect.
The point is - it's a big separate human activity, but you may start the journey to meet yourself, trading is a good push towards this according to me. And this what everybody refers to "become another person", meaning to get rid of those acquired patterns during your life that just prevent you from becoming successful (in this case - in trading).
Sorry for such a long thoughts.
Thanks for your feedback and stories. It's no problem to elaborate, all thoughts can be useful.
Honestly all those breathing techniques never helped me (I mean not only trading) so I can't say anything. Most things I overcome were/are through will-power, pulling myself together.
I would strongly recommend exercising, especially martial arts - they are perfect for mind control. This way your body (and mind) gets used to stress and starts to act different way increasing overall stability.
have you ever tried the breathing techniques? it's psysiological so it has a direct effect on your nervous system. Yes, I thought about quitting trading too the last time(s) the goals were near. But you know what? I still sat behind my pc, doing other work, still opened the trading software, promising myself not to trade, but then I saw an opportunity and went down that road previously described. Looking back at it now, it was the lack of self-discipline. Ofcourse I shouldn't even have sat down behind my pc in the first place. I've an addictive personality so add to that the eagerness of reaching the goal and disaster is around the corner. But I'm reflecting now and next time I reach this point again, I am much better prepared. Also, I can re-read this thread for support . Exercising, yes, I practiced Martial Arts before, but nowadays running. Just didn't run for the last couple of months, so need to pick it up again.
I took some excerpts from it that I found most interesting and added stuff so it is most applicable to this thread.
trading is guaranteed to connect you with all of those ‘emotional skeletons’ residing deep in the depths of your unconscious mind waiting to be activated. These emotional skeletons in the closet are like ‘sleeper’ secret agents waiting to be triggered by some event or emotion. these ‘sleepers’ contribute to a common trading behaviour pattern that usually losses you money and that is definitely psychological in nature; this is the behaviour pattern called ‘self-sabotage’.
self-sabotage is simply a behaviour or action designed to prevent you doing the right thing. for example, your desired behaviour might be to stick to your trading plan but the self-sabotage behaviour makes you break your own rules you set in your system. the reason is that you don’t have the discipline to stick to the system and it becomes a self-sabotage behaviour. it is preventing you doing the right thing.
the cause is inside your mind and not ‘out there’ in the external world. It is only your own mind that causes you to do one thing that is stopping you from doing something else that might be more useful to you.
The internal battle for control. Internal conflict can be caused by one part of your mind trying to do one thing for you while at the same time another part is trying to do something else. Both of these ‘parts’ believe that their behaviour is the best one for you at that particular moment.
The ‘desired behaviour’ is to stick to the plan and be disciplined enough to do that. So your ‘follow the plan’ part of your mind is trying to get you to behave with discipline and structure to follow the trading plan. But from a young age a part of you has always been a bit of a rebel and even now as an adult you still might not really like being made to follow rules or be told what to do. So any activity that looks like you have to ‘follow the rules’ or ‘stick to the plan’ probably still activates this rebellious side of your personality.
And so the ‘rebel’ part kicks in and causes conflict with the ‘stick to the plan’ part; even though you know logically that sticking to your trading plan will help you achieve your trading goals! As soon as the ‘stick to the plan’ part tries to help you with disciplined behaviour, the ‘rebel’ part tries to undermine it. The ‘rebel’ wants to entice you away on some unrelated activity that will satisfy your impatient “I don’t like rules” inner child rather than support the efforts of your ‘stick to the plan’ part.
The ‘rebel’ part wants you to feel like that ‘other you’. The ‘You’ who values being: individual, free, your own boss, that “won’t be told what to do” child and not the boring ‘follow the plan’ limited ‘You’ that would be better suited to actually get the job done.
The last one on this list can actually be useful if you pay attention to what your ‘inner demon’ voice is saying because it can help you identify the various ‘parts’ or ‘emotional skeletons’ that are still holding you back from your desired goals. Many of these parts will have been created during your childhood but are still active throughout your adult life unless taken care of.
“I don’t know” is really just another ‘avoidance’ technique. It simply means you are unwilling to look at yourself honestly and accept responsibility. The reality is, and it takes only a few moments of honest and open inner reflection, you usually do know why but most of the time you just don’t want to admit it. Or your subconscious is preventing you to acknowledge it.
self sabotage is some form of internal conflict between opposing ‘parts’ from deep within the unconscious part of your mind, but why do you have this conflict in the first place?
To answer this question we must be totally honest with ourselves and, as mentioned above, for many people that is a very difficult challenge. The reality is “Truth usually hurts” and especially so when you are looking into the mirror of your mind and seeing reflected back your very essence as a person.
But there is a very simple answer to the question: “Why do I self-sabotage?” if only you are prepared to listen to your true inner self and take responsibility for the answer (and many traders can’t or won’t do that).
How to stop the self-sabotage?
The only way to truly stop self-sabotage is to uncover the root of the cause that is preventing you from taking the action you know in your heart will allow you to move forward. Yes, you can try all sorts of ‘management’ or ‘motivation’ type methods and these may work for a while (or even completely) but for most unless you address the underlying cause the management methods will fail and the self-sabotage will come back. Failing to reach an important goal in your trading is an example of this.
Last edited by rdaytrader; October 2nd, 2017 at 02:04 AM.
|
import datetime as dt
import logging
from flask import Blueprint, flash, redirect, render_template, request, url_for
from flask import Markup
from flask_login import fresh_login_required, login_user, logout_user, current_user
from authmgr.extensions import login_manager
from authmgr.utils import flash_errors
from authmgr.extensions import db
from authmgr.user_directory.forms import UserDirectoryForm
from authmgr.user_directory.models import UserDirectory
# Blueprint for all /user_directory views.
blueprint = Blueprint('user_directory', __name__, url_prefix='/user_directory', static_folder='../static')
# User Directory views
@blueprint.route('/')
@fresh_login_required
def list():
    """
    List all User Directories, ordered by name.

    NOTE(review): the view name shadows the builtin ``list``; renaming it
    would change the ``user_directory.list`` endpoint that the other views
    redirect to, so it is left as-is.
    """
    user_directories = UserDirectory.query.order_by('name').all()
    return render_template('user_directory/user_directories.html', user_directories=user_directories, title='User Directories')
@blueprint.route('/add', methods=['GET', 'POST'])
@fresh_login_required
def add():
    """
    Add a User Directory.

    GET renders the empty form; POST validates it, persists the new
    UserDirectory and redirects back to the list view.
    """
    add_user_directory = True
    form = UserDirectoryForm()
    if form.validate_on_submit():
        user_directory = UserDirectory(
            directory_type=form.directory_type.data,
            name=form.name.data,
            description=form.description.data,
            hostname=form.hostname.data,
            port=form.port.data,
            use_ssl=form.use_ssl.data,
            basedn=form.basedn.data,
            username=form.username.data,
            password=form.password.data,
            user_id=current_user.username,
            updated_at=dt.datetime.utcnow()
        )
        try:
            # Persist the new user directory.
            db.session.add(user_directory)
            db.session.commit()
            flash('User Directory added.', 'success')
        except Exception:
            # A failed commit (e.g. duplicate name) leaves the session in an
            # aborted state; roll back so later requests can still use it.
            # (Previously a bare `except:` with no rollback.)
            db.session.rollback()
            flash('Error: User Directory already exists.', 'warning')
        # Redirect to the User Directory list page.
        return redirect(url_for('user_directory.list'))
    # Render the add form.
    return render_template('user_directory/user_directory.html', add_user_directory=add_user_directory, form=form, title='Add User Directory')
@blueprint.route('/edit/<int:id>', methods=['GET', 'POST'])
@fresh_login_required
def edit(id):
    """
    Edit a User Directory.

    GET pre-populates the form from the stored record; POST writes the
    edited values back and redirects to the list view.
    """
    add_user_directory = False
    user_directory = UserDirectory.query.get_or_404(id)
    form = UserDirectoryForm(obj=user_directory)
    if form.validate_on_submit():
        # No trailing commas here: the previous version ended most of these
        # assignments with a comma, storing 1-tuples like ('name',) instead
        # of the actual values.
        user_directory.directory_type = form.directory_type.data
        user_directory.name = form.name.data
        user_directory.description = form.description.data
        user_directory.hostname = form.hostname.data
        user_directory.port = form.port.data
        user_directory.use_ssl = form.use_ssl.data
        user_directory.basedn = form.basedn.data
        user_directory.username = form.username.data
        user_directory.password = form.password.data
        user_directory.user_id = current_user.username
        user_directory.updated_at = dt.datetime.utcnow()
        db.session.commit()
        flash('User Directory updated.', 'success')
        # Redirect to the User Directory list page.
        return redirect(url_for('user_directory.list'))
    # Template path aligned with add() so both views share the same form
    # template (was '/user_directory.html').
    return render_template('user_directory/user_directory.html', add_user_directory=add_user_directory, form=form, title='Edit User Directory')
@blueprint.route('/user_directories/delete/<int:id>', methods=['GET', 'POST'])
@fresh_login_required
def delete(id):
    """
    Delete a User Directory from the database.

    NOTE(review): the route also accepts GET, so a plain link (or URL
    prefetch) can destroy data; consider restricting to POST and adding
    CSRF protection.
    """
    user_directory = UserDirectory.query.get_or_404(id)
    db.session.delete(user_directory)
    db.session.commit()
    flash('User Directory deleted.', 'success')
    # redirect to the User Directory list page
    return redirect(url_for('user_directory.list'))
|
Benefits persons with gastric pain, stomach ulcers or duodenal ulcers. Produced by TaiChi People Herb Co, LLC in USA.
Adult: Prepare fresh daily as tea and drink on empty stomach (1-2 hours prior to meal, 4 hours after meal). Add 2 rounded tablespoons herb powder to 1 1/2 cups clean water in stainless or ceramic pan and bring to boil. Reduce heat and simmer on low for 10 minutes. Cool slightly and store in covered glass. Drink all within 24 hours.
Size: appx. 4.5 ounces, 135 grams, powder in jar. Produced by TaiChi People Herb Co, LLC in USA.
|
import os
import re
from io import BytesIO
import cmocean
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
import numpy as np
from flask_babel import gettext
import plotting
def make_colormap(seq):
    """
    Build a LinearSegmentedColormap from an alternating sequence of
    RGB-tuples and boundary floats: http://stackoverflow.com/a/16836182

    Args:
        seq: a sequence of floats and RGB-tuples. The floats should be
            increasing and in the interval (0,1).
    """
    # Pad with sentinel tuples and the 0/1 endpoints so every float has a
    # neighbouring colour on each side.
    sentinel = (None,) * 3
    padded = [sentinel, 0.0] + list(seq) + [1.0, sentinel]
    segments = {'red': [], 'green': [], 'blue': []}
    for idx, entry in enumerate(padded):
        if not isinstance(entry, float):
            continue
        left = padded[idx - 1]
        right = padded[idx + 1]
        # Each segment entry is [boundary, colour-left-of, colour-right-of].
        for channel, lo, hi in zip(('red', 'green', 'blue'), left, right):
            segments[channel].append([entry, lo, hi])
    return mcolors.LinearSegmentedColormap('CustomMap', segments)
def find_colormap(name):
    """Resolve a variable name to a colormap.

    Tries an exact (lower-cased) key first, then treats each key as a
    case-insensitive regex against the name; falls back to 'mercator'.
    """
    key = name.lower()
    if key in colormaps:
        return colormaps[key]
    for pattern in colormaps:
        if re.search(pattern, name, re.I):
            return colormaps[pattern]
    return colormaps['mercator']
# Shorthand: convert a hex/named colour to an RGB tuple for make_colormap().
_c = mcolors.ColorConverter().to_rgb
# Directory holding the bundled colormap tables (*.txt RGB files).
data_dir = os.path.join(os.path.dirname(plotting.__file__), 'data')
# Map lower-case variable-name fragments to colormaps. find_colormap() tries
# an exact key match first, then matches each key as a regex against the
# requested name, so more specific phrases need their own entries here.
colormaps = {
    'water velocity bearing': cmocean.cm.rain,
    'depth excess': cmocean.cm.deep,
    'ammonium concentration': cmocean.cm.matter,
    'nitrogen': cmocean.cm.balance,
    'dissolved organic nitrogen concentration': cmocean.cm.amp,
    'particulate organic nitrogen concentration': cmocean.cm.amp,
    'depth': cmocean.cm.deep,
    'deep': cmocean.cm.deep,
    'partial pressure': cmocean.cm.matter,
    'primary production': cmocean.cm.algae,
    'temp gradient': cmocean.cm.thermal,
    'heat': cmocean.cm.thermal,
    'density': cmocean.cm.dense,
    'curl': cmocean.cm.curl,
    'vorticity': cmocean.cm.curl,
    'divergence': cmocean.cm.curl,
    'bathymetry': cmocean.cm.deep,
    'salinity': cmocean.cm.haline,
    'speed': cmocean.cm.speed,
    'speed of current': cmocean.cm.speed,
    'freesurface': cmocean.cm.balance,
    'free surface': cmocean.cm.balance,
    'surface height': cmocean.cm.balance,
    'surface elevation': cmocean.cm.balance,
    'velocity': cmocean.cm.delta,
    'eastward current': cmocean.cm.delta,
    'northward current': cmocean.cm.delta,
    'waveheight': cmocean.cm.amp,
    'waveperiod': cmocean.cm.tempo,
    'chlorophyll': cmocean.cm.algae,
    'iron': cmocean.cm.amp,
    'oxygen': cmocean.cm.oxy,
    # These two are loaded from bundled RGB tables rather than cmocean.
    'phosphate': mcolors.ListedColormap(
        np.loadtxt(os.path.join(data_dir, 'phosphate.txt'))),
    'nitrate': mcolors.ListedColormap(
        np.loadtxt(os.path.join(data_dir, 'nitrate.txt'))),
    'nitrate concentration': cmocean.cm.tempo,
    'ice': cmocean.cm.ice,
    'phytoplankton': cmocean.cm.deep_r,
    'diatoms concentration as nitrogen': cmocean.cm.algae,
    'flagellates concentration as nitrogen': cmocean.cm.algae,
    'mesodinium rubrum concentration as nitrogen': cmocean.cm.algae,
    'mesozooplankton concentration as nitrogen': cmocean.cm.algae,
    'microzooplankton concentration as nitrogen': cmocean.cm.algae,
    'silicate': make_colormap([
        _c('#ffffff'),
        _c('#57a6bd'),
    ]),
    'silicon concentration': cmocean.cm.turbid,
    'biogenic silicon concentration': cmocean.cm.turbid,
    # Discrete 10-step rainbow matching standard pH test-strip colours.
    'ph': make_colormap([
        _c('#ED1B26'),
        _c('#F46432'), 0.1, _c('#F46432'),
        _c('#FFC324'), 0.2, _c('#FFC324'),
        _c('#84C341'), 0.3, _c('#84C341'),
        _c('#33A94B'), 0.4, _c('#33A94B'),
        _c('#0AB8B6'), 0.5, _c('#0AB8B6'),
        _c('#4591CD'), 0.6, _c('#4591CD'),
        _c('#5A51A2'), 0.7, _c('#5A51A2'),
        _c('#63459D'), 0.8, _c('#63459D'),
        _c('#6C2180'), 0.9, _c('#6C2180'),
        _c('#49176E')
    ]),
    'mercator_current': make_colormap([
        _c('#e1f3fc'),
        _c('#7ebce5'), 0.17, _c('#7ebce5'),
        _c('#4990bd'), 0.25, _c('#4990bd'),
        _c('#4eb547'), 0.42, _c('#4eb547'),
        _c('#f3e65b'), 0.55, _c('#f3e65b'),
        _c('#f58a35'), 0.67, _c('#f58a35'),
        _c('#d72928'), 0.83, _c('#d72928'),
        _c('#901418')
    ]),
    # Default fallback colormap (see find_colormap()).
    'mercator': make_colormap([
        _c('#1d3b7a'),
        _c('#134aaa'), 0.05, _c('#134aaa'),
        _c('#075ce4'), 0.10, _c('#075ce4'),
        _c('#1976fa'), 0.15, _c('#1976fa'),
        _c('#4b9bf1'), 0.20, _c('#4b9bf1'),
        _c('#80c0e7'), 0.25, _c('#80c0e7'),
        _c('#4dd9f0'), 0.30, _c('#4dd9f0'),
        _c('#1df1f9'), 0.35, _c('#1df1f9'),
        _c('#00efcf'), 0.40, _c('#00efcf'),
        _c('#04d273'), 0.45, _c('#04d273'),
        _c('#0cb20f'), 0.50, _c('#0cb20f'),
        _c('#66cf09'), 0.55, _c('#66cf09'),
        _c('#c8ed03'), 0.60, _c('#c8ed03'),
        _c('#fef000'), 0.65, _c('#fef000'),
        _c('#fed100'), 0.70, _c('#fed100'),
        _c('#feaf00'), 0.75, _c('#feaf00'),
        _c('#fe6a00'), 0.80, _c('#fe6a00'),
        _c('#fe2800'), 0.85, _c('#fe2800'),
        _c('#d80100'), 0.90, _c('#d80100'),
        _c('#a00000'), 0.95, _c('#a00000'),
        _c('#610000')
    ]),
    # Diverging blue-white-red map centred on 0.5 for anomaly plots.
    'anomaly': make_colormap([
        _c('#000064'),
        _c('#0000b2'), 0.090909, _c('#0000b2'),
        _c('#0000ff'), 0.181818, _c('#0000ff'),
        _c('#0748ff'), 0.272727, _c('#0748ff'),
        _c('#9291ff'), 0.363636, _c('#9291ff'),
        _c('#dbd9ff'), 0.454545, _c('#dbd9ff'),
        _c('#ffffff'), 0.500000, _c('#ffffff'),
        _c('#ffd9dd'), 0.545455, _c('#ffd9dd'),
        _c('#ff9193'), 0.636364, _c('#ff9193'),
        _c('#ff484a'), 0.727273, _c('#ff484a'),
        _c('#ff0000'), 0.818182, _c('#ff0000'),
        _c('#b20000'), 0.909091, _c('#b20000'),
        _c('#640000')
    ]),
    'temperature-old': make_colormap([
        _c('#0000ff'),
        _c('#0748ff'), 0.125, _c('#0748ff'),
        _c('#9291ff'), 0.250, _c('#9291ff'),
        _c('#dbd9ff'), 0.375, _c('#dbd9ff'),
        _c('#ffffff'), 0.500, _c('#ffffff'),
        _c('#ffd9dd'), 0.625, _c('#ffd9dd'),
        _c('#ff9193'), 0.750, _c('#ff9193'),
        _c('#ff484a'), 0.875, _c('#ff484a'),
        _c('#ff0000')
    ]),
    'grey': make_colormap([
        _c('#ffffff'),
        _c('#000000')
    ]),
    # Two-colour discrete map: absent / present.
    'potential sub surface channel': mcolors.ListedColormap(
        ['#ecf0f1', '#f57732']
    ),
    'thermal': cmocean.cm.thermal,
    'neo_sst': mcolors.ListedColormap(
        np.loadtxt(os.path.join(data_dir, 'neo_sst.txt'))),
    'BuYlRd': mcolors.ListedColormap(
        np.loadtxt(os.path.join(data_dir, 'BuYlRd.txt'))),
    'temperature': mcolors.ListedColormap(
        np.loadtxt(os.path.join(data_dir, 'temperature.txt'))),
}
# 'wind' shares the diverging colormap used for the velocity components.
colormaps['wind'] = colormaps['velocity']
# This is a little odd, but it has a purpose.
# These gettext calls don't really do anything, but it registers the keys with
# Babel so that they'll end up in the translation list.
# If the gettext calls were in the definition of colormap_names, they'd get
# executed before the user's locale is known and would always be in English.
# (Keep this list in sync with the values of colormap_names below.)
gettext('Ammonium Concentration')
gettext('Anomaly')
gettext('Bathymetry')
gettext('Biogenic Silicon Concentration')
gettext('Chlorophyll')
gettext('Diatoms Concentration as Nitrogen')
gettext('Dissolved Organic Nitrogen Concentration')
gettext('Eastward Current')
gettext('Flagellates Concentration as Nitrogen')
gettext('Greyscale')
gettext('Ice')
gettext('Iron')
gettext('Mercator Ocean Current')
gettext('Mercator')
gettext('Mesodinium rubrum Concentration as Nitrogen')
gettext('Mesozooplankton Concentration as Nitrogen')
gettext('Microzooplankton Concentration as Nitrogen')
gettext('Nitrate')
gettext('Nitrate Concentration')
gettext('Northward Current')
gettext('Oxygen')
gettext('Particulate Organic Nitrogen Concentration')
gettext('Phosphate')
gettext('Phytoplankton')
gettext('Salinity')
gettext('Sea Surface Height (Free Surface)')
gettext('Silicate')
gettext('Silicon Concentration')
gettext('Speed')
gettext('Speed of Current')
gettext('Temperature')
gettext('Velocity')
gettext('Wave Height')
gettext('Wave Period')
gettext('Thermal')
gettext('NEO SST')
gettext('Color Brewer Blue-Yellow-Red')
gettext('Temperature (old)')
gettext('Vorticity')
gettext('Density')
gettext('Deep')
gettext('Balance')
gettext('Potential Sub Surface Channel')
# Human-readable display names for the colormaps listed for selection in the
# UI (a subset of the colormaps dict above). The values are translated at
# display time; the gettext calls above register them with Babel.
colormap_names = {
    'ammonium concentration': 'Ammonium Concentration',
    'balance': 'Balance',
    'anomaly': 'Anomaly',
    'bathymetry': 'Bathymetry',
    'chlorophyll': 'Chlorophyll',
    'dissolved organic nitrogen concentration': 'Dissolved Organic Nitrogen Concentration',
    'diatoms concentration as nitrogen': 'Diatoms Concentration as Nitrogen',
    'flagellates concentration as nitrogen': 'Flagellates Concentration as Nitrogen',
    'freesurface': 'Sea Surface Height (Free Surface)',
    'grey': 'Greyscale',
    'ice': 'Ice',
    'iron': 'Iron',
    'mercator_current': 'Mercator Ocean Current',
    'mercator': 'Mercator',
    'mesodinium rubrum concentration as nitrogen': 'Mesodinium rubrum Concentration as Nitrogen',
    'mesozooplankton concentration as nitrogen': 'Mesozooplankton Concentration as Nitrogen',
    'microzooplankton concentration as nitrogen': 'Microzooplankton Concentration as Nitrogen',
    'nitrate': 'Nitrate',
    'nitrate concentration': 'Nitrate Concentration',
    'oxygen': 'Oxygen',
    'particulate organic nitrogen concentration': 'Particulate Organic Nitrogen Concentration',
    'phosphate': 'Phosphate',
    'phytoplankton': 'Phytoplankton',
    'potential sub surface channel': 'Potential Sub Surface Channel',
    'salinity': 'Salinity',
    'silicate': 'Silicate',
    'silicon concentration': 'Silicon Concentration',
    'biogenic silicon concentration': 'Biogenic Silicon Concentration',
    'speed': 'Speed',
    'speed of current': 'Speed of Current',
    'temperature': 'Temperature',
    'velocity': 'Velocity',
    'eastward current': 'Eastward Current',
    'northward current': 'Northward Current',
    'waveheight': 'Wave Height',
    'waveperiod': 'Wave Period',
    'thermal': 'Thermal',
    'neo_sst': 'NEO SST',
    'BuYlRd': 'Color Brewer Blue-Yellow-Red',
    'temperature-old': 'Temperature (old)',
    'vorticity': 'Vorticity',
    'density': 'Density',
    'deep': 'Deep'
}
def plot_colormaps():
    """Render one gradient strip per selectable colormap and return the
    figure as PNG bytes (sorted by display name, labelled on the right)."""
    figure, axis_list = plt.subplots(
        nrows=len(colormap_names),
        figsize=(11, 0.3 * len(colormap_names))
    )
    figure.subplots_adjust(top=0.925, bottom=0.01, left=0.01, right=0.6)

    # A 2-row ramp from 0 to 1 rendered through each colormap.
    ramp = np.linspace(0, 1, 256)
    ramp = np.vstack((ramp, ramp))

    figure.suptitle(gettext("Ocean Navigator Colourmaps"), fontsize=14)
    ordered_keys = sorted(colormap_names, key=colormap_names.get)
    for axis, key in zip(axis_list, ordered_keys):
        axis.imshow(ramp, aspect='auto', cmap=colormaps.get(key))
        # Place the display name just to the right of the strip.
        bounds = list(axis.get_position().bounds)
        figure.text(
            bounds[2] + 0.025, bounds[1] + bounds[3] / 2.,
            colormap_names[key], va='center', ha='left', fontsize=12
        )

    for axis in axis_list:
        axis.set_axis_off()

    buf = BytesIO()
    try:
        plt.savefig(buf, format="png", dpi='figure')
        plt.close(figure)
        return buf.getvalue()
    finally:
        buf.close()
if __name__ == '__main__':
    # Developer utility: register every colormap with matplotlib and render
    # a viscm perceptual-uniformity report for each one (or only the names
    # given on the command line), saved as <name>.png in the cwd.
    import viscm
    import matplotlib.cm
    import sys

    for k, v in colormaps.items():
        matplotlib.cm.register_cmap(name=k, cmap=v)

    maps = [i for i in colormaps]
    if len(sys.argv) > 1:
        maps = sys.argv[1:]

    for m in maps:
        v = viscm.viscm(m, uniform_space="CAM02-UCS")
        v.fig.set_size_inches(20, 12)
        v.fig.savefig(m + ".png")
|
Anyone else facing that problem?
okay on mine. very good.
Works fine here. Very cool vid. I love the way she just carries on fiddling in her handbag the whole time. Superb.
|
#!/usr/bin/env python
# PySCUBA/src/PySCUBA/__main__.py
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: g.giecold@gmail.com; ggiecold@jimmy.harvard.edu
from os import getcwd, path, remove
import Queue
import sys
try:
import igraph
except ImportError, e:
pass
import numpy as np
from PIL import Image, ImageQt
from PyQt4 import QtCore, QtGui
from sklearn.preprocessing import StandardScaler
import wand.image
from .Gap_stats import gap_stats
from .Preprocessing import cytometry_preprocess, PCR_preprocess, RNASeq_preprocess
from . import PySCUBA_design, SCUBA_core
def plot_tree(cluster_indices, parent_clusters, output_directory = None):
    """Display a bifurcation tree.

    Parameters
    ----------
    cluster_indices : array-like of int
        Cluster label for each cell; vertex sizes reflect how many cells
        fall in each cluster.
    parent_clusters : dict
        Maps a tree level to the list of parent-cluster indices for the
        clusters created at that level (level 0 has no parents).
    output_directory : str, optional
        Where 'SCUBA_tree.pdf' is written; defaults to the current
        working directory.
    """
    # BUG FIX: the original test was ``igraph not in sys.modules``, which
    # compares the module *object* against the dict *keys* (strings) -- it
    # is always True, so the tree was never plotted (and it raised a
    # NameError when the igraph import had failed). Check the name instead.
    if 'igraph' not in sys.modules:
        return

    if output_directory is None:
        output_directory = getcwd()

    # Vertex sizes proportional to the relative population of each cluster,
    # plus a size floor so small clusters remain visible.
    vertex_sizes = np.bincount(cluster_indices)
    N_vertices = vertex_sizes.size
    vertex_sizes = np.divide(vertex_sizes, float(np.sum(vertex_sizes)))
    vertex_sizes *= 100 * N_vertices
    vertex_sizes += 40 + (N_vertices / 3)

    tree = igraph.Graph()
    tree.add_vertices(N_vertices)

    # Connect each new cluster to its parent, level by level. Vertices are
    # numbered consecutively in level order, tracked by cluster_tally.
    # (range() instead of xrange() keeps this Python 2/3 compatible.)
    cluster_tally = 0
    for k, v in parent_clusters.items():
        if k > 0:
            tree.add_edges(zip(v, range(cluster_tally, cluster_tally + len(v))))
            cluster_tally += len(v)

    tree.vs['label'] = list(range(N_vertices))

    layout = tree.layout('fr')
    name = path.join(output_directory, 'SCUBA_tree.pdf')
    igraph.plot(tree, name, bbox = (200 * N_vertices, 200 * N_vertices), margin = 250,
        layout = layout, edge_width = [7] * (N_vertices - 1),
        vertex_label_dist = 0, vertex_label_size = 30,
        vertex_size = vertex_sizes.tolist())
def one_to_max(array_in):
    """Alter a vector of cluster labels to a dense mapping.

    Each distinct value in ``array_in`` is replaced by its rank among the
    sorted distinct values, i.e. labels are remapped onto 0..k-1 while
    preserving their relative order.

    Given that this function is herein always called after passing
    a vector to the function checkcl, one_to_max relies on the assumption
    that cluster_run does not contain any NaN entries.

    Parameters
    ----------
    array_in : a list or one-dimensional array
        The list of cluster IDs to be processed.

    Returns
    -------
    result : one-dimensional array
        A massaged version of the input vector of cluster identities.
    """
    x = np.asanyarray(array_in).reshape(-1)
    # np.unique sorts the distinct labels, and return_inverse yields -- for
    # every input element -- the index of its value in that sorted array:
    # exactly the dense 0..k-1 remapping the original hand-rolled sort/scan
    # loop computed (and this drops the Python-2-only xrange call).
    _, result = np.unique(x, return_inverse=True)
    return result
class WorkerThread(QtCore.QThread):
    """Background thread that runs the whole SCUBA pipeline
    (preprocessing -> optional PCA -> tree inference -> bifurcation
    analysis) so the GUI stays responsive. The output directory is
    handed back through ``result_queue``.
    """

    def __init__(self, result_queue, data_type, data_path, cluster_mode, log_mode,
                 pseudotime_mode, pcv_method, anchor_gene,
                 exclude_marker_names):
        super(WorkerThread, self).__init__()

        self.result_queue = result_queue
        self.data_type = str(data_type)
        self.data_path = data_path
        # The GUI combo box offers 'none' as a choice; normalize it to None.
        cluster_mode = str(cluster_mode).lower()
        self.cluster_mode = None if (cluster_mode == 'none') else cluster_mode
        self.log_mode = log_mode
        self.pseudotime_mode = pseudotime_mode
        self.pcv_method = pcv_method
        self.anchor_gene = anchor_gene
        self.exclude_marker_names = exclude_marker_names

    def __del__(self):
        # Block until the thread finishes so it is not destroyed mid-run.
        self.wait()

    def run(self):
        # Dispatch to the right preprocessing routine for the data type.
        preprocessing_fcts = [cytometry_preprocess, PCR_preprocess,
                              RNASeq_preprocess]
        data_type_dict = {'cytometry': 0, 'PCR': 1, 'RNASeq': 2}

        cell_IDs, data, markers, cell_stages, data_tag, \
            output_directory = preprocessing_fcts[data_type_dict[self.data_type]](
            self.data_path, self.log_mode, self.pseudotime_mode,
            self.pcv_method, self.anchor_gene, self.exclude_marker_names)

        # Remap stage labels to dense 1..k (one_to_max yields 0-based ranks).
        cell_stages = 1 + one_to_max(cell_stages)
        # Center the data (no scaling).
        data = StandardScaler(with_std = False).fit_transform(data)

        if self.cluster_mode in {'pca', 'pca2'}:
            PCA_components, data = SCUBA_core.PCA_analysis(data, self.cluster_mode,
                cell_stages if (self.cluster_mode == 'pca2') else None)

        centroid_coords, cluster_indices, \
            parent_clusters = SCUBA_core.initialize_tree(data, cell_stages)

        centroid_coords, cluster_indices, \
            parent_clusters = SCUBA_core.refine_tree(data, centroid_coords,
            cluster_indices, parent_clusters, cell_stages, output_directory)

        plot_tree(cluster_indices, parent_clusters, output_directory)

        # PCA_components is only bound when PCA ran; the guard above mirrors
        # the one that assigned it, so this is safe.
        if self.cluster_mode in {'pca', 'pca2'}:
            weights = PCA_components
        else:
            weights = None

        bifurcation_info, bifurcation_axes, \
            bifurcation_projections = SCUBA_core.bifurcation_direction(data, cell_IDs,
            markers, parent_clusters, centroid_coords, output_directory,
            weights)

        if bifurcation_info:
            data_per_split, parameters_per_split = SCUBA_core.bifurcation_analysis(
                cluster_indices, bifurcation_info, bifurcation_axes,
                bifurcation_projections, output_directory)

        self.result_queue.put(output_directory)
        # NOTE(review): task_done() is normally called by the *consumer*
        # after get(); calling it here right after put() is unusual --
        # confirm the intended join() semantics with the GUI side.
        self.result_queue.task_done()

        return
class LoadImageThread(QtCore.QThread):
    """Tiny thread that emits an old-style 'showImage(QString)' signal
    carrying the path of the file to display.
    """

    def __init__(self, source_file):
        super(LoadImageThread, self).__init__()
        self.source_file = source_file

    def __del__(self):
        # Block until the thread finishes so it is not destroyed mid-run.
        self.wait()

    def run(self):
        # The actual image loading happens in the slot connected to this
        # signal (PySCUBApp.showImage); this thread only forwards the path.
        self.emit(QtCore.SIGNAL("showImage(QString)"), self.source_file)
class PySCUBApp(QtGui.QMainWindow, PySCUBA_design.Ui_MainWindow):
    """Main PySCUBA window: dataset selection, launching the analysis in a
    WorkerThread, and displaying result images in a zoomable/pannable
    QGraphicsView.
    """

    def __init__(self, parent=None):
        super(self.__class__, self).__init__(parent)
        self.setupUi(self)

        # Nothing to run/cancel until a dataset has been chosen.
        self.cancelButton.setEnabled(False)
        self.okButton.setEnabled(False)

        self.data_path = './'
        self.selectDatasetButton.clicked.connect(self.selectDataset)

        # Analysis options mirrored from the checkboxes.
        self.log_mode = True
        self.logCheckBox.stateChanged.connect(self.logStateChanged)
        self.pseudotime_mode = True
        self.pseudotimeCheckBox.stateChanged.connect(
            self.pseudotimeStateChanged)
        self.pcv_method = 'Rprincurve'
        self.anchor_gene = None
        self.exclude_marker_names = None

        # The worker thread reports its output directory through this queue.
        self.result_queue = Queue.Queue()
        # Both slots fire on OK: record which button was pressed, then run.
        self.okButton.clicked.connect(self.buttonClicked)
        self.okButton.clicked.connect(self.OK)
        self.cancelButton.clicked.connect(self.buttonClicked)

        # Image-view state.
        self.zoom = 0
        self.pixMap = QtGui.QPixmap()
        self.displayFileButton.setEnabled(False)
        self.displayFileButton.clicked.connect(self.selectDisplay)

    def closeEvent(self, event):
        # Ask for confirmation before closing the main window.
        reply = QtGui.QMessageBox.question(self, 'Message',
            "Are you sure to quit?", QtGui.QMessageBox.Yes
            | QtGui.QMessageBox.No, QtGui.QMessageBox.No)
        if reply == QtGui.QMessageBox.Yes:
            event.accept()
        else:
            event.ignore()

    def keyPressEvent(self, event):
        # Escape closes the window (subject to closeEvent confirmation).
        if event.key() == QtCore.Qt.Key_Escape:
            self.close()

    def selectDataset(self):
        # Let the user pick the dataset file and arm the OK/Cancel buttons.
        dataFileDialog = QtGui.QFileDialog(self)
        self.data_path = str(dataFileDialog.getOpenFileName())
        self.statusbar.showMessage("{0} ready to be "
            "analyzed".format(path.basename(self.data_path)))
        self.cancelButton.setEnabled(True)
        self.okButton.setEnabled(True)

    def logStateChanged(self, int):
        # Sync the log-transform flag with the checkbox.
        if self.logCheckBox.isChecked():
            self.log_mode = True
        else:
            self.log_mode = False

    def pseudotimeStateChanged(self, int):
        # Sync the pseudotime flag with the checkbox.
        if self.pseudotimeCheckBox.isChecked():
            self.pseudotime_mode = True
        else:
            self.pseudotime_mode = False

    def buttonClicked(self):
        # Remember which button triggered the action (checked in doneRunning).
        sender = self.sender()
        self.statusbar.showMessage(sender.text() + " was pressed.")
        self.button_clicked = sender.text()

    def OK(self):
        # Launch the analysis in a background WorkerThread.
        self.statusbar.showMessage('Work in progress...')
        self.worker_thread = WorkerThread(self.dataTypeComboBox.currentText(),
            self.data_path, self.clusterModeComboBox.currentText(),
            self.log_mode, self.pseudotime_mode, self.pcv_method,
            self.anchor_gene, self.exclude_marker_names)
        self.connect(self.worker_thread, QtCore.SIGNAL("update(QString)"),
            self.worker_thread.run)
        self.connect(self.worker_thread, QtCore.SIGNAL("finished()"), self.doneRunning)
        self.worker_thread.start()
        self.cancelButton.setEnabled(True)
        self.okButton.setEnabled(False)

    def doneRunning(self):
        # Called when the worker thread finishes (or was cancelled).
        if self.button_clicked == 'Cancel':
            self.cancelRunning()
        else:
            self.cancelButton.setEnabled(False)
            self.okButton.setEnabled(False)
            self.displayFileButton.setEnabled(True)
            # Output directory produced by WorkerThread.run().
            self.directory = self.result_queue.get()
            self.statusbar.showMessage("PySCUBA has completed the "
                "analysis of your data.")
            QtGui.QMessageBox.information(self, "Status Message",
                "Mission accomplished!")

    def cancelRunning(self):
        # Hard-stop the worker thread and notify the user.
        self.cancelButton.setEnabled(False)
        self.okButton.setEnabled(False)
        self.worker_thread.terminate()
        self.statusbar.showMessage("PySCUBA was interrupted!")
        QtGui.QMessageBox.information(self, "Status Message",
            "PySCUBA was interrupted!")

    def selectDisplay(self):
        # Pick one of the result images and load it asynchronously.
        filters = 'Images (*.jpg *.pdf *.png)'
        select_filters = 'Images (*.jpg *.pdf *.png)'
        source_file = QtGui.QFileDialog.getOpenFileName(self,
            'Select file to display', self.directory, filters, select_filters)
        self.load_image_thread = LoadImageThread(source_file)
        self.connect(self.load_image_thread, QtCore.SIGNAL("showImage(QString)"),
            self.showImage)
        self.load_image_thread.start()

    def zoomFactor(self):
        return self.zoom

    def wheelEvent(self, event):
        # Mouse wheel zooms the displayed image; zoom 0 means fit-to-view.
        if not self.pixMap.isNull():
            if event.delta() < 0:
                factor = 0.8
                self.zoom -= 1
            else:
                factor = 1.25
                self.zoom += 1
            if self.zoom < 0:
                self.zoom = 0
            elif self.zoom == 0:
                self.fitInView()
            else:
                self.graphicsView.scale(factor, factor)
        else:
            pass

    def fitInView(self):
        # Scale the view so the whole pixmap fits, then reset zoom state.
        rect = QtCore.QRectF(self.pixMap.rect())
        if not rect.isNull():
            # Undo any current scaling before computing the fit factor.
            unity = self.graphicsView.transform().mapRect(
                QtCore.QRectF(0, 0, 1, 1))
            self.graphicsView.scale(1.0 / unity.width(), 1.0 / unity.height())
            view_rect = self.graphicsView.viewport().rect()
            scene_rect = self.graphicsView.transform().mapRect(rect)
            factor = min(view_rect.width() / scene_rect.width(),
                         view_rect.height() / scene_rect.height())
            self.graphicsView.scale(factor, factor)
            self.graphicsView.centerOn(rect.center())
            self.zoom = 0

    def showImage(self, source_file):
        # Convert the chosen file (possibly a PDF) to a temporary JPEG via
        # ImageMagick/wand, display it, then delete the temporary file.
        source_file = str(source_file)
        target_file = source_file.split('.')[0] + '.jpg'
        with wand.image.Image(filename=source_file) as img:
            img.format = 'jpeg'
            img.save(filename=target_file)

        img = Image.open(target_file, 'r')
        width, height = img.size
        self.scene.clear()
        self.zoom = 0
        self.imgQ = ImageQt.ImageQt(img)
        self.pixMap = QtGui.QPixmap.fromImage(self.imgQ)
        if self.pixMap and not self.pixMap.isNull():
            self.graphicsView.setDragMode(
                QtGui.QGraphicsView.ScrollHandDrag)
            self.scene.addPixmap(self.pixMap)
            self.fitInView()
        else:
            self.graphicsView.setDragMode(QtGui.QGraphicsView.NoDrag)
            self.scene.addPixmap(QtGui.QPixmap())
        self.scene.update()

        remove(target_file)
def main():
    """Create the Qt application, show the PySCUBA main window and run
    the event loop until the user quits."""
    application = QtGui.QApplication(sys.argv)
    window = PySCUBApp()
    window.show()
    sys.exit(application.exec_())


if __name__ == '__main__':
    main()
|
My husband was not unduly put out by being restricted to using a pencil. After all, it was much easier to control than that horrible dip pen. Anyway, it was soon after that that he moved to another school, one that was more liberal in every way. Pupils could write with whatever they wanted. In those days, ballpoints (or Biros as they were known here, regardless of who made them) were notable mostly for unreliability. Usually, they stopped writing while the refill was still full. Others developed the infuriating habit of skipping, but only intermittently, so that you hung on to it a little bit longer in the futile hope that it had cleared itself. The ones that wrote best tended to deliver a little too much ink. It gathered around the point and left a sticky blob every sentence or so.
My husband opted for the fountain pen, and his first one was an Osmiroid 65. It was a great pen and it lasted a whole week before he lost it, as was his way. The next one was the first of many rock-bottom Platignums, the kind that had a plastic body and a gold-alike plastic cap. It blobbed and blotted and sometimes refused to write. It was almost as bad as a ballpoint. The years went by and he grew out of losing things. His mother’s Conway Stewart was passed down to him and he began to really enjoy the pleasure of a good fountain pen. Ballpoints were much improved by this time and most of his classmates used one but they didn’t win him over. They were characterless, required a vertical grip and downward pressure. They hurt the hand after a page or two.
Those were just about the last of the days when you could get your pen resacced. You left it at the newsagents and picked it up a few days later for a very reasonable payment. A sac lasted a long time, and by time he needed another new sac, the service had disappeared. It seemed to be the end of the sac-fill pen. But was it?
My first fountain pen was a burgundy Waterman, probably a W2, never used it at school, by that time we used very strange phallic like ball points pens. I can remember within the half hour when I lost my first fountain pen, at Murrayfield ice rink, five aside football tournament, all the major teams present Hearts, Celtic etc. On leaving I couldn’t wait to get out and was crossing over the seats when my pen must have fallen out of my jacket.
Happy New Year, hope for a better one coming, all the best.
Hubby remembers those strange ballpoints – blue “tail” and grey point, he thinks. They wrote just as badly as the other ballpoints. Happy New Year, Eric. May it be a good one!
Happy New Year Deborah & Eric.
I well remember the Stephens ink, dip pens and ink wells. My first pen was also an Osmaroid, but one with a screw fill. It too got lost rapidly and was followed by a variety of cheap Platignums, usually with a steel italic nib. By the time I needed another new sac, the service had disappeared. I still have the remains of the last two!
Your Osmiroid would be a 75, I suspect, Peter. The Platignum nibs were OK. It was the rest of the pen that wasn’t so hot!
|
import socket
import struct
from splunk_eventgen.lib.outputplugin import OutputPlugin
class S2S:
"""
Encode and send events to Splunk over the S2S V2 wire protocol.
It should be noted V2 is a much older protocol and is no longer utilized by any Splunk Forwarder.
It should still work, but its a very simple protocol and we've advanced pretty far since then.
However, if you have fully cooked events, its very lightweight and very easy to implement
which is why I elected to implement this version.
"""
s = None
signature_sent = None
useOutputQueue = True
def __init__(self, host="localhost", port=9997):
"""
Initialize object. Need to know Splunk host and port for the TCP Receiver
"""
self._open_connection(host, port)
self.signature_sent = False
def _open_connection(self, host="localhost", port=9997):
"""
Open a connection to Splunk and return a socket
"""
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect((host, int(port)))
def _encode_sig(
self, serverName="s2s-api".encode("utf-8"), mgmtPort="9997".encode("utf-8")
):
"""
Create Signature element of the S2S Message. Signature is C struct:
struct S2S_Signature
{
char _signature[128];
char _serverName[256];
char _mgmtPort[16];
};
"""
if not self.signature_sent:
self.signature_sent = True
return struct.pack(
"!128s256s16s",
"--splunk-cooked-mode-v2--".encode("utf-8"),
serverName,
mgmtPort,
).decode("utf-8")
else:
return ""
def _encode_string(self, tosend=""):
"""
Encode a string to be sent across the wire to splunk
Wire protocol has an unsigned integer of the length of the string followed
by a null terminated string.
"""
tosend = str(tosend).encode("utf-8")
return struct.pack("!I%ds" % (len(tosend) + 1), len(tosend) + 1, tosend).decode(
"utf-8"
)
def _encode_key_value(self, key="", value=""):
"""
Encode a key/value pair to send across the wire to splunk
A key value pair is merely a concatenated set of encoded strings.
"""
return "%s%s" % (self._encode_string(key), self._encode_string(value))
def _encode_event(
self, index="main", host="", source="", sourcetype="", _raw="_done", _time=None
):
# Create signature
sig = self._encode_sig()
msg_size = len(
struct.pack("!I", 0)
) # size of unsigned 32 bit integer, which is the count of map entries
maps = 1
# May not have these, so set them first
encoded_source = False
encoded_sourcetype = False
encoded_host = False
# Encode source
if len(source) > 0:
encoded_source = self._encode_key_value(
"MetaData:Source", "source::" + source
)
maps += 1
msg_size += len(encoded_source)
# Encode sourcetype
if len(sourcetype) > 0:
encoded_sourcetype = self._encode_key_value(
"MetaData:Sourcetype", "sourcetype::" + sourcetype
)
maps += 1
msg_size += len(encoded_sourcetype)
# Encode host
if len(host) > 0:
encoded_host = self._encode_key_value("MetaData:Host", "host::" + host)
maps += 1
msg_size += len(encoded_host)
# Encode index
encoded_index = self._encode_key_value("_MetaData:Index", index)
maps += 1
msg_size += len(encoded_index)
# Encode _raw
encoded_raw = self._encode_key_value("_raw", _raw)
msg_size += len(encoded_raw)
# Will include a 32 bit integer 0 between the end of raw and the _raw trailer
msg_size += len(struct.pack("!I", 0))
# Encode "_raw" trailer... seems to just the string '_raw' repeated again at the end of the _raw field
encoded_raw_trailer = self._encode_string("_raw")
msg_size += len(encoded_raw_trailer)
# Add _done... Not sure if there's a penalty to setting this for every event
# but otherwise we don't flush immediately
encoded_done = self._encode_key_value("_done", "_done")
maps += 1
msg_size += len(encoded_done)
# Encode _time
if _time is not None:
encoded_time = self._encode_key_value("_time", _time)
msg_size += len(encoded_time)
maps += 1
# Create buffer, starting with the signature
buf = sig
# Add 32 bit integer with the size of the msg, calculated earlier
buf += struct.pack("!I", msg_size).decode("utf-8")
# Add number of map entries, which is 5, index, host, source, sourcetype, raw
buf += struct.pack("!I", maps).decode("utf-8")
# Add the map entries, index, source, sourcetype, host, raw
buf += encoded_index
buf += encoded_host if encoded_host else ""
buf += encoded_source if encoded_source else ""
buf += encoded_sourcetype if encoded_sourcetype else ""
buf += encoded_time if encoded_time else ""
buf += encoded_done
buf += encoded_raw
# Add dummy zero
buf += struct.pack("!I", 0).decode("utf-8")
# Add trailer raw
buf += encoded_raw_trailer
return buf
def send_event(
self, index="main", host="", source="", sourcetype="", _raw="", _time=None
):
"""
Encode and send an event to Splunk
"""
if len(_raw) > 0:
e = self._encode_event(index, host, source, sourcetype, _raw, _time)
self.s.sendall(e.encode("utf-8"))
def close(self):
"""
Close connection and send final done event
"""
self.s.close()
class S2SOutputPlugin(OutputPlugin):
    """Eventgen output plugin that forwards generated events to a Splunk
    TCP receiver using the S2S V2 wire protocol (see the S2S class)."""

    # Plugin name used by eventgen's plugin registry.
    name = "s2s"
    # Flush at most this many queued events per call.
    MAXQUEUELENGTH = 10
    # Lazily-created S2S connection, shared across flush() calls.
    s2s = None
    useOutputQueue = True

    def __init__(self, sample, output_counter=None):
        OutputPlugin.__init__(self, sample, output_counter)

    def flush(self, q):
        # Nothing to send.
        if len(q) < 1:
            return
        # Open the connection on first use, with the sample's Splunk target.
        if self.s2s is None:
            self.s2s = S2S(self._sample.splunkHost, self._sample.splunkPort)
        # Each queued item is a dict carrying the standard Splunk fields.
        for m in q:
            self.s2s.send_event(
                m["index"],
                m["host"],
                m["source"],
                m["sourcetype"],
                m["_raw"],
                m["_time"],
            )
def load():
    """Returns an instance of the plugin"""
    plugin_class = S2SOutputPlugin
    return plugin_class
|
TTS Markets, established in 2014, is one of the fastest-growing brokerage firms in Asia, offering all prominent currency pairs. For centuries, India has been a centre for all kinds of trading, and TTS Markets has extended its presence to Hassan. If anyone asks which forex trading broker to choose in Hassan, TTS Markets is the first name that comes to traders' minds — our clients' loyalty justifies that claim without compromise. TTS Markets has a long history of great achievements, technological breakthroughs and long-term relationships with our traders, and offers the industry's most reputable award-winning trading platform, the MT4, which provides a hassle-free trading experience.
|
#! /usr/bin/env python
# This file is part of IVRE.
# Copyright 2011 - 2020 Pierre LALET <pierre@droids-corp.org>
#
# IVRE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# IVRE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IVRE. If not, see <http://www.gnu.org/licenses/>.
"""Parse NMAP scan results and add them in DB."""
from argparse import ArgumentParser
import os
import sys
import ivre.db
import ivre.utils
from ivre.view import nmap_record_to_view
import ivre.xmlnmap
def recursive_filelisting(base_directories, error):
    """Iterator on filenames in base_directories. Ugly hack: error is a
    one-element list that will be set to True if one of the directories in
    base_directories does not exist.

    Non-directory entries are yielded as-is; directories are walked
    recursively and every contained file is yielded.
    """
    for entry in base_directories:
        if not os.path.exists(entry):
            ivre.utils.LOGGER.warning("directory %r does not exist", entry)
            error[0] = True
        elif not os.path.isdir(entry):
            yield entry
        else:
            for dirpath, _, filenames in os.walk(entry):
                for fname in filenames:
                    yield os.path.join(dirpath, fname)
def main():
    """Command-line entry point: parse options, then import each scan
    result into the Nmap database (optionally merging into the view)."""
    parser = ArgumentParser(description=__doc__)
    parser.add_argument("scan", nargs="*", metavar="SCAN", help="Scan results")
    parser.add_argument("-c", "--categories", default="", help="Scan categories.")
    parser.add_argument("-s", "--source", default=None, help="Scan source.")
    parser.add_argument(
        "-t", "--test", action="store_true", help="Test mode (JSON output)."
    )
    parser.add_argument(
        "--test-normal", action="store_true", help='Test mode ("normal" Nmap output).'
    )
    parser.add_argument(
        "--ports",
        "--port",
        action="store_true",
        help='Store only hosts with a "ports" element.',
    )
    parser.add_argument(
        "--open-ports", action="store_true", help="Store only hosts with open ports."
    )
    parser.add_argument(
        "--masscan-probes",
        nargs="+",
        metavar="PROBE",
        help="Additional Nmap probes to use when trying to "
        "match Masscan results against Nmap service "
        "fingerprints.",
    )
    parser.add_argument(
        "--zgrab-port",
        metavar="PORT",
        help="Port used for the zgrab scan. This might be "
        "needed since the port number does not appear in the"
        "result.",
    )
    parser.add_argument(
        "--force-info",
        action="store_true",
        help="Force information (AS, country, city, etc.)"
        " renewal (only useful with JSON format)",
    )
    parser.add_argument(
        "-r",
        "--recursive",
        action="store_true",
        help="Import all files from given directories.",
    )
    parser.add_argument(
        "--update-view", action="store_true", help="Merge hosts in current view"
    )
    parser.add_argument(
        "--no-update-view",
        action="store_true",
        help="Do not merge hosts in current view (default)",
    )
    args = parser.parse_args()
    database = ivre.db.db.nmap
    categories = args.categories.split(",") if args.categories else []
    # Test modes replace the real backend with a JSON / "normal"-output
    # writer and disable view updates.
    if args.test:
        args.update_view = False
        args.no_update_view = True
        database = ivre.db.DBNmap()
    if args.test_normal:
        args.update_view = False
        args.no_update_view = True
        database = ivre.db.DBNmap(output_mode="normal")
    # Ugly hack: we use a one-element list so that
    # recursive_filelisting can modify its value
    error = [False]
    if args.recursive:
        scans = recursive_filelisting(args.scan, error)
    else:
        scans = args.scan
    # View merging only happens when --update-view is given and
    # --no-update-view is not.
    if not args.update_view or args.no_update_view:
        callback = None
    else:
        def callback(x):
            return ivre.db.db.view.store_or_merge_host(nmap_record_to_view(x))
    count = 0
    for scan in scans:
        if not os.path.exists(scan):
            ivre.utils.LOGGER.warning("file %r does not exist", scan)
            error[0] = True
            continue
        try:
            if database.store_scan(
                scan,
                categories=categories,
                source=args.source,
                needports=args.ports,
                needopenports=args.open_ports,
                force_info=args.force_info,
                masscan_probes=args.masscan_probes,
                callback=callback,
                zgrab_port=args.zgrab_port,
            ):
                count += 1
        except Exception:
            # Keep importing the remaining files; record the failure.
            ivre.utils.LOGGER.warning("Exception (file %r)", scan, exc_info=True)
            error[0] = True
    ivre.utils.LOGGER.info("%d results imported.", count)
    # Exit status 1 (True) if any file was missing or failed to import.
    sys.exit(error[0])
|
Windbreaker jacket by Columbia, Full mesh lining, Hooded neck, Zip opening, Functional pockets, Contrast trims, Regular fit - true to size. With its Pacific Northwest home providing a backdrop of forests, mountains and a rugged coastline, US label Columbia translates its passion for the outdoors into no-nonsense apparel. Keep your cool in its jersey and outerwear.
|
#!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom widgets used for form fields.
"""
__authors__ = [
'JamesLevy" <jamesalexanderlevy@gmail.com>',
]
from django import forms
from django.forms import util
from django.forms import widgets
from django.utils import html
from django.utils import simplejson
from django.utils import safestring
from soc.models.survey import SurveyContent, Survey, SurveyRecord
from soc.logic import dicts
import cgi
import wsgiref.handlers
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.db import djangoforms
class SurveyForm(djangoforms.ModelForm):
  """Dynamically built form used to create, edit and take surveys.

  This class is used to produce survey forms for several circumstances:
    - Admin creating survey from scratch
    - Admin updating existing survey
    - User taking survey
    - User updating already taken survey

  Using dynamic properties of the this_survey model (if passed as an arg)
  the survey form is dynamically formed.

  TODO: Form now scrambles the order of fields. If it's important
  that fields are listed in a certain order, an alternative to
  the schema dictionary will have to be used.
  """

  def __init__(self, *args, **kwargs):
    """Builds the form fields from the survey's dynamic properties.

    kwargs:
      this_survey: SurveyContent entity the form is built from.
      survey_record: previously submitted SurveyRecord, if any.
    """
    # pop() (rather than get() followed by an unconditional del, which
    # raised KeyError when a kwarg was absent) removes the custom kwargs
    # before they reach ModelForm.__init__.
    this_survey = kwargs.pop('this_survey', None)
    survey_record = kwargs.pop('survey_record', None)
    kwargs['initial'] = {}
    if this_survey:
      fields = {}
      survey_order = {}
      schema = this_survey.get_schema()
      for property in this_survey.dynamic_properties():
        if survey_record:
          # Use the value previously entered by the user.
          value = getattr(survey_record, property)
        else:
          # Use the prompts set by the survey creator.
          value = getattr(this_survey, property)
        # Map out the order of the survey fields.
        survey_order[schema[property]["index"]] = property
        # The three question types are mutually exclusive.
        field_type = schema[property]["type"]
        if field_type == "long_answer":
          fields[property] = forms.fields.CharField(
              widget=widgets.Textarea())  # TODO: custom rows
          kwargs['initial'][property] = value
        elif field_type == "short_answer":
          fields[property] = forms.fields.CharField(max_length=40)
          kwargs['initial'][property] = value
        elif field_type == "selection":
          these_choices = []
          # Add all options, but list the previously chosen one first.
          # SECURITY NOTE: eval() on stored data -- the options are
          # entered by survey admins; do not feed it untrusted input.
          options = eval(getattr(this_survey, property))
          if survey_record:
            these_choices.append((value, value))
            options.remove(value)
          for option in options:
            these_choices.append((option, option))
          fields[property] = forms.ChoiceField(
              choices=tuple(these_choices), widget=forms.Select())
      # Insert the fields in the order mapped out above.
      for position, property in survey_order.items():
        SurveyForm.base_fields.insert(position, property, fields[property])
    super(SurveyForm, self).__init__(*args, **kwargs)

  class Meta:
    model = SurveyContent
    exclude = ['schema']
class EditSurvey(widgets.Widget):
  """Widget rendering the survey editor.

  Acts as a survey creator when no this_survey arg is given.
  """

  WIDGET_HTML = """
  <div id="survey_widget"><table> %(survey)s </table> %(options_html)s </div>
  <script type="text/javascript" src="/soc/content/js/edit_survey.js"></script>
  """
  QUESTION_TYPES = {"short_answer": "Short Answer", "long_answer": "Long Answer", "selection": "Selection" }
  BUTTON_TEMPLATE = """
  <button id="%(type_id)s" onClick="return false;">Add %(type_name)s Question</button>
  """
  OPTIONS_HTML = """
  <div id="survey_options"> %(options)s </div>
  """
  SURVEY_TEMPLATE = """
  <tbody></tbody>
  """

  def __init__(self, *args, **kwargs):
    """Stores the survey entity (if any) before initializing the widget."""
    self.this_survey = kwargs.get('this_survey', None)
    if self.this_survey:
      del kwargs['this_survey']
    super(EditSurvey, self).__init__(*args, **kwargs)

  def render(self, name, value, attrs=None):
    """Renders the survey form plus one add-question button per type."""
    form = SurveyForm(this_survey=self.this_survey, survey_record=None)
    form_html = str(form)
    if not form_html:
      # Empty survey: render a bare table body for the JS to fill in.
      form_html = self.SURVEY_TEMPLATE
    buttons = [self.BUTTON_TEMPLATE % {'type_id': type_id,
                                       'type_name': type_name}
               for type_id, type_name in self.QUESTION_TYPES.items()]
    options_html = self.OPTIONS_HTML % {'options': ''.join(buttons)}
    return self.WIDGET_HTML % {'survey': form_html,
                               'options_html': options_html}
class TakeSurvey(widgets.Widget):
  """Widget used to take a survey, or update an already-taken one."""

  WIDGET_HTML = """
  %(help_text)s <div class="%(status)s"id="survey_widget"><table> %(survey)s </table> </div>
  <script type="text/javascript" src="/soc/content/js/take_survey.js"></script>
  """

  def render(self, this_survey):
    """Renders the survey form, pre-filled if the user already answered."""
    # Imported here to avoid circular imports at module load time.
    import soc.models.user
    from soc.logic.models.user import logic as user_logic
    user = user_logic.getForCurrentAccount()
    # Check if the user has already submitted the form; if so, show the
    # existing answers for editing.
    survey_record = SurveyRecord.gql(
        "WHERE user = :1 AND this_survey = :2",
        user, this_survey.survey_parent.get()).get()
    form = SurveyForm(this_survey=this_survey, survey_record=survey_record)
    if survey_record:
      help_text = "Edit and re-submit this survey."
      status = "edit"
    else:
      help_text = "Please complete this survey."
      status = "create"
    return self.WIDGET_HTML % {'survey': str(form),
                               'help_text': help_text,
                               'status': status}
class SurveyResults(widgets.Widget):
  """Renders the list of results submitted for a given survey."""

  def render(self, this_survey, params, filter=None, limit=1000,
             offset=0, order=None, idx=0, context=None):
    """Renders the results list template for this_survey.

    Fixes over the previous version:
      - mutable default arguments ([] and {}) replaced by None sentinels;
      - the 'filter' default no longer aliases the builtin filter();
      - the loop variable no longer shadows the builtin 'list';
      - the single-element check now tests 'contents' (the list) rather
        than 'content' (the dict), which made the branch unreachable.
    """
    from soc.logic.models.survey import results_logic as results_logic
    if order is None:
      order = []
    if context is None:
      context = {}
    logic = results_logic
    # The passed-in filter is intentionally ignored: results are always
    # scoped to this_survey (parameter kept for signature compatibility).
    filter = {'this_survey': this_survey}
    data = logic.getForFields(filter=filter, limit=limit, offset=offset,
                              order=order)
    params['name'] = "Survey Results"
    content = {
        'idx': idx,
        'data': data,
        # TODO: export to CSV
        'logic': logic,
        'limit': limit,
        }
    updates = dicts.rename(params, params['list_params'])
    content.update(updates)
    contents = [content]
    if len(contents) == 1:
      content = contents[0]
      key_order = content.get('key_order')
      # TODO: honor key_order when listing the results.
    from soc.views import helper
    import soc.logic.lists
    context['list'] = soc.logic.lists.Lists(contents)
    for item in context['list']._contents:
      item['row'] = 'soc/survey/list/results_row.html'
      item['heading'] = 'soc/survey/list/results_heading.html'
      item['description'] = 'Survey Results:'
    context['properties'] = this_survey.this_survey.dynamic_properties()
    context['entity_type'] = "Survey Results"
    context['entity_type_plural'] = "Results"
    context['no_lists_msg'] = "No Survey Results"
    from django.template import loader
    return loader.render_to_string('soc/survey/results.html',
                                   dictionary=context).strip('\n')
|
People collect a couple of embellishments like automatic nervous system responses to stimuli. A very common example is: come to a freno, automatically flick the foot up at the knee after you step over before you put weight on that foot. We've seen followers do this every single time they get to a freno, pretty much as if they can't help it.
There are many embellishments that could be done here and we teach quite a few of them in the series about embellishments. However, one point we particularly want to make is that, every time you do an embellishment, it should be chosen, deliberate, and executed cleanly (rather than sloppy and automatic). Make any embellishments you do at the freno be a part of the special communication between the follower and the leader.
..which I think is totally reasonable.
Men can lead really hard and we can just enjoy it.
and then we just wait.
a woman can do at the freno.
at the freno in a later video.
We’ll have it - later.
So I step over whenever I want.
I go to the ball of my foot and then I just wait.
And then I’ll pivot her to face me.
And so, there really is that instinct to shift too far forward.
on the ball of my, in this case, left foot.
on the ball of my foot.
but until I put weight there, it’s still me.
D:..now I’m going to take over.
N: Now he has the lead.
And that's more custom than anything else.
|
#===============================================================================
# This code belong to the gis2js library, by Nathan Woodrow
# https://github.com/NathanW2/qgs2js
#===============================================================================
from qgis.core import QgsExpression
import re, json
import os
# Accumulates the JS helper functions generated for CASE WHEN expressions;
# reset by exp2func() at the start of every compilation.
whenfunctions = []
# JS spellings of the binary operators, indexed by QgsExpression's
# binary-operator enum value -- the positions matter, so do not reorder.
# The trailing "+" maps the QGIS string-concatenation operator to JS "+".
binary_ops = [
    "||", "&&",
    "==", "!=", "<=", ">=", "<", ">", "~",
    "LIKE", "NOT LIKE", "ILIKE", "NOT ILIKE", "===", "!==",
    "+", "-", "*", "/", "//", "%", "^",
    "+"
]
# JS spellings of the unary operators (NOT, arithmetic negation).
unary_ops = ["!", "-"]
def compile(expstr, name=None, mapLib=None):
    """
    Convert a QgsExpression into a JS function.

    Thin alias kept for the public API; delegates to exp2func().
    """
    return exp2func(expstr, name=name, mapLib=mapLib)
def exp2func(expstr, name=None, mapLib=None):
    """
    Convert a QgsExpression into a JS function.

    Returns a (js_source, function_name, expression_dump) tuple.
    """
    global whenfunctions
    whenfunctions = []
    exp = QgsExpression(expstr)
    # An empty expression compiles to a function that matches everything.
    js = walkExpression(exp.rootNode(), mapLib=mapLib) if expstr else "true"
    if name is None:
        import random
        import string
        # Random 4-letter prefix keeps generated function names unique.
        name = ''.join(random.choice(string.ascii_lowercase)
                       for _ in range(4))
    name += "_eval_expression"
    dump = exp.dump()
    source = """
    function %s(context) {
    // %s
    var feature = context.feature;
    %s
    return %s;
}""" % (name,
        dump,
        "\n".join(whenfunctions),
        js)
    return source, name, dump
def walkExpression(node, mapLib):
    """Recursively convert a QgsExpression parse-tree node to a JS snippet.

    Unknown node types and translation failures degrade to "true" (match
    everything) rather than crashing the export.
    """
    # Explicit default: previously jsExp was left unbound for unknown node
    # types and the resulting NameError was masked by a bare except.
    jsExp = "true"
    try:
        if node.nodeType() == QgsExpression.ntBinaryOperator:
            jsExp = handle_binary(node, mapLib)
        elif node.nodeType() == QgsExpression.ntUnaryOperator:
            jsExp = handle_unary(node, mapLib)
        elif node.nodeType() == QgsExpression.ntInOperator:
            jsExp = handle_in(node, mapLib)
        elif node.nodeType() == QgsExpression.ntFunction:
            jsExp = handle_function(node, mapLib)
        elif node.nodeType() == QgsExpression.ntLiteral:
            jsExp = handle_literal(node)
        elif node.nodeType() == QgsExpression.ntColumnRef:
            jsExp = handle_columnRef(node, mapLib)
        elif node.nodeType() == QgsExpression.ntCondition:
            jsExp = handle_condition(node, mapLib)
        return jsExp
    except Exception:
        # Catch only Exception (was a bare except) so KeyboardInterrupt
        # and SystemExit still propagate; fall back to "true" on errors.
        return "true"
def handle_condition(node, mapLib):
    """Translate a CASE WHEN ... THEN ... [ELSE ...] END node to JS.

    The node is re-parsed from its dump() text with regexes; each
    WHEN/THEN pair becomes an if / else-if branch of a generated helper
    function that is appended to the global whenfunctions list.  Returns
    a call expression invoking that helper.
    """
    # NOTE(review): this global names a variable that is never defined or
    # read anywhere else in the module; it looks like leftover code.
    global condtioncounts
    subexps = re.findall("WHEN(\s+.*?\s+)THEN(\s+.*?\s+)", node.dump())
    count = 1;
    js = ""
    for sub in subexps:
        when = sub[0].strip()
        then = sub[1].strip()
        # Compile both sub-expressions recursively.
        whenpart = QgsExpression(when)
        thenpart = QgsExpression(then)
        whenjs = walkExpression(whenpart.rootNode(), mapLib)
        thenjs = walkExpression(thenpart.rootNode(), mapLib)
        # First branch is "if", the subsequent ones "else if".
        style = "if" if count == 1 else "else if"
        js += """
        %s %s {
            return %s;
        }
        """ % (style, whenjs, thenjs)
        js = js.strip()
        count += 1
    # Default result when no WHEN branch matches and no ELSE is given.
    elsejs = "null"
    if "ELSE" in node.dump():
        elseexps = re.findall("ELSE(\s+.*?\s+)END", node.dump())
        elsestr = elseexps[0].strip()
        exp = QgsExpression(elsestr)
        elsejs = walkExpression(exp.rootNode(), mapLib)
    funcname = "_CASE()"
    temp = """function %s {
    %s
    else {
        return %s;
    }
    };""" % (funcname, js, elsejs)
    whenfunctions.append(temp)
    return funcname
def handle_binary(node, mapLib):
    """Translate a binary-operator node into a JS expression.

    LIKE/ILIKE are approximated with indexOf() substring tests (SQL
    wildcards are stripped), "~" becomes a JS regex test, and integer
    division "//" is emulated with Math.floor().
    """
    op = node.op()
    # binary_ops is indexed by QgsExpression's binary-operator enum value.
    retOp = binary_ops[op]
    left = node.opLeft()
    right = node.opRight()
    retLeft = walkExpression(left, mapLib)
    retRight = walkExpression(right, mapLib)
    # NOTE(review): the [:-1] slices below drop the trailing space that
    # handle_columnRef / handle_unary append to their output.
    if retOp == "LIKE":
        return "(%s.indexOf(%s) > -1)" % (retLeft[:-1],
                                          re.sub("[_%]", "", retRight))
    elif retOp == "NOT LIKE":
        return "(%s.indexOf(%s) == -1)" % (retLeft[:-1],
                                           re.sub("[_%]", "", retRight))
    elif retOp == "ILIKE":
        return "(%s.toLowerCase().indexOf(%s.toLowerCase()) > -1)" % (
            retLeft[:-1],
            re.sub("[_%]", "", retRight))
    elif retOp == "NOT ILIKE":
        return "(%s.toLowerCase().indexOf(%s.toLowerCase()) == -1)" % (
            retLeft[:-1],
            re.sub("[_%]", "", retRight))
    elif retOp == "~":
        # [1:-2] strips the quotes (and trailing space) around the
        # literal pattern so '...' becomes /.../
        return "/%s/.test(%s)" % (retRight[1:-2], retLeft[:-1])
    elif retOp == "//":
        # Integer division: JS "/" is float division.
        return "(Math.floor(%s %s %s))" % (retLeft, retOp, retRight)
    else:
        return "(%s %s %s)" % (retLeft, retOp, retRight)
def handle_unary(node, mapLib):
    """Translate a unary-operator node (NOT / negation) into JS."""
    operand_js = walkExpression(node.operand(), mapLib)
    # unary_ops is indexed by QgsExpression's unary-operator enum value.
    return "%s %s " % (unary_ops[node.op()], operand_js)
def handle_in(node, mapLib):
    """Translate an IN-operator node into a JS indexOf() membership test."""
    value_js = walkExpression(node.node(), mapLib)
    # Renamed local: the original shadowed the builtin 'list'.
    dumped_list = node.list().dump()
    return "%s.indexOf(%s) > -1 " % (json.dumps(dumped_list), value_js)
def handle_literal(node):
    """Translate a literal node into its JS source representation.

    Strings are single-quoted with newlines escaped, None becomes null,
    anything else is rendered with unicode().
    """
    val = node.value()
    if isinstance(val, basestring):
        # Keep newlines escaped so the generated JS stays on one line.
        return u"'%s'" % unicode(val.replace("\n", "\\n"))
    if val is None:
        return u"null"
    return u"%s" % unicode(val)
def handle_function(node, mapLib):
    """Translate a function-call node into a call to a fnc_* JS shim."""
    func = QgsExpression.Functions()[node.fnIndex()]
    # "$"-prefixed QGIS functions (e.g. $area) are not valid JS names.
    js_name = func.name().replace("$", "_")
    pieces = []
    arg_nodes = node.args()
    if arg_nodes is not None:
        for child in arg_nodes.list():
            pieces.append(walkExpression(child, mapLib))
    return "fnc_%s([%s], context)" % (js_name, ",".join(pieces))
def handle_columnRef(node, mapLib):
    """Translate a column reference into a feature-attribute lookup.

    The trailing space matches the output style of the other handlers
    (handle_binary relies on it when slicing with [:-1]).
    """
    return "getFeatureAttribute(feature, '{0}') ".format(node.name())
def compile_to_file(exp, name=None, mapLib=None, filename="expressions.js"):
    """
    Generate JS function to file from exp and append it to the end of the given file name.
    :param exp: The expression to export to JS
    :return: The name of the function you can call.
    """
    function_js, function_name, _ = exp2func(exp, name=name, mapLib=mapLib)
    with open(filename, "a") as out:
        # Blank line between appended functions keeps the file readable.
        out.write("\n\n" + function_js)
    return function_name
def is_expression_supported(expr):
    """Return the function names used in *expr* that cannot be translated.

    Scans the bundled qgis2web_expressions.js: a shim whose defining line
    is immediately followed by a "return false" line is unsupported.

    :param expr: expression text to check
    :return: list of unsupported function names (without parentheses)
    """
    path = os.path.join(os.path.dirname(__file__), "js",
                        "qgis2web_expressions.js")
    with open(path) as f:
        lines = f.readlines()
    # Candidate function calls: two or more letters followed by "(".
    used = [str(e) for e in re.findall(r"[a-zA-Z]{2,}?\(", expr)]
    unsupported = []
    for i, line in enumerate(lines):
        for func in used:
            if func in line:
                # Guard i + 1: the previous version raised IndexError when
                # the match occurred on the file's last line.
                if i + 1 < len(lines) and "return false" in lines[i + 1]:
                    unsupported.append(func[:-1])
                break
    return unsupported
|
OMG! Just one hour left till prom? Susan here couldn't possibly make it to her prom ball, on time, if you didn't lend her a helping hand! Her mom has just asked her to clean up and tidy up her bedroom asap or else she's not allowed to attend her prom and she still hasn't made up her mind what to wear at the ball either. So, chop, chop, play The Spring Prom, probably one of your future top favorite prom party games, and grant Susan the chance to turn herself into a... homecoming queen!
The first major task she wrote down on her list: getting her room a spotless clean, so very tidy look! So, go ahead and lend her a helping hand for picking and throwing all the clothes hanging there into the laundry box, all the books and notebooks scattered all over the room back on her bookshelves again, help her make her bed in the blink of an eye and then, once her bedroom’s sparkly clean again... hurry up, join her into her dressing room and help her get dressed up for the prom ball! You must have surely tried other great prom party games online, but not many of them have been... 3-games-in-1 in fact, right?
A candy-like pink, full-skirt princess dress, or maybe a dreamlike, Cinderella-inspired prom gown, or rather a sheer green, strapless, retro-chic dress instead? Decisions, decisions — so many hard choices to make, and add to that, sweet Susan here still has her prom beauty queen crown to select, the glittery jewels to glam up her dress with and the perfect heels, too. Last but surely not least: help the homecoming queen Susan kiss her handsome date, right on the dance floor, making sure they don't get caught by that super strict math teacher popping up in the background precisely for spying on them!
Eager to try some other equally fun prom party games now? Then feel free to do that: sweep through our whole girl games collection and you'll be surprised to discover how many other future prom beauty queens are waiting in line for you to help them make it to their own prom balls! Enjoy!
|
"""
Executes pre- and post-release shell commands
"""
from rez.release_hook import ReleaseHook
from rez.exceptions import ReleaseHookCancellingError
from rez.config import config
from rez.utils.logging_ import print_debug
from rez.vendor.schema.schema import Schema, Or, Optional, Use, And
from rez.vendor.sh.sh import Command, ErrorReturnCode, sudo, which
import getpass
import sys
import os
class CommandReleaseHook(ReleaseHook):
    """Release hook that runs configurable shell commands.

    Executes the commands configured under pre_build_commands,
    pre_release_commands and post_release_commands, optionally as root
    (via sudo), and can cancel the build/release when a command fails.
    """

    # Schema of one command entry: a command name/path, optional args
    # (a single string is split on whitespace) and an optional user.
    commands_schema = Schema(
        {"command": basestring,
         Optional("args"): Or(And(basestring,
                                  Use(lambda x: x.strip().split())),
                              [basestring]),
         Optional("user"): basestring})

    # Settings this plugin accepts from the rez configuration.
    schema_dict = {
        "print_commands": bool,
        "print_output": bool,
        "print_error": bool,
        "cancel_on_error": bool,
        "stop_on_error": bool,
        "pre_build_commands": [commands_schema],
        "pre_release_commands": [commands_schema],
        "post_release_commands": [commands_schema]}

    @classmethod
    def name(cls):
        """Name under which this release hook plugin is registered."""
        return "command"

    def __init__(self, source_path):
        """See ReleaseHook; source_path is the package source directory."""
        super(CommandReleaseHook, self).__init__(source_path)

    def execute_command(self, cmd_name, cmd_arguments, user, errors):
        """Run one command, appending any failure message to *errors*.

        :param cmd_name: Command name or path; resolved via PATH when it
            is not an existing file.
        :param cmd_arguments: List of arguments, or None.
        :param user: User to run as; only 'root' (via sudo) and the
            current user are supported.
        :param errors: List collecting human-readable failure messages.
        :returns: True on success, False on failure.
        """
        def _err(msg):
            # Record the failure; optionally echo it to stderr.
            errors.append(msg)
            if self.settings.print_error:
                print >> sys.stderr, msg

        def _execute(cmd, arguments):
            try:
                result = cmd(*(arguments or []))
                if self.settings.print_output:
                    print result.stdout.strip()
            except ErrorReturnCode as e:
                # `e` shows the command that was run
                msg = "command failed:\n%s" % str(e)
                _err(msg)
                return False
            return True

        # Resolve the executable: literal path first, then PATH lookup.
        if not os.path.isfile(cmd_name):
            cmd_full_path = which(cmd_name)
        else:
            cmd_full_path = cmd_name
        if not cmd_full_path:
            msg = "%s: command not found" % cmd_name
            _err(msg)
            return False
        run_cmd = Command(cmd_full_path)
        if user == 'root':
            with sudo:
                return _execute(run_cmd, cmd_arguments)
        elif user and user != getpass.getuser():
            # Running as an arbitrary other user is not supported yet.
            raise NotImplementedError  # TODO
        else:
            return _execute(run_cmd, cmd_arguments)

    def _release(self, commands, errors=None):
        """Run each configured command; stop early if stop_on_error is set."""
        for conf in commands:
            if self.settings.print_commands or config.debug("package_release"):
                from subprocess import list2cmdline
                toks = [conf["command"]] + conf.get("args", [])
                msg = "running command: %s" % list2cmdline(toks)
                if self.settings.print_commands:
                    print msg
                else:
                    print_debug(msg)
            if not self.execute_command(cmd_name=conf.get("command"),
                                        cmd_arguments=conf.get("args"),
                                        user=conf.get("user"),
                                        errors=errors):
                if self.settings.stop_on_error:
                    return

    def pre_build(self, user, install_path, **kwargs):
        """Run pre_build_commands; cancel the build on error if configured."""
        errors = []
        self._release(self.settings.pre_build_commands, errors=errors)
        if errors and self.settings.cancel_on_error:
            raise ReleaseHookCancellingError(
                "The following pre-build commands failed:\n%s"
                % '\n\n'.join(errors))

    def pre_release(self, user, install_path, **kwargs):
        """Run pre_release_commands; cancel the release on error if configured."""
        errors = []
        self._release(self.settings.pre_release_commands, errors=errors)
        if errors and self.settings.cancel_on_error:
            raise ReleaseHookCancellingError(
                "The following pre-release commands failed:\n%s"
                % '\n\n'.join(errors))

    def post_release(self, user, install_path, variants, **kwargs):
        """Run post_release_commands (failures are not collected here)."""
        self._release(self.settings.post_release_commands)
def register_plugin():
    """Entry point used by rez to discover this release hook plugin."""
    return CommandReleaseHook
|
This steel 52 Tooth Heavy Duty Spur Gear is an excellent upgrade for Savage owners who use their trucks in extreme conditions with high output engines. It's machined from high carbon steel for precise roundness and smooth tooth shape ensures consistent gear mesh, high efficiency and long gear life. This is compatible with the original Savage slipper clutch. If you want to take your Savage to the next level of toughness, pick up a HPI Heavy Duty 52 Tooth Spur Gear and put the power where it belongs, TO THE GROUND!
For extra tuning capabilities, take a look at the #77127 47 tooth steel spur gear!
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six.moves.urllib.parse as urlparse
from openstack_dashboard.test.integration_tests import basewebobject
class PageObject(basewebobject.BaseWebObject):
    """Base class for page objects."""

    PARTIAL_LOGIN_URL = 'auth/login'

    def __init__(self, driver, conf):
        """Constructor."""
        super(PageObject, self).__init__(driver, conf)
        self._page_title = None

    @property
    def page_title(self):
        """Title of the page currently loaded in the browser."""
        return self.driver.title

    def is_the_current_page(self, do_assert=False):
        """Check (by title prefix) that the browser shows this page."""
        title_matches = self.page_title.startswith(self._page_title)
        if do_assert:
            self.assertTrue(
                title_matches,
                "Expected to find %s in page title, instead found: %s"
                % (self._page_title, self.page_title))
        return title_matches

    @property
    def login_url(self):
        """Absolute URL of the dashboard login page."""
        root = self.conf.dashboard.dashboard_url
        if not root.endswith('/'):
            root += '/'
        return urlparse.urljoin(root, self.PARTIAL_LOGIN_URL)

    def get_url_current_page(self):
        """Return the URL the browser is currently on."""
        return self.driver.current_url

    def close_window(self):
        """Close the current browser window."""
        return self.driver.close()

    def is_nth_window_opened(self, n):
        """Return True when exactly *n* browser windows are open."""
        return len(self.driver.window_handles) == n

    def switch_window(self, window_name=None, window_index=None):
        """Switch the webdriver focus between windows.

        Args:
            - window_name: The name of the window to switch to.
            - window_index: The index of the window handle to switch to.

        Called without arguments it switches to the last window in the
        driver's window_handles list; when only one window exists nothing
        effectively happens.

        Usage:
            page.switch_window('_new')
            page.switch_window(2)
            page.switch_window()
        """
        if window_name is not None and window_index is not None:
            raise ValueError("switch_window receives the window's name or "
                             "the window's index, not both.")
        if window_name is not None:
            target = window_name
        elif window_index is not None:
            target = self.driver.window_handles[window_index]
        else:
            target = self.driver.window_handles[-1]
        self.driver.switch_to.window(target)

    def go_to_previous_page(self):
        """Browser history: go back one page."""
        self.driver.back()

    def go_to_next_page(self):
        """Browser history: go forward one page."""
        self.driver.forward()

    def refresh_page(self):
        """Reload the current page."""
        self.driver.refresh()

    def go_to_login_page(self):
        """Navigate to the login page and assert that we arrived there."""
        self.driver.get(self.login_url)
        self.is_the_current_page(do_assert=True)
|
decline in the Barbados tourism performance.
than hiring a car or taking a taxi.
from the wine list,” said Springer.
a decline in retail revenue of between 10 to 20 per cent for the hotel industry.
spend was up for the industry. He did not say by how much.
of the major issues affecting the competitiveness of the hotel sector.
with everywhere in the world where they don’t have these levels of costs,” said Doyle.
cause a major challenge to the survival of the hotel sector”.
improve their service levels and continue to retrain, motivate and empower staff.
Are we contributing? What appeals/attractions to them coming home for vacations have been considered? Have we asked for or encouraged their suggestions in ways they can help to stimulate the economy, short term or long term?
How about Barbadian pensioners getting their money from abroad? How about finding a way for them to use medical care here with reimbursement for whatever insurance package they have (if they pay) or fee scale services if they have insurance abroad?
or for that matter have their medical condition addressed by our physicians here and recuperate here?
|
"""Frame objects that do the frame demarshaling and marshaling."""
import logging
import struct
from servicebus.pika import amqp_object
from servicebus.pika import exceptions
from servicebus.pika import spec
from servicebus.pika.compat import byte
LOGGER = logging.getLogger(__name__)
class Frame(amqp_object.AMQPObject):
    """Base Frame object mapping.

    Holds the attributes common to every frame type and provides the
    _marshal helper that child classes use to assemble the binary AMQP
    frame from their encoded pieces.
    """
    NAME = 'Frame'

    def __init__(self, frame_type, channel_number):
        """Create a new instance of a frame.

        :param int frame_type: The frame type
        :param int channel_number: The channel number for the frame
        """
        self.frame_type = frame_type
        self.channel_number = channel_number

    def _marshal(self, pieces):
        """Assemble the full AMQP wire-protocol frame.

        :rtype: bytes
        """
        payload = b''.join(pieces)
        # Frame layout: type (B), channel (H), payload size (I),
        # payload bytes, FRAME_END octet.
        header = struct.pack('>BHI', self.frame_type, self.channel_number,
                             len(payload))
        return header + payload + byte(spec.FRAME_END)

    def marshal(self):
        """To be implemented by child classes.

        :raises NotImplementedError:
        """
        raise NotImplementedError
class Method(Frame):
    """Method frame: wraps an AMQP Class.Method object for creating or
    accessing its data and attributes.
    """
    NAME = 'METHOD'

    def __init__(self, channel_number, method):
        """Create a new instance of a frame.

        :param int channel_number: The channel number for the frame
        :param pika.Spec.Class.Method method: The AMQP Class.Method
        """
        Frame.__init__(self, spec.FRAME_METHOD, channel_number)
        self.method = method

    def marshal(self):
        """Return the AMQP binary encoded value of the frame.

        :rtype: str
        """
        encoded = self.method.encode()
        # The 32-bit method index precedes the encoded arguments.
        encoded.insert(0, struct.pack('>I', self.method.INDEX))
        return self._marshal(encoded)
class Header(Frame):
    """Content header frame: carries the body size and the message's
    Basic.Properties.
    """
    NAME = 'Header'

    def __init__(self, channel_number, body_size, props):
        """Create a new instance of a AMQP ContentHeader object.

        :param int channel_number: The channel number for the frame
        :param int body_size: The number of bytes for the body
        :param pika.spec.BasicProperties props: Basic.Properties object
        """
        Frame.__init__(self, spec.FRAME_HEADER, channel_number)
        self.body_size = body_size
        self.properties = props

    def marshal(self):
        """Return the AMQP binary encoded value of the frame.

        :rtype: str
        """
        encoded = self.properties.encode()
        # Class id (H), two pad bytes (xx), 64-bit body size (Q).
        prefix = struct.pack('>HxxQ', self.properties.INDEX, self.body_size)
        encoded.insert(0, prefix)
        return self._marshal(encoded)
class Body(Frame):
    """Content body frame: carries one fragment of the message payload."""
    NAME = 'Body'

    def __init__(self, channel_number, fragment):
        """Create a new body frame.

        :param int channel_number: channel the body belongs to
        :param fragment: unicode or str payload fragment
        """
        Frame.__init__(self, spec.FRAME_BODY, channel_number)
        self.fragment = fragment

    def marshal(self):
        """Return the AMQP binary encoded value of the frame.

        :rtype: str
        """
        return self._marshal([self.fragment])
class Heartbeat(Frame):
    """Heartbeat frame: keeps the connection alive and carries no payload
    (always sent on channel 0).
    """
    NAME = 'Heartbeat'

    def __init__(self):
        """Create a new instance of the Heartbeat frame."""
        Frame.__init__(self, spec.FRAME_HEARTBEAT, 0)

    def marshal(self):
        """Return the AMQP binary encoded value of the frame.

        :rtype: str
        """
        return self._marshal([])
class ProtocolHeader(amqp_object.AMQPObject):
    """Pythonic interface for creating AMQP protocol header frames."""
    NAME = 'ProtocolHeader'

    def __init__(self, major=None, minor=None, revision=None):
        """Construct a Protocol Header frame object for the specified AMQP
        version.

        :param int major: Major version number
        :param int minor: Minor version number
        :param int revision: Revision
        """
        self.frame_type = -1
        # Fall back to the library's protocol version for any falsy part
        # (short-circuit keeps spec untouched when values are provided).
        self.major = major or spec.PROTOCOL_VERSION[0]
        self.minor = minor or spec.PROTOCOL_VERSION[1]
        self.revision = revision or spec.PROTOCOL_VERSION[2]

    def marshal(self):
        """Return the full AMQP wire-protocol representation of the
        ProtocolHeader frame.

        :rtype: str
        """
        version = struct.pack('BBBB', 0, self.major, self.minor,
                              self.revision)
        return b'AMQP' + version
def decode_frame(data_in):
    """Receives raw socket data and attempts to turn it into a frame.
    Returns bytes used to make the frame and the frame

    Returns (0, None) when data_in does not yet contain a complete frame.

    :param str data_in: The raw data stream
    :rtype: tuple(bytes consumed, frame)
    :raises: pika.exceptions.InvalidFrameError
    """
    # Look to see if it's a protocol header frame
    try:
        if data_in[0:4] == b'AMQP':
            # Version bytes sit at offsets 5-7: 'AMQP' 0 major minor rev
            major, minor, revision = struct.unpack_from('BBB', data_in, 5)
            return 8, ProtocolHeader(major, minor, revision)
    except (IndexError, struct.error):
        # Not enough data to decide yet
        return 0, None
    # Get the Frame Type, Channel Number and Frame Size
    try:
        (frame_type, channel_number,
         frame_size) = struct.unpack('>BHL', data_in[0:7])
    except struct.error:
        return 0, None
    # Get the frame data
    frame_end = spec.FRAME_HEADER_SIZE + frame_size + spec.FRAME_END_SIZE
    # We don't have all of the frame yet
    if frame_end > len(data_in):
        return 0, None
    # The Frame termination chr is wrong
    if data_in[frame_end - 1:frame_end] != byte(spec.FRAME_END):
        raise exceptions.InvalidFrameError("Invalid FRAME_END marker")
    # Get the raw frame data
    frame_data = data_in[spec.FRAME_HEADER_SIZE:frame_end - 1]
    if frame_type == spec.FRAME_METHOD:
        # Get the Method ID from the frame data
        method_id = struct.unpack_from('>I', frame_data)[0]
        # Get a Method object for this method_id
        method = spec.methods[method_id]()
        # Decode the content (offset 4 skips the method id just read)
        method.decode(frame_data, 4)
        # Return the amount of data consumed and the Method object
        return frame_end, Method(channel_number, method)
    elif frame_type == spec.FRAME_HEADER:
        # Return the header class and body size
        class_id, weight, body_size = struct.unpack_from('>HHQ', frame_data)
        # Get the Properties type
        properties = spec.props[class_id]()
        # Decode the properties out (offset 12 skips class id, weight
        # and body size decoded above)
        properties.decode(frame_data[12:])
        # Return a Header frame
        return frame_end, Header(channel_number, body_size, properties)
    elif frame_type == spec.FRAME_BODY:
        # Return the amount of data consumed and the Body frame w/ data
        return frame_end, Body(channel_number, frame_data)
    elif frame_type == spec.FRAME_HEARTBEAT:
        # Return the amount of data and a Heartbeat frame
        return frame_end, Heartbeat()
    raise exceptions.InvalidFrameError("Unknown frame type: %i" % frame_type)
|
To get the Epic Schematic reward(s), you will need a total experience level of 4,783,250. You will then require 65,000 experience to reach the next level.
|
from django.db import models
from django.contrib.auth.models import User
from django.contrib import admin
from string import join
# from djangoByExample.settings import MEDIA_ROOT
# Create your models here.
class Forum(models.Model):
    """A forum: a named container of threads."""
    title = models.CharField(max_length=60)

    def __unicode__(self):
        return self.title

    def num_posts(self):
        """Total number of posts across every thread in this forum."""
        return sum(t.num_posts() for t in self.thread_set.all())

    def last_post(self):
        """Most recently created post in any of this forum's threads,
        or None when the forum is empty."""
        if not self.thread_set.count():
            return None
        newest = None
        for thread in self.thread_set.all():
            candidate = thread.last_post()
            if candidate and (newest is None or
                              candidate.created > newest.created):
                newest = candidate
        return newest
class Thread(models.Model):
    """A discussion thread belonging to a forum."""
    title = models.CharField(max_length=60)
    created = models.DateTimeField(auto_now_add=True)
    creator = models.ForeignKey(User, blank=True, null=True)
    forum = models.ForeignKey(Forum)

    def __unicode__(self):
        return unicode(self.creator) + " - " + self.title

    def num_posts(self):
        """Number of posts in this thread (including the opening post)."""
        return self.post_set.count()

    def num_replies(self):
        """Number of replies, i.e. all posts except the opening one."""
        return self.post_set.count() - 1

    def last_post(self):
        """Return the most recent post, or None when the thread is empty.

        Bug fix: ordering by "created" (ascending) and taking [0]
        returned the OLDEST post; "-created" yields the newest.
        """
        if self.post_set.count():
            return self.post_set.order_by("-created")[0]
class Post(models.Model):
    """A single message inside a thread."""
    title = models.CharField(max_length=60)
    created = models.DateTimeField(auto_now_add=True)
    creator = models.ForeignKey(User, blank=True, null=True)
    thread = models.ForeignKey(Thread)
    body = models.TextField(max_length=10000)
    def __unicode__(self):
        return u"%s - %s - %s" % (self.creator, self.thread, self.title)
    def short(self):
        # Compact one-line summary used in the admin change list.
        return u"%s - %s\n%s" % (self.creator, self.title,
                                 self.created.strftime("%b %d,%I:%M %p"))
    # Tell the Django admin not to escape this method's output.
    short.allow_tags = True
|
The Communication Disability Centre (CDC) has been established to enhance the lives of people experiencing communication disability.
Communication disability refers to the impairments, activity limitations and participation restrictions that affect an individual’s ability to interact and engage with the world in ways that are meaningful to them and those they communicate with.
We aim to help people affected by communication disability through finding strategies for prevention and intervention, research that translates to practice, disseminating research findings and educating speech pathology and audiology students and graduates.
Find out more about CDC research projects here.
We foster relationships with key stakeholders in the research process to optimise impact and clinical relevance.
We deliver education to undergraduate and graduate speech pathologists and audiologists.
We welcome enquiries from prospective research students (Master of Philosophy (MPhil) and Doctor of Philosophy (PhD)) who are interested in the area of communication disability.
The communication research registry was created to help improve services for people with communication difficulties. It is free to join and available to people of all ages with a communication difficulty and their support network. Find out more at www.ccregistry.org.au.
The CDC is led by Professor Louise Hickson and Professor Linda Worrall along with a dynamic research team of audiologists and speech pathologists.
|
"""
Django settings for base_prj project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'h-ae1kbm7l&-#jmgs5yv94l!r2o=j=hhwzlj#ouitmy%%x0g7o'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bicycles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'base_prj.urls'
WSGI_APPLICATION = 'base_prj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, "base_prj"),) # added line to enable dynamic routing for static files
TEMPLATE_DIRS = [os.path.join(os.path.join(BASE_DIR, "base_prj"), "templates")]
|
Yellow and gray chevron baby room new elephant baby bedding set baby, gray and yellow zig zag chevron baby bedding : modern ideas of, yellow and gray chevron bedding sets king target grey baby twin. Yellow and gray chevron baby room new elephant baby bedding set baby. Grey chevron bedding set bed crib gray and white baby yellow sets.
|
#coding:utf8
from django.db import models
from django.contrib.auth.models import (
BaseUserManager, AbstractBaseUser, PermissionsMixin
)
class MyUserManager(BaseUserManager):
    """Manager providing creation helpers for the custom MyUser model."""
    def create_user(self, unitsn, unitname, unitgroup, operatorname,password=None):
        """
        Creates and saves a regular user with the given unit serial
        number (unitsn), unit name, unit group and operator name.
        Raises ValueError when unitsn is missing.
        """
        if not unitsn:
            raise ValueError('Users must have an sn.')
        user = self.model(
            unitsn = unitsn,
            # email=MyUserManager.normalize_email(email),
            unitname=unitname,
            unitgroup = unitgroup,
            operatorname = operatorname,
            # is_staff=False,
            # is_active=True,
            # is_superuser=False,
        )
        # Hashes the password (or marks it unusable when None).
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_superuser(self, unitsn, unitname, unitgroup, operatorname, password):
        """
        Creates and saves a superuser: a regular user with is_admin and
        is_active forced to True.
        """
        user = self.create_user(unitsn,
                                # email,
                                password=password,
                                unitname=unitname,
                                unitgroup = unitgroup,
                                operatorname = operatorname,
                                )
        # user.is_staff = True
        # user.is_superuser = True
        user.is_active = True
        user.is_admin = True
        # user.is_staff = True
        user.save(using=self._db)
        return user
class MyUser(AbstractBaseUser, PermissionsMixin):
    """Custom user model keyed on a unit serial number (unitsn) instead of
    a username or email address; see MyUserManager for creation helpers."""
    # Unit serial number; doubles as the login identifier (USERNAME_FIELD).
    unitsn = models.CharField(verbose_name='单位编码', max_length=30, unique=True, db_index=True)
    # email = models.EmailField(verbose_name='电子邮箱', max_length=255, unique=True,)
    # Human-readable unit name.
    unitname = models.CharField(max_length=100, verbose_name="单位名称")
    # Unit category choices (roughly: municipal disabled persons'
    # federation, district federation, hospital).
    UNITGROUP_CHOICES = (
        ('0', u'市残联'),
        ('1', u'区残联'),
        ('2', u'医院'),
    )
    unitgroup = models.CharField(max_length=30, choices=UNITGROUP_CHOICES, verbose_name="单位类别")
    # Name of the person operating this account.
    operatorname = models.CharField(max_length=30, verbose_name="操作人员")
    # unitname = models.DateField()
    is_active = models.BooleanField(default=True)
    # is_staff = models.BooleanField(default=False)
    is_admin = models.BooleanField(default=False)
    objects = MyUserManager()
    USERNAME_FIELD = 'unitsn'
    REQUIRED_FIELDS = ['unitname', 'unitgroup', 'operatorname']
    def get_full_name(self):
        # The user is identified by unitsn
        return self.unitsn
    def get_short_name(self):
        # Also identified by unitsn (this model has no email field).
        return self.unitsn
    def __unicode__(self):
        s= "%s" % (self.unitsn)
        return s
    class Meta:
        verbose_name = "用户信息"
        verbose_name_plural = "用户信息"
        # app_label = u"信息管理"
    def has_perm(self, perm, obj=None):
        "Does the user have a specific permission?"
        # Simplest possible answer: Yes, always
        return True
    def has_module_perms(self, app_label):
        "Does the user have permissions to view the app `app_label`?"
        # Simplest possible answer: Yes, always
        return True
    @property
    def is_staff(self):
        "Is the user a member of staff?"
        # Simplest possible answer: All admins are staff
        return self.is_admin
|
Whatever financial goals you may have for the year 2014, you will in all likelihood need to save money in order to achieve them. Here are the basic methods you have to apply in order to accumulate considerable savings for the coming year.
With the economic climate as it is, you probably wish you had more sources of income than you currently have. A lot of people these days are actually working two jobs, although the employment landscape in many areas may make it difficult for some to have even just a single job.
However, there’s a way to easily supplement your income, and that is to sell some of your stuff you don’t really use or need. Take stock of what you own, and determine which of them you can live without. You can have a yard sale in your area, or you can go online and offer them for sale or for auction. Either way, you get rid of unnecessary stuff, gain some extra space in your home, and gain extra money.
Getting out of debt is another effective way of amassing savings, because you have to pay interest for the balance of your debts. It may be best to pay more than the minimum payments required of you each month, and you should also consider transferring your debts to lenders or credit card companies offering lower interest rates. In the meantime, stop racking up more debts if you don’t have to. Your credit card is not a license for you to buy stuff you can’t really afford, so use it only for emergencies.
You need to see the difference between stuff you would like or are “nice”, versus things you really need. Take stock of all your expenses, and categorize each one. You can save a lot of money by formulating a stricter definition of the word necessity, and foregoing all the rest of your normal expenses—which are, strictly speaking, luxuries.
For this step to work, you will have to develop some type of self-discipline. Make a list every time you go to the grocery or to the mall for everything you need to purchase; this should prevent you from making impulse purchases. If you see anything that isn’t on your list, don’t buy it.
Another unnecessary expense is when you pay more than you have to for a product. Try to apply for any discounts whenever you can, and go online and search for websites which offer significant price reduction for the items that you need to buy. Be warned, however, that discounts can be very dangerous—you may be tempted to avail of discounts for items you don’t really need. Again first check your list to see if an item is something that you need to buy. Then you can avail of discounts.
Brought to you by the finance team from installments loan lender, a leading authority on consumer financing and short term loans.
|
'''
Task Coach - Your friendly task manager
Copyright (C) 2004-2013 Task Coach developers <developers@taskcoach.org>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from taskcoachlib import patterns
from taskcoachlib.thirdparty.pubsub import pub
from taskcoachlib.i18n import _
import wx
class AttributeSync(object):
    ''' Class used for keeping an attribute of a domain object synchronized with
        a control in a dialog. If the user edits the value using the control,
        the domain object is changed, using the appropriate command. If the
        attribute of the domain object is changed (e.g. in another dialog) the
        value of the control is updated. '''
    def __init__(self, attributeGetterName, entry, currentValue, items,
                 commandClass, editedEventType, changedEventType, callback=None,
                 **kwargs):
        # Name of the getter method on the domain object; used by the
        # deprecated notification path to re-read the attribute.
        self._getter = attributeGetterName
        # The dialog control being kept in sync.
        self._entry = entry
        # Last known value; used to suppress redundant updates both ways.
        self._currentValue = currentValue
        # Domain objects being edited (may be more than one).
        self._items = items
        # Command class used to apply user edits (supports undo).
        self._commandClass = commandClass
        self.__commandKwArgs = kwargs
        self.__changedEventType = changedEventType
        self.__callback = callback
        entry.Bind(editedEventType, self.onAttributeEdited)
        # Only mirror external changes when editing a single item; with
        # multiple items there is no single authoritative value to track.
        if len(items) == 1:
            self.__start_observing_attribute(changedEventType, items[0])
    def onAttributeEdited(self, event):
        # Control -> domain object: the user edited the value in the dialog.
        event.Skip()
        new_value = self.getValue()
        if new_value != self._currentValue:
            self._currentValue = new_value
            commandKwArgs = self.commandKwArgs(new_value)
            self._commandClass(None, self._items, **commandKwArgs).do() # pylint: disable=W0142
            self.__invokeCallback(new_value)
    def onAttributeChanged_Deprecated(self, event): # pylint: disable=W0613
        # Domain object -> control, via the legacy observer mechanism.
        if self._entry:
            new_value = getattr(self._items[0], self._getter)()
            if new_value != self._currentValue:
                self._currentValue = new_value
                self.setValue(new_value)
                self.__invokeCallback(new_value)
        else:
            # The dialog control is gone; stop listening.
            self.__stop_observing_attribute()
    def onAttributeChanged(self, newValue, sender):
        # Domain object -> control, via the pubsub mechanism.
        if sender in self._items:
            if self._entry:
                if newValue != self._currentValue:
                    self._currentValue = newValue
                    self.setValue(newValue)
                    self.__invokeCallback(newValue)
            else:
                # The dialog control is gone; stop listening.
                self.__stop_observing_attribute()
    def commandKwArgs(self, new_value):
        # Inject the edited value into the keyword args for the command.
        self.__commandKwArgs['newValue'] = new_value
        return self.__commandKwArgs
    def setValue(self, new_value):
        # Subclasses override when the control uses a different accessor.
        self._entry.SetValue(new_value)
    def getValue(self):
        return self._entry.GetValue()
    def __invokeCallback(self, value):
        # Notify the optional callback; surface any error to the user
        # instead of letting it propagate out of the event handler.
        if self.__callback is not None:
            try:
                self.__callback(value)
            except Exception, e:
                wx.MessageBox(unicode(e), _('Error'), wx.OK)
    def __start_observing_attribute(self, eventType, eventSource):
        # Two notification systems coexist: pubsub topics (new style) and
        # the patterns.Publisher observer registry (deprecated).
        if eventType.startswith('pubsub'):
            pub.subscribe(self.onAttributeChanged, eventType)
        else:
            patterns.Publisher().registerObserver(self.onAttributeChanged_Deprecated,
                                                  eventType=eventType,
                                                  eventSource=eventSource)
    def __stop_observing_attribute(self):
        # Unsubscribe from both mechanisms; ignore unknown pubsub topics.
        try:
            pub.unsubscribe(self.onAttributeChanged, self.__changedEventType)
        except pub.UndefinedTopic:
            pass
        patterns.Publisher().removeObserver(self.onAttributeChanged_Deprecated)
class FontColorSync(AttributeSync):
    ''' AttributeSync specialization for color controls, which expose
        SetColor/GetColor instead of SetValue/GetValue. '''
    def setValue(self, newValue):
        self._entry.SetColor(newValue)
    def getValue(self):
        return self._entry.GetColor()
|
EXCEPT when I include water (solvateoct with TIP3PBOX).
slower compared to using a single processor.
Next message: Ross Walker: "RE: AMBER: MPI is slower than single processor with water"
Previous message: Ross Walker: "AMBER: RE: help"
In reply to: Mike Summers: "AMBER: torsion angle penalty calculation"
Next in thread: Ross Walker: "RE: AMBER: MPI is slower than single processor with water"
Reply: Ross Walker: "RE: AMBER: MPI is slower than single processor with water"
|
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.conf.urls import patterns, url
from django.http import Http404
from django.views.decorators.cache import never_cache
from django.core.urlresolvers import reverse
from preserialize.serialize import serialize
from serrano.resources.base import ThrottledResource
from varify import api
from vdw.genes.models import Gene
class GeneResource(ThrottledResource):
    """Detail endpoint returning a single serialized Gene by primary key."""
    model = Gene
    template = api.templates.Gene
    def is_not_found(self, request, response, pk):
        # 404 check performed by the resource framework before GET runs.
        return not self.model.objects.filter(pk=pk).exists()
    @api.cache_resource
    def get(self, request, pk):
        """Serialize the gene along with its related chr/detail/families."""
        related = ['chr', 'detail', 'families']
        try:
            gene = self.model.objects.select_related(*related).get(pk=pk)
        except self.model.DoesNotExist:
            raise Http404
        data = serialize(gene, **self.template)
        # The approved symbol and name are stored as synonyms for easier
        # searching, but they should not be repeated in the synonym list
        # of the response.
        if data['name'] in data['synonyms']:
            data['synonyms'].remove(data['name'])
        if data['symbol'] in data['synonyms']:
            data['synonyms'].remove(data['symbol'])
        return data
class GeneSearchResource(ThrottledResource):
    """Paginated gene search endpoint.

    Query parameters:
        query -- search string; exact symbol match when ``fuzzy`` is
                 '0'/'false', otherwise a substring match on synonyms.
        page  -- 1-based page number (invalid values coerce to a valid page).
    """
    model = Gene
    template = api.templates.GeneSearch

    def get(self, request):
        query = request.GET.get('query')
        fuzzy = request.GET.get('fuzzy', 1)
        page = request.GET.get('page', 1)
        # Use only the currently 'approved' genes
        genes = self.model.objects.select_related('synonyms')
        # Perform search if a query string is supplied
        if query:
            if fuzzy == '0' or fuzzy == 'false':
                genes = genes.filter(symbol__iexact=query)
            else:
                genes = genes.filter(synonyms__label__icontains=query)
            genes = genes.distinct()
        # Paginate the results
        paginator = Paginator(genes, api.PAGE_SIZE)
        try:
            # Bug fix: this was the redundant double assignment
            # ``page = page = paginator.page(page)``.
            page = paginator.page(page)
        except PageNotAnInteger:
            page = paginator.page(1)
        except EmptyPage:
            page = paginator.page(paginator.num_pages)
        resp = {
            'result_count': paginator.count,
            'results': serialize(page.object_list, **self.template),
        }
        # Post processing..
        for obj in resp['results']:
            # The approved symbol and name are stored as synonyms for
            # easier searching, but should not be repeated in the
            # synonym list of the response.
            if obj['name'] in obj['synonyms']:
                obj['synonyms'].remove(obj['name'])
            if obj['symbol'] in obj['synonyms']:
                obj['synonyms'].remove(obj['symbol'])
            obj['_links'] = {
                'self': {
                    'rel': 'self',
                    'href': reverse('api:genes:gene',
                                    kwargs={'pk': obj['id']})
                }
            }
        links = {}
        # Bug fix: use the page's own navigation predicates. The old
        # ``page.number < paginator.num_pages - 1`` check omitted the
        # "next" link on the second-to-last page.
        if page.has_previous():
            links['prev'] = {
                'rel': 'prev',
                'href': "{0}?page={1}".format(reverse('api:genes:search'),
                                              str(page.number - 1))
            }
        if page.has_next():
            links['next'] = {
                'rel': 'next',
                'href': "{0}?page={1}".format(reverse('api:genes:search'),
                                              str(page.number + 1))
            }
        if links:
            resp['_links'] = links
        return resp
# Disable HTTP-level caching on both endpoints; GeneResource.get already
# does resource-level caching via api.cache_resource.
gene_resource = never_cache(GeneResource())
gene_search_resource = never_cache(GeneSearchResource())
# URL names: 'search' -> list/search endpoint, 'gene' -> detail by pk.
urlpatterns = patterns(
    '',
    url(r'^$', gene_search_resource, name='search'),
    url(r'^(?P<pk>\d+)/$', gene_resource, name='gene'),
)
|
Ljiljana Smajlovic was the International Editor of Evropljanin magazine in 1999 when the magazine’s publisher Slavko Curuvija was shot dead. On 5. April 2019 the trial of three men accused of the killing comes to an end in Belgrade.
This is the testimony of Ljliljana Smajlovic, who is an Executive Board Member of the European Centre for Press and Media Freedom.
I await the Curuvija trial verdict with a heavy heart and a lot of apprehension. My publisher was murdered twenty years ago. Will justice be done this Friday?
I despaired when it took the Serbian state 16 years to bring charges against state security agents suspected of assassinating Slavko. I despaired because the trial itself took nearly four years. I was indignant when two of the suspects were released from custody due to the length of the trial, and even more indignant that one suspect is still at large. I was alarmed when the trial chamber ruled three times that key evidence was inadmissible, and three times superior judges had to overrule its decisions. This did not inspire any confidence in the proceedings.
I was unhappy that procedure prevented me from observing the trial because the defense announced early on it would seek to depose me as a witness. The judges never allowed it, but they never took the trouble to inform me of their decision, robbing me of the chance to observe the trial. I was only able to attend the closing session, in time to hear a defendant criticizing me for commenting negatively on trial proceedings which I had not observed in person.
Throughout the trial, my colleagues from the Commission to Establish Facts Surrounding the Murders of Journalists and I were vilified by the accused, as well as their supporters in the press and their defense attorneys. The lawyers for the defense moved to use laws that would prevent us from commenting on trial proceedings, which made such comments punishable by prison for up to three years! They even got the Belgrade Bar Association to back their legal initiative.
In the meantime, their associates in the media organised a press posse against those of us who had pushed for an end to impunity for crimes against journalists (our Commission is working on more unsolved cases of murdered journalists, including those missing and killed in the Yugoslav civil wars). Prior to his assassination during the NATO bombing of Yugoslavia in 1999, my publisher Slavko Curuvija had been attacked in state media as a stooge of the West, a traitor and someone who had asked NATO to bomb his own country. After he was shot in the back on Easter Sunday in broad daylight, I spoke at his funeral to a bewildered, anxious crowd filled with frightened journalists. I thought it was bad enough when the Milosevic-era police interrogated me the morning after on account of that short funeral oration. However, nearly twenty years later, as the trial of the four men accused of killing Slavko drew to a close, it was my turn to be accused of the same treacherous deeds, by the same journalists who had accused my publisher, and in some of the same media organisations that demonised my dead colleague.
I have kept the faith and I believe justice will be done. But will it be done this Friday?
You can read the background to the case here.
|
# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This was taken from
# https://github.com/dmlc/dmlc-core/blob/master/tracker/dmlc_tracker/tracker.py
# License: https://github.com/dmlc/dmlc-core/blob/master/LICENSE
# Note: We don't need this file any more while we use xgboost>=1.0.0.
# pai-tf 1.15 doesn't support Python3 which xgboost>=1.0.0 requires.
"""
Tracker script for DMLC
Implements the tracker control protocol
- start dmlc jobs
- start ps scheduler and rabit tracker
- help nodes to establish links with each other
Tianqi Chen
"""
# pylint: disable=invalid-name, missing-docstring
# pylint: disable=too-many-arguments, too-many-locals
# pylint: disable=too-many-branches, too-many-statements
from __future__ import absolute_import
import argparse
import logging
import os
import socket
import struct
import subprocess
import sys
import time
from threading import Thread
class ExSocket(object):
    """
    Extension of socket to handle recv and send of special data

    Provides fixed-size reads plus length-prefixed int/string framing
    ('@i' = native 4-byte int on all supported platforms).
    """
    def __init__(self, sock):
        self.sock = sock

    def recvall(self, nbytes):
        """Receive exactly *nbytes* bytes, looping over partial reads."""
        res = []
        nread = 0
        while nread < nbytes:
            chunk = self.sock.recv(min(nbytes - nread, 1024))
            # Bug fix: recv() returns b'' when the peer closes the
            # connection; the original code would then loop forever.
            if not chunk:
                raise EOFError('connection closed after %d of %d bytes'
                               % (nread, nbytes))
            nread += len(chunk)
            res.append(chunk)
        return b''.join(res)

    def recvint(self):
        """Receive one native-endian 4-byte integer."""
        return struct.unpack('@i', self.recvall(4))[0]

    def sendint(self, n):
        """Send one native-endian 4-byte integer."""
        self.sock.sendall(struct.pack('@i', n))

    def sendstr(self, s):
        """Send a length-prefixed UTF-8 encoded string."""
        self.sendint(len(s))
        self.sock.sendall(s.encode())

    def recvstr(self):
        """Receive a length-prefixed UTF-8 encoded string."""
        slen = self.recvint()
        return self.recvall(slen).decode()
# magic number used to verify existence of data
kMagic = 0xff99


def get_some_ip(host):
    """Return the address string of *host*'s first resolution result."""
    family, socktype, proto, canonname, sockaddr = socket.getaddrinfo(
        host, None)[0]
    return sockaddr[0]
def get_family(addr):
    """Return the address family (e.g. AF_INET) of *addr*'s first
    resolution result."""
    first_result = socket.getaddrinfo(addr, None)[0]
    return first_result[0]
class SlaveEntry(object):
    """Tracker-side record of one connected worker.

    The constructor performs the handshake: verify the magic number, then
    read rank, world size, job id and command from the worker.
    """
    def __init__(self, sock, s_addr):
        slave = ExSocket(sock)
        self.sock = slave
        self.host = get_some_ip(s_addr[0])
        # Handshake: both sides exchange kMagic to verify the protocol.
        magic = slave.recvint()
        assert magic == kMagic, 'invalid magic number=%d from %s' % (magic,
                                                                     self.host)
        slave.sendint(kMagic)
        # rank is -1 when the worker wants the tracker to assign one.
        self.rank = slave.recvint()
        self.world_size = slave.recvint()
        self.jobid = slave.recvstr()
        # cmd is one of 'start', 'recover', 'shutdown' or 'print'.
        self.cmd = slave.recvstr()
        self.wait_accept = 0
        self.port = None
    def decide_rank(self, job_map):
        # Reuse an explicit rank, then a rank previously mapped to this
        # job id; -1 means the tracker must assign a fresh rank.
        if self.rank >= 0:
            return self.rank
        if self.jobid != 'NULL' and self.jobid in job_map:
            return job_map[self.jobid]
        return -1
    def assign_rank(self, rank, wait_conn, tree_map, parent_map, ring_map):
        """Send the worker its rank plus its tree/ring neighbor links, then
        relay peer addresses until the worker has connected to all of them.
        Returns the list of ranks that no longer wait for connections."""
        self.rank = rank
        nnset = set(tree_map[rank])
        rprev, rnext = ring_map[rank]
        self.sock.sendint(rank)
        # send parent rank
        self.sock.sendint(parent_map[rank])
        # send world size
        self.sock.sendint(len(tree_map))
        self.sock.sendint(len(nnset))
        # send the rprev and next link
        for r in nnset:
            self.sock.sendint(r)
        # send prev link
        if rprev != -1 and rprev != rank:
            nnset.add(rprev)
            self.sock.sendint(rprev)
        else:
            self.sock.sendint(-1)
        # send next link
        if rnext != -1 and rnext != rank:
            nnset.add(rnext)
            self.sock.sendint(rnext)
        else:
            self.sock.sendint(-1)
        # Connection-brokering loop: the worker reports which neighbors it
        # already reached; the tracker sends addresses for the rest.
        while True:
            ngood = self.sock.recvint()
            goodset = set([])
            for _ in range(ngood):
                goodset.add(self.sock.recvint())
            assert goodset.issubset(nnset)
            badset = nnset - goodset
            conset = []
            for r in badset:
                if r in wait_conn:
                    conset.append(r)
            self.sock.sendint(len(conset))
            self.sock.sendint(len(badset) - len(conset))
            for r in conset:
                self.sock.sendstr(wait_conn[r].host)
                self.sock.sendint(wait_conn[r].port)
                self.sock.sendint(r)
            # Worker reports connection errors; retry brokering if any.
            nerr = self.sock.recvint()
            if nerr != 0:
                continue
            # Port on which this worker accepts peer connections.
            self.port = self.sock.recvint()
            rmset = []
            # all connection was successfully setup
            for r in conset:
                wait_conn[r].wait_accept -= 1
                if wait_conn[r].wait_accept == 0:
                    rmset.append(r)
            for r in rmset:
                wait_conn.pop(r, None)
            self.wait_accept = len(badset) - len(conset)
            return rmset
class RabitTracker(object):
    """
    tracker for rabit

    Listens on a TCP port, assigns each connecting worker a rank and
    helps workers establish the tree and ring links they use for
    allreduce/broadcast and recovery.
    """
    def __init__(self, hostIP, nslave, port=9091, port_end=9999):
        sock = socket.socket(get_family(hostIP), socket.SOCK_STREAM)
        # Probe ports in [port, port_end) until one can be bound.
        for port in range(port, port_end):
            try:
                sock.bind((hostIP, port))
                self.port = port
                break
            except socket.error as e:
                # EADDRINUSE: errno 98 on Linux, 48 on macOS -- try next.
                if e.errno in [98, 48]:
                    continue
                else:
                    raise
        sock.listen(256)
        self.sock = sock
        self.hostIP = hostIP
        self.thread = None
        self.start_time = None
        self.end_time = None
        self.nslave = nslave
        logging.info('start listen on %s:%d', hostIP, self.port)

    def __del__(self):
        self.sock.close()

    @staticmethod
    def get_neighbor(rank, nslave):
        """Return the tree neighbors (parent, children) of *rank* in an
        implicit binary-heap layout over *nslave* nodes."""
        rank = rank + 1  # the heap arithmetic below is 1-based
        ret = []
        if rank > 1:
            ret.append(rank // 2 - 1)  # parent
        if rank * 2 - 1 < nslave:
            ret.append(rank * 2 - 1)  # left child
        if rank * 2 < nslave:
            ret.append(rank * 2)  # right child
        return ret

    def slave_envs(self):
        """
        get enviroment variables for slaves
        can be passed in as args or envs
        """
        return {
            'DMLC_TRACKER_URI': self.hostIP,
            'DMLC_TRACKER_PORT': self.port
        }

    def get_tree(self, nslave):
        """Build the binary-tree topology: neighbor lists and parent map."""
        tree_map = {}
        parent_map = {}
        for r in range(nslave):
            tree_map[r] = self.get_neighbor(r, nslave)
            parent_map[r] = (r + 1) // 2 - 1
        return tree_map, parent_map

    def find_share_ring(self, tree_map, parent_map, r):
        """
        get a ring structure that tends to share nodes with the tree
        return a list starting from r
        """
        nset = set(tree_map[r])
        cset = nset - set([parent_map[r]])  # children of r only
        if len(cset) == 0:
            return [r]
        rlst = [r]
        cnt = 0
        for v in cset:
            vlst = self.find_share_ring(tree_map, parent_map, v)
            cnt += 1
            if cnt == len(cset):
                # reverse the last child's sub-ring so the ring closes
                vlst.reverse()
            rlst += vlst
        return rlst

    def get_ring(self, tree_map, parent_map):
        """
        get a ring connection used to recover local data
        """
        assert parent_map[0] == -1
        rlst = self.find_share_ring(tree_map, parent_map, 0)
        assert len(rlst) == len(tree_map)
        ring_map = {}
        nslave = len(tree_map)
        for r in range(nslave):
            rprev = (r + nslave - 1) % nslave
            rnext = (r + 1) % nslave
            ring_map[rlst[r]] = (rlst[rprev], rlst[rnext])
        return ring_map

    def get_link_map(self, nslave):
        """
        get the link map, this is a bit hacky, call for better algorithm
        to place similar nodes together
        """
        tree_map, parent_map = self.get_tree(nslave)
        ring_map = self.get_ring(tree_map, parent_map)
        # Relabel ranks so consecutive ranks are adjacent on the ring.
        rmap = {0: 0}
        k = 0
        for i in range(nslave - 1):
            k = ring_map[k][1]
            rmap[k] = i + 1
        ring_map_ = {}
        tree_map_ = {}
        parent_map_ = {}
        for k, v in ring_map.items():
            ring_map_[rmap[k]] = (rmap[v[0]], rmap[v[1]])
        for k, v in tree_map.items():
            tree_map_[rmap[k]] = [rmap[x] for x in v]
        for k, v in parent_map.items():
            if k != 0:
                parent_map_[rmap[k]] = rmap[v]
            else:
                parent_map_[rmap[k]] = -1
        return tree_map_, parent_map_, ring_map_

    def accept_slaves(self, nslave):
        """Main tracker loop: accept worker connections and assign ranks
        until all *nslave* workers have reported shutdown."""
        # set of nodes that finishes the job
        shutdown = {}
        # set of nodes that is waiting for connections
        wait_conn = {}
        # maps job id to rank
        job_map = {}
        # list of workers that is pending to be assigned rank
        pending = []
        # lazy initialize tree_map
        tree_map = None
        while len(shutdown) != nslave:
            fd, s_addr = self.sock.accept()
            s = SlaveEntry(fd, s_addr)
            if s.cmd == 'print':
                msg = s.sock.recvstr()
                logging.info(msg.strip())
                continue
            if s.cmd == 'shutdown':
                assert s.rank >= 0 and s.rank not in shutdown
                assert s.rank not in wait_conn
                shutdown[s.rank] = s
                logging.debug('Recieve %s signal from %d', s.cmd, s.rank)
                continue
            assert s.cmd == 'start' or s.cmd == 'recover'
            # lazily initialize the slaves
            if tree_map is None:
                assert s.cmd == 'start'
                if s.world_size > 0:
                    nslave = s.world_size
                tree_map, parent_map, ring_map = self.get_link_map(nslave)
                # set of nodes that is pending for getting up
                todo_nodes = list(range(nslave))
            else:
                assert s.world_size == -1 or s.world_size == nslave
                if s.cmd == 'recover':
                    assert s.rank >= 0
            rank = s.decide_rank(job_map)
            # batch assignment of ranks
            if rank == -1:
                assert len(todo_nodes) != 0
                pending.append(s)
                # Wait until every expected worker has connected, then
                # assign ranks deterministically by host name.
                if len(pending) == len(todo_nodes):
                    pending.sort(key=lambda x: x.host)
                    for s in pending:
                        rank = todo_nodes.pop(0)
                        if s.jobid != 'NULL':
                            job_map[s.jobid] = rank
                        s.assign_rank(rank, wait_conn, tree_map, parent_map,
                                      ring_map)
                        if s.wait_accept > 0:
                            wait_conn[rank] = s
                        logging.debug(
                            'Recieve %s signal from %s; assign rank %d', s.cmd,
                            s.host, s.rank)
                if len(todo_nodes) == 0:
                    logging.info('@tracker All of %d nodes getting started',
                                 nslave)
                    self.start_time = time.time()
            else:
                s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
                logging.debug('Recieve %s signal from %d', s.cmd, s.rank)
                if s.wait_accept > 0:
                    wait_conn[rank] = s
        logging.info('@tracker All nodes finishes job')
        self.end_time = time.time()
        logging.info('@tracker %s secs between node start and job finish',
                     str(self.end_time - self.start_time))

    def start(self, nslave):
        """Run accept_slaves() in a background daemon thread."""
        def run():
            self.accept_slaves(nslave)
        self.thread = Thread(target=run, args=())
        # Fix: Thread.setDaemon() is deprecated; assign the attribute.
        self.thread.daemon = True
        self.thread.start()

    def join(self):
        """Block until the tracker thread exits (poll so signals work)."""
        # Fix: Thread.isAlive() was removed in Python 3.9; use is_alive().
        while self.thread.is_alive():
            self.thread.join(100)

    def alive(self):
        """Whether the tracker thread is still running."""
        return self.thread.is_alive()
class PSTracker(object):
    """
    Tracker module for PS

    Launches the parameter-server scheduler process (when *cmd* is given)
    and exposes the environment variables workers need to reach it.
    """
    def __init__(self, hostIP, cmd, port=9091, port_end=9999, envs=None):
        """
        Starts the PS scheduler
        """
        self.cmd = cmd
        if cmd is None:
            # Nothing to launch; slave_envs()/alive()/join() become no-ops.
            return
        envs = {} if envs is None else envs
        self.hostIP = hostIP
        # Find a free port by binding and immediately releasing it.
        # NOTE(review): inherently racy -- another process could grab the
        # port before the scheduler binds it; confirm this is acceptable.
        sock = socket.socket(get_family(hostIP), socket.SOCK_STREAM)
        for port in range(port, port_end):
            try:
                sock.bind(('', port))
                self.port = port
                sock.close()
                break
            except socket.error:
                continue
        env = os.environ.copy()
        env['DMLC_ROLE'] = 'scheduler'
        env['DMLC_PS_ROOT_URI'] = str(self.hostIP)
        env['DMLC_PS_ROOT_PORT'] = str(self.port)
        for k, v in envs.items():
            env[k] = str(v)
        self.thread = Thread(target=(lambda: subprocess.check_call(
            self.cmd, env=env, shell=True, executable='/bin/bash')),
                             args=())
        # Fix: Thread.setDaemon() is deprecated; assign the attribute.
        self.thread.daemon = True
        self.thread.start()

    def join(self):
        """Wait for the scheduler thread, if one was started."""
        if self.cmd is not None:
            # Fix: Thread.isAlive() was removed in Python 3.9.
            while self.thread.is_alive():
                self.thread.join(100)

    def slave_envs(self):
        """Env vars pointing workers at the scheduler ({} when no cmd)."""
        if self.cmd is None:
            return {}
        else:
            return {
                'DMLC_PS_ROOT_URI': self.hostIP,
                'DMLC_PS_ROOT_PORT': self.port
            }

    def alive(self):
        """Whether the scheduler thread is still running."""
        if self.cmd is not None:
            return self.thread.is_alive()
        else:
            return False
def get_host_ip(hostIP=None):
    """Best-effort resolution of this machine's reachable IP address.

    hostIP may be None/'auto' (resolve via hostname lookup), 'dns'
    (use the FQDN string), 'ip' (resolve the FQDN to an address), or an
    explicit address, which is returned unchanged unless it turns out to
    be a 127.* loopback address.
    """
    if hostIP is None or hostIP == 'auto':
        hostIP = 'ip'
    if hostIP == 'dns':
        hostIP = socket.getfqdn()
    elif hostIP == 'ip':
        from socket import gaierror
        try:
            hostIP = socket.gethostbyname(socket.getfqdn())
        except gaierror:
            # Fix: logging.warn is a deprecated alias of logging.warning.
            logging.warning('gethostbyname(socket.getfqdn()) failed... '
                            'trying on hostname()')
            hostIP = socket.gethostbyname(socket.gethostname())
    if hostIP.startswith("127."):
        # Loopback is useless to remote workers; discover the address of
        # the default outbound interface instead.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            # doesn't have to be reachable: UDP connect() only selects a
            # route, no packet is sent.
            s.connect(('10.255.255.255', 1))
            hostIP = s.getsockname()[0]
        finally:
            # Fix: the socket was previously leaked.
            s.close()
    return hostIP
def submit(nworker, nserver, fun_submit, hostIP='auto', pscmd=None):
    """Launch a job: start the appropriate tracker (rabit when nserver is
    0, the PS scheduler otherwise), invoke *fun_submit* with the tracker
    environment variables, then wait for completion.

    fun_submit(nworker, nserver, envs) is responsible for actually
    launching the worker/server processes.
    """
    if nserver == 0:
        pscmd = None
    envs = {'DMLC_NUM_WORKER': nworker, 'DMLC_NUM_SERVER': nserver}
    hostIP = get_host_ip(hostIP)
    if nserver == 0:
        # rabit-only job: workers coordinate through the rabit tracker.
        rabit = RabitTracker(hostIP=hostIP, nslave=nworker)
        envs.update(rabit.slave_envs())
        rabit.start(nworker)
        if rabit.alive():
            fun_submit(nworker, nserver, envs)
    else:
        # parameter-server job: launch the PS scheduler instead.
        pserver = PSTracker(hostIP=hostIP, cmd=pscmd, envs=envs)
        envs.update(pserver.slave_envs())
        if pserver.alive():
            fun_submit(nworker, nserver, envs)
    if nserver == 0:
        rabit.join()
    else:
        pserver.join()
def start_rabit_tracker(args):
    """Standalone function to start rabit tracker.

    Prints the tracker connection env vars to stdout between the
    DMLC_TRACKER_ENV_START/END markers so a launcher script can scrape
    them, then blocks until the tracker finishes.

    Parameters
    ----------
    args: arguments to start the rabit tracker.
    """
    envs = {
        'DMLC_NUM_WORKER': args.num_workers,
        'DMLC_NUM_SERVER': args.num_servers
    }
    rabit = RabitTracker(hostIP=get_host_ip(args.host_ip),
                         nslave=args.num_workers)
    envs.update(rabit.slave_envs())
    rabit.start(args.num_workers)
    sys.stdout.write('DMLC_TRACKER_ENV_START\n')
    # simply write configuration to stdout
    for k, v in envs.items():
        sys.stdout.write('%s=%s\n' % (k, str(v)))
    sys.stdout.write('DMLC_TRACKER_ENV_END\n')
    sys.stdout.flush()
    rabit.join()
def main():
    """Main function if tracker is executed in standalone mode."""
    parser = argparse.ArgumentParser(description='Rabit Tracker start.')
    parser.add_argument('--num-workers',
                        required=True,
                        type=int,
                        help='Number of worker processes to be launched.')
    parser.add_argument(
        '--num-servers',
        default=0,
        type=int,
        help='Number of server processes to be launched. Only used in PS jobs.')
    parser.add_argument(
        '--host-ip',
        default=None,
        type=str,
        help=('Host IP address, this is only needed ' +
              'if the host IP cannot be automatically guessed.'))
    parser.add_argument('--log-level',
                        default='INFO',
                        type=str,
                        choices=['INFO', 'DEBUG'],
                        help='Logging level of the logger.')
    args = parser.parse_args()

    fmt = '%(asctime)s %(levelname)s %(message)s'
    # argparse's `choices` already restricts --log-level to these two values,
    # so a plain mapping replaces the dead RuntimeError branch.
    level = {'INFO': logging.INFO, 'DEBUG': logging.DEBUG}[args.log_level]
    logging.basicConfig(format=fmt, level=level)

    if args.num_servers == 0:
        start_rabit_tracker(args)
    else:
        raise RuntimeError(
            "Do not yet support start ps tracker in standalone mode.")
if __name__ == "__main__":
    # entry point when the tracker script is run standalone
    main()
|
This set of 100 white silicone wristbands is perfect for improving event security at your next event. Whether you're hosting a festival, conference, convention or exhibition, our 100 white silicone wristbands help you control admissions at the point of entry and monitor attendance throughout the day. Silicone is a sturdy enough material to not tear and can support medium-sized events lasting anywhere from an afternoon to a whole weekend. Order before 3pm Monday-Friday or before 10.30am on Saturday for next-day shipping.
|
#!/usr/bin/env python3
# -*- coding: utf-8, vim: expandtab:ts=4 -*-
###############################################################################
# Copyright (c) 2015 Móréh, Tamás
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Lesser Public License v3
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/
#
# This file is part of PurePos-Python3.
#
# PurePos-Python3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PurePos-Python3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# Contributors:
# Móréh, Tamás - initial API and implementation
##############################################################################
__author__ = 'morta@digitus.itk.ppke.hu'
import math
UNKNOWN_VALUE = -99.0
class HashSuffixGuesser:  # (BaseSuffixGuesser):
    """Suffix-based tag guesser over a plain dict frequency table.

    ``freq_table`` maps a suffix string to a pair ``[tag_counts, total]``
    where ``tag_counts`` is a dict of per-tag frequencies and ``total`` is
    the overall count for that suffix. Probabilities are smoothed with the
    ``theta`` parameter across successively longer suffixes.
    """

    @staticmethod
    def max_probability_tag(probabilities: dict) -> int:
        # NOTE(review): annotated -> int but returns the dict key (a tag);
        # confirm the intended return type.
        best_tag, _ = max(probabilities.items(), key=lambda item: item[1])
        return best_tag

    def __init__(self, freq_table: dict, theta: float):
        self.freq_table = freq_table
        self.theta = theta
        self.theta_plus_one = theta + 1
        self.mapper = None
        self.lemma_mapper = None

    def tag_log_probabilities(self, word) -> dict:
        """Natural log of every tag probability for *word*."""
        probs = self.tag_probabilities(word)
        return {tag: math.log(p) for tag, p in probs.items()}

    def tag_probabilities(self, word) -> dict:
        """Smoothed tag distribution for *word*.

        Iterates suffixes from the empty suffix up to the whole word,
        folding each suffix's relative frequencies into the running
        estimate with theta-weighted interpolation.
        """
        probs = {}
        for start in range(len(word), -1, -1):
            counts, total = self.freq_table.get(word[start:], [dict(), 0])
            for tag, freq in counts.items():
                prior = probs.get(tag, 0.0)
                probs[tag] = (prior + (float(freq) / total * self.theta)) / self.theta_plus_one
        return probs

    def tag_log_probability(self, word, tag) -> float:
        """Log probability of *tag* for *word*; UNKNOWN_VALUE if zero."""
        p = self.tag_probability(word, tag)
        return math.log(p) if p > 0 else UNKNOWN_VALUE

    def tag_probability(self, word, tag) -> float:
        """Probability of (optionally mapped) *tag* for *word*."""
        lookup_tag = self.mapper.map(tag) if self.mapper is not None else tag
        return self.tag_probabilities(word).get(lookup_tag, 0.0)

    # todo not used?
    def tag_prob_hunpos(self, word, tag) -> float:
        # NOTE(review): this walks *prefixes* (word[:i]) while
        # tag_probabilities walks suffixes (word[i:]) -- confirm intentional.
        result = 0.0
        for i in range(len(word) - 1, -1, -1):
            entry = self.freq_table.get(word[:i])
            if entry is None:
                break
            freq = entry[0].get(tag)
            if freq is not None:
                result = (result + (freq / entry[1] * self.theta)) / self.theta_plus_one
        return result

    def __str__(self):
        return str(self.freq_table)
|
The word listed above (mollusca) is probably the correct spelling for the word that you entered (mullolka). This is just an educated guess based on commonly misspelled words. To double check that this is the correct word you can use the resources below to find the definition of mollusca, antonyms for mollusca, synonyms for mollusca, quotes relating to mollusca and other information about mollusca.
According to our records, the word (mullolka) may be misspelled. For information about what we believe to be the correct spelling of the word you are looking for (mollusca), please click here.
|
# -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Framework Package. This package holds all Data Management, and
# Web-UI helpful to run brain-simulations. To use it, you also need do download
# TheVirtualBrain-Scientific Package (for simulators). See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
.. moduleauthor:: Bogdan Neacsa <bogdan.neacsa@codemart.ro>
"""
import json
from tvb.core.entities import model
from tvb.basic.filters.chain import FilterChain
class StaticFiltersFactory():
    """
    Factory class to build lists with static used filters through the application.
    """
    RELEVANT_VIEW = "Relevant view"
    FULL_VIEW = "Full view"

    @staticmethod
    def build_datatype_filters(selected=RELEVANT_VIEW, single_filter=None):
        """
        Return all visibility filters for data structure page, or only one filter.

        :param selected: name of the filter to mark as selected
        :param single_filter: when given, return just that filter (built on
                              demand if it is not one of the static ones)
        """
        filters = {StaticFiltersFactory.FULL_VIEW: FilterChain(StaticFiltersFactory.FULL_VIEW),
                   StaticFiltersFactory.RELEVANT_VIEW: FilterChain(StaticFiltersFactory.RELEVANT_VIEW,
                                                                   [FilterChain.datatype + '.visible'],
                                                                   [True], operations=["=="])}
        if selected is None or len(selected) == 0:
            selected = StaticFiltersFactory.RELEVANT_VIEW
        if selected in filters:
            filters[selected].selected = True
        if single_filter is not None:
            if single_filter in filters:
                return filters[single_filter]
            else:
                ### We have some custom filter to build
                return StaticFiltersFactory._build_custom_filter(single_filter)
        return filters.values()

    @staticmethod
    def _build_custom_filter(filter_data):
        """
        Param filter_data should be at this point a dictionary of the form:
        {'type' : 'filter_type', 'value' : 'filter_value'}
        If 'filter_type' is not handled just return None.
        """
        filter_data = json.loads(filter_data)
        if filter_data['type'] == 'from_burst':
            return FilterChain('Burst', [FilterChain.datatype + '.fk_parent_burst'],
                               [filter_data['value']], operations=["=="])
        if filter_data['type'] == 'from_datatype':
            return FilterChain('Datatypes', [FilterChain.operation + '.parameters'],
                               [filter_data['value']], operations=["like"])
        return None

    @staticmethod
    def build_operations_filters(simulation_algorithm, logged_user_id):
        """
        :returns: list of filters that can be applied on Project View Operations page.
        """
        new_filters = []

        ### Filter by algorithm / categories
        new_filter = FilterChain("Omit Views", [FilterChain.algorithm_category + '.display'],
                                 [False], operations=["=="])
        new_filters.append(new_filter)
        new_filter = FilterChain("Only Upload", [FilterChain.algorithm_category + '.rawinput'],
                                 [True], operations=["=="])
        new_filters.append(new_filter)
        if simulation_algorithm is not None:
            new_filter = FilterChain("Only Simulations", [FilterChain.algorithm_group + '.id'],
                                     [simulation_algorithm.id], operations=["=="])
            new_filters.append(new_filter)

        ### Filter by operation status
        filtered_statuses = {model.STATUS_STARTED: "Only Running",
                             model.STATUS_ERROR: "Only with Errors",
                             model.STATUS_CANCELED: "Only Canceled",
                             model.STATUS_FINISHED: "Only Finished",
                             model.STATUS_PENDING: "Only Pending"}
        # dict.iteritems() is Python-2-only and raises AttributeError on
        # Python 3; items() behaves identically here on both versions.
        for status, title in filtered_statuses.items():
            new_filter = FilterChain(title, [FilterChain.operation + '.status'], [status], operations=["=="])
            new_filters.append(new_filter)

        ### Filter by author
        new_filter = FilterChain("Only mine", [FilterChain.operation + '.fk_launched_by'],
                                 [logged_user_id], operations=["=="])
        new_filters.append(new_filter)

        ### Filter by other flags
        new_filter = FilterChain("Only relevant", [FilterChain.operation + '.visible'], [True], operations=["=="])
        new_filter.selected = True
        new_filters.append(new_filter)
        return new_filters
|
Global change starts with empowering the world’s next resource of talent and innovation: young women.
We aim to educate, invest in, and connect girls globally by offering a platform with tools and resources to enable them to succeed in the present and in the future. Our leadership programs are designed intentionally for adolescent girls and will equip them with tools to navigate obstacles while making informed life-choices from a place of strength, courage, and self-love.
|
# coding=utf-8
import math
import arcpy
import functions_validation as fv
import functions_visibility as visibility
from los import functions_arcmap
class PrepareGlobalLoS(object):
    # ArcGIS Python-toolbox tool: builds "global" lines of sight that run from
    # each observer point through each target point and onward to the edge of
    # the surface raster, so each target's relation to the global horizon can
    # be analysed later.
    def __init__(self):
        """Define the tool (tool name is the name of the class)."""
        self.label = "Create Global Lines of Sight"
        self.description = "A tool to create Lines of Sight from observer to target points and further beyond target " \
                           "points to the spatial extent of the surface layer. This is necessary to analyze targets " \
                           "relation to the global horizon. The shapefile itself does not store information about " \
                           "observer's and target's offsets. This information is stored in appropriate fields."
        self.canRunInBackground = False
    def getParameterInfo(self):
        """Define parameter definitions"""
        # param0: input surface raster (elevation model)
        param0 = arcpy.Parameter(
            displayName="Surface",
            name="in_surface",
            datatype="GPRasterLayer",
            parameterType="Required",
            direction="Input")
        # param1: observer point layer (points only)
        param1 = arcpy.Parameter(
            displayName="Observer points",
            name="in_observers",
            datatype="GPFeatureLayer",
            parameterType="Required",
            direction="Input")
        param1.filter.list = ["Point"]
        # param2: field on the observer layer holding the observer offset;
        # disabled until a point layer is chosen (see updateParameters)
        param2 = arcpy.Parameter(
            displayName="Observer points offset",
            name="in_observer_offset",
            datatype="Field",
            parameterType="Required",
            direction="Input")
        param2.filter.list = ["Double"]
        param2.parameterDependencies = [param1.name]
        param2.enabled = 0
        # param3: target point layer (points only)
        param3 = arcpy.Parameter(
            displayName="Target points",
            name="in_targets",
            datatype="GPFeatureLayer",
            parameterType="Required",
            direction="Input")
        param3.filter.list = ["Point"]
        # param4: field on the target layer holding the target offset
        param4 = arcpy.Parameter(
            displayName="Target points offset",
            name="in_target_offset",
            datatype="Field",
            parameterType="Required",
            direction="Input")
        param4.filter.list = ["Double"]
        param4.parameterDependencies = [param3.name]
        param4.enabled = 0
        # param5: distance between interpolated vertices along each LoS;
        # defaults to the raster cell size (see updateParameters)
        param5 = arcpy.Parameter(
            displayName="Sampling distance",
            name="in_sampling_distance",
            datatype="GPDouble",
            parameterType="Required",
            direction="Input")
        # param5.value = 0
        # param6: output feature layer with the global lines of sight
        param6 = arcpy.Parameter(
            displayName="Output feature layer",
            name="in_output_layer",
            datatype="GPFeatureLayer",
            parameterType="Required",
            direction="output")
        params = [param0, param1, param2, param3, param4, param5, param6]
        return params
    def isLicensed(self):
        """Set whether tool is licensed to execute."""
        # requires both the Spatial Analyst and 3D Analyst extensions
        if arcpy.CheckOutExtension("spatial") == "CheckedOut" and arcpy.CheckOutExtension("3D") == "CheckedOut":
            return True
        else:
            return False
    def updateParameters(self, parameters):
        """Modify the values and properties of parameters before internal
        validation is performed. This method is called whenever a parameter
        has been changed."""
        # enable the offset-field pickers only once their point layer is set
        fv.enableParamIfPoint(parameters, 1, 2)
        fv.enableParamIfPoint(parameters, 3, 4)
        # default output path inside the current workspace
        if not parameters[6].value:
            parameters[6].value = str(arcpy.env.workspace) + "\\Global_LoS"
        # default sampling distance = surface raster cell height
        if parameters[0].value and not parameters[5].altered:
            parameters[5].value = str(arcpy.Describe(parameters[0].valueAsText).meanCellHeight)
        return
    def updateMessages(self, parameters):
        """Modify the messages created by internal validation for each tool
        parameter. This method is called after internal validation."""
        # both point layers must use a projected coordinate system
        fv.checkProjected(parameters, 1)
        fv.checkProjected(parameters, 3)
        return
    def execute(self, parameters, messages):
        """The source code of the tool."""
        surface = parameters[0].valueAsText
        observer_points = parameters[1].valueAsText
        observer_offset = parameters[2].valueAsText
        target_points = parameters[3].valueAsText
        target_offset = parameters[4].valueAsText
        sampling_distance = parameters[5].valueAsText
        output_los = parameters[6].valueAsText
        # 1) construct plain observer->target sight lines in a scratch dataset
        sightlines = arcpy.ConstructSightLines_3d(observer_points, target_points,
                                                  arcpy.CreateScratchName(prefix="sightlines",
                                                                          workspace=arcpy.env.scratchGDB),
                                                  observer_offset, target_offset, "<None>", 1,
                                                  "NOT_OUTPUT_THE_DIRECTION")
        # 2) length that is guaranteed to reach past any raster edge: the
        # diagonal of a square whose side is the larger extent dimension
        raster_extent = arcpy.sa.Raster(surface).extent
        maximal_possible_distance = math.sqrt(
            math.pow(max(raster_extent.XMax - raster_extent.XMin, raster_extent.YMax - raster_extent.YMin), 2) * 2)
        # 3) extend every sight line to that length ("global" LoS)
        spatial_ref = arcpy.Describe(sightlines).spatialReference
        visibility.makeGlobalLoS(sightlines, maximal_possible_distance, spatial_ref)
        # 4) rename the observer/target id fields to the toolbox conventions
        arcpy.AddField_management(sightlines, "ID_OBSERV", "LONG")
        arcpy.CalculateField_management(sightlines, "ID_OBSERV", "!OID_OBSERV!", "PYTHON")
        arcpy.AddField_management(sightlines, "ID_TARGET", "LONG")
        arcpy.CalculateField_management(sightlines, "ID_TARGET", "!OID_TARGET!", "PYTHON")
        arcpy.DeleteField_management(sightlines, ["OID_TARGET", "OID_OBSERV"])
        # 5) drape the lines over the surface at the requested sampling step
        temp_los_name = arcpy.CreateScratchName(prefix="los", workspace=arcpy.env.scratchGDB)
        arcpy.InterpolateShape_3d(surface, sightlines, temp_los_name, sample_distance=sampling_distance, method="BILINEAR")
        # 6) finalize attributes, verify geometry and add the result to the map
        visibility.updateLoS(temp_los_name, output_los, sightlines, target_points, True)
        arcpy.DeleteField_management(output_los, "SourceOID")
        visibility.verifyShapeStructure(sightlines, output_los)
        functions_arcmap.addLayer(output_los)
        return
|
President Donald Trump intends to sign the border security deal to avoid another partial government shutdown, according to two sources who have spoken directly with the President.
Trump said Tuesday that he was "not happy" with the tentative deal reached by congressional negotiators late Monday evening, which falls far short of his original demands.
Congress faces a deadline to get a deal passed and signed by Trump before Friday.
This is a breaking news story and will be updated.
|
import ptypes
from ptypes import *
# integral types
# short aliases over the ptypes fixed-width integer types
class u8(pint.uint8_t): pass      # unsigned 8-bit
class u16(pint.uint16_t): pass    # unsigned 16-bit
class u32(pint.uint32_t): pass    # unsigned 32-bit
class u64(pint.uint64_t): pass    # unsigned 64-bit
class s8(pint.sint8_t): pass      # signed 8-bit
class s16(pint.sint16_t): pass    # signed 16-bit
class s32(pint.sint32_t): pass    # signed 32-bit
class s64(pint.sint64_t): pass    # signed 64-bit
# lzh-specific integrals
class method_id(pstr.string):
    """5-character LZH compression-method signature, e.g. "-lh5-".

    ``set`` accepts either a raw string or a ``(type, version)`` tuple with
    type in {'lh', 'pc', 'pm', 'lz'}; ``get`` decodes the stored string back
    into that tuple form.

    Bug fixed: every super() call referenced ``Signature._method``, an
    attribute that does not exist (this class is named ``method_id``), so any
    set()/get() raised AttributeError at runtime. All calls now use
    ``super(method_id, self)``.
    """
    length = 5
    def set(self, value):
        if not isinstance(value, tuple):
            # plain string: store it verbatim
            return super(method_id, self).set(value)
        type, version = value
        if type == 'lh':
            versionmap = '0123456789abcdef'
            if version is None:
                # default to '-lhd-' -- presumably the directory-entry
                # method; TODO confirm against the LZH specification
                version = versionmap.index('d')
            elif version == 'x':
                return super(method_id, self).set('-lhx-')
            try:
                res = '-lh{:s}-'.format(versionmap[version])
            except (IndexError, TypeError):
                raise NotImplementedError((type, version))
            return super(method_id, self).set(res)
        elif type in {'pc', 'pm'}:
            versionmap = '012'
            if version is None:
                res = '-{:s}0-'.format(type)
                return super(method_id, self).set(res)
            elif version == 's':
                res = '-{:s}s-'.format(type)
                return super(method_id, self).set(res)
            try:
                res = '-{:s}{:s}-'.format(type, versionmap[version])
            except (IndexError, TypeError):
                raise NotImplementedError((type, version))
            return super(method_id, self).set(res)
        elif type == 'lz':
            versionmap = '012345678'
            if version == 's':
                res = '-lzs-'
                return super(method_id, self).set(res)
            elif version is None:
                res = '-lz4-'
                return super(method_id, self).set(res)
            try:
                res = '-lz{:s}-'.format(versionmap[version])
            except (IndexError, TypeError):
                raise NotImplementedError((type, version))
            return super(method_id, self).set(res)
        raise NotImplementedError((type, version))
    def get(self):
        """Decode the stored "-xxN-" string into a (type, version) tuple."""
        res = self.str()
        if res.startswith('-') and res.endswith('-'):
            res = res[1:-1]
            if res.startswith('lh'):
                versionmap = '0123456789abcdef'
                res = res[2:]
                if res == 'd':
                    return 'lh', None
                elif res == 'x':
                    return 'lh', 'x'
                return 'lh', versionmap.index(res)
            elif res.startswith('pc') or res.startswith('pm'):
                type, version = res[:2], res[2:]
                versionmap = '012'
                if version == 's':
                    return type, version
                return type, versionmap.index(version)
            elif res.startswith('lz'):
                versionmap = '012345678'
                type, version = res[:2], res[2:]
                if version == 's':
                    return 'lz', version
                elif version == '4':
                    return 'lz', None
                return 'lz', versionmap.index(version)
            raise NotImplementedError
        raise ValueError(res)
# extension header levels
# registry of extension-header layouts, keyed by the header level byte
class Level(ptype.definition): cache = {}
@Level.define
class Level0(pstruct.type):
    """Level-0 extension header: length-prefixed filename plus CRC."""
    type = 0
    def __filename(self):
        # filename length is read first (li = load), then sized accordingly
        res = self['filename-length'].li.int()
        return dyn.clone(pstr.string, length=res)
    _fields_ = [
        (u8, 'filename-length'),
        (__filename, 'filename'),
        (u16, 'crc'),
    ]
@Level.define
class Level1(pstruct.type):
    """Level-1 extension header: filename, CRC, OS id and next-header size."""
    type = 1
    def __filename(self):
        # filename length is read first (li = load), then sized accordingly
        res = self['filename-length'].li.int()
        return dyn.clone(pstr.string, length=res)
    _fields_ = [
        (u8, 'filename-length'),
        (__filename, 'filename'),
        (u16, 'crc'),
        (u8, 'os-identifier'),
        (u16, 'next-header-size'),
    ]
# base structures
class Signature(pstruct.type):
    """Leading bytes of an LZH header: total size, checksum, method id."""
    _fields_ = [
        (u8, 'size'),
        (u8, 'checksum'),
        (method_id, 'method'),
    ]
class Attributes(pstruct.type):
    """Fixed attribute block shared by all LZH header levels."""
    class _timestamp(u32): pass    # 32-bit timestamp -- format (MS-DOS vs unix) not evident here; TODO confirm
    class _attribute(u8): pass     # file-attribute flag byte
    _fields_ = [
        (u32, 'compressed-size'),
        (u32, 'uncompressed-size'),
        (_timestamp, 'timestamp'),
        (_attribute, 'file-attribute'),
        (u8, 'level-identifier'),
    ]
    def Level(self):
        # header level byte, used to select the extension layout from `Level`
        return self['level-identifier'].int()
class Header(pstruct.type):
    """Complete LZH member header: signature, attributes, level-specific
    extension, then padding up to the size declared in the signature."""
    def __extended(self):
        # pick the extension-header layout matching the declared level
        res = self['attributes'].li
        return Level.lookup(res.Level())
    def __padding_header(self):
        # pad to signature['size']; the extra 2 presumably covers bytes not
        # included in the declared size -- TODO confirm against the LZH spec
        res = self['signature'].li
        cb = res['size'].int()
        total = 2 + sum(self[fld].li.size() for fld in ['signature', 'attributes', 'extended'])
        return dyn.block(max(0, cb - total))
    _fields_ = [
        (Signature, 'signature'),
        (Attributes, 'attributes'),
        (__extended, 'extended'),
        (__padding_header, 'padding'),
    ]
class File(pstruct.type):
    """One archive member: header followed by its compressed data block."""
    def __data(self):
        # data block length comes from the header's compressed-size field
        res = self['header'].li
        return dyn.block(res['attributes']['compressed-size'].int())
    _fields_ = [
        (Header, 'header'),
        (__data, 'data'),
    ]
if __name__ == '__main__':
    # ad-hoc smoke test: parse a local archive and dump its header pieces
    import ptypes, archive.lha
    ptypes.setsource(ptypes.prov.file('c:/users/user/Downloads/fcgb2.lzh', mode='r'))
    z = archive.lha.File()
    z = z.l
    print(z.source.size())
    print(z['header']['signature'])
    print(z['header']['attributes'])
    print(z['header'])
    # Header has no 'filename' field of its own; the filename lives inside
    # the level-specific extension header (original raised a key error here).
    print(z['header']['extended']['filename'])
|
Delta Air Lines officials say the company will hire 300 pilots starting in November.
ATLANTA (AP) - Delta Air Lines officials say the company will hire 300 pilots starting in November.
The Atlanta Journal-Constitution reports that the development marks the first pilot hiring since 2010 for Atlanta-based Delta.
Company officials say they will add 50 pilots per month from November through early 2014, then about 20 per month through September 2014.
Pilots who were previously furloughed will have first dibs on the positions, but not all are expected to return.
Pilots from Delta Connection carriers Compass and Endeavor Air are expected to fill some of the positions, along with experienced pilots from elsewhere such as other regional carriers and the U.S. military.
|
from django.core.management.base import BaseCommand
from rest_framework.compat import coreapi
from rest_framework.renderers import (
CoreJSONRenderer, JSONOpenAPIRenderer, OpenAPIRenderer
)
from rest_framework.schemas.generators import SchemaGenerator
class Command(BaseCommand):
    """Management command that renders the project's API schema to stdout."""
    help = "Generates configured API schema for project."

    def add_arguments(self, parser):
        # free-form schema metadata options, all optional strings
        for option in ('title', 'url', 'description'):
            parser.add_argument('--{}'.format(option), dest=option,
                                default=None, type=str)
        parser.add_argument('--format', dest="format",
                            choices=['openapi', 'openapi-json', 'corejson'],
                            default='openapi', type=str)

    def handle(self, *args, **options):
        assert coreapi is not None, 'coreapi must be installed.'
        generator = SchemaGenerator(url=options['url'],
                                    title=options['title'],
                                    description=options['description'])
        schema = generator.get_schema(request=None, public=True)
        rendered = self.get_renderer(options['format']).render(
            schema, renderer_context={})
        self.stdout.write(rendered.decode('utf-8'))

    def get_renderer(self, format):
        # raises KeyError on an unknown format, but argparse `choices`
        # already restricts the values that can reach this point
        renderers = {
            'corejson': CoreJSONRenderer,
            'openapi': OpenAPIRenderer,
            'openapi-json': JSONOpenAPIRenderer,
        }
        return renderers[format]()
|
Venture capital is money provided to companies in their early stages when they are high-potential companies with the probability of above-average returns on capital. Venture capital usually comes from professionally managed funds that have between $25 million and $1 billion to invest in early-stage growth companies. A venture capital fund makes its money back by owning equity in the company it invested in, and venture capitalists usually look for companies that have a new technology, new product or new business model in the high-tech industry.
Another source of much needed money for start-ups can come from what is called Angel Money, which is capital provided to start-ups by rich investors (called Angel Investors) and like VC firms, take equity in return for their much-needed capital. Angel investors can be somewhat difficult to track down. You can look nearby for affluent individuals because these kinds of investors like to be able to pop into the company they invested in if they feel like it. Oftentimes, Angel Investors will not be alone; Angel Investors will include themselves into a conglomerate of Angel Investors called an Angel Investor Group, or AG for Angel Group. AG directories can be found by searching the term in your favorite search engine, by asking your financial advisor, and even asking a friend if they know an affluent investor.
Another form of funding of great help to the entrepreneur is the small business grant. It provides money from the government to help fund a venture and best of all, the money is a grant, not a loan, so the money from a grant comes for free. A small business grant can come from federal and state/provincial agencies but certain conditions must be met before a venture can benefit from a small business grant. First of all, a venture needs to qualify as a small business. One of the determinants for this is the number of employees a venture has. Generally, 500 employees or less qualifies a venture as a small business. Also, a business’s credit is also a factor for the government to decide whether or not to give a grant.
There are many purposes for seeking a grant because the money can be used for anything a venture requires such as capital to purchase real estate, money to invest in marketing, assisting debt control, purchasing inventory, etc. A quick query resembling “small business grants” typed into your search engine should direct you to your federal government’s web page for small business grants. The information on how to apply can vary from state/province to state so be sure to read the information thoroughly before applying.
There are many reasons why someone would want to start their own business but three of the more common reasons people choose to go into business for themselves are: to be your own boss, the potential to make more money than a regular salaried job and the satisfaction of creating and running a successful business.
The risks associated with starting a business can be scary to some people though. However, a great deal of the risk associated with starting a business can be avoided with a proper business plan. It is essential to draft a proper and professional business plan and it must include: A marketing section, a financial section, mission statement, sales forecasts for the next 3-5 years, a list of all possible expenses and an outline of exactly what your business will do. If you try to pitch your idea to VCs or AGs without a proper business plan, they will not take you seriously.
There are many shapes and forms your budding business can take. Before choosing one, consult your accountant.
Sole proprietorship – One person is the owner and is liable for all risks.
Corporation – Many owners become involve in the business by having shares of the company. Owners are not liable for risk.
Partnership – Two or more people own the business and are liable for all risks.
Limited liability company – A type of private corporation where one or a few people can own without the risk of a sole proprietorship.
You should now have an understanding of how venture capitalism works and how to make it work for you. Small business grants, AGs and Venture Capital firms will be your start-up’s lifeblood for the first few years until profits expand so you can utilize them to your advantage.
|
# -*- coding: utf-8 *-*
# Copyright (c) 2013 Tisserant Pierre
#
# This file is part of Dragon dice simulator.
#
# Dragon dice simulator is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragon dice simulator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Dragon dice simulator. If not, see <http://www.gnu.org/licenses/>.
from business.effect import Effect
class TargetedKillEffect(Effect):
    """'Slay': targeted enemy units must roll their ID icon or die."""
    @property
    def name(self):
        return 'Slay'
    @property
    def description(self):
        return '%s targeted ennemy units must roll ID or die' % self.amount
    @property
    def key(self):
        return 'targeted_kill'
    def before_resolution(self, army, opposing_armies):
        # resolution not implemented yet; effect expires immediately
        print 'Placeholder - here the targeted death effect should be resolved and saved against'
        self.expired = True
class TargetedManeuverKillByHealthEffect(Effect):
    """'Stomp': units worth up to `amount` health must roll a maneuver or die."""
    @property
    def name(self):
        return 'Stomp'
    @property
    def description(self):
        return 'Target unit(s) worth %s health or less units must roll a maneuver or die' % self.amount
    @property
    def key(self):
        return 'targeted_maneuver_kill_by_health'
    def before_resolution(self, army, opposing_armies):
        # resolution not implemented yet; effect expires immediately
        print 'Placeholder - here the targeted death effect should be resolved and saved against'
        self.expired = True
class TargetedManeuverKillBuryingByHealthEffect(Effect):
    """'Crush': like Stomp, but the slain must also save or be buried."""
    @property
    def name(self):
        return 'Crush'
    @property
    def description(self):
        return 'Target unit(s) worth %s health or less units must roll a maneuver or die. Thoses who die must roll a save or be buried.' % self.amount
    @property
    def key(self):
        return 'targeted_maneuver_kill_bury_by_health'
    def before_resolution(self, army, opposing_armies):
        # resolution not implemented yet; effect expires immediately
        print 'Placeholder - here the targeted death effect should be resolved and saved against'
        self.expired = True
class TargetedKillBuryingByHealthEffect(Effect):
    """'Poison': save-or-die by health; the slain save again or are buried."""
    @property
    def name(self):
        return 'Poison'
    @property
    def description(self):
        return 'Target unit(s) worth %s health or less units must roll a save or die. Thoses who die must roll a save or be buried.' % self.amount
    @property
    def key(self):
        return 'targeted_kill_bury_by_health'
    def before_resolution(self, army, opposing_armies):
        # resolution not implemented yet; effect expires immediately
        print 'Placeholder - here the targeted death effect should be resolved and saved against'
        self.expired = True
class TargetedBuryEffect(Effect):
    """'Swallow': targeted units must roll ID or be killed and buried."""
    @property
    def name(self):
        return 'Swallow'
    @property
    def description(self):
        return '%s targeted ennemy units must roll ID or be killed and buried' % self.amount
    @property
    def key(self):
        return 'targeted_bury'
    def before_resolution(self, army, opposing_armies):
        # resolution not implemented yet; effect expires immediately
        print 'Placeholder - here the targeted death effect should be resolved and saved against'
        self.expired = True
class TargetedDamageEffect(Effect):
    """'Bullseye': `amount` damage distributed as the active player chooses."""
    @property
    def name(self):
        return 'Bullseye'
    @property
    def description(self):
        return '%s damage targeted as the active player choose' % self.amount
    @property
    def key(self):
        return 'targeted_damage'
    def before_resolution(self, army, opposing_armies):
        # resolution not implemented yet; effect expires immediately
        print 'Placeholder - here the targeted damage should be resolved and saved by the opposing player'
        self.expired = True
class TargetedUnsecableDamageEffect(Effect):
    """'Kick': `amount` chosen units each suffer `increment` indivisible damage."""
    def __init__(self, amount, increment):
        self.amount = amount        # number of targeted units
        self.expired = False
        self.increment = increment  # damage dealt per unit (cannot be split)
    @property
    def name(self):
        return 'Kick'
    @property
    def description(self):
        return '%s chosen unit suffer %s damages' % (self.amount, self.increment)
    def stack(self, effect):
        # merge with another effect of the same key and same per-unit damage;
        # differing increments are a programming error
        stackable = False
        if (effect.key == self.key):
            if (self.increment == effect.increment):
                self.amount += effect.amount
                stackable = True
            else:
                raise RuntimeError('Trying to stack two different effect')
        return stackable
    @property
    def key(self):
        return 'targeted_unsecable_damage'
    def before_resolution(self, army, opposing_armies):
        # resolution not implemented yet; effect expires immediately
        print 'Placeholder - here the targeted damage should be resolved and saved by the opposing player'
        self.expired = True
class TargetedUnsecableBuryingDamageEffect(Effect):
    """'Flaming Arrows': like Kick, but killed units must save or be buried."""
    def __init__(self, amount, increment):
        self.amount = amount        # number of targeted units
        self.expired = False
        self.increment = increment  # damage dealt per unit (cannot be split)
    @property
    def name(self):
        return 'Flaming Arrows'
    @property
    def description(self):
        return '%s chosen unit suffer %s damages ; killed unit must save or be buried' % (self.amount, self.increment)
    def stack(self, effect):
        # merge with another effect of the same key and same per-unit damage
        stackable = False
        if (effect.key == self.key):
            if (self.increment == effect.increment):
                self.amount += effect.amount
                stackable = True
            else:
                raise RuntimeError('Trying to stack two different effect')
        return stackable
    @property
    def key(self):
        return 'targeted_unsecable_burying_damage'
    def before_resolution(self, army, opposing_armies):
        # resolution not implemented yet; effect expires immediately
        print 'Placeholder - here the targeted damage should be resolved and saved by the opposing player'
        self.expired = True
class TargetedUnsecableInstantBuryingDamageEffect(Effect):
    """'Gore': like Kick, but killed units are buried outright (no save)."""
    def __init__(self, amount, increment):
        self.amount = amount        # number of targeted units
        self.expired = False
        self.increment = increment  # damage dealt per unit (cannot be split)
    @property
    def name(self):
        return 'Gore'
    @property
    def description(self):
        return '%s chosen unit suffer %s damages ; killed unit are buried' % (self.amount, self.increment)
    def stack(self, effect):
        # merge with another effect of the same key and same per-unit damage
        stackable = False
        if (effect.key == self.key):
            if (self.increment == effect.increment):
                self.amount += effect.amount
                stackable = True
            else:
                raise RuntimeError('Trying to stack two different effect')
        return stackable
    @property
    def key(self):
        return 'targeted_unsecable_instant_burying_damage'
    def before_resolution(self, army, opposing_armies):
        # resolution not implemented yet; effect expires immediately
        print 'Placeholder - here the targeted damage should be resolved and saved by the opposing player'
        self.expired = True
class TargetedIDKillEffect(Effect):
    """Kill chosen units that rolled an ID, after saves are resolved.

    NOTE(review): relies on ``self.amount`` being set elsewhere (there is
    no ``__init__`` here) -- presumably by ``Effect`` or the creator of
    the instance; confirm before reuse.
    """

    @property
    def name(self):
        return 'Decapitate/Impale'

    @property
    def description(self):
        return 'After save are rolled, as an Instant effect, choose and kill %s unit(s) that rolled an ID' % (self.amount)

    @property
    def key(self):
        return 'targeted_ID_kill'

    def before_resolution(self, army, opposing_armies):
        # print() works on both Python 2 and 3 for a single argument;
        # the original used the Python-2-only print statement.
        print('Placeholder - here the targeted damage should be resolved and saved by the opposing player')
        self.expired = True
class TargetedIDKillByHealthEffect(Effect):
    """Kill up to a health budget's worth of units that rolled an ID.

    NOTE(review): relies on ``self.amount`` being set elsewhere (there is
    no ``__init__`` here) -- presumably by ``Effect`` or the creator of
    the instance; confirm before reuse.
    """

    @property
    def name(self):
        return 'Choke'

    @property
    def description(self):
        return 'After save are rolled, as an Instant effect, choose and kill up to %s worth of health unit(s) that rolled an ID' % (self.amount)

    @property
    def key(self):
        return 'targeted_ID_kill_by_health'

    def before_resolution(self, army, opposing_armies):
        # print() works on both Python 2 and 3 for a single argument;
        # the original used the Python-2-only print statement.
        print('Placeholder - here the targeted damage should be resolved and saved by the opposing player')
        self.expired = True
class TargetedJawDragonKillEffect(Effect):
    """Kill dragons that rolled jaws, with fallback damage when short of targets.

    NOTE(review): ``name`` returns the same display string as
    TargetedIDKillEffect ('Decapitate/Impale') -- possibly a copy/paste
    slip; confirm the intended display name.
    """

    @property
    def name(self):
        return 'Decapitate/Impale'

    def __init__(self, amount, default_damage):
        # Number of jaw-rolling dragons that may be killed outright.
        self.amount = amount
        self.expired = False
        # Damage dealt to an arbitrary dragon when targets run out.
        self.default_damage = default_damage

    def stack(self, effect):
        """Merge another effect of the same key into this one.

        Returns True when the amounts were merged, False when the keys
        differ. Raises RuntimeError when the keys match but the default
        damages differ (the effects are not actually identical).
        """
        stackable = False
        if (effect.key == self.key):
            if (self.default_damage == effect.default_damage):
                self.amount += effect.amount
                stackable = True
            else:
                raise RuntimeError('Trying to stack two different effect')
        return stackable

    @property
    def description(self):
        return 'Kill up to %s dragon(s) that rolled jaw ; if there is not enough targets, inflict %s damage to any dragon' % (self.amount, self.default_damage)

    @property
    def key(self):
        return 'targeted_jaws_kill'

    def before_resolution(self, army, opposing_armies):
        # print() works on both Python 2 and 3 for a single argument;
        # the original used the Python-2-only print statement.
        print('Placeholder - here the targeted damage should be resolved and saved by the opposing player')
        self.expired = True
|
The Northern League MP at the centre of a racism row said that Cecile Kyenge, Italy's first black minister, had accepted his apology for likening her to an 'orangutan' — but he will not resign.
Roberto Calderoli admitted he had done “something stupid” and pledged to send Kyenge, Italy’s integration minister, a bouquet of roses, Il Fatto Quotidiano reported.
He addressed his apology to the Senate and said it had also been accepted by Kyenge.
His remarks, made at a rally on Sunday, have been met with a backlash in Italy, with 135,000 people signing an online petition calling for his dismissal from the anti-immigration Northern League party.
In a statement, he said the comment was meant “as a joke” and he did not mean to offend Kyenge.
Kyenge has been subjected to numerous racial attacks since becoming integration minister in April.
|
#!/usr/bin/python
#
# Copyright (C) 2015 Christoph Lehmann
#
# This file is part of pvd-tool.
#
# pvd-tool is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pvd-tool is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pvd-tool. If not, see <http://www.gnu.org/licenses/>.
#
# TODO
# * more metadata written to csv (timestamp, cmdline args, ...)
import sys
import argparse
import os
import xml.etree.cElementTree as ET
import numpy as np
import re
from collections import OrderedDict
import json
import imp
import time # for performance measurement
import numbers
import six
import plot
from helpers import *
# Plot = plot.MPLPlot
Plot = plot.GnuPlot
time_total = time.time()
time_import_vtk = 0.0
def die(msg, status=1):
    """Write *msg* to stderr and abort the process with exit code *status*."""
    sys.stderr.write(msg)
    raise SystemExit(status)
def warn(msg):
    """Report a non-fatal problem on stderr, prefixed with "WARNING: "."""
    text = "WARNING: {0}\n".format(msg)
    sys.stderr.write(text)
class JsonSer(json.JSONEncoder):
    """JSON encoder that serializes iterables of key/value pairs as ordered
    mappings, and DoV/Point/Cell helper objects via their string form."""

    def default(self, o):
        # Iterables of pairs become ordered mappings; a TypeError covers
        # both "not iterable" and "iterable but not pairs".
        try:
            return OrderedDict(iter(o))
        except TypeError:
            pass
        if isinstance(o, (DoV, Point, Cell)):
            return str(o)
        return json.JSONEncoder.default(self, o)
def getFilesTimes(xmlTree, pathroot):
    """Extract (timesteps, file paths) from a parsed PVD collection tree.

    Returns (None, None) unless the tree has exactly the structure
    VTKFile > Collection > DataSet*. File paths are resolved relative to
    *pathroot* via relpathfrom().
    """
    root = xmlTree.getroot()
    if root.tag != "VTKFile":
        return None, None
    kids = list(root)
    if len(kids) != 1:
        return None, None
    coll = kids[0]
    if coll.tag != "Collection":
        return None, None
    times = []
    paths = []
    for ds in coll:
        if ds.tag != "DataSet":
            return None, None
        times.append(float(ds.get("timestep")))
        paths.append(relpathfrom(pathroot, ds.get("file")))
    return times, paths
def relpathfrom(origin, relpath):
    """Resolve *relpath* against *origin* unless it is already absolute."""
    return relpath if os.path.isabs(relpath) else os.path.join(origin, relpath)
# TODO allow for wildcard attributes
def get_attribute_idcs(fieldData, attrs):
    """Map requested attributes to array indices of a VTK field-data object.

    Parameters
    ----------
    fieldData : object with GetNumberOfArrays()/GetArray(i) (vtk field data)
    attrs : iterable of matchers providing matches(name) and get_axis()

    Returns
    -------
    list of (array_index, array_name, axis) tuples, in matcher order;
    each array index appears at most once.
    """
    idcs = []
    idcs_set = set()  # guards against listing the same array twice
    for a in attrs:
        found = False
        num_arr = fieldData.GetNumberOfArrays()
        # range() instead of the Python-2-only xrange() so the tool also
        # runs under Python 3 (the file otherwise uses 2/3-compatible
        # print() calls and six).
        for i in range(num_arr):
            n = fieldData.GetArray(i).GetName()
            if a.matches(n):
                if i not in idcs_set:
                    idcs.append((i, n, a.get_axis()))
                    idcs_set.add(i)
                found = True
    # if num_arr != 0 and not found:
    #     warn("Attribute %s not found" % a)
    return idcs
def apply_script(fcts, timesteps, grids):
    """Overwrite point-data arrays of *grids* with analytically computed values.

    *fcts* maps attribute name -> f(timestep, coords) returning a tuple (or a
    single float) of the same width as the original array tuple. Each grid is
    deep-copied; the originals are left untouched. Returns the list of
    transformed grids.
    """
    assert len(timesteps) == len(grids)
    res = [None for _ in range(len(grids))]
    # range() instead of the Python-2-only xrange() for Python 3 support.
    for i in range(len(grids)):
        ts = timesteps[i]
        grid = grids[i]
        ngrid = vtk.vtkUnstructuredGrid()
        ngrid.DeepCopy(grid)
        # TODO extend for cells
        gridPoints = ngrid.GetPoints()
        numPt = gridPoints.GetNumberOfPoints()
        gridPD = ngrid.GetPointData()
        for ai in range(gridPD.GetNumberOfArrays()):
            arr = gridPD.GetArray(ai)
            attr = arr.GetName()
            if attr not in fcts:
                # BUGFIX: the original fell through and called
                # arr.SetTuple(pi, None) for every point, which fails inside
                # VTK; warn once per array and leave it untouched instead.
                warn("no function found for attribute {}".format(attr))
                continue
            fct = fcts[attr]
            for pi in range(numPt):
                coords = gridPoints.GetPoint(pi)
                tup = arr.GetTuple(pi)
                ntup = fct(ts, coords)
                if type(ntup) == float: ntup = (ntup,)
                assert len(tup) == len(ntup)
                arr.SetTuple(pi, ntup)
        res[i] = ngrid
    return res
def get_point_data_from_grid(
        point, point_id, grid, src, attrIdcs, attrData, incl_coords,
        rec, meta
        ):
    """Append one grid point's data to *rec* (values) and *meta* (labels).

    Emits the point coordinates first (when *incl_coords* is set), then one
    entry per component of every selected attribute array. Out-of-range
    point ids only produce a warning.
    """
    pts = grid.GetPoints()
    npts = pts.GetNumberOfPoints()
    if not (0 <= point_id < npts):
        warn("point index {} out of bounds [0,{}]\n".format(point_id, npts-1))
        return

    if incl_coords:
        for ci, coord in enumerate(pts.GetPoint(point_id)):
            rec.append(coord)
            meta.append(Meta(src, DoV.DOM, "coord", ci, point))

    for ai, arr in enumerate(attrData):
        attr_name = attrIdcs[ai][1]
        for ci, comp in enumerate(arr.GetTuple(point_id)):
            rec.append(comp)
            meta.append(Meta(src, DoV.VAL, attr_name, ci, point, axis=attrIdcs[ai][2]))
def get_cell_data_from_grid(
        cell, grid, src, attrIdcs, attrData, incl_coords,
        rec, meta
        ):
    """Append one grid cell's data to *rec* (values) and *meta* (labels).

    Cells have no single coordinate, so zero-filled placeholders are
    emitted (when *incl_coords* is set) to keep the record layout aligned
    with the point case. Out-of-range cell ids only produce a warning.
    """
    ncells = grid.GetCells().GetNumberOfCells()
    cid = cell.get()
    if not (0 <= cid < ncells):
        warn("{} out of bounds [0,{}]\n".format(cell, ncells-1))
        return

    if incl_coords:
        # add dummy coordinates with the dimensionality of the grid points
        for ci, _ in enumerate(grid.GetPoints().GetPoint(0)):
            rec.append(0.0)
            meta.append(Meta(src, DoV.DOM, "coord", ci, cell))

    for ai, arr in enumerate(attrData):
        attr_name = attrIdcs[ai][1]
        for ci, comp in enumerate(arr.GetTuple(cid)):
            rec.append(comp)
            meta.append(Meta(src, DoV.VAL, attr_name, ci, cell, axis=attrIdcs[ai][2]))
def filter_grid_ts(src, grid, timestep, attrs, points_cells, incl_coords):
    """Build one record (row) for *timestep* from the selected points/cells.

    Points given by coordinates are interpolated with vtkProbeFilter;
    points/cells given by index are read from the grid directly. Returns
    (record, MetaList) or (None, None) for an empty grid.
    """
    gridPoints = grid.GetPoints()
    gridCells = grid.GetCells()

    attrIdcsPt = get_attribute_idcs(grid.GetPointData(), attrs)
    attrDataPt = [ grid.GetPointData().GetArray(i) for i, _1, _2 in attrIdcsPt ]
    attrIdcsCell = get_attribute_idcs(grid.GetCellData(), attrs)
    attrDataCell = [ grid.GetCellData().GetArray(i) for i, _1, _2 in attrIdcsCell ]

    npts = gridPoints.GetNumberOfPoints()
    ncells = gridCells.GetNumberOfCells()

    if (npts + ncells) > 0:
        # categorize points: index or coordinates
        coord_pts = []
        map_point_indices = {} # maps point index in the list to point index in probeFilter
        for i, point_cell in enumerate(points_cells):
            if isinstance(point_cell, Point):
                coords = point_cell.get_coords()
                if coords:
                    map_point_indices[i] = len(coord_pts)
                    coord_pts.append(coords)

        if coord_pts:
            interpPts = vtk.vtkPoints()
            for c in coord_pts:
                interpPts.InsertNextPoint(*c)

            interpData = vtk.vtkPolyData()
            interpData.SetPoints(interpPts)

            probeFilter = vtk.vtkProbeFilter()
            probeFilter.SetSourceData(grid)
            probeFilter.SetInputData(interpData)
            probeFilter.Update()
            grid_interpolated = probeFilter.GetOutput()

            attrIdcsCoords = get_attribute_idcs(grid_interpolated.GetPointData(), attrs)
            # BUGFIX: get_attribute_idcs() returns (index, name, axis)
            # triples; the old two-element unpacking here raised ValueError
            # whenever coordinate-based points were used.
            attrDataCoords = [ grid_interpolated.GetPointData().GetArray(i) for i, _1, _2 in attrIdcsCoords ]

        rec = []
        meta = []

        rec.append(timestep)
        meta.append(Meta(src, DoV.TIM, "time"))

        for i, point_cell in enumerate(points_cells):
            if isinstance(point_cell, Point):
                if point_cell.get_coords():
                    p = map_point_indices[i]
                    get_point_data_from_grid(point_cell, p, grid_interpolated, src, attrIdcsCoords, attrDataCoords, incl_coords,
                            rec, meta)
                else:
                    p = point_cell.get()
                    get_point_data_from_grid(point_cell, p, grid, src, attrIdcsPt, attrDataPt, incl_coords,
                            rec, meta)
            elif isinstance(point_cell, Cell):
                get_cell_data_from_grid(point_cell, grid, src, attrIdcsCell, attrDataCell, incl_coords,
                        rec, meta)
            else:
                print("Error: Given object is neither point nor cell index")
                assert False

        return rec, MetaList(meta)

    return None, None
def filter_grid_dom(src, grid, attrs, points_cells, incl_coords):
    """Build per-point/per-cell records over the domain of *grid*.

    One record per entry of *points_cells* (all grid points when it is
    None). The first column is each entry's x-value (falling back to its
    ordinal number); coordinate-based points are interpolated via
    vtkProbeFilter. Returns (records, MetaList) or (None, None) for an
    empty grid.
    """
    gridPoints = grid.GetPoints()
    gridCells = grid.GetCells()
    attrIdcsPt = get_attribute_idcs(grid.GetPointData(), attrs)
    attrDataPt = [ grid.GetPointData().GetArray(i) for i, _1, _2 in attrIdcsPt ]
    attrIdcsCell = get_attribute_idcs(grid.GetCellData(), attrs)
    attrDataCell = [ grid.GetCellData().GetArray(i) for i, _1, _2 in attrIdcsCell ]
    npts = gridPoints.GetNumberOfPoints()
    ncells = gridCells.GetNumberOfCells()
    # Default: every grid point.
    if points_cells is None:
        points_cells = [ Point(i) for i in range(npts) ]
    if (npts + ncells) > 0:
        # categorize points: index or coordinates
        coord_pts = []
        map_point_indices = {} # maps point index in the list to point index in probeFilter
        for i, point_cell in enumerate(points_cells):
            if isinstance(point_cell, Point):
                coords = point_cell.get_coords()
                if coords:
                    map_point_indices[i] = len(coord_pts)
                    coord_pts.append(coords)
        if coord_pts:
            # Interpolate the coordinate-based points in one probe pass.
            interpPts = vtk.vtkPoints()
            for c in coord_pts:
                interpPts.InsertNextPoint(*c)
            interpData = vtk.vtkPolyData()
            interpData.SetPoints(interpPts)
            probeFilter = vtk.vtkProbeFilter()
            probeFilter.SetSourceData(grid)
            probeFilter.SetInputData(interpData)
            probeFilter.Update()
            grid_interpolated = probeFilter.GetOutput()
            attrIdcsCoords = get_attribute_idcs(grid_interpolated.GetPointData(), attrs)
            attrDataCoords = [ grid_interpolated.GetPointData().GetArray(i) for i, _1, _2 in attrIdcsCoords ]
        recs = []
        meta = []
        meta.append(Meta(src, DoV.TIM, "ordinal number or dist from first point"))
        first_loop = True
        for i, point_cell in enumerate(points_cells):
            x = point_cell.get_x_value()
            if x is None: x = i
            rec = [ x ]
            # Column meta is identical for every row; collect it once.
            tmp_meta = []
            if isinstance(point_cell, Point):
                if point_cell.get_coords():
                    p = map_point_indices[i]
                    get_point_data_from_grid(point_cell, p, grid_interpolated, src, attrIdcsCoords,
                            attrDataCoords, incl_coords, rec, tmp_meta)
                else:
                    p = point_cell.get()
                    get_point_data_from_grid(point_cell, p, grid, src, attrIdcsPt,
                            attrDataPt, incl_coords, rec, tmp_meta)
            elif isinstance(point_cell, Cell):
                get_cell_data_from_grid(point_cell, grid, src, attrIdcsCell,
                        attrDataCell, incl_coords, rec, tmp_meta)
            else:
                print("Error: Given object is neither point nor cell index")
                assert False
            if first_loop:
                first_loop = False
                meta.extend(tmp_meta)
            recs.append(rec)
        return recs, MetaList(meta)
    return None, None
def filter_grid_dom_old(src, grid, attrs):
    """Legacy whole-domain extraction: one record per grid point.

    Superseded by filter_grid_dom(); kept for reference. Returns
    (records, MetaList), with coordinates followed by every component of
    every selected point-data attribute.
    """
    gridPoints = grid.GetPoints()
    attrIdcs = get_attribute_idcs(grid.GetPointData(), attrs)
    # BUGFIX: get_attribute_idcs() returns (index, name, axis) triples;
    # the old two-element unpacking here raised ValueError.
    attrData = [ grid.GetPointData().GetArray(i) for i, _1, _2 in attrIdcs ]
    meta = []
    recs = []
    first_loop = True
    # range()/enumerate() instead of the Python-2-only xrange().
    for p in range(gridPoints.GetNumberOfPoints()):
        rec = []
        for ci, coord in enumerate(gridPoints.GetPoint(p)):
            rec.append(coord)
            if first_loop: meta.append(Meta(src, DoV.DOM, "coord %i" % ci))
        for ai, a in enumerate(attrData):
            an = attrIdcs[ai][1]
            for ci, comp in enumerate(a.GetTuple(p)):
                rec.append(comp)
                if first_loop: meta.append(Meta(src, DoV.VAL, "%s[%i]" % (an, ci)))
        first_loop = False
        recs.append(rec)
    return recs, MetaList(meta)
def write_pvd(outfh, timesteps, vtus):
    """Write a ParaView .pvd collection referencing *vtus* at *timesteps*,
    then close the output handle."""
    assert len(timesteps) == len(vtus)
    outfh.write('<?xml version="1.0"?>\n'
            '<VTKFile type="Collection" version="0.1" byte_order="LittleEndian" compressor="vtkZLibDataCompressor">\n'
            ' <Collection>\n')
    for ts, vtu in zip(timesteps, vtus):
        outfh.write(' <DataSet timestep="{0}" group="" part="0" file="{1}"/>\n'.format(ts, vtu))
    outfh.write(" </Collection>\n"
            "</VTKFile>")
    outfh.close()
def write_csv(meta, records, outFile, precision, json_enc):
    """Write *records* via np.savetxt with a JSON column-description header.

    *meta* holds one Meta-like object per column (their .col field is set
    here to the 1-based column number); *json_enc* serializes each of them
    into the header; *precision* is the number of significant digits of
    the %g output format.
    """
    header = "Columns:\n"
    header2 = "\n"
    nc = len(meta)
    header += "[\n"
    old_meta = None
    # range() instead of the Python-2-only xrange() for Python 3 support.
    for i in range(nc):
        # TODO: more tabular format for header
        # blank separator line between columns of different origin
        if old_meta and (
            old_meta.src != meta[i].src or old_meta.dov != meta[i].dov
            or old_meta.pex != meta[i].pex or old_meta.tfm != meta[i].tfm
        ): header += "\n"
        meta[i].col = "{0:2}".format(i+1)
        header += " {0}".format(json_enc.encode(meta[i]))
        if i != nc-1: header += ","
        header += "\n"
        old_meta = meta[i]
        # if i == 0:
        #     header2 += "{{:>{}}}".format(precision+5).format(meta[i].attr)
        # else:
        if i != 0:
            header2 += " "
            colwidth = precision + 7
        else:
            colwidth = precision + 5
        attr = meta[i].attr
        # truncate over-long attribute names to the column width
        if len(attr) > colwidth:
            attr = attr[:colwidth-2] + ".."
        header2 += "{{:>{}}}".format(colwidth).format(attr)
    header += "]\n" + header2
    np.savetxt(outFile, records,
            delimiter=" ",
            fmt="%{}.{}g".format(precision+7, precision),
            header=header)
def read_csv(fh, parse_header=True):
    """Load a CSV written by write_csv().

    Returns (array, meta), where meta is a MetaList parsed from the JSON
    column header, or None when no (valid) header is present. *fh* may be
    a path or an open file object.
    """
    if isinstance(fh, six.string_types):
        with open(fh) as fh_:
            return read_csv(fh_, parse_header)
    meta = None
    if parse_header:
        mode = 0 # 0: looking for "Columns:", 1: expect "[", 2: inside JSON
        json_str = ""
        while True:
            lastpos = fh.tell()
            line = fh.readline()
            if not line: break
            if line.startswith("#"):
                line = line.lstrip("#").lstrip()
                if mode == 0:
                    if line.startswith("Columns:"):
                        mode = 1
                elif mode == 1: # "Columns:" in previous line
                    if line.rstrip() == "[":
                        mode = 2
                        json_str += line
                    elif not line:
                        # ignore empty line
                        pass
                    else:
                        warn("Unexpected header format. I will not attempt to process it.")
                        break
                elif mode == 2: # assemble json
                    json_str += line
                    if line.rstrip() == "]":
                        break
            elif not line.strip():
                # ignore empty line
                pass
            else:
                # Data line reached before the header ended: rewind so
                # np.loadtxt() sees it, and give up on the header.
                # BUGFIX: this used to format the json *module* into the
                # warning instead of json_str.
                warn("unexpected end of header. Json found so far:\n{0}".format(json_str))
                json_str = None
                fh.seek(lastpos)
                break
        # BUGFIX: the original tested "if json:", which is always true
        # (the module object), so json.loads(None) crashed on headerless
        # or malformed files.
        if json_str:
            meta = MetaList(json.loads(json_str, object_hook=Meta))
    arr = np.loadtxt(fh)
    return arr, meta
def gather_files(infh):
    """Return (timesteps, file paths) for a .pvd collection or a single .vtu.

    A bare .vtu gets the dummy timestep 0; unknown extensions abort the
    program via die().
    """
    fn = infh if isinstance(infh, str) else infh.name

    if fn.endswith(".pvd"):
        pathroot = os.path.dirname(fn)
        tree = ET.parse(infh)
        timesteps, files = getFilesTimes(tree, pathroot)
    elif fn.endswith(".vtu"):
        timesteps, files = [0], [fn]
    else:
        die("File `%s' has unknown type" % fn)

    return timesteps, files
def gather_grids(infh, reader, filefilter=None):
    """Read every grid referenced by *infh*, optionally skipping timesteps.

    Returns (timesteps, grids, paths); grids stays None for entries
    rejected by *filefilter*. Each grid is deep-copied out of the reader.
    """
    def load(path):
        reader.SetFileName(path)
        reader.Update()
        copy = vtk.vtkUnstructuredGrid()
        copy.DeepCopy(reader.GetOutput())
        return copy

    timesteps, paths = gather_files(infh)
    grids = [ None ] * len(timesteps)
    for i, (path, ts) in enumerate(zip(paths, timesteps)):
        if (not filefilter) or filefilter.filter(ts, path):
            grids[i] = load(path)
    return timesteps, grids, paths
def get_timeseries(src, grids, tss, attrs, points, incl_coords):
    """Extract one record per timestep for the selected points/attributes.

    Skips timesteps whose grid produced no record and asserts that every
    remaining timestep yields the identical column layout (meta).
    Returns (records, meta).
    """
    oldMeta = None
    records = []
    # zip() instead of the Python-2-only xrange() index loop.
    for grid, ts in zip(grids, tss):
        rec, meta = filter_grid_ts(src, grid, ts, attrs, points, incl_coords)
        if rec is not None:
            if oldMeta is None:
                oldMeta = meta
            else:
                assert meta == oldMeta
            records.append(rec)
    return records, oldMeta
def get_point_data(src, grids, attrs, points_cells, output_coords):
    """Extract per-point records from every (non-filtered) grid.

    Grids that were filtered out (None entries) contribute an empty record
    tuple; all loaded grids must produce the same column layout.
    Returns (records, meta).
    """
    oldMeta = None
    records = []
    meta = []
    for grid in grids:
        if not grid:
            records.append(())
            continue
        recs, meta = filter_grid_dom(src, grid, attrs, points_cells, output_coords)
        if oldMeta is None:
            oldMeta = meta
        else:
            assert meta == oldMeta
        records.append(recs)
    return records, meta
def combine_arrays(arrays):
    """Concatenate several record lists row-wise.

    All inputs must have the same number of rows; row i of the result is
    the concatenation of row i of every input. A single input is returned
    unchanged.
    """
    if len(arrays) == 1:
        return arrays[0]
    nrows = len(arrays[0])
    combined = []
    for ri in range(nrows):
        merged = []
        for arr in arrays:
            assert len(arr) == nrows
            merged += arr[ri]
        combined.append(merged)
    return combined
def combine_domains(metas, recs):
    """Merge duplicate domain columns (e.g. time/coordinates) that appear
    once per source into a single shared column.

    Non-VAL columns with the same attribute id are asserted equal across
    sources and emitted once with their source cleared; VAL columns are
    passed through unchanged. Returns (new_metas, new_recs).
    """
    ncols = len(metas)
    nmetas = []
    nrecs = []
    first_row = True
    for row in recs:
        assert len(row) == ncols
        # maps a domain column's attribute id to the index of its first
        # occurrence within this row
        lbls = {}
        nrow = []
        for ci in range(ncols):
            m = metas[ci]
            val = row[ci]
            if m.dov != DoV.VAL:
                lbl = m.get_attr_id()
                if lbl in lbls:
                    # duplicate domain column: must agree with the first one
                    assert val == row[lbls[lbl]]
                else:
                    lbls[lbl] = ci
                    nrow.append(val)
                    if first_row:
                        nmeta = Meta(m)
                        nmeta.src = None
                        nmetas.append(nmeta)
            else:
                nrow.append(val)
                if first_row:
                    nmeta = Meta(m)
                    nmetas.append(nmeta)
        first_row = False
        nrecs.append(nrow)
    return nmetas, nrecs
# argparse types
def InputFile(val):
    """argparse type: an input file, optionally labeled as "label:path".

    Returns (label_or_None, handle_or_path); "-" selects stdin. When the
    labeled form fails to resolve, the whole value is retried as a plain
    path.
    """
    parts = val.split(":", 2)
    if len(parts) == 2:
        try:
            if parts[1] == "-":
                fh = sys.stdin
            else:
                path = os.path.expanduser(parts[1])
                assert os.path.isfile(path) and os.access(path, os.R_OK)
                fh = path
        except (IOError, AssertionError):
            # BUGFIX: only IOError was caught before, but the check above
            # raises AssertionError, so the documented fallback (treat the
            # whole value as a path) never ran.
            warn("Warning: Could not open `{0}', will try `{1}' instead".format(parts[1], val))
        else:
            return parts[0], fh
    if val == "-":
        fh = sys.stdin
    else:
        try:
            path = os.path.expanduser(val)
            assert os.path.isfile(path) and os.access(path, os.R_OK)
            fh = path
        except AssertionError:
            raise argparse.ArgumentTypeError("input file `{}' is not readable or not a file".format(path))
    return None, fh
def DirectoryW(val):
    """argparse type for a writable directory.

    Validation is not implemented yet; the value is passed through as-is.
    """
    # TODO implement
    return val
def InputFileArgument(path):
    """argparse type: an existing, readable file; returns the expanded path.

    Raises argparse.ArgumentTypeError instead of using assert: asserts
    are stripped under "python -O" and surface as ugly tracebacks rather
    than argparse usage errors.
    """
    path = os.path.expanduser(path)
    if not (os.path.isfile(path) and os.access(path, os.R_OK)):
        raise argparse.ArgumentTypeError("input file `{}' is not readable or not a file".format(path))
    return path
def OutputFileArgument(path):
    """argparse type: a writable output file path; returns the expanded path.

    Accepts an existing writable file, or a not-yet-existing file inside a
    writable directory. (The original required the file to already exist,
    which made it impossible to name a fresh output file.)
    """
    path = os.path.expanduser(path)
    if os.path.isfile(path):
        if not os.access(path, os.W_OK):
            raise argparse.ArgumentTypeError("output file `{}' is not writable".format(path))
    else:
        d = os.path.dirname(path) or "."
        if not (os.path.isdir(d) and os.access(d, os.W_OK)):
            raise argparse.ArgumentTypeError("cannot create output file `{}'".format(path))
    return path
# Matches the "transform spec" prefix of an output argument, e.g. "^0@1%2:":
# one or more ([%@^]<input-index>) pairs followed by a colon.
re_out_file = re.compile(r'^([%@^][0-9]+)+:')
def OutputFile(val):
    """argparse type: an output file with an optional transform prefix.

    *val* looks like "[^@%N...]:path"; '^' selects raw data, '@' the
    script-transformed data, '%' both. Without a prefix, "^0" (raw data
    of the first input) is assumed. Returns (list of (input_index,
    transform_mode) pairs, target), where the target is sys.stdout for
    "-" and otherwise the file path (the file is touched/truncated now so
    bad paths fail early).
    """
    m = re_out_file.match(val)
    if m:
        path = val[m.end():]
        tfm_and_num = val[m.start():m.end()-1]
    else:
        # TODO maybe add info message
        path = val
        tfm_and_num = "^0"

    try:
        if path == "-":
            outfh = sys.stdout
        else:
            path = os.path.expanduser(path)
            # touch (and truncate) the file to verify it is writable
            with open(path, "w") as outfh:
                pass
            outfh = path
    except IOError as e:
        raise argparse.ArgumentTypeError("I/O error({0}) when trying to open `{2}': {1}".format(e.errno, e.strerror, path))

    mode_by_char = {'^': 0, '@': 1, '%': 2}
    spl = re.split(r'([%@^])', tfm_and_num)
    # empty string first, then pairs of [%@^] and a number
    assert len(spl) % 2 == 1
    nums_tfms = [ (int(spl[i+1]), mode_by_char[spl[i]])
                  for i in range(1, len(spl), 2) ]

    return (nums_tfms, outfh)
def OutputDir(val):
    """argparse type: an output path prefix with an optional transform prefix.

    Like OutputFile(), but the target is a path prefix whose directory
    must already exist; no file is created here. Returns (list of
    (input_index, transform_mode) pairs, expanded path).
    """
    m = re_out_file.match(val)
    if m:
        path = val[m.end():]
        tfm_and_num = val[m.start():m.end()-1]
    else:
        # TODO maybe add info message
        path = val
        tfm_and_num = "^0"

    path = os.path.expanduser(path)
    d = os.path.dirname(path) or "."
    if not os.path.isdir(d):
        raise argparse.ArgumentTypeError("`{0}' is not a directory".format(d))

    mode_by_char = {'^': 0, '@': 1, '%': 2}
    spl = re.split(r'([%@^])', tfm_and_num)
    # empty string first, then pairs of [%@^] and a number
    assert len(spl) % 2 == 1
    nums_tfms = [ (int(spl[i+1]), mode_by_char[spl[i]])
                  for i in range(1, len(spl), 2) ]

    return (nums_tfms, path)
def check_consistency_ts(args):
    """Sanity-check parsed arguments for the time-series subcommand."""
    # at least one point or cell must be chosen
    assert args.points_cells is not None and len(args.points_cells) != 0

    for out_args in (args.out_csv, args.out_plot):
        for nums_tfms, _ in out_args or []:
            for num, tfm in nums_tfms:
                assert num < len(args.in_files)
                # transformed output requires the analytical script
                assert args.script or not tfm
def check_consistency_dom(args):
    """Sanity-check parsed arguments for the whole-domain subcommand."""
    # assert (not args.out_pvd) != (not args.attr)
    if args.points_cells:
        # either only points or only cells are allowed, never a mixture
        kinds = set(type(pc) for pc in args.points_cells)
        assert len(kinds) <= 1

    for nums_tfms, _ in args.out_csv or []:
        # assert len(nums_tfms) == 1 # currently no combination of whole grids allowed
        for num, tfm in nums_tfms:
            assert num < len(args.in_files)
            # transformed output requires the analytical script
            assert args.script or not tfm

    for nums_tfms, _ in args.out_pvd or []:
        # currently no combination of whole grids allowed
        assert len(nums_tfms) == 1
        for num, tfm in nums_tfms:
            assert num < len(args.in_files)
            # transformed output requires the analytical script
            assert args.script or not tfm
def load_input_files(in_files, req_out, script_fh, script_params, filefilter):
    """Load (and optionally transform) all grids needed by *req_out*.

    All inputs must share one file type. For .pvd input each entry is an
    independent time series; for .vtu input all files are combined into a
    single series indexed by file position. The analytical-model script is
    loaded at most once, only when a transformed output is requested.
    Returns (timesteps, vtuFiles, vtuFiles_transformed, vtuPaths).
    """
    assert len(in_files) > 0

    # check that all input files are of the same type (either vtu or pvd)
    input_type = None
    for _, f in in_files:
        path = f if isinstance(f, six.string_types) else f.name
        if not path: continue
        m = re.search(r'[.][^.]*$', path)
        if m:
            if input_type is None:
                input_type = m.group(0)
            elif input_type != m.group(0):
                print("Error: You must not mix input files of different type!")
                assert input_type == m.group(0)

    if script_fh is not None and isinstance(script_fh, list): script_fh = script_fh[0]

    reader = vtk.vtkXMLUnstructuredGridReader()
    scr_loaded = False

    if input_type == ".pvd":
        timesteps = [ None for _ in range(len(in_files)) ]
        vtuFiles = [ None for _ in range(len(in_files)) ]
        vtuFiles_transformed = [ None for _ in range(len(in_files)) ]
        vtuPaths = [ None for _ in range(len(in_files)) ]

        # load and, if necessary, transform source files
        for nums_tfms, _ in req_out:
            for num, tfm in nums_tfms:
                if not vtuFiles[num]:
                    timesteps[num], vtuFiles[num], vtuPaths[num] \
                            = gather_grids(in_files[num][1], reader, filefilter)
                if tfm != 0:
                    assert script_fh is not None
                    if not scr_loaded:
                        script_args = {}
                        for kv in script_params:
                            # BUGFIX: split on the FIRST '=' only, so values
                            # may contain '='; maxsplit=2 crashed on "k=a=b".
                            k, v = kv.split('=', 1)
                            script_args[k] = v
                        analytical_model = imp.load_source("analytical_model", script_fh.name, script_fh)
                        analytical_model.init(script_args)
                        scr_loaded = True
                    if not vtuFiles_transformed[num]:
                        vtuFiles_transformed[num] = apply_script(analytical_model.get_attribute_functions(), timesteps[num], vtuFiles[num])
    elif input_type == ".vtu":
        # all .vtu files form one combined series, indexed by position
        timesteps = [ [ None ]*len(in_files) ]
        vtuFiles = [ [ None ]*len(in_files) ]
        vtuFiles_transformed = [ None ]
        vtuPaths = [ [ None ]*len(in_files) ]

        # load and, if necessary, transform source files
        for nums_tfms, _ in req_out:
            for num, tfm in nums_tfms:
                assert num == 0
                for fi, (_, in_file) in enumerate(in_files):
                    if filefilter.filter(fi, in_file):
                        _, vtu, vtuPath = gather_grids(in_file, reader)
                        timesteps[0][fi] = fi
                        vtuFiles[0][fi] = vtu[0]
                        vtuPaths[0][fi] = vtuPath[0]
                if tfm != 0:
                    assert script_fh is not None
                    if not scr_loaded:
                        script_args = {}
                        for kv in script_params:
                            # BUGFIX: see above -- first '=' only.
                            k, v = kv.split('=', 1)
                            script_args[k] = v
                        analytical_model = imp.load_source("analytical_model", script_fh.name, script_fh)
                        analytical_model.init(script_args)
                        scr_loaded = True
                    if not vtuFiles_transformed[0]:
                        vtuFiles_transformed[0] = apply_script(analytical_model.get_attribute_functions(), timesteps[0], vtuFiles[0])

    return timesteps, vtuFiles, vtuFiles_transformed, vtuPaths
def get_output_data_diff(aggr_data, req_out):
    """Yield (meta, records, outfh) triples with per-attribute diff columns.

    For every attribute component present in exactly two inputs, two extra
    columns are appended per record: the absolute difference and the
    difference relative to the larger magnitude.
    """
    for nums_tfms, outfh in req_out:
        meta_attr_comp = {}
        meta = []
        recs = []
        for num, tfm in nums_tfms:
            assert tfm == 0  # diff mode never uses transformed data
            if tfm == 0: rng = [0]
            elif tfm == 1: rng = [1]
            elif tfm == 2: rng = [0,1]
            for tfm_idx in rng:
                r, m = aggr_data[num][tfm_idx]
                recs.append(r)
                meta += m
                # collect which components exist for each value attribute
                for mt in m:
                    if mt.dov != DoV.VAL: continue
                    a = mt.attr
                    c = mt.comp
                    if a not in meta_attr_comp: meta_attr_comp[a] = set()
                    meta_attr_comp[a].add(c)
        meta = MetaList(meta)
        recs = combine_arrays(recs)

        for attr, comps in sorted(meta_attr_comp.items()):
            print("{} -- {}".format(attr, ", ".join([str(c) for c in comps])))
            for comp in comps:
                cols = meta.get_columns(attr=attr, comp=comp, dov=DoV.VAL)
                if len(cols) < 2:
                    warn("attribute {}[{}] is only present in one input file. skipping".format(attr, comp))
                    continue
                assert len(cols) == 2
                c0 = cols[0]
                c1 = cols[1]
                meta.append(Meta(None, DoV.VAL, attr + "_diff", comp))
                meta.append(Meta(None, DoV.VAL, attr + "_reldiff", comp))
                for r in recs:
                    v0 = r[c0]
                    v1 = r[c1]
                    diff = v0-v1
                    denom = max(abs(v0), abs(v1))
                    # BUGFIX: guard against 0/0 when both values are zero
                    # (previously a ZeroDivisionError).
                    r += [diff, diff / denom if denom else 0.0]

        # for attr, cols in meta.each("attr", dov=DoV.VAL):
        #     print("{} -- {}".format(attr, ", ".join([str(c) for c in cols])))
        yield meta, recs, outfh
class FileFilterByTimestep:
    """Selects files whose timestep matches one of the requested timesteps.

    With no requested timesteps, every file passes. Matching is exact up
    to floating-point noise (absolute machine epsilon, or 1e-6 relative).
    """

    def __init__(self, timesteps):
        if timesteps:
            self._timesteps = sorted([ float(t) for t in timesteps ])
        else:
            self._timesteps = None

    def filter(self, ts, fn):
        """Return True when *ts* matches a requested timestep (*fn* unused)."""
        if not self._timesteps:
            return True
        for t in self._timesteps:
            if abs(ts-t) < sys.float_info.epsilon \
                    or (ts != 0.0 and abs(ts-t)/ts < 1.e-6):
                return True
        # BUGFIX: previously fell off the end and returned None; be explicit.
        return False
# split_re matches any number, possibly in scientific notation
split_re = re.compile(r'([+-]?[0-9]+(?:[.][0-9]+)?(?:[eE][+-]?[0-9]+)?)')

# returns a sorted version of the given list like `sort -V`
def version_sort(in_files):
    # Sort key: the file name split into alternating (text, number, text,
    # ...) chunks, with numeric chunks compared as floats -- so "file10"
    # sorts after "file2". Entries are (label, path-or-handle) pairs.
    return sorted(in_files, key=lambda f: [
        s if i%2==0 else float(s)
        for i, s in enumerate(split_re.split(
            f[1] if isinstance(f[1], six.string_types) else f[1].name
        ))
    ])
# TODO provide a similar function also for similar cases
def process_timeseries_diff(args):
    """Compare two inputs point-wise over time and emit diff columns.

    Loads both input files, extracts the time series for the single
    requested point, and writes csv and/or plot output containing the
    per-attribute difference columns produced by get_output_data_diff().
    """
    if args.out_plot:
        import matplotlib as mpl # needed to avoid conflicts with vtk
        import matplotlib.pyplot as plt
        globals()["mpl"] = mpl
        globals()["plt"] = plt

    # has to be imported after matplotlib
    import vtk
    globals()["vtk"] = vtk

    in_files = args.in_files
    if args.version_sort:
        in_files = version_sort(in_files)

    assert len(args.points_cells) == 1 # currently only one point at once

    if args.out_csv:
        # output file uses both input files and no transforms
        args.out_csv = [ ([(0, 0), (1, 0)], fh) for fh in args.out_csv ]
    if args.out_plot:
        # output file uses both input files and no transforms
        args.out_plot = [ ([(0, 0), (1, 0)], fh) for fh in args.out_plot ]

    req_out = (args.out_csv or []) \
            + (args.out_plot or [])
    assert len(req_out) > 0

    # BUGFIX: load_input_files() takes five arguments; the filefilter was
    # missing here, so this call raised TypeError. Pass a filter that
    # accepts every timestep.
    timesteps, vtuFiles, vtuFiles_transformed, _ = \
            load_input_files(in_files, req_out, None, None,
                             FileFilterByTimestep(None))

    # aggregate timeseries data
    aggr_data = [ [ None, None ] for _ in in_files ]
    for nums_tfms, _ in req_out:
        for num, tfm in nums_tfms:
            assert tfm == 0 # no transformations allowed here
            src = in_files[num][0]
            if src is None: src = in_files[num][1].name
            tss = timesteps[num]
            if tfm == 0: rng = [0]
            elif tfm == 1: rng = [1]
            elif tfm == 2: rng = [0,1]
            for tfm_idx in rng:
                if aggr_data[num][tfm_idx]: continue
                if tfm_idx != 0:
                    grids = vtuFiles_transformed[num]
                else:
                    grids = vtuFiles[num]
                # TODO find better solution for out_coords
                recs, meta = get_timeseries(src, grids, tss, args.attr, args.points_cells, args.out_coords)
                if tfm_idx != 0:
                    for m in meta: m.tfm = True
                aggr_data[num][tfm_idx] = (recs, meta)

    if args.out_csv:
        json_enc = JsonSer()
        for meta, recs, outfh in get_output_data_diff(aggr_data, args.out_csv):
            if True: #args.combine_domains:
                meta, recs = combine_domains(meta, recs)
            write_csv(meta, recs, outfh, args.prec[0], json_enc)

    if args.out_plot:
        plt = Plot(args.prec[0])
        for meta, recs, outfh in get_output_data_diff(aggr_data, args.out_plot):
            plt.plot_to_file(meta, recs, outfh)
def process_timeseries(args):
    """Drive the time-series subcommand: load inputs, aggregate the series
    for the chosen points/cells, then write csv and/or plot outputs.

    aggr_data is indexed [input_number][0|1]: slot 0 holds the raw data,
    slot 1 the script-transformed data; each slot caches (records, meta).
    """
    if args.out_plot:
        import matplotlib as mpl # needed to avoid conflicts with vtk
        import matplotlib.pyplot as plt
        globals()["mpl"] = mpl
        globals()["plt"] = plt
    # has to be imported after matplotlib
    import vtk
    globals()["vtk"] = vtk
    check_consistency_ts(args)
    # there shall be only single points or cells in the list
    points_cells = []
    for pc in args.points_cells:
        pc_flat = pc.flatten()
        if pc_flat:
            points_cells.extend(pc_flat)
        else:
            points_cells.append(pc)
    in_files = args.in_files
    if args.version_sort:
        in_files = version_sort(in_files)
    req_out = (args.out_csv or []) \
            + (args.out_plot or [])
    assert len(req_out) > 0
    timesteps, vtuFiles, vtuFiles_transformed, _ = \
            load_input_files(in_files, req_out, args.script, args.script_param, FileFilterByTimestep(None))
    # aggregate timeseries data
    aggr_data = [ [ None, None ] for _ in in_files ]
    for nums_tfms, _ in req_out:
        for num, tfm in nums_tfms:
            src = in_files[num][0]
            if src is None: src = in_files[num][1]
            tss = timesteps[num]
            # tfm selects raw (0), transformed (1), or both (2)
            if tfm == 0: rng = [0]
            elif tfm == 1: rng = [1]
            elif tfm == 2: rng = [0,1]
            for tfm_idx in rng:
                if aggr_data[num][tfm_idx]: continue
                if tfm_idx != 0:
                    grids = vtuFiles_transformed[num]
                else:
                    grids = vtuFiles[num]
                recs, meta = get_timeseries(src, grids, tss, args.attr, points_cells, args.out_coords)
                if tfm_idx != 0:
                    for m in meta: m.tfm = True
                aggr_data[num][tfm_idx] = (recs, meta)
    # write csv files
    json_enc = JsonSer()
    for nums_tfms, outfh in args.out_csv or []:
        meta = []
        recs = []
        for num, tfm in nums_tfms:
            if tfm == 0: rng = [0]
            elif tfm == 1: rng = [1]
            elif tfm == 2: rng = [0,1]
            for tfm_idx in rng:
                r, m = aggr_data[num][tfm_idx]
                recs.append(r)
                meta += m
        recs = combine_arrays(recs)
        if args.combine_domains:
            meta, recs = combine_domains(meta, recs)
        write_csv(meta, recs, outfh, args.prec[0], json_enc)
    # plot
    plt = Plot(args.prec[0])
    for nums_tfms, outfh in args.out_plot or []:
        meta = []
        recs = []
        for num, tfm in nums_tfms:
            if tfm == 0: rng = [0]
            elif tfm == 1: rng = [1]
            elif tfm == 2: rng = [0,1]
            for tfm_idx in rng:
                r, m = aggr_data[num][tfm_idx]
                recs.append(r)
                meta += m
        recs = combine_arrays(recs)
        # plt.plot_to_file(meta, recs, outfh)
        plt.add_data(meta, recs, outfh)
    plt.do_plots(1)
def process_whole_domain(args):
    """Drive the whole-domain subcommand: per-timestep csv/plot/pvd output.

    aggr_data is indexed [input_number][0|1]: slot 0 holds the raw data,
    slot 1 the script-transformed data; each slot caches (records, meta).
    One output file is written per timestep.
    """
    if not args.attr: args.attr = ['*']
    # has to be imported after matplotlib
    import vtk
    globals()["vtk"] = vtk
    check_consistency_dom(args)
    if args.points_cells:
        # there shall be only single points or cells in the list
        points_cells = []
        for pc in args.points_cells:
            pc_flat = pc.flatten()
            if pc_flat:
                points_cells.extend(pc_flat)
            else:
                points_cells.append(pc)
    else:
        points_cells = None
    in_files = args.in_files
    if args.version_sort:
        in_files = version_sort(in_files)
    req_out = (args.out_csv or []) \
            + (args.out_pvd or []) \
            + (args.out_plot or [])
    timesteps, vtuFiles, vtuFiles_transformed, vtuPaths = \
            load_input_files(in_files, req_out, args.script, args.script_param, FileFilterByTimestep(args.timestep))
    # write csv files
    json_enc = JsonSer()
    if args.out_csv or args.out_plot:
        # get data
        aggr_data = [ [ None, None ] for _ in range(len(in_files)) ]
        for nums_tfms, _ in (args.out_csv or []) + (args.out_plot or []):
            for num, tfm in nums_tfms:
                src = in_files[num][0]
                if src is None: src = in_files[num][1]
                # tfm selects raw (0), transformed (1), or both (2)
                if tfm == 0: rng = [0]
                elif tfm == 1: rng = [1]
                elif tfm == 2:
                    rng = [0,1]
                for tfm_idx in rng:
                    if aggr_data[num][tfm_idx]: continue
                    if tfm_idx != 0:
                        grids = vtuFiles_transformed[num]
                    else:
                        grids = vtuFiles[num]
                    # TODO add switch cells/points
                    recs, meta = get_point_data(src, grids, args.attr, points_cells, args.out_coords)
                    if tfm_idx != 0:
                        for m in meta: m.tfm = True
                    aggr_data[num][tfm_idx] = (recs, meta)
        if args.out_csv:
            # write csv files
            for nums_tfms, outdirn in args.out_csv:
                for ti in range(len(timesteps[nums_tfms[0][0]])):
                    meta = []
                    recs = []
                    for num, tfm in nums_tfms:
                        # all combined inputs must share the timestep axis
                        assert timesteps[num] == timesteps[nums_tfms[0][0]]
                        if tfm == 0: rng = [0]
                        elif tfm == 1: rng = [1]
                        elif tfm == 2: rng = [0,1]
                        for tfm_idx in rng:
                            r, m = aggr_data[num][tfm_idx]
                            recs.append(r[ti])
                            meta += m
                    recs = combine_arrays(recs)
                    if args.combine_domains:
                        meta, recs = combine_domains(meta, recs)
                    if recs:
                        # derive the output file name from the input vtu
                        # name (single series) or from the timestep value
                        if len(timesteps) == 1:
                            fn = outdirn \
                                    + re.sub(r"[.][^.]+$", ".csv", os.path.basename(vtuPaths[nums_tfms[0][0]][ti]))
                        else:
                            t = timesteps[num][ti]
                            if isinstance(t, numbers.Integral):
                                # zero-pad integral timesteps to equal width
                                max_ts = max(timesteps[num])
                                width = len(str(max_ts))
                                fn = ("{}_{:0"+str(width)+"}.csv").format(outdirn, t)
                            else:
                                fn = "{}_{}.csv".format(outdirn, t)
                        print("csv output to {}".format(fn))
                        write_csv(meta, recs, fn, args.prec[0], json_enc)
        if args.out_plot:
            assert(args.num_threads >= 0)
            # plot
            plt = Plot(args.prec[0])
            for nums_tfms, outdirn in args.out_plot or []:
                for ti in range(len(timesteps[nums_tfms[0][0]])):
                    meta = []
                    recs = []
                    for num, tfm in nums_tfms:
                        # all combined inputs must share the timestep axis
                        assert timesteps[num] == timesteps[nums_tfms[0][0]]
                        if tfm == 0: rng = [0]
                        elif tfm == 1: rng = [1]
                        elif tfm == 2: rng = [0,1]
                        for tfm_idx in rng:
                            r, m = aggr_data[num][tfm_idx]
                            # TODO: add x-axis value
                            recs.append(r[ti])
                            meta += m
                    recs = combine_arrays(recs)
                    if recs:
                        if len(timesteps) == 1:
                            fn = outdirn \
                                    + re.sub(r"[.][^.]+$", ".png", os.path.basename(vtuPaths[nums_tfms[0][0]][ti]))
                        else:
                            t = timesteps[num][ti]
                            if isinstance(t, numbers.Integral):
                                max_ts = max(timesteps[num])
                                width = len(str(max_ts))
                                fn = ("{}_{:0"+str(width)+"}.png").format(outdirn, t)
                            else:
                                fn = "{}_{}.png".format(outdirn, t)
                        print("plot output to {}".format(fn))
                        if args.update:
                            # skip plots that are newer than their input
                            if os.path.isfile(fn):
                                mt_in = os.stat(vtuPaths[nums_tfms[0][0]][ti]).st_mtime
                                mt_out = os.stat(fn).st_mtime
                                if mt_out > mt_in:
                                    # print(in_files[ti][1], "is older than out")
                                    continue
                        plt.add_data(meta, recs, fn)
            plt.do_plots(args.num_threads)
    # write pvd files
    if args.out_pvd:
        writer = vtk.vtkXMLUnstructuredGridWriter()
        for nums_tfms, outfh in args.out_pvd:
            outfn = outfh.name
            outf_base = re.sub(r'.pvd', '', outfn)
            out_vtus = []
            for num, tfm in nums_tfms:
                src = in_files[num][0]
                if src is None: src = in_files[num][1].name
                if tfm == 0: rng = [0]
                elif tfm == 1: rng = [1]
                elif tfm == 2:
                    assert tfm != 2
                    rng = [0,1]
                for tfm_idx in rng:
                    if tfm_idx != 0:
                        grids = vtuFiles_transformed[num]
                    else:
                        grids = vtuFiles[num]
                    for ti in range(len(timesteps[num])):
                        # TODO: make output file names resemble input file names
                        fn = "{0}_{1}.vtu".format(outf_base, timesteps[num][ti])
                        out_vtus.append(fn)
                        writer.SetFileName(fn)
                        writer.SetInputData(grids[ti])
                        writer.Write()
            write_pvd(outfh, timesteps[num], out_vtus)
def process_proxy(args):
    """Load the user-supplied analytical-model script and let it proxy the
    input files to the output files.

    ``args.script_param`` entries are ``key=value`` strings.  Values may
    themselves contain ``=`` characters, so only the *first* ``=`` separates
    the key from the value.
    """
    script_fh = args.script[0]
    script_args = {}
    for kv in args.script_param:
        # maxsplit=1: split on the first '=' only.  The original used
        # maxsplit=2, which raised ValueError for values containing '='.
        k, v = kv.split('=', 1)
        script_args[k] = v
    # NOTE(review): imp is deprecated (removed in Python 3.12); kept here for
    # consistency with the rest of this script -- consider importlib.util.
    analytical_model = imp.load_source("analytical_model", script_fh.name, script_fh)
    analytical_model.init(script_args)
    analytical_model.proxied(args.in_files, args.out_files)
def _run_main():
    """Build the command-line interface, dispatch to the selected subcommand,
    and print timing statistics afterwards.

    Subcommands: ``timeseries``, ``ts-diff``, ``domain`` and ``proxy``.  Each
    subcommand registers its processing function as ``args.func`` via
    ``set_defaults`` and shares option groups through parent parsers.
    """
    parser = argparse.ArgumentParser(description="Process PVD files")
    # common options shared by every subcommand (script hooks)
    parser_common = argparse.ArgumentParser(description="Common options", add_help=False)
    parser_common.add_argument("-s", "--script", nargs=1, type=InputFileArgument, help="script for generating field data, e.g., exact solutions of FEM models")
    parser_common.add_argument("--script-param", "--sp", action="append", help="parameters for the script", default=[])
    # I/O options shared by the file-consuming subcommands
    parser_io = argparse.ArgumentParser(description="Input/output options", add_help=False)
    parser_io.add_argument("-i", "--in", nargs='+', type=InputFile, required=True, help="input file", dest="in_files", metavar="IN_FILE")
    parser_io.add_argument("--no-combine-domains", action="store_false", dest="combine_domains", help="do not combine domains when aggregating several input files into one output file")
    parser_io.add_argument("--prec", nargs=1, type=int, help="decimal precision for output", default=[14])
    parser_io.add_argument("--no-coords", action="store_false", dest="out_coords", help="do not output coordinate columns")
    parser_io.add_argument("--version-sort-inputs", "-V", action="store_true", dest="version_sort", help="version sort input file names before further processing")
    subparsers = parser.add_subparsers(dest="subcommand", help="subcommands")
    # argparse makes subparsers optional by default; force a subcommand.
    subparsers.required = True
    # options shared by the timeseries-like subcommands (points/cells/attrs)
    parser_frag_ts = argparse.ArgumentParser(description="compute timeseries", add_help=False)
    parser_frag_ts.add_argument("-p", "--point", type=Point, action="append", required=False, dest="points_cells")
    parser_frag_ts.add_argument("-c", "--cell", type=Cell, action="append", required=False, dest="points_cells")
    parser_frag_ts.add_argument("-a", "--attr", type=AttributePack, action="append", required=False)
    # timeseries
    parser_ts = subparsers.add_parser("timeseries", help="compute timeseries", parents=[parser_io, parser_common, parser_frag_ts])
    parser_ts.set_defaults(func=process_timeseries)
    parser_ts.add_argument("--out-plot", action="append", type=OutputFile)
    parser_ts.add_argument("--out-csv", action="append", type=OutputFile)
    # timeseries diff: exactly two inputs, tighter default precision
    parser_tsd = subparsers.add_parser("ts-diff", help="compute differences between two timeseries", parents=[parser_frag_ts])
    parser_tsd.add_argument("-i", "--in", nargs=2, type=InputFile, required=True, help="input file", dest="in_files", metavar="IN_FILE")
    parser_tsd.add_argument("--out-plot", nargs=1, type=OutputFileArgument)
    parser_tsd.add_argument("--out-csv", nargs=1, type=OutputFileArgument)
    parser_tsd.add_argument("--prec", nargs=1, type=int, help="decimal precision for output", default=[6])
    parser_tsd.set_defaults(func=process_timeseries_diff)
    # domain: whole-domain output (pvd/csv/plot), optional threading/update
    parser_dom = subparsers.add_parser("domain", help="dom help", parents=[parser_io, parser_common, parser_frag_ts])
    parser_dom.add_argument("--out-pvd", action="append", type=OutputFile)
    parser_dom.add_argument("--out-csv", action="append", type=OutputDir)
    parser_dom.add_argument("--out-plot", action="append", type=OutputDir)
    parser_dom.add_argument("-t", "--timestep", action="append", required=False)
    parser_dom.add_argument("-N", "--num-threads", type=int, default=1)
    parser_dom.add_argument("-U", "--update", action="store_true")
    parser_dom.set_defaults(func=process_whole_domain)
    # proxy: pass files straight through the analytical-model script
    parser_proxy = subparsers.add_parser("proxy", help="proxy help", parents=[parser_common])
    parser_proxy.add_argument("-i", "--in", action="append", type=InputFileArgument, help="input file", dest="in_files", metavar="IN_FILE", default=[])
    parser_proxy.add_argument("-o", "--out", action="append", type=OutputFileArgument, help="output file", dest="out_files", metavar="OUT_FILE", default=[])
    parser_proxy.set_defaults(func=process_proxy)
    args = parser.parse_args()
    # argparse.Namespace supports "in": True when the attribute exists at all
    # (i.e. the chosen subcommand declared -a/--attr).
    if "attr" in args:
        if args.attr:
            # flatten the AttributePack objects into a plain attribute list
            attrs = []
            for a in args.attr:
                attrs += a.get_attrs()
            args.attr = attrs
        else:
            # default: match all attributes
            args.attr = [ Attribute('*') ]
    args.func(args)
    # timing globals are initialised at import/startup time elsewhere in this
    # module; `plot` is presumably a sibling module -- TODO confirm.
    global time_total, time_import_vtk
    print("total execution took {} seconds".format(time.time() - time_total))
    print("importing vtk took {} seconds".format(time_import_vtk))
    print("plotting took {} seconds".format(plot.time_plot))
    print("saving plots took {} seconds".format(plot.time_plot_save))
if __name__ == "__main__":
    _run_main()
else:
    # Imported as a module: pull in the heavy plotting/VTK dependencies.
    # NOTE(review): these imports run only on *import*, not when executed as a
    # script -- confirm that script mode obtains vtk from elsewhere.
    import matplotlib as mpl  # needed to avoid conflicts with vtk
    import matplotlib.pyplot as plt
    # has to be imported after matplotlib
    try:
        # time.clock() was deprecated in 3.3 and removed in Python 3.8;
        # perf_counter() is the documented replacement for elapsed time.
        start_time = time.perf_counter()
        import vtk
        time_import_vtk = time.perf_counter() - start_time
    except ImportError:
        warn("module vtk will not be available")
|
LAWRENCE — The Federal Communications Commission has voted to kill rules designed to protect consumers from service providers blocking certain websites or charging for higher-quality service. The commission voted 3-2 along party lines to scrap the net neutrality rules implemented in 2015 during the Barack Obama administration.
Chairman Ajit Pai said removing the rules would help spur innovation, while those opposed claimed it would allow giant telecom industries to charge more, block content from competitors and make it too expensive for startup companies to compete. The changes won’t be seen immediately, as 18 state attorneys general and others opposed to the changes have said they will sue to stop the change. The move, if implemented, will favor providers over consumers, a University of Kansas journalism professor and media law expert said.
That expert, Genelle Belmas, specializes in media law and teaches courses in that subject as well as gamification, communication technology, media ethics and computer-assisted reporting. Her research has been published in the Yale Journal of Law & Technology and the Federal Communications Law Journal, among others. She can discuss net neutrality, the original rules, the FCC’s decision to overturn them, media law, the future of Internet communication and related topics.
To schedule an interview, contact Mike Krings at 785-864-8860 or mkrings@ku.edu.
|
#!/usr/bin/env python
'''
Extract reads which aren't mapped from a SAM or SAM.gz file.
Behavior for PE:
-Write out PE only if both do not map (if either of the pair maps, neither is retained)
Behavior for SE:
-Write out SE if they don't map
Iterate over a SAM or SAM.gz file. take everything where the 3rd and
4th flag bit are set to 1 and write reads out to files.
0x1 template having multiple segments in sequencing
0x2 each segment properly aligned according to the aligner
0x4 segment unmapped
0x8 next segment in the template unmapped
0x10 SEQ being reverse complemented
0x20 SEQ of the next segment in the template being reversed
0x40 the first segment in the template
0x80 the last segment in the template
0x100 secondary alignment
0x200 not passing quality controls
0x400 PCR or optical duplicate
TODO:
1) Add support for retaining both reads if one of a pair don't map but the other does
2) Add support for retaining the pair (or SE) if a read maps with low mapq
Note:
It is necessary to double check that both pairs of the PE read really exist in the SAM
file just in case it somehow gets disordered. This is taken care of by keeping the PE
reads in a set of dictionaries and then deleting them once the pair is written.
In the case where a read is somehow labeled as paired, but the pair doesn't exist, the
read is NOT written.
'''
import sys
import os
from optparse import OptionParser # http://docs.python.org/library/optparse.html
import gzip
# Command-line interface (optparse, since this script targets Python 2).
usage = "usage: %prog [options] -o output_base inputfile.SAM"
parser = OptionParser(usage=usage,version="%prog 2.0.0")
parser.add_option('-u', '--uncompressed', help="leave output files uncompressed",
                  action="store_true", dest="uncompressed")
parser.add_option('-o', '--output_base', help="output file basename",
                  action="store", type="str", dest="output_base",default="screened")
# NOTE(review): store_false with default=True means -v actually *disables*
# progress output, contrary to its help text -- confirm intended.
parser.add_option('-v', '--verbose', help="verbose output",
                  action="store_false", dest="verbose", default=True)
(options, args) = parser.parse_args() # uncomment this line for command line support
# One positional argument: read that SAM/SAM.gz file; otherwise read the SAM
# stream from stdin.
if len(args) == 1:
    infile = args[0]
    #Start opening input/output files:
    if not os.path.exists(infile):
        print "Error, can't find input file %s" % infile
        sys.exit()
    # transparently support gzip-compressed input, detected by extension
    if infile.split(".")[-1] == "gz":
        insam = gzip.open(infile, 'rb')
    else:
        insam = open(infile, 'r')
else:
    ## reading from stdin
    insam = sys.stdin
base = options.output_base
# Buffers for PE mates seen so far, keyed by read ID: a pair is only written
# once both mates have been seen (guards against disordered SAM files).
PE1 = {}
PE2 = {}
# Per-contig counts of *mapped* reads, filled in during the main scan.
contig_map = {}
def writeread(ID, r1, r2):
    """Write one unmapped mate pair to the global outPE1/outPE2 handles.

    ID is the read name without any "#..." suffix; r1 and r2 are
    [sequence, quality] pairs for the first and second mate respectively.
    """
    for handle, read, mate_num in ((outPE1, r1, 1), (outPE2, r2, 2)):
        handle.write("@%s#0/%d\n" % (ID, mate_num))
        handle.write(read[0] + "\n")
        handle.write("+\n" + read[1] + "\n")
i = 0
PE_written = 0
SE_written = 0
SE_open = False
PE_open = False
for line in insam:
if i % 100000 == 0 and i > 0 and options.verbose:
print "Records processed: %s, PE_written: %s, SE_written: %s" % (i, PE_written, SE_written)
#Comment/header lines start with @
if line[0] != "@" and len(line.strip().split()) > 2:
i += 1
line2 = line.strip().split()
flag = int(line2[1])
#Handle SE:
# unapped SE reads have 0x1 set to 0, and 0x4 (third bit) set to 1
if (flag & 0x1 == 0) and (flag & 0x4):
ID = line2[0].split("#")[0]
if not SE_open:
if options.uncompressed:
outSE = open(base + "_SE.fastq", 'w')
else:
outSE = gzip.open(base + "_SE.fastq.gz", 'wb')
SE_open = True
outSE.write("@" + ID + '\n')
outSE.write(line2[9] + '\n')
outSE.write('+\n' + line2[10] + '\n')
SE_written += 1
continue
#Handle PE:
#logic: 0x1 = multiple segments in sequencing, 0x4 = segment unmapped, 0x8 = next segment unmapped, 0x80 the last segment in the template
if ((flag & 0x1) and (flag & 0x4) and (flag & 0x8)):
if not PE_open:
if options.uncompressed:
outPE1 = open(base + "_PE1.fastq", 'w')
outPE2 = open(base + "_PE2.fastq", 'w')
else:
outPE1 = gzip.open(base + "_PE1.fastq.gz", 'wb')
outPE2 = gzip.open(base + "_PE2.fastq.gz", 'wb')
PE_open = True
if (flag & 0x40): # is this PE1 (first segment in template)
#PE1 read, check that PE2 is in dict and write out
ID = line2[0].split("#")[0]
r1 = [line2[9], line2[10]] # sequence + qual
if ID in PE2:
writeread(ID, r1, PE2[ID])
del PE2[ID]
PE_written += 1
else:
PE1[ID] = r1
continue
elif (flag & 0x80): # is this PE2 (last segment in template)
#PE2 read, check that PE1 is in dict and write out
ID = line2[0].split("#")[0]
r2 = [line2[9], line2[10]]
if ID in PE1:
writeread(ID, PE1[ID], r2)
del PE1[ID]
PE_written += 1
else:
PE2[ID] = r2
continue
# was mapped, count it up
contig = line2[2]
if contig in contig_map.keys():
if (flag & 0x1 == 0): ## SE
contig_map[contig]["SE"] += 1
elif (flag & 0x40): ## PE, Just count the first in the pair
contig_map[contig]["PE"] += 1
else:
contig_map[contig] = {}
if (flag & 0x1 == 0): ## SE
contig_map[contig]["SE"] = 1
contig_map[contig]["PE"] = 0
elif (flag & 0x40): ## PE, Just count the first in the pair
contig_map[contig]["SE"] = 0
contig_map[contig]["PE"] = 1
print "Records processed: %s, PE_written: %s, SE_written: %s" % (i, PE_written, SE_written)
for k in contig_map.keys():
print "\tFound %s: percent: %.2f, PE mapped: %s, SE mapped: %s" % (k,(2*PE_written+SE_written)/i, contig_map[k]["PE"], contig_map[k]["SE"])
if PE_open:
outPE1.close()
outPE2.close()
if SE_open:
outSE.close()
|
Collecting Merritt: "You Collect Who?"
As important to me as A. Merritt is, it constantly surprises me that there are still many today who have never heard of him. Actually - let me walk that back a bit. It used to surprise me.
|
from __future__ import print_function, division
__author__ = 'george'
import sys
import numpy as np
from utils.lib import *
from algorithm import *
def default():
    """Return the default settings bundle for the MWS optimizer."""
    defaults = dict(
        gens=10,
        max_changes=100,
        change_prob=0.5,
        steps=10,
        threshold=170,
        better=lt,
        verbose=True,
        step_size=100,
    )
    return O(**defaults)
class MWS(Algorithm):
    """Max Walk Sat: repeated stochastic local search over the model's
    decision space, with occasional random moves to escape local optima."""

    def __init__(self, model, settings=None):
        """
        Initialize the optimizer.
        :param model: Model to be optimized
        :param settings: Optional settings object; falls back to default()
        """
        if not settings:
            settings = default()
        Algorithm.__init__(self, model, settings)

    def energy(self, decisions, do_norm=True):
        """
        Energy function. Used to evaluate
        :param decisions: Decisions to be evaluated
        :param do_norm: If objectives have to be normalized
        :return: Computed energy value
        """
        objectives = self.model.evaluate(decisions)
        if do_norm:
            norms = [self.model.objectives[i].norm(obj)
                     for i, obj in enumerate(objectives)]
            return sum(norms)
        return sum(objectives)

    def run(self):
        """
        Runner function to run the
        max walk sat algorithm
        :return: Front of best solutions, with the eval count attached
        """
        model = self.model
        settings = self.settings
        if settings.verbose:
            print(model)
            print(settings)
        evals = 0
        decs = model.decisions
        front = Front()
        for _ in range(settings.gens):
            # random restart for each generation
            solution = model.generate()
            out = ""  # per-generation trace of move kinds (? random, + improved, . no-op)
            for __ in range(settings.max_changes):
                evals += 1
                rand_index = choice(range(len(decs)))
                if settings.change_prob < rand():
                    # random move: replace one decision with a fresh value
                    clone = list(solution)
                    clone[rand_index] = within(decs[rand_index].low,
                                               decs[rand_index].high)
                    if model.check_constraints(clone):
                        solution = clone
                        key = " ?"
                    else:
                        key = " ."
                else:
                    # greedy move: sweep the chosen decision for its best value
                    cloned, int_evals = self.jiggle_solution(solution, rand_index)
                    evals += int_evals
                    if cloned != solution:
                        key = " +"
                        solution = cloned
                    else:
                        key = " ."
                out += key
            if settings.verbose:
                print(model.evaluate(solution), out)
            front.update(Point(solution, model.evaluate(solution)))
        front.evals = evals
        return front

    def jiggle_solution(self, solution, index):
        """
        Modify an index in a solution that
        leads to the best solution range in that index
        :return: (best solution found, number of evaluations used)
        """
        t_evals = 0
        lo = self.model.decisions[index].low
        hi = self.model.decisions[index].high
        delta = (hi - lo) / self.settings.step_size
        # float("inf") replaces the Python-2-only sys.maxint, which raises
        # AttributeError on Python 3; negating it still yields the correct
        # sentinel when "better" means greater-than.
        best_soln, best_score = solution, float("inf")
        if self.settings.better == gt:
            best_score = -best_score
        for val in np.arange(lo, hi + delta, delta):
            cloned = list(solution)
            cloned[index] = val
            t_evals += 1
            if not self.model.check_constraints(cloned):
                continue
            objs = self.model.evaluate(cloned)
            objs = self.model.norm_objectives(objs)
            t_score = sum(objs)
            t_evals += 1
            if self.settings.better(t_score, best_score):
                best_soln, best_score = list(cloned), t_score
        return best_soln, t_evals
|
Over the span of less than a year, Jen Shyu lost two dear friends: Taiwanese nuclear scientist and poet Edward Cheng, and Javanese wayang (gamelan shadow puppetry) master Joko Raharjo, known as Cilik. The latter died along with his wife and infant daughter in a car crash; their other daughter, Naja, age six, survived. Shyu’s latest suite, Song of Silver Geese – streaming at Pi Recordings – is dedicated to those friends, and imagines Naja encountering a series of spirit guides from throughout Asian mythology, who give her strength.
The result is a hypnotic, otherworldly, sometimes harrowing narrative. Shyu is performing her characteristically theatrical, solo Nine Doors suite at the Jazz Gallery on Jan 24, with sets at 7:30 and 9:30; cover is $25. She’s also at the Stone the following night, Jan 25 at 8:30 PM as part of pianist Kris Davis’ weeklong stand there; the band also includes Ikue Mori on laptop percussion samples, Trevor Dunn on bass, Mat Maneri on viola and Ches Smith on drums. Cover is $20.
The suite is divided into nine “doors” – portals into other worlds. Shyu plays Taiwanese moon lute, piano and the magically warpy Korean gayageum, singing in both English and several Asian vernaculars. She’s joined by the strings of the Mivos Quartet as well as vibraphonist Chris Dingman’s Jade Tongue ensemble with violist Mat Maneri, bassist Thomas Morgan, drummer Dan Weiss, percussionist Satoshi Takeishi and flutist Anna Webber.
Door 2, World of Java is a hauntingly suspenseful nightscape, cautious flute underscored by a low rumble of percussion. Door 3, Dark Road, Silent Moon rises methodically from pensive, allusively Asian solo flute to an astringent string quartet interlude that reaches toward frenzy.
Shyu’s stark, plaintively melismatic vocals slowly build and then soar over spare gayageum and moon lute in Door 4, Simon Semarangam, the suite’s epic centerpiece. The flute flutters and spirals as the strings gain force and then recede for cellist Victor Lowrie’s brooding, cautious solo against sparse piano and percussion. Dingman and Morgan interchange quietly within Shyu’s plucks as the she segues into Door 5, World of Hengchun, her dreamy vocals contrasting with gritty lute, striking melismatic cello, an acidic string canon and the lush sweep of the full ensemble.
Door 8, World of Baridegi (a Korean princess who made a legendary journey to the underworld) is the dancingly explosive, almost tortuously shamanistic coda where Shyu imagines that Cilik’s family is saved. Her narration and then her singing offer a closing message of hope and renewal over spare accents in Door 9, Contemplation. Nocturnes don’t get any more surrealistically haunting than this.
The single most riveting jazz album, and arguably the most important album of the year in any style of music was Fukushima, by the Satoko Fujii Orchestra New York. A narrative of personal terror rather than a depiction of the horrific events of March 11, 2011, its tension is relentless. Fujii, who conducts the orchestra, alternates several harrowing themes within ominous cloudbanks of improvisation, poignantly lyrical solos and segments which shift from stately and elegaic to withering, chattering satire. That’s the bandleader’s response to the greed-fueled attempts to cover up the disaster. As Fukushima reactor number three continues to leak its deadly contents into the Pacific, it’s a shock that more artists haven’t addressed the ongoing environmental crisis. As Fujii succinctly said after leading the group in the world premiere of the suite in 2016, it’s not over.
Whittling this list down to another nineteen albums out of the hundreds of releases that deserve to be credited here was almost painful. It makes no sense to try to rank them: if an album’s good enough to make this list, you ought to hear it.
Ultimately, Jen Shyu‘s mission is to break down cultural barriers and unite people. In her own work, the singer/multi-instrumentalist has assimilated an astonishing number of styles, both from her heritage – Taiwan and East Timor – as well as from Korea, Indonesia, China and the United States, among other places around the world. Last night at Roulette she celebrated her birthday by unveiling a bracingly dynamic, otherworldly surrealistic, envelopingly beautiful new suite, Song of Silver Geese, a characteristically multilingual work combining the strings of the Mivos Quartet as well as vibraphonist Chris Dingman’s Jade Tongue ensemble with violist Mat Maneri, bassist Thomas Morgan, drummer Dan Weiss and flutist Anna Webber.
Shyu opened with a series of judicious plucks on her Korean gayageum lute, then switched to piano, Taiwanese moon lute and eventually a small Indonesian gong. Throughout the roughly hourlong piece, dancer Satoshi Haga struck dramatic poses when he wasn’t moving furtively or tiptoeing in the background when the music reached a lull.
The storyline, according to the program notes, involves the interaction between two characters from Timorese and Korean folklore, both known for their disguises, in addition to an iconic Taiwanese freedom fighter and a Javanese schoolgirl who was tragically orphaned at age six in a car accident.
Spare exchanges between the strings and the gayageum grew to an uneasy lustre evocative of 80s serialism, cellist Mariel Roberts’ wounded, ambered lines eventually giving way to sinister microtones from Maneri. Shyu’s switch to the moon lute signaled a long upward climb through a dreamlike sequence punctuated by Weiss’ increasingly agitated rumble and the flutter of the strings, texturally ravishing yet troubled.
Shyu’s uncluttered vocals were just as dynamic, ranging from a whisper, to an imploring, angst-fueled Carol Lipnik-like delivery, to an insistent, earthy, shamanistic growl and pretty much everywhere in between. The big coda, seemingly meant to illustrate the fatal crash, built to a pandemonium that came as a real shock in view of the lustre and glistening atmospherics that had been lingering up to that point.
The performance ended with the ensemble members performing a candle ceremony of sorts and then walking out through the audience as Shyu sang a mantra: “I am alone, but not lonely; Life has no boundaries when every place can be home.” Something for everybody in the audience to take home.
Shyu’s next performance features another premiere,of a dance piece at 7 PM on April 21 at the Czech Center, 321 E 73rd St. Those who were lucky enough to catch this performance would probably also enjoy the concert of rare, delicately haunting folk music from Amami Island, Japan, played by Anna Sato and Shogo Yashi at Roulette on May 14 at 8. Tix are $25/$21 stud/srs.
Weiss kicks off the album solo with a terse series of licks that the ensemble will build on later. The compositions’ titles all refer to iconic jazz drummers: Elvin Jones, Max Roach, Tony Williams, Philly Joe Jones and so on. The arrangements very seldom have the full orchestra going all at once, instead relying on momentary handoffs, slowly rising trajectories and frequent pairings or conversations. Those can be downright hilarious. The interlude during Max where it sounds like John Zorn doing P-Funk, Weiss’ abrupt WTF reaction to increasingly cacaphonous sax chatter in Tony and the many, many, many trick endings in Philly Joe are some of the best. There are plenty more.
In their most hectic moments, the band evoke the Claudia Quintet on crank; in their most ornately lustrous, Karl Berger joining forces with Roomful of Teeth. Most of the seven tracks here are partitas, shifting completely from one theme to a seemingly unrelated one. Although the segues are a little off-kilter, the music is consistently interesting. Elvin has jaunty wafts of vocalese from Shyu to Berkson and come-hither fingersnaps. Max features tongue-in-cheek juxtapositions between faux-metal fuzzbox guitar and Berkson’s arioso vocalese…and then takadimi drum language taking over in the drollery department.
For all its hijinks, the creepy piano riffage early on in Tony foreshadows a lot of what’s to come. There are echoes of Missy Mazzoli in a rare carefree mood throughout the vocal swoops and dives in Philly Joe. Klook features an enigmatic, starlit interlude amidst its circling, indie classical-influenced riffage, as does Ed. That passage is a stark, desolate one with acoustic guitar, glockenspiel and tinkly piano, straight out of the Iron Maiden playbook. Even for those who don’t get all the references and insider jokes here, this is still an awfully fun ride.
On one level, the Velocity Duo‘s new album Dichotomies – streaming online at singer Lauren Lee’s site – is avant garde to the extreme. On the other, it’s very accessible and irresistibly fun. You’d hardly guess that just vocals and bass (that’s Charley Sabatino on the four-string) could be this entertaining. The duo are playing the album release show tomorrow night, May 6 at 6 PM at the Whynot Jazz Room on Christopher St.; cover is $10, and there’s probably a drink minimum, the venue site isn’t clear on that.
Rare as bass-and-vocal albums are, the obvious recent point of comparison is singer Jen Shyu‘s 2011 masterpiece, Synastry with bassist Mark Dresser. Both that album and this new one have a dramatic flair, but where Shyu goes for pointed sociopolitical commentary and knifes-edge theatrics, Lee goes for mood and ambience with frequent bursts of humor. And where Shyu writes lyrics, Lee sings vocalese, which raises the conversational factor with Sabatino (who is an equal even if he’s not centerstage in this collaboration).
The album title says it all: the three first tracks are Apathy/Desire, Awe/Melancholy, and Disappointment and Joy. Again, as these titles indicate, Lee and Sabatino are working contrasts rather than opposite extremes. Sometimes the two take separate roles, other times working in tandem to bring each emotion or mix of emotions to life. The first track seems to be the former, Lee’s carefree, soul-infused flights and occcasional detours into jazz scatting contrast with Sabatino’s close-to-the-vest, almost claustrophobic minimalism. The second is airy and spacious: Sabatino’s punchy, percussive, incisive lines give these tunes a much-needed drive. The third sets a bittersweet, jazz-tinged Lee against Sabatino’s steady, dancing low-register lines.
Likewise, Elation/Woe pits Lee’s blithe scatting – the album’s most straight-ahead jazz passages – against Sabatino’s somber, rustically bluesy lines…until he goes up the scale and joins the fun. Holiday/Death – what a contrast, huh? – pairs Sabatino’s furtiveness with Lee’s operatically-tinged leaps and bounds. Hunger/Satiety is both the album’s most outside moment and also one of its funnier ones, while on Insecurity and Substance Sabatino once again anchors Lee’s LMAO attention-deficit attack with his gravitas until he too can’t resist getting in on the joke.
Lee’s sarcastic noodling on Narcisissm/Selfless is even funnier – it’s hard to see where if at all a contrast comes in. Skeptical/Naive also goes for laughs, but far more subtly, at least til midway through. Likewise, the bass/vocal tradeoffs in Tranquility/Cacaphony are more low-key. The album winds up with a strange and thought-provoking dichotomy, Uncomfortable/Placid. These short, most likely at least half-improvised vignettes transcend the question of whether or not this is jazz or indie classical or whatever mix of genres it might be: it’s just good, fun music.
|
# https://www.codewars.com/kata/valid-braces/train/python
def validBraces(braces):
    """Return True if every brace in `braces` is closed in the right order.

    `braces` is a string of the characters ()[]{}.  A string is valid when
    each opening brace is matched by the same kind of closing brace and
    pairs are properly nested; an empty string is valid.

    Fixes over the original: debug prints removed, and the empty string now
    returns True instead of None.
    """
    # Map each opening brace to the closing brace that must match it.
    closing_brace_of = {'(': ')', '[': ']', '{': '}'}
    expected = []  # stack of closing braces still owed
    for c in braces:
        if c in closing_brace_of:
            expected.append(closing_brace_of[c])
        elif not expected or c != expected.pop():
            # closing brace with nothing open, or of the wrong kind
            return False
    # valid only if every opened brace was closed
    return not expected
|
Didn't Find What You're Looking For? At Star Motor Cars we want you to find the perfect auto, and we'll work hard for you to make sure you do. Simply tell us what you're looking for and when it's available you'll be the first to know!
By submitting this form, you agree to be contacted by Star Motor Cars with information regarding the vehicle you are searching for.
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2020 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from qtpy import QtCore, QtWidgets
from Muon.GUI.Common.utilities import table_utils
class RemovePlotWindow(QtWidgets.QDialog):
    """Dialog listing the lines of a subplot with a checkbox per line, so the
    user can choose which ones to remove."""

    # Emitted with the collection of line names when "Remove" is pressed.
    applyRemoveSignal = QtCore.Signal(object)
    # Emitted when the dialog window is closed.
    closeEventSignal = QtCore.Signal()

    def __init__(self, lines, subplot, vlines=None, parent=None):
        """
        :param lines: names of the plotted lines
        :param subplot: name of the subplot the lines belong to
        :param vlines: optional names of vertical lines (appended to lines)
        :param parent: optional parent widget
        """
        super(RemovePlotWindow, self).__init__()
        # NOTE(review): `parent` is accepted but not forwarded to QDialog --
        # confirm whether this dialog is intentionally top-level.
        if vlines is None:
            # avoid the original mutable default argument ([])
            vlines = []
        all_lines = lines + vlines
        self._subplot = subplot
        self.grid = QtWidgets.QGridLayout()
        self.table = QtWidgets.QTableWidget(self)
        self.table.resize(200, 200)
        self.table.setRowCount(len(all_lines))
        self.table.setColumnCount(2)
        self.table.setColumnWidth(0, 150)
        self.table.setColumnWidth(1, 50)
        self.table.verticalHeader().setVisible(False)
        self.table.horizontalHeader().setStretchLastSection(True)
        self.table.setHorizontalHeaderLabels("Line name;Remove".split(";"))
        table_utils.setTableHeaders(self.table)
        # Maps line name -> {"line": name, "box": its checkbox widget}.
        self.widgets = {}
        for index, line in enumerate(all_lines):
            table_utils.setRowName(self.table, index, line)
            tmp = {"line": line, "box": table_utils.addCheckBoxToTable(self.table, False, index)}
            self.widgets[line] = tmp
        self.grid.addWidget(self.table)
        btn = QtWidgets.QPushButton("Remove")
        self.grid.addWidget(btn)
        self.setLayout(self.grid)
        self.setWindowTitle("Remove Lines For " + self._subplot)
        btn.clicked.connect(self.buttonClick)

    def closeEvent(self, event):
        """Notify listeners that the dialog is closing."""
        self.closeEventSignal.emit()

    def buttonClick(self):
        # Emit a list copy: the original emitted a dict_keys view, which is
        # lazily tied to the dict and surprises receivers expecting a list.
        self.applyRemoveSignal.emit(list(self.widgets.keys()))

    def getState(self, name):
        """Return True if the checkbox for `name` is checked."""
        return self.widgets[name]["box"].checkState() == QtCore.Qt.Checked

    def getLine(self, name):
        """Return the stored line name for `name`."""
        return self.widgets[name]["line"]

    @property
    def subplot(self):
        """Name of the subplot this dialog manages."""
        return self._subplot
|
Unless you live in a sunshine climate like Florida or southern California, winter is associated with darkness and a shortage of fresh, locally grown produce. The farmer’s markets are all packed up and everyone’s holed up at home, waiting it out.
But just because fresh produce isn’t growing where you live during the winter doesn’t mean you can’t enjoy autumn’s bounty of nutrient-dense, cancer-fighting foods all through the dark, cold months.
What's happening in the 60,000 miles of your arteries?
This powerhouse food is winter squash. Winter squash is a fruit in the gourd family (Cucurbitaceae), but most of us think of it as a vegetable. It gets its name from the fact that it’s harvested in late fall into early winter, and its hard outer shell makes it good for curing and storing over the winter.
Types of winter squash include the above-mentioned, plus butternut, acorn and spaghetti squash. Pumpkins too, are winter squash, and they’re great for more than just decoration.
Winter squash contain a large amount of fiber so you feel full faster when you eat. This means you eat less, which helps you lose weight. Plus, squash are full of nutrients like potassium, magnesium, niacin and most importantly zinc.
The mineral zinc is crucial to protecting men’s health, especially from prostate cancer. Prostate cells have a unique ability to store large amounts of zinc, which they use in secreting prostatic fluid.
Studies have shown that high levels of zinc in the prostate inhibit invasive activity of malignant prostate cancer cells and exert anti-tumor activity.1 All of this is pretty well known, so many men take a zinc supplement. But zinc embedded in food is much more bioavailable.
Prostate cancer has been associated with zinc deficiencies,2 so it’s important for men to get enough dietary zinc to maintain a healthy prostate. The Institute of Medicine has established adequate intake (AI) levels of zinc at 11 milligrams (mg) a day for boys and men age 14 and older.3 I think that’s a very low amount, and in my opinion most men should probably be taking 30 mg or more. The only way to be sure is get a blood test for your zinc levels.
One cup of dried, roasted pumpkin seeds, also known as pepitas, contains 16.9 mg of zinc. You can make them yourself at home (see below) or buy them at the grocery store. Just be sure to check the label, as some store-bought pumpkin seeds contain unhealthy oils and excess salt.
Beta-cryptoxanthin in particular is known to protect against lung and colon cancer. Winter squash contain some of the highest concentrations of this nutrient of any food around. Only red peppers have more.
Researchers have found that beta-cryptoxanthin works to minimize lung cancer incidence by reducing both the proteins and cell receptors that have been implicated in the creation of lung tumors.
While there isn’t a hard and fast recommendation for a daily intake, the general consensus is that getting between 2 and 6 mg of carotenoids a day is enough to keep chronic illnesses and cancer at bay – if merely “not being sick” is your only aim.7 In all likelihood this recommendation is much too low if you really want to be healthy.
Eating just one cup of cooked pumpkin provides. . .
One cup of cooked butternut squash provides. . .
Cucurbitacins are chemicals found throughout the plant kingdom that protect organisms from harmful invaders. They have potent antioxidant, anti-inflammatory and anti-cancer effects in humans.11 And fruits in the Cucurbitaceae family are chock full of them.
When winter squash is in season, you can find a wide variety at farmer’s markets and local pumpkin patches. Once the snow flies you should be able to find it in any grocery store.
Choose organic squash whenever possible, with a firm skin and without visible blemishes. Store in a cool, dark place (not the refrigerator) for up to a month. If the squash has been cured it can be stored longer.
Winter squash, including pumpkin, tastes great roasted, baked, braised or steamed. Eat them mashed with a little coconut oil and nutmeg, stuffed with nuts and veggies or in a pureed soup.
Pumpkin seeds can be roasted in a single layer at low heat (usually under 170 degrees). Roast them low, slow and for only 15 minutes to make sure you preserve the delicate linoleic and oleic acids that make up about three-fourths of the nutritious fat found in the seeds.
When it comes to pumpkins, canned is an option as well. Check the label to ensure the only ingredient is “pumpkin” and it doesn’t contain any added sugar, fillers or preservatives.
Just because the growing season is over doesn’t mean you can’t still enjoy nutrient-dense whole foods all winter long. By including winter squash in your diet as winter settles in, you’ll ensure your body continues to get the nutrients it needs to stay healthy and prevent cancer.
1 Zinc as an anti-tumor agent in prostate cancer and in other cancers.
2 Zinc transporters in prostate cancer.
4 The role of carotenoids in human health.
5 Chemoprevention by the oxygenated carotenoid beta-cryptoxanthin of N-methylnitrosourea-induced colon carcinogenesis in F344 rats.
6 Dietary cryptoxanthin and reduced risk of lung cancer: The Singapore Chinese Health Study.
7 Daily intake of carotenoids (carotenes and xanthophylls) from total diet and the carotenoid content of selected vegetables and fruit.
8 Pumpkin, canned, without salt.
9 List of winter squashes, pumpkins and other foods rich in Beta-cryptoxanthin.
10 Squash, winter, butternut, cooked, baked, without salt.
11 Cucurbitacins – A promising target for cancer therapy.
12 Cucurbitacin E inhibits breast tumor metastasis by suppressing cell migration and invasion.
13 Growth inhibitory effect of Cucurbitacin E on breast cancer cells.
14 Isolation of cucurbitacin E from pumpkin seed and analysis of its anti-cancer and anti-inflammatory activities.
15 Characterization of anticancer, DNase and antifungal activity of pumpkin 2S albumin.
|
"""my_app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from django.views.static import serve
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from . import views
from news import views as post_view
# Root URL routes. Order matters: Django matches top-down, so the two
# 'users/' entries fall through from the custom app to the built-in
# auth views.
urlpatterns = [
    url(r'^$', post_view.PostList.as_view(), name='all'),        # site home: post list
    url(r'^admin/', admin.site.urls),                            # Django admin
    url(r'^users/', include('users.urls', namespace='users')),   # custom user views
    url(r'^users/', include('django.contrib.auth.urls')),        # built-in auth views
    url(r'^test/$', views.TestView.as_view(), name='test'),
    url(r'^thanks/$', views.ThanksView.as_view(), name='thanks'),
    url(r'^posts/', include('news.urls', namespace='news')),
]

# Development-only routes: debug toolbar and serving user-uploaded media.
if settings.DEBUG:
    import debug_toolbar

    debug_patterns = [
        url('^__debug__/', include(debug_toolbar.urls)),
        # staticfiles_urlpatterns(),
        url(r'^media/(?P<path>.*)$', serve,
            {'document_root': settings.MEDIA_ROOT}),
    ]
    urlpatterns = debug_patterns + urlpatterns
|
The coveted A' Manufacturing and Processing Machinery Design Award is one of the highest awards given to excellence in design. This award is open for any designer with a finished product or prototype. There are plenty of benefits with this award, such as having your sales increased due to the product exposure on ResultBooks. Another great reason to submit your best product for an entry is for your product to reach international audiences, and the prestige and honor that winning this award entitles you to. The A' Manufacturing and Processing Machinery Design Award includes products such as computer aided manufacturing machinery, machining systems, machines and many more. You can also find many more product subcategories on designaward.com. You are required to present a high-resolution photograph when presenting your final creation to the judges. A package that does not have a photograph of its product and operating guide, if applicable, will be dismissed. Also, this product category will be judged by its packaging, so keep this in mind. Some of the benefits of winning this award include international recognition, including international sales. You will also be equipped to present press releases, so your product and design studio will be able to reach additional audiences. Your sales will grow tremendously thanks to this award. We are currently accepting entries for this product award. Feel free to visit designaward.com, should you want to view previous winners, see a detailed list of products accepted, or just gather some more information regarding entries. Best of luck to you! In Summary: The coveted A' Manufacturing and Processing Machinery Design Award is one of the highest awards given to excellence in design. This award is open for any designer with a finished product or prototype. There are plenty of benefits with this award, such as having your sales increased due to the product exposure on ResultBooks. 
Another great reason to submit your best product for an entry is for your product to reach international audiences, and the prestige and honor that winning this award entitles you to.
|
def fid_experiment(run):
    """Build one CPMG-style pulse-sequence Experiment for accumulation *run*.

    The pulse and receiver phases are cycled with ``run % 4`` (a four-step
    phase cycle) so successive runs can be accumulated to cancel artefacts.
    Returns the populated ``Experiment`` object; it does not execute it.
    """
    e=Experiment()
    e.set_description("run",run)
    # --- acquisition parameters ------------------------------------------
    pulse90 = 2.1e-6 # s, 90-degree excitation pulse length
    pulse180 = 4.2e-6 # s, 180-degree refocusing pulse length
    td = 1.3*5 # repetition time between accumulations, s
    phase = 155 #receiver phase, degrees
    tau =300e-6 # s CPMG interpulse delay; > 100e-6
    rec_time = 4e-6 # s acquisition window per echo; <= tau-15e-6
    sampl_freq = 20e6 # sampling frequency, Hz
    no_echoes = 4000 # number of echoes acquired inside the loop
    no_points = 64 # measured points in the accumulated signal
    # ---------------------------------------------------------------------
    e.set_description("tau",tau)
    # NOTE(review): stores no_echoes+1, presumably because the first echo is
    # recorded before the loop — confirm downstream consumers expect this.
    e.set_description("no_echoes", no_echoes+1)
    e.set_description("pulse180",pulse180)
    # Sanity checks: reject parameter combinations the hardware cannot do.
    if pulse90>10e-6:
        raise Exception("--- 90 Pulse too long!!! ---")
    if pulse180>10e-6:
        raise Exception("--- 180 Pulse too long!!! ---")
    if tau <5e-6:
        raise Exception("--- Echo time shorter than gate time!!! ---")
    # ---------------------------------------------------------------------
    e.set_phase(0)
    # Wait the repetition time, minus the 5e-6 amplifier-gate pulse below and
    # the 0.5e-6 delay introduced by set_phase, so total timing stays td.
    e.wait(td-5e-6-0.5e-6)
    # first pulse ----------------------------------------------------------------
    e.ttl_pulse(length=5e-6, value=1) # gate high-power ampli on
    e.ttl_pulse(length=pulse90, value=1+2) # RF pulse
    # -----------------------------------------------------------------------------
    # Phase-cycled refocusing phase: 90/90/270/270 over the 4-step cycle.
    e.set_phase([90, 90, 270, 270][run%4])
    e.wait(tau-5e-6-0.5e-6) # e.set_phase introduces 0.5e-6 delay
    # first 180 pulse and recording -----------------------------------------------
    e.ttl_pulse(length=5e-6, value=1) # gate high-power ampli on
    e.ttl_pulse(length=pulse180, value=1+2) # RF pulse
    # Receiver phase cycles 0/180 so alternate runs subtract coherent noise.
    e.set_phase(phase+[0,180,0,180][run%4])
    # Centre the recording window on the echo (tau after the 180 pulse).
    e.wait(tau-0.5e-6-rec_time/2)
    e.record(samples=no_points, frequency=sampl_freq, timelength=rec_time, sensitivity=10) # this is rec_time long
    # -----------------------------------------------------------------------------
    # Echo train: repeat 180-pulse-and-record no_echoes times.
    e.loop_start(no_echoes)
    e.set_phase([90.0, 90.0, 270.0, 270.0][run%4])
    e.wait(tau-0.5e-6-5e-6-rec_time/2)
    e.ttl_pulse(length=5e-6, value=1) # gate high-power ampli on
    e.ttl_pulse(length=pulse180, value=1+2) # RF pulse
    e.set_phase(phase+[0,180,0,180][run%4])
    e.wait(tau-0.5e-6-rec_time/2)
    # NOTE(review): sensitivity here is 5, not 10 as for the first echo —
    # confirm this asymmetry is intentional.
    e.record(samples=no_points, frequency=sampl_freq, timelength=rec_time, sensitivity=5) # this is rec_time long
    e.loop_end()
    return e
def experiment():
    """Yield one phase-cycled fid_experiment per accumulation.

    Generator consumed by the spectrometer driver; each yielded Experiment
    uses run % 4 for its phase cycle.
    """
    accumulations = 4
    # Fix: the original used Python 2-only `xrange`, which raises NameError
    # on Python 3; `range` behaves identically here on both versions.
    for run in range(accumulations):
        yield fid_experiment(run)
|
Some of the best celeb Halloween costumes, a Croc designed by Post Malone, and lots more in the Dirty Laundry!
Hilary Duff welcomes a baby girl, Jenna Dewan's new mystery man has been revealed, and more Dirty Laundry!
NBC gives Megyn Kelly the boot, plus Robert DeNiro and Joe Biden are the latest people to receive bombs in the mail, and who did Modern Family kill off for Halloween? Find out in the latest Dirty Laundry!
Ice-T gets busted for skipping out on a toll, David Schwimmer swears he's innocent, and John Mayer establishes how many notches are in his bedpost in the latest Dirty Laundry!
Imagine hitchhiking in Australia... and getting picked up by Chris Hemsworth! Plus, Paula Abdul takes a bad spill, Swizz Beatz gets a great surprise, and more in today's Dirty Laundry!
Dirty Laundry: AMA Ratings Were Down... Way Down!
Our theories on why the AMAs had such low ratings, plus are Ben Affleck and Shauna Sexton finished? Get the latest in today's Dirty Laundry.
Dirty Laundry: Are Dakota Johnson and Chris Martin Having a Baby?
Are Dakota Johnson and Chris Martin expecting a baby, or not? Plus, Ben Affleck hits the road, Dancing With The Stars Junior premieres, and more.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.