text
stringlengths 29
850k
|
|---|
# -*- coding: utf-8 -*-
"""
For usage instructions, see:
https://kolibri-dev.readthedocs.io/en/develop/references/i18n.html
"""
from __future__ import unicode_literals
import argparse
import base64
import io
import json
import logging
import mimetypes
import os
import re
import sys
import tempfile
import noto_source
import utils
from fontTools import merge
from fontTools import subset
logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)
logging.getLogger("fontTools").setLevel(logging.WARNING)
# NOTE(review): this handler is created but never attached to any logger, so
# it has no effect; basicConfig above already installed a stderr handler.
logging.StreamHandler(sys.stdout)

"""
Constants
"""

# Directory where all generated .css and .woff assets are written.
OUTPUT_PATH = os.path.abspath(
    os.path.join(
        os.path.dirname(__file__),
        os.pardir,
        os.pardir,
        os.pardir,
        os.pardir,
        "kolibri",
        "core",
        "static",
        "assets",
        "fonts",
    )
)

# Sets the source date epoch to 1/1/21 to prevent temporary files from
# getting different headers on each run, leading to non-glyph-related changes to
# their base64 encoding.
# SOURCE_DATE_EPOCH is defined in *seconds* since the Unix epoch, not
# milliseconds: 1609459200 == 2021-01-01T00:00:00Z. The previous value
# ("1609459200000") was a millisecond timestamp, i.e. the year 52970.
# ref: https://github.com/fonttools/fonttools/issues/1135
os.environ["SOURCE_DATE_EPOCH"] = "1609459200"

FONT_TOOLS_OPTIONS = subset.Options()
FONT_TOOLS_OPTIONS.flavor = "woff"  # most widely supported format
FONT_TOOLS_OPTIONS.ignore_missing_unicodes = True  # important for subsetting

# basic latin glyphs
NOTO_SANS_LATIN = "NotoSans"

# font family name conventions
SCOPE_FULL = "noto-full"
SCOPE_SUBSET = "noto-subset"
SCOPE_COMMON = "noto-common"

"""
Shared helpers
"""
_FONT_FACE = """
@font-face {{
font-family: '{family}';
src: url('{url}') format('woff');
font-style: normal;
font-weight: {weight};
unicode-range: {unicodes};
font-display: swap;
}}
"""
def _gen_font_face(family, url, is_bold, unicodes):
weight = "bold" if is_bold else "normal"
return _FONT_FACE.format(family=family, url=url, weight=weight, unicodes=unicodes)
def _scoped(scope, name):
return "{}.{}".format(scope, name)
@utils.memoize
def _woff_font_path(name, is_bold):
    """Absolute path of a woff file in OUTPUT_PATH; weight encoded as 700/400."""
    weight = "700" if is_bold else "400"
    return os.path.join(OUTPUT_PATH, "{}.{}.woff".format(name, weight))
def _load_font(path):
    """
    Load a font file using fontTools' subset loader, after sanity-checking
    that the path looks like an actual font by mimetype.

    Exits the process with status 1 on a non-font mimetype or a missing
    file, since neither is recoverable during asset generation.
    """
    guess = mimetypes.guess_type(path)
    if guess[0] not in [
        "font/ttc",
        "font/ttf",
        "font/otf",
        "font/woff",
        "application/font-sfnt",
        "application/font-woff",
    ]:
        logging.error("Not a font file: {}".format(path))
        logging.error("Guessed mimetype: '{}'".format(guess[0]))
        # A text file here usually means a Git LFS pointer was checked out
        # instead of the binary font payload.
        logging.error("If this is a text file: do you have Git LFS installed?")
        sys.exit(1)
    try:
        # dontLoadGlyphNames speeds up loading; glyph names are not needed
        # for subsetting or merging.
        return subset.load_font(path, FONT_TOOLS_OPTIONS, dontLoadGlyphNames=True)
    except FileNotFoundError as e:  # noqa F821
        logging.error("Could not load font: {}".format(str(e)))
        logging.error("You may need to run: `make i18n-download-source-fonts`")
        sys.exit(1)
@utils.memoize
def _font_priorities(default_font):
    """
    Given a default font, return a list of all possible font names roughly in the order
    that we ought to look for glyphs in. Many fonts contain overlapping sets of glyphs.
    Without doing this: we risk loading a bunch of random font files just because they
    happen to contain one of the glyphs, and we also risk loading the 'wrong' version
    of the glyphs if they happen to differ.
    """
    # start with the default
    font_names = [default_font]
    # look in the latin set next
    # (compare by value: the previous `is not` identity check on strings only
    # worked by accident of CPython interning and is not guaranteed)
    if default_font != NOTO_SANS_LATIN:
        font_names.append(NOTO_SANS_LATIN)
    # then look at the rest of the supported languages' default fonts
    for lang_info in utils.available_languages():
        name = lang_info[utils.KEY_DEFAULT_FONT]
        if name not in font_names:
            font_names.append(name)
    # finally look at the remaining languages
    font_names.extend([fn for fn in noto_source.FONT_MANIFEST if fn not in font_names])
    return font_names
@utils.memoize
def _font_glyphs(font_path):
    """
    Extract the set of all code points mapped by any of a font's cmap tables.
    """
    glyphs = set()
    for table in _load_font(font_path)["cmap"].tables:
        glyphs.update(table.cmap.keys())
    return glyphs
def _clean_up(scope):
    """
    Delete all generated .css and .woff files in OUTPUT_PATH for the scope.
    """
    # Escape the scope and anchor the extension at the end of the name:
    # the previous unescaped, unanchored patterns would also have matched
    # names like "<scope>-foo.css.bak".
    pattern = r"{}.*\.(?:css|woff)$".format(re.escape(scope))
    for name in os.listdir(OUTPUT_PATH):
        if re.match(pattern, name):
            os.unlink(os.path.join(OUTPUT_PATH, name))
"""
CSS helpers
"""
CSS_HEADER = """
/*
* This is an auto-generated file, so any manual edits will be overridden.
*
* To regenerate, see instructions here:
* https://kolibri-dev.readthedocs.io/en/develop/references/i18n.html
*
* This file was generated by build_tools/i18n/fonts.py
*/
"""
def _list_to_ranges(input_list):
"""
Iterator of ranges of contiguous numbers from a list of integers.
Ranges returned are [x, y) – in other words, y is non-inclusive.
(from: http://code.activestate.com/recipes/496682/)
"""
new_list = list(input_list)
new_list.sort()
start = new_list[0]
currentrange = [start, start + 1]
for item in new_list[1:]:
if currentrange[1] == item:
currentrange[1] += 1 # contiguous
else:
yield tuple(currentrange) # new range start
currentrange = [item, item + 1]
yield tuple(currentrange) # last range
def _fmt_code(code):
return "{:x}".format(code).upper()
def _fmt_range(glyphs):
    """
    Generates a font-face-compatible 'unicode range' attribute for a given set of glyphs
    """
    parts = []
    for start, end in _list_to_ranges(sorted(glyphs)):
        last = end - 1  # ranges are half-open; the attribute wants inclusive ends
        if start == last:
            parts.append("U+{}".format(_fmt_code(start)))
        else:
            parts.append("U+{}-{}".format(_fmt_code(start), _fmt_code(last)))
    return ",".join(parts)
"""
Full Fonts
"""
def _full_font_face(font_family, font_name, is_bold, omit_glyphs=set()):
"""
generate the CSS reference for a single full font
"""
file_path = _woff_font_path(_scoped(SCOPE_FULL, font_name), is_bold=is_bold)
file_name = os.path.basename(file_path)
glyphs = _font_glyphs(file_path) - omit_glyphs
if not glyphs:
return ""
return _gen_font_face(
font_family, file_name, is_bold=is_bold, unicodes=_fmt_range(glyphs)
)
def _gen_full_css_modern(lang_info):
    """
    Generates listing for all full fonts, segmented by unicode ranges and weights.

    Writes '<scope>.<intl_code>.modern.css' containing one @font-face per
    font/weight pair, each restricted by unicode-range so modern browsers
    download only the fonts they actually need.
    """
    # skip previously accounted for glyphs so there is no overlap between font-faces
    previous_glyphs = set()
    # all available fonts
    font_faces = []
    for font_name in _font_priorities(lang_info[utils.KEY_DEFAULT_FONT]):
        font_faces.append(
            _full_font_face(
                SCOPE_FULL, font_name, is_bold=False, omit_glyphs=previous_glyphs
            )
        )
        font_faces.append(
            _full_font_face(
                SCOPE_FULL, font_name, is_bold=True, omit_glyphs=previous_glyphs
            )
        )
        # Assumes all four variants have the same glyphs, from the content Regular font
        previous_glyphs |= _font_glyphs(
            _woff_font_path(_scoped(SCOPE_FULL, font_name), is_bold=False)
        )
    output_name = os.path.join(
        OUTPUT_PATH,
        "{}.modern.css".format(_scoped(SCOPE_FULL, lang_info[utils.KEY_INTL_CODE])),
    )
    logging.info("Writing {}".format(output_name))
    with open(output_name, "w") as f:
        f.write(CSS_HEADER)
        f.write("".join(font_faces))
def _gen_full_css_basic(lang_info):
    """
    Write '<scope>.<intl_code>.basic.css' referencing only the language's
    default full font (regular and bold), for browsers that lack good
    unicode-range support.
    """
    intl_code = lang_info[utils.KEY_INTL_CODE]
    output_name = os.path.join(
        OUTPUT_PATH, "{}.basic.css".format(_scoped(SCOPE_FULL, intl_code))
    )
    logging.info("Writing {}".format(output_name))
    default_font = lang_info[utils.KEY_DEFAULT_FONT]
    with open(output_name, "w") as f:
        f.write(CSS_HEADER)
        f.write(_full_font_face(SCOPE_FULL, default_font, is_bold=False))
        f.write(_full_font_face(SCOPE_FULL, default_font, is_bold=True))
def _write_full_font(font_name, is_bold):
    """Copy one source Noto font into OUTPUT_PATH as a scoped .woff file."""
    source_path = noto_source.get_path(font_name, is_bold=is_bold)
    target_path = _woff_font_path(_scoped(SCOPE_FULL, font_name), is_bold=is_bold)
    logging.info("Writing {}".format(target_path))
    _load_font(source_path).save(target_path)
def command_gen_full_fonts():
    """Regenerate every full font file plus modern/basic CSS for each language."""
    logging.info("generating full fonts...")
    _clean_up(SCOPE_FULL)
    for font_name in noto_source.FONT_MANIFEST:
        for bold in (False, True):
            _write_full_font(font_name, is_bold=bold)
    languages = utils.available_languages(include_in_context=True, include_english=True)
    for lang_info in languages:
        _gen_full_css_modern(lang_info)
        _gen_full_css_basic(lang_info)
    logging.info("finished generating full fonts")
"""
Subset fonts
"""
def _chunks(string, n=72):
"""
Yield successive n-sized chunks from string
"""
for i in range(0, len(string), n):
yield string[i : i + n]
def _write_inline_font(file_object, font_path, font_family, is_bold):
    """
    Inlines a font as base64 encoding within a CSS file
    """
    with io.open(font_path, mode="rb") as f:
        encoded = base64.b64encode(f.read()).decode()
    # escaped newlines every 72 characters keep the generated CSS readable
    data_uri = "data:application/x-font-woff;charset=utf-8;base64,\\\n{}".format(
        "\\\n".join(_chunks(encoded))
    )
    glyphs = _font_glyphs(font_path)
    if not glyphs:
        # nothing mapped: emit no rule at all
        return
    file_object.write(
        _gen_font_face(
            family=font_family,
            url=data_uri,
            is_bold=is_bold,
            unicodes=_fmt_range(glyphs),
        )
    )
def _generate_inline_font_css(name, font_family):
    """
    Generate CSS and clean up inlined woff files
    """
    regular_path = _woff_font_path(name, is_bold=False)
    bold_path = _woff_font_path(name, is_bold=True)
    output_name = os.path.join(OUTPUT_PATH, "{}.css".format(name))
    logging.info("Writing {}".format(output_name))
    with open(output_name, "w") as f:
        f.write(CSS_HEADER)
        _write_inline_font(f, regular_path, font_family, is_bold=False)
        _write_inline_font(f, bold_path, font_family, is_bold=True)
    # the woff files only existed to be inlined; remove them
    os.unlink(regular_path)
    os.unlink(bold_path)
def _get_subset_font(source_file_path, text):
    """
    Given a source file and some text, returns a new, in-memory fontTools Font object
    that has only the glyphs specified in the set.

    Note that passing actual text instead of a glyph set to the subsetter allows it to
    generate appropriate ligatures and other features important for correct rendering.
    """
    if not os.path.exists(source_file_path):
        # NOTE(review): this only logs and then falls through; _load_font
        # below will sys.exit(1) on the missing file anyway — confirm whether
        # an early exit was intended here.
        logging.error("'{}' not found".format(source_file_path))
    font = _load_font(source_file_path)
    subsetter = subset.Subsetter(options=FONT_TOOLS_OPTIONS)
    subsetter.populate(text=text)
    subsetter.subset(font)
    return font
def _get_lang_strings(locale_dir):
"""
Text used in a particular language
"""
strings = []
for file_name in os.listdir(locale_dir):
if not file_name.endswith(".json"):
continue
file_path = os.path.join(locale_dir, file_name)
with io.open(file_path, mode="r", encoding="utf-8") as f:
lang_strings = json.load(f).values()
for s in lang_strings:
s = re.sub(r"\W", " ", s) # clean whitespace
strings.append(s)
strings.append(s.upper())
return strings
@utils.memoize
def _get_common_strings():
    """
    Text useful for all languages: displaying the language switcher, Kolibri version
    numbers, symbols, and other un-translated text
    """
    # Special characters that are used directly in untranslated template strings.
    # Search the codebase with this regex to find new ones: [^\x00-\x7F©–—…‘’“”•→›]
    strings = [
        chr(0x0),  # null
        "©",
        "–",  # en dash
        "—",  # em dash
        "…",
        "‘",
        "’",
        "“",
        "”",
        "•",
        "●",
        "→",
        "›",
    ]
    # all the basic printable ascii characters
    strings.extend(chr(c) for c in range(32, 127))
    # text from language names, both lower- and upper-case
    languages = utils.available_languages(include_in_context=True, include_english=True)
    for lang in languages:
        for name in (lang[utils.KEY_LANG_NAME], lang[utils.KEY_ENG_NAME]):
            strings.append(name)
            strings.append(name.upper())
    return strings
def _merge_fonts(fonts, output_file_path):
    """
    Given a list of fontTools font objects, merge them and export to output_file_path.

    Implementation note: it would have been nice to pass the fonts directly to the
    merger, but the current fontTools implementation of Merger takes a list of file names
    """
    import shutil  # local import: only needed for temp-dir cleanup here

    # Use a private temp directory instead of fixed names in the shared
    # tempdir, so concurrent runs cannot clobber each other's intermediate
    # files; remove it when done instead of leaving the woffs behind.
    tmp_dir = tempfile.mkdtemp()
    try:
        f_names = []
        for i, f in enumerate(fonts):
            tmp_font_path = os.path.join(tmp_dir, "{}.woff".format(i))
            f_names.append(tmp_font_path)
            f.save(tmp_font_path)
        merger = merge.Merger(options=FONT_TOOLS_OPTIONS)
        merged_font = merger.merge(f_names)
        merged_font.save(output_file_path)
    finally:
        shutil.rmtree(tmp_dir, ignore_errors=True)
    logging.info("created {}".format(output_file_path))
def _cannot_merge(font):
# all fonts must have equal units per em for merging, and 1000 is most common
return font["head"].unitsPerEm != 1000
def _subset_and_merge_fonts(text, default_font, subset_reg_path, subset_bold_path):
    """
    Given text, generate both a bold and a regular font that can render it.

    Fonts are visited in priority order; each contributes a subset covering
    the requested text, and the subsets are merged into one regular and one
    bold file under OUTPUT_PATH.
    """
    reg_subsets = []
    bold_subsets = []
    skipped = []  # NOTE(review): collected but never reported anywhere
    # track which glyphs are left
    remaining_glyphs = set([ord(c) for c in text])
    for font_name in _font_priorities(default_font):
        full_reg_path = _woff_font_path(_scoped(SCOPE_FULL, font_name), is_bold=False)
        full_bold_path = _woff_font_path(_scoped(SCOPE_FULL, font_name), is_bold=True)
        reg_subset = _get_subset_font(full_reg_path, text)
        bold_subset = _get_subset_font(full_bold_path, text)
        # fonts with non-standard units-per-em cannot be merged; skip both
        # weights together so regular and bold stay in sync
        if _cannot_merge(reg_subset) or _cannot_merge(bold_subset):
            skipped.append(font_name)
            continue
        reg_subsets.append(reg_subset)
        bold_subsets.append(bold_subset)
        # coverage is tracked from the regular variant only
        remaining_glyphs -= _font_glyphs(full_reg_path)
        if not remaining_glyphs:
            break
    _merge_fonts(reg_subsets, os.path.join(OUTPUT_PATH, subset_reg_path))
    _merge_fonts(bold_subsets, os.path.join(OUTPUT_PATH, subset_bold_path))
def command_gen_subset_fonts():
    """
    Creates custom fonts that attempt to contain all the glyphs and other font features
    that are used in user-facing text for the translation in each language.

    We make a separate subset font for common strings, which generally overlaps somewhat
    with the individual language subsets. This slightly increases how much the client
    needs to download on first request, but reduces Kolibri's distribution size by a
    couple megabytes.
    """
    logging.info("generating subset fonts...")
    _clean_up(SCOPE_COMMON)
    _clean_up(SCOPE_SUBSET)
    # subset shared by all languages: symbols, ascii, language names
    _subset_and_merge_fonts(
        text=" ".join(_get_common_strings()),
        default_font=NOTO_SANS_LATIN,
        subset_reg_path=_woff_font_path(SCOPE_COMMON, is_bold=False),
        subset_bold_path=_woff_font_path(SCOPE_COMMON, is_bold=True),
    )
    # one subset per language, built from its translated UI strings
    languages = utils.available_languages(include_in_context=True, include_english=True)
    for lang_info in languages:
        logging.info("gen subset for {}".format(lang_info[utils.KEY_ENG_NAME]))
        strings = []
        strings.extend(_get_lang_strings(utils.local_locale_path(lang_info)))
        strings.extend(_get_lang_strings(utils.local_perseus_locale_path(lang_info)))
        name = lang_info[utils.KEY_INTL_CODE]
        _subset_and_merge_fonts(
            text=" ".join(strings),
            default_font=lang_info[utils.KEY_DEFAULT_FONT],
            subset_reg_path=_woff_font_path(_scoped(SCOPE_SUBSET, name), is_bold=False),
            subset_bold_path=_woff_font_path(_scoped(SCOPE_SUBSET, name), is_bold=True),
        )
    # generate common subset file
    _generate_inline_font_css(name=SCOPE_COMMON, font_family=SCOPE_COMMON)
    # generate language-specific subset font files
    languages = utils.available_languages(include_in_context=True, include_english=True)
    for lang in languages:
        _generate_inline_font_css(
            name=_scoped(SCOPE_SUBSET, lang[utils.KEY_INTL_CODE]),
            font_family=SCOPE_SUBSET,
        )
    logging.info("subsets created")
"""
Add source fonts
"""
def command_update_font_manifest(ref):
noto_source.update_manifest(ref)
def command_download_source_fonts():
    # Thin wrapper: download every font listed in the Noto manifest.
    noto_source.fetch_fonts()
"""
Main
"""
def main():
"""
Generates files to support both 'basic' and a 'modern' browsers.
Both browsers get the common and language-specific application subset fonts inline
to load quickly and prevent a flash of unstyled text, at least for all application
text. Full font files are linked and will load asynchronously.
# Modern behavior
Newer browsers have full support for the unicode-range attribute of font-face
definitions, which allow the browser to download fonts as-needed based on the text
observed. This allows us to make _all_ font alphabets available, and ensures that
content will be rendered using the best font possible for all content, regardless
of selected app language.
# Basic behavior
Older browsers do not fully support the unicode-range attribute, and will eagerly
download all referenced fonts regardless of whether or not they are needed. This
would have an unacceptable performance impact. As an alternative, we provide
references to the full fonts for the user's currently-selected language, under the
assumption that most of the content they use will be in that language.
Content viewed in other languages using the basic variant should still usually
display, albeit using system fonts.
"""
description = "\n\nProcess fonts.\nSyntax: [command] [branch]\n\n"
parser = argparse.ArgumentParser(description=description)
subparsers = parser.add_subparsers(dest="command")
subparsers.add_parser(
"update-font-manifest",
help="Update manifest from https://github.com/googlei18n/noto-fonts/",
).add_argument(
"--ref",
help="Github reference, e.g. commit or tag. Defaults to head of master.",
type=str,
)
subparsers.add_parser(
"download-source-fonts",
help="Download sources from https://github.com/googlei18n/noto-fonts/",
)
subparsers.add_parser(
"generate-subset-fonts", help="Generate subset fonts based on app text"
)
subparsers.add_parser("generate-full-fonts", help="Generate full fonts")
args = parser.parse_args()
if args.command == "update-font-manifest":
command_update_font_manifest(args.ref)
elif args.command == "download-source-fonts":
command_download_source_fonts()
elif args.command == "generate-subset-fonts":
command_gen_subset_fonts()
elif args.command == "generate-full-fonts":
command_gen_full_fonts()
else:
logging.warning("Unknown command\n")
parser.print_help(sys.stderr)
sys.exit(0)
if __name__ == "__main__":
main()
|
People ask all the time, "Why hire a GM?" Well, here is the answer: when you hire a Game Master you know that they will come prepared to the session, make sure you get what you paid for, and all of the other players will be in the same boat. It takes time and dedication to run a game as much as it takes some serious effort to find a group worth your time. Paying for a professional Game Master just helps everything go a whole lot smoother.
Experienced players want someone reliable. New players want someone knowledgeable. And some players want a solo adventure where their character can shine. Whatever the case may be, consider hiring a Game Master for your next game.
2.20.2019 - Please be patient with missing content while the website is growing. Thank you!
Adventure is out there. Begin yours today!
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import os
import json
import time
from dateutil import parser as du_parser
from datetime import datetime
import StringIO
import logging
import requests
from requests.auth import HTTPBasicAuth
import html5lib
from html5lib import treebuilders
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)

# Personal access token for authenticated GitHub requests (higher rate limit).
GITHUB_TOKEN = os.environ.get('GITHUB_TOKEN')

last_id = None  # NOTE(review): never updated in the visible code

# time constants and rate-limit tuning
one_minute = 60
one_hour = one_minute * 60
# pause crawling once fewer than this many API requests remain
min_remaining_tostop = 30
reqs = 0
# reqs_limit = None
# reqs_remaining = None
headers = {}

# GitHub token auth convention: token as username, 'x-oauth-basic' as password
TOKEN_AUTH = HTTPBasicAuth(GITHUB_TOKEN, "x-oauth-basic")
def check_limits(headers):
    ''' pause for one hour when the GitHub rate-limit headers run low '''
    reqs_limit = int(headers.get('X-RateLimit-Limit', 0))
    reqs_remaining = int(headers.get('X-RateLimit-Remaining', 0))
    if reqs_remaining > min_remaining_tostop:
        return
    used = reqs_limit - reqs_remaining
    logger.info("Reached %d requests over %d. Pausing one hour."
                % (used, reqs_limit))
    pause(one_hour)
def pause(duration):
    ''' basic sleep with periodic logging (to show progress) '''
    interval = 10
    tick = duration / interval
    for step in xrange(interval):
        elapsed_mn = tick * step / one_minute
        logger.info(u"Pause (%dmn) Elapsed: %dmn" % (duration / one_minute,
                                                     elapsed_mn))
        time.sleep(tick)
# users discovered in the previous step are the crawl input
with open('step2.json') as f:
    existing_users = json.load(f)
# resume support: reload already-extended users when step3.json exists.
# Catch only "file missing / unparseable" — the previous bare `except:`
# would also have swallowed KeyboardInterrupt and genuine bugs.
try:
    with open('step3.json') as f:
        all_users = json.load(f)
except (IOError, OSError, ValueError):
    all_users = []
def getElementsByClassName(root, tag, className):
    """Return all <tag> descendants of root whose class attribute contains className."""
    matches = []
    for element in root.getElementsByTagName(tag):
        if className in element.getAttribute('class'):
            matches.append(element)
    return matches
def extend_user(user):
    """
    Enrich a crawled user dict in place with contribution activity (scraped
    from the profile page), API profile fields, and organization data.

    Returns the updated dict, or None when the user's GitHub page 404s.
    """
    print(user.get('username'))

    def get_activity_from_html(username):
        """Scrape the contribution summary columns from the profile page.

        Returns None on 404, otherwise a dict of contribution totals/dates.
        """
        r = requests.get('https://github.com/%s' % username,
                         headers=headers, auth=TOKEN_AUTH)
        if r.status_code == 404:
            return None
        parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
        dom = parser.parse(StringIO.StringIO(r.content))
        divs = dom.getElementsByTagName('div')
        contrib_columns = [d for d in divs
                           if 'contrib-column' in d.getAttribute('class')]
        if not len(contrib_columns):
            # profile shows no contribution boxes at all
            return {'contrib_total_num': 0,
                    'contrib_total_start': None,
                    'contrib_total_end': None,
                    'contrib_long_num': 0,
                    'contrib_long_start': None,
                    'contrib_long_end': None}
        total_str = getElementsByClassName(
            contrib_columns[0], "span",
            "contrib-number")[0].firstChild.nodeValue
        total_dates_dom = getElementsByClassName(
            contrib_columns[0], "span", "text-muted")[1]
        total_dates = "".join([n.nodeValue
                               for n in total_dates_dom.childNodes])
        # dates are rendered as "<start> – <end>" separated by an en dash
        total_start = du_parser.parse(total_dates.split(u'–')[0])
        total_end = du_parser.parse(total_dates.split(u'–')[1])
        long_str = getElementsByClassName(
            contrib_columns[1], "span",
            "contrib-number")[0].firstChild.nodeValue
        long_dates_dom = getElementsByClassName(
            contrib_columns[1], "span", "text-muted")[1]
        long_dates = "".join([n.nodeValue
                              for n in long_dates_dom.childNodes])
        if long_dates == "No recent contributions":
            long_start = None
            long_end = None
        else:
            long_start = du_parser.parse(long_dates.split(u'–')[0].strip())
            # Streak dates omit the year, so dateutil assumes the current
            # year, which can land after the profile's end date; roll back
            # one year keeping month and day. (The original code used
            # `long_start.year.day`, which raises AttributeError since
            # `.year` is an int.)
            if long_start.year > total_end.year:
                long_start = datetime(long_start.year - 1,
                                      long_start.month, long_start.day)
            long_end = du_parser.parse(long_dates.split(u'–')[1].strip())
            if long_end.year > total_end.year:
                long_end = datetime(long_end.year - 1, long_end.month,
                                    long_end.day)
        return {
            'contrib_total_num': int(total_str.split()[0].replace(',', '')),
            'contrib_total_start': total_start.isoformat(),
            'contrib_total_end': total_end.isoformat(),
            'contrib_long_num': int(long_str.split()[0].replace(',', '')),
            'contrib_long_start':
                long_start.isoformat() if long_start is not None else None,
            'contrib_long_end':
                long_end.isoformat() if long_end is not None else None}

    def get_profile(user):
        """Fetch the user's API profile, minus URL fields and keys already present."""
        r = requests.get(
            'https://api.github.com/users/%s' % user.get('username'),
            headers=headers, auth=TOKEN_AUTH)
        check_limits(r.headers)
        nd = {}
        data = json.loads(r.content)
        for col in data.keys():
            # drop hypermedia URL fields, but keep the avatar image URL
            if 'url' in col and not col == 'avatar_url':
                continue
            if col in user.keys():
                continue
            nd.update({col: data[col]})
        return nd

    def get_orgs(username):
        """Fetch the user's organizations; fields are prefixed org0_, org1_, ..."""
        orgs = {}
        r = requests.get('https://api.github.com/users/%s/orgs' % username,
                         headers=headers, auth=TOKEN_AUTH)
        check_limits(r.headers)
        data = json.loads(r.content)
        orgs.update({'orgs_num': len(data)})
        for i, org in enumerate(data):
            org_name = org.get('login')
            prefix = 'org%d_' % i
            rorg = requests.get('https://api.github.com/orgs/%s' % org_name,
                                headers=headers, auth=TOKEN_AUTH)
            check_limits(rorg.headers)
            data_org = json.loads(rorg.content)
            nd = {}
            for col in data_org.keys():
                if 'url' in col and not col == 'avatar_url':
                    continue
                nd.update({prefix + col: data_org[col]})
            orgs.update(nd)
        return orgs

    try:
        activity = get_activity_from_html(user.get('username'))
    except Exception as e:
        logger.exception(e)
        # re-raise: scraping failures should abort the crawl (the original
        # had an unreachable `acitiviy = {}` after this raise; removed)
        raise
    from pprint import pprint as pp
    pp(activity)
    if activity is None:
        return None
    profile = get_profile(user)
    orgs = get_orgs(user.get('username'))
    user.update(activity)
    user.update(profile)
    user.update(orgs)
    return user
# extend_user({'username': 'tensystems'})
# raise

# usernames already processed by a previous (possibly interrupted) run
all_usernames = [u['username'] for u in all_users]
for user in existing_users:
    if user['username'] in all_usernames:
        continue  # already extended; skip
    user_update = extend_user(user)
    if user_update is None:
        continue  # GitHub profile 404'd
    all_users.append(user_update)
    # checkpoint after every user so an interruption loses no work
    # NOTE(review): assuming this first dump sits inside the loop — confirm
    # against the original indentation
    json.dump(all_users, open('step3.json', 'w'), indent=4)
# final write (redundant if the last checkpoint succeeded)
json.dump(all_users, open('step3.json', 'w'), indent=4)
|
The Baumhaus Hotel has seven separate treehouses for you to choose from. They are all made from untreated local wood, stand 5-7 metres high and each one has a dreamy balcony. The houses are insulated and heated so you can be toasty warm while winter rages outside. Showers are near to the treehouse by the firepit where you can drink cocoa in the evenings. The treehouses are located in the experimental forest of Schönhagen – a popular tourist destination with plenty to see and do. ‘The Castle’ is one of the treehouses on offer and it is built like a medieval castle. But rather than being a tool to oppress the masses this Castle is more like an opportunity to oppress boring holidays! There is a high battlement from where you can see stunning views of the surrounding area. The ‘Tree Dream’ is another treehouse and features little rooms suspended by carbon fiber while the large ‘Roundhouse’ is a delicious wooden donut… with a family filling...? I don’t know, get your own metaphor.
|
#IFIc Version 2.4 corresponding to IFIb Version 2.3 3/8/16 - cmf
#changed oofthreshold = 16, and scalefactor = 31.
#log now collects the IFIb float value scaled by scalefactor
#IFIc Version 2.3 corresponding to IFIb Version 2.3 3/1/16 - cmf
#changed oofthreshold = 16, and scalefactor = 32
import serial
from serial import SerialException #to be able to use Except SerialException:
from subprocess import Popen, PIPE
from os import path
from time import sleep
from datetime import datetime
from datetime import timedelta
##############################################################################
# Imports the necessary software for graphing
import gaugette.ssd1306
import sys
from math import sin #imported sin for Foale function
#Imports necessary setup for GPIO input/output
import RPi.GPIO as GPIO
start_time = datetime.now()


def millis():
    """Milliseconds elapsed since this module started, as a float."""
    elapsed = datetime.now() - start_time
    whole_seconds = elapsed.days * 24 * 60 * 60 + elapsed.seconds
    return whole_seconds * 1000 + elapsed.microseconds / 1000.0
# declare rfcomm functions to manage the system rf comm device
def rfinit():
    """Ensure bluetooth serial channel 22 is registered and discoverable."""
    # check whether channel 22 has already been added
    p = Popen('sdptool browse local', shell=True, stdout=PIPE)
    result = p.communicate()[0]
    if result.find('Channel: 22') > -1:
        print("Serial Port is already present")
        return
    # make bluetooth discoverable and register serial channel 22
    Popen('sudo hciconfig hci0 piscan', shell=True)
    Popen('sudo sdptool add --channel=22 SP', shell=True)
    print("Serial Port channel 22 was added")
    return
def rfhangup():
    # this releases the comm port, if it exists, and returns
    # (output is piped away; errors from a missing binding are ignored)
    Popen('sudo rfcomm release /dev/rfcomm0', shell=True, stdout=PIPE)
    return
def rflisten():
    # this opens a comm port on rfcomm0 channel 22, which is left running
    # in the background (the Popen handle is deliberately not waited on)
    p = Popen('sudo rfcomm listen /dev/rfcomm0 22 &', shell=True, stdout=PIPE)
    return
def rfshow():
    """Return True when a connection on rfcomm0 is currently established."""
    p = Popen('rfcomm show /dev/rfcomm0', shell=True, stdout=PIPE, stderr=PIPE)
    output = p.communicate()[0]
    # the status line contains the word 'connect' only while connected
    return output.find('connect') > -1
def rfcommListen(timeout):
    """
    Wait up to `timeout` seconds for a bluetooth serial connection on
    rfcomm0 channel 22, then perform the 'Ready for data'/'data follows'
    handshake.

    Returns False on timeout or setup failure, otherwise the tuple
    (goodQ, ser): goodQ is True when the handshake succeeded, ser is the
    serial port object.
    """
    start_count = 0  # counter to see if we have waited too long
    goodQ = False  # we return the value of this, True means we got connected
    # first hangup any connection
    rfhangup()
    # give the system and the remote a chance to do stuff
    sleep(2)
    # open the port on channel 22 and wait
    rflisten()
    # put some text on the display as to what we are doing
    led.clear_display()
    text = 'Waiting for connect..'
    led.draw_text2(0, 0, text, 1)
    text2 = 'Press Pb1+Pb2 to Exit'
    led.draw_text2(0, 16, text2, 1)
    led.display()
    # now wait for a connection
    while True:
        start_count = start_count + 1
        if start_count > timeout:
            print('Listen for connection timed out. Hanging up')
            rfhangup()
            return False
        sleep(1)  # wait a second, so we dont use up too much cpu
        if GPIO.input(Pb1) == False:  # see if button pushed to Exit
            if GPIO.input(Pb2) == False:
                # both buttons held: show a message and stop the program
                print('Pb1 & Pb2 pressed!')
                led.clear_display()
                text = 'Pb1 & Pb2 pressed!'
                led.draw_text2(0, 0, text, 1)
                text2 = 'Powercycle to restart'
                led.draw_text2(0, 16, text2, 1)
                led.display()
                exit()
        if rfshow():
            print('We are connected')
            break  # we are connected
    # see if the /dev/rfcomm0 path exists to prove we are connected
    bool_path_exists = path.exists('/dev/rfcomm0')
    print('rfcomm0 path is ' + str(bool_path_exists))
    if bool_path_exists:
        ser = serial.Serial('/dev/rfcomm0', 9600)
    else:
        print('rfcomm0 was not created as expected')
        rfhangup()
        return False
    # rfcomm exists so open the serial port
    # NOTE(review): pyserial opens the port on construction; calling open()
    # again raises on newer pyserial versions — confirm installed version.
    ser.open()
    # send an acknowledgement
    ser.write('Ready for data\n')
    # read the response - it will wait forever
    singleread = ser.readline()
    if singleread.find('data follows') > -1:
        print(singleread)
        goodQ = True
    nbytes = ser.inWaiting()
    return (goodQ, ser)
def command(bOn):  # function for sending a command back to data-recorder Pi upon button push - ICF
    """
    Send CMD1 (save recent history) to the data-recorder Pi, log the event,
    update the display, and reconfigure the out-of-family LED.

    bOn: True when the LED is currently on (out-of-family condition).
    """
    print('Send Command1')  # Temporary command response for testing - ICF
    ser.write('CMD1\n')  # an acknowledgement is always terminated with \n
    # log the event (9999 is the CMD1 marker in the log)
    timems = millis()
    logfile.write(str(timems) + ' ' + '9999' + '\n')
    # put some text on the display as to what we are doing
    led.clear_display()
    text = 'Saving recent history..'
    led.draw_text2(0, 0, text, 1)
    text2 = 'Push button 2 for IFI incorporation'
    led.draw_text2(0, 16, text2, 1)
    led.display()
    # reconfigure the LED
    if bOn:  # this means the LED should be on already
        GPIO.output(LED_out, False)  # bOn == True means we were out of family, so turn off the LED
        return
    GPIO.output(LED_out, True)  # bOn == False means we are in family, just want to send a command
    sleep(0.2)
    GPIO.output(LED_out, False)
    return
def command2(bOn):  # function for sending a command back to data-recorder Pi upon button push(new command will be update command) - ICF-cmf
    """
    Handle the pause/update flow: pause IFI (CMD3), then either resume
    (Pb2 or an 8881 marker from IFI), run an IFI update (CMD2, confirmed
    with Pb1), or shut down entirely (Pb1 + Pb2).

    bOn: True when the out-of-family LED is currently lit.
    """
    print('Send Command3')  # tells IFIb to pause sending, and only continues when it receives another pause command
    ser.write('CMD3\n')  # an acknowledgement is always terminated with \n
    # put some text on the display as to what we are doing
    led.clear_display()
    text = 'IFI Paused..'
    led.draw_text2(0, 0, text, 1)
    text2 = 'Press Pb1 IFI Update'
    led.draw_text2(0, 16, text2, 1)
    led.display()
    sleep(1)
    while True:  # wait for either pb1 or pb2
        sleep(.1)
        try:
            nbytes = ser.inWaiting()
            if nbytes > 0:
                singleread = ser.readline()
                # now see what we got..it should always be a float number of one sort or another
                fnum = float(singleread)
                print('command2 got ' + str(singleread))
                ser.write('\n')  # an acknowledgement of any bytes received must be made
                if fnum == 8881.0:
                    # IFI ended the pause on its own; log and exit
                    print('CMD3 pause terminated by IFI ')
                    timems = millis()
                    logfile.write(str(timems) + ' ' + '8881' + '\n')
                    return
            if GPIO.input(Pb1) == False:  # this is confirmation we want IFI Update
                print('Send Command3')  # tells IFIb to unpause sending, and only continues when it receives another pause command
                ser.write('CMD3\n')  # an acknowledgement is always terminated with \n
                text2 = 'Pb1 pressed... '
                led.draw_text2(0, 16, text2, 1)
                led.display()
                sleep(2)  # time to release pb1 or else the shutdown routine will execute
                break
            if GPIO.input(Pb2) == False:
                # Pb2 again: unpause and return without updating
                print('Send Command3')  # tells IFIb to unpause sending, and only continues when it receives another pause command
                ser.write('CMD3\n')  # an acknowledgement is always terminated with \n
                return
        except IOError:
            print("IO error in command2()")
            return
    # first see if we are trying to do a regular shutdown using pb1 as well
    if GPIO.input(Pb1) == False:
        logfile.close()
        led.clear_display()
        text = 'Both pbs pressed..'
        led.draw_text2(0, 0, text, 1)
        text2 = 'IFI Closed'
        led.draw_text2(0, 16, text2, 1)
        led.display()
        exit()
    print('Send Command2')  # Temporary command response for testing - ICF
    ser.write('CMD2\n')  # an acknowledgement is always terminated with \n
    # log the event (8888 marks CMD2 start and completion)
    timems = millis()
    logfile.write(str(timems) + ' ' + '8888' + '\n')
    # put some text on the display as to what we are doing
    led.clear_display()
    text = 'Updating IFI... '
    led.draw_text2(0, 0, text, 1)
    text2 = 'Please wait > 2 mins'
    led.draw_text2(0, 16, text2, 1)
    led.display()
    # reconfigure the LED
    # NOTE(review): unlike command(), there is no early return after the
    # bOn branch here, so the LED is flashed in both cases — confirm intended.
    if bOn:  # this means the LED should be on already
        GPIO.output(LED_out, False)  # bOn == True means we were out of family, so turn off the LED
    GPIO.output(LED_out, True)  # bOn == False means we are in family, just want to send a command
    sleep(0.2)
    GPIO.output(LED_out, False)
    # now wait for a response. It is possible another number will be received, that was sent by IFI
    # before it received the CMD2 command, so we need to handle that
    sleep(2)  # should be enough for any present to be received
    print("Command2 done waiting for bytes")
    while True:
        sleep(0.1)
        try:
            nbytes = ser.inWaiting()
            if nbytes > 0:
                singleread = ser.readline()
                ser.write('\n')  # an acknowledgement of any bytes received must be made
                # now see what we got..it should always be a float number of one sort or another
                fnum = float(singleread)
                print('command2 got ' + str(singleread))
                if fnum == 8888.0:
                    print('CMD2 is complete')
                    timems = millis()
                    logfile.write(str(timems) + ' ' + '8888' + '\n')
                    break
        except IOError:
            print("IO error in command2()")
            return
    return
### function added to draw vertical lines by CMF
def line(i, j, k, bLed):
    """Draw (or erase) a quasi-vertical segment from (i, j) to (i+1, k).

    The first half of the vertical span is drawn in column i and the second
    half in column i+1, approximating a line between two neighbouring
    history samples on the 128x32 OLED strip chart.

    :param i: x coordinate (column) of the segment start
    :param j: y coordinate of the segment start
    :param k: y coordinate of the segment end (drawn in column i+1)
    :param bLed: True lights the pixels, False clears them
    """
    # Midpoint of the vertical span. Floor division keeps this an int under
    # both Python 2 and Python 3; the original '/' produces a float in
    # Python 3 and would make the range() calls below raise TypeError.
    m = (j + k) // 2
    if k > j:  # segment rises
        for l in range(j, m + 1):          # first half in column i
            led.draw_pixel(i, l, bLed)
        for l in range(m + 1, k + 1):      # remaining pixels one column over
            led.draw_pixel(i + 1, l, bLed)
        return
    if k < j:  # segment falls
        for l in range(j, m, -1):          # first half in column i
            led.draw_pixel(i, l, bLed)
        for l in range(m, k - 1, -1):      # remaining pixels one column over
            led.draw_pixel(i + 1, l, bLed)
        return
    if j == k:  # flat: just two horizontally adjacent pixels
        led.draw_pixel(i, j, bLed)
        led.draw_pixel(i + 1, k, bLed)
        return
def testline():
    """Exercise line(): draw a triangle wave across the display, then erase it."""
    led.clear_display()
    lh = 21
    # Up-slope then down-slope sample values. Wrap range() in list() so the
    # concatenation also works under Python 3, where range() is lazy and
    # does not support '+'.
    history = list(range(0, 31, 3)) + list(range(31, -1, -3))
    for i in range(0, lh):
        line(i, history[i], history[i + 1], True)
    sleep(1)
    for i in range(0, lh):
        line(i, history[i], history[i + 1], False)
    #check the OLED has no lines :-) - CMF
    led.clear_display()
    return
########################END OF FUNCTION DEFINITIONS##################################
# --- one-time setup: log-file rotation, OLED init, GPIO init, splash screen ---
print('start')
# Sets up our pins and creates variables for the size of the display. If using other size display you can easily change them.
#get the next log file number
lognumberfile = open('/home/pi/projects/OLEDPython/lognumber.txt','r')
lognumberstr = lognumberfile.readline()
#increment the next log file number
nextlognumber = int(lognumberstr) +1
#write the next lognumber to the file
lognumberfile.close()
lognumberfile = open('/home/pi/projects/OLEDPython/lognumber.txt','w')
lognumberfile.write(str(nextlognumber)+'\n')
lognumberfile.close()
#setup the logfile name
logfilename = '/home/pi/projects/OLEDPython/ifilog' + str(int(lognumberstr)) + '.txt'
print('Using log file ' + logfilename)
logfile = open(logfilename,'w')
# OLED wiring and geometry (gaugette SSD1306 driver, 128x32 panel)
RESET_PIN = 15
DC_PIN = 16
width = 128
height = 32
led = gaugette.ssd1306.SSD1306(reset_pin=RESET_PIN, dc_pin=DC_PIN)
led.begin()
led.clear_display()
GPIO.setmode(GPIO.BCM)
Pb1 = 24 #CMF switched pb's 2/11/16 #GPIO pin 23 is the input from button 1 for restarting program upon disconnect, or sending 'save data for IFI' command back to data recorder - ICF
Pb2 = 23 #CMF switched pb's #GPIO pin 24 is input from button 2 for reboot command to reboot data recorder - ICF
LED_out = 18
# Push buttons are active-low: pulled up, reading False when pressed.
GPIO.setup(Pb2, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(Pb1, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(LED_out, GPIO.OUT) #GPIO pin 18 is the output to the LED
#RUN = True #Variable that keeps program running until final exception
# Splash screen: draw one sine-wave period across the panel to show it works.
for x in range(width):
#print(x)
fx = float(x)
y = (16 * sin((fx/128)*6.28) +16)
iy = int(y)
#print(iy)
led.draw_pixel(x,iy, True)
led.display()
print('Display initialized')
#initialize LED on count down timer to be 0, so the light will not be on
ledoncountdowntimer = 0;
#initialize the out of family threshold compared to max scale of 32
oofthreshold = 16 # 3/1/16 -cmf v 2.3
GPIO.output(LED_out, False) #make sure the LED is off
# --- main loop: (re)establish the Bluetooth rfcomm link, then stream data ---
while True:
rfinit()
#Listen for connection
bool_result, ser = rfcommListen(3600)
if bool_result == False:
print('nobody connected before timeout')
print('releasing rfcomm0')
rfhangup()
exit()
led.clear_display()
# 'timeout' counts 0.1 s polls with no traffic before the link is declared dead
timeout = 300 #roughly 25s delay
startcount = 0
history = [] #array for datapoints to go on graph, for 128 pixels wide
scalefactor = 31.0 #multiplies received normalized datapoints for display - height is 31
for i in range(width):
history.append(31) #fills the history array with max values, which is a line at the bottom of the display
#initialization is done, now get data
# inner loop: poll the serial link, scroll the strip chart, service buttons
while True:
sleep(0.1)
startcount = startcount + 1
if startcount > timeout:
ser.close()
print('Timeout exceeded - closing')
break
led.display()
try:
nbytes = ser.inWaiting()
if nbytes > 0:
singleread = ser.readline()
print(singleread)
timems = millis()
# incoming value appears to be normalized; scaled to the 0..31 pixel range
fnum = scalefactor * float(singleread)
logfile.write(str(timems) + ' ' + str(fnum) + '\n') #Version 2.4 - 3/8/16 -cmf
# display y axis is inverted: row 0 is the top of the panel
inum =31 - int(fnum)
if inum > 31 :
inum = 31
if inum < 0:
inum = 0
oof = (31 - inum) # Out Of Family scaled 0 - 31
print(str(fnum)+' ' + str(oof)) #our debug output
for i in range(width-1):
line(i,history[i],history[i+1],False) #undraw the old pixels
history.pop(0) #remove the oldest value
history.append(inum) #add the new value to history
for i in range(width-1):
#led.draw_pixel(i,history[i], True) #draw the new value as a pixel
line(i,history[i],history[i+1],True)
# This is the sending acknowledgement
bOn = (oof > oofthreshold) # this is our Out of Family threshhold
if oof > oofthreshold:
GPIO.output(LED_out, True) #we are Out of Family - turn the light on
ledoncountdowntimer = 120; #for the time the data is on the display we will stay on before resetting, to attract attention to display
if oof < oofthreshold and ledoncountdowntimer > 0: #see if we should reset the LED
ledoncountdowntimer = ledoncountdowntimer-1;
if ledoncountdowntimer == 0: #the data has scrolled off the display, so reset
GPIO.output(LED_out, False)
# Pb1 triggers command(); Pb2 triggers command2() (IFI update flow)
if GPIO.input(Pb1) == False:
command(bOn)
#wait for a response from sender the command is complete - TBD
sleep(1) #this has to be very short so that sender does not wait too long for a response, then hangsup
led.clear_display() #prepare the display for normal graph
if GPIO.input(Pb2) == False:
command2(bOn)
print("command2 done. Back in the main loop..")
#wait for a response from sender the command is complete - TBD
sleep(1) #this has to be very short so that sender does not wait too long for a response, then hangsup
led.clear_display() #prepare the display for normal graph
else: #we either send a \n on its own, or preceded as a command (see above)
ser.write('\n')
startcount = 0
except IOError:
print('connection was dropped')
#close the log file
logfile.close()
#put some text on the display as to what we are doing
led.clear_display()
text = 'Connection was dropped'
led.draw_text2(0,0,text,1)
text2 = 'Push Pb1 to restart'
led.draw_text2(0,16,text2,1)
led.display()
# NOTE(review): the loop below only exits the program via exit(); the
# following ser.close()/break look unreachable -- confirm intended
# restart behaviour.
while True:
if GPIO.input(Pb1) == False: #see if button pushed to reboot
exit()
#Popen('sudo reboot', shell=True)
ser.close()
break
print('releasing rfcomm0')
rfhangup()
print(history)
|
Abstract: Understand the principles, applications, and limitations of a cutting-edge material. Based on the author’s 26 years of experience in the field of Nanotechnology, this reference offers researchers and materials scientists a complete reference to the physical concepts, techniques, applications and principles underlying one of the most researched materials. Keeps you abreast of the latest trends, developments, and commercial applications.
Why Is This Length Scale So Important?
What Does Nano Really Mean?
Copyright / Pub. Date: 2006 The McGraw-Hill Companies, Inc.
Joseph H. Koo is currently a senior research fellow and director of Solid Freeform Fabrication Consortium in the Department of Mechanical Engineering at the University of Texas at Austin, Austin, TX. He is chairman of the newly established SAMPE Nanotechnology Committee and immediate past chairman of the AIAA Materials Technical Committee. Dr. Koo has 30 years of industrial and academic experience in program and engineering management and has published over 200 papers on thermal, material, and optical sciences in a variety of science journals and conference proceedings.
Description: Understand the principles, applications, and limitations of a cutting-edge material. Based on the author’s 26 years of experience in the field of Nanotechnology, this reference offers researchers and materials scientists a complete reference to the physical concepts, techniques, applications and principles underlying one of the most researched materials. Keeps you abreast of the latest trends, developments, and commercial applications.
|
from django import forms
from django.forms import ModelForm
from django.contrib.auth.models import User
from app_users.models import UserProfile, Employment
from app_users.models_nn import CreativeFields
from app_collaborations.options import CREATIVE_FIELDS
#=========================================================================
# USER
#=========================================================================
class UserProfileForm(ModelForm):
    """
    Manage the basic information of a user profile.

    .. seealso:: :class:`app_users.models.UserProfile`
    """
    class Meta:
        model = UserProfile
        # 'user' and 'employment' are managed through other forms/views,
        # so they are excluded from this one.
        exclude = ('user', 'employment')
class UserAuthForm(ModelForm):
    """
    Manage the account information of a user profile.

    Only the e-mail address of the auth ``User`` is editable here.
    """
    class Meta:
        model = User
        fields = ('email',)
#=========================================================================
# EMPLOYMENT
#=========================================================================
class EmploymentForm(ModelForm):
    """
    Manage the employment data of a user.

    .. seealso:: :class:`app_users.models.Employment`
    """
    class Meta:
        # NOTE(review): no 'fields'/'exclude' declared, so all model fields
        # are exposed; Django >= 1.8 raises ImproperlyConfigured for this --
        # confirm the Django version this project targets.
        model = Employment
#=========================================================================
# CREATIVE FIELDS
#=========================================================================
class CreativeFieldsAddForm(forms.Form):
    """
    Form for attaching a new creative field to a user profile; the choice
    list only offers fields the profile does not already have.

    .. seealso:: :class:`app_users.models_nn.CreativeFields`
    """
    class Meta:
        model = CreativeFields
        exclude = ('userprofile',)

    def __init__(self, *args, **kwargs):
        profile = kwargs.pop('user_filter')
        super(CreativeFieldsAddForm, self).__init__(*args, **kwargs)
        # Creative fields already attached to this profile must not be
        # offered again.
        taken = [entry.creative_field
                 for entry in CreativeFields.objects.filter(userprofile=profile)]
        remaining = tuple((key, label)
                          for key, label in CREATIVE_FIELDS if key not in taken)
        self.fields.insert(len(self.fields) - 1, 'creative_field',
                           forms.ChoiceField(choices=remaining))
|
S* Golden Oracle’s Sugar Rush JW, NFO d 09.
S* Just Catnap’s Impreza JW, NFO n 09 22.
IT* Norr Venn’s Whichita JW, NFO f 09 22.
S*Zygot’s Inge Strand, NFO n 03.
(N) Migoto’s Yvette, NFO n 09 22.
(N) Migoto’s Walcot, NFO a.
(N) Migoto’s Wilde, NFO n 09.
(N) Migoto’s Zonya, NFO n 22.
SC (N) Migoto’s Simba JW, NFO n 09 24.
NW SW’12 SC (N) Migoto’s Othelia JW, DSM, DVM NFO g 09 24.
SW’10 SC (N) Migoto’s Johanne JW, NFO f 24.
SC (N) Migoto’s Sarah, NFO f 03 22.
IC S* Restless Bluestar, NFO a 22.
SC S*Just Catnap’s Warm Cotton, JW, DVM, NFO ns 01 21.
GIC DK* Europa’s Mynte Is JW, NFO n 22.
IC (N) Migoto’s Viola, NFO a 22.
GIC (N) Migoto’s Teoline, NFO n.
IC (N) Migoto’s Tomine JW, NFO w 62.
IC S* Viento’s Tequila Rose JW, NFO f 09 22.
– Årets Skogkatt Ungdyr, 4-10 måneder.
|
# Copyright 2014 Zoltán Zörgő <soltan.zorgo@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Sensor datasheet: http://www.meas-spec.com/downloads/HTU21D.pdf
#
# Credits to: Jay Wineinger <jay.wineinger@gmail.com>
# Based on: https://github.com/jwineinger/quick2wire-HTU21D/blob/master/htu21d.py
import time
from webiopi.devices.i2c import I2C
from webiopi.devices.sensor import Temperature,Humidity
from webiopi.utils.types import toint
class CRCFailed(Exception): pass  # raised when a sensor reading fails CRC-8 verification
class HTU21D(I2C, Temperature, Humidity):
    """Driver for the Measurement Specialties HTU21D I2C humidity/temperature sensor.

    Uses the no-hold-master measurement commands and validates every raw
    reading against the sensor's CRC-8 checksum.
    Datasheet: http://www.meas-spec.com/downloads/HTU21D.pdf
    """

    # Command bytes from the datasheet.
    CMD_READ_TEMP_HOLD = 0xe3
    CMD_READ_HUM_HOLD = 0xe5
    CMD_READ_TEMP_NOHOLD = 0xf3
    CMD_READ_HUM_NOHOLD = 0xf5
    CMD_WRITE_USER_REG = 0xe6
    CMD_READ_USER_REG = 0xe7
    CMD_SOFT_RESET = 0xfe

    # uses bits 7 and 0 of the user_register mapping
    # to the bit resolutions of (relative humidity, temperature)
    RESOLUTIONS = {
        (0, 0) : (12, 14),
        (0, 1) : (8, 12),
        (1, 0) : (10, 13),
        (1, 1) : (11, 11),
    }

    # sets up the times to wait for measurements to be completed. uses the
    # max times from the datasheet plus a healthy safety margin (10-20%)
    MEASURE_TIMES = {
        (12, 14): (.018, .055),
        (8, 12): (.005, .015),
        (10, 13): (.006, .028),
        (11, 11): (.01, .009),
    }

    def __init__(self):
        """Attach to the fixed HTU21D slave address and cache measurement timings."""
        I2C.__init__(self, 0x40)
        self.resolutions = self.get_resolutions()
        self.rh_timing, self.temp_timing = self.MEASURE_TIMES[self.resolutions]

    def __str__(self):
        return "HTU21D(slave=0x%02X)" % self.slave

    def __family__(self):
        return [Temperature.__family__(self), Humidity.__family__(self)]

    def check_crc(self, sensor_val):
        """Verify the sensor's CRC-8 (polynomial 0x131) over a 24-bit reading.

        :param sensor_val: 16 data bits followed by the 8-bit checksum
        :raises CRCFailed: if the checksum does not match
        """
        message_from_sensor = sensor_val >> 8
        check_value_from_sensor = sensor_val & 0x0000FF
        remainder = message_from_sensor << 8  # Pad with 8 bits because we have to add in the check value
        remainder |= check_value_from_sensor  # Add on the check value
        divisor = 0x988000  # This is the 0x0131 polynomial shifted to farthest left of three bytes
        # Operate on only 16 positions of max 24. The remaining 8 are our
        # remainder and should be zero when we're done.
        for i in range(16):
            if remainder & (1 << (23 - i)):  # Check if there is a one in the left position
                remainder ^= divisor
            divisor >>= 1  # Rotate the divisor max 16 times so that we have 8 bits left of a remainder
        if remainder:
            raise CRCFailed("CRC checksum failed.")

    def reset(self):
        """Issue a soft reset; the datasheet allows up to 15 ms to recover."""
        self.writeByte(self.CMD_SOFT_RESET)
        time.sleep(.02)

    def set_resolution(self, resIndex):
        """Write *resIndex* to the user register.

        NOTE(review): this overwrites the whole register, not just the two
        resolution bits -- confirm the remaining bits may be cleared.
        """
        self.writeRegister(self.CMD_WRITE_USER_REG, resIndex)
        time.sleep(.02)

    def get_resolutions(self):
        """Return the active (RH bits, temperature bits) resolution pair."""
        user_reg = self.readRegister(self.CMD_READ_USER_REG)
        # Resolution is encoded in bits 7 and 0 of the user register.
        # The previous code used (user_reg >> 6), which also captured the
        # reserved bit 6 and produced keys such as (2, 0) that do not exist
        # in RESOLUTIONS, raising KeyError whenever bit 7 was set.
        return self.RESOLUTIONS[(user_reg >> 7) & 0x1, user_reg & 0x1]

    def get_temp(self):
        """Trigger a temperature conversion and return degrees Celsius."""
        self.writeByte(self.CMD_READ_TEMP_NOHOLD)
        time.sleep(self.temp_timing)
        results = self.readBytes(3)
        raw_temp = int.from_bytes(results, byteorder="big")
        self.check_crc(raw_temp)
        results[1] = results[1] & 0xFC  # clear status bits
        raw_temp = int.from_bytes(results, byteorder="big")
        # Conversion formula from the datasheet; data is the top 16 bits.
        return -46.85 + (175.72 * ((raw_temp >> 8) / float(2**16)))

    def get_rel_humidity(self):
        """Trigger an RH conversion and return raw relative humidity in %."""
        self.writeByte(self.CMD_READ_HUM_NOHOLD)
        time.sleep(self.rh_timing)
        results = self.readBytes(3)
        raw_hum = int.from_bytes(results, byteorder="big")
        self.check_crc(raw_hum)
        results[1] = results[1] & 0xFC  # clear status bits
        raw_hum = int.from_bytes(results, byteorder="big")
        return -6 + (125 * ((raw_hum >> 8) / float(2**16)))

    def get_comp_rel_humidity(self):
        """Return relative humidity (%) compensated for ambient temperature."""
        RHactualT = self.get_rel_humidity()
        Tactual = self.get_temp()
        CoeffTemp = -0.15  # temperature coefficient from datasheet
        return RHactualT + (25 - Tactual) * CoeffTemp

    def __getCelsius__(self):
        self.reset()
        return self.get_temp()

    def __getFahrenheit__(self):
        return self.Celsius2Fahrenheit()

    def __getKelvin__(self):
        return self.Celsius2Kelvin()

    def __getHumidity__(self):
        self.reset()
        # webiopi expects humidity as a 0..1 fraction, hence the /100
        return self.get_comp_rel_humidity() / 100.00
|
Using Adobe InDesign you will have fun creating an event flyer you can customize for your own purposes. Along the way I will teach you the general basics of graphic design that you will end up using again and again. This is a great class if you are new to InDesign and want to start creating.
The Best Indesign Flyer Ever!
After I graduated from the University of Connecticut I attended the Savannah College of Art & Design to embark upon my career in professional design. Recently I have been providing digital services to corporate brands and have been fortunate enough to work with some great companies such as NBC Sports & Olympics, Gartner Group, Design Within Reach and the United States Tennis Association.
Typesetting for Classy Invitations: The Easy Way to Design Your Own Invites!
|
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Aaron Meier <aaron@bluespeed.org>
'''
from django import forms
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from webinterface.lib.models import UserProfile, Settings, Coffee
class UserCreationForm(forms.ModelForm):
    """Create a django.contrib.auth ``User`` account."""
    # Declared explicitly so the password renders as a masked input
    # instead of the model's default text widget.
    password = forms.CharField(widget=forms.PasswordInput())
    class Meta:
        model = User
        fields = ["username", "password", "first_name", "last_name", "email"]
class ProfileCreationForm(forms.ModelForm):
    """Edit the language and admin flag of a ``UserProfile``."""
    class Meta:
        model = UserProfile
        fields = ["language", "is_admin"]
class SettingsChangeForm(forms.ModelForm):
    """Edit the global application ``Settings``."""
    # NOTE(review): 'descale' is declared but absent from Meta.fields, so it
    # is rendered and validated but not saved by ModelForm.save() -- confirm
    # whether the view handles it separately.
    descale = forms.BooleanField(widget=forms.CheckboxInput, required=False)
    welcome_message = forms.CharField(widget=forms.Textarea(attrs={'rows':5, 'cols':30}))
    class Meta:
        model = Settings
        fields = ["force_ssl", "welcome_message", "telnet"]
class LoginForm(forms.ModelForm):
    """Login form: username/password plus a 'remember me' checkbox."""
    # Extra flag, not a User model field (not listed in Meta.fields).
    remember = forms.BooleanField(widget=forms.CheckboxInput, required=False)
    # Masked input for the password.
    password = forms.CharField(widget=forms.PasswordInput())
    class Meta:
        model = User
        fields = ["username", "password"]
class CoffeeOrderForm(forms.ModelForm):
    """Order a coffee: type, number of cups, and an optional start time."""
    typ = forms.ChoiceField(widget=forms.RadioSelect, required=True, choices=Coffee.typ_choices)
    cups = forms.ChoiceField(widget=forms.RadioSelect, required=True, choices=Coffee.cups_choices)
    # Optional: the order can instead be flagged with 'now'.
    datetime = forms.DateTimeField(required=False)
    # Extra flag, not part of the Coffee fields listed in Meta.
    now = forms.BooleanField(required=False)
    class Meta:
        model = Coffee
        fields = ["typ", "cups", "datetime"]
|
Fast forward too many years to mention and eons of running scales, picking patterns, hammer-on and pull-off exercises. I decided I needed to start warming up in a new way. I set my intention and purpose of becoming more musical every time I picked up the guitar to warm up and devised a three part warm up routine. Starting today I’ll share them with you.
The first video is designed to build up your finger dexterity. While it may not be very musical, it will give you a new found access to each of your fingers and a strong connection between right and left hands. This is extremely beneficial on your journey to becoming a more melodic player. When you aren’t limited by technique, you’re not limited in your ability to create. If you can move each of your fingers easily and independently and have a great right/left hand connection, melodic and musical ideas will be right there where they should be…at your fingertips!
|
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Toolbelt - a utility tu run tools in docker containers
# Copyright (C) 2016 Bitcraze AB
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import subprocess
import unittest
from unittest.mock import MagicMock
from unittest.mock import patch

from toolbelt.utils.subproc import SubProc
from toolbelt.utils.exception import ToolbeltException
class SubprocTest(unittest.TestCase):
    """Unit tests for SubProc.

    subprocess functions are stubbed via unittest.mock.patch.object so the
    real implementations are restored after each test. The previous version
    assigned MagicMock objects directly to the subprocess module, which
    permanently replaced subprocess.call/check_call/check_output for every
    test executed afterwards in the same process.
    """

    def setUp(self):
        self.sut = SubProc()

    def test_that_call_passes_args_on(self):
        # Fixture
        with patch.object(subprocess, 'call', return_value=47) as call_mock:
            # Test
            actual = self.sut.call(1, "string", name="value")
            # Assert
            call_mock.assert_called_with(1, "string", name="value")
            self.assertEqual(47, actual)

    def test_that_call_handles_exception(self):
        # Fixture
        error = subprocess.CalledProcessError(17, 'cmd', b'output')
        with patch.object(subprocess, 'call', side_effect=error):
            # Test / Assert
            with self.assertRaises(ToolbeltException):
                self.sut.call()

    def test_that_check_call_passes_args_on(self):
        # Fixture
        with patch.object(subprocess, 'check_call',
                          return_value=b'Some string') as check_call_mock:
            # Test
            self.sut.check_call(1, "string", name="value")
            # Assert
            check_call_mock.assert_called_with(1, "string", name="value")

    def test_that_check_call_handles_exception(self):
        # Fixture
        error = subprocess.CalledProcessError(17, 'message', b'output')
        with patch.object(subprocess, 'check_call', side_effect=error):
            # Test / Assert
            with self.assertRaises(ToolbeltException):
                self.sut.check_call()

    def test_that_check_output_passes_args_on(self):
        # Fixture
        with patch.object(subprocess, 'check_output',
                          return_value=b'Some string') as check_output_mock:
            # Test
            self.sut.check_output(1, "string", name="value")
            # Assert
            check_output_mock.assert_called_with(1, "string", name="value")

    def test_that_check_output_handles_exception(self):
        # Fixture
        error = subprocess.CalledProcessError(17, 'message', b'output')
        with patch.object(subprocess, 'check_output', side_effect=error):
            # Test / Assert
            with self.assertRaises(ToolbeltException):
                self.sut.check_output()

    def test_that_output_is_converted_to_utf8(self):
        # Fixture
        with patch.object(subprocess, 'check_output', return_value=b'Some string'):
            # Test
            actual = self.sut.check_output()
            # Assert
            self.assertEqual('Some string', actual)
|
Savannah Guthrie is under fire for her interview with the Kentucky high school student at the center of the viral video that has divided the nation. The “Today” show host is facing outrage from all sides. Nick Sandmann, 16, spoke on-camera for the first time to Guthrie about that confrontation with a Native American activist on the steps of the Lincoln Memorial.
|
import sys, os
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
def load_dataset(if_data_shake):
    """Download (if needed) and load MNIST, returning train/val/test splits.

    :param if_data_shake: 0 returns MNIST unchanged; any other value
        overwrites the first 50000 training samples with synthetic
        constant images (all -10 or all +10) and alternating 0/1 labels.
    :returns: (X_train, y_train, X_val, y_val, X_test, y_test)
    """
    if sys.version_info[0] == 2:
        from urllib import urlretrieve
    else:
        from urllib.request import urlretrieve

    def download(filename, source='http://yann.lecun.com/exdb/mnist/'):
        # Fetch one raw MNIST file into the local 'mnist/' cache directory.
        print("Downloading %s" % filename)
        urlretrieve(source + filename, 'mnist/' + filename)

    import gzip

    def load_mnist_images(filename):
        # IDX image files: 16-byte header, then one byte per pixel.
        if not os.path.exists('mnist/' + filename):
            download(filename)
        with gzip.open('mnist/' + filename, 'rb') as f:
            data = np.frombuffer(f.read(), np.uint8, offset=16)
        data = data.reshape(-1, 784)
        # Scale to [0, 1); the division also yields a fresh writable array.
        return data / np.float32(256)

    def load_mnist_labels(filename):
        # IDX label files: 8-byte header, then one byte per label.
        if not os.path.exists('mnist/' + filename):
            download(filename)
        with gzip.open('mnist/' + filename, 'rb') as f:
            data = np.frombuffer(f.read(), np.uint8, offset=8)
        return data

    # Both original branches performed the identical download/split; only
    # the synthetic overwrite differed, so load once and shake afterwards.
    X_train = load_mnist_images('train-images-idx3-ubyte.gz')
    y_train = load_mnist_labels('train-labels-idx1-ubyte.gz')
    X_test = load_mnist_images('t10k-images-idx3-ubyte.gz')
    y_test = load_mnist_labels('t10k-labels-idx1-ubyte.gz')
    # We reserve the last 10000 training examples for validation.
    X_train, X_val = X_train[:-10000], X_train[-10000:]
    y_train, y_val = y_train[:-10000], y_train[-10000:]

    if if_data_shake != 0:
        # frombuffer() returns read-only arrays; copy before mutating
        # instead of forcing the writeable flag, which newer NumPy
        # rejects for buffer-backed arrays.
        X_train = X_train.copy()
        y_train = y_train.copy()
        for i in range(50000):
            y_train[i] = i % 2
            dd = -10 if i % 2 == 0 else 10
            X_train[i] = np.ones(784) * dd

    return X_train, y_train, X_val, y_val, X_test, y_test
def load_cifar(f):
    """Load one CIFAR-10 batch file and split it into train/val/test.

    :param f: path to a pickled batch dict with 'data' and 'labels' keys
    :returns: (X_train, y_train, X_val, y_val, X_test, y_test), where
        images have shape (n, 3, 32, 32)
    """
    # cPickle only exists on Python 2; fall back to the Python 3 pickle.
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    # 'with' guarantees the file handle is closed even if unpickling fails.
    with open(f, 'rb') as fo:
        d = pickle.load(fo)
    X_train, y_train = d['data'], np.array(d['labels'], dtype=np.int32)
    X_train = X_train.reshape(-1, 3, 32, 32)
    # Carve 1000 validation samples, then 1000 test samples, off the end.
    X_train, X_val = X_train[:-1000], X_train[-1000:]
    y_train, y_val = y_train[:-1000], y_train[-1000:]
    X_train, X_test = X_train[:-1000], X_train[-1000:]
    y_train, y_test = y_train[:-1000], y_train[-1000:]
    return X_train, y_train, X_val, y_val, X_test, y_test
def load_20news():
    """Fetch the 20 Newsgroups corpus and return TF-IDF train/val/test splits."""
    train_set = fetch_20newsgroups(subset='train',
                                   remove=('headers', 'footers', 'quotes'))
    test_set = fetch_20newsgroups(subset='test',
                                  remove=('headers', 'footers', 'quotes'))
    # Fit the vectorizer on the training documents only, then reuse its
    # vocabulary for the test documents.
    tfidf = TfidfVectorizer()
    X_train = tfidf.fit_transform(train_set.data)
    X_test = tfidf.transform(test_set.data)
    y_train, y_test = train_set.target, test_set.target
    # Hold out the final 1000 training documents for validation.
    X_train, X_val = X_train[:-1000], X_train[-1000:]
    y_train, y_val = y_train[:-1000], y_train[-1000:]
    return X_train, y_train, X_val, y_val, X_test, y_test
|
Discussion in 'General Chatter' started by ablaze, Mar 20, 2007.
does anyone have this? just i have an L shaped hall and will be a fortune for carpet!! i have laminate in my living room and is a bugger 2 mop(yes i am that lazy!) so pros and cons please!!!
I dont have laminate but i do have lino.
Pros, easy to clean up the dirt and mud from when people/things walk in with mudy feet.
I have solid oak flooring through my living room, and hall and upstairs in the bathroom!
i have lam flooring in my hall way in this new house, i like it better there than carpet coz its easier to keep clean from muddy footprints, but in front rooms etc, i like carpet better.
i reckon ill price the 2 then decide lol, as id need 2 get the soundproofing stuff underneath as i live in flats, just dont want to pay for a carpet to have 2 bin half of it!
can you not get an off cut?
In out local carpet places then sell rolls that are the end of the line or spare stuff. you can normaly get them cheaper.
I make things easier by hoovering then mopping my laminate.
i want wooden floor as im sick to death of hovering up crumbs and things getting stuck !!!
y mum had lamintae in her hall cause of having dogs, made it a lot easier to deal with muddy foot prints. Personally I think because halls are heavy traffic if you dont have laminate or wood then you need a decent quality carpet. Imade the mistake in my old house of having cheap carpet on my hall and stairs and it wore soo badly it looked like crap within 6 months. This time round I have a mottled oatmeal berber carpet which is ideal for hallways, hard wearing and doesnt show up footprints. To stay chaep look at wearouse fitter advertised on local papers, they dont pay shop overheads, and come out out to see you with samples instead. We have a very large hall landing and stairs and inclusing fitting it cost just £360 all in. We just had to pull up the existing carpet and get rid of it.
my last house had laminate flooring all down stairs it was rhe main reason i wanted to move, i hated the kids crawling along it, and when the sun shines on it it shows every mark no matter how much i mopped it.
The dog and muddy footprints is exactly the reason i went with lino. The carpet was horrible and i could get the marks out!
i wont have this problem as i dont have stairs and no windows in my hall lol, im gunna price them both and see i think!
I have laminate in my hallway and playroom/dining room. In the front room we have carpet. I hoover mine then wash on my hands and knees with a cloth and cleaner then dry with a teatowel, otherwise when the light shines through my front door you can see every mark and smudge which drives me crazy!
Pros - easy to wipe up footprints etc.
Cons - colder especially for LOs crawling, shows all the smudges and carpet only needs to be hoovered off.
But I do think it's best to have laminate in the hall because of the traffic. So many people don't take their shoes off when they come into our house (assholes!) and someone brought their pram through it the other day instead of leaving it in the front garden/taking it round the back.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata block (schema version 1.1).
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_encap_pool
short_description: Manage encap pools on Cisco ACI fabrics (fvns:VlanInstP, fvns:VxlanInstP, fvns:VsanInstP)
description:
- Manage vlan, vxlan, and vsan pools on Cisco ACI fabrics.
- More information from the internal APIC class
I(fvns:VlanInstP), I(fvns:VxlanInstP), and I(fvns:VsanInstP) at
U(https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Jacob McGill (@jmcgill298)
version_added: '2.5'
options:
allocation_mode:
description:
- The method used for allocating encaps to resources.
- Only vlan and vsan support allocation modes.
aliases: [ mode ]
choices: [ dynamic, static]
description:
description:
- Description for the C(pool).
aliases: [ descr ]
pool:
description:
- The name of the pool.
aliases: [ name, pool_name ]
pool_type:
description:
- The encap type of C(pool).
required: yes
aliases: [ type ]
choices: [ vlan, vxlan, vsan]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Add a new vlan pool
aci_encap_pool:
hostname: apic
username: admin
password: SomeSecretPassword
pool: production
pool_type: vlan
description: Production VLANs
state: present
- name: Remove a vlan pool
aci_encap_pool:
hostname: apic
username: admin
password: SomeSecretPassword
pool: production
pool_type: vlan
state: absent
- name: Query a vlan pool
aci_encap_pool:
hostname: apic
username: admin
password: SomeSecretPassword
pool: production
pool_type: vlan
state: query
- name: Query all vlan pools
aci_encap_pool:
hostname: apic
username: admin
password: SomeSecretPassword
pool_type: vlan
state: query
'''
RETURN = r'''
#
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
# Per-encap-type APIC object class and managed-object RN prefix, used by
# main() to build the request URL and payload for the selected pool_type.
ACI_MAPPING = dict(
    vlan=dict(
        aci_class='fvnsVlanInstP',
        aci_mo='infra/vlanns-',
    ),
    vxlan=dict(
        aci_class='fvnsVxlanInstP',
        aci_mo='infra/vxlanns-',
    ),
    vsan=dict(
        aci_class='fvnsVsanInstP',
        aci_mo='infra/vsanns-',
    ),
)
def main():
    """Ansible entry point: create, delete or query an ACI encap pool."""
    argument_spec = aci_argument_spec()
    argument_spec.update(
        allocation_mode=dict(type='str', aliases=['mode'], choices=['dynamic', 'static']),
        description=dict(type='str', aliases=['descr']),
        pool=dict(type='str', aliases=['name', 'pool_name']),
        pool_type=dict(type='str', aliases=['type'], choices=['vlan', 'vxlan', 'vsan'], required=True),
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # 'pool' is mandatory except when merely querying.
        required_if=[
            ['state', 'absent', ['pool']],
            ['state', 'present', ['pool']],
        ],
    )

    params = module.params
    allocation_mode = params['allocation_mode']
    description = params['description']
    pool = params['pool']
    pool_type = params['pool_type']
    state = params['state']

    pool_info = ACI_MAPPING[pool_type]
    aci_class = pool_info['aci_class']
    aci_mo = pool_info['aci_mo']
    pool_name = pool

    # ACI pool URLs embed the allocation mode for vlan and vsan pools
    # (e.g. uni/infra/vlanns-[poolname]-static); vxlan pools never do.
    if pool_type != 'vxlan' and pool is not None:
        if allocation_mode is None:
            module.fail_json(msg='ACI requires the "allocation_mode" for "pool_type" of "vlan" and "vsan" when the "pool" is provided')
        pool_name = '[{0}]-{1}'.format(pool, allocation_mode)

    # Vxlan pools do not support allocation modes at all.
    if pool_type == 'vxlan' and allocation_mode is not None:
        module.fail_json(msg='vxlan pools do not support setting the allocation_mode; please remove this parameter from the task')

    aci = ACIModule(module)
    aci.construct_url(
        root_class=dict(
            aci_class=aci_class,
            aci_rn='{0}{1}'.format(aci_mo, pool_name),
            filter_target='eq({0}.name, "{1}")'.format(aci_class, pool),
            module_object=pool,
        ),
    )
    aci.get_existing()

    if state == 'present':
        # Build the desired configuration; null-valued parameters are dropped.
        aci.payload(
            aci_class=aci_class,
            class_config=dict(
                allocMode=allocation_mode,
                descr=description,
                name=pool,
            )
        )
        # The diff against the existing object becomes the POST request body.
        aci.get_diff(aci_class=aci_class)
        # No-op when in check mode or when proposed equals existing.
        aci.post_config()
    elif state == 'absent':
        aci.delete_config()

    module.exit_json(**aci.result)
# Invoke the module entry point when Ansible executes this file directly.
if __name__ == "__main__":
    main()
|
The Winnipeg Goldeyes have signed right-handed pitchers Dexter Carter and Clayton Shunick for the 2012 season.
Carter, 25, played last season with Single-A Kannapolis of the South Atlantic League in the Chicago White Sox organization. He went 3-5 with a 6.65 ERA in 14 games, including 12 starts, with Kannapolis.
Shunick, 25, split last season in the Cincinnati Reds organization between High-A Bakersfield of the California League and Double-A Carolina of the Southern League. In 31 games with Bakersfield, he went 3-3 with a 2.51 ERA and four saves.
The Goldeyes have also traded starting pitcher Isaac Hess to the Bridgeport Bluefish of the Atlantic League for future considerations and released pitchers Zach Baldwin and Aaron Hartsock and infielder Steve Singleton.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2009-2011, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms, with or
# without modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
__author__ = 'clarkmatthew'
from winrm.protocol import Protocol
from winrm.exceptions import WinRMTransportError
from isodate.isoduration import duration_isoformat
from datetime import timedelta
import StringIO
import traceback
import socket
import copy
import sys
import time
import re
class Winrm_Connection:
def __init__(self,
hostname,
username,
password,
port=5985,
protocol='http',
transport='plaintext',
default_command_timeout=600,
url=None,
debug_method=None,
verbose=True):
self.debug_method = debug_method
self.hostname = hostname
self.username = username
self.password = password
self.port = int(port)
self.protocol = protocol
self.transport = transport
self.default_command_timeout = default_command_timeout #self.convert_iso8601_timeout(default_command_timeout)
self.url = url or str(protocol)+"://"+str(hostname)+":"+str(port)+"/wsman"
self.winproto = self.get_proto()
self.shell_id = None
self.command_id = None
self.last_used = None
self.verbose = verbose
def get_proto(self):
self.debug('Creating winrm connection:' + str(self.hostname) + ":" + str(self.port) + ", Username:" + str(self.username) + ', Password:' + str(self.password))
winproto = Protocol(endpoint=self.url,transport=self.transport,username=self.username,password=self.password)
#winproto.transport.timeout = self.default_command_timeout
return winproto
def convert_iso8601_timeout(self, timeout):
#convert timeout to ISO8601 format
return duration_isoformat(timedelta(int(timeout)))
def debug(self, msg):
if self.debug_method:
self.debug_method(msg)
else:
print(msg)
def reset_shell(self, timeout=None, retries=5):
retry = 0
tb = ""
e = None
self.close_shell()
timeout = timeout or self.default_command_timeout
self.winproto.transport.timeout = timeout #self.default_command_timeout
#self.debug('reset_shell connection, Host:' + str(self.hostname) + ":" + str(self.port) + ", Username:" + str(self.username) + ', Password:' + str(self.password))
while retry < retries:
retry += 1
try:
self.shell_id = self.winproto.open_shell()
return self.shell_id
except WinRMTransportError, wte:
print "Failed to open shell on attempt#:" + str(retry) + "/" + str(retries)+ ", err:" + str(wte)
if retry < retries:
time.sleep(5)
except Exception, e:
tb = self.get_traceback()
errmsg = "Error caught while reseting winrm shell:" +str(e)
self.debug("Error caught while reseting winrm shell:" +str(e))
self.debug(str(tb))
raise Exception('Could not open shell to ' + str(self.url) + str(e))
def cmd(self, command, console_mode_stdin=True, skip_cmd_shell=False, timeout=None, verbose=None):
errmsg = ""
if verbose is None:
verbose = self.verbose
orig_cmd = copy.copy(command)
arguments = command.split(' ')
command = arguments.pop(0)
self.command_id = None
#if timeout is not None:
#convert timeout to ISO8601 format
#timeout = self.convert_iso8601_timeout(timeout)
self.reset_shell(timeout=timeout)
try:
self.command_id= self.winproto.run_command(self.shell_id,
command,
arguments=arguments,
console_mode_stdin=console_mode_stdin,
skip_cmd_shell=skip_cmd_shell)
self.debug('winrm timeout:' + str(timeout) + ', cmd:' + str(orig_cmd))
if timeout is not None:
sockdefault = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
stdout, stderr, statuscode = self.get_timed_command_output(self.shell_id, self.command_id, active_timeout=timeout)
else:
stdout, stderr, statuscode = self.winproto.get_command_output(self.shell_id, self.command_id)
self.debug( 'Command:"' + str(orig_cmd) + '" , Done.')
except WinRMTransportError as wte:
errmsg = str(wte)
except CommandTimeoutException as cte:
self.debug(str(cte))
errmsg = 'timed out'
finally:
try:
#self.winproto.transport.timeout = self.default_command_timeout
if timeout is not None:
socket.setdefaulttimeout(sockdefault)
self.winproto.cleanup_command(self.shell_id, self.command_id)
except: pass
self.close_shell()
if errmsg:
if re.search('timed out', errmsg, re.IGNORECASE):
raise CommandTimeoutException('ERROR: Timed out after:' +
str(self.winproto.transport.timeout) +
', Cmd:"' + str(orig_cmd))
else:
raise Exception(errmsg)
if verbose:
self.debug("\n" + str(stdout) + "\n" + str(stderr))
return {'stdout':stdout, 'stderr':stderr, 'statuscode':statuscode}
def get_timed_command_output(self, shell_id, command_id, active_timeout=0):
"""
Get the Output of the given shell and command
@param string shell_id: The shell id on the remote machine. See #open_shell
@param string command_id: The command id on the remote machine. See #run_command
@param int active_timeout: Time out used during an active session. For example as the shell is actively returning
data, but we want to timeout anyways. See cmd timeout for idle timeout where no
data has been read.
"""
stdout_buffer, stderr_buffer = [], []
command_done = False
start = time.time()
while not command_done:
elapsed = time.time()-start
if active_timeout and (elapsed > active_timeout):
raise CommandTimeoutException('Active timeout fired after:' + str(elapsed))
stdout, stderr, return_code, command_done = \
self.winproto._raw_get_command_output(shell_id, command_id)
stdout_buffer.append(stdout)
stderr_buffer.append(stderr)
return ''.join(stdout_buffer), ''.join(stderr_buffer), return_code
def close_shell(self):
if self.shell_id:
self.winproto.close_shell(self.shell_id)
self.shell_id = None
def sys(self, command, include_stderr=False, listformat=True, carriage_return=False, timeout=None, code=None, verbose=None):
ret = []
if verbose is None:
verbose = self.verbose
output = self.cmd(command, timeout=timeout, verbose=verbose )
if code is not None and output['statuscode'] != code:
raise CommandExitCodeException('Cmd:' + str(command) + ' failed with status code:'
+ str(output['statuscode'])
+ "\n, stdout:" + str(output['stdout'])
+ "\n, stderr:" + str(output['stderr']))
ret = output['stdout']
if ret:
if not carriage_return:
#remove the '\r' chars from the return buffer, leave '\n'
ret = ret.replace('\r','')
if listformat:
ret = ret.splitlines()
if include_stderr:
ret = ret.extend(output['stderr'].splitlines())
return ret
@classmethod
def get_traceback(cls):
'''
Returns a string buffer with traceback, to be used for debug/info purposes.
'''
try:
out = StringIO.StringIO()
traceback.print_exception(*sys.exc_info(),file=out)
out.seek(0)
buf = out.read()
except Exception, e:
buf = "Could not get traceback"+str(e)
return str(buf)
class CommandExitCodeException(Exception):
    """Raised by sys() when a command exits with an unexpected status code."""

    def __init__(self, value):
        # Keep the raw message available to callers.
        self.value = value

    def __str__(self):
        return repr(self.value)
class CommandTimeoutException(Exception):
    """Raised when a remote command exceeds its allotted time budget."""

    def __init__(self, value):
        # Keep the raw message available to callers.
        self.value = value

    def __str__(self):
        return repr(self.value)
|
Searching the web for a gutters contractors near Newburg Kentucky?
Do you need to have gutters for your Newburg, KY property?
How much more is it for gutter leaf guards to be installed with my new gutters near Newburg, KY?
Want a seamless gutter instead of a big box store sectional gutter in Newburg Kentucky?
The simple solution is to call Newburg Seamless Gutters for a seamless gutters price estimate within Newburg KY.
Are you annoyed when you take a walk outside and get dripped on from your roof? It’s time to get in touch with us for gutters for your property in Newburg, KY.
Contact the professionals at Newburg KY Seamless Gutters we can really help you in Newburg.
Newburg Seamless Gutters is the place to find a local seamless gutters contractor that services homeowners in Newburg, Kentucky.
Newburg Seamless Gutters installs and offers gutter leaf guards in Newburg, KY.
Do you need new gutters in the following zip codes: 40219.
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 17 22:02:16 2016
@author: cjs14
"""
#http://www.astrojack.com/tag/ipython-notebook/
#%matplotlib inline
from IPython.display import Image
import matplotlib.pyplot as plt
import numpy as np
from JSAnimation.IPython_display import display_animation
from matplotlib import animation
# Total number of animation frames; one full revolution of the point.
Nframes = 100
# First set up the figure, the axis, and the plot element we want to animate
fig = plt.figure(figsize=(6,6))
# Radius of the circle the point travels around.
rad = 0.5
# Main panel (top-left 2x2 cells): the circle and the moving point.
# ax = fig.add_subplot(111, xlim=(-2.*rad, 2.*rad), ylim=(-2.*rad, 2.*rad), aspect='equal')
ax = plt.subplot2grid((3,3), (0,0), colspan=2, rowspan=2,
                      xlim=(-2.*rad, 2.*rad), ylim=(-2.*rad, 2.*rad), aspect='equal')
circ = plt.Circle((0, 0), radius=rad, facecolor="None", edgecolor='k', lw=4)
ax.add_patch(circ)
ax.grid(False)
ax.axis('off')
# Animated point travelling along the circle.
circle, = ax.plot([], [], marker='o', ms=10)
# Bottom panel: x position of the point versus time (x axis shared with ax).
#ax1 = fig.add_subplot(212, ylim=(0, 2.*np.pi), xlim=(-2.*rad, 2.*rad))
ax1 = plt.subplot2grid((3,3), (2,0), colspan=2, ylim=(0, 2.*np.pi), xlim=(-2.*rad, 2.*rad), sharex=ax)
ax1.tick_params(axis='both', which='major', labelsize=10)
ax1.set_ylabel('time', fontsize=12)
ax1.set_xlabel('x position', fontsize=12)
x_pos_marker, = ax1.plot([], [], marker='o', ms=10, color='b')
x_pos_line, = ax1.plot([], [], color='k')
# Right panel: y position of the point versus time (y axis shared with ax).
#ax2 = fig.add_subplot(122, xlim=(0, 2.*np.pi), ylim=(-2.*rad, 2.*rad))
ax2 = plt.subplot2grid((3,3), (0,2), rowspan=2, xlim=(0, 2.*np.pi), ylim=(-2.*rad, 2.*rad), sharey=ax)
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position("right")
ax2.tick_params(axis='both', which='major', labelsize=10)
ax2.set_xlabel('time', fontsize=12)
ax2.set_ylabel('y position', fontsize=12)
y_pos_marker, = ax2.plot([], [], marker='o', ms=10, color='b')
y_pos_line, = ax2.plot([], [], color='k')
# initialization function: plot the background of each frame
def init():
    """Blank every animated artist so blitting starts from an empty frame."""
    for artist in (circle, x_pos_marker, y_pos_marker, x_pos_line, y_pos_line):
        artist.set_data([], [])
    return circle, x_pos_marker, y_pos_marker, x_pos_line, y_pos_line
# This function moves the polygons as a function of the frame i
def animate(i):
    """Advance to frame i: move the dot around the circle and refresh the
    marker/line artists on the two projection axes."""
    # Phase angle for this frame, sweeping 0..2*pi over the full animation.
    theta = 2.*np.pi*float(i/(Nframes - 1.))
    dot_x = rad*np.cos(theta)
    dot_y = rad*np.sin(theta)
    circle.set_data(dot_x, dot_y)
    x_pos_marker.set_data(dot_x, theta)
    y_pos_marker.set_data(theta, dot_y)
    # The two projection traces are static; they are recomputed each frame
    # only to keep all artist updates in one place.
    times = np.linspace(0, 2.*np.pi, Nframes)
    x_pos_line.set_data(rad*np.cos(times), times)
    y_pos_line.set_data(times, rad*np.sin(times))
    return circle, x_pos_marker, y_pos_marker, x_pos_line, y_pos_line
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate, init_func=init,
                               frames=Nframes, interval=20, blit=True)
# call our new function to display the animation
# (JSAnimation renders the FuncAnimation as an HTML/JS widget in the notebook)
display_animation(anim)
|
Attendees will be given the opportunity to develop confidence as a Course Director for the Certified Angling Instructor (CAI) Train the Trainer Program. The BSA National Fishing Committee's most sought-after mentors and thought leaders share their passion for this outstanding opportunity for personal growth and development. Highly interactive classroom and field activities capitalize on the group’s expertise and feedback. Program sessions include: promotion, scheduling, staffing, design, planning, budgeting, and administration. If you would like to lead the adventure of BSA Fishing Programs, this course is for you! FISH ON!
|
from lxml import etree
from insalata.model.Location import Location
from insalata.model.Layer2Network import Layer2Network
from insalata.model.Layer3Network import Layer3Network
from insalata.model.Interface import Interface
def scan(graph, connectionInfo, logger, thread):
    """
    Load the network topology given in an XML file into the graph.

    Timer is -1 => Objects will not be deleted.
    Therefore, the infrastructure in the XML file is loaded permanently until
    the XML is changed. The module is able to detect changes in the XML file,
    so it is possible to modify the loaded information at runtime.

    Necessary values in the configuration file of this collector module:
        - file      Path to the XML file the collector module shall parse

    :param graph: Data interface object for this collector module
    :type graph: insalata.model.Graph.Graph

    :param connectionInfo: Information needed to connect to xen server
    :type connectionInfo: dict

    :param logger: The logger this scanner shall use
    :type logger: logging:Logger

    :param thread: Thread executing this collector
    :type thread: insalata.scanner.Worker.Worker
    """
    logger.info("Reading xml file '{0}' into internal graph.".format(connectionInfo['file']))

    timeout = -1  # -1 => elements read here never expire
    name = connectionInfo['name']

    configXml = etree.parse(connectionInfo['file'])

    readElements = set()
    readLocations(graph, configXml.find("locations"), logger, name, timeout, readElements)
    readL2Networks(graph, configXml.find("layer2networks"), logger, name, timeout, readElements)
    readL3Networks(graph, configXml.find("layer3networks"), logger, name, timeout, readElements)
    readHosts(graph, configXml.xpath(".//host[not(@control)]"), logger, name, timeout, readElements)

    # Elements that no longer appear in the XML lose this collector's
    # verification. Compare with == (not set membership) because the model
    # objects define their own equality semantics.
    for element in graph.getAllNeighbors():
        if not any(el == element for el in readElements):
            element.removeVerification(name)
def readHosts(graph, hostsXml, logger, name, timeout, readElements):
    """
    Load hosts of the xml file into the graph.

    :param graph: Data Interface object for this scanner
    :type graph: :class: `Graph`

    :param hostsXml: Part of the parsed XML containing the hosts.
    :type hostsXml: list

    :param logger: The logger this scanner shall use
    :type logger: seealso:: :class:`logging:Logger`

    :param name: Name this collector module uses for verification
    :type name: str

    :param timeout: Timeout of this collector module
    :type timeout: int

    :param readElements: Set containing every read element from the XML -> Allows to delete not longer existing ones
    :type readElements: set
    """
    logger.debug("Reading all hosts given in XML")
    if not hostsXml:
        return
    for hostXml in hostsXml:
        # Resolve the location the host lives in; fall back to the 'physical'
        # attribute when no explicit 'location' attribute is given.
        if "location" in hostXml.attrib:
            location = graph.getOrCreateLocation(hostXml.attrib["location"], name, timeout)
        else:
            # NOTE(review): unlike the call above, this one passes no
            # name/timeout arguments — confirm getOrCreateLocation's defaults
            # record verification correctly in this path.
            location = graph.getOrCreateLocation(hostXml.attrib["physical"])
        readElements.add(location)
        # NOTE(review): if no template matches, 'template' stays an empty list
        # and is passed as-is to getOrCreateHost — relies on the preprocessor
        # guaranteeing a matching template.
        template = [t for t in location.getTemplates() if t.getID() == hostXml.attrib["template"]]
        if len(template) > 0:
            template = template[0] #No template shouldn't be possible if xml passed the preprocessor
        host = graph.getOrCreateHost(hostXml.attrib["id"], name, timeout, location, template)
        readElements.add(host)
        logger.debug("Found host: {0}.".format(hostXml.attrib["id"]))
        # Optional hardware attributes.
        if "cpus" in hostXml.attrib:
            host.setCPUs(int(hostXml.attrib["cpus"]))
        if ("memoryMin" in hostXml.attrib) and ("memoryMax" in hostXml.attrib):
            host.setMemory(int(hostXml.attrib["memoryMin"]), int(hostXml.attrib["memoryMax"]))
        if "powerState" in hostXml.attrib:
            host.setPowerState(hostXml.attrib["powerState"])
        #interfaces, routing, firewall rules and disks added with edges
        if hostXml.find("interfaces") is not None:
            readInterfaces(graph, hostXml.find("interfaces"), host, logger, name, timeout, readElements)
        if hostXml.find("routes") is not None:
            readRoutes(graph, hostXml.find("routes"), host, logger, name, timeout, readElements)
        if hostXml.find("disks") is not None:
            readDisks(graph, hostXml.find("disks"), host, logger, name, timeout, readElements)
        if hostXml.find(".//firewallRules") is not None:
            readFirewallRules(graph, hostXml.find(".//firewallRules"), host, logger, name, timeout, readElements)
        #find firewall raw data
        if hostXml.find('.//raw') is not None:
            rawXml = hostXml.find('.//raw')
            if rawXml is not None:
                raw = graph.getOrCreateFirewallRaw(name, timeout, host, rawXml.attrib["firewall"], rawXml.text)
                host.setFirewallRaw(raw)
                readElements.add(raw)
def readInterfaces(graph, interfacesXml, host, logger, name, timeout, readElements):
    """
    Load all interfaces of a host. The interfaces will be added to the host.

    :param graph: Data Interface object for this scanner
    :type graph: :class: `Graph`

    :param interfacesXml: Part of the parsed XML containing the interfaces of the current host.
    :type interfacesXml: list

    :param host: The host that contains the read interfaces
    :type host: insalata.model.Host.Host

    :param logger: The logger this scanner shall use
    :type logger: seealso:: :class:`logging:Logger`

    :param name: Name this collector module uses for verification
    :type name: str

    :param timeout: Timeout of this collector module
    :type timeout: int

    :param readElements: Set containing every read element from the XML -> Allows to delete not longer existing ones
    :type readElements: set
    """
    # Single emptiness check (the old code tested the element twice).
    if not interfacesXml:
        return
    logger.debug("Reading interfaces from XML.")
    for ifaceXml in interfacesXml.findall("interface"):
        # An interface is only usable when it references a known layer-2 network.
        if "network" not in ifaceXml.attrib:
            logger.warning("No network attribute found for interface '{0}'.".format(ifaceXml.attrib["mac"]))
            continue
        networks = [n for n in graph.getAllNeighbors(Layer2Network) if n.getID() == ifaceXml.attrib["network"]]
        if len(networks) == 0:
            logger.warning("No suitable network found for interface '{0}'.".format(ifaceXml.attrib["mac"]))
            continue
        network = networks[0]
        interface = graph.getOrCreateInterface(ifaceXml.attrib["mac"], name, timeout, network=network)
        readElements.add(interface)
        logger.debug("Found Interface with mac: {0}.".format(interface.getID()))
        # Optional link attributes.
        if "rate" in ifaceXml.attrib:
            interface.setRate(ifaceXml.attrib["rate"])
        if "mtu" in ifaceXml.attrib:
            interface.setMtu(ifaceXml.attrib["mtu"])
        host.addInterface(interface, name, timeout)
        readLayer3Addresses(graph, ifaceXml.findall("layer3address"), interface, logger, name, timeout, readElements)
def readLayer3Addresses(graph, layer3AddressesXml, interface, logger, name, timeout, readElements):
    """
    Load all Layer3Addresses of a interface. The addresses will be added to the interface automatically.

    :param graph: Data Interface object for this scanner
    :type graph: :class: `Graph`

    :param layer3AddressesXml: Part of the parsed XML containing the Layer3Addresses of the interface.
    :type layer3AddressesXml: list

    :param interface: The interface containing the addresses
    :type interface: insalata.model.Interface.Interface

    :param logger: The logger this scanner shall use
    :type logger: seealso:: :class:`logging:Logger`

    :param name: Name this collector module uses for verification
    :type name: str

    :param timeout: Timeout of this collector module
    :type timeout: int

    :param readElements: Set containing every read element from the XML -> Allows to delete not longer existing ones
    :type readElements: set
    """
    if not layer3AddressesXml:
        return
    logger.debug("Read all Layer3Addresses of interface {0} in XML.".format(interface.getID()))
    for addressXml in layer3AddressesXml:
        network = None
        # Initialise netmask per address: the old code left it unbound (or
        # stale from the previous iteration) when no network attribute existed.
        # An explicit netmask attribute always wins over the network's netmask.
        netmask = addressXml.attrib.get("netmask")
        if "network" in addressXml.attrib:
            candidates = [n for n in graph.getAllNeighbors(Layer3Network) if n.getID() == addressXml.attrib["network"]]
            if len(candidates) == 0:
                logger.warning("No suitable network found for {0}.".format(addressXml.attrib["network"]))
                netmask = None
            else:
                network = candidates[0]
                if netmask is None:
                    netmask = network.getNetmask()
        gateway = None if "gateway" not in addressXml.attrib else addressXml.attrib["gateway"]
        address = graph.getOrCreateLayer3Address(addressXml.attrib["address"], name, timeout, netmask, gateway)
        readElements.add(address)
        # Addresses default to static unless the XML explicitly says otherwise.
        address.setStatic(addressXml.attrib.get("static", "True") == "True")
        if network:
            address.setNetwork(network)
        interface.addAddress(address, name, timeout)
        #get services
        if addressXml.find("services") is not None:
            readServices(graph, addressXml.find("services"), address, logger, name, timeout, readElements)
def readServices(graph, servicesXml, address, logger, name, timeout, readElements):
    """
    Load all services of a Layer3Address. The services will be added automatically.

    :param graph: Data Interface object for this scanner
    :type graph: :class: `Graph`

    :param servicesXml: Part of the parsed XML containing the services of this address.
    :type servicesXml: list

    :param address: The Layer3Address the services are provided on
    :type address: insalata.model.Layer3Address.Layer3Address

    :param logger: The logger this scanner shall use
    :type logger: seealso:: :class:`logging:Logger`

    :param name: Name this collector module uses for verification
    :type name: str

    :param timeout: Timeout of this collector module
    :type timeout: int

    :param readElements: Set containing every read element from the XML -> Allows to delete not longer existing ones
    :type readElements: set
    """
    if not servicesXml:
        return
    logger.debug("Reading Services from XML for address: {0}.".format(address.getID()))
    for serviceXml in servicesXml:
        #special dhcp service
        if serviceXml.tag == "dhcp":
            service = graph.getOrCreateDhcpService(name, timeout, address)
            if "lease" in serviceXml.attrib:
                service.setLease(serviceXml.attrib["lease"])
            # Both boundaries are required; the old ("from" or "to") test only
            # ever checked "from" and raised a KeyError when "to" was missing.
            if "from" in serviceXml.attrib and "to" in serviceXml.attrib:
                service.setStartEnd(serviceXml.attrib["from"], serviceXml.attrib["to"])
            if "announcedGateway" in serviceXml.attrib:
                service.setAnnouncedGateway(serviceXml.attrib["announcedGateway"])
        #special dns service
        elif serviceXml.tag == "dns":
            service = graph.getOrCreateDnsService(name, timeout, address)
            if "domain" in serviceXml.attrib:
                service.setDomain(serviceXml.attrib["domain"])
        #add more special services here, e.g. http
        #generic unknown services
        else:
            # 'type' may be absent; use .get to avoid a KeyError.
            service = graph.getOrCreateService(serviceXml.attrib["port"], serviceXml.attrib["protocol"], name, timeout, serviceXml.attrib.get("type"), address)
            if "type" in serviceXml.attrib:
                service.setName(serviceXml.attrib["type"])
            if "product" in serviceXml.attrib:
                service.setProduct(serviceXml.attrib["product"])
            if "version" in serviceXml.attrib:
                service.setVersion(serviceXml.attrib["version"])
        readElements.add(service)
        address.addService(service, name, timeout)
def readRoutes(graph, routingXml, host, logger, name, timeout, readElements):
    """
    Load all routes of a host. The routes will be added to the host automatically.

    :param graph: Data Interface object for this scanner
    :type graph: :class: `Graph`

    :param routingXml: Part of the parsed XML containing the routes.
    :type routingXml: list

    :param host: The host that contains the read routes.
    :type host: insalata.model.Host.Host

    :param logger: The logger this scanner shall use
    :type logger: seealso:: :class:`logging:Logger`

    :param name: Name this collector module uses for verification
    :type name: str

    :param timeout: Timeout of this collector module
    :type timeout: int

    :param readElements: Set containing every read element from the XML -> Allows to delete not longer existing ones
    :type readElements: set
    """
    if not routingXml:
        return
    logger.debug("Reading all Routes from XML for host {0}.".format(host.getID()))
    for routeXml in routingXml:
        interface = None
        if "interface" in routeXml.attrib:
            candidates = [i for i in graph.getAllNeighbors(Interface) if i.getID() == routeXml.attrib["interface"]]
            if len(candidates) == 0:
                # Keep interface as None (the old code passed an empty list on).
                logger.debug("No interface found for route. Interface: {0}.".format(routeXml.attrib["interface"]))
            else:
                interface = candidates[0]
        route = graph.getOrCreateRoute(name, timeout, host, routeXml.attrib["destination"], routeXml.attrib["genmask"], routeXml.attrib["gateway"], interface)
        host.addRoute(route, name, timeout)
        readElements.add(route)
def readFirewallRules(graph, rulesXml, host, logger, name, timeout, readElements):
    """
    Load all firewall rules of a host. The rules will be added to the host automatically.

    :param graph: Data Interface object for this scanner
    :type graph: :class: `Graph`

    :param rulesXml: Part of the parsed XML containing the firewall rules.
    :type rulesXml: list

    :param host: The host that contains the read firewall rules.
    :type host: insalata.model.Host.Host

    :param logger: The logger this scanner shall use
    :type logger: seealso:: :class:`logging:Logger`

    :param name: Name this collector module uses for verification
    :type name: str

    :param timeout: Timeout of this collector module
    :type timeout: int

    :param readElements: Set containing every read element from the XML -> Allows to delete not longer existing ones
    :type readElements: set
    """
    if not rulesXml:
        return
    logger.debug("Reading all firewall rules from XML for host {0}.".format(host.getID()))
    # The old code ran a stray, duplicated partial loop over rulesXml before
    # the real one; rules are now processed exactly once.
    for ruleXml in rulesXml:
        # Resolve optional in/out interfaces; unknown or missing ids give None.
        matches = [i for i in graph.getAllNeighbors(Interface) if "inInterface" in ruleXml.attrib and i.getID() == ruleXml.attrib["inInterface"]]
        inInterface = matches[0] if len(matches) > 0 else None
        matches = [i for i in graph.getAllNeighbors(Interface) if "outInterface" in ruleXml.attrib and i.getID() == ruleXml.attrib["outInterface"]]
        outInterface = matches[0] if len(matches) > 0 else None
        # Initialise every optional attribute; chain/action were previously
        # left unset and raised a NameError when missing from the XML.
        chain = action = srcnet = destnet = srcports = destports = protocol = None
        if "chain" in ruleXml.attrib:
            chain = ruleXml.attrib["chain"]
        if "action" in ruleXml.attrib:
            action = ruleXml.attrib["action"]
        if "srcnet" in ruleXml.attrib:
            srcnet = ruleXml.attrib["srcnet"]
        if "destnet" in ruleXml.attrib:
            destnet = ruleXml.attrib["destnet"]
        if "srcports" in ruleXml.attrib:
            srcports = ruleXml.attrib["srcports"]
        if "destports" in ruleXml.attrib:
            destports = ruleXml.attrib["destports"]
        if "protocol" in ruleXml.attrib:
            protocol = ruleXml.attrib["protocol"]
        rule = graph.getOrCreateFirewallRule(name, timeout, host, chain, action, protocol, srcnet, destnet, srcports, destports, inInterface, outInterface)
        host.addFirewallRule(rule, name, timeout)
        readElements.add(rule)
def readDisks(graph, disksXml, host, logger, name, timeout, readElements):
    """
    Load all disks of a host. The disks will be added to the host.

    :param graph: Data Interface object for this scanner
    :type graph: :class: `Graph`

    :param disksXml: Part of the parsed XML containing the disks of the current host.
    :type disksXml: list

    :param host: The host that contains the read interfaces.
    :type host: insalata.model.Host.Host

    :param logger: The logger this scanner shall use
    :type logger: seealso:: :class:`logging:Logger`

    :param name: Name this collector module uses for verification
    :type name: str

    :param timeout: Timeout of this collector module
    :type timeout: int

    :param readElements: Set containing every read element from the XML -> Allows to delete not longer existing ones
    :type readElements: set
    """
    # The old implementation processed every disk twice (the whole loop was
    # duplicated); each disk is now read exactly once.
    if not disksXml:
        return
    logger.debug("Read all disks on host {0}.".format(host.getID()))
    for diskXml in disksXml:
        logger.debug("Found disk {0} for host {1}.".format(diskXml.attrib["id"], host.getID()))
        disk = graph.getOrCreateDisk(diskXml.attrib["id"], name, timeout, host)
        if "size" in diskXml.attrib:
            disk.setSize(int(diskXml.attrib["size"]))
        logger.debug("Adding disk '{0}' to host '{1}'".format(disk.getID(), host.getID()))
        host.addDisk(disk, name, timeout)
        readElements.add(disk)
def readL2Networks(graph, l2networksXml, logger, name, timeout, readElements):
    """
    Load all Layer2Networks given in the XML.

    :param graph: Data Interface object for this scanner
    :type graph: :class: `Graph`

    :param l2networksXml: Part of the parsed XML containing the networks.
    :type l2networksXml: list

    :param logger: The logger this scanner shall use
    :type logger: seealso:: :class:`logging:Logger`

    :param name: Name this collector module uses for verification
    :type name: str

    :param timeout: Timeout of this collector module
    :type timeout: int

    :param readElements: Set containing every read element from the XML -> Allows to delete not longer existing ones
    :type readElements: set
    """
    # Single emptiness check (the old code tested the element twice).
    if not l2networksXml:
        return
    logger.debug("Reading Layer2Networks from XML.")
    for netXml in l2networksXml.findall("layer2network"):
        # Networks without an explicit location land in the 'physical' one.
        if "location" in netXml.attrib:
            location = graph.getOrCreateLocation(netXml.attrib["location"], name, timeout)
        else:
            location = graph.getOrCreateLocation("physical", name, timeout)
        readElements.add(location)
        readElements.add(graph.getOrCreateLayer2Network(netXml.attrib["id"], name, timeout, location))
        logger.debug("Found Layer2Network {0} in location {1}.".format(netXml.attrib["id"], location.getID()))
def readL3Networks(graph, l3networksXml, logger, name, timeout, readElements):
    """
    Load all Layer3Networks given in the XML.

    :param graph: Data Interface object for this scanner
    :type graph: :class: `Graph`
    :param l3networksXml: Part of the parsed XML containing the networks.
    :type l3networksXml: list
    :param logger: The logger this scanner shall use
    :type logger: seealso:: :class:`logging:Logger`
    :param name: Name this collector module uses for verification
    :type name: str
    :param timeout: Timeout of this collector module
    :type timeout: int
    :param readElements: Set containing every read element from the XML -> Allows to delete not longer existing ones
    :type readElements: set
    """
    # Nothing to do when the XML part is missing or empty.  The original
    # used two separate guards (``is None`` return plus a truthiness-gated
    # log); a single guard is equivalent and matches the sibling readers.
    if not l3networksXml:
        return
    logger.debug("Reading Layer3Networks from XML.")
    for netXml in l3networksXml.findall("layer3network"):
        readElements.add(graph.getOrCreateLayer3Network(netXml.attrib["id"], name, timeout, netXml.attrib["address"], netXml.attrib["netmask"]))
        logger.debug("Found Layer3Network: {0}.".format(netXml.attrib["id"]))
def readLocations(graph, locationsXml, logger, name, timeout, readElements):
    """
    Load all Locations given in the XML.

    :param graph: Data Interface object for this scanner
    :type graph: :class: `Graph`
    :param locationsXml: Part of the parsed XML containing the locations.
    :type locationsXml: list
    :param logger: The logger this scanner shall use
    :type logger: seealso:: :class:`logging:Logger`
    :param name: Name this collector module uses for verification
    :type name: str
    :param timeout: Timeout of this collector module
    :type timeout: int
    :param readElements: Set containing every read element from the XML -> Allows to delete not longer existing ones
    :type readElements: set
    """
    # Nothing to do when the XML part is missing or empty.  (The original
    # re-checked ``if locationsXml:`` right after this guard, which was
    # always true at that point -- dead code, removed.)
    if not locationsXml:
        return
    logger.debug("Reading Locations from XML.")
    for locationXml in locationsXml.findall("location"):
        location = graph.getOrCreateLocation(locationXml.attrib["id"], name, timeout)
        logger.debug("Found location: {0}.".format(location.getID()))
        readElements.add(location)
|
Feeling sluggish and mentally not on point? Our friends at Sunwarrior have some suggestions for us! Your body may be looking for a little liquid medicine to help you over the hump. When you think about it, an average adult needs approximately two liters of fluid per day. This daily average does not take into consideration additional medical conditions or exercise per day. With other body needs, we realistically need more than the basic daily guideline. Other than ingesting basic water (which we want to drink as much as possible—during exercise particularly) some other great choices are available. I know this can sound taxing to many, but it does not have to be! Here are my top 6 beverages for a healthy mind and body!
Herbal teas are a great way to restore Zen to your body. Herbal teas can also support your body and mind with herbs and spices that help with digestion, sleep, brain fog, immunity, and energy. Drinking herbal teas throughout the day and into the evening is an easy and efficient way to purify your mind and body.
With an abundance of antioxidants and naturally occurring nitrates, juicing and drinking this food means you are doing your body good. You are increasing blood flow throughout your body and enabling better brain focus and higher energy.
This fermented tea is full of probiotics. Along with being able to help tummy digestion, and fight off unwanted yeast in the body, kombucha aids in mental clarity and mood stability as well. You can make your own, or buy this drink at your local grocery store.
4. Aloe. We know that aloe gel is a useful home remedy for external burns and moisturizing agents, but did you know that drinking aloe is just as powerful internally? Loaded with vitamins, minerals, and antioxidants, aloe juice can be easily found in the grocery store and can be added to your daily routine to keep your body healthy and happy. It aids in boosting your immunity, helps blood circulation, and balances your metabolism. You can drink it alone or add to a smoothie.
5. Smoothies. Looking for an easy way to incorporate a high amount of antioxidants and nutrients into your body? Smoothies can be your one-stop shop to energize, nourish, detoxify, and cleanse your body. Keeping your smoothie clean and fresh are your best options. You can make a fast and easy smoothie with Sunwarrior blends such as illumin8 or Warrior Blend.
6. Lemon Water. Adding lemon to warm water is an effective tool towards aiding digestion and improving your immune system with an increase in vitamin C. Lemon water can kill a few pesky germs which will help with fresher breath as well.
So there you have it, folks. Drinking healthy beverages is not so daunting after all. In fact, my choice of beverages spread throughout the day can be easy additions to your daily water routine. By keeping your fluid intake up throughout the day, you are keeping your mind and body alert and ready to take on the day! Remember, if you feel thirsty, you are already dehydrated, and you begin to feel the effects through lack of concentration, and motivation. Keep yourself hydrated, happy, and alert!
Get healthy with Sunwarrior’s free fitness challenge! They’ll give you a free meal plan and a free exercise regimen to follow so you can feel your best!
|
# Given two strings s and t, determine if they are both one edit distance apart.
#
# Note:
#
# There are 3 possiblities to satisify one edit distance apart:
#
# Insert a character into s to get t
# Delete a character from s to get t
# Replace a character of s to get t
# Example 1:
#
# Input: s = "ab", t = "acb"
# Output: true
# Explanation: We can insert 'c' into s to get t.
# Example 2:
#
# Input: s = "cab", t = "ad"
# Output: false
# Explanation: We cannot get t from s by only one step.
# Example 3:
#
# Input: s = "1203", t = "1213"
# Output: true
# Explanation: We can replace '0' with '1' to get t.
class Solution(object):
    def isOneEditDistance(self, s, t):
        """
        Return True when exactly one insert, delete, or replace turns s into t.

        :type s: str
        :type t: str
        :rtype: bool
        """
        # Normalize so that s is never the longer string, then rule out the
        # impossible cases: identical strings (zero edits) or a length gap
        # greater than one (at least two edits).
        m, n = len(s), len(t)
        if m > n:
            return self.isOneEditDistance(t, s)
        if n - m > 1 or s == t:
            return False
        # Walk both strings in lockstep; at the first mismatch the suffixes
        # decide whether a single replace (equal lengths) or a single insert
        # into s (t one longer) finishes the job.
        for i, (cs, ct) in enumerate(zip(s, t)):
            if cs != ct:
                return s[i + 1:] == t[i + 1:] or s[i:] == t[i + 1:]
        # No mismatch within s: t must be s plus exactly one trailing char.
        return True
# Note:
#
|
it was for basic flippant reading for me. it looked appealing because of its entertaining premise of a 31ish woman was pursuing marriage while her current state, on the opposite wasn’t so engaging.
I gave it a 1.5 because, although I wanted to get to its conclusion and find out whether Italian Angie finds love or not, it was still clichéd and somewhat lacking in character realism. Everyone seemed to be bold and beautiful, with very superficial conflicts. I remember rolling my eyes at numerous parts of the book.
it wasn’t bad…you’ll get through it without cringing and complaining…it’s indifferently entertaining, but it’s not in my “favorites” panel of choice.
|
from django.test import SimpleTestCase
from django.core.exceptions import ValidationError
from multi_email_field.forms import MultiEmailField as MultiEmailFormField
from multi_email_field.widgets import MultiEmailWidget
class MultiEmailFormFieldTest(SimpleTestCase):
    """Tests for the ``MultiEmailField`` form field.

    Note: ``assertEquals`` was replaced with ``assertEqual`` -- the former
    is a deprecated alias that was removed from unittest in Python 3.12.
    """

    def test_widget(self):
        """The field must use the multi-email widget by default."""
        f = MultiEmailFormField()
        self.assertIsInstance(f.widget, MultiEmailWidget)

    def test_to_python(self):
        """``to_python`` must split and strip raw input into an address list."""
        f = MultiEmailFormField()
        # Empty values
        for val in ['', None]:
            self.assertEqual([], f.to_python(val))
        # One line correct value
        val = ' foo@bar.com '
        self.assertEqual(['foo@bar.com'], f.to_python(val))
        # Multi lines correct values (test of #0010614)
        val = 'foo@bar.com\nfoo2@bar2.com\r\nfoo3@bar3.com'
        self.assertEqual(['foo@bar.com', 'foo2@bar2.com', 'foo3@bar3.com'],
                         f.to_python(val))

    def test_validate(self):
        """``validate`` must reject empty or malformed address lists."""
        f = MultiEmailFormField(required=True)
        # Empty value
        val = []
        self.assertRaises(ValidationError, f.validate, val)
        # Incorrect value
        val = ['not-an-email.com']
        self.assertRaises(ValidationError, f.validate, val)
        # An incorrect value with correct values
        val = ['foo@bar.com', 'not-an-email.com', 'foo3@bar3.com']
        self.assertRaises(ValidationError, f.validate, val)
        # Should not happen (to_python do the strip)
        val = [' foo@bar.com ']
        self.assertRaises(ValidationError, f.validate, val)
        # Correct value
        val = ['foo@bar.com']
        f.validate(val)
|
High paying writing jobs zippia maps show the highest and lowest how to get paid write online careers best ideas about fun accounting. Hired for physical therapy accountant essay you can do from home at home. Work looking lance red flags avoid business insider. Openings job search uk available in my area victoria tx technical tw one of salaries marketing most social creative that will make life easier born realist pay well san antonio help cheap deals on line.
Writing jobs how to get paid write online will help you the highest paying openings business. Reviews net scam or legit elite different types of journalism lancewriting best charts galore images money fast quick technical tw one in these be looking high earning work from home frugal rules. List ideas careers about fun accounting teaching abroad com top places blogging zippia maps show and lowest real why pay for something that s are lancers big bucks demand skills lance articles lancewriting. Insider well amazing websites write. What programming.
High paying writing jobs zippia maps show the highest and lowest online fom home earn per day live support that pay well best images about lance. Technical tw one of in paid top tips to job sites for writers. Openings business will help you how get write writers young professionals insider careers can do more boards where america howard students academic introduces resources super scholar. Rs hour maf accountants contractors land virtual from ideas make money ways cheap deals on line at now hiring must see pov fun accounting most popular fields study survey bares fresh graduates is a scam it s written trap what are programming earning people who hate.
|
# ***** BEGIN LICENSE BLOCK *****
#
# For copyright and licensing please refer to COPYING.
#
# ***** END LICENSE BLOCK *****
from setuptools import setup
import os
import platform
# Conditionally include additional modules for docs
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
requirements = list()
if on_rtd:
    requirements.append('tornado')

# Conditionally include unittest2 for versions of python < 2.7
tests_require = ['nose', 'mock']
platform_version = list(platform.python_version_tuple())[0:2]
if platform_version[0] != '3' and platform_version != ['2', '7']:
    tests_require.append('unittest2')

long_description = ('Pika is a pure-Python implementation of the AMQP 0-9-1 '
                    'protocol that tries to stay fairly independent of the '
                    'underlying network support library. Pika was developed '
                    'primarily for use with RabbitMQ, but should also work '
                    'with other AMQP 0-9-1 brokers.')

setup(name='pika',
      version='0.9.10p0',
      description='Pika Python AMQP Client Library',
      long_description=long_description,
      author='Tony Garnock-Jones',
      author_email='tonygarnockjones@gmail.com',
      maintainer='Gavin M. Roy',
      maintainer_email='gmr@meetme.com',
      # Trailing whitespace removed from the project URL (it was part of
      # the string and would be published verbatim in the package metadata).
      url='https://github.com/pika',
      packages=['pika', 'pika.adapters'],
      license='MPL v1.1 and GPL v2.0 or newer',
      install_requires=requirements,
      tests_require=tests_require,
      test_suite='nose.collector',
      classifiers=[
          'Development Status :: 4 - Beta',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: GNU General Public License (GPL)',
          'License :: OSI Approved :: Mozilla Public License 1.1 (MPL 1.1)',
          'Operating System :: OS Independent',
          'Topic :: Communications',
          'Topic :: Internet',
          'Topic :: Software Development :: Libraries',
      ],
      zip_safe=True)
|
St. Luke’s would not be where we are today without our donors and their generous gifts of time and financial resources. Our donors have come through for St. Luke’s for more than a century, helping us keep our communities healthy and strong.
Ron Jones Jordan-Wilcomb Construction, Inc.
|
from django.db import models
class FlightOption(models.Model):
    """A flight itinerary proposed to a speaker.

    Tracks the option through its workflow flags: sent -> seen -> approved
    or rejected, each step paired with its own timestamp field.
    """
    def __unicode__(self):
        # NOTE(review): __unicode__ is the Python 2 / legacy-Django string
        # hook; on Python 3 Django uses __str__ -- confirm target version.
        return "Flight Option #%s for %s" % (self.id, self.speaker)
    speaker = models.ForeignKey('speakers.Speaker')
    route_description = models.TextField(blank=True)
    # arrival
    arrival_date = models.DateTimeField(blank=True, null=True)
    arrival_observations = models.TextField(blank=True)
    # departure
    departure_date = models.DateTimeField(blank=True, null=True)
    departure_observations = models.TextField(blank=True)
    # sent
    sent = models.BooleanField(default=False)
    sent_timestamp = models.DateTimeField(blank=True, null=True)
    # seen
    seen = models.BooleanField(default=False)
    seen_timestamp = models.DateTimeField(blank=True, null=True)
    # approved
    approved = models.BooleanField(default=False)
    approved_timestamp = models.DateTimeField(blank=True, null=True)
    # rejected  (original comment wrongly said "approved")
    rejected = models.BooleanField(default=False)
    rejected_timestamp = models.DateTimeField(blank=True, null=True)
    rejected_reason = models.TextField(blank=True)
    # meta: creation / modification audit timestamps (Portuguese labels)
    criado = models.DateTimeField(blank=True, auto_now_add=True, verbose_name="Criado")
    atualizado = models.DateTimeField(blank=True, auto_now=True, verbose_name="Atualizado")
|
Commercial Services: We provide professional commercial appraisals of various property types all over the State of Massachusetts; our “core” coverage area consists of Suffolk, Norfolk, and Middlesex Counties. If you are in need of appraisal services, you can request a bid by sending us an email.
Residential Services: Three county coverage, Suffolk, Norfolk and Middlesex, with multi-discipline appraisal staff. FHA, VA, REO, conventional, and high end Multi-Million dollar estates.
Commercial Fees: All Commercial projects are bid individually based on scope of work and reporting requirements. Send us a request by email with property addresses or property descriptions and general locations and we can produce a written bid document explaining the scope and reporting requirements recommended for your specific project.
Residential Fees: You will find the fees that we charge are very competitive for the menu of services that we offer, and we know of no other local appraisal firm that advertises their rates.
|
#!/usr/local/bioinfo/python/3.4.3_build2/bin/python
# -*- coding: utf-8 -*-
# @package grepMotifFromAlignment.py
# @author Sebastien Ravel
"""
The grepMotifFromAlignment script
=================================
:author: Sebastien Ravel
:contact: sebastien.ravel@cirad.fr
:date: 08/07/2016
:version: 0.1
Script description
------------------
This Programme parse Aligment info to build motif table of SNP in gene's
Example
-------
>>> grepMotifFromAlignment.py -d path/to/fasta -o filenameout
Help Programm
-------------
optional arguments:
- \-h, --help
show this help message and exit
- \-v, --version
display grepMotifFromAlignment.py version number and exit
Input mandatory infos for running:
- \-d <path/to/directory>, --directory <path/to/directory>
path to directory fasta files
- \-o <filename>, --out <filename>
Name of output file
Input infos for running with default values:
- \-l <filename>, --list <filename>
File with Strain to keep (one per row), default keep all strains
"""
##################################################
## Modules
##################################################
#Import MODULES_SEB
import sys, os
current_dir = os.path.dirname(os.path.abspath(__file__))+"/"
sys.path.insert(1,current_dir+'../modules/')
from MODULES_SEB import directory, dictList2txt, dictDict2txt, dict2txt, relativeToAbsolutePath, existant_file, sort_human
## Python modules
import argparse
from time import localtime, strftime
## BIO Python modules
from Bio import AlignIO
from Bio.Align import AlignInfo, MultipleSeqAlignment
##################################################
## Variables Globales
version="0.1"  # reported by the --version command-line flag
VERSION_DATE='04/03/2015'  # release date of this version (not referenced in this chunk)
debug="False"  # legacy string flag; flip to "True" to enable debug behaviour
#debug="True"
##################################################
## Functions
##################################################
## Main code
##################################################
if __name__ == "__main__":

    # Initializations
    start_time = strftime("%d-%m-%Y_%H:%M:%S", localtime())

    # Parameters recovery
    parser = argparse.ArgumentParser(prog='grepMotifFromAlignment.py', description='''This Programme parse Aligment info to build motif table of SNP in gene's''')
    parser.add_argument('-v', '--version', action='version', version='You are using %(prog)s version: ' + version, help='display grepMotifFromAlignment.py version number and exit')

    filesreq = parser.add_argument_group('Input mandatory infos for running')
    filesreq.add_argument('-d', '--directory', metavar="<path/to/directory>", type=directory, required=True, dest='pathDirectory', help='path to directory fasta files')
    filesreq.add_argument('-o', '--out', metavar="<filename>", required=True, dest='paramoutfile', help='Name of output file')

    files = parser.add_argument_group('Input infos for running with default values')
    files.add_argument('-l', '--list', metavar="<filename>", default="ALL", dest='listKeepFile', help='File with Strain to keep (one per row), default keep all strains')

    # Check parameters
    args = parser.parse_args()

    # Welcome message
    print("#################################################################")
    print("# Welcome in grepMotifFromAlignment (Version " + version + ") #")
    print("#################################################################")
    print('Start time: ', start_time,'\n')

    # Recover the input/output paths given on the command line.
    pathDirectory = args.pathDirectory
    outputfilename = relativeToAbsolutePath(args.paramoutfile)
    print("\t - Input pathDirectory is: %s" % pathDirectory)
    print("\t - Output file name is: %s" % outputfilename)

    if args.listKeepFile not in ["ALL"]:
        # BUGFIX: the original called loadInList(), which is never imported,
        # and then used the undefined name ``paramlistKeep`` -- both were
        # guaranteed NameErrors.  Read the strain list directly and derive
        # the output basename from the given file name.
        with open(existant_file(args.listKeepFile), "r") as listHandle:
            listKeepSouche = [line.strip() for line in listHandle if line.strip() != ""]
        print("\t - You want to keep strains:\n%s" % "\n".join(listKeepSouche))
        basename = args.listKeepFile.split(".")[0]
    else:
        listKeepSouche = []
        print("\t - You want to keep all strains \n")
        basename = "All"

    dicoOutputTxt = {}          # strain ID -> list of motif codes (one per file)
    dicoSeqSNP = {}             # strain ID -> concatenated SNP characters (per file)
    dicoFilenbSNP = {}          # SNP count -> number of files with that count
    dicoFileCountSNP = {}       # file (MGG) name -> {"NBSNP": ..., "lenAlign": ...}
    fileEmpty = 0
    listFileEmpty = []
    ctr = 1
    nbMotifTotal = 0
    for filein in pathDirectory.listFiles:
        ctr += 1
        # Lightweight progress display every 100 files and at the end.
        if ((ctr % 100 == 0) and (ctr != 0)) or (float(ctr) == len(pathDirectory.listFiles)):
            percent = (float(ctr)/float(len(pathDirectory.listFiles)))*100
            sys.stdout.write("\rProcessed up to %0.2f %%..." % percent)
            sys.stdout.flush()

        dicoSeqSNP = {}
        nbSNP = 0
        tableauSoucheName = []

        # Read the alignment; BUGFIX: the file handle was never closed.
        with open(filein, "r") as alignHandle:
            alignmentStart = AlignIO.read(alignHandle, "fasta")

        # Build a new alignment containing only the wanted strains.  When
        # no keep-list was given ("ALL"), every strain seen is collected.
        keepListRecord = []
        for record in alignmentStart:
            if record.id not in listKeepSouche and args.listKeepFile == "ALL":
                listKeepSouche.append(record.id)
            if record.id in listKeepSouche:
                keepListRecord.append(record)
                tableauSoucheName.append(record.id)
                if record.id not in dicoSeqSNP.keys():
                    dicoSeqSNP[record.id] = ""

        alignment = MultipleSeqAlignment(keepListRecord)
        lenAlignement = int(alignment.get_alignment_length())

        # Scan each column; keep only clean polymorphic sites (no gap, no N).
        for indice in range(0, lenAlignement):
            tab = list(alignment[:, indice])
            nbO = tab.count(tab[0])
            nbA = tab.count("A")
            nbC = tab.count("C")
            nbT = tab.count("T")
            nbG = tab.count("G")
            nbN = tab.count("N") + tab.count("n")
            nbGap = tab.count("-")
            sommeACTG = nbA + nbC + nbT + nbG
            allcount = sommeACTG + nbN + nbGap
            if int(allcount) != len(alignment):  # sanity check: column must account for every strain
                print(sommeACTG, nbA, nbC, nbT, nbG, nbN, nbGap)
                print(tab)
                exit()
            if nbGap == 0:
                if nbO != sommeACTG and nbN == 0:
                    nbSNP += 1
                    for lentabi in range(0, len(tab)):
                        dicoSeqSNP[tableauSoucheName[lentabi]] += (tab[lentabi])

        nbSNPtotal = nbSNP
        if nbSNPtotal == 0:
            fileEmpty += 1
            listFileEmpty.append(filein)
        else:
            nbMotifTotal += 1
            listMotif = []
            for geneId, sequence in dicoSeqSNP.items():
                nbSNPtotal = (len(sequence))
                listMotif.append(sequence)

            nameMGG = filein.split("/")[-1].replace("_Orthologue_macse_NT.fasta", "")
            if nameMGG not in dicoFileCountSNP.keys():
                dicoFileCountSNP[nameMGG] = {"NBSNP": nbSNPtotal,
                                             "lenAlign": lenAlignement}
            if nbSNPtotal not in dicoFilenbSNP.keys():
                dicoFilenbSNP[nbSNPtotal] = 1
            else:
                dicoFilenbSNP[nbSNPtotal] += 1

            # Assign an arbitrary integer code (starting at 10) to each
            # distinct motif, then record each strain's code for this file.
            dicoCompteMotif = {k: listMotif.count(k) for k in set(listMotif)}
            dicoTranslateMotif2Code = {}
            code = 10
            for motifUniq in dicoCompteMotif.keys():
                dicoTranslateMotif2Code[motifUniq] = code
                code += 1
            for geneId, sequence in dicoSeqSNP.items():
                codeSeq = dicoTranslateMotif2Code[sequence]
                if geneId not in dicoOutputTxt.keys():
                    dicoOutputTxt[geneId] = [str(codeSeq)]
                else:
                    dicoOutputTxt[geneId].append(str(codeSeq))

    # Write the per-strain motif table, strains sorted in natural order.
    # BUGFIX: output handles below were never closed; use context managers.
    outputTxt = ""
    for key in sorted(listKeepSouche, key=sort_human):
        value = "\t".join(dicoOutputTxt[key])
        outputTxt += "%s\t%s\n" % (str(key), str(value))
    with open(outputfilename, "w") as output_handle:
        output_handle.write(outputTxt)

    with open(basename + "_outputListEmpty.txt", "w") as outputListEmpty:
        for fileEmptyName in listFileEmpty:
            outputListEmpty.write(fileEmptyName + "\n")

    with open(basename + "_LenAlign_nbSNP.txt", "w") as output1:
        txt1 = dictDict2txt(dicoFileCountSNP)
        output1.write(txt1)
    with open(basename + "_nbSNPallFile.txt", "w") as output2:
        txt1 = dict2txt(dicoFilenbSNP)
        output2.write(txt1)

    print("\n\nExecution summary:")
    print(" - Outputting \n\
Il y a au final %i Motif dans tout les MGG\n\
Il y a %i fichiers vides\n\
les sequences sont ajouter dans le fichier %s\n\
la liste des fichiers vides est dans le fichier outputListEmpty.txt" %(nbMotifTotal,fileEmpty,outputfilename))
    print("\nStop time: ", strftime("%d-%m-%Y_%H:%M:%S", localtime()))
    print("#################################################################")
    print("# End of execution #")
    print("#################################################################")
|
Mr Babache Giant Juggling Rings - 40cm Large Juggling Rings.
Extra large, oversize juggling rings for manipulation and... err... juggling.
Extra large juggling rings made from high quality plastic.
Great rings for lower number manipulation and juggling.
They take more effort to keep in the air when juggling higher numbers, but that is totally doable and will just take some adjusting to.
They can be sharp when new but lightly sanding the outside and inside will make them nicer to catch.
These rings are a lot of fun to juggle, I got them in the uv orange color which is super bright and beautiful. They are easy on the hands due to the thickness and the rounded edges of the Inner and outer ring. I have not tried the standard size rings, but I really like these ones, love them, the plastic material is of a great quality too.
This is my second review about these rings and I still love them, now I own 6 of them.
I don't manipulate them I juggle them and they are excellent and of course they don't do well in the wind but neither do all other rings except maybe the ones with holes in them.
I would recommend these rings for juggling and if you want to manipulate them great go for it.
|
from msl.equipment.connection import Connection
from msl.equipment.connection_demo import ConnectionDemo
from msl.equipment.record_types import EquipmentRecord
from msl.equipment.resources.picotech.picoscope.picoscope import PicoScope
from msl.equipment.resources.picotech.picoscope.channel import PicoScopeChannel
class MyConnection(Connection):
    # NOTE: the docstrings on the methods below are functional, not mere
    # documentation -- ConnectionDemo parses each method's docstring (both
    # the one-line ":obj:`type`:" form and the NumPy "Returns" section) to
    # decide what demo value to fabricate as the return value.  The bodies
    # are intentionally ``pass``.  Do not reword the docstrings.
    def __init__(self, record):
        super(MyConnection, self).__init__(record)
    def get_none1(self):
        """No return type is specified."""
        pass
    def get_none2(self, channel):
        """This function takes 1 input but returns nothing.
        Parameters
        ----------
        channel : :obj:`str`
            Some channel number
        """
        pass
    def get_bool1(self):
        """:obj:`bool`: A boolean value."""
        pass
    def get_bool2(self):
        """Returns a boolean value.
        Returns
        -------
        :obj:`bool`
            A boolean value.
        """
        pass
    def get_string1(self):
        """:obj:`str`: A string value."""
        pass
    def get_string2(self):
        """Returns a string value.
        Returns
        -------
        :obj:`str`
            A string value.
        """
        pass
    def get_bytes1(self):
        """:obj:`bytes`: A bytes value."""
        pass
    def get_bytes2(self):
        """Returns a bytes value.
        Returns
        -------
        :obj:`bytes`
            A bytes value.
        """
        pass
    def get_int1(self):
        """:obj:`int`: An integer value."""
        pass
    def get_int2(self):
        """Returns an integer value.
        Returns
        -------
        :obj:`int`
            An integer value.
        """
        pass
    def get_float1(self):
        """:obj:`float`: A floating-point value."""
        pass
    def get_float2(self):
        """Returns a floating-point value.
        Returns
        -------
        :obj:`float`
            A floating-point value.
        """
        pass
    def get_list_of_bool1(self):
        """:obj:`list` of :obj:`bool`: A list of boolean values."""
        pass
    def get_list_of_bool2(self):
        """A list of boolean values.
        Returns
        -------
        :obj:`list` of :obj:`bool`
            A list of boolean values.
        """
        pass
    def get_list_of_str1(self):
        """:obj:`list` of :obj:`str`: A list of string values."""
        pass
    def get_list_of_str2(self):
        """A list of string values.
        Returns
        -------
        :obj:`list` of :obj:`str`
            A list of string values.
        """
        pass
    def get_list_of_bytes1(self):
        """:obj:`list` of :obj:`bytes`: A list of bytes values."""
        pass
    def get_list_of_bytes2(self):
        """A list of bytes values.
        Returns
        -------
        :obj:`list` of :obj:`bytes`
            A list of bytes values.
        """
        pass
    def get_list_of_int1(self):
        """:obj:`list` of :obj:`int`: A list of integer values."""
        pass
    def get_list_of_int2(self):
        """A list of integer values.
        Returns
        -------
        :obj:`list` of :obj:`int`
            A list of integer values.
        """
        pass
    def get_list_of_float1(self):
        """:obj:`list` of :obj:`float`: A list of floating-point values."""
        pass
    def get_list_of_float2(self):
        """A list of floating-point values.
        Returns
        -------
        :obj:`list` of :obj:`float`
            A list of floating-point values.
        """
        pass
    def get_dict_of_bool1(self):
        """:obj:`dict` of :obj:`bool`: A dictionary of boolean values."""
        pass
    def get_dict_of_bool2(self):
        """A dictionary of boolean values.
        Returns
        -------
        :obj:`dict` of :obj:`bool`
            A dictionary of boolean values.
        """
        pass
    def get_dict_of_str1(self):
        """:obj:`dict` of :obj:`str`: A dictionary of string values."""
        pass
    def get_dict_of_str2(self):
        """A dictionary of string values.
        Returns
        -------
        :obj:`dict` of :obj:`str`
            A dictionary of string values.
        """
        pass
    def get_dict_of_bytes1(self):
        """:obj:`dict` of :obj:`bytes`: A dictionary of bytes values."""
        pass
    def get_dict_of_bytes2(self):
        """A dictionary of bytes values.
        Returns
        -------
        :obj:`dict` of :obj:`bytes`
            A dictionary of bytes values.
        """
        pass
    def get_dict_of_int1(self):
        """:obj:`dict` of :obj:`int`: A dictionary of integer values."""
        pass
    def get_dict_of_int2(self):
        """A dictionary of integer values.
        Returns
        -------
        :obj:`dict` of :obj:`int`
            A dictionary of integer values.
        """
        pass
    def get_dict_of_float1(self):
        """:obj:`dict` of :obj:`float`: A dictionary of floating-point values."""
        pass
    def get_dict_of_float2(self):
        """A dictionary of floating-point values.
        Returns
        -------
        :obj:`dict` of :obj:`float`
            A dictionary of floating-point values.
        """
        pass
    def get_multiple1(self):
        """Many different data types.
        Returns
        -------
        :obj:`str`
            A string value.
        :obj:`float`
            A floating-point value.
        :obj:`float`
            A floating-point value.
        :obj:`dict` of :obj:`int`
            A dictionary of integer values.
        :obj:`bytes`
            A bytes value.
        """
        pass
def test_return_type_builtin():
    """ConnectionDemo must fabricate the type each docstring declares."""
    demo = ConnectionDemo(EquipmentRecord(), MyConnection)

    # Methods documented without a return type must yield None.
    assert demo.get_none1() is None
    assert demo.get_none2() is None

    # Scalar return types, for both docstring flavours (suffix 1 is the
    # one-line form, suffix 2 the NumPy "Returns" section).
    for label, expected in (('bool', bool), ('string', str), ('bytes', bytes),
                            ('int', int), ('float', float)):
        for suffix in ('1', '2'):
            value = getattr(demo, 'get_{0}{1}'.format(label, suffix))()
            assert isinstance(value, expected)

    # Containers (list and dict) of each element type, both flavours.
    for label, expected in (('bool', bool), ('str', str), ('bytes', bytes),
                            ('int', int), ('float', float)):
        for suffix in ('1', '2'):
            seq = getattr(demo, 'get_list_of_{0}{1}'.format(label, suffix))()
            assert isinstance(seq, list) and isinstance(seq[0], expected)
            mapping = getattr(demo, 'get_dict_of_{0}{1}'.format(label, suffix))()
            assert isinstance(mapping, dict) and isinstance(mapping['demo'], expected)

    # A method documenting several return values yields one item per type.
    multi = demo.get_multiple1()
    assert len(multi) == 5
    assert isinstance(multi[0], str)
    assert isinstance(multi[1], float)
    assert isinstance(multi[2], float)
    assert isinstance(multi[3], dict) and isinstance(multi[3]['demo'], int)
    assert isinstance(multi[4], bytes)
def test_return_type_object():
    """A documented project type comes back as ``{'demo': <the class>}``."""
    scope = ConnectionDemo(EquipmentRecord(), PicoScope)
    channels = scope.channel()
    assert isinstance(channels, dict)
    assert channels['demo'] == PicoScopeChannel
|
This designer home sits on 1581m2 in Eldonwood, Matamata and has been designed for relaxed living and entertaining.
The home encompasses extremely spacious open plan living with cathedral ceilings, bi-folds to patio and entertainment areas, great family style kitchen with plenty of bench space, 3 bedrooms all opening to private patios, 2 tiled bathrooms including main bedroom with a wonderful ensuite. The 4th bedroom is self contained with ensuite and kitchenette (a great B&B option, extra space for the family or a rental proposition) which sits above the double garage.
The home has a gas fire and all tiled areas have underfloor heating. There is also a central vacuum system.
Eldonwood is a beautiful rural/residential development with soft edged roads and extensive walking trails, mature trees and gardens and combines the best aspects of country living, space and views, with the convenience of living close to town and very handy to schools and the many cafes and shops in Matamata.
Matamata is a thriving provincial town and only a 45 minute drive to Tauranga, Hamilton or Rotorua and 2 hours to Auckland.
This property is sure to be a winner! Call Joanne today to view.
|
# coding=utf-8
"""
vcsserver-lib
-------------
vcsserver-lib is library for easy and fast creation of an ssh demon for popular VCSs (Mercurial and Git).
This library uses Twisted framework."""
from setuptools import setup
setup(
    name='vcsserver',
    version='0.3.1',
    url='https://bitbucket.org/3f17/vcssshd-lib',
    license='BSD',
    author='Dmitry Zhiltsov',
    author_email='dzhiltsov@me.com',
    # Fixed typo: "demon" -> "daemon" in the published package description.
    description='Library for easy and fast creation of an ssh daemon for popular VCSs (Mercurial and Git)',
    long_description=__doc__,
    packages=['vcsserver'],
    zip_safe=False,
    include_package_data=True,
    platforms='any',
    install_requires=[
        'Twisted', 'pycrypto', 'pyasn1'
    ],
    classifiers=[
        'Environment :: No Input/Output (Daemon)',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Framework :: Twisted',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ]
)
|
Janet was born in Wallasey, England and immigrated with her parents to Canada when she was seven years old. She met her husband, Tim, in the Trinity Western College choir. She has two children: a married daughter living in Victoria, B.C., and a son living at home who attends UFV.
Janet enjoys scrapbooking and loves to sing. She has been involved in many choirs over the years and very much enjoyed being a part of Calvin’s Christmas Cantata last year. She recently decided to try a new musical genre and joined the Calvin Clappers handbell choir, which she absolutely loves. In the warmer months, she spends as much time as possible swimming in the ocean or any lake she can find.
|
import pathlib
import json
import collections

# Convert every TSPLIB .tsp file in ./tsp into two JSON variants:
#   dict/<name>.json  - node coordinates keyed by node index
#   list/<name>.json  - node coordinates as an ordered list (index dropped)
for path in pathlib.Path('tsp').iterdir():
    if not (path.is_file() and path.suffix == '.tsp'):
        continue
    # read_text() closes the file handle (the old ''.join(open(...)) leaked it)
    lines = path.read_text().split('\n')
    spec = collections.OrderedDict()
    # The first five lines are "KEY : VALUE" header entries (NAME, TYPE, ...).
    for i in range(5):
        key, val = lines[i].split(':')
        spec[key.strip()] = val.strip()
    # - Dict variant
    spec['NODE_COORD_SECTION'] = {}
    print(lines[5])  # section marker line (expected: NODE_COORD_SECTION)
    for coord in lines[6:-2]:
        # split() with no argument collapses any run of whitespace between
        # the three fields, so no manual space-normalization is needed.
        index, x, y = coord.split()
        spec['NODE_COORD_SECTION'][index] = {'x': x, 'y': y}
    with open('dict/' + path.name + '.json', 'w') as out:
        out.write(json.dumps(spec, indent=4))
    # - List variant (keeps file order; node index is intentionally dropped)
    spec['NODE_COORD_SECTION'] = []
    for coord in lines[6:-2]:
        _index, x, y = coord.split()
        spec['NODE_COORD_SECTION'].append({'x': x, 'y': y})
    with open('list/' + path.name + '.json', 'w') as out:
        out.write(json.dumps(spec, indent=4))
|
I am pregnant, in the 23rd week, and the due date is 20.01.15 — of course right in the middle of the exam phase! I'm unsure now: should I take a semester of leave? But I feel totally fit, which means I could attend the lectures ... though that achieves nothing if I don't sit any exams! Then again, I'd rather not go into labour during an exam :-p It would be really unfortunate if the oral defenses fall in that period!
I'm in a dead end right now! Is someone similar or does someone have experience there? ?
|
# -*- coding: utf-8 -*-
# Migration script to move published repositories to the new location.
#
# Copyright © 2014 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
import logging
import os
import shutil
# Shared logger for this migration module.
_log = logging.getLogger('pulp')
# Legacy publish location: puppet repos used to live under the web server root.
OLD_PUBLISH_ROOT_DIR = '/var/www'
OLD_PUPPET_PUBLISH_DIR_NAME = 'pulp_puppet'
# Current publish location under Pulp's own state directory.
NEW_PUBLISH_ROOT_DIR = '/var/lib/pulp/published'
NEW_PUPPET_PUBLISH_DIR_NAME = 'puppet'
def migrate(*args, **kwargs):
    """
    Move files from old publish directories to the new location.
    """
    src = os.path.join(OLD_PUBLISH_ROOT_DIR, OLD_PUPPET_PUBLISH_DIR_NAME)
    dest = os.path.join(NEW_PUBLISH_ROOT_DIR, NEW_PUPPET_PUBLISH_DIR_NAME)
    # Nothing to do unless the legacy directory exists and has content.
    if not os.path.exists(src) or not os.listdir(src):
        return
    # Move contents of '/var/www/pulp_puppet' into '/var/lib/pulp/published/puppet'
    move_directory_contents(src, dest)
    _log.info("Migrated published puppet repositories to the new location")
def move_directory_contents(src_dir, dest_dir):
    """
    Move everything in src_dir to dest_dir
    """
    # dest_dir (/var/lib/pulp/published/puppet) already exists, so a wholesale
    # shutil.copytree is not an option; relocate each entry individually,
    # which leaves src_dir behind as an empty directory.
    for name in os.listdir(src_dir):
        source = os.path.join(src_dir, name)
        target = os.path.join(dest_dir, name)
        shutil.move(source, target)
|
Here is list of top Lithuania Scholarships for undergraduate and Postgraduates students in Lithuania to pursue a degree in several fields of study.
World Scholarship Forum gathers the latest and updated information on Lithuania Scholarships for citizens and international students to get funding assistance for the pursuit of their academic dreams.
These scholarships and related opportunities will aid you in the funding of your studies and on the long run you can have your desired academic pedestal attained.
Compared to other Baltic States, Lithuania is economically and politically advanced.
Located in Eastern Europe, between Latvia and Russia on the Baltic Sea coast, Lithuania was for a long time a republic of the Soviet Union but was the first to declare its independence in 1990. It is now part of the European Union.
Opportunities in this Scholarships category include: Scholarships, Internships, Volunteering, Training, Fellowships for several postgraduate levels, competitions, Grants and Awards of many sorts.
With the robust system of World Scholarship Forum, We have gathered these Scholarships to aid you in the realization of your utmost desires.
Therefore, if you are asking any of the following questions as a Lithuanian or international student,
carefully browse through this page to find the scholarships for Lithuanian students that meet the needs above.
These Lithuanian Scholarships for International Students can be a Fully Funded Scholarship, Grant or a Need based Scholarships.
|
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import shutil
import numpy as np
from PyQt5 import QtGui, QtCore, QtWidgets
from spimagine.gui.mainwidget import MainWidget
from spimagine.models.data_model import DataModel, NumpyData
from gputools import OCLProgram
# OpenCL/NVIDIA kernel compiler caches. These are purged before each rebuild
# (see MyWidget.on_compile_timer) so kernel-source edits are actually
# recompiled instead of being served from a stale cache.
CACHEDIRS = ["~/.nv/ComputeCache","~/.cache/pyopencl/pyopencl-compiler-cache-v2-py2.7.6.final.0"]
CACHEDIRS = [os.path.expanduser(_C) for _C in CACHEDIRS]
import spimagine
def absPath(myPath):
    """ Get absolute path to resource, works for dev and for PyInstaller """
    try:
        # PyInstaller creates a temp folder and stores path in _MEIPASS
        bundle_dir = sys._MEIPASS
    except Exception:
        # Running from source: resolve relative to this file's directory.
        here = os.path.abspath(os.path.dirname(__file__))
        return os.path.join(here, myPath)
    return os.path.join(bundle_dir, os.path.basename(myPath))
class MyWidget(MainWidget):
    """Volume-render widget that re-compiles the OpenCL kernel once a second."""

    def __init__(self):
        super(MyWidget, self).__init__()
        # Poll every second so edits to the kernel source are picked up live.
        self.compileTimer = QtCore.QTimer(self)
        self.compileTimer.setInterval(1000)
        self.compileTimer.timeout.connect(self.on_compile_timer)
        self.compileTimer.start()

    def on_compile_timer(self):
        # Purge compiler caches first, otherwise a cached binary would be
        # reused and the edited kernel source would never be rebuilt.
        for cache_dir in CACHEDIRS:
            if os.path.exists(cache_dir):
                print("removing cache: ", cache_dir)
                shutil.rmtree(cache_dir)
        print("compiling...")
        try:
            kernel_dir = os.path.dirname(spimagine.volumerender.__file__)
            build_opts = ["-cl-fast-relaxed-math",
                          "-cl-unsafe-math-optimizations",
                          "-cl-mad-enable",
                          "-I %s" % os.path.join(kernel_dir, "kernels/"),
                          "-D maxSteps=%s" % spimagine.config.__DEFAULTMAXSTEPS__]
            proc = OCLProgram(os.path.join(kernel_dir, "kernels/volume_kernel.cl"),
                              build_options=build_opts)
            self.glWidget.renderer.proc = proc
            self.glWidget.refresh()
            print(np.amin(self.glWidget.output), np.amax(self.glWidget.output))
        except Exception as e:
            # Compile errors are expected while editing; report and keep polling.
            print(e)
if __name__ == '__main__':
    # Demo volume: two Gaussian blobs on a 128^3 grid.
    axis = np.linspace(-1, 1, 128)
    Z, Y, X = np.meshgrid(axis, axis, axis)
    r_a = np.sqrt((X + .2) ** 2 + (Y + .2) ** 2 + (Z + .2) ** 2)
    r_b = np.sqrt((X - .2) ** 2 + (Y - .2) ** 2 + (Z - .2) ** 2)
    volume = np.exp(-10 * r_a ** 2) + np.exp(-10 * r_b ** 2)

    app = QtWidgets.QApplication(sys.argv)
    win = MyWidget()
    win.setModel(DataModel(NumpyData(volume)))
    win.show()
    win.raise_()
    sys.exit(app.exec_())
|
This is our Colin Ludwell new car dealer page, We have listed the address of Colin Ludwell and phone numbers and websites. Colin Ludwell is in Bristol, Colin Ludwell offers new cars, used cars, cheap new cars, cheap new motorbikes, cheap new motorbikes, used vehicles and new bikes in Bristol and also any type of new cars and used cars in Bristol.
minor things which shouldn't have happened given the price I paid, and I know you'll not submit this review ..
just bought a cheap secondhand car from Ludwells, the service beforehand was great, patient, prepared to answer questions, efficient and a great sendoff; will certainly go back next time Dave Evans..
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
# Generic return type for operations, and the signature of the optional ``cls``
# response-transformer callback every operation accepts.
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DedicatedHostsOperations:
"""DedicatedHostsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2020_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
parameters: "_models.DedicatedHost",
**kwargs: Any
) -> "_models.DedicatedHost":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DedicatedHost"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
'hostName': self._serialize.url("host_name", host_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DedicatedHost')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DedicatedHost', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DedicatedHost', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
parameters: "_models.DedicatedHost",
**kwargs: Any
) -> AsyncLROPoller["_models.DedicatedHost"]:
"""Create or update a dedicated host .
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group.
:type host_group_name: str
:param host_name: The name of the dedicated host .
:type host_name: str
:param parameters: Parameters supplied to the Create Dedicated Host.
:type parameters: ~azure.mgmt.compute.v2020_06_01.models.DedicatedHost
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DedicatedHost or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2020_06_01.models.DedicatedHost]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DedicatedHost"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
host_group_name=host_group_name,
host_name=host_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DedicatedHost', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
'hostName': self._serialize.url("host_name", host_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
parameters: "_models.DedicatedHostUpdate",
**kwargs: Any
) -> "_models.DedicatedHost":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DedicatedHost"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
'hostName': self._serialize.url("host_name", host_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DedicatedHostUpdate')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DedicatedHost', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
parameters: "_models.DedicatedHostUpdate",
**kwargs: Any
) -> AsyncLROPoller["_models.DedicatedHost"]:
"""Update an dedicated host .
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group.
:type host_group_name: str
:param host_name: The name of the dedicated host .
:type host_name: str
:param parameters: Parameters supplied to the Update Dedicated Host operation.
:type parameters: ~azure.mgmt.compute.v2020_06_01.models.DedicatedHostUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DedicatedHost or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2020_06_01.models.DedicatedHost]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DedicatedHost"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
host_group_name=host_group_name,
host_name=host_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DedicatedHost', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
'hostName': self._serialize.url("host_name", host_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
'hostName': self._serialize.url("host_name", host_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Delete a dedicated host.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group.
:type host_group_name: str
:param host_name: The name of the dedicated host.
:type host_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
host_group_name=host_group_name,
host_name=host_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
'hostName': self._serialize.url("host_name", host_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}'} # type: ignore
async def get(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
expand: Optional[str] = "instanceView",
**kwargs: Any
) -> "_models.DedicatedHost":
"""Retrieves information about a dedicated host.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group.
:type host_group_name: str
:param host_name: The name of the dedicated host.
:type host_name: str
:param expand: The expand expression to apply on the operation.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DedicatedHost, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_06_01.models.DedicatedHost
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DedicatedHost"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
'hostName': self._serialize.url("host_name", host_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DedicatedHost', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}'} # type: ignore
def list_by_host_group(
self,
resource_group_name: str,
host_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.DedicatedHostListResult"]:
"""Lists all of the dedicated hosts in the specified dedicated host group. Use the nextLink
property in the response to get the next page of dedicated hosts.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group.
:type host_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DedicatedHostListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2020_06_01.models.DedicatedHostListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DedicatedHostListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_host_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('DedicatedHostListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
    """Fetch one page of results, raising on any non-200 response."""
    request = prepare_request(next_link)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        # Translate known status codes (401/404/409) via error_map first,
        # then fall back to a generic ARM-formatted HTTP error.
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_host_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts'} # type: ignore
|
Truth is a holy relic many artists seek. Not necessarily truth through representation of facts, rather the truth in seeking those universal principles that transcend all cultures. In David O. Russell’s latest film he plays with truth in the very first frame with the tagline, “Some of this Actually Happened”.
ell’s love letter to Martin Scorsese with its intense infusion of period style, brash characters that are full of life, use of first person narration, and a heavy reliance on inspired musical choices. What it lacks is that sharp edge Scorsese so elegantly brings. Russell is a little too precious with his characters and is hesitant to enter any truly dark or twisted territory. This lack of conviction hinders American Hustle from being an all-time gem, but due in large part to a stellar cast it is a film that is certainly worth your time.
That cast includes some of today’s biggest and brightest stars including Christian Bale as Irving Rosenfeld, a man who looks like the creepy uncle we all fear we have. In the month of December alone Bale has shown why he is such a versatile actor. In Out of the Furnace he was a subdued force that internalized every emotion. Here Bale is a loud obnoxious worm of a person that exudes extreme confidence—a con artist that looks like a con artist. Actors often take this type of role and become a cartoon you can never take seriously. Bale gives it a form of gravitas few are capable of achieving. He is big in the right moments, and Russell gives him enough smaller moments to show his vulnerability. Based on appearance he is not someone who looks like he could land a woman who looks like Amy Adams, but he has that impetuous personality that draws certain types of people in.
Adams plays Sydney Prosser a woman who has the tenacity and intelligence to get what she wants. While Bale was fantastic, Adams may have been even better. Russell gives her a strong character to work with. One that is layered and unendingly complicated. In return Adams gives a performance that is nuanced in every which way. She has a seductiveness to her that is overpowering. Her control is obvious, however she is not invulnerable. There is a pain to her plan of attack.
After she joins up with Irving she helps bring his operation to an entirely new level. By donning a fake British accent she helps Irving fool people into throwing their money into a fake investment firm. This racket grows so large it draws the attention of the FBI agent Richie DiMaso, who is played by Bradley Cooper. DiMaso sets up a sting operation to shut down their little ponzi scheme. Instead of throwing both of them in jail he plans on using their expertise to bring down even bigger fish in the criminal market.
Last year when Cooper teamed up with Russell in Silver Linings Playbook he gave the performance of his career. This time around he is only a supporting player but still shows his strength as an actor. Richie Dimaso shares many characteristics with Cooper’s character from ‘Silver Linings’. Both have an act first ask questions later type of attitude that puts them in situations that are over their head. Both are continuously searching for something more but aren’t really sure what that more consists of exactly. Where Richie Dimaso differs is in the area of likeability. In a film filled with con artists, members of the mob, and dirty politicians the FBI agent is the most unlikable person. His headstrong determination is what continually gets them into trouble.
That motivation morphs an operation to land some small time crooks into a darker and more perilous world ruled by dangerous people. Atlantic City becomes the focal point as Mayor Carmine Polito is trying to rebuild it to its former glory. Polito is a man with good intentions but dirty actions. He may be the only character in the entire film that is not solely out for himself. Jeremy Renner was an odd casting choice for Polito. It is a straight-laced character that is purposely bland, which is not the type of role Renner is known for. That lack of familiarity may be why he never feels comfortable in the role.
One person who appears to be comfortable in any role she plays is Jennifer Lawrence. Here she gives another wiser than her years performance that will go down as one of the year’s best. She plays DiMaso’s wife who is a constant thorn in his side. Her unhinged personality makes her the unpredictable piece that could bring this entire operation to a screeching halt.
Russell displays great skill in handling all these performances. From a character standpoint the script is strong. All the major players are fleshed out and fully formed. Where the film runs into issues is in the area of plotting. There are a lot of moving pieces that it can never entirely corral. It has a lack of focus as if it’s not sure where exactly it wants to go next. Frequently it will go off and get lost in a tangent of subplots and side stories. As the film concludes it forcefully attempts to tie all these threads together but it never completely accomplishes that goal.
a movie about a serial killer that doesn’t kill anyone. The key is to be more daring without being gratuitous.
Now the film is not without stakes. One moment in particular will go down as one of my favorite scenes of the year. An impromptu meeting at a casino is set up perfectly as a mere unexpected sentence asked by an unexpected guest raises the tension instantly. So much was done with so little—a brilliant moment powered by some remarkable actors.
American Hustle is an engaging story bathed in seventies sleek and sweaty style. It comes alive with a sultry soundtrack of rock n’ roll hits and disco beats. While the final product is less than the sum of its parts, it is still a solid piece of work.
|
"""
SQLAlchemy FullText Search
"""
from setuptools import setup, Command
setup(
name='SQLAlchemy-FullText-Search',
version='0.2.3',
url='https://github.com/mengzhuo/sqlalchemy-fulltext-search',
license='BSD',
author='Meng Zhuo, Alejandro Mesa',
author_email='mengzhuo1203@gmail.com, alejom99@gmail.com',
description=('Provide FullText for MYSQL & SQLAlchemy model'),
long_description = __doc__,
packages=['sqlalchemy_fulltext'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=['SQLAlchemy>=0.8',],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules' ]
)
|
A topic which brings much doubt and tests the faith of newcomers in Jyotish is the topic of Ayanamsa. Where our western counterparts who use the tropical zodiac aren’t affected by this problem, those who use the sidereal zodiac, or a zodiac based on star positions are cast into a deep dilemma as to which ayanamsa to use. Where western sidereal astrologers are almost forced to take the plunge into learning the intricacies of the ayanamsa and thereby choose the ayanamsa they like the best, vedic astrologers are presented with a peculiar situation where faith tends to be the compelling factor in choosing ayanamsa.
This writeup doesn’t claim to ascertain a correct ayanamsa, but tries to shed light on the many different facets of the ayanamsa, and some possibilities within the same.
To explain ayanamsa it becomes necessary to describe two different zodiacs, the Niryana chakra and the Sayana chakra. These are two different zodiacs in a sense.
The Niryana is based on the stars and the Sayana is based on the equinoctial points, i.e. the seasons. The Niryana or star-based zodiac is moving in comparison to the Sayana chakra, which is seen by the stars being in different positions during the same time every year. This difference between Niryana and Sayana chakra is called Ayanamsa.
In an attempt to explain the cause of the ayanamsa two major theories have arisen: luni-solar theory and what some are calling the binary theory.
The luni-solar theory was initially referred to by Copernicus and later Newton, and has been modified several times ever since. The theory is based on the idea that the reason the stars are changing with reference to the earth, is because the earth’s angle to them is changing. The reason for the many modifications of the theory, is because it’s based on the idea that the Sun and Moon’s gravity is the cause of the earth’s changing angle. Here whilst scientists could prove that the Sun and Moon did indeed affect the earth’s angle, it wasn’t enough to explain the change in ayanamsa. Further the theory was based on a number of calculation principles which didn’t deal well with predicting future positions of the ayanamsa.
Here we come to the binary theory. This theory is much closer to home and coincides with some of the Vedic concepts of the universe, namely that the Sun is moving around another point, i.e. Dhruva or some other fixed point. See, the Moon revolves around the Earth in about 27 days. This is a lunar return. Yet, a full moon occurs only after ~29 days. So the Moon has to spend 2 days extra catching up with the Sun to complete the full moon phase.
Similarly, the earth revolves around the Sun in ~365 days. Yet, based on the binary theory, the Sun is simultaneously moving around another point/binary star, and therefore the earth, just like the moon, must spend more time to catch up with the star-placements in the heavens.
Because the earth has to spend more time moving forwards around the Sun it appears as if the stars are moving backwards every year.
This brings us to the entire predicament of the ayanamsa… the rate at which it is moving! The rate at which it’s moving is called precession. The ayanamsa moves based on the star positions with regards to the equinoctial points, and it has been found that the precession speed is changing. Where it initially was 46 seconds of arc (”) in 150 AD, it is now ~50 seconds of arc and increasing. Therefore for a complete cycle around the zodiac, it would take about 25 thousand years with present information. Yet with data only spanning over a period of 2000 years, we only have a 1/12th fraction of the knowledge of the zodiac movement.
These days most makers of astrological software do not take into account the change in precession rate, and calculate a fixed rate of precession from a given date. This is a completely wrong approach.
Some intelligent programmers have instead based their ayanamsa calculations on actual ephemeris data of star positions. Though this is the best approach, it is limited to empirical/observed data. Therefore the goal of astronomers and astrologers today is to define and make a model which can imitate the precession.
We are not left without clues from the seers of the vedas. According to Surya Siddhanta, the circumference of the nakshatras wherein the stars exist are sixty times larger than the circumference of the earths movement around the Sun. By this is implied that it takes sixty times longer for the Sun to progress one degree, than it takes the earth. Therefore it takes 60 years before the Sun has progressed one degree in the stars, and 120 years before it progresses two degrees. This makes the average rate of precession of the ayanamsa one minute per year. This is ten arc seconds faster than the current precession (50″).
Assuming this data to be accurate, it would give a cycle lasting 21600 years before the Sayana and Niryana chakras aligned again.
Today the most common choice among the vedic astrologers is the Lahiri ayanamsa. The reason being that the ayanamsa was selected by a committee after intense scrutiny. The Lahiri ayanamsa is based on the Chitra Paksha Ayanamsa. There are two main ayanamsas given in the vedic scripture namely Revati-paksha and Chitra-paksha. The word paksha refers to 180 degrees or an exact opposite position from a fixed star. Here Revati and Chitra do not refer to a large constellation of nakshatras spanning ~13 degrees, but to the original stars which make up the base of the nakshatra positions. The star Chitra corresponds to the star Spica, whilst Revati corresponds to the star Zeta Piscium. 180 degrees opposite the star Chitra is said to be 0 degrees of Aries, whilst 180 degrees opposite Revati is 0 degrees of Libra.
Today the tropical degree of Spica is 23 degrees 57 minutes 2.03 arc seconds. These degrees are the actual ayanamsa at present. This corresponds to the sidereal degree of 0 degrees Libra and opposite this is the sidereal 0 degrees of Aries according to the Chitra Paksha/Lahiri Ayanamsa. The reason for choosing stars at the beginning of these two points is that they correspond to the Deva Ayana, i.e. the day of the Deva begins in Aries, and ends in Libra, where the day of the Asuras begin. Preferably any ayanamsa which depicts the movement of the stars should be based on these starting/ending points as according to the Rg Veda, the stars are the abode of the Deva.
There also exists the Raman Ayanamsa and the Krishnamurty Paddhati Ayanamsa. Where the former was not openly announced by its author, the latter is very close to Lahiri and could be an approximation made to compensate for the inaccuracies in the Lahiri ayanamsa at the given time. This is because astrologers had relied on a mean motion of the ayanamsa which caused inaccuracies in the calculation with time. One more ayanamsa called Yuktesvaras ayanamsa exists but it wasn’t used to predict planetary positions but to justify yuga calculations, hence its unclear as to whether the author of it actually meant to use it for any other purpose.
What is common for these three mentioned ayanamsas is that their authors haven’t linked them to any fixed-star positions.
Unfortunately, the astronomical backing is lacking in many of the ‘new’ ayanamsas today, and mainly because astrologers are too quick to ignore the star positions before ascribing to an ayanamsa. And if it isn’t the star position, then the precession rate itself doesn’t correspond to the actual precession of the stars. This leaves ‘new’ ayanamsas as being imaginary or arbitrary positions in space.
1. Stars initiating the ayanamsa.
2. Actual precession rate of the stars.
3. The signs which initiate the ayanamsa.
So far the only software today, to this authors knowledge, which depicts the Ayanamsa based on actual star positions is Jagannath Hora, by PVR Narasimha Rao. The software uses an ephemeris with the actual star positions to fix the ayanamsa. Yet, it is this authors hope that one day astrologers will be able to calculate the ayanamsa for any given time using the knowledge given to us by the seers.
Surya Siddhanta, Chapter XII, sloka 80.
Do you or the parampara have an opinion on Pushya Paksha ayanamsha? It seems to meet the criteria you set forth here and I’ve been noticing some interesting /more accurate placements when they are close to crucial chart points.
The Parampara is very broad. The branch headed by Pt. Sanjay Rath has not made any official or formal announcements about any Ayanamsha, but I know that Sanjayji doesn’t use Pushya Paksha ayanamsha.
Neither do I nor my closest colleagues.
Personally I have two points of critique for the Pushya Paksha Ayanamsha.
#1 Whilst Pushya’s Yogatara is close to the ecliptic, it doesn’t explain why we should NOT use Chitra’s Yogatara. I would be more satisfied with reading a serious analysis of ALL the star positions and the reason for the angular displacement between them, and thereby ascertain the more accurate Ayanamsha reference-point, rather than incur a Pushya-vs-Chitra debate. Astrologers tend to justify anything and everything… There are professional Vedic Astrologers using the Tropical Zodiac!.. So the point is that we cannot ascertain an Ayanamsha on astrological inference alone, and we need a more serious look into why the astronomical placement of the stars are as they are, and which star to base our ayanamsha upon.
#2 The appropriate name is not Pushya-Paksha. Paksha means opposite and therefore the term Pushya-Paksha implies to place 0 degrees Aries somewhere in the present sign of Capricorn! The more appropriate term should be Pushya-Svarga as Svarga would at least refer to a placement about 270 degrees away from Pushya.
Unfortunately the term is not as convenient as ‘Chitra Paksha’ where the term implies exactly 180 opposite the Yogatara of Chitra – this is the basis of Chitra Paksha or Lahiri Ayanamsha. This position has been a very convenient reference point to look for during the equinoxes.
|
import sys
import getopt
import platform
import subprocess
from twisted.internet import reactor
from autobahn.websocket import WebSocketClientFactory, WebSocketClientProtocol, connectWS
STAR_REQUEST_TIMEOUT_IN_SECONDS = 3
DEFAULT_URL = "ws://localhost:9000"
STAR_REQUEST_MESSAGE = "twinkle:star"
ERROR_MESSAGE = "twinkle:error"
STARRED_MESSAGE = "twinkle:starred"
def _enum(**enums):
return type('Enum', (), enums)
# Feedback sounds played after a star request completes.
Sound = _enum(SUCCESS = 1, FAIL = 2)
# Module-level flag; set to True via -m/--mute-sound to suppress audio.
muted = False
class ClientProtocol(WebSocketClientProtocol):
    """Sends one star request, awaits the acknowledgement, then disconnects.

    A timeout guard closes the connection if the server does not answer
    within STAR_REQUEST_TIMEOUT_IN_SECONDS.
    """

    def __init__(self):
        # Becomes True once the server confirms the star request.
        self.has_recived_starred_message = False

    def onOpen(self):
        # Fire the request immediately and arm the timeout guard.
        self.sendMessage(STAR_REQUEST_MESSAGE)
        reactor.callLater(STAR_REQUEST_TIMEOUT_IN_SECONDS, self.timeout)

    def onClose(self, wasClean, code, reason):
        if not wasClean:
            terminateAbnormally(reason)
            return
        if self.has_recived_starred_message:
            reactor.stop()
        else:
            terminateAbnormally("Star request failed")

    def onMessage(self, message, binary):
        if message == ERROR_MESSAGE:
            self.sendClose()
        elif message == STARRED_MESSAGE:
            self.has_recived_starred_message = True
            print("Star request succeeded")
            if not muted:
                playSound(Sound.SUCCESS)
            self.sendClose()

    def timeout(self):
        # No-op if the acknowledgement already arrived.
        if self.has_recived_starred_message:
            return
        print("Timeout while waiting for star request's response")
        self.sendClose()
class ClientFactory(WebSocketClientFactory):
    """Factory that aborts the whole program when the connection cannot be made."""

    def __init__(self, url):
        # Delegate straight to the autobahn base-class initializer.
        WebSocketClientFactory.__init__(self, url)

    def clientConnectionFailed(self, connector, reason):
        # A failed connect is fatal for this one-shot client.
        terminateAbnormally(reason)
def terminateAbnormally(reason):
    """Report *reason*, play the failure chime unless muted, and stop the reactor."""
    print(reason)
    should_play = not muted
    if should_play:
        playSound(Sound.FAIL)
    reactor.stop()
def playSound(sound):
    """Play the audio cue for *sound* (a ``Sound`` member); swallow playback errors."""
    if sound == Sound.SUCCESS:
        audioFile = "success.wav"
    elif sound == Sound.FAIL:
        audioFile = "fail.wav"
    else:
        raise Exception("Unknown sound state")
    try:
        # Playback problems (missing player binary, bad file) are non-fatal.
        subprocess.call(getPlayCommand(audioFile), shell=True)
    except Exception as e:
        print(e)
def getPlayCommand(filePath):
    """Return the shell command string that plays *filePath* on this OS."""
    system = platform.system()
    if system == "Linux":
        return "aplay " + filePath
    # TODO Needs testing
    if system == "Darwin":
        return "afplay " + filePath
    if system == "Windows":
        return "powershell -c (New-Object Media.SoundPlayer \"" + filePath + "\").PlaySync();"
    raise Exception("Could not identify platform while trying to play audio")
if __name__ == '__main__':
    # Usage text shown for --help and on argument-parsing errors.
    usage = 'twinkle [--url=<url to web socket server>] [-m|--mute-sound] [-h|--help]'
    url = DEFAULT_URL
    try:
        opts, args = getopt.getopt(sys.argv[1:], "mh", ["url=", "mute-sound", "help"])
    except getopt.GetoptError:
        print(usage)
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print(usage)
            sys.exit()
        elif opt == "--url":
            # BUG FIX: the original used `opt in ("--url")`, which is a
            # substring test against a plain string (the parentheses do not
            # make a tuple) and only worked by coincidence.
            url = arg
        elif opt in ("-m", "--mute-sound"):
            muted = True
    # Wire the protocol to the factory and run the event loop until the
    # protocol (or an error handler) stops the reactor.
    factory = ClientFactory(url)
    factory.protocol = ClientProtocol
    connectWS(factory)
    reactor.run()
|
However durable your air conditioning Green Lake may appear, it will slowly deteriorate and malfunction. No matter when it decides to quit working, we’re ready to assist you. No matter the time of day or night that it chooses to give up working, we’re ready to come to your rescue. Air conditioner repair is the sole service. Aside from the air filter cleansing and repair, there are a few other things that can be maintained or fixed while choosing ac repair. Maintenance on a normal basis by a trustworthy AC repair business Green Lake, WI can assist ensure your system remains working properly all through the year. Scheduling annual expert maintenance assists keep your commercial air conditioner in prime shape, avoiding the demand for emergency air-conditioning repair Green Lake, WI and assistance.
AC repair and maintenance can get troublesome if not dealt with by the appropriate personnel. It can be one of the most discouraging home repairs. Green Lake AC Repair With respect to repairing your air conditioner, however, you can’t simply trust any repair business or repair guy in the future into your home. You have to pick the service just if they’re active on the social networking platform. Hence, to keep the sturdiness and operation of your AC all you will require is fantastic repair services Green Lake. You can find the really finest AC repair service on the internet. The service will look after domestic and industrial buildings. It’s very vital to locate a great Ac repair service to produce your summer season comfortable and peaceful. Generally, the terrific AC repair services in Green Lake react within a day. The unprofessional suppliers constantly use the specific same method to take care of all kinds of AC of any brand, however professionals constantly make use of particular or particular techniques in addition to use authentic parts to produce your branded AC to operate in the most reliable possible technique. Before choosing the repair services that you will need to be familiar with reasons which are the source of the repair services. Prior to you call any skilled and dependable air conditioner repair support, there are a few standard things which you may desire to inspect for yourself.
Air conditioning repair Green Lake is crucial in commercial and residential places. Therefore, it’s ideal if one goes in for a normal AC maintenance which will look after the repairs. There’s no denying the simple fact that regular preventive maintenance can enhance the operation and service life HVAC systems. It is far less costly than expensive change-outs. If you consider top HVAC maintenance in dwelling and company places, then you have to stick to a few crucial measures that need to be accepted. When it has to do with AC repair then we are among the top Commercial Air conditioning Green Lake businesses in your region and offer you the finest and proficient services at affordable price. If you’re thinking that you would have the ability to handle the industrial AC repair without seeking the expert help, you have no clue what mess you’ll be getting into. If you wish to seek the services of a professional for an industrial AC repair, make sure that you employ the one which has an experience in handling numerous complexities involved in an industrial AC. Skilled HVAC technicians can concentrate on the significant electrical or mechanical faults and fix them before the system becomes completely exhausted. If you cannot, then you can’t be an HVAC service technician Green Lake, and particularly in the residential field. The HVAC service technician is going to be expected to meet and greet, and respect and be kind, thoughtful, and ready to listen to the many persons they will meet in the plan of each and every day at work. So, you’ve got to depend on specialists in air conditioning restoration.
No matter the scenario, a repair can often correct the issue instead of you needing to find a new system for your house. Individuals mistakenly think that chain businesses offer higher repairs. Naturally, the very best repairs are the ones that you do not have to make. At times, a residential air-conditioning repair for your Green Lake home is needed. The aforementioned steps make maintenance a bit easier, let you keep a cool home throughout the summertime, and help you to save on electricity costs. In spite of the size or reliability of your present unit, regular maintenance must make sure that it does not break down when you want it most. Therefore, it’s ideal if one goes in for a normal AC maintenance Green Lake which will manage the repairs.
A properly working air conditioning process is just one of the greatest things your house could have for you and your family to take pleasure in the summer. A functioning air conditioning process is a must. Your house’s central ac system may disappoint you at times as soon as it goes down as you require it the most. Maintaining your AC unit isn’t always simple, but you always have the option to request expert assistance. A properly maintained AC unit can earn a big difference on your energy bills monthly. In the event the central AC unit doesn’t seem to be cooling your house effectively, start by decreasing the thermostat by five degrees. If you’re taking a look at a repair business and keep reading reviews that mention careless technicians, bad customer service and outrageous prices, avoid it. In such instance, you need to contact residential air-conditioning repair company near you to decide on the issue and give suggestions which will help you maintain your house cool and comfortable. In such instances you need to contact air-conditioning unit repair company Green Lake.
Whether you need assistance with a residential or industrial HVAC system, our technicians possess the resources and knowledge to swiftly and effectively resolve any problems. For those who have emergency repair Green Lake wants and will need to obtain the most suitable contractor for the job, you want to make sure that you start looking for certain qualities to make sure the job is done well. If you’re not in demand of HVAC maintenance or ac repair but want to know more about purchasing new HVAC products, we can assist you too. Residential or commercial, you require an HVAC system Green Lake you can depend on to work when you require it most. When it has to do with your HVAC systems, any problem at all can be an enormous supply of tension and discomfort. Your new HVAC system won’t only keep your residence or business at the ideal temperature, but will help you save money on electricity costsand help protect the surroundings, too. If you’re interested in a new ductless AC system, we can help you pick a model with higher efficiency ratings.
Emergency AC Repair in Green Lake WI. AC not working? Call our experts for all your home air conditioner repair needs. Our local air conditioner repair technicians are available 24/7.
|
# -*- coding: utf-8 -*-
import logging
import sys
import sqlalchemy.orm
import database
import structures
def get_names_from_file(path):
    """Parse a file of comma-separated name records into JapaneseName objects.

    Each line is expected to contain at least three fields:
    ``kanji,hiragana,gender``.
    """
    with open(path) as source:
        records = source.read().splitlines()
    return [
        structures.JapaneseName(kanji=fields[0], hiragana=fields[1], gender=fields[2])
        for fields in (record.split(",") for record in records)
    ]
if __name__ == "__main__":
logger = logging.getLogger('names')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.info('Reading raw data')
names = get_names_from_file("./raw_names.txt")
logger.info('Filtering out duplicates')
unique_names = set(names)
logger.info('Filtering out invalid names')
validator = structures.JapaneseNamesValidator()
valid_names = [name for name in unique_names if validator.is_name_valid(name)]
logger.info('Sorting names')
sorted_names = sorted(list(valid_names))
logger.info('Populating database')
Session = sqlalchemy.orm.sessionmaker(bind=database.engine)
session = Session()
# Define genders
boy = database.Gender(gender=u'男')
girl = database.Gender(gender=u'女')
session.add_all([boy, girl])
session.commit()
name_entries = [database.Name(name) for name in sorted_names]
session.add_all(name_entries)
session.commit()
|
Trust in the Lord with all of your heart; and don’t lean on your own understanding. In everything you do, acknowledge the Lord and the Lord will direct you.
Can grief feel like an anvil on my heart? Like a black-out curtain over my day? Like a rock in the pit of my stomach? It must be grief that I am feeling, but why? Who knows why, Lord. Who knows why? I cry to you…have mercy on me and give me your understanding for mine has failed me. Direct me to the healing and peace that I need, I pray in Jesus’ name.. Amen.
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from rpy import r
def platt(light, etr, ini=None):
    """
    Adjust a curve of best fit, following the Platt model.

    Parameters
    ----------
    light : arr
        Generally PAR values, where Photosynthetic Active Radiance
        interferes with Primary Production. 'light' is this parameter.
    etr : arr
        Electron Transference Rate, by means relative ETR, obtained from
        Rapid Light Curves.
    ini : List
        Optional initial values for the optimization process.

    Returns
    -------
    iniR : arr
        Initial values modeled, with R `optim` function.
    opts : arr
        Curve adjusted with ETR values modeled.
    pars : arr
        Curve parameters (alpha, Ek, ETRmax)

    See Also
    --------
    T. Platt, C.L. Gallegos and W.G. Harrison, 1980. Photoinhibition of
    photosynthesis in natural assemblages of marine phytoplankton. Journal
    of Marine Research, 38:4, 687-701.
    """
    opts = []
    pars = []
    # Push the NaN-filtered observations into the embedded R session; the
    # R functions defined below read `x` and `y` from that environment.
    r.assign("x", light[~np.isnan(light)])
    r.assign("y", etr[~np.isnan(etr)])
    # Default starting guess for the (alpha, Beta, Ps) parameter triplet.
    if ini is None:
        r.assign('ini', [0.4, 1.5, 1500])
    else:
        r.assign('ini', np.array(ini))
    # Sum-of-squares objective for the Platt (1980) model.
    min_platt = r("""
    platt<- function(params){
        alpha<-params[1]
        Beta<- params[2]
        Ps<- params[3]
        return( sum( (y-Ps*(1-exp(-alpha*x/Ps))*exp(-Beta*x/Ps))^2))
    } """)
    # Model evaluation at the observed light levels for a parameter set.
    min_adp = r("""
    min_ad<-function(params){
        alpha<-params[1]
        Beta<-params[2]
        Ps<-params[3]
        return( ( (Ps*(1-exp(-alpha*x/Ps)) *exp(-Beta*x/Ps)) ) )
    }""")
    # Minimise the objective from the initial guess with R's optim().
    r('etr_sim<-optim(par=ini, fn=platt)')
    r('p_alpha<-etr_sim$par[1]')
    r('p_Beta<-etr_sim$par[2]')
    r('p_Ps2<-etr_sim$par[3]')
    # Derive ETRmax; when Beta <= 0 (no photoinhibition term) ETRmax is Ps.
    r('''
    if (p_Beta==0 | p_Beta<0){
        p_etrmax<-p_Ps2
    }else {
        p_etrmax<-p_Ps2*(p_alpha/(p_alpha+p_Beta))*
        (p_Beta/(p_alpha+p_Beta))^(p_Beta/p_alpha)
    }
    p_Ek<-p_etrmax/p_alpha
    ''')
    iniR = r('etr_sim$par')
    # Modeled ETR values at the observed light levels.
    opts = np.append(opts, r('min_ad(par = etr_sim$par)'))
    cpars = r('as.data.frame(cbind(p_alpha, p_Ek, p_etrmax))')
    pars = [cpars['p_alpha'], cpars['p_Ek'], cpars['p_etrmax']]
    return iniR, opts, pars
def platt_opts(light, params):
    """
    Adjust `opt` values of PAR levels following the Platt model.

    Parameters
    ----------
    light : arr
        Generally PAR values, where Photosynthetic Active Radiance
        interferes with Primary Production. 'light' is this parameter.
    params : arr
        Containing values of (alpha, Beta, etrmax).

    Returns
    -------
    opts : arr
        Values optimized according to `params` and list of PAR levels.
    """
    opts = []
    # Push the NaN-filtered light levels and fitted parameters into R.
    r.assign("light", light[~np.isnan(light)])
    r.assign("params", params)
    # Historical variants kept for reference; they accepted extra
    # `opt`/`ini` arguments and re-ran the Platt fit internally.
    # if opt == None:
    #    r.assign("opt", light[~np.isnan(light)])
    # else:
    #    r.assign("opt", opt[~np.isnan(opt)])
    # if ini == None:
    #    r.assign('ini', [0.4,1.5,1500])
    # else:
    #    r.assign('ini', np.array(ini))
    # op, platt_param = platt(light,etr, ini=ini)
    # r.assign('platt_param', platt_param)
    # Evaluate the Platt model at each light level for the given params.
    min_opt = r("""
    min_opt<-function(light,params){
        alpha<-params[1]
        Beta<-params[2]
        Ps<-params[3]
        return( ( (Ps*(1-exp(-alpha*light/Ps)) *exp(-Beta*light/Ps)) ) )
    }""")
    opts = np.append(opts, r('min_opt(light, params)'))
    return opts
def eilers_peeters(light, etr, ini=None):
    """
    Adjust a best fit curve to ExP curves, according to the Eilers & Peeters
    Model.
    Parameters
    ----------
    light : arr
        Generally PAR values. Where Photosynthetic Active Radiance
        interfer on Primary Production. 'light' is this parameter.
    etr : arr
        Eletron Transference Rate, by means relative ETR, obtained from
        Rapid Light Curves.
    ini : None or list
        Initial values to seed the curve fit.
        To insert initial values, they must be a list
        of values of initial parameters (a,b,c) of Eilers-Peeters models.
        When None, starting values are found by a brute-force grid search
        with the R package `nls2`.
    Return
    ------
    iniR : arr
        Initial values modeled, with R `optim` function.
    opts : arr
        Values optimized
    params : arr
        Curve Parameters (alpha, Ek, ETR_max)
    See Also
    --------
    P.H.C. Eilers and J.C.H Peeters. 1988. A model for the relationship
    between the light intensity and the rate of photosynthesis in
    phytoplankton. Ecol. Model. 42:199-215.
    #TODO
    ## Implement minimisation in Python.
    ## It's not very clear how to apply `nls2` in Python.
    ## minimize from a list of initial values.
    ##a = varis[0]
    ##b = varis[1]
    ##c = varis[2]
    #a = mini['a']
    #b = mini['b']
    #c = mini['c']
    #opts = (light/(a*(light**2)+(b*light)+c))
    #ad = fmin(ep_minimize,varis,args=(light,etr))
    #alpha = (1./ad[2])
    #etrmax = 1./(ad[1]+2*(ad[0]*ad[2])**0.5)
    #Eopt = (ad[2]/ad[0])**0.5
    #Ek = etrmax/alpha
    #params = [alpha, Ek, etrmax, Eopt]
    """
    r('library(nls2)')
    # Push the observations into the R session, dropping NaN entries.
    # NOTE(review): x and y are NaN-filtered independently; if the NaN
    # positions differ between `light` and `etr` the (x, y) pairs become
    # misaligned -- confirm upstream guarantees matching NaN masks.
    r.assign("x", light[~np.isnan(light)])
    r.assign("y", etr[~np.isnan(etr)])
    r('dat<-as.data.frame(cbind(x,y))')
    r('names(dat)<-c("light","etr")')
    if ini is None:
        # No starting values given: brute-force a coarse parameter grid
        # with nls2 to obtain them.
        r('''grid<-expand.grid(list(a=seq(1e-07,9e-06,by=2e-07),
        b=seq(-0.002,0.006,by=0.002),c=seq(-6,6,by=2)))''')
        mini = r('''
        mini<-coefficients(nls2(etr~light/(a*light^2+b*light+c),
        data=dat, start=grid, algorithm="brute-force"))
        ''')
    else:
        mini = ini
    r.assign("mini", mini)
    # Refine the fit with nls ("port" algorithm; `a` is constrained >= 0)
    # and derive the photosynthetic parameters from the coefficients.
    r('''ep<-nls(etr~light/(a*light^2+b*light+c),data=dat,
    start=list(a=mini[1],b=mini[2],c=mini[3]),
    lower = list(0,-Inf,-Inf), trace=FALSE,
    algorithm = "port", nls.control("maxiter"=100000, tol=0.15))
    a2<-summary(ep)$coefficients[1]
    b2<-summary(ep)$coefficients[2]
    c2<-summary(ep)$coefficients[3]
    alpha<-(1/c2)
    etrmax<-1/(b2+2*(a2*c2)^0.5)
    Eopt<-(c2/a2)^0.5
    Ek<-etrmax/alpha''')
    iniR = mini
    # Pull the derived parameters back from the R session.
    alpha = r('alpha')
    Ek = r('Ek')
    etr_max = r('etrmax')
    params = [alpha, Ek, etr_max]
    opts = r('opts<-fitted(ep)')
    return iniR, opts, params
|
The Paranormal Bohemian: Mystical Art, Poetry, Prose, and Music.
-- A hauntingly beautiful story of a soul named Russell who searches the world and an eternity to resolve his "unfinished business".
-- On a dare, Tim enters a spooky, run-down house where a terrible werewolf is said to live. What will he find inside?
|
# -*- coding: utf-8 -*-
#
# Manages user accounts stored in MoinMoin user directory.
# Author: HolgerCremer@gmail.com
from os import listdir, stat
from os.path import join, exists
import re
from passlib.context import CryptContext
class MoinMoinUserDir():
    """Reads user accounts from a MoinMoin wiki user directory.

    Exposes the user names and verifies passwords against the stored
    password hashes. Results are cached; the cache is refreshed whenever
    the stat fingerprint of the user directory changes (or on every
    request when caching is disabled).
    """

    # Only files whose names consist entirely of digits and dots are
    # treated as user records.
    USER_FILE_RE = re.compile(r'^[0-9\.]+$')

    def __init__(self, logger, mm_user_dir, disable_cache):
        """
        logger        -- logger used for info/debug/warn output
        mm_user_dir   -- path of the MoinMoin user directory
        disable_cache -- when true, re-read the directory on every request
        """
        if mm_user_dir is None:
            raise ValueError('No "mm_user_dir" configuration.')
        if not exists(mm_user_dir):
            raise ValueError('mm_user_dir "%s" doesn`t exist!' % mm_user_dir)
        self._crypt_context = CryptContext(
            # is the default value in the MoinMoin wiki
            schemes=['sha512_crypt', ]
        )
        self._log = logger
        self._mm_user_dir = mm_user_dir
        self._disable_cache = disable_cache
        self._user_cache = None
        self._user_cache_check = None

    def get_users(self):
        """Return a list of all known user names."""
        return list(self._list_users_and_pw())

    def check_password(self, user, password):
        """Verify *password* for *user*.

        Returns True/False when the user exists, or None when the user
        is unknown (three-state result, kept for backward compatibility).
        """
        users = self._list_users_and_pw()
        if user in users:
            pw_correct = self._crypt_context.verify(password, users[user])
            self._log.info('User %s found, pw check success: %s' % (user, pw_correct))
            return pw_correct
        return None

    def _list_users_and_pw(self):
        """Return a dict mapping user name -> password hash, re-reading the
        directory only when the cache is stale."""
        if not self._must_read_again():
            return self._user_cache
        self._log.debug('read user data again')
        users = {}
        for user_file in listdir(self._mm_user_dir):
            if self.USER_FILE_RE.match(user_file) is None:
                continue
            (name, password) = self._get_name_and_password(user_file)
            if name is None:
                continue
            name = name.decode('utf8')
            users[name] = password
        self._user_cache = users
        return users

    def _must_read_again(self):
        """Decide whether the user directory must be re-read."""
        if self._disable_cache:
            return True
        if self._user_cache is None or self._user_cache_check is None:
            self._user_cache_check = self._get_dir_check_value()
            return True
        new_check = self._get_dir_check_value()
        if new_check == self._user_cache_check:
            return False
        self._user_cache_check = new_check
        return True

    def _get_dir_check_value(self):
        """Build a change-detection fingerprint of the user directory."""
        st = stat(self._mm_user_dir)
        # Named attributes instead of unpacking the whole 10-tuple.
        # NOTE(review): atime may change on mere directory reads on some
        # filesystems, which can invalidate the cache more often than
        # strictly necessary.
        return '%s-%s-%s-%s' % (st.st_size, st.st_atime, st.st_mtime, st.st_ctime)

    def _get_name_and_password(self, file_name):
        """Extract (name, password hash) from one MoinMoin user file.

        Returns (None, None) when the entries are missing or the stored
        hash does not carry the passlib scheme prefix.
        """
        name_prefix = 'name='
        pw_prefix = 'enc_password='
        scheme_prefix = '{PASSLIB}'
        name, password = None, None
        # `fh` rather than `file` to avoid shadowing the builtin.
        with open(join(self._mm_user_dir, file_name), "r") as fh:
            for line in fh:
                if line.startswith(name_prefix):
                    # remove prefix and newline
                    name = line[len(name_prefix):len(line) - 1]
                elif line.startswith(pw_prefix):
                    # remove prefix and newline
                    password = line[len(pw_prefix):len(line) - 1]
                    # check for passlib prefix
                    if not password.startswith(scheme_prefix):
                        self._log.warn('Unsupported scheme prefix. User "%s" won\'t login.' % file_name.encode('utf8', 'ignore'))
                        return None, None
                    # remove the scheme prefix
                    password = password[len(scheme_prefix):]
                if name is not None and password is not None:
                    return name, password
        self._log.warn('No %s and %s entries found for file %s.' % (name_prefix, pw_prefix, file_name.encode('utf8', 'ignore')))
        return None, None
|
To access the Timetag API you need to acquire authorization through OAuth2.
Contact us at info@timetag.it to request the registration of your application.
We will provide you with an OAuth2 client id and secret.
Our API only supports the Implicit Grant flow of OAuth2, so make sure you implement this flow in your application.
Scopes are used to grant an application different levels of access to operations on behalf of the end user. Each API may declare one or more scopes.
|
#!/usr/bin/env python
import re
import time
import requests
import argparse
from pprint import pprint
import ast
import os
from sys import exit
from prometheus_client import start_http_server
from prometheus_client.core import GaugeMetricFamily, REGISTRY
# Verbose debug printing is enabled by setting DEBUG to a non-zero integer
# in the environment (defaults to off).
DEBUG = int(os.environ.get('DEBUG', '0'))
def must_env(var):
    """Return the value of environment variable *var*, failing if unset.

    Raises:
        Exception: if *var* is not present in the environment.
    """
    val = os.environ.get(var)
    if val is None:
        # The previous message said "Error reading token" even though this
        # helper is used for arbitrary configuration variables.
        raise Exception('Required environment variable not set (%s)' % var)
    return val
def load_env():
    """Populate the module-level configuration globals from the environment.

    Raises (via must_env) when any of the required variables is unset.
    """
    global BEEPING_SERVER, BEEPING_METRICS_PORT, BEEPING_CHECKS
    BEEPING_SERVER = must_env('BEEPING_SERVER')
    BEEPING_METRICS_PORT = must_env('BEEPING_METRICS_PORT')
    BEEPING_CHECKS = must_env('BEEPING_CHECKS')
class BeepingCollector(object):
# The sites we want to monitor.
sites = ast.literal_eval(must_env('BEEPING_CHECKS'))
def __init__(self, target):
self._target = target.rstrip("/")
def collect(self):
sites_data = self._request_data()
self._setup_empty_prometheus_metrics()
for site in self.sites:
if DEBUG:
print "working with site: %s" % site
pprint(sites_data[site])
self._get_metrics(site,sites_data[site])
if DEBUG:
print "_prometheus_metrics"
pprint(self._prometheus_metrics)
for metric in self._prometheus_metrics.values():
yield metric
def _request_data(self):
# Request the information we need from Beeping
beeping_url = '{0}/check'.format(self._target) # @TODO no need for the format i think
if DEBUG:
print "_request_data >> beeping_url: %s" % beeping_url
def queryBeeping(myurl):
result = {}
for site in self.sites:
result[site] = {}
data = {}
params = self.sites[site]
response = requests.post(myurl, json=params)
if response.status_code != requests.codes.ok:
return[]
data = response.json()
result[site] = data
return result
return queryBeeping(beeping_url)
def _setup_empty_prometheus_metrics(self):
# The metrics we want to export.
self._prometheus_metrics = {}
self._prometheus_metrics = {
'dns_lookup':
GaugeMetricFamily('beeping_dns_lookup',
'site dns_lookup in seconds', labels=["site"]),
'tcp_connection':
GaugeMetricFamily('beeping_tcp_connection',
'site tcp_connection in seconds', labels=["site"]),
'tls_handshake':
GaugeMetricFamily('beeping_tls_handshake',
'site tls_handshake in seconds', labels=["site"]),
'server_processing':
GaugeMetricFamily('beeping_server_processing',
'site server_processing in seconds', labels=["site"]),
'content_transfer':
GaugeMetricFamily('beeping_content_transfer',
'site content_transfer in seconds', labels=["site"]),
'http_request_time':
GaugeMetricFamily('beeping_http_request_time_seconds',
'site http_request_time in seconds', labels=["site"]),
'http_status_code':
GaugeMetricFamily('beeping_http_status_code',
'site http_status_code', labels=["site"]),
'http_body_pattern':
GaugeMetricFamily('beeping_http_body_pattern',
'site http_body_pattern found', labels=["site"]),
'timeline_name_lookup':
GaugeMetricFamily('beeping_timeline_name_lookup',
'site timeline name_lookup in seconds', labels=["site"]),
'timeline_connect':
GaugeMetricFamily('beeping_timeline_connect',
'site timeline connect in seconds', labels=["site"]),
'timeline_pretransfer':
GaugeMetricFamily('beeping_timeline_pretransfer',
'site timeline pretransfer in seconds', labels=["site"]),
'timeline_starttransfer':
GaugeMetricFamily('beeping_timeline_starttransfer',
'site timeline starttransfer in seconds', labels=["site"]),
'ssl_cert_expiry_days_left':
GaugeMetricFamily('beeping_ssl_cert_expiry_days_left',
'ssl cert expiry days left', labels=["site"]),
}
def _get_metrics(self, site, site_data):
if DEBUG:
print "====== get_metrics checking site: "+site
print site_data.get('http_status_code')
if site_data.get('http_status_code', 0):
self._prometheus_metrics['http_status_code'].add_metric([site], site_data.get('http_status_code'))
if site_data.get('http_body_pattern'):
http_body_pattern_value = 1
else:
http_body_pattern_value = 0
self._prometheus_metrics['http_body_pattern'].add_metric([site], http_body_pattern_value)
# metrics
self._prometheus_metrics['dns_lookup'].add_metric([site], site_data.get('dns_lookup'))
self._prometheus_metrics['tcp_connection'].add_metric([site], site_data.get('tcp_connection'))
if site_data.get('tls_handshake', 0):
self._prometheus_metrics['tls_handshake'].add_metric([site], site_data.get('tls_handshake'))
self._prometheus_metrics['server_processing'].add_metric([site], site_data.get('server_processing'))
self._prometheus_metrics['content_transfer'].add_metric([site], site_data.get('content_transfer'))
self._prometheus_metrics['http_request_time'].add_metric([site], site_data.get('http_request_time'))
# timeline data
self._prometheus_metrics['timeline_name_lookup'].add_metric([site], site_data.get('timeline',0).get('name_lookup',0))
self._prometheus_metrics['timeline_connect'].add_metric([site], site_data.get('timeline',0).get('connect',0))
self._prometheus_metrics['timeline_pretransfer'].add_metric([site], site_data.get('timeline',0).get('pretransfer',0))
self._prometheus_metrics['timeline_starttransfer'].add_metric([site], site_data.get('timeline',0).get('starttransfer',0))
# ssl
if site_data.get('ssl'):
self._prometheus_metrics['ssl_cert_expiry_days_left'].add_metric([site], site_data.get('ssl').get('cert_expiry_days_left'))
def parse_args():
    """Parse the exporter's command-line options.

    Environment variables BEEPING_SERVER and BEEPING_METRICS_PORT provide
    the defaults when the flags are not given on the command line.
    """
    parser = argparse.ArgumentParser(
        description='beeping exporter args beeping address and port'
    )
    parser.add_argument(
        '-j', '--beeping',
        metavar='beeping', required=False,
        default=os.environ.get('BEEPING_SERVER', 'http://localhost:8080'),
        help='server url from the beeping api',
    )
    parser.add_argument(
        '-p', '--port',
        metavar='port', required=False, type=int,
        default=int(os.environ.get('BEEPING_METRICS_PORT', '9118')),
        help='Listen to this port',
    )
    return parser.parse_args()
# Module-level configuration; populated from the environment by load_env()
# at startup.
BEEPING_SERVER = None
BEEPING_METRICS_PORT = None
BEEPING_CHECKS = None
def main():
    """Entry point: validate configuration, register the Beeping collector
    with Prometheus and serve metrics until interrupted."""
    try:
        load_env()
        args = parse_args()
        port = int(args.port)
        REGISTRY.register(BeepingCollector(args.beeping))
        start_http_server(port)
        print "Polling %s. Serving at port: %s" % (args.beeping, port)
        # start_http_server serves from a background thread; just keep the
        # main thread alive.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print(" Interrupted")
        exit(0)
if __name__ == "__main__":
    main()
|
Matt Horton is a technology professional and software developer with over three decades of experience in start-up situations ranging in industries from Telecommunications, Internet, Financial, Automotive, Building Automation & Energy Management, and currently the Industrial Internet of Things. Currently, Matt is the Chief Software Architect and Product Manager of sensorFact™ Services for Energy Control Technologies, Inc.
With a passion for education and civic duty, Matt is excited to bring his talents and experience to the efforts of Good Citizen. It is his ambition to help other Americans understand their critical roles and civic responsibilities in keeping this nation free and operating for the benefit of all those that regard this country as home.
Matt resides near the beautiful waters of Pompano Beach Florida, and is the proud father of one daughter Emily, and three sons, Marshall, Lawrence, and Harrison; he also has four grandchildren. Outside of his work, Matt enjoys cooking, writing, graphic arts, and playing his favorite game “Sid Meier's Civilization”.
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START streaming-aead-example]
"""A command-line utility for using streaming AEAD for a file.
It loads cleartext keys from disk - this is not recommended!
It requires 4 arguments (and one optional one):
mode: either 'encrypt' or 'decrypt'
keyset_path: name of the file with the keyset to be used for encryption or
decryption
input_path: name of the file with the input data to be encrypted or decrypted
output_path: name of the file to write the ciphertext respectively plaintext
to
[optional] associated_data: the associated data used for encryption/decryption
provided as a string.
"""
from __future__ import absolute_import
from __future__ import division
# Placeholder for import for type annotations
from __future__ import print_function
from typing import BinaryIO
# Special imports
from absl import app
from absl import flags
from absl import logging
import tink
from tink import cleartext_keyset_handle
from tink import streaming_aead
FLAGS = flags.FLAGS
BLOCK_SIZE = 1024 * 1024  # The CLI tool will read/write at most 1 MB at once.
# Command-line flags. mode, keyset_path, input_path and output_path are
# marked required in the __main__ guard below; associated_data is optional.
flags.DEFINE_enum('mode', None, ['encrypt', 'decrypt'],
                  'Selects if the file should be encrypted or decrypted.')
flags.DEFINE_string('keyset_path', None,
                    'Path to the keyset used for encryption or decryption.')
flags.DEFINE_string('input_path', None, 'Path to the input file.')
flags.DEFINE_string('output_path', None, 'Path to the output file.')
flags.DEFINE_string('associated_data', None,
                    'Associated data used for the encryption or decryption.')
def read_as_blocks(file: BinaryIO):
    """Yield successive chunks of at most BLOCK_SIZE bytes from *file*.

    Args:
      file: The file object to read from.

    Yields:
      Up to BLOCK_SIZE bytes per iteration, until the underlying read
      returns b'' (EOF for a raw binary stream).
    """
    # iter() with a b'' sentinel stops exactly when read() returns b'',
    # matching the explicit while/break loop it replaces.
    for data in iter(lambda: file.read(BLOCK_SIZE), b''):
        yield data
def encrypt_file(input_file: BinaryIO, output_file: BinaryIO,
                 associated_data: bytes,
                 primitive: streaming_aead.StreamingAead):
    """Encrypts input_file into output_file with a streaming AEAD primitive.

    Args:
      input_file: File the plaintext is read from.
      output_file: File the ciphertext is written to.
      associated_data: Associated data bound to the ciphertext.
      primitive: The streaming AEAD primitive used for encryption.
    """
    with primitive.new_encrypting_stream(output_file,
                                         associated_data) as ciphertext_dest:
        for plaintext_block in read_as_blocks(input_file):
            ciphertext_dest.write(plaintext_block)
def decrypt_file(input_file: BinaryIO, output_file: BinaryIO,
                 associated_data: bytes,
                 primitive: streaming_aead.StreamingAead):
    """Decrypts input_file into output_file with a streaming AEAD primitive.

    Exits the program with status 1 when decryption fails.

    Args:
      input_file: File the ciphertext is read from.
      output_file: File the recovered plaintext is written to.
      associated_data: Associated data that was bound at encryption time.
      primitive: The streaming AEAD primitive used for decryption.
    """
    try:
        with primitive.new_decrypting_stream(input_file,
                                             associated_data) as plaintext_src:
            for plaintext_block in read_as_blocks(plaintext_src):
                output_file.write(plaintext_block)
    except tink.TinkError as e:
        logging.exception('Error decrypting ciphertext: %s', e)
        exit(1)
def main(argv):
    """Encrypts or decrypts FLAGS.input_path into FLAGS.output_path."""
    del argv  # unused
    associated_data = b'' if not FLAGS.associated_data else bytes(
        FLAGS.associated_data, 'utf-8')
    # Initialise Tink.
    try:
        streaming_aead.register()
    except tink.TinkError as e:
        logging.exception('Error initialising Tink: %s', e)
        return 1
    # Read the keyset into a keyset_handle.
    # NOTE: the keyset is loaded from cleartext on disk, as warned in the
    # module docstring -- not suitable for production use.
    with open(FLAGS.keyset_path, 'rt') as keyset_file:
        try:
            text = keyset_file.read()
            keyset_handle = cleartext_keyset_handle.read(tink.JsonKeysetReader(text))
        except tink.TinkError as e:
            logging.exception('Error reading key: %s', e)
            return 1
    # Get the primitive.
    try:
        streaming_aead_primitive = keyset_handle.primitive(
            streaming_aead.StreamingAead)
    except tink.TinkError as e:
        logging.exception('Error creating streaming AEAD primitive from keyset: %s',
                          e)
        return 1
    # Encrypt or decrypt the file.
    with open(FLAGS.input_path, 'rb') as input_file:
        with open(FLAGS.output_path, 'wb') as output_file:
            if FLAGS.mode == 'encrypt':
                encrypt_file(input_file, output_file, associated_data,
                             streaming_aead_primitive)
            elif FLAGS.mode == 'decrypt':
                decrypt_file(input_file, output_file, associated_data,
                             streaming_aead_primitive)
if __name__ == '__main__':
    flags.mark_flag_as_required('mode')
    flags.mark_flag_as_required('keyset_path')
    flags.mark_flag_as_required('input_path')
    flags.mark_flag_as_required('output_path')
    app.run(main)
# [END streaming-aead-example]
|
Love one another. As I have loved you, so you must love one another. By this all men will know that you are my disciples if you love one another.
No one to calm her fears.
Keeping it inside and secured.
Her mental health it has cost.
Through His eyes she is adored.
And when you stand praying, if you hold anything against anyone, forgive him, so that your Father in heaven may forgive you your sins.
Be merciful to me, O Lord, for I am in distress; my eyes grow weak with sorrow, my soul and my body with grief. My life is consumed by anguish and my years by groaning; my strength fails because of my affliction, and my bones grow weak. Because of all my enemies, I am the utter contempt of my neighbors; I am a dread to my friends-those who see me on the street flee from me. I am forgotten by them as though I were dead; I have become like broken pottery. For I hear the slander of many, there is terror on every side, they conspire against me and plot to take my life. But I trust in you, O Lord; I say, "You are my God," deliver me from my enemies and from those who pursue me. Let your face shine on your servant; save me in your unfailing love. Let me not be put to shame, O Lord, for I have cried out to you; but let the wicked be put to shame and lie silent in the grave. Let their lying lips be silenced, for with pride and contempt they speak arrogantly against the righteous.
My personal publishing company. Reaching out to others that feel lost and alone. Writing has helped me heal through my Lord and Savior's gracious gift. Sharing a hug through words. Please click the link to see descriptions of all my books.
|
#!/usr/bin/python
""" This tool loads Senate Data directly from the government's website, parses
thru the XML files, and converts into a usable CSV file.
by Brandon Roberts 2012 copyleft GPLv3+."""
import requests
import argparse
import xml.etree.ElementTree as ET
import csv
import re
def fetch( url):
    """Fetch *url* and return the response body, retrying until HTTP 200.

    NOTE(review): if the server keeps answering with a non-200 status (or
    keeps redirecting -- redirects are not followed), this loops forever
    with no back-off or sleep; confirm that is the intended behaviour.
    """
    headers = {"User-Agent": "Mozilla/5.0 (Window NT 6.1; WOW64; rv:17.0)"
               "Gecko/17.0 Firefox/17.0"}
    # `loaded` is never set to True; the loop can only exit via the return.
    loaded = False
    while not loaded:
        try:
            r = requests.get(url, headers=headers, allow_redirects=False)
            if r.status_code == 200:
                return r.text
        except Exception as e:
            print "[!] Error fetching %s\n[!] %s" % (url, e)
def fetch_bills( SENATE, SESSION ):
    """ This first fetches the number of bills in this senate session, then it
    iterates through them and collects the raw xml for each bill's vote.
    Parameters:
        SENATE : The number of senate to search, i.e., 111
        SESSION : Which senate session? i.e, 1 or 2
    Returns:
        A list of every XML file containing information about every
        bill voted on for the senate session.
    """
    bills = []
    # Get number of bills from internet
    URLM=( "http://www.senate.gov/legislative/LIS/roll_call_lists/"
          "vote_menu_%s_%s.xml"%(SENATE,SESSION))
    xmlf0 = fetch( URLM)
    tree = ET.fromstring(xmlf0)
    # this is the number of bills
    # NOTE(review): tree[3][0][0] addresses the vote count by position in
    # the menu XML; confirm it still matches the feed's current layout.
    TOTAL_BILLS = int(tree[3][0][0].text)
    print "[*] Total Bills in Senate %s Session %s: %s" % (SENATE, SESSION,
                                                           TOTAL_BILLS)
    # Get all senate voting files ... this could be multiprocessed, but it's
    # not really worth the effort to me right now
    bills = []
    # Vote numbers are zero-padded to five digits in the URL scheme.
    for b in xrange( 1, TOTAL_BILLS+1):
        b = str(b).zfill(5)
        print( "[ ] Fetching record SENATE: %s SESSION: "
               "%s NUM: %s"%(SENATE, SESSION, b))
        URL=("http://www.senate.gov/legislative/LIS/roll_call_votes/vote%s%s/"
             "vote_%s_%s_%s.xml"%(SENATE, SESSION, SENATE, SESSION, b))
        bill = fetch( URL)
        bills.append( bill)
    return bills
def process_bills( FIRST, LAST, bills):
    """ This returns a particular senator's voting record from raw XML text
    with information about senate bills and their voters.
    Parameters:
        FIRST : Senator's first name
        LAST : Senator's last name
        bills : a list of raw XML strings containing the senate voting records
    Yields:
        (bill description, vote) tuples for each bill on which the senator
        voted "Yea" or "Nay" (other vote values are skipped).
    """
    print "[*] TOTAL BILLS TO PROCESS %s" % len( bills)
    n = 0
    for bill in bills:
        print "[*] PROCESSING NUM: %s"%n
        n +=1
        tree = ET.fromstring( bill)
        # Get votes from this record
        # NOTE(review): tree[7] is addressed by position and is assumed to
        # hold the bill/question description -- confirm against the schema.
        text = tree[7].text
        if text:
            # keep only alphanumerics and spaces
            text = re.sub('[^A-Za-z0-9 ]', '', text)
        # this next section loops through all the voters (senators) and looks
        # for a vote from the senator we want
        last = ""
        first= ""
        vote = ""
        for member in tree[17]:
            l = member[1].text # last
            f = member[2].text # first
            v = member[5].text # vote
            if l.lower() == LAST.lower() and f.lower() == FIRST.lower():
                last = l
                first = f
                vote = v
                break
        if vote == "Yea" or vote == "Nay":
            yield text, vote
def voting_record( FIRST, LAST, SENATE, SESSION):
    """ This is a wrapper for the process_bills and fetch_bills functions. Give
    it a Senator's first and last names, the senate number and session and
    it will tell you how the senator voted on particular bills.
    Parameters:
        FIRST : Senator's first name
        LAST : Senator's last name
        SENATE : The number of senate to search, i.e., 111
        SESSION : Which senate session? i.e, 1 or 2
    Returns:
        An iterable of (bill description, vote) pairs for the senator.
    """
    bills = fetch_bills( SENATE, SESSION )
    print "[*] Processing bills XML"
    # process_bills is a generator, so the XML is parsed lazily while the
    # caller iterates the result.
    return process_bills( FIRST, LAST, bills)
def argz():
    """Build and parse the command-line arguments for this tool.

    All five arguments are positional: first, last, senate, session, ofile.
    """
    desc = ("This tool loads Senate Data directly from the government's website,"
            " parses thru the XML files, and converts into a usable CSV file. "
            "It's classified by Yea or Nay vote and "
            "looks at the description of the bill as the string. "
            "by Brandon Roberts 2012 copyleft GPL3+.")
    # The bare ArgumentParser() that used to be created before `desc` was a
    # dead store (immediately overwritten) and has been removed.
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument("first", type=str, help="first name of politician")
    parser.add_argument("last", type=str, help="last name of politician")
    parser.add_argument("senate", type=str, help="senate ... 111th would be 111")
    parser.add_argument("session", type=str, help="1 or 2")
    parser.add_argument("ofile", type=str, help="file to write csv to")
    return parser.parse_args()
def write_csv( recs, OFILE):
    """ Write our senate voting record to disk as a two-column CSV.
    Parameters:
        recs : our iterable list containing a senate voting record
        OFILE : the filename to write the CSV to (".csv" is appended when
                missing)
    """
    if ".csv" not in OFILE:
        filename = "%s.csv"%OFILE
    else:
        filename = OFILE
    print("[*] Writing to %s"%filename)
    header = [ "BILL_SUMMARY", "VOTE"]
    with open(filename, 'wb') as f:
        # csv.writer's second positional argument is the *dialect*; the
        # header list that used to be passed there was silently ignored,
        # so it has been dropped (the header is written with writerow).
        w = csv.writer(f)
        w.writerow( header)
        w.writerows( recs)
def main():
    """Command-line entry point: fetch one senator's voting record and
    write it out as a CSV file."""
    # do cmd line arguments
    args = argz()
    # our input varz
    SENATE = args.senate # 111
    SESSION = args.session # 1
    LAST = args.last # "Franken"
    FIRST = args.first # "Al"
    OFILE = args.ofile # "franken.2009.arff"
    # fetch voting record
    print "[*] Getting %s %s's voting record"%(FIRST, LAST)
    recs = voting_record( FIRST, LAST, SENATE, SESSION)
    # write voting record
    write_csv( recs, OFILE)
    # who woulda thought it was so easy?
    print "[*] Boom Shucka lucka."
if __name__ == "__main__":
    main()
|
Electric garage door openers are a very convenient home feature to have. But when is it time for a brand-new one? And exactly what do you have to know about getting a garage door opener setup? Here are five of the most important things to know about garage door openers and their setup procedure.
To start with, home owners have the choice to pick in between automatic and manual doors. In the past, there has been some concern over whether the automated doors posed a safety danger within houses with property, family pets or children, however there are now security includes added to these doors to combat the risks. There are lots of benefits to having automatic doors installed over manual options, consisting of cost, ease of access and convenience.
Tilt doors are pretty standard; when they open, they do so via a tilting mechanism. There are two options for the type of fitting used: J or jamb fittings and T or track fittings. A Jamb type tilt door is the traditional fitting. This door tilts outward and then raises to open half within and half sitting outside the opening of the garage. A track type tilt door works a bit differently from the traditional tilt-up. Basically, this option provides people the opportunity to really tilt the door up and backwards to recess the door into the garage. Tilt doors offer you the most opening height of any of the door choices and are the best option when you do not have a great deal of head room in your garage. Some people don’t like the look of the basic tilt door as it has actually been around for a long time and can sometimes date the appearance of your house. It is preferred at the moment, when installing a brand-new tilt door, to have the door sheeted in sectional/panel door sheeting, which gives the door a really modern look.
Sectional doors are typically divided into at least four panels stacked on top of each other that will ultimately become the structure of the entire garage. These divisions actually allow the garage to withdraw upwards so that it doesn’t move outwards in order for the garage to open, as they hinged together and have actually rollers placed into the hinges, assisted by a track each side. Garage doors of this nature have the tendency to be more popular because of the many styles and looks offered.
Residential garage door openers normally have 1/2 or 1/3 horsepower motors. You might be amazed to know that a garage door opener doesn’t in fact do the majority of the opening and closing of your garage door. The garage door’s springs manage the bulk of the work.
It’s vital that these springs are appropriately adjusted during your garage door opener setup. Garage doors are heavy, they’re likely your home’s most significant mechanical device. Without the correct door spring tension, your opener motor will have a much shorter lifespan.
Garage door openers have come a long way in regards to style. They’re certainly more energy effective. In addition to standard security features that have been around for years (like eye sensing units that identify obstructions in the door’s course and instantly reverse its direction), there are also great deals of modern tech features available for today’s garage door openers that are extremely convenient.
Even for those that are mechanically inclined, a garage door opener setup should be approached with lots of care. Adjusting a garage door’s springs without specialist understanding poses the risk of serious injury.
A garage door opener setup also requires knowledge of locations such as electrical circuitry, understanding just how much horsepower is needed for certain door weights, and guaranteeing all safety measures work appropriately. Our professionals can take care of all elements of your garage door opener setup.
We have a wealth of stylish and appealing steel, wood, fibreglass, and full view aluminum garage door designs to choose from. Modern garage doors can be personalized with better insulation alternatives that will improve your home’s energy effectiveness. Garage door insulation also reduces incoming and outgoing noise and assists to strengthen the door.
|
"""Base settings shared by all environments"""
# Import global settings to make it easier to extend settings.
from django.conf.global_settings import * # pylint: disable=W0614,W0401
#==============================================================================
# Generic Django project settings
#==============================================================================
# NOTE(review): DEBUG is enabled in these base settings; the production
# settings module must override it with DEBUG = False.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
SITE_ID = 1
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
TIME_ZONE = 'UTC'
USE_TZ = True
USE_I18N = True
USE_L10N = True
LANGUAGE_CODE = 'en'
LANGUAGES = (
    ('en', 'English'),
)
# Make this unique, and don't share it with anybody.
# NOTE(review): this secret key is committed to version control; rotate it
# and load the real value from the environment or a secrets store.
SECRET_KEY = '=q9gc4e4c=w^-4fnt3#vsnsm=n*k_ttcr@8_6wtma(h*w4-e$_'
INSTALLED_APPS = (
    'loganalyzer.apps.nginxlog',
    'south',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'django.contrib.admindocs',
)
#==============================================================================
# Calculation of directories relative to the project module location
#==============================================================================
import os
import sys
import loganalyzer as project_module
# Location of the project package and of the running interpreter's bin/
# directory; both are used below to find a writable "var" root.
PROJECT_DIR = os.path.dirname(os.path.realpath(project_module.__file__))
PYTHON_BIN = os.path.dirname(sys.executable)
ve_path = os.path.dirname(os.path.dirname(os.path.dirname(PROJECT_DIR)))
# Assume that the presence of 'activate_this.py' in the python bin/
# directory means that we're running in a virtual environment.
if os.path.exists(os.path.join(PYTHON_BIN, 'activate_this.py')):
    # We're running with a virtualenv python executable.
    VAR_ROOT = os.path.join(os.path.dirname(PYTHON_BIN), 'var')
elif ve_path and os.path.exists(os.path.join(ve_path, 'bin',
                                             'activate_this.py')):
    # We're running in [virtualenv_root]/src/[project_name].
    VAR_ROOT = os.path.join(ve_path, 'var')
else:
    # Set the variable root to a path in the project which is
    # ignored by the repository.
    VAR_ROOT = os.path.join(PROJECT_DIR, 'var')
# Create the var root on first use; note that importing the settings
# module therefore has a filesystem side effect.
if not os.path.exists(VAR_ROOT):
    os.mkdir(VAR_ROOT)
#==============================================================================
# Project URLS and media settings
#==============================================================================
ROOT_URLCONF = 'loganalyzer.urls'
LOGIN_URL = '/login/'
LOGOUT_URL = '/logout/'
LOGIN_REDIRECT_URL = '/'
STATIC_URL = '/static/'
MEDIA_URL = '/uploads/'
STATIC_ROOT = os.path.join(VAR_ROOT, 'static')
MEDIA_ROOT = os.path.join(VAR_ROOT, 'uploads')
STATICFILES_DIRS = (
os.path.join(PROJECT_DIR, 'static'),
)
#==============================================================================
# Templates
#==============================================================================
TEMPLATE_DIRS = (
os.path.join(PROJECT_DIR, 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS += (
)
#==============================================================================
# Middleware
#==============================================================================
MIDDLEWARE_CLASSES += (
)
#==============================================================================
# Auth / security
#==============================================================================
AUTHENTICATION_BACKENDS += (
)
#==============================================================================
# Miscellaneous project settings
#==============================================================================
#==============================================================================
# Third party app settings
#==============================================================================
|
A Colorado man has been arrested after allegedly answering a Craigslist ad posted by a teenage girl asking someone to kill her.
Joseph Michael Lopez, 22, was arrested on Friday (local time) for shooting 19-year-old Natalie Bollinger dead.
Mr Lopez told police Ms Bollinger had posted an ad in the "woman seeking men" category of Craigslist saying, "I want to put a hit on myself".
He said after a conversation spanning more than 100 texts, he agreed to kill her for a price.
They met on December 28, at which point Mr Lopez tried to talk Ms Bollinger out of the plan.
However, he said she persisted, bringing her own gun and telling him she wanted to be "executed from behind".
She allegedly said she was having issues with her boyfriend, who had reported her missing.
Mr Lopez says they both knelt down and prayed, then he got up, closed his eyes and shot her.
He then fled with the gun and her purse.
Ms Bollinger's autopsy results showed high levels of heroin in her system.
Loved ones told police Ms Bollinger had a history of suicidal thoughts but had been happy recently.
Legal experts told local media that even if Mr Lopez's version of events is true, he will still be tried for murder. However, he may receive a lesser sentence if it is proven that she asked him to kill her.
|
from copy import copy
import datetime
import pandas as pd
from syscore.objects import arg_not_supplied, failure, missing_data
from sysdata.production.timed_storage import (
listOfEntriesData,
)
from sysobjects.production.capital import capitalEntry, LIST_OF_COMPOUND_METHODS, totalCapitalUpdater
## All capital is stored by strategy, but some 'strategies' actually relate to the total global account
from sysobjects.production.timed_storage import listOfEntries
# Reserved pseudo-strategy names: account-level series are stored in the same
# per-strategy storage as real strategies, under these sentinel names.
GLOBAL_STRATEGY = "_GLOBAL"  # total capital across all strategies
BROKER_ACCOUNT_VALUE = "_BROKER"  # raw broker account valuation
MAXIMUM_ACCOUNT_VALUE = "_MAX"  # high-water mark (used for half compounding)
ACC_PROFIT_VALUES = "_PROFIT"  # accumulated profit and loss
SPECIAL_NAMES = [
    GLOBAL_STRATEGY,
    BROKER_ACCOUNT_VALUE,
    MAXIMUM_ACCOUNT_VALUE,
    ACC_PROFIT_VALUES,
]
class capitalForStrategy(listOfEntries):
    """Timed series of capitalEntry objects for a single strategy."""

    def _entry_class(self):
        # Tell the generic listOfEntries machinery which concrete entry
        # type this series holds.
        return capitalEntry
class capitalData(listOfEntriesData):
    """
    Store and retrieve the capital assigned to a particular strategy

    A separate process is required to map from account value to strategy capital

    We also store the total account value (GLOBAL_STRATEGY), broker account value
    (BROKER_ACCOUNT_VALUE), and for half compounding purposes MAXIMUM_ACCOUNT_VALUE
    """

    def _data_class_name(self) -> str:
        # Tells the generic timed-storage layer which series class to build.
        return "sysdata.production.capital.capitalForStrategy"

    def get_total_capital_pd_df(self) -> pd.DataFrame:
        """Total capital series across all strategies."""
        return self.get_capital_pd_df_for_strategy(GLOBAL_STRATEGY)

    def get_broker_account_value_pd_df(self) -> pd.DataFrame:
        """Raw broker account value series."""
        return self.get_capital_pd_df_for_strategy(BROKER_ACCOUNT_VALUE)

    def get_maximum_account_value_pd_df(self) -> pd.DataFrame:
        """High-water-mark series (used for half compounding)."""
        return self.get_capital_pd_df_for_strategy(MAXIMUM_ACCOUNT_VALUE)

    def get_profit_and_loss_account_pd_df(self) -> pd.DataFrame:
        """Accumulated profit and loss series."""
        return self.get_capital_pd_df_for_strategy(ACC_PROFIT_VALUES)

    def get_capital_pd_df_for_strategy(self, strategy_name: str) -> pd.DataFrame:
        """Return the stored capital series for one strategy as a DataFrame."""
        capital_series = self.get_capital_series_for_strategy(strategy_name)
        pd_series = capital_series.as_pd_df()
        return pd_series

    def get_capital_series_for_strategy(self, strategy_name: str) -> capitalForStrategy:
        """Return the raw capitalForStrategy series for one strategy."""
        capital_series = self._get_series_for_args_dict(
            dict(strategy_name=strategy_name)
        )
        return capital_series

    def get_current_total_capital(self) -> float:
        return self.get_current_capital_for_strategy(GLOBAL_STRATEGY)

    def get_broker_account_value(self) -> float:
        return self.get_current_capital_for_strategy(BROKER_ACCOUNT_VALUE)

    def get_current_maximum_account_value(self) -> float:
        return self.get_current_capital_for_strategy(MAXIMUM_ACCOUNT_VALUE)

    def get_current_pandl_account(self) -> float:
        return self.get_current_capital_for_strategy(ACC_PROFIT_VALUES)

    def get_current_capital_for_strategy(self, strategy_name: str) -> float:
        """Latest capital value for a strategy, or missing_data if none stored."""
        current_capital_entry = self.get_last_entry_for_strategy(strategy_name)
        if current_capital_entry is missing_data:
            return missing_data

        capital_value = current_capital_entry.capital_value

        return capital_value

    def get_date_of_last_entry_for_strategy(self, strategy_name: str) -> datetime.datetime:
        """Date of the latest capital entry, or missing_data if none stored."""
        current_capital_entry = self.get_last_entry_for_strategy(strategy_name)
        if current_capital_entry is missing_data:
            return missing_data

        entry_date = current_capital_entry.date

        return entry_date

    def get_last_entry_for_strategy(self, strategy_name: str) -> capitalEntry:
        current_capital_entry = self._get_current_entry_for_args_dict(
            dict(strategy_name=strategy_name)
        )
        return current_capital_entry

    def update_broker_account_value(
            self,
            new_capital_value: float,
            date: datetime.datetime = arg_not_supplied):
        """Append a new broker account value entry."""
        self.update_capital_value_for_strategy(
            BROKER_ACCOUNT_VALUE, new_capital_value, date=date
        )

    def update_profit_and_loss_account(
            self, new_capital_value: float, date: datetime.datetime = arg_not_supplied):
        """Append a new accumulated p&l entry."""
        self.update_capital_value_for_strategy(
            ACC_PROFIT_VALUES, new_capital_value, date=date
        )

    def update_total_capital(self, new_capital_value: float, date: datetime.datetime = arg_not_supplied):
        """Append a new total capital entry."""
        self.update_capital_value_for_strategy(
            GLOBAL_STRATEGY, new_capital_value, date=date
        )

    def update_maximum_capital(self, new_capital_value: float, date: datetime.datetime = arg_not_supplied):
        """Append a new high-water-mark entry."""
        return self.update_capital_value_for_strategy(
            MAXIMUM_ACCOUNT_VALUE, new_capital_value, date=date
        )

    def update_capital_value_for_strategy(
        self, strategy_name: str, new_capital_value: float, date: datetime.datetime = arg_not_supplied
    ):
        """Append a capital entry for a strategy; failures are logged, not raised."""
        new_capital_entry = capitalEntry(new_capital_value, date=date)
        try:
            self._update_entry_for_args_dict(
                new_capital_entry, dict(strategy_name=strategy_name)
            )
        except Exception as e:
            self.log.warn(
                "Error %s when updating capital for %s with %s"
                % (str(e), strategy_name, str(new_capital_entry))
            )

    def get_list_of_strategies_with_capital(self) -> list:
        """Strategy names with stored capital, excluding reserved pseudo-strategies."""
        strategy_names = self._get_list_of_strategies_with_capital_including_reserved_names()
        for strat_name in SPECIAL_NAMES:
            try:
                strategy_names.remove(strat_name)
            except ValueError:
                # BUG FIX: list.remove raises ValueError (not IndexError)
                # when the name is absent. Reserved entries don't have to
                # have capital defined, so silently skip missing ones.
                pass

        return strategy_names

    def _get_list_of_strategies_with_capital_including_reserved_names(self) -> list:
        """All distinct strategy names present in storage, reserved names included."""
        list_of_args_dict = self._get_list_of_args_dict()
        strategy_names = [d["strategy_name"] for d in list_of_args_dict]
        strategy_names = list(set(strategy_names))

        return strategy_names

    def delete_last_capital_for_strategy(
            self, strategy_name: str, are_you_sure=False):
        """Delete the most recent capital entry for a strategy."""
        self._delete_last_entry_for_args_dict(
            dict(strategy_name=strategy_name), are_you_sure=are_you_sure
        )

    def delete_all_capital_for_strategy(
            self, strategy_name: str, are_you_really_sure=False):
        """Delete every capital entry for a strategy."""
        self._delete_all_data_for_args_dict(
            dict(
                strategy_name=strategy_name),
            are_you_really_sure=are_you_really_sure)

    def delete_all_special_capital_entries(self, are_you_really_sure=False):
        """Delete all account-level (reserved pseudo-strategy) series."""
        for strat_name in SPECIAL_NAMES:
            self.delete_all_capital_for_strategy(
                strat_name, are_you_really_sure=are_you_really_sure
            )

    def delete_recent_capital_for_total_strategy(
            self, start_date: datetime.datetime, are_you_sure=False):
        self.delete_recent_capital_for_strategy(
            GLOBAL_STRATEGY, start_date, are_you_sure=are_you_sure
        )

    def delete_recent_capital_for_maximum(
            self, start_date: datetime.datetime, are_you_sure=False):
        self.delete_recent_capital_for_strategy(
            MAXIMUM_ACCOUNT_VALUE, start_date, are_you_sure=are_you_sure
        )

    def delete_recent_capital_for_broker_value(
            self, start_date: datetime.datetime, are_you_sure=False):
        self.delete_recent_capital_for_strategy(
            BROKER_ACCOUNT_VALUE, start_date, are_you_sure=are_you_sure
        )

    def delete_recent_capital_for_pandl(self, start_date: datetime.datetime, are_you_sure=False):
        self.delete_recent_capital_for_strategy(
            ACC_PROFIT_VALUES, start_date, are_you_sure=are_you_sure
        )

    def delete_recent_capital_for_strategy(
        self, strategy_name: str, start_date: datetime.datetime, are_you_sure=False
    ):
        """Delete entries newest-first until the series reaches before start_date."""
        have_capital_to_delete = True
        while have_capital_to_delete:
            last_date_in_data = self.get_date_of_last_entry_for_strategy(strategy_name)
            if last_date_in_data is missing_data:
                ## gone to the start, nothing left
                break
            if last_date_in_data < start_date:
                # before the start date, so don't want to delete
                break
            else:
                self.delete_last_capital_for_strategy(
                    strategy_name, are_you_sure=are_you_sure
                )
class totalCapitalCalculationData(object):
    """
    This object allows us to calculate available total capital from previous capital and profits

    It uses the special strategy names GLOBAL_STRATEGY and BROKER_ACCOUNT_VALUE, MAXIMUM and PROFIT

    Three different compounding methods are available ['full', 'half', 'fixed']
    """

    def __init__(self, capital_data: capitalData, calc_method="full"):
        """
        Calculation methods are: full- all profits and losses go to capital, half - profits past the HWM are not added,
        fixed - capital is unaffected by profits and losses (not recommended!)

        :param capital_data: capitalData instance or something that inherits from it
        :param calc_method: method for going from profits to capital allocated
        """
        self._capital_data = capital_data

        # BUG FIX: was `assert` inside try/except BaseException; assert is
        # stripped under `python -O`, silently disabling validation.
        if calc_method not in LIST_OF_COMPOUND_METHODS:
            raise Exception(
                "Capital calculation %s has to be one of %s"
                % (calc_method, LIST_OF_COMPOUND_METHODS)
            )
        self._calc_method = calc_method

    @property
    def capital_data(self):
        return self._capital_data

    @property
    def calc_method(self):
        return self._calc_method

    def __repr__(self):
        return "capitalCalculationData for %s" % self._capital_data

    def get_current_total_capital(self):
        return self.capital_data.get_current_total_capital()

    def get_total_capital(self) -> pd.DataFrame:
        return self.capital_data.get_total_capital_pd_df()

    def get_profit_and_loss_account(self) -> pd.DataFrame:
        # BUG FIX: annotation was `pd.DataFrame()`, which constructed a
        # throwaway DataFrame at definition time; the type itself is meant.
        return self.capital_data.get_profit_and_loss_account_pd_df()

    def get_broker_account(self) -> pd.DataFrame:
        return self.capital_data.get_broker_account_value_pd_df()

    def get_maximum_account(self) -> pd.DataFrame:
        return self.capital_data.get_maximum_account_value_pd_df()

    def get_all_capital_calcs(self) -> pd.DataFrame:
        """All four account-level series side by side, or missing_data if any is absent."""
        total_capital = self.get_total_capital()
        max_capital = self.get_maximum_account()
        acc_pandl = self.get_profit_and_loss_account()
        broker_acc = self.get_broker_account()

        if (
            total_capital is missing_data
            or max_capital is missing_data
            or acc_pandl is missing_data
            or broker_acc is missing_data
        ):
            return missing_data

        all_capital = pd.concat(
            [total_capital, max_capital, acc_pandl, broker_acc], axis=1
        )
        all_capital.columns = ["Actual", "Max", "Accumulated", "Broker"]

        return all_capital

    def update_and_return_total_capital_with_new_broker_account_value(
        self, broker_account_value: float, check_limit=0.1
    ) -> float:
        """
        does everything you'd expect when a new broker account value arrives:
           - add on to broker account value series
           - get p&l since last broker
           - call capital calculation function, which will update

        If the change in broker account value is greater than check_limit then do not update capital
        You will have to check and do a manual update if it's correct

        :param broker_account_value: float
        :param check_limit: float
        :return: current total capital
        """
        # Compare broker account value to previous
        capital_updater = self._init_capital_updater(broker_account_value)
        capital_updater.check_pandl_size(check_limit=check_limit)

        capital_updater.calculate_new_total_and_max_capital_given_pandl()

        self._update_capital_data_after_pandl_event(capital_updater)

        return capital_updater.new_total_capital

    def _init_capital_updater(self, new_broker_account_value: float) -> totalCapitalUpdater:
        """Build a totalCapitalUpdater from stored state plus the new broker value."""
        calc_method = self.calc_method
        prev_broker_account_value = self._get_prev_broker_account_value_create_if_no_data(
            new_broker_account_value
        )
        prev_maximum_capital = self.capital_data.get_current_maximum_account_value()
        prev_total_capital = self.capital_data.get_current_total_capital()

        capital_updater = totalCapitalUpdater(
            new_broker_account_value=new_broker_account_value,
            prev_total_capital=prev_total_capital,
            prev_maximum_capital=prev_maximum_capital,
            prev_broker_account_value=prev_broker_account_value,
            calc_method=calc_method,
        )

        return capital_updater

    def _get_prev_broker_account_value_create_if_no_data(self, new_broker_account_value: float) -> float:
        """Previous broker value; bootstraps initial capital on first ever update."""
        prev_broker_account_value = self.capital_data.get_broker_account_value()
        if prev_broker_account_value is missing_data:
            # No previous capital, need to set everything up
            self.create_initial_capital(
                new_broker_account_value, are_you_really_sure=True)
            prev_broker_account_value = copy(new_broker_account_value)

        return prev_broker_account_value

    def _update_capital_data_after_pandl_event(self, capital_updater: totalCapitalUpdater):
        """Persist the updater's results, all stamped with one shared timestamp."""
        # Update broker account value and add p&l entry with synched dates
        date = datetime.datetime.now()

        new_total_capital = capital_updater.new_total_capital
        new_maximum_capital = capital_updater.new_maximum_capital
        new_broker_account_value = capital_updater.new_broker_account_value
        profit_and_loss = capital_updater.profit_and_loss

        self.capital_data.update_total_capital(
            new_total_capital, date=date)
        self.capital_data.update_maximum_capital(
            new_maximum_capital, date=date)
        self.capital_data.update_broker_account_value(
            new_broker_account_value, date=date)

        self._update_pandl(profit_and_loss, date)

    def _update_pandl(self, profit_and_loss: float, date: datetime.datetime):
        # Add P&L to accumulated p&l
        prev_acc_pandl = self._capital_data.get_current_pandl_account()
        new_acc_pandl = prev_acc_pandl + profit_and_loss
        self._capital_data.update_profit_and_loss_account(
            new_acc_pandl, date=date)

    def adjust_broker_account_for_delta(self, delta_value: float):
        """
        If you have changed broker account value, for example because of a withdrawal, but don't want this to
        affect capital calculations

        A negative delta_value indicates a withdrawal (capital value falling) and vice versa

        :param delta_value: change in account value to be ignored, a minus figure is a withdrawal, positive is deposit
        :return: None
        """
        prev_broker_account_value = self.capital_data.get_broker_account_value()
        if prev_broker_account_value is missing_data:
            self._capital_data.log.warn(
                "Can't apply a delta to broker account value, since no value in data"
            )
            # BUG FIX: previously fell through and tried to add a float to
            # missing_data, raising a TypeError; bail out instead.
            return failure

        broker_account_value = prev_broker_account_value + delta_value

        # Update broker account value
        self.capital_data.update_broker_account_value(broker_account_value)

    def modify_account_values(
        self,
        broker_account_value: float = arg_not_supplied,
        total_capital: float = arg_not_supplied,
        maximum_capital: float = arg_not_supplied,
        acc_pandl: float = arg_not_supplied,
        date: datetime.datetime = arg_not_supplied,
        are_you_sure: bool = False,
    ):
        """
        Allow any account valuation to be modified
        Be careful! Only use if you really know what you are doing

        :return: None
        """
        if not are_you_sure:
            self._capital_data.log.warn(
                "You need to be sure to modify capital!")
            # BUG FIX: previously only warned and then modified anyway;
            # abort like delete_recent_capital does.
            return failure

        if date is arg_not_supplied:
            date = datetime.datetime.now()

        if broker_account_value is not arg_not_supplied:
            self.capital_data.update_broker_account_value(
                broker_account_value, date=date
            )

        if total_capital is not arg_not_supplied:
            self.capital_data.update_total_capital(total_capital, date=date)

        if maximum_capital is not arg_not_supplied:
            self.capital_data.update_maximum_capital(
                maximum_capital, date=date)

        if acc_pandl is not arg_not_supplied:
            self.capital_data.update_profit_and_loss_account(
                acc_pandl, date=date)

    def create_initial_capital(
        self,
        broker_account_value: float,
        total_capital: float = arg_not_supplied,
        maximum_capital: float = arg_not_supplied,
        acc_pandl: float = arg_not_supplied,
        are_you_really_sure: bool = False,
    ):
        """
        Used to create the initial capital series

        Will delete capital! So be careful

        If broker_account_value passed and total_capital not passed, then use broker_account_value

        acc_pandl defaults to zero if not passed

        Default is to start at HWM with broker account value, but you can modify this

        :return: None
        """
        self.delete_all_capital(are_you_really_sure=are_you_really_sure)

        if total_capital is arg_not_supplied:
            total_capital = broker_account_value

        if maximum_capital is arg_not_supplied:
            maximum_capital = total_capital

        if acc_pandl is arg_not_supplied:
            acc_pandl = 0

        date = datetime.datetime.now()

        self.capital_data.update_total_capital(total_capital, date=date)
        self.capital_data.update_maximum_capital(maximum_capital, date=date)
        self.capital_data.update_broker_account_value(
            broker_account_value, date=date)
        self.capital_data.update_profit_and_loss_account(acc_pandl, date=date)

    def delete_recent_capital(self, start_date: datetime.datetime, are_you_sure: bool = False):
        """
        Delete all capital entries on or after start date

        :param start_date: pd.datetime
        :return:
        """
        if not are_you_sure:
            self._capital_data.log.warn(
                "You have to be sure to delete capital")
            return failure

        self.capital_data.delete_recent_capital_for_total_strategy(
            start_date, are_you_sure=are_you_sure
        )
        self.capital_data.delete_recent_capital_for_maximum(
            start_date, are_you_sure=are_you_sure
        )
        self.capital_data.delete_recent_capital_for_broker_value(
            start_date, are_you_sure=are_you_sure
        )
        self.capital_data.delete_recent_capital_for_pandl(
            start_date, are_you_sure=are_you_sure
        )

    def delete_all_capital(self, are_you_really_sure: bool = False):
        """Remove every account-level capital series. Destructive!"""
        self._capital_data.delete_all_special_capital_entries(
            are_you_really_sure=are_you_really_sure
        )
|
The “Lucifer Effect” describes the point in time when an ordinary, normal person first crosses the boundary between good and evil to engage in an evil action.
Philip George Zimbardo (born 23 March 1933) is a psychologist and a professor emeritus at Stanford University. He is president of the Heroic Imagination Project, famous for his Stanford prison study involving Groupthink processes and for authorship of various introductory psychology books and textbooks for college students, including The Lucifer Effect and The Time Paradox.
If you observe such abuses and don’t say, “This is wrong! Stop it!” you give tacit approval to continue. You are part of the silent majority that makes evil deeds more acceptable.
The idea of the banality of heroism debunks the myth of the “heroic elect,” a myth that reinforces two basic human tendencies. The first is to ascribe very rare personal characteristics to people who do something special — to see them as superhuman, practically beyond comparison to the rest of us. The second is the trap of inaction — sometimes known as the "bystander effect." Research has shown that the bystander effect is often motivated by diffusion of responsibility, when different people witnessing an emergency all assume someone else will help. Like the “good guards,” we fall into the trap of inaction when we assume it’s someone else’s responsibility to act the hero.
The “Lucifer Effect” describes the point in time when an ordinary, normal person first crosses the boundary between good and evil to engage in an evil action. It represents a transformation of human character that is significant in its consequences. Such transformations are more likely to occur in novel settings, in “total situations,” where social situational forces are sufficiently powerful to overwhelm, or temporarily set aside, personal attributes of morality, compassion, or sense of justice and fair play.
Evil is the exercise of power to intentionally harm (psychologically), hurt (physically), or destroy (mortally or spiritually) others.
We're going to take away their individuality in various ways. In general what all this leads to is a sense of powerlessness. That is, in this situation we'll have all the power and they'll have none.
This was a controversial study of the psychological effects of becoming a prisoner or prison guard conducted at Stanford University from August 14 to August 20 in 1971 by a team of researchers led by Zimbardo. Twenty-four male students out of 75 were selected to take on randomly assigned roles of prisoners and guards in a mock prison situated in the basement of the Stanford psychology building. The participants adapted to their roles well beyond Zimbardo's expectations, as the guards enforced authoritarian measures and ultimately subjected some of the prisoners to psychological torture, leading to dangerous and psychologically damaging situations.
Zimbardo to those selected to be "prison guards"
This page was last edited on 27 January 2019, at 02:53.
|
import urllib.request
from bs4 import BeautifulSoup
from distutils.version import LooseVersion
class Error(Exception):
    """Raised when the megares download index cannot be fetched or parsed."""
    pass
class MegaresDataFinder:
    """Locate the download URL of a megares database zip.

    Scrapes the megares download index page for links of the form
    ``megares_v<version>.zip`` and resolves either the requested version
    or, when none was given, the newest version available.
    """

    def __init__(self, version=None):
        """
        :param version: exact megares version string to fetch (e.g. "1.01"),
            or None to pick the newest version found on the download page.
        """
        self.url_root = 'https://megares.meglab.org/download/'
        self.index_url = self.url_root + 'index.php'
        self.version = version

    def _get_available_zips(self):
        """Return the raw HTML (bytes) of the download index page."""
        try:
            response = urllib.request.urlopen(self.index_url)
            html_text = response.read()
        except Exception as e:
            # Narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit); chain the cause for debugging.
            raise Error('Error getting megares download page ' + self.index_url) from e

        return html_text

    @classmethod
    def _zips_from_index_page_string(cls, html_text):
        """Map version string -> zip filename for every megares zip link."""
        try:
            soup = BeautifulSoup(html_text, 'html.parser')
        except Exception as e:
            raise Error('Error parsing contents of megares download page. Cannot continue') from e

        prefix = 'megares_v'
        suffix = '.zip'
        zips = {}

        for link in soup.find_all('a'):
            href = link.get('href')
            # BUG FIX: <a> tags without an href attribute return None,
            # which used to crash on .startswith().
            if href and href.startswith(prefix) and href.endswith(suffix):
                version = href[len(prefix):-len(suffix)]
                zips[version] = href

        return zips

    @classmethod
    def _get_url_for_version(cls, zips, version=None):
        """Pick the zip filename for *version*, or the newest when None.

        Raises Error when the requested version is not on the index page.
        """
        if version is None:
            versions = list(zips.keys())
            versions.sort(key=LooseVersion)
            return zips[versions[-1]]
        else:
            try:
                return zips[version]
            except KeyError:
                # Narrowed from a bare `except:` — only a missing key
                # means "version not available".
                versions = ', '.join(list(zips.keys()))
                raise Error('Error! version ' + version + ' of megares not found. Available versions: ' + versions)

    def run(self):
        """Return the full download URL for the chosen megares version."""
        print('Finding available megares versions from', self.index_url)
        html_text = self._get_available_zips()
        zips = MegaresDataFinder._zips_from_index_page_string(html_text)
        print('Found versions: ', ', '.join(list(zips.keys())))
        url = MegaresDataFinder._get_url_for_version(zips, version=self.version)
        return self.url_root + url
|
Jill Norman is an award-winning editor, publisher and author of books on food. She writes frequently on food and wine, and is a regular speaker on gastronomy at literary and food festivals.
A leading expert on herbs and spices, Jill has always been interested in ingredients and flavours. Her belief that food should be simple, locally and sustainably sourced and made from the finest ingredients defines her work.
She is also the literary trustee of the Elizabeth David Estate and has compiled collections of Elizabeth David’s unpublished work, and two illustrated anthologies.
Jill's books on food and cooking have been widely translated and won awards in many countries. Her interests also extend to wine and she talks and writes on food and wine pairing and on travels through wine lands.
|
from cuisine import res
class GradeDAO(object):
    """Read restaurant inspection grades from the mongo ``ratings`` collection."""

    # Default aggregation: collapse inspections to one row per restaurant,
    # keeping the most recent record date per (camis, dba, phone, grade) group.
    groupby = {"$group":
               {"_id":
                {"camis": "$CAMIS",
                 "dba": "$DBA",
                 "phone": "$PHONE",
                 "grade": "$CURRENTGRADE"},
                "RECORDDATE": {"$max": "$RECORDDATE"}
                }
               }
    # NYC borough name -> numeric BORO code used in the dataset.
    borodict = {"manhattan": 1,
                "brooklyn": 3,
                "queens": 4,
                "statenisland": 5,
                "thebronx": 2}

    def __init__(self, mongo):
        # ``mongo`` is presumably a Flask-PyMongo-style wrapper exposing
        # .db — TODO confirm against the app factory.
        self.ratings = mongo.db.ratings

    def __get_grades__(self, match, groupby=groupby):
        """Run a two-stage aggregation ([match, groupby]) and return its rows."""
        resultset = self.ratings.aggregate([match, groupby])
        return resultset['result']

    def get_grade_by_zipcode(self, key):
        match = {"$match": {"ZIPCODE": key}}
        return self.__get_grades__(match)

    def get_grade_by_boro(self, key):
        # BUG FIX: was ``boro[key]`` — an undefined name (NameError on every
        # call); the borough mapping lives in the class-level ``borodict``.
        match = {"$match": {"BORO": self.borodict[key]}}
        return self.__get_grades__(match)

    def get_grade_by_cuisine(self, key):
        # BUG FIX: was ``"%match"`` — mongo's aggregation stage operator is
        # ``"$match"``, so the pipeline was rejected by the server.
        match = {"$match": {"CUISINE": key}}
        return self.__get_grades__(match)

    def get_grade_by_phone(self, key):
        """Per-inspection detail rows for one phone number (finer grouping)."""
        match = {"$match": {"PHONE": key}}
        groupby = {
            "$group": {"_id":
                       {"insp_id": "$_id",
                        "camis": "$CAMIS",
                        "dba": "$DBA",
                        "score": "$SCORE",
                        "cuisine": "$CUISINECODE",
                        "violation": "$VIOLCODE",
                        "grade": "$CURRENTGRADE",
                        "building": "$BUILDING",
                        "street": "$STREET",
                        "zipcode": "$ZIPCODE",
                        "boro": "$BORO", },
                       "INSPECTIONDATE": {"$max": "$INSPDATE"}}}
        return self.__get_grades__(match, groupby)

    def get_summary(self, boro, cuisine=False):
        """All rating documents for restaurants in a borough, optionally
        restricted to one cuisine (looked up via the ``res`` code table)."""
        query = {'BORO': self.borodict[boro]}
        if cuisine:
            query['CUISINECODE'] = res[cuisine]
        camis = self.ratings.find(query).distinct('CAMIS')
        result = self.ratings.find({'CAMIS': {"$in": camis}})
        return result
|
Sheet pan turkey sausages with sweet potatoes and okra is an easy, hands-off dinner that’s perfect for a busy weeknight!
So on Monday, I said I wasn’t really looking forward to this week.
But it’s turned out to be a really good one! I managed to get myself into a better mindset and sometimes that makes all the difference, you know?
We may try and go to a local park festival and we are definitely taking the kids to their first football game on Sunday to see our local Carolina Panthers play. I’m curious to see what they think of it!
But speaking of busy weeks, I was excited to hear this month’s Recipe Redux is all about sheet pan dinners!
Previously I’ve shared my sheet pan chicken with broccoli and bell peppers (and Parmesan cheese sprinkled over it all) and I’ve made countless others that will eventually make it here to this blog.
Today I’m sharing my sheet pan turkey sausages and veggies. I use whole turkey sausages (you can choose mild or hot), cubed sweet potatoes and sliced okra.
The sausages get browned and plump while the sweet potatoes and okra get a little charred on the outside and perfectly tender on the inside.
All kinds of warm, cozy, comforting vibes in this dinner!
And even the kids loved it!
The sausages let off a lot of juice as they roast. I put them on their own separate pan so the vegetables don’t get soggy.
I use turkey sausages but you can use regular pork sausage if that’s what you like best.
You could also substitute red-skinned or Yukon potatoes for the sweet potatoes.
And if you’re not a huge okra fan, switch it out for your favorite roasted veggie. Brussels sprouts would be great here, too!
I’ve kept the seasonings very simple but feel free to add your favorite spices to the vegetables.
Next time you need an easy, hands-off dinner, I hope you’ll give these sheet pan turkey sausages and veggies a try!
Sheet pan turkey sausages with sweet potatoes and okra is an easy, hands-off dinner!
Add whole turkey sausages to one sheet pan.
Add diced sweet potatoes to a second sheet pan. Drizzle with two teaspoons olive oil, 3/4 teaspoon salt and 1/2 teaspoon black pepper. Toss to get everything coated and then spread in an even layer.
Add both sheet pans to the oven and cook for 20 minutes.
After the first 20 minutes, flip the turkey sausages over on their sheet pan and stir the sweet potatoes on the second sheet pan.
Add the okra to the sheet pan with the sweet potatoes and return the pans to the oven.
Roast for another 20 minutes, or until the turkey sausages are cooked through and the vegetables are tender.
I’m thrilled to hear your week turned into a good one! Funny how life works out like that, huh? And it sounds like y’all have a fantastic weekend planned! Go Panthers! I am loving this sheet pan dinner!! I’m all about some sweet ‘taters and I love me some turkey sausages!! And okra!! I haven’t had okra in far too long!! This is totally calling my name, girlfriend! Cheers to a stellar weekend!
I love okra and love roasted vegetables, but for some reason it never occurred to me to roast okra! I am definitely trying this. Hope you have a fun weekend with the kids!
Yum yum yum!! This is such a delicious combination!
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.exceptions import DropItem
from scrapy.xlib.pydispatch import dispatcher
from scrapy import signals
from scrapy.contrib.exporter import JsonItemExporter
from scrapy.contrib.exporter import JsonLinesItemExporter
class AllrisPipeline(object):
    """Flatten single-valued list fields scraped by the allris spider and
    derive the paper reference from the first word of its name."""

    # Fields that arrive as one-element lists and are unwrapped in place.
    _SCALAR_FIELDS = ('id', 'name', 'publishedDate', 'paperType', 'mainFile')

    def __init__(self):
        # No state needed; scrapy instantiates pipelines with no arguments.
        pass

    def process_item(self, item, spider):
        # Reject the item outright if any field came back empty.
        for entry in item.values():
            if not entry:
                raise DropItem("Empty ID in item %s" % item)

        for field in self._SCALAR_FIELDS:
            item[field] = item[field][0]

        item['reference'] = item['name'].split()[0]
        item['originator'] = item['originator'][0].split(',')
        return item
class JsonExportPipeline(object):
    """Write each spider's scraped items to '<spider.name>_items.json'.

    Hooks the spider_opened/spider_closed signals to open one output file
    and JsonItemExporter per spider run, and to finalize them on close.
    """

    def __init__(self):
        self.files = {}
        dispatcher.connect(self.spider_opened, signals.spider_opened)
        dispatcher.connect(self.spider_closed, signals.spider_closed)

    def spider_opened(self, spider):
        out = open('%s_items.json' % spider.name, 'w+b')
        self.files[spider] = out
        self.exporter = JsonItemExporter(out)
        self.exporter.start_exporting()

    def spider_closed(self, spider):
        self.exporter.finish_exporting()
        out = self.files.pop(spider)
        out.close()

    def process_item(self, item, spider):
        self.exporter.export_item(item)
        return item
|
COMStar Computing Technology Institute is playing a guiding role to advancing the technologies in computing for multi-disciplinary research and application in related fields.
Our overriding goal is to use the best science and technology available to bring the most effective ideas and approaches to enhancing everyday life. For us, making the world a better place is not wishful thinking. It is the goal that drives us. The intellectual diversity of our teams enables us to bring together experts from many fields in the search for innovative answers to any challenge.
We believe that appropriate investment in Big Data will not only lead to a new wave of fundamental technological advances that will be embodied in the next generations of Big Data management platforms, systems, and products, but also has the potential to create huge economic value in the world economy for years to come.
Driverless cars are one of the great technological advances for future transportation. With the investments by the U.S. government and innovative companies, several companies and research institutions working in the field have fully demonstrated that self-driving vehicles are technically viable nowadays. There is a common goal of making driverless cars a reality by the 2020s.
Virtual reality is part of a booming wearable technology industry that took steps toward a big breakout recently. VR has been on the verge of taking off for decades due to its high price and nauseating effects. The latest VR headsets offer more than a mere doorway into wire-frame worlds. Are we finally about to lay our hands on the decent VR headset we’ve all been waiting for?
COM.Geo is a leading-edge conference for computing for geospatial research and application. COM.Geo focuses on the latest computing technologies for multidisciplinary research and development that enables the exploration in geospatial areas. It is an exclusive event that builds a bridge between computing and geospatial areas.
We have developed a groundbreaking intelligent Big Data computing approach based on a new type of brain computational model. This novel tech outperforms traditional techs in speed, efficiency, accuracy, unstructured data, stream processing, etc. It's been used for Precision Medicines, genomic research, bioinformatics, diagnostics, fraud detection, etc.
Virtual reality is part of a booming wearable technology industry that took steps toward a big breakout recently. We focus on innovative R&D on touchless VR interaction, fast motion tracking, etc.
Our team focuses on cutting-edge imaging, computer vision and visualization technologies, such as image/video object detection, tracking, feature extraction, 3D object reconstruction through images or videos, GPGPUs, large-data visualization, 3D printing, etc.
Divide details about your product or agency work into parts. Write a few lines about each one and contact us about any further collaboration. We will respond and get back to you soon.
|
# coding: utf-8
"""
This is part of the MSS Python's module.
Source: https://github.com/BoboTiG/python-mss
"""
from __future__ import print_function
import os.path
import sys
from argparse import ArgumentParser
from . import __version__
from .exception import ScreenShotError
from .factory import mss
from .tools import to_png
def main(args=None):
    # type: (Optional[List[str]]) -> int
    """Entry point of the command line interface.

    Parses CLI options and takes one screen shot (of a region or a
    monitor), printing the created file name(s) unless --quiet is set.

    :param args: argument list to parse (argparse falls back to
        sys.argv[1:] when None).
    :return: exit code -- 0 on success, 1 on screen shot error,
        2 on bad --coordinates syntax.
    """
    cli_args = ArgumentParser()
    cli_args.add_argument('-c', '--coordinates', default='', type=str,
                          help='the part of the screen to capture:'
                               ' top, left, width, height')
    cli_args.add_argument('-m', '--monitor', default=0, type=int,
                          help='the monitor to screen shot')
    cli_args.add_argument('-o', '--output', default='monitor-{mon}.png',
                          help='the output file name')
    cli_args.add_argument('-q', '--quiet', default=False, action='store_true',
                          help='do not print created files')
    cli_args.add_argument('-v', '--version', action='version',
                          version=__version__)
    options = cli_args.parse_args(args)
    kwargs = {
        'mon': options.monitor,
        'output': options.output,
    }
    if options.coordinates:
        try:
            # int() is inside the try block so that non-numeric values are
            # reported with the friendly syntax message (and exit code 2)
            # instead of crashing with an uncaught ValueError, exactly like
            # a wrong number of values.
            top, left, width, height = (
                int(coord) for coord in options.coordinates.split(','))
        except ValueError:
            print('Coordinates syntax: top, left, width, height')
            return 2
        kwargs['mon'] = {
            'top': top,
            'left': left,
            'width': width,
            'height': height,
        }
        if options.output == 'monitor-{mon}.png':
            kwargs['output'] = 'sct-{top}x{left}_{width}x{height}.png'
    try:
        with mss() as sct:
            if options.coordinates:
                output = kwargs['output'].format(**kwargs['mon'])
                sct_img = sct.grab(kwargs['mon'])
                to_png(sct_img.rgb, sct_img.size, output)
                if not options.quiet:
                    print(os.path.realpath(output))
            else:
                for file_name in sct.save(**kwargs):
                    if not options.quiet:
                        print(os.path.realpath(file_name))
        return 0
    except ScreenShotError:
        return 1
if __name__ == '__main__':
    # sys.exit() rather than the site-provided exit(): the latter is meant
    # for interactive sessions and is absent when Python runs with -S.
    sys.exit(main(sys.argv[1:]))
|
At Hampshire Motor House we aim to bring you the widest choice of used cars in HOOK, HAMPSHIRE and at great prices. We have some fantastic deals on used SEAT cars, which are always changing. Browse our used cars below and contact us for more information on any of our second hand cars.
|
# -*- coding: utf-8 -*-
from functools import partial
from multiprocessing import Process
import multiprocessing as mp
import sys
import os
import platform
import unicodedata
# https://github.com/pyinstaller/pyinstaller/wiki/Recipe-Multiprocessing
# Module multiprocessing is organized differently in Python 3.4+
try:
# Python 3.4+
if sys.platform.startswith('win'):
import multiprocessing.popen_spawn_win32 as forking
else:
import multiprocessing.popen_fork as forking
except ImportError:
import multiprocessing.forking as forking
# PyInstaller --onefile workaround (see the recipe linked above): frozen
# Windows executables must hand _MEIPASS2 to child processes so that
# multiprocessing children bootstrap from the unpacked bundle.
if sys.platform.startswith('win'):
    # First define a modified version of Popen.
    class _Popen(forking.Popen):
        def __init__(self, *args, **kw):
            if hasattr(sys, 'frozen'):
                # We have to set original _MEIPASS2 value from sys._MEIPASS
                # to get --onefile mode working.
                os.putenv('_MEIPASS2', sys._MEIPASS)
            try:
                super(_Popen, self).__init__(*args, **kw)
            finally:
                if hasattr(sys, 'frozen'):
                    # On some platforms (e.g. AIX) 'os.unsetenv()' is not
                    # available. In those cases we cannot delete the variable
                    # but only set it to the empty string. The bootloader
                    # can handle this case.
                    if hasattr(os, 'unsetenv'):
                        os.unsetenv('_MEIPASS2')
                    else:
                        os.putenv('_MEIPASS2', '')
    # Second override 'Popen' class with our modified version.
    forking.Popen = _Popen
def read_in_chunks(file_object, chunk_size=4 * 1024 * 1024):
    """Generator yielding successive chunks read from *file_object*.

    Iteration stops at end of file. The default chunk size is 4 MiB.
    """
    chunk = file_object.read(chunk_size)
    while chunk:
        yield chunk
        chunk = file_object.read(chunk_size)
def do_work(in_queue, out_queue, null_char):
    """Worker loop: count occurrences of *null_char* in queued chunks.

    Pulls byte chunks from in_queue forever, puts the per-chunk count of
    null_char on out_queue, and marks each chunk done so that
    in_queue.join() in the parent can unblock.

    :param in_queue: JoinableQueue of byte chunks.
    :param out_queue: queue receiving one int count per chunk.
    :param null_char: the byte to count, e.g. b'\\x00'.
    """
    while True:
        item = in_queue.get()
        # bytes.count() works on both Python 2 (str) and Python 3 (bytes).
        # The previous per-byte loop compared an int against a bytes object
        # on Python 3 (iterating bytes yields ints), which is never equal,
        # so the reported count was always 0 there.
        out_queue.put(item.count(null_char))
        in_queue.task_done()
def scan(name, work_queue, result_queue):
    """Loads data into work_queue, then gets results from result_queue.

    Returns the total null-character count accumulated by the workers, or
    the sentinel string 'Error' if the file could not be read.
    """
    try:
        with open(name, 'rb') as f:
            for i in read_in_chunks(f):
                work_queue.put(i)
    except IOError:
        # Unreadable file: signal the caller with a sentinel string.
        return 'Error'
    else:
        # Block until every queued chunk has been task_done()'d by a worker.
        work_queue.join()
        # NOTE(review): qsize() is documented as approximate and raises
        # NotImplementedError on macOS; this relies on join() above having
        # quiesced all writers -- confirm on the target platforms.
        null_count = sum([result_queue.get()
                          for i in range(result_queue.qsize())])
        return null_count
def create_workers(work_queue, result_queue, null_char=b'\x00'):
    """Spawn daemonized worker processes and return them as a list.

    One worker per CPU minus one (at least one), leaving a core free for
    the parent process feeding the queues.
    """
    num_workers = max(mp.cpu_count() - 1, 1)
    workers = []
    for _ in range(num_workers):
        proc = Process(target=do_work,
                       args=(work_queue, result_queue, null_char))
        proc.daemon = True
        proc.start()
        workers.append(proc)
    return workers
def scan_target(path, files, directories):
    """Classify *path* into the given files/directories accumulators.

    A non-directory path is appended to *files* as an absolute path. For a
    directory, every direct child is appended either to *files* or to
    *directories*. Both (possibly extended) lists are returned.
    """
    path = os.path.abspath(path)
    if not os.path.isdir(path):
        files.append(path)
        return files, directories
    # Normalize child names to NFC so paths compare consistently (macOS
    # HFS+ stores decomposed forms).
    children = (unicodedata.normalize('NFC', name)
                for name in os.listdir(path))
    for child in children:
        child_path = os.path.join(path, child)
        target = directories if os.path.isdir(child_path) else files
        target.append(child_path)
    return files, directories
|
Ronnie is a Pedigree Lhasa Apso with a huge personality. He is black in colour with hints of white fur too. Lovely temperament and a pleasure to have in the family. I do not have any Pedigree documentation however he comes from a family litter of puppies and has two Lhasa Apso parents!
|
from abc import abstractmethod
from ledger.util import count_bits_set
from ledger.util import highest_bit_set
class HashStore:
    """
    Store of nodeHashes and leafHashes mapped against their sequence numbers.

    Leaf hashes are hashes of ledger entries; node hashes are hashes of
    internal merkle-tree nodes. Both are addressed by 1-based sequence
    numbers (see getNodePosition for the node numbering scheme).
    """
    @property
    @abstractmethod
    def is_persistent(self) -> bool:
        # True when the concrete store keeps its data across restarts.
        pass
    @abstractmethod
    def writeLeaf(self, leafHash):
        """
        append the leafHash to the leaf hash store
        :param leafHash: hash of the leaf
        """
    @abstractmethod
    def writeNode(self, node):
        """
        append the node to the node hash store.
        :param node: tuple of start, height and nodeHash
        """
    @abstractmethod
    def readLeaf(self, pos):
        """
        Read the leaf hash at the given position in the merkle tree
        :param pos: the sequence number of the leaf
        :return: the leafHash at the specified position
        """
    @abstractmethod
    def readNode(self, pos):
        """
        Read the node hash at the given position in the merkle tree
        :param pos: the sequence number of the node (as calculated by
        getNodePosition)
        :return: the nodeHash at the specified position
        """
    @abstractmethod
    def readLeafs(self, startpos, endpos):
        """
        Read multiple leaves at the given position.
        :param startpos: read from this sequence number (inclusive)
        :param endpos: read up to this sequence number (inclusive)
        :return: list of leafHashes
        """
    @abstractmethod
    def readNodes(self, startpos, endpos):
        """
        Read multiple nodes at the given position. Node position can be
        calculated using getNodePosition
        :param startpos: read from this sequence number (inclusive)
        :param endpos: read up to this sequence number (inclusive)
        :return: list of nodeHashes
        """
    @property
    @abstractmethod
    def leafCount(self) -> int:
        # Number of leaf hashes currently stored.
        pass
    @leafCount.setter
    @abstractmethod
    def leafCount(self, count: int) -> None:
        pass
    @property
    @abstractmethod
    def nodeCount(self) -> int:
        # Number of node hashes currently stored. (Annotation fixed: this
        # property returns an int, not None.)
        pass
    @classmethod
    def getNodePosition(cls, start, height=None) -> int:
        """
        Calculates node position based on start and height
        :param start: The sequence number of the first leaf under this tree.
        :param height: Height of this node in the merkle tree
        :return: the node's position
        """
        # Height of the largest complete subtree whose leaf span ends at
        # 'start' (sequence numbers are 1-based).
        pwr = highest_bit_set(start) - 1
        height = height or pwr
        if count_bits_set(start) == 1:
            # 'start' is a power of two: the position follows directly,
            # adjusted by how far above that complete subtree the node sits.
            adj = height - pwr
            return start - 1 + adj
        else:
            # Otherwise split off the largest power-of-two prefix and
            # recurse over the remainder.
            c = pow(2, pwr)
            return cls.getNodePosition(c, pwr) + \
                cls.getNodePosition(start - c, height)
    @classmethod
    def getPath(cls, seqNo, offset=0):
        """
        Get the audit path of the leaf at the position specified by seqNo.
        :param seqNo: sequence number of the leaf to calculate the path for
        :param offset: the sequence number of the node from where the path
        should begin.
        :return: tuple of leafs and nodes
        """
        if offset >= seqNo:
            raise ValueError("Offset should be less than serial number")
        pwr = highest_bit_set(seqNo - 1 - offset) - 1
        if pwr <= 0:
            # Fewer than two leaves remain: the sibling leaf (present only
            # when seqNo is even) is the sole audit-path entry here.
            if seqNo % 2 == 0:
                return [seqNo - 1], []
            else:
                return [], []
        c = pow(2, pwr) + offset
        # Recurse into the right-hand part, then append the node covering
        # the complete left subtree rooted at c.
        leafs, nodes = cls.getPath(seqNo, c)
        nodes.append(cls.getNodePosition(c, pwr))
        return leafs, nodes
    def readNodeByTree(self, start, height=None):
        """
        Fetches nodeHash based on start leaf and height of the node in the tree.
        :return: the nodeHash
        """
        pos = self.getNodePosition(start, height)
        return self.readNode(pos)
    @property
    def is_consistent(self) -> bool:
        """
        Returns True if number of nodes are consistent with number of leaves
        """
        # Imported here to avoid a circular import at module load time.
        from ledger.compact_merkle_tree import CompactMerkleTree
        return self.nodeCount == CompactMerkleTree.get_expected_node_count(
            self.leafCount)
    @staticmethod
    def _validatePos(start, end=None):
        # Shared bounds check for read*() implementations; sequence numbers
        # are 1-based, and ranges must be non-empty.
        if end and start >= end:
            raise IndexError(
                "start ({}) index must be less than end ({}) index"
                .format(start, end)
            )
        if start < 1:
            raise IndexError(
                "seqNo starts from 1, index requested: {}".format(start))
    @abstractmethod
    def open(self):
        pass
    @abstractmethod
    def close(self):
        pass
    @property
    @abstractmethod
    def closed(self):
        pass
    @abstractmethod
    def reset(self) -> bool:
        """
        Removes all data from hash store
        :return: True if completed successfully
        """
|
In edited extracts from his book ‘Great Deaths’, Dr John Wolffe, Senior Lecturer and Head of the Department of Religious Studies at the Open University, describes the demonstrations of public grief and mourning following the death of Queen Victoria in January 1901.
Published in Review, July-December 2000.
In his lifetime, Jan Christiaan Smuts (1870–1950) was not only widely recognised as an exceptional scholar, soldier and scientist, but was also South Africa’s outstanding white statesman. For all his international achievements, however, he was incapable of anything but the rankest opportunism in relation to South Africa’s racial problems. In her Raleigh Lecture delivered on 2 November 2000, Professor Shula Marks FBA addressed this contradiction, ascribing his liberal internationalism at least in part to the influence of a group of remarkable radical and feminist women. In this extract, she considers the roots of Smuts’s powerful racial fears.
|
# -*- coding: utf-8 -*-
"""
The `dict_metrics` module implements utilities to compare
frames and dictionaries.
This module implements several criteria and metrics to compare different sets
of atoms. This module is primarily focused on multivariate kernels and
atoms.
"""
# Authors: Sylvain Chevallier <sylvain.chevallier@uvsq.fr>
# License: GPL v3
# TODO: add docstring to criteria fonction
# verify Fubini-Study scale parameter
# verify beta dist behavior, seems like 1-bd
# change scale behavior, replace 1-d with d !
import cvxopt as co
import cvxopt.solvers as solv
import numpy as np
import scipy.linalg as sl
from numpy import (
NaN,
abs,
all,
arccos,
arcsin,
argmax,
array,
atleast_2d,
concatenate,
infty,
max,
min,
ones,
ones_like,
sqrt,
trace,
unravel_index,
zeros,
zeros_like,
)
from numpy.linalg import det, norm, svd
def _kernel_registration(this_kernel, dictionary, g):
k_len = this_kernel.shape[0]
n_kernels = len(dictionary)
k_max_len = array([i.shape[0] for i in dictionary]).max()
m_dist = ones((n_kernels, k_max_len - k_len + 1)) * infty
m_corr = zeros((n_kernels, k_max_len - k_len + 1))
for i, kernel in enumerate(dictionary): # kernel loop
ks = kernel.shape[0]
# for t in range(k_max_len-k_len+1): # convolution loop
for t in range(ks - k_len + 1): # convolution loop
# print ("t = ", t, "and l =", l)
# print ("kernel = ", kernel.shape,
# "and kernel[t:t+l,:] = ", kernel[t:t+k_len,:].shape)
m_dist[i, t] = g(this_kernel, kernel[t : t + k_len, :])
m_corr[i, t] = trace(this_kernel.T.dot(kernel[t : t + k_len, :])) / (
norm(this_kernel, "fro") * norm(kernel[t : t + k_len, :], "fro")
)
return m_dist, m_corr
def principal_angles(A, B):
    """Compute the principal angles between subspaces A and B.

    The algorithm for computing the principal angles is described in:
    A. V. Knyazev and M. E. Argentati,
    Principal Angles between Subspaces in an A-Based Scalar Product:
    Algorithms and Perturbation Estimates. SIAM Journal on Scientific
    Computing, 23 (2002), no. 6, 2009-2041.
    http://epubs.siam.org/sam-bin/dbq/article/37733
    """
    basis_a = sl.orth(A)
    basis_b = sl.orth(B)
    _, cosines, Zs = svd(basis_a.T.dot(basis_b), full_matrices=False)
    cosines = np.minimum(cosines, ones_like(cosines))
    theta = np.maximum(np.arccos(cosines), zeros_like(cosines))
    # Small angles are ill-conditioned through arccos; recompute them from
    # the orthogonal complement via arcsin (Knyazev & Argentati).
    small = cosines > np.sqrt(2.0) / 2.0
    if np.any(small):
        projected = basis_b.dot(Zs)[:, small]
        _, sines, _ = svd(projected - basis_a.dot(basis_a.T.dot(projected)),
                          full_matrices=False)
        theta[small] = np.flipud(
            np.maximum(arcsin(np.minimum(sines, ones_like(sines))),
                       zeros_like(sines)))
    return theta
def chordal_principal_angles(A, B):
    r"""Chordal distance between A and B via principal angles.

    Returns :math:`d = \sqrt{\sum_i \sin^2 \theta_i}` where the
    :math:`\theta_i` are the principal angles between the subspaces.
    """
    angles = principal_angles(A, B)
    return np.sqrt(np.sum(np.sin(angles) ** 2))
def chordal(A, B):
    r"""Compute the chordal distance between A and B.

    The distance is :math:`d = \sqrt{K - \|\bar{A}^T\bar{B}\|_F^2}`,
    where K is the rank of A and B, :math:`\|\cdot\|_F` the Frobenius
    norm and :math:`\bar{A}` an orthonormal basis of A (same for B).

    :raises ValueError: if A and B do not have the same shape.
    """
    if A.shape != B.shape:
        # Fixed: the f-string continuation previously dropped the space,
        # producing "Error raisedin chordal(A, B)".
        raise ValueError(
            f"Atoms have not the same dimension ({A.shape} and {B.shape}). "
            f"Error raised in chordal(A, B)",
        )
    if np.allclose(A, B):
        return 0.0
    d2 = A.shape[1] - norm(sl.orth(A).T.dot(sl.orth(B)), "fro") ** 2
    # d2 can dip slightly below zero from rounding; fall back to |d2|.
    if d2 < 0.0:
        return sqrt(abs(d2))
    return sqrt(d2)
def fubini_study(A, B):
    r"""Compute the Fubini-Study distance between A and B.

    Based on principal angles, :math:`d = \arccos \prod_i \cos\theta_i`,
    evaluated here as the arccos of :math:`\det(\bar{A}^T\bar{B})` with
    :math:`\bar{A}`, :math:`\bar{B}` orthonormal bases.

    :raises ValueError: if A and B do not have the same shape.
    """
    if A.shape != B.shape:
        # Fixed: the f-string continuation previously dropped the space,
        # producing "Error raised infubini_study(A, B)".
        raise ValueError(
            f"Atoms have different dim ({A.shape} and {B.shape}). "
            f"Error raised in fubini_study(A, B)",
        )
    if np.allclose(A, B):
        return 0.0
    return arccos(det(sl.orth(A).T.dot(sl.orth(B))))
def binet_cauchy(A, B):
    r"""Compute the Binet-Cauchy distance.

    Based on the principal angles between A and B:
    :math:`d = \sqrt{1 - \prod_i \cos^2 \theta_i}`.
    """
    angles = principal_angles(A, B)
    return np.sqrt(1.0 - np.prod(np.cos(angles) ** 2))
def geodesic(A, B):
    r"""Compute the arc length (geodesic) distance.

    Based on the principal angles between A and B:
    :math:`d = \sqrt{\sum_i \theta_i^2}`, i.e. the Euclidean norm of the
    principal-angle vector.
    """
    return np.linalg.norm(principal_angles(A, B))
def frobenius(A, B):
    """Frobenius distance between two same-shaped atoms A and B.

    :raises ValueError: if A and B do not have the same shape.
    """
    if A.shape != B.shape:
        # Fixed: the f-string continuation previously dropped the space,
        # producing "Error raised infrobenius(A, B)".
        raise ValueError(
            f"Atoms have different dim ({A.shape} and {B.shape}). "
            f"Error raised in frobenius(A, B)",
        )
    return norm(A - B, "fro")
def abs_euclidean(A, B):
    """Sign-invariant Euclidean distance between univariate atoms.

    Computed as sqrt(2 * (1 - |A.T B|)), which treats A and -A as the
    same atom (assumes unit-norm atoms -- TODO confirm with callers).

    :raises ValueError: if A or B is not a single column vector.
    """
    if (A.ndim != 1 and A.shape[1] != 1) or (B.ndim != 1 and B.shape[1] != 1):
        # Fixed: the f-string continuation previously dropped the space,
        # producing "Error raisedin abs_euclidean(A, B)".
        raise ValueError(
            f"Atoms are not univariate ({A.shape} and {B.shape}). "
            f"Error raised in abs_euclidean(A, B)",
        )
    if np.allclose(A, B):
        return 0.0
    return sqrt(2.0 * (1.0 - np.abs(A.T.dot(B))))
def euclidean(A, B):
    """Euclidean distance between univariate atoms.

    Computed as sqrt(2 * (1 - A.T B)) (assumes unit-norm atoms --
    TODO confirm with callers).

    :raises ValueError: if A or B is not a single column vector.
    """
    if (A.ndim != 1 and A.shape[1] != 1) or (B.ndim != 1 and B.shape[1] != 1):
        # Fixed: the f-string continuation previously dropped the space,
        # producing "Error raised ineuclidean(A, B)".
        raise ValueError(
            f"Atoms are not univariate ({A.shape} and {B.shape}). "
            f"Error raised in euclidean(A, B)",
        )
    if np.allclose(A, B):
        return 0.0
    return sqrt(2.0 * (1.0 - A.T.dot(B)))
def _valid_atom_metric(gdist):
    """Map a ground-distance name to its function, or None if unknown."""
    metrics = {
        "chordal": chordal,
        "chordal_principal_angles": chordal_principal_angles,
        "fubinistudy": fubini_study,
        "binetcauchy": binet_cauchy,
        "geodesic": geodesic,
        "frobenius": frobenius,
        "abs_euclidean": abs_euclidean,
        "euclidean": euclidean,
    }
    return metrics.get(gdist)
def _scale_metric(gdist, d, D1):
if (
gdist == "chordal"
or gdist == "chordal_principal_angles"
or gdist == "fubinistudy"
or gdist == "binetcauchy"
or gdist == "geodesic"
):
# TODO: scale with max n_features
return d / sqrt(D1[0].shape[0])
elif gdist == "frobenius":
return d / sqrt(2.0)
else:
return d
def _compute_gdm(D1, D2, g):
    """Compute ground distance matrix from dictionaries D1 and D2

    Distance g acts as ground distance.
    A kernel registration is applied if dictionary atoms do not have
    the same size.

    NOTE(review): when registration is needed and D1 holds the longer
    atoms, Da/Db are swapped and the returned matrix is indexed
    (D2, D1) instead of (D1, D2) -- confirm callers (emd) only pass
    dictionaries of equal cardinality, or transpose here.
    """
    # Do we need a registration? If kernel do not have the same shape, yes
    if not all(array([i.shape[0] for i in D1 + D2]) == D1[0].shape[0]):
        # compute correlation and distance matrices
        k_dim = D1[0].shape[1]
        # Register the shorter atoms against the dictionary holding the
        # longest ones.
        max_l1 = array([i.shape[0] for i in D1]).max()
        max_l2 = array([i.shape[0] for i in D2]).max()
        if max_l2 > max_l1:
            Da = D1
            Db = D2
            max_l = max_l2
        else:
            Da = D2
            Db = D1
            max_l = max_l1
        # Set all Db atom to largest value: zero-pad at the front so every
        # atom of Db spans max_l rows.
        Dbe = []
        for i in range(len(Db)):
            k_l = Db[i].shape[0]
            Dbe.append(concatenate((zeros((max_l - k_l, k_dim)), Db[i]), axis=0))
        gdm = zeros((len(Da), len(Db)))
        for i in range(len(Da)):
            m_dist, m_corr = _kernel_registration(Da[i], Dbe, g)
            k_l = Da[i].shape[0]
            # For each atom pair keep the distance at the shift with the
            # strongest absolute correlation.
            for j in range(len(Dbe)):
                gdm[i, j] = m_dist[
                    j, unravel_index(abs(m_corr[j, :]).argmax(), m_corr[j, :].shape)
                ]
    else:
        # all atoms have the same length, no registration
        gdm = zeros((len(D1), len(D2)))
        for i in range(len(D1)):
            for j in range(len(D2)):
                gdm[i, j] = g(D1[i], D2[j])
    return gdm
def hausdorff(D1, D2, gdist, scale=False):
    """
    Compute the Hausdorff distance between two sets of elements, here
    dictionary atoms, using a ground distance.
    Possible choices are "chordal", "fubinistudy", "binetcauchy", "geodesic",
    "frobenius", "abs_euclidean" or "euclidean".
    The scale parameter changes the return value to be between 0 and 1.
    """
    g = _valid_atom_metric(gdist)
    if g is None:
        print("Unknown ground distance, exiting.")
        return np.nan
    gdm = _compute_gdm(D1, D2, g)
    # Hausdorff distance: the larger of the two directed distances.
    directed_12 = np.min(gdm, axis=0).max()
    directed_21 = np.min(gdm, axis=1).max()
    d = np.max([directed_12, directed_21])
    if not scale:
        return d
    return _scale_metric(gdist, d, D1)
def emd(D1, D2, gdist, scale=False):
    """
    Compute the Earth Mover's Distance (EMD) between two sets of elements,
    here dictionary atoms, using a ground distance.
    Possible choices are "chordal", "fubinistudy", "binetcauchy", "geodesic",
    "frobenius", "abs_euclidean" or "euclidean".
    The scale parameter changes the return value to be between 0 and 1.
    """
    g = _valid_atom_metric(gdist)
    if g is None:
        print("Unknown ground distance, exiting.")
        return NaN
    gdm = _compute_gdm(D1, D2, g)
    # Solve the EMD as a linear program with cvxopt: minimize <gdm, F> over
    # flows F >= 0 (enforced by G3) whose row sums are at most 1/len(D1)
    # (G1/h1), column sums at most 1/len(D2) (G2/h2), and whose total mass
    # equals 1 (A/b). Flows are flattened column-major to match 'c'.
    c = co.matrix(gdm.flatten(order="F"))
    G1 = co.spmatrix([], [], [], (len(D1), len(D1) * len(D2)))
    G2 = co.spmatrix([], [], [], (len(D2), len(D1) * len(D2)))
    G3 = co.spmatrix(-1.0, range(len(D1) * len(D2)), range(len(D1) * len(D2)))
    for i in range(len(D1)):
        for j in range(len(D2)):
            k = j + (i * len(D2))
            G1[i, k] = 1.0
            G2[j, k] = 1.0
    G = co.sparse([G1, G2, G3])
    h1 = co.matrix(1.0 / len(D1), (len(D1), 1))
    h2 = co.matrix(1.0 / len(D2), (len(D2), 1))
    h3 = co.spmatrix([], [], [], (len(D1) * len(D2), 1))
    h = co.matrix([h1, h2, h3])
    A = co.matrix(1.0, (1, len(D1) * len(D2)))
    b = co.matrix([1.0])
    # Silence the solver's per-iteration progress output.
    co.solvers.options["show_progress"] = False
    sol = solv.lp(c, G, h, A, b)
    d = sol["primal objective"]
    if not scale:
        return d
    else:
        return _scale_metric(gdist, d, D1)
def _multivariate_correlation(s, D):
"""Compute correlation between multivariate atoms
Compute the correlation between a multivariate atome s and dictionary D
as the sum of the correlation in each n_dims dimensions.
"""
n_features = s.shape[0]
n_dims = s.shape[1]
n_kernels = len(D)
corr = np.zeros((n_kernels, n_features))
for k in range(n_kernels): # for all atoms
corrTmp = 0
for j in range(n_dims): # for all dimensions
corrTmp += np.correlate(s[:, j], D[k][:, j])
corr[k, : len(corrTmp)] = corrTmp
return corr
def detection_rate(ref, recov, threshold):
    r"""Compute the detection rate between reference and recovered dictionaries

    The reference ref and the recovered recov are univariate or multivariate
    dictionaries. An atom a of the ref dictionary is considered as recovered if
    $c \geq threshold$ with $c = argmax_{r \in R} |<a, r>|$, that is the
    absolute value of the maximum correlation between a and any atom r of the
    recovered dictionary R is above a given threshold.
    The process is iterative and an atom r could be matched only once with an
    atom a of the reference dictionary. In other word, each atom a is matched
    with a different atom r.

    NOTE(review): both input lists are modified in place when their atoms
    are 1-D (promoted to column vectors), and the final rate is divided by
    the number of *recovered* atoms -- confirm this is intended when the
    two dictionaries have different sizes.
    """
    n_kernels_ref, n_kernels_recov = len(ref), len(recov)
    n_features = ref[0].shape[0]
    # Promote univariate atoms to column vectors so the correlation helper
    # can treat every atom as (n_features, n_dims).
    if ref[0].ndim == 1:
        n_dims = 1
        for k in range(n_kernels_ref):
            ref[k] = atleast_2d(ref[k]).T
    else:
        n_dims = ref[0].shape[1]
    if recov[0].ndim == 1:
        for k in range(n_kernels_recov):
            recov[k] = atleast_2d(recov[k]).T
    dr = 0
    corr = zeros((n_kernels_ref, n_kernels_recov))
    for k in range(n_kernels_ref):
        # Zero-pad the reference atom on both sides so every alignment of
        # the recovered atoms is explored by the correlation.
        c_tmp = _multivariate_correlation(
            concatenate(
                (zeros((n_features, n_dims)), ref[k], zeros((n_features, n_dims))), axis=0
            ),
            recov,
        )
        for j in range(n_kernels_recov):
            idx_max = argmax(abs(c_tmp[j, :]))
            corr[k, j] = c_tmp[j, idx_max]
    # Greedy one-to-one matching: repeatedly take the strongest remaining
    # correlation and retire its row (reference atom) and column
    # (recovered atom).
    c_local = np.abs(corr.copy())
    for _ in range(n_kernels_ref):
        max_corr = c_local.max()
        if max_corr >= threshold:
            dr += 1
            idx_max = np.unravel_index(c_local.argmax(), c_local.shape)
            c_local[:, idx_max[1]] = zeros(n_kernels_ref)
            c_local[idx_max[0], :] = zeros(n_kernels_recov)
    return float(dr) / n_kernels_recov * 100.0
def _convert_array(ref, recov):
if ref[0].ndim == 1:
for k in range(len(ref)):
ref[k] = atleast_2d(ref[k]).T
if recov[0].ndim == 1:
for k in range(len(recov)):
recov[k] = atleast_2d(recov[k]).T
D1 = np.array(ref)
D2 = np.array(recov)
M = D1.shape[0]
N = D1.shape[1]
D1 = D1.reshape((M, N))
D2 = D2.reshape((M, N))
return D1, D2, M
def precision_recall(ref, recov, threshold):
    """Precision and recall (in percent) of a dictionary recovery.

    An atom counts as matched when its best correlation against the other
    dictionary exceeds *threshold*.
    """
    D1, D2, M = _convert_array(ref, recov)
    corr = D1.dot(D2.T)
    matched_recov = (np.max(corr, axis=0) > threshold).sum()
    matched_ref = (np.max(corr, axis=1) > threshold).sum()
    return (float(matched_recov) / float(M) * 100.0,
            float(matched_ref) / float(M) * 100.0)
def precision_recall_points(ref, recov):
    """Per-atom precision and recall values for a recovery experiment.

    Returns the best correlation of each recovered atom against the
    reference dictionary (precision) and of each reference atom against
    the recovered dictionary (recall).
    """
    D1, D2, _ = _convert_array(ref, recov)
    corr = D1.dot(D2.T)
    return np.max(corr, axis=0), np.max(corr, axis=1)
def beta_dist(D1, D2):
    r"""Compute the Beta-distance proposed by Skretting and Engan.

    The beta-distance is
    :math:`\beta(D_1, D_2) = \frac{1}{M_1+M_2}(\sum_j \beta(D_1, d^2_j)
    + \sum_j \beta(D_2, d^1_j))`
    with :math:`\beta(D, x) = \arccos(\max_i |d^T_i x| / \|x\|)`,
    as proposed in:
    Karl Skretting and Kjersti Engan,
    Learned dictionaries for sparse image representation: properties and
    results, SPIE, 2011.

    :raises ValueError: if the two dictionaries hold atoms of different
        shapes.
    """
    if D1[0].shape != D2[0].shape:
        raise ValueError(
            f"Dictionaries have different dim : {D1[0].shape} and {D2[0].shape}."
        )
    stacked_1 = np.array(D1)
    stacked_2 = np.array(D2)
    M1, N = stacked_1.shape[0], stacked_1.shape[1]
    M2 = stacked_2.shape[0]
    stacked_1 = stacked_1.reshape((M1, N))
    stacked_2 = stacked_2.reshape((M2, N))
    corr = stacked_1.dot(stacked_2.T)
    best_for_2 = np.max(corr, axis=0)
    best_for_1 = np.max(corr, axis=1)
    # Perfect mutual matches mean a zero distance without touching arccos.
    if np.allclose(best_for_2, np.ones(M2)) and np.allclose(
        best_for_1, np.ones(M1)
    ):
        return 0.0
    return (
        np.sum(np.arccos(best_for_2)) + np.sum(np.arccos(best_for_1))
    ) / (M1 + M2)
|
Quincy Smile Makeover - Are Smile Makeovers Worth It?
Are smile makeovers worth it? Quincy area dentist says yes!
Quincy, MA patients who are ready to achieve a new smile are often welcome to speak with Dr. Stephen J. Matarazzo about the advantages of a smile makeover.
Dr. Stephen J. Matarazzo describes the smile makeover as a complete transformation of the teeth and sometimes gum tissues to achieve a more attractive appearance. This is achieved by combining a variety of cosmetic dentistry procedures into one treatment plan to make a dramatic change.
What treatments are included in a smile makeover?
Many of these are hand-selected by our dental team to include into a treatment plan based on the needs of the patient. One patient may benefit from porcelain veneers, while another may be better suited for dental crowns. Evaluating the patient’s smile and learning about what the patient wants to change is the best way to decide how to move forward with treatment. Every plan is uniquely tailored to an individual patient, and no two plans are the same.
How long does it take to achieve my new smile?
Patients interested in a smile makeover are often interested in learning more about how long it will take for the transformation. Patients can discuss with their dentist whether they want to combine treatments or have them performed one at a time. Some patients choose to use oral conscious sedation to have many treatments administered at just one appointment while ensuring their comfort throughout. This is a personal decision that should be made after patients have been educated on what they can expect.
Why is each smile makeover process different?
The needs of our patients are unique, and there is no “one size fits all” option when it comes to rejuvenating a smile. Each patient has specific concerns that need to be addressed with dental repairs. Dr. Stephen J. Matarazzo and his team listen to patients’ needs and ensure that the treatments suggested will benefit them. By developing a personalized dental plan for patients, they know and trust that we have their best interests at heart and that we provide them with the personalized service they want and deserve.
How many treatments might a patient need for a smile makeover?
Treatments may include a single solution such as full dentures or may incorporate a variety of procedures to achieve the results patients desire. This may include a combination of treatments mentioned above, such as dental crowns, professional teeth whitening, or the placement of porcelain veneers on the anterior teeth. By having an initial consultation and evaluation appointment with Dr. Stephen J. Matarazzo, patients can work with the dental staff to develop a plan of action that meets their needs and is within their budget. Our practice offers high quality materials and services for patients who are serious about achieving the beautiful smile they’ve always wanted – and we provide the professional, talented staff to help!
Are smile makeovers worth it?
Absolutely! Dr. Stephen J. Matarazzo sees patients all the time who will never look back after having their smile restoration performed. He sees how it changes the confidence of patients and how it improves their self-esteem when they have a smile they are excited to share with others. Many patients do not regret taking the time to enhance their appearance in this manner, and often share with their dental team how pleased they are with the results.
Are you ready to work with a dentist to redesign your smile?
Quincy, MA area dentist, Dr. Stephen J. Matarazzo, is here to help patients in the community with their dental needs. He believes in helping patients achieve the smile they’ve always wanted by combining today’s amazing treatments into a terrific solution. Contact his practice today to schedule an appointment at (617) 405-3939 and visit the practice at 300 Crown Colony Drive. We welcome new patients into our state-of-the-art facility in Quincy, MA.
Home » Related Articles » Are smile makeovers worth it? Quincy area dentist says yes!
|
#!/usr/bin/env python
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites:
- Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster
- Set your Google Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-credentials
"""
import argparse
# [START bigtable_hw_imports]
import datetime
from google.cloud import bigtable
from google.cloud.bigtable import column_family
from google.cloud.bigtable import row_filters
# [END bigtable_hw_imports]
def main(project_id, instance_id, table_id):
    """Run the Cloud Bigtable "hello world" sequence.

    Creates a table with a single column family, writes a few greeting
    rows, reads them back (single row and full scan) through a version
    filter, and finally deletes the table.
    """
    # [START bigtable_hw_connect]
    # The client must be created with admin=True because it will create a
    # table.
    client = bigtable.Client(project=project_id, admin=True)
    instance = client.instance(instance_id)
    # [END bigtable_hw_connect]

    # [START bigtable_hw_create_table]
    print('Creating the {} table.'.format(table_id))
    table = instance.table(table_id)

    print('Creating column family cf1 with Max Version GC rule...')
    # Create a column family with GC policy : most recent N versions
    # Define the GC policy to retain only the most recent 2 versions
    max_versions_rule = column_family.MaxVersionsGCRule(2)
    column_family_id = 'cf1'
    column_families = {column_family_id: max_versions_rule}
    if not table.exists():
        table.create(column_families=column_families)
    else:
        print("Table {} already exists.".format(table_id))
    # [END bigtable_hw_create_table]

    # [START bigtable_hw_write_rows]
    print('Writing some greetings to the table.')
    greetings = ['Hello World!', 'Hello Cloud Bigtable!', 'Hello Python!']
    rows = []
    # Cell qualifiers and values must be bytes.
    column = 'greeting'.encode()
    for i, value in enumerate(greetings):
        # Note: This example uses sequential numeric IDs for simplicity,
        # but this can result in poor performance in a production
        # application.  Since rows are stored in sorted order by key,
        # sequential keys can result in poor distribution of operations
        # across nodes.
        #
        # For more information about how to design a Bigtable schema for
        # the best performance, see the documentation:
        #
        #     https://cloud.google.com/bigtable/docs/schema-design
        row_key = 'greeting{}'.format(i).encode()
        row = table.direct_row(row_key)
        row.set_cell(column_family_id,
                     column,
                     value,
                     timestamp=datetime.datetime.utcnow())
        rows.append(row)
    # Send all mutations in a single batch request.
    table.mutate_rows(rows)
    # [END bigtable_hw_write_rows]

    # [START bigtable_hw_create_filter]
    # Create a filter to only retrieve the most recent version of the cell
    # for each column across entire row.
    row_filter = row_filters.CellsColumnLimitFilter(1)
    # [END bigtable_hw_create_filter]

    # [START bigtable_hw_get_with_filter]
    print('Getting a single greeting by row key.')
    key = 'greeting0'.encode()

    row = table.read_row(key, row_filter)
    cell = row.cells[column_family_id][column][0]
    print(cell.value.decode('utf-8'))
    # [END bigtable_hw_get_with_filter]

    # [START bigtable_hw_scan_with_filter]
    print('Scanning for all greetings:')
    partial_rows = table.read_rows(filter_=row_filter)

    for row in partial_rows:
        cell = row.cells[column_family_id][column][0]
        print(cell.value.decode('utf-8'))
    # [END bigtable_hw_scan_with_filter]

    # [START bigtable_hw_delete_table]
    print('Deleting the {} table.'.format(table_id))
    table.delete()
    # [END bigtable_hw_delete_table]
if __name__ == '__main__':
    # The module docstring doubles as the CLI description; defaults are
    # shown in --help thanks to ArgumentDefaultsHelpFormatter.
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('project_id', help='Your Cloud Platform project ID.')
    parser.add_argument(
        'instance_id', help='ID of the Cloud Bigtable instance to connect to.')
    parser.add_argument(
        '--table',
        help='Table to create and destroy.',
        default='Hello-Bigtable')

    args = parser.parse_args()
    main(args.project_id, args.instance_id, args.table)
|
Every studio has one, a creative diva, that Eeyore type that grumbles and moans about every task, hates whatever they are doing, yet doesn’t really do anything about it. Here’s how to get the best from your creatives.
Why are they like this? Well, they do live in a daily environment of design by committee, where they constantly get told to change, change, change their masterpiece – until it’s diluted down enough to meet the client’s satisfaction.
Everyone has a design opinion and creatives feel this more than most, as theirs usually sits at the bottom of the heap. When looking at the situation this way it’s actually quite easy to understand why they feel stifled and frustrated.
Yet unlike a portion of other designers who ‘drop the baby’ and allow the client to do whatever they like, just to get it out the door and move on – these ones don’t. Their incessant bellyaching drives everyone around them wild but it’s actually a sign that their passion still lies beneath, a sign of suppressed creativity.
So what are the keys to engaging these people and making the environment painless for everyone?
1. Allow them to be unproductive – to do the absurd and fail. Innovation comes from experimentation and exploring outside the parameters. Expect the costs that come along with this but in the long run it will be cheaper than losing clients through not staying ahead of the game.
2. Don’t constrain them – performance will be better if they’re allowed to work autonomously. Don’t force them to follow unnecessary processes or hover over them, asking what they’re doing or how they’re doing it. Creatives are easily distracted so keep them away from emails, IMs and phone calls. In short, don’t interrupt the creative process. Allowing them to work outside normal hours is also beneficial as they will often prefer to be left alone.
3. Don’t criticise the bad ideas – Make them feel important. Creatives are used to criticism but it often cuts deeper than you think; they can often feel crushed. Not noticing that special effort spent on a job will do you no good, as their opinion will be verbalised and bad energy can affect the whole team. Be lavish with the praise but also be sincere about it.
4. Consider carefully before allowing them to manage others – your most talented creatives may be wonderful at their jobs but this certainly does not mean they should be managing others. Actually it’s rare that natural innovators have good leadership skills, (a number of extremely successful business owners have identified their own leadership deficits and brought in others to make up for it, Mark Zuckerberg for one). A study showed that the most talented creatives also exhibit psychological characteristics such as being rebellious, being independently motivated and low in empathy. All can inhibit them from being effective leaders.
Understanding what really makes your creative divas tick will help you to build an environment where they can flourish and truly allow their brilliance to shine.
Image courtesy of Bryant Arnold.
Becca Stevens wants to live in a world where clients stick to the initial brief and designers go home on time. As a Studio Manager, she’s been subjected to all kinds of job juggling, patience testing and deadline moving situations. When she’s not training other agency folk how to use Streamtime to harness the chaos, you can find her poking around antiques and vintage places, finding curiosities to treasure.
|
class User():
    """Basic account record holding a person's identity details."""

    def __init__(self, first_name, last_name, birthday, sex):
        self.first_name = first_name
        self.last_name = last_name
        self.birthday = birthday
        self.sex = sex

    def describe_user(self):
        """Print a multi-line summary of the user's details."""
        print('first name: ' + self.first_name +
              '\nlast name: ' + self.last_name +
              '\nbirthday: ' + self.birthday +
              '\nsex: ' + self.sex)

    def greet_user(self):
        """Print a short greeting addressed to the user by first name."""
        print('\nhey ' + self.first_name + ', \nhow are you?\n')


class Admin(User):
    """A user who additionally carries a list of admin privileges."""

    # Privileges granted when the caller does not supply any.
    DEFAULT_PRIVILEGES = ('can add post', 'can delete post', 'can ban user')

    def __init__(self, first_name, last_name, birthday, sex, privileges=None):
        """Create an admin.

        privileges: optional list of privilege strings.  A mutable list
        default would be shared between every instance created with the
        default (the classic mutable-default-argument pitfall), so None
        is used as a sentinel and a fresh list is built per instance.
        """
        super().__init__(first_name, last_name, birthday, sex)
        if privileges is None:
            privileges = list(self.DEFAULT_PRIVILEGES)
        self.privileges = privileges

    def show_privileges(self):
        """Print the admin's privilege list."""
        print('privileges: ' + str(self.privileges))
# Exercise the Admin class: one admin keeps the default privilege set,
# the other is given an explicit, reduced set.
steve = Admin('Steve', 'Mcqueen', '18-04-2000', 'male')
bob = Admin('Bob', 'the Rob', '23-12-1999', 'male',
            ['can add post', 'can delete post'])

for admin in (steve, bob):
    admin.greet_user()
    admin.show_privileges()
|
Chemical factory W. Neudorff GmbH KG was founded in 1854 in Königsberg in what was then Prussia. At that time, it was one of the first German firms to produce plant protection and animal care products.
The company was founded by Wilhelm Neudorff, a fabric dyer with a particular interest in chemistry. He developed a diverse range of bath essences, tinctures, ointments and cosmetics.
In 1903, the company moved to Wuppertal, where it continued to expand its range of chemical-pharmaceutical products. Plant protection products, pest control agents and animal care products were all being produced by the mid-1930s. There was already an emphasis at that time on making the safest possible products.
In 1959, Rudolf Lohmann bought the company and moved it to Emmerthal. Three people then began to expand the firm into the form recognisable today. A whole range of new products were developed and produced. We recognised very early on that there is a market for eco-friendly products for amateur gardeners and we have continued to expand these product lines. Our range has grown and grown and we're still diversifying to this day.
Over the years, we have developed eco friendly processes, compounds and equipment, which are ideal for use in natural gardens, in organic horticulture and in integrated plant cultivation.
|
# TestEngineIndexedProps.py
#
# Check that indexed properties (where the engine number is replaced by '#) in
# engines XML definition are working.
#
# Copyright (c) 2016 Bertrand Coconnier
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>
#
import shutil
import xml.etree.ElementTree as et
from JSBSim_utils import JSBSimTestCase, CreateFDM, RunTest
class TestEngineIndexedProps(JSBSimTestCase):
    """Check that indexed properties (where the engine number is written
    as '#' in the engine XML definition) are expanded into one concrete
    property per engine instance and evaluated per engine."""

    def testEnginePowerVC(self):
        """Two identical engines must expose equal EnginePowerVC values."""
        fdm = CreateFDM(self.sandbox)
        fdm.load_script(self.sandbox.path_to_jsbsim_file('scripts',
                                                         'L4102.xml'))
        fdm.run_ic()

        pm = fdm.get_property_manager()
        self.assertTrue(pm.hasNode('propulsion/engine[0]/EnginePowerVC'))
        self.assertTrue(pm.hasNode('propulsion/engine[1]/EnginePowerVC'))

        while fdm.run():
            self.assertAlmostEqual(fdm['propulsion/engine[0]/EnginePowerVC'],
                                   fdm['propulsion/engine[1]/EnginePowerVC'])

    def testFunctionWithIndexedProps(self):
        """Check a <function> whose name uses the '#' engine index."""
        tree = et.parse(self.sandbox.path_to_jsbsim_file('engine',
                                                         'eng_PegasusXc.xml'))
        # Define the function starter-max-power-W as a 'post' function
        root = tree.getroot()
        startPowFunc_tag = root.find("function/[@name='propulsion/engine[#]/starter-max-power-W']")
        startPowFunc_tag.attrib['type'] = 'post'
        tree.write('eng_PegasusXc.xml')

        # Copy the propeller file.
        shutil.copy(self.sandbox.path_to_jsbsim_file('engine', 'prop_deHavilland5000.xml'),
                    '.')
        fdm = CreateFDM(self.sandbox)
        fdm.set_engine_path('.')
        fdm.load_script(self.sandbox.path_to_jsbsim_file('scripts',
                                                         'Short_S23_1.xml'))
        fdm.run_ic()

        pm = fdm.get_property_manager()
        self.assertTrue(pm.hasNode('propulsion/engine[0]/starter-max-power-W'))
        self.assertTrue(pm.hasNode('propulsion/engine[1]/starter-max-power-W'))
        self.assertTrue(pm.hasNode('propulsion/engine[2]/starter-max-power-W'))
        self.assertTrue(pm.hasNode('propulsion/engine[3]/starter-max-power-W'))

        while fdm.run():
            rpm = [fdm['propulsion/engine[0]/engine-rpm'],
                   fdm['propulsion/engine[1]/engine-rpm'],
                   fdm['propulsion/engine[2]/engine-rpm'],
                   fdm['propulsion/engine[3]/engine-rpm']]
            # NOTE: 'xrange' (Python 2 only) replaced by 'range', which is
            # valid on both Python 2 and Python 3.
            for i in range(4):
                maxPower = max(0.0, 1.0-rpm[i]/400)*498.941*0.10471976*rpm[i]
                self.assertAlmostEqual(fdm['propulsion/engine[%d]/starter-max-power-W' % (i,)],
                                       maxPower)

    def testTableWithIndexedVars(self):
        """Check a <table> whose name and lookup use the '#' engine index."""
        tree = et.parse(self.sandbox.path_to_jsbsim_file('engine',
                                                         'eng_PegasusXc.xml'))
        # Define the function starter-max-power-W as a 'post' function
        root = tree.getroot()
        startPowFunc_tag = root.find("function/[@name='propulsion/engine[#]/starter-max-power-W']")
        startPowFunc_tag.attrib['type'] = 'post'
        # Replace the <difference> sub-element by an indexed <table> that
        # implements the same ramp: 1.0 at 0 rpm down to 0.0 at 400 rpm.
        max_tag = startPowFunc_tag.find('product/max')
        diff_tag = max_tag.find('difference')
        max_tag.remove(diff_tag)
        table_tag = et.SubElement(max_tag, 'table')
        table_tag.attrib['name'] = 'propulsion/engine[#]/starter-tabular-data'
        indepVar_tag = et.SubElement(table_tag, 'independentVar')
        indepVar_tag.attrib['lookup'] = 'row'
        indepVar_tag.text = 'propulsion/engine[#]/engine-rpm'
        tData_tag = et.SubElement(table_tag, 'tableData')
        tData_tag.text = '0.0 1.0\n400.0 0.0'
        tree.write('eng_PegasusXc.xml')

        # Copy the propeller file.
        shutil.copy(self.sandbox.path_to_jsbsim_file('engine', 'prop_deHavilland5000.xml'),
                    '.')
        fdm = CreateFDM(self.sandbox)
        fdm.set_engine_path('.')
        fdm.load_script(self.sandbox.path_to_jsbsim_file('scripts',
                                                         'Short_S23_1.xml'))
        fdm.run_ic()

        pm = fdm.get_property_manager()
        self.assertTrue(pm.hasNode('propulsion/engine[0]/starter-max-power-W'))
        self.assertTrue(pm.hasNode('propulsion/engine[1]/starter-max-power-W'))
        self.assertTrue(pm.hasNode('propulsion/engine[2]/starter-max-power-W'))
        self.assertTrue(pm.hasNode('propulsion/engine[3]/starter-max-power-W'))

        while fdm.run():
            rpm = [fdm['propulsion/engine[0]/engine-rpm'],
                   fdm['propulsion/engine[1]/engine-rpm'],
                   fdm['propulsion/engine[2]/engine-rpm'],
                   fdm['propulsion/engine[3]/engine-rpm']]
            # 'xrange' replaced by the Python 2/3-compatible 'range'.
            for i in range(4):
                tabularData = max(0.0, 1.0-rpm[i]/400)
                maxPower = tabularData*498.941*0.10471976*rpm[i]
                self.assertAlmostEqual(fdm['propulsion/engine[%d]/starter-max-power-W' % (i,)],
                                       maxPower)
                self.assertAlmostEqual(fdm['propulsion/engine[%d]/starter-tabular-data' % (i,)],
                                       tabularData)
# Hand the test case to JSBSim's unittest runner.
RunTest(TestEngineIndexedProps)
|
Computers are everywhere. Today, in the United States, professional institutions provide at least one device for every five students. Find out how technology in education is transforming the whole process of learning.
Do computers really help you to study?
Not so long ago, students had to carry dozens of books with them, handwrite term papers and dissertations, browse half a library in search of the right literature, etc. Now books can be loaded onto a tablet or laptop; articles can be typed in Word; grammatical, stylistic and other errors can be checked in online checkers, and a necessary page of printed text can be copied or photographed with your phone. And, of course, progress does not stand still, and every day more and more advanced technologies come to the aid of undergraduates.
Of course, Word corrects mistakes, but only grammatical and punctuation ones. In order to check the style, you should use some of the online editors. Grammarly will cope with the English language, and Orfogrammka will cope with Russian. And for those who want to find out why you need to write this way, and not otherwise, there are a lot of online websites, all sorts of rules and answers from expert philologists.
Interactive displays have made static reality more dynamic and vibrant. These technologies help students create team projects, use visibility in presentations, and in general, make learning more diverse and exciting. And this is a good thing because it increases students’ engagement in the educational process and makes them more diligent and attentive to deadlines. With such tools no need to scan many a bid4papers review in search of a reliable writing agency to complete writing tasks on time.
Extended reality is a thing, which makes interesting not only a visit to a museum but also a lecture on abstract concepts. Despite the fact that VR is still used quite rarely in education, there are already a couple of exciting developments. The Sky Map application allows students to learn about the night sky in search of constellations, but it will take a long time before the integration of such applications comes to schools. Extended reality should delay and possess hints for all cases of reference to real objects.
For instance, Karen Schrier from the USA has invented a game, through which she has been telling her students about the famous battle of Lexington for several years. With the help of an app on the cellphone, students move around the true battlefield, participate in the battle and independently try to answer questions that still cause controversy among American historians.
Progress has reached even the buildings of educational institutions – however, so far only to the US. A smart campus is a new approach to the organization of an educational process. The point is that when entering the institute a special device reads information about the person, and the undergraduate is immediately sent individual reminders – schedules, examinations and tests results, messages from teachers, etc.
Some universities use computers to exchange information between students and professors in electronic form without going online. They operate on energy-efficient long-range networks.
I think computers do have value in the classrooms across the world. It is always welcoming to see one because of the efficiency of using it. Furthermore, this is a pretty good list. I would not have imagined VR being used to showcase a battle! Furthermore, it seems as though computers will only grow up from here and be more necessary than ever.
|
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2011 Nick Hall
# Copyright (C) 2013 Heinz Brinker <heinzbrinker@yahoo.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Gtk modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import Pango
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.lib import EventType, EventRoleType
from gramps.gen.plug import Gramplet
from gramps.gui.widgets import Photo
from gramps.gen.display.name import displayer as name_displayer
from gramps.gen.display.place import displayer as place_displayer
from gramps.gen.datehandler import get_date
from gramps.gen.utils.file import media_path_full
from gramps.gen.const import COLON, GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
class PersonDetails(Gramplet):
    """
    Displays details for a person.

    Shows the active person's photo, name, alternate names, parents,
    vital events (birth, baptism, death, burial) and a few attributes
    in a two-column grid.
    """

    def init(self):
        """Replace the gramplet's default text view with a custom widget."""
        self.gui.WIDGET = self.build_gui()
        self.gui.get_container_widget().remove(self.gui.textview)
        self.gui.get_container_widget().add(self.gui.WIDGET)
        # Redraw whenever the user changes the preferred name format.
        self.uistate.connect('nameformat-changed', self.update)

    def build_gui(self):
        """
        Build the GUI interface.
        """
        self.top = Gtk.Box()
        vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        # Photo(True) is used on screens shorter than 1000 px; presumably
        # this selects a smaller image size — TODO confirm in widgets.Photo.
        self.photo = Photo(self.uistate.screen_height() < 1000)
        self.photo.show()
        self.name = Gtk.Label(halign=Gtk.Align.START)
        self.name.override_font(Pango.FontDescription('sans bold 12'))
        vbox.pack_start(self.name, fill=True, expand=False, padding=7)
        # Two-column grid: attribute labels on the left, values on the right.
        self.grid = Gtk.Grid(orientation=Gtk.Orientation.VERTICAL)
        self.grid.set_column_spacing(10)
        vbox.pack_start(self.grid, fill=True, expand=False, padding=5)
        vbox.show_all()
        self.top.pack_start(self.photo, fill=True, expand=False, padding=5)
        self.top.pack_start(vbox, fill=True, expand=True, padding=10)
        return self.top

    def add_row(self, title, value):
        """
        Add a row to the table.
        """
        label = Gtk.Label(label=title + COLON, halign=Gtk.Align.END,
                          valign=Gtk.Align.START)
        label.show()
        value = Gtk.Label(label=value, halign=Gtk.Align.START)
        value.show()
        self.grid.add(label)
        self.grid.attach_next_to(value, label, Gtk.PositionType.RIGHT, 1, 1)

    def clear_grid(self):
        """
        Remove all the rows from the grid.
        """
        list(map(self.grid.remove, self.grid.get_children()))

    def db_changed(self):
        """Reconnect to the database: refresh when a person is updated."""
        self.dbstate.db.connect('person-update', self.update)

    def active_changed(self, handle):
        """Redisplay when the active person changes."""
        self.update()

    def update_has_data(self):
        """
        Determine if a person has_data by checking:

            1. has a birth, baptism, death, or burial event; OR
            2. has a father; OR
            3. has a mother
        """
        active_handle = self.get_active('Person')
        has_data = False
        if active_handle:
            active_person = self.dbstate.db.get_person_from_handle(active_handle)
            if active_person:
                for event_type in [EventType(EventType.BIRTH),
                                   EventType(EventType.BAPTISM),
                                   EventType(EventType.DEATH),
                                   EventType(EventType.BURIAL)]:
                    event = self.get_event(active_person, event_type)
                    if event:
                        has_data = True
                        break
                if not has_data:
                    family_handle = active_person.get_main_parents_family_handle()
                    if family_handle:
                        family = self.dbstate.db.get_family_from_handle(family_handle)
                        handle = family.get_father_handle()
                        # NOTE(review): if a father handle exists but does not
                        # resolve to a person, the mother is never checked —
                        # confirm this asymmetry is intentional.
                        if handle:
                            if self.dbstate.db.get_person_from_handle(handle):
                                has_data = True
                        else:
                            handle = family.get_mother_handle()
                            if handle:
                                if self.dbstate.db.get_person_from_handle(handle):
                                    has_data = True
        self.set_has_data(has_data)

    def main(self): # return false finishes
        """Redraw the gramplet for the current active person."""
        self.display_empty()
        active_handle = self.get_active('Person')
        if active_handle:
            active_person = self.dbstate.db.get_person_from_handle(active_handle)
            # Hide while repopulating to avoid flicker.
            self.top.hide()
            if active_person:
                self.display_person(active_person)
            self.top.show()
        self.update_has_data()

    def display_person(self, active_person):
        """
        Display details of the active person.
        """
        self.load_person_image(active_person)
        self.name.set_text(name_displayer.display(active_person))
        self.clear_grid()
        self.display_alternate_names(active_person)
        self.display_parents(active_person)
        self.display_separator()
        self.display_type(active_person, EventType(EventType.BIRTH))
        self.display_type(active_person, EventType(EventType.BAPTISM))
        self.display_type(active_person, EventType(EventType.DEATH))
        self.display_type(active_person, EventType(EventType.BURIAL))
        self.display_separator()
        self.display_attribute(active_person, _('Occupation'))
        self.display_attribute(active_person, _('Title'))
        self.display_attribute(active_person, _('Religion'))

    def display_empty(self):
        """
        Display empty details when no person is selected.
        """
        self.photo.set_image(None)
        self.photo.set_uistate(None, None)
        self.name.set_text(_('No active person'))
        self.clear_grid()

    def display_separator(self):
        """
        Display an empty row to separate grouped entries.
        """
        # A tiny empty label acts as vertical whitespace between groups.
        label = Gtk.Label(label='')
        label.override_font(Pango.FontDescription('sans 4'))
        label.show()
        self.grid.add(label)

    def display_alternate_names(self, active_person):
        """
        Display other names of the person
        """
        try:
            nlist = active_person.get_alternate_names()
            if len(nlist) > 0:
                for altname in nlist:
                    name_type = str(altname.get_type())
                    text = name_displayer.display_name(altname)
                    self.add_row(name_type, text)
                self.display_separator()
        # NOTE(review): bare except silently ignores any failure here —
        # consider narrowing to the exception actually expected.
        except:
            pass

    def display_parents(self, active_person):
        """
        Display the parents of the active person.
        """
        family_handle = active_person.get_main_parents_family_handle()
        if family_handle:
            family = self.dbstate.db.get_family_from_handle(family_handle)
            handle = family.get_father_handle()
            if handle:
                father = self.dbstate.db.get_person_from_handle(handle)
                father_name = name_displayer.display(father)
            else:
                father_name = _('Unknown')
            handle = family.get_mother_handle()
            if handle:
                mother = self.dbstate.db.get_person_from_handle(handle)
                mother_name = name_displayer.display(mother)
            else:
                mother_name = _('Unknown')
        else:
            father_name = _('Unknown')
            mother_name = _('Unknown')

        self.add_row(_('Father'), father_name)
        self.add_row(_('Mother'), mother_name)

    def display_attribute(self, active_person, attr_key):
        """
        Display an attribute row.
        """
        values = []
        for attr in active_person.get_attribute_list():
            if attr.get_type() == attr_key:
                values.append(attr.get_value())
        if values:
            # The separator string is passed through _() so locales can
            # use their own list separator.
            self.add_row(attr_key, _(', ').join(values))

    def display_type(self, active_person, event_type):
        """
        Display an event type row.
        """
        event = self.get_event(active_person, event_type)
        if event:
            self.add_row(str(event_type), self.format_event(event))

    def get_event(self, person, event_type):
        """
        Return an event of the given type.

        Only events where the person has the PRIMARY role are considered.
        Returns None if no matching event is found.
        """
        for event_ref in person.get_event_ref_list():
            if int(event_ref.get_role()) == EventRoleType.PRIMARY:
                event = self.dbstate.db.get_event_from_handle(event_ref.ref)
                if event.get_type() == event_type:
                    return event
        return None

    def format_event(self, event):
        """
        Format the event for display.

        Returns "<date> - <place>." when the event has a place,
        otherwise just "<date>.".
        """
        date = get_date(event)
        handle = event.get_place_handle()
        if handle:
            place = place_displayer.display_event(self.dbstate.db, event)
            retval = _('%(date)s - %(place)s.') % {'date' : date,
                                                   'place' : place}
        else:
            retval = _('%(date)s.') % dict(date=date)
        return retval

    def load_person_image(self, person):
        """
        Load the primary image if it exists.

        Only the first media reference is used, and only if its MIME type
        is an image; otherwise the photo widget is cleared.
        """
        media_list = person.get_media_list()
        if media_list:
            media_ref = media_list[0]
            object_handle = media_ref.get_reference_handle()
            obj = self.dbstate.db.get_media_from_handle(object_handle)
            full_path = media_path_full(self.dbstate.db, obj.get_path())
            mime_type = obj.get_mime_type()
            if mime_type and mime_type.startswith("image"):
                self.photo.set_image(full_path, mime_type,
                                     media_ref.get_rectangle())
                self.photo.set_uistate(self.uistate, object_handle)
            else:
                self.photo.set_image(None)
                self.photo.set_uistate(None, None)
        else:
            self.photo.set_image(None)
            self.photo.set_uistate(None, None)
|
Kozi’s Korner: Baking Is a Wellness Tool That Works For Me!
Many of you have tasted Kozi’s, ‘Kain’t Stop Eat’n Em!,’ Sweet Potato Pies. He has shared them at many POCC Events and years ago, Kozi baked and sold enough of his pies to pay his way to New York City where he performed at Carnegie Hall.
I never looked at or considered cooking and baking to be a wellness tool until just recently. When I worked at the bakery I baked because it was my job to do so regardless of how much I disliked the work. But just like anything else, the more one does something the better and more adept one becomes at whatever it is they are doing.
That’s the way it was with baking. Sometimes I would have to bake 60 or 70 pies of all kinds every night; cherry, strawberry, blueberry, blackberry, strawberry rhubarb, peach, pecan, pumpkin etc. Uggg! I really disliked the hours, the mess, the stickiness, the heavy lifting, the heat from the ovens, the cleanup; need I go on? After a while it became routine and the job seemed easier. I was able to do what I needed to do more tidily and efficiently and actually improve on a number of things including some recipes. Back then it was a chore and a bore.
Now, I find that I can apply the skills that I acquired as an apprentice baker to other areas of my life as well as cooking/baking. The discipline, the timeliness, the cleanliness, the creativity and the delicious results of my labor are rewarding.
I find joy in baking early in the morning and bringing a still hot, fresh out of the oven pie or quiche to work to share with my fellow employees. They enjoy it too.
What wellness tools work for you? Let me know. Till next time on Kozi’s Korner…Peace & Blessings.
|
# -*- encoding: utf-8 -*-
from abjad import *
def test_tonalanalysistools_TonalAnalysisAgent_analyze_chords_01():
    r'''The three inversions of a C major triad.
    '''

    # (pitch numbers, expected inversion) for root position and both
    # inversions of the same C major triad.
    cases = (
        ([0, 4, 7], 'root'),
        ([4, 7, 12], 1),
        ([7, 12, 16], 2),
        )
    for pitches, inversion in cases:
        chord = Chord(pitches, (1, 4))
        chord_class = tonalanalysistools.RootedChordClass(
            'c', 'major', 'triad', inversion)
        selection = tonalanalysistools.select(chord)
        assert selection.analyze_chords() == [chord_class]
def test_tonalanalysistools_TonalAnalysisAgent_analyze_chords_02():
    r'''The three inversions of an a minor triad.
    '''

    # (pitch numbers, expected inversion) for the a minor triad.
    cases = (
        ([9, 12, 16], 'root'),
        ([12, 16, 21], 1),
        ([16, 21, 24], 2),
        )
    for pitches, inversion in cases:
        chord = Chord(pitches, (1, 4))
        chord_class = tonalanalysistools.RootedChordClass(
            'a', 'minor', 'triad', inversion)
        selection = tonalanalysistools.select(chord)
        assert selection.analyze_chords() == [chord_class]
def test_tonalanalysistools_TonalAnalysisAgent_analyze_chords_03():
    r'''The four inversions of a C dominant seventh chord.
    '''

    # (pitch numbers, expected inversion) for the C dominant seventh.
    cases = (
        ([0, 4, 7, 10], 'root'),
        ([4, 7, 10, 12], 1),
        ([7, 10, 12, 16], 2),
        ([10, 12, 16, 19], 3),
        )
    for pitches, inversion in cases:
        chord = Chord(pitches, (1, 4))
        chord_class = tonalanalysistools.RootedChordClass(
            'c', 'dominant', 7, inversion)
        selection = tonalanalysistools.select(chord)
        assert selection.analyze_chords() == [chord_class]
def test_tonalanalysistools_TonalAnalysisAgent_analyze_chords_04():
    r'''The five inversions of a C dominant ninth chord.
    '''

    # (pitch numbers, expected inversion) for the C dominant ninth.
    cases = (
        ([0, 4, 7, 10, 14], 'root'),
        ([4, 7, 10, 12, 14], 1),
        ([7, 10, 12, 14, 16], 2),
        ([10, 12, 14, 16, 19], 3),
        ([2, 10, 12, 16, 19], 4),
        )
    for pitches, inversion in cases:
        chord = Chord(pitches, (1, 4))
        chord_class = tonalanalysistools.RootedChordClass(
            'c', 'dominant', 9, inversion)
        selection = tonalanalysistools.select(chord)
        assert selection.analyze_chords() == [chord_class]
def test_tonalanalysistools_TonalAnalysisAgent_analyze_chords_05():
    r'''Returns none when chord does not analyze.
    '''

    # A chromatic cluster has no tertian (root/quality) analysis.
    unanalyzable = Chord('<c cs d>4')
    selection = tonalanalysistools.select(unanalyzable)
    assert selection.analyze_chords() == [None]
|
Description: Slender perennial with long rhizome.
Culms rigid, terete, 1–3-noded, striate, glabrous, smooth, 20–90 cm high, 1.0–2.0 mm diam.
Leaves basal and cauline; blade reduced to short erect flattened mucro; sheath straw-coloured to pale reddish or grey-brown, dull. Inflorescence oblong to ovate in outline, narrow, erect, 1–8 cm long, to 1 cm diam.; lowest involucral bract shorter than inflorescence. Spikelets few, 1-flowered, 3–6 mm long. Glumes 4 or 5, acute to long-acute, red-brown to grey-brown, with margins glabrous, midrib scaberulous; fertile glumes 3–5 mm long. Anthers 1.8–2.2 mm long, excluding appendage 0.3–0.5 mm long.
Nut obovoid to globose, 2.3–3.0 mm long, 1.5–2.0 mm diam., smooth with scattered pits, hispid at apex, shining, dark red-brown to black.
Distribution and occurrence: Grows in mostly brackish or saline swamps, on sandy soils; in coastal areas except for isolated occurrence at Boonoo Boonoo Falls.
Other Australian states: Qld Tas. W.A. S.A.
|
import os
import main
import unittest
import tempfile
from bson.json_util import loads, dumps
from pymongo import MongoClient
import base64
import copy
# Article payload whose foreign keys ("sectionID", "staffIDs") do not refer
# to real documents, so the API is expected to reject/ignore it.
INVALID_ARTICLE = {"title": "B", "content": "afds", "sectionID": "23", "staffIDs": ["69"], "date": "Blah"}
# Fixture documents inserted into the test database on demand.
STAFF = {"name": "Michael Truell", "position": "CTO"}
SECTION = {"title": "Sports"}
# Credentials accepted by the admin endpoints (HTTP Basic auth).
CORRECT_USERNAME = "admin"
CORRECT_PASSWORD = "d"
def getValidArticle(db):
    """Return an article dict whose staff/section references exist in ``db``."""
    def ensure_id(collection, template):
        # Insert the fixture document if absent, then return its _id.
        matches = [doc for doc in collection.find(template)]
        if len(matches) == 0:
            collection.insert_one(copy.deepcopy(template))
            matches = [doc for doc in collection.find(template)]
        return matches[0]['_id']

    staffID = ensure_id(db.staff, STAFF)
    sectionID = ensure_id(db.section, SECTION)
    return {"title": "Article Title", "content": "Article content goes here.", "date": "May 28, 2016", "sectionID": sectionID, "staffIDs": [staffID]}
def getAuthHeader(username, password):
    """Build an HTTP Basic auth header dict for the given credentials."""
    credentials = "{0}:{1}".format(username, password)
    token = base64.b64encode(credentials.encode("utf-8")).decode("utf-8")
    return {"Authorization": "Basic " + token}
class APITester(unittest.TestCase):
    """Integration tests for the article/staff/section REST API.

    Every test runs against a freshly dropped ``testRecord`` Mongo database
    and a Flask test client, both created in :meth:`setUp`.
    """

    def setUp(self):
        # Start from an empty database so tests cannot affect each other.
        client = MongoClient()
        client.drop_database("testRecord")
        self.db = client.testRecord
        main.initDB(self.db)
        self.db_fd, main.app.config['DATABASE'] = tempfile.mkstemp()
        main.app.config['TESTING'] = True
        self.app = main.app.test_client()

    def tearDown(self):
        os.close(self.db_fd)
        os.unlink(main.app.config['DATABASE'])

    def queryGET(self, endpointName, data=None):
        """GET ``endpointName`` and return the response body as text."""
        # ``data=None`` instead of a mutable ``{}`` default (shared across
        # calls); behaviour is unchanged when the argument is omitted.
        request = self.app.get(endpointName, data=data if data is not None else {})
        return request.data.decode("utf-8")

    def queryPOST(self, endpointName, data=None):
        """POST ``data`` as JSON with valid admin credentials; return body text."""
        header = getAuthHeader(CORRECT_USERNAME, CORRECT_PASSWORD)
        contentType = 'application/json'
        body = dumps(data if data is not None else {})
        request = self.app.post(endpointName, data=body, content_type=contentType, headers=header)
        return request.data.decode("utf-8")

    def testEmptyDB(self):
        # All list endpoints return an empty JSON array on a fresh database.
        endpoints = ['article', 'staff', 'section']
        for endpoint in endpoints:
            assert '[]' in str(self.app.get('/api/' + endpoint).data)

    def testGETInvalidArticle(self):
        # Articles with dangling references must be filtered out of listings.
        self.db.article.insert_one(copy.deepcopy(INVALID_ARTICLE))
        assert '[]' == self.queryGET('/api/article')

    def testGETValidArticle(self):
        def isSameAricle(article1, article2):
            # Compare all fields except server-generated/expanded ones.
            for field in list(article1.keys()) + list(article2.keys()):
                if field not in ['_id', 'section', 'staffs']:
                    if article1[field] != article2[field]:
                        return False
            return True
        validArticle = getValidArticle(self.db)
        self.db.article.insert_one(validArticle)
        returnedArticle = loads(self.queryGET('/api/article'))[0]
        assert isSameAricle(validArticle, returnedArticle)
        returnedArticle = loads(self.queryGET('/api/article', data={"sectionID": validArticle['sectionID']}))[0]
        assert isSameAricle(validArticle, returnedArticle)
        returnedArticle = loads(self.queryGET('/api/article', data={"title": validArticle['title']}))[0]
        assert isSameAricle(validArticle, returnedArticle)

    def testPOSTArticle(self):
        # Should fail with bad object ids.  The previous
        # ``try: ...; assert False except: pass`` swallowed its own
        # AssertionError, so this check could never fail; assertRaises
        # enforces it properly.
        with self.assertRaises(Exception):
            self.queryPOST("/api/admin/article", data=INVALID_ARTICLE)
        # Should store data and return good when given valid article
        assert self.queryPOST("/api/admin/article", data=getValidArticle(self.db)) == 'good'
        assert self.db.article.find_one(getValidArticle(self.db)) is not None

    def testGETStaff(self):
        def isSameStaff(staff1, staff2):
            for field in list(staff1.keys()) + list(staff2.keys()):
                if field != '_id':
                    if staff1[field] != staff2[field]:
                        return False
            return True
        modifiableStaff = copy.deepcopy(STAFF)
        self.db.staff.insert_one(modifiableStaff)
        assert isSameStaff(STAFF, loads(self.queryGET('/api/staff'))[0])
        assert isSameStaff(STAFF, loads(self.queryGET('/api/staff', data={"staffID": str(modifiableStaff['_id'])}))[0])
        assert isSameStaff(STAFF, loads(self.queryGET('/api/staff', data={"name": STAFF['name']}))[0])

    def testPOSTStaff(self):
        assert self.queryPOST("/api/admin/staff", data=STAFF) == 'good'
        assert self.db.staff.find_one(STAFF) is not None

    def testGETSection(self):
        self.db.section.insert_one(copy.deepcopy(SECTION))
        returnedSection = loads(self.queryGET('/api/section'))[0]
        for field in list(returnedSection.keys()) + list(SECTION.keys()):
            if field != '_id':
                assert SECTION[field] == returnedSection[field]

    def testPOSTSection(self):
        assert self.queryPOST("/api/admin/section", data=SECTION) == 'good'
        assert self.db.section.find_one(SECTION) is not None

    def testAdminAccess(self):
        def request(username, password):
            headers = getAuthHeader(username, password)
            return self.app.post("/api/admin/article", headers=headers).data.decode("utf-8")
        # Correct credentials reach the handler (which rejects the empty
        # body); wrong credentials are stopped by the auth layer first.
        assert request(CORRECT_USERNAME, CORRECT_PASSWORD) == 'Bad request'
        assert request(CORRECT_USERNAME, "wrong") == 'Unauthorized access'
        assert request("wrong", CORRECT_PASSWORD) == 'Unauthorized access'
# Run the suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
If you are looking for a romantic Scottish Wedding, look no further than the Borders Events Centre in the Scottish Borders.
The Springwood Pavilion at the Border Union Showground, is a large multi-purpose event space set within beautifully maintained parkland with ample parking. At 1,000 sqm, there is enough space for reception, dining, dancing, chill out zones and more! Our maximum capacity for dinner and dancing would be 450 guests.
The wonderful thing about our wedding venue is that it is a blank canvas for you to style completely to complement your tastes. We have our own drapes, chair covers and tie-backs that you are welcome to hire; alternatively, we can recommend wedding stylists who can transform the hall in spectacular fashion.
Whether you have your wedding ceremony with us or at a local church, the Borders Events Centre is ideally set up for your big day.
Guests will enter through the large, light entrance hall, can be welcomed at the door with a welcome drink before being seated for their dinner before dancing the night away on our 320sqm wooden dance floor (or hire in one to suit).
The BEC is just a 5 minute walk from the pretty, historic town of Kelso and its hotels and B&Bs so guests do not have far to go from dance floor to bed!
Travel wise, the Borders Events Centre is located just an hour’s drive from Edinburgh, 90 minutes from Newcastle and Carlisle. Our nearest train stations are Berwick-upon-Tweed and Tweedbank in Galashiels.
|
# Copyright (C) 2020 OpenMotics BV
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
PulseCounter Mapper
"""
from __future__ import absolute_import
from gateway.dto import PulseCounterDTO
from gateway.models import PulseCounter
if False: # MYPY
from typing import List
class PulseCounterMapper(object):
    """Translates between PulseCounter ORM rows and PulseCounterDTOs."""

    @staticmethod
    def orm_to_dto(orm_object):  # type: (PulseCounter) -> PulseCounterDTO
        room = orm_object.room
        return PulseCounterDTO(id=orm_object.number,
                               name=orm_object.name,
                               persistent=orm_object.persistent,
                               room=room.number if room is not None else None)

    @staticmethod
    def dto_to_orm(pulse_counter_dto):  # type: (PulseCounterDTO) -> PulseCounter
        orm_object = PulseCounter.get_or_none(number=pulse_counter_dto.id)
        if orm_object is None:
            # Fresh gateway-sourced counter with neutral defaults; any
            # loaded DTO fields below overwrite them.
            orm_object = PulseCounter(number=pulse_counter_dto.id,
                                      name='',
                                      source='gateway',
                                      persistent=False)
        loaded = pulse_counter_dto.loaded_fields
        if 'name' in loaded:
            orm_object.name = pulse_counter_dto.name
        if 'persistent' in loaded:
            orm_object.persistent = pulse_counter_dto.persistent
        return orm_object
|
Search rental properties on this site or find the perfect Home for rent in Tampa FL that meets the needs of your family. You will be able to Search Rental Homes where all the active listings are at your fingertips and before you will be all of the Homes for Rent in Tampa. You will also have the opportunity to locate a trusted and experienced realtor in Tampa FL to help reach you family goals. The best and most reliable realtors in Tampa, can be found on this site and on its pages. If you use the services of a Tampa realtor, you can be assured that your rent is within your budget and that your lease is professionally reviewed for accuracy and the best possible terms.
Many consumers seek a house for rent and have no experience reviewing the terms presented in the lease they are expected to sign. Do your home search with assurance that a trusted advisor is helping you with the particulars during your real estate search. As you navigate the load of search listings and results, you will encounter properties that look great on the website but are atrocious in person, and a listed property might have a bad reputation commonly known among Tampa realtors. Use this site to search homes, but draw on experienced agents when the time comes to make important decisions.
|
import datetime
import re
from smsform_exceptions import (SMSFieldException, ChoiceException, InvalidDateException,
MissingRequiredFieldException)
from smsform_validators import multiple_choice_validator, single_choice_validator
# SMS FIELD
class GenericSMSField(object):
    """Base class for a single field parsed out of an SMS message.

    A field knows its name, the prefixes that may introduce it in the
    message text, and the validators that check its parsed value.
    """

    # Values treated as "no answer" when checking required fields.
    empty_values = [None, [], ""]

    def __init__(self, name, *args, **kwargs):
        self.name = name
        self.validators = kwargs.get('validators') or []
        # Sort a *copy* so the caller's list is not mutated; the longest
        # prefix must be tried first so that e.g. "abc" wins over "ab".
        self.prefixes = sorted(kwargs.get("prefixes") or [""],
                               key=len, reverse=True)
        self.accepted_prefix = ""
        # "blank" is a sentinel meaning "not supplied": fields default to
        # required.  (The old code collapsed *any* supplied value --
        # including True -- to not-required, which was a bug.)
        required = kwargs.get("required", "blank")
        if required == "blank":
            self.required = True
        else:
            self.required = bool(required)

    def get_field_regex(self):
        """Return a list of {'prefix': ..., 'regex': ...} dicts, one per prefix."""
        prefix_regexes = []
        for prefix in self.prefixes:
            prefix_regex = r"\b{prefix}(?P<{name}>\w*)".format(
                prefix=prefix,
                name=self.name
            )
            prefix_regexes.append({"prefix": prefix, "regex": prefix_regex})
        return prefix_regexes

    def get_verbose_name(self):
        """Human-readable field name, e.g. "first_name" -> "First Name"."""
        name_parts = self.name.split("_")
        return " ".join(name_parts).title()

    def to_python(self, text, accepted_prefix=""):
        """Convert the passed in text to a valid python object, any special
        conversions from the passed in text to a valid python object should
        happen here."""
        self.accepted_prefix = self.accepted_prefix or accepted_prefix
        return text, accepted_prefix

    def validate(self, value):
        """Raise an SMSFieldException subclass if ``value`` is invalid."""
        # check to see if the field is required and present
        if self.required and value in self.empty_values:
            raise MissingRequiredFieldException(self.get_verbose_name())
        # Validator exceptions propagate unchanged -- the old try/except
        # that immediately re-raised (Python 2-only ``except X, e``
        # syntax) was a no-op.
        for validator in self.validators:
            validator(value=value)
        return True

    def process_field(self, text, accepted_prefix=""):
        """Convert ``text`` to python, validate it, and return the value."""
        python_obj, accepted_prefix = self.to_python(text, accepted_prefix)
        self.validate(python_obj)
        return python_obj

    def __repr__(self):
        return "<{name}> object".format(name=self.name)
# SMSFields
class PrefixField(GenericSMSField):
    """Marker subclass for fields introduced by a literal prefix letter.

    The body is empty: parsing/validation behaviour is inherited unchanged
    from GenericSMSField.  Presumably the prefix stripping happens via the
    ``prefixes`` regex machinery of the base class -- TODO confirm against
    the form code that consumes these fields."""
    pass
class MultiChoiceField(GenericSMSField):
    """Field accepting one or more values from a fixed set of choices,
    separated by ``choice_divider`` (default ",")."""

    def __init__(self, choices, choice_divider=",", *args, **kwargs):
        self.choice_divider = choice_divider
        self.choices = choices
        super(MultiChoiceField, self).__init__(*args, **kwargs)
        self.validators.append(multiple_choice_validator)

    def to_python(self, text, accepted_prefix):
        """Split the raw text into a list of chosen values."""
        text, accepted_prefix = super(
            MultiChoiceField, self).to_python(text, accepted_prefix)
        return text.split(self.choice_divider), accepted_prefix

    def get_field_regex(self):
        """Match any one of the literal choices (no prefix)."""
        choices_string = "|".join(self.choices)
        return [
            {
                "prefix": "", "regex": "({choices_string})".format(choices_string=choices_string)
            }
        ]

    def validate(self, value):
        """Like the base validate(), but validators also receive ``choices``."""
        # check to see if the field is required and present
        if self.required and value in self.empty_values:
            raise MissingRequiredFieldException(self.get_verbose_name())
        # Validator exceptions propagate unchanged -- the old try/except
        # that immediately re-raised (Python 2-only ``except X, e``
        # syntax) was a no-op.
        for validator in self.validators:
            validator(value=value, choices=self.choices)
        return True
class SingleChoiceField(MultiChoiceField):
    """A MultiChoiceField restricted to exactly one chosen value."""
    def __init__(self, choices, *args, **kwargs):
        super(SingleChoiceField, self).__init__(choices, *args, **kwargs)
        # Replace (not extend) the inherited validators: only the
        # single-choice check applies here.
        self.validators = [single_choice_validator]
class DateField(GenericSMSField):
    """Field that parses a date in one of several accepted formats."""

    def __init__(self, name, *args, **kwargs):
        # Accepted strptime formats, e.g. "28/May/16" or "28May16".
        date_formats = kwargs.get("date_formats", None) or ["%d/%b/%y", "%d%b%y"]
        super(DateField, self).__init__(name, *args, **kwargs)
        self.date_formats = date_formats

    def get_field_regex(self):
        """We will accept 2 formats for the dates: dayMonthYear, day/Month/Year
        with the month acceptable as a word or digits
        """
        regex_strings = [
            r"\b\d{1,2}[-/]\d{1,2}[-/]\d{1,4}\b",
            r"\b\d{1,2}[a-z]{3,14}\d{1,4}\b",
        ]
        # The joined alternation is already the regex; the old code wrapped
        # it in a redundant format() call with an unused ``name`` argument.
        return [{"prefix": "", "regex": "|".join(regex_strings)}]

    def to_python(self, date_string, accepted_prefix=""):
        """Try each accepted format in turn; raise InvalidDateException
        when none of them matches."""
        python_date = None
        for date_format in self.date_formats:
            try:
                python_date = datetime.datetime.strptime(
                    date_string, date_format)
            except ValueError:
                continue
            else:
                break
        if not python_date:
            raise InvalidDateException(
                "Date not recognized, please use the format: dayMonthYear"
            )
        return python_date.date(), accepted_prefix
|
Brookwoods Industrial Estate, Holywell Green, Halifax, West Yorkshire, HX4 9BH, UK.
Copyright © 2001-2002 Pennine Engineering Group. All Rights Reserved. Designated Trademarks, Logos And Brands Are The Property Of Their Respective Owners.
|
#!/usr/bin/python
# get the loaders ahead of time so that cobbler can run in an isolated network!
# NOTE: see cobbler/action_dlcontent.py for the list of files it looks for...
import os
import sys
import urlgrabber
# When True, existing destination files are re-downloaded and overwritten.
force = True
content_server = 'http://cobbler.github.com/loaders'
#dest = '/var/lib/cobbler/loaders'
# Download into the current working directory.
dest = os.getcwd()
# (source URL, destination path) pairs for every bootloader artifact.
files = (
    ("%s/README" % content_server, "%s/README" % dest),
    ("%s/COPYING.elilo" % content_server, "%s/COPYING.elilo" % dest),
    ("%s/COPYING.yaboot" % content_server, "%s/COPYING.yaboot" % dest),
    ("%s/COPYING.syslinux" % content_server, "%s/COPYING.syslinux" % dest),
    ("%s/elilo-3.8-ia64.efi" % content_server, "%s/elilo-ia64.efi" % dest),
    ("%s/yaboot-1.3.14-12" % content_server, "%s/yaboot" % dest),
    ("%s/pxelinux.0-3.61" % content_server, "%s/pxelinux.0" % dest),
    ("%s/menu.c32-3.61" % content_server, "%s/menu.c32" % dest),
    ("%s/grub-0.97-x86.efi" % content_server, "%s/grub-x86.efi" % dest),
    ("%s/grub-0.97-x86_64.efi" % content_server, "%s/grub-x86_64.efi" % dest),
)
print "Script will download to: %s from: %s" % (dest, content_server)
# Interactive confirmation; Ctrl-C aborts.  (Python 2 script: ``print``
# statement, ``raw_input`` and ``except X, e`` syntax.)
try:
    raw_input('<ENTER>/^C ?')
except KeyboardInterrupt, e:
    sys.exit(1)
for src, dst in files:
    # NOTE(review): with force=True above, this skip branch never runs.
    if os.path.exists(dst) and not force:
        print "File: %s already exists." % dst
        continue
    print "Downloading: %s to: %s" % (src, dst)
    urlgrabber.grabber.urlgrab(src, filename=dst)
|
This can be added to the build server as an artifact and used for automatic F-Droid updates. Cf. #2040 (comment) pointed out by @pazos.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2010 Prof. William H. Green (whgreen@mit.edu) and the
# RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
"""
import os.path
import math
import logging
from copy import deepcopy
from base import Database, Entry, makeLogicNode, DatabaseError
from rmgpy.molecule import Molecule, Atom, Bond, Group, atomTypes
################################################################################
def saveEntry(f, entry):
    """
    Write a Pythonic string representation of the given `entry` in the solvation
    database to the file object `f`.

    `entry.item` may be a Molecule, a Group, a logic-node string, or None;
    `entry.data` may be SoluteData, SolventData, or None.  Any other data
    type raises DatabaseError.
    """
    f.write('entry(\n')
    f.write(' index = {0:d},\n'.format(entry.index))
    f.write(' label = "{0}",\n'.format(entry.label))
    # Emit the chemical item: prefer the compact SMILES form when it
    # round-trips to an isomorphic molecule, otherwise the adjacency list.
    if isinstance(entry.item, Molecule):
        if Molecule(SMILES=entry.item.toSMILES()).isIsomorphic(entry.item):
            # The SMILES representation accurately describes the molecule, so we can save it that way.
            f.write(' molecule = "{0}",\n'.format(entry.item.toSMILES()))
        else:
            f.write(' molecule = \n')
            f.write('"""\n')
            f.write(entry.item.toAdjacencyList(removeH=False))
            f.write('""",\n')
    elif isinstance(entry.item, Group):
        f.write(' group = \n')
        f.write('"""\n')
        f.write(entry.item.toAdjacencyList())
        f.write('""",\n')
    elif entry.item is not None:
        # Logic-node strings (e.g. OR{...}) are written verbatim.
        f.write(' group = "{0}",\n'.format(entry.item))
    # Emit the data payload, one keyword per Abraham/Mintz parameter.
    if isinstance(entry.data, SoluteData):
        f.write(' solute = SoluteData(\n')
        f.write(' S = {0!r},\n'.format(entry.data.S))
        f.write(' B = {0!r},\n'.format(entry.data.B))
        f.write(' E = {0!r},\n'.format(entry.data.E))
        f.write(' L = {0!r},\n'.format(entry.data.L))
        f.write(' A = {0!r},\n'.format(entry.data.A))
        # V (McGowan volume) is optional and omitted when unset.
        if entry.data.V is not None: f.write(' V = {0!r},\n'.format(entry.data.V))
        f.write(' ),\n')
    elif isinstance(entry.data, SolventData):
        f.write(' solvent = SolventData(\n')
        f.write(' s_g = {0!r},\n'.format(entry.data.s_g))
        f.write(' b_g = {0!r},\n'.format(entry.data.b_g))
        f.write(' e_g = {0!r},\n'.format(entry.data.e_g))
        f.write(' l_g = {0!r},\n'.format(entry.data.l_g))
        f.write(' a_g = {0!r},\n'.format(entry.data.a_g))
        f.write(' c_g = {0!r},\n'.format(entry.data.c_g))
        f.write(' s_h = {0!r},\n'.format(entry.data.s_h))
        f.write(' b_h = {0!r},\n'.format(entry.data.b_h))
        f.write(' e_h = {0!r},\n'.format(entry.data.e_h))
        f.write(' l_h = {0!r},\n'.format(entry.data.l_h))
        f.write(' a_h = {0!r},\n'.format(entry.data.a_h))
        f.write(' c_h = {0!r},\n'.format(entry.data.c_h))
        f.write(' A = {0!r},\n'.format(entry.data.A))
        f.write(' B = {0!r},\n'.format(entry.data.B))
        f.write(' C = {0!r},\n'.format(entry.data.C))
        f.write(' D = {0!r},\n'.format(entry.data.D))
        f.write(' E = {0!r},\n'.format(entry.data.E))
        f.write(' alpha = {0!r},\n'.format(entry.data.alpha))
        f.write(' beta = {0!r},\n'.format(entry.data.beta))
        f.write(' eps = {0!r},\n'.format(entry.data.eps))
        f.write(' ),\n')
    elif entry.data is None:
        f.write(' solute = None,\n')
    else:
        raise DatabaseError("Not sure how to save {0!r}".format(entry.data))
    f.write(' shortDesc = u"""')
    # NOTE(review): the bare excepts below deliberately fall back to an
    # ASCII-only encoding when UTF-8 encoding of the description fails
    # (Python 2-era unicode handling); they would also hide other errors.
    try:
        f.write(entry.shortDesc.encode('utf-8'))
    except:
        f.write(entry.shortDesc.strip().encode('ascii', 'ignore')+ "\n")
    f.write('""",\n')
    f.write(' longDesc = \n')
    f.write('u"""\n')
    try:
        f.write(entry.longDesc.strip().encode('utf-8') + "\n")
    except:
        f.write(entry.longDesc.strip().encode('ascii', 'ignore')+ "\n")
    f.write('""",\n')
    f.write(')\n\n')
def generateOldLibraryEntry(data):
    """
    Return a list of values used to save entries to the old-style RMG
    thermo database based on the thermodynamics object `data`.

    Not implemented for the solvation database; always raises
    NotImplementedError.
    """
    raise NotImplementedError()
def processOldLibraryEntry(data):
    """
    Process a list of parameters `data` as read from an old-style RMG
    thermo database, returning the corresponding thermodynamics object.

    Not implemented for the solvation database; always raises
    NotImplementedError.
    """
    raise NotImplementedError()
class SolventData():
    """
    Stores Abraham/Mintz parameters for characterizing a solvent.
    """
    def __init__(self, s_h=None, b_h=None, e_h=None, l_h=None, a_h=None,
                 c_h=None, s_g=None, b_g=None, e_g=None, l_g=None, a_g=None, c_g=None, A=None, B=None,
                 C=None, D=None, E=None, alpha=None, beta=None, eps=None):
        # Abraham solvent coefficients: _h suffix for enthalpy, _g for
        # free energy.
        for attr, value in (('s_h', s_h), ('b_h', b_h), ('e_h', e_h),
                            ('l_h', l_h), ('a_h', a_h), ('c_h', c_h),
                            ('s_g', s_g), ('b_g', b_g), ('e_g', e_g),
                            ('l_g', l_g), ('a_g', a_g), ('c_g', c_g)):
            setattr(self, attr, value)
        # Parameters for calculating viscosity (DIPPR correlation).
        self.A, self.B, self.C, self.D, self.E = A, B, C, D, E
        # SOLUTE parameters used for intrinsic rate correction in
        # H-abstraction reactions.
        self.alpha = alpha
        self.beta = beta
        # Dielectric constant.
        self.eps = eps

    def getHAbsCorrection(self):
        """
        If solvation is on, this will give the log10 of the ratio of the
        intrinsic rate constants log10(k_sol/k_gas) for H-abstraction rxns.
        """
        return -8.3 * self.alpha * self.beta

    def getSolventViscosity(self, T):
        """
        Returns the viscosity in Pa s, according to correlation in Perry's
        Handbook and coefficients in DIPPR.
        """
        log_viscosity = self.A + self.B / T + self.C * math.log(T) + self.D * T ** self.E
        return math.exp(log_viscosity)
class SolvationCorrection():
    """
    Stores corrections for enthalpy, entropy, and Gibbs free energy when a
    species is solvated.  Enthalpy and Gibbs free energy are in J/mol;
    entropy is in J/mol/K.
    """
    def __init__(self, enthalpy=None, gibbs=None, entropy=None):
        # Keyword order is (enthalpy, gibbs, entropy); all default to None.
        self.enthalpy = enthalpy
        self.gibbs = gibbs
        self.entropy = entropy
class SoluteData():
    """
    Stores Abraham parameters to characterize a solute.
    """
    def __init__(self, S=None, B=None, E=None, L=None, A=None, V=None, comment=""):
        # Abraham solute descriptors; V is the McGowan volume (cm^3/mol/100).
        self.S = S
        self.B = B
        self.E = E
        self.L = L
        self.A = A
        self.V = V
        self.comment = comment

    def __repr__(self):
        return "SoluteData(S={0},B={1},E={2},L={3},A={4},comment={5!r})".format(self.S, self.B, self.E, self.L, self.A, self.comment)

    def getStokesDiffusivity(self, T, solventViscosity):
        """
        Get diffusivity of solute using the Stokes-Einstein sphere relation.
        Radius is found from the McGowan volume.

        `T` is in K, `solventViscosity` in Pa*s; the result is in m^2/s.
        """
        k_b = 1.3806488e-23 # Boltzmann constant, m2*kg/s2/K
        radius = math.pow((75*self.V/3.14159),(1.0/3.0))/100 # in meters
        D = k_b*T/6/3.14159/solventViscosity/radius # m2/s
        return D

    def setMcGowanVolume(self, species):
        """
        Find and store the McGowan's Volume
        Returned volumes are in cm^3/mol/100 (see note below)
        See Table 2 in Abraham & McGowan, Chromatographia Vol. 23, No. 4, p. 243. April 1987
        doi: 10.1007/BF02311772
        "V is scaled to have similar values to the other
        descriptors by division by 100 and has units of (cm3mol-1/100)."
        the contibutions in this function are in cm3/mol, and the division by 100 is done at the very end.
        """
        molecule = species.molecule[0] # any will do, use the first.
        Vtot = 0
        for atom in molecule.atoms:
            thisV = 0.0
            if atom.isCarbon():
                thisV = 16.35
            elif (atom.element.number == 7): # nitrogen, do this way if we don't have an isElement method
                thisV = 14.39
            elif atom.isOxygen():
                thisV = 12.43
            elif atom.isHydrogen():
                thisV = 8.71
            elif (atom.element.number == 16): # sulfur
                thisV = 22.91
            else:
                # Was a bare ``raise Exception()`` with no message; name the
                # element so the failure is diagnosable.  ValueError is a
                # subclass of Exception, so existing broad handlers still work.
                raise ValueError(
                    "McGowan volume contribution not known for element "
                    "number {0}".format(atom.element.number))
            Vtot = Vtot + thisV
            for bond in molecule.getBonds(atom):
                # divide contribution in half since all bonds would be counted twice this way
                Vtot = Vtot - 6.56/2
        self.V = Vtot / 100 # division by 100 to get units correct.
################################################################################
################################################################################
class SolventLibrary(Database):
    """
    A class for working with a RMG solvent library.
    """
    def __init__(self, label='', name='', shortDesc='', longDesc=''):
        Database.__init__(self, label=label, name=name, shortDesc=shortDesc, longDesc=longDesc)

    def loadEntry(self, index, label, solvent, reference=None,
                  referenceType='', shortDesc='', longDesc=''):
        """Register one solvent entry under ``label``."""
        self.entries[label] = Entry(
            index=index,
            label=label,
            data=solvent,
            reference=reference,
            referenceType=referenceType,
            shortDesc=shortDesc,
            longDesc=longDesc.strip(),
        )

    def load(self, path):
        """
        Load the solvent library from the given path.
        """
        Database.load(self, path, local_context={'SolventData': SolventData}, global_context={})

    def saveEntry(self, f, entry):
        """
        Write the given `entry` in the solvent database to the file object `f`.
        """
        return saveEntry(f, entry)

    def getSolventData(self, label):
        """
        Get a solvent's data from its name.
        """
        return self.entries[label].data
class SoluteLibrary(Database):
    """
    A class for working with a RMG solute library. Not currently used.
    """
    def __init__(self, label='', name='', shortDesc='', longDesc=''):
        Database.__init__(self, label=label, name=name, shortDesc=shortDesc, longDesc=longDesc)

    def loadEntry(self,
                  index,
                  label,
                  molecule,
                  solute,
                  reference=None,
                  referenceType='',
                  shortDesc='',
                  longDesc='',
                  ):
        """Register one solute entry; ``molecule`` is a SMILES string or an
        adjacency list."""
        # Try SMILES first, then fall back to an adjacency list.  Catch
        # Exception rather than a bare ``except:`` so KeyboardInterrupt
        # and SystemExit are not swallowed.
        try:
            mol = Molecule(SMILES=molecule)
        except Exception:
            try:
                mol = Molecule().fromAdjacencyList(molecule)
            except Exception:
                logging.error("Can't understand '{0}' in solute library '{1}'".format(molecule,self.name))
                raise
        self.entries[label] = Entry(
            index = index,
            label = label,
            item = mol,
            data = solute,
            reference = reference,
            referenceType = referenceType,
            shortDesc = shortDesc,
            longDesc = longDesc.strip(),
        )

    def load(self, path):
        """
        Load the solute library from the given path.
        """
        Database.load(self, path, local_context={'SoluteData': SoluteData}, global_context={})

    def saveEntry(self, f, entry):
        """
        Write the given `entry` in the solute database to the file object `f`.
        """
        return saveEntry(f, entry)

    def generateOldLibraryEntry(self, data):
        """
        Return a list of values used to save entries to the old-style RMG
        thermo database based on the thermodynamics object `data`.
        """
        return generateOldLibraryEntry(data)

    def processOldLibraryEntry(self, data):
        """
        Process a list of parameters `data` as read from an old-style RMG
        thermo database, returning the corresponding thermodynamics object.
        """
        return processOldLibraryEntry(data)
################################################################################
class SoluteGroups(Database):
    """
    A class for working with an RMG solute group additivity database.
    """
    def __init__(self, label='', name='', shortDesc='', longDesc=''):
        Database.__init__(self, label=label, name=name, shortDesc=shortDesc, longDesc=longDesc)

    def loadEntry(self, index, label, group, solute, reference=None,
                  referenceType='', shortDesc='', longDesc=''):
        """Register one group-additivity entry; ``group`` is either a
        logic-node expression (OR{...}, AND{...}, ...) or an adjacency list."""
        if group.upper().startswith(('OR{', 'AND{', 'NOT OR{', 'NOT AND{')):
            item = makeLogicNode(group)
        else:
            item = Group().fromAdjacencyList(group)
        self.entries[label] = Entry(
            index=index,
            label=label,
            item=item,
            data=solute,
            reference=reference,
            referenceType=referenceType,
            shortDesc=shortDesc,
            longDesc=longDesc.strip(),
        )

    def saveEntry(self, f, entry):
        """
        Write the given `entry` in the thermo database to the file object `f`.
        """
        return saveEntry(f, entry)

    def generateOldLibraryEntry(self, data):
        """
        Return a list of values used to save entries to the old-style RMG
        thermo database based on the thermodynamics object `data`.
        """
        return generateOldLibraryEntry(data)

    def processOldLibraryEntry(self, data):
        """
        Process a list of parameters `data` as read from an old-style RMG
        thermo database, returning the corresponding thermodynamics object.
        """
        return processOldLibraryEntry(data)
################################################################################
class SolvationDatabase(object):
"""
A class for working with the RMG solvation database.
"""
def __init__(self):
self.libraries = {}
self.libraries['solvent'] = SolventLibrary()
self.libraries['solute'] = SoluteLibrary()
self.groups = {}
self.local_context = {
'SoluteData': SoluteData,
'SolventData': SolventData
}
self.global_context = {}
def __reduce__(self):
"""
A helper function used when pickling a SolvationDatabase object.
"""
d = {
'libraries': self.libraries,
'groups': self.groups,
'libraryOrder': self.libraryOrder,
}
return (SolvationDatabase, (), d)
def __setstate__(self, d):
"""
A helper function used when unpickling a SolvationDatabase object.
"""
self.libraries = d['libraries']
self.groups = d['groups']
self.libraryOrder = d['libraryOrder']
def load(self, path, libraries=None, depository=True):
"""
Load the solvation database from the given `path` on disk, where `path`
points to the top-level folder of the solvation database.
Load the solvent and solute libraries, then the solute groups.
"""
self.libraries['solvent'].load(os.path.join(path,'libraries','solvent.py'))
self.libraries['solute'].load(os.path.join(path,'libraries','solute.py'))
self.loadGroups(os.path.join(path, 'groups'))
def getSolventData(self, solvent_name):
try:
solventData = self.libraries['solvent'].getSolventData(solvent_name)
except:
raise DatabaseError('Solvent {0!r} not found in database'.format(solvent_name))
return solventData
def loadGroups(self, path):
"""
Load the solute database from the given `path` on disk, where `path`
points to the top-level folder of the solute database.
Three sets of groups for additivity, atom-centered ('abraham'), non atom-centered
('nonacentered'), and radical corrections ('radical')
"""
logging.info('Loading Platts additivity group database from {0}...'.format(path))
self.groups = {}
self.groups['abraham'] = SoluteGroups(label='abraham').load(os.path.join(path, 'abraham.py' ), self.local_context, self.global_context)
self.groups['nonacentered'] = SoluteGroups(label='nonacentered').load(os.path.join(path, 'nonacentered.py' ), self.local_context, self.global_context)
self.groups['radical'] = SoluteGroups(label='radical').load(os.path.join(path, 'radical.py' ), self.local_context, self.global_context)
def save(self, path):
"""
Save the solvation database to the given `path` on disk, where `path`
points to the top-level folder of the solvation database.
"""
path = os.path.abspath(path)
if not os.path.exists(path): os.mkdir(path)
self.saveLibraries(os.path.join(path, 'libraries'))
self.saveGroups(os.path.join(path, 'groups'))
def saveLibraries(self, path):
"""
Save the solute libraries to the given `path` on disk, where `path`
points to the top-level folder of the solute libraries.
"""
if not os.path.exists(path): os.mkdir(path)
for library in self.libraries.keys():
self.libraries[library].save(os.path.join(path, library+'.py'))
def saveGroups(self, path):
"""
Save the solute groups to the given `path` on disk, where `path`
points to the top-level folder of the solute groups.
"""
if not os.path.exists(path): os.mkdir(path)
for group in self.groups.keys():
self.groups[group].save(os.path.join(path, group+'.py'))
def loadOld(self, path):
"""
Load the old RMG solute database from the given `path` on disk, where
`path` points to the top-level folder of the old RMG database.
"""
for (root, dirs, files) in os.walk(os.path.join(path, 'thermo_libraries')):
if os.path.exists(os.path.join(root, 'Dictionary.txt')) and os.path.exists(os.path.join(root, 'Library.txt')):
library = SoluteLibrary(label=os.path.basename(root), name=os.path.basename(root))
library.loadOld(
dictstr = os.path.join(root, 'Dictionary.txt'),
treestr = '',
libstr = os.path.join(root, 'Library.txt'),
numParameters = 5,
numLabels = 1,
pattern = False,
)
library.label = os.path.basename(root)
self.libraries[library.label] = library
self.groups = {}
self.groups['abraham'] = SoluteGroups(label='abraham', name='Platts Group Additivity Values for Abraham Solute Descriptors').loadOld(
dictstr = os.path.join(path, 'thermo_groups', 'Abraham_Dictionary.txt'),
treestr = os.path.join(path, 'thermo_groups', 'Abraham_Tree.txt'),
libstr = os.path.join(path, 'thermo_groups', 'Abraham_Library.txt'),
numParameters = 5,
numLabels = 1,
pattern = True,
)
def saveOld(self, path):
"""
Save the old RMG Abraham database to the given `path` on disk, where
`path` points to the top-level folder of the old RMG database.
"""
# Depository not used in old database, so it is not saved
librariesPath = os.path.join(path, 'thermo_libraries')
if not os.path.exists(librariesPath): os.mkdir(librariesPath)
for library in self.libraries.values():
libraryPath = os.path.join(librariesPath, library.label)
if not os.path.exists(libraryPath): os.mkdir(libraryPath)
library.saveOld(
dictstr = os.path.join(libraryPath, 'Dictionary.txt'),
treestr = '',
libstr = os.path.join(libraryPath, 'Library.txt'),
)
groupsPath = os.path.join(path, 'thermo_groups')
if not os.path.exists(groupsPath): os.mkdir(groupsPath)
self.groups['abraham'].saveOld(
dictstr = os.path.join(groupsPath, 'Abraham_Dictionary.txt'),
treestr = os.path.join(groupsPath, 'Abraham_Tree.txt'),
libstr = os.path.join(groupsPath, 'Abraham_Library.txt'),
)
def getSoluteData(self, species):
"""
Return the solute descriptors for a given :class:`Species`
object `species`. This function first searches the loaded libraries
in order, returning the first match found, before falling back to
estimation via Platts group additivity.
"""
soluteData = None
# Check the library first
soluteData = self.getSoluteDataFromLibrary(species, self.libraries['solute'])
if soluteData is not None:
assert len(soluteData)==3, "soluteData should be a tuple (soluteData, library, entry)"
soluteData[0].comment += "Data from solute library"
soluteData = soluteData[0]
else:
# Solute not found in any loaded libraries, so estimate
soluteData = self.getSoluteDataFromGroups(species)
# No Platts group additivity for V, so set using atom sizes
soluteData.setMcGowanVolume(species)
# Return the resulting solute parameters S, B, E, L, A
return soluteData
def getAllSoluteData(self, species):
"""
Return all possible sets of Abraham solute descriptors for a given
:class:`Species` object `species`. The hits from the library come
first, then the group additivity estimate. This method is useful
for a generic search job. Right now, there should either be 1 or
2 sets of descriptors, depending on whether or not we have a
library entry.
"""
soluteDataList = []
# Data from solute library
data = self.getSoluteDataFromLibrary(species, self.libraries['solute'])
if data is not None:
assert len(data) == 3, "soluteData should be a tuple (soluteData, library, entry)"
data[0].comment += "Data from solute library"
soluteDataList.append(data)
# Estimate from group additivity
# Make it a tuple
data = (self.getSoluteDataFromGroups(species), None, None)
soluteDataList.append(data)
return soluteDataList
def getSoluteDataFromLibrary(self, species, library):
"""
Return the set of Abraham solute descriptors corresponding to a given
:class:`Species` object `species` from the specified solute
`library`. If `library` is a string, the list of libraries is searched
for a library with that name. If no match is found in that library,
``None`` is returned. If no corresponding library is found, a
:class:`DatabaseError` is raised.
"""
for label, entry in library.entries.iteritems():
for molecule in species.molecule:
if molecule.isIsomorphic(entry.item) and entry.data is not None:
return (deepcopy(entry.data), library, entry)
return None
def getSoluteDataFromGroups(self, species):
"""
Return the set of Abraham solute parameters corresponding to a given
:class:`Species` object `species` by estimation using the Platts group
additivity method. If no group additivity values are loaded, a
:class:`DatabaseError` is raised.
It averages (linearly) over the desciptors for each Molecule (resonance isomer)
in the Species.
"""
soluteData = SoluteData(0.0,0.0,0.0,0.0,0.0)
count = 0
comments = []
for molecule in species.molecule:
molecule.clearLabeledAtoms()
molecule.updateAtomTypes()
sdata = self.estimateSoluteViaGroupAdditivity(molecule)
soluteData.S += sdata.S
soluteData.B += sdata.B
soluteData.E += sdata.E
soluteData.L += sdata.L
soluteData.A += sdata.A
count += 1
comments.append(sdata.comment)
soluteData.S /= count
soluteData.B /= count
soluteData.E /= count
soluteData.L /= count
soluteData.A /= count
# Print groups that are used for debugging purposes
soluteData.comment = "Average of {0}".format(" and ".join(comments))
return soluteData
    def transformLonePairs(self, molecule):
        """
        Changes lone pairs in a molecule to two radicals for purposes of finding
        solute data via group additivity. Transformed for each atom based on valency.

        Returns a tuple ``(saturatedStruct, addedToPairs)``: a deep copy of
        `molecule` with the conversions applied, and a dict mapping each atom
        to the number of lone pairs converted on it (used later to undo the
        transformation in removeHBonding).
        """
        # Work on a deep copy so the caller's molecule is never mutated.
        saturatedStruct = molecule.copy(deep=True)
        addedToPairs = {}
        for atom in saturatedStruct.atoms:
            addedToPairs[atom] = 0
            if atom.lonePairs > 0:
                charge = atom.charge # Record this so we can conserve it when checking
                bonds = saturatedStruct.getBonds(atom)
                # Total bond order around the atom, computed once before any
                # electron bookkeeping below (bonds themselves are not changed).
                sumBondOrders = 0
                for key, bond in bonds.iteritems():
                    if bond.order == 'S': sumBondOrders += 1
                    if bond.order == 'D': sumBondOrders += 2
                    if bond.order == 'T': sumBondOrders += 3
                    if bond.order == 'B': sumBondOrders += 1.5 # We should always have 2 'B' bonds (but what about Cbf?)
                # Each loop iteration converts one lone pair into two radical
                # electrons, raising radicalElectrons by 2, until the electron
                # count matches the expected valency for the atom's group.
                if atomTypes['Val4'] in atom.atomType.generic: # Carbon, Silicon
                    while(atom.radicalElectrons + charge + sumBondOrders < 4):
                        atom.decrementLonePairs()
                        atom.incrementRadical()
                        atom.incrementRadical()
                        addedToPairs[atom] += 1
                if atomTypes['Val5'] in atom.atomType.generic: # Nitrogen
                    while(atom.radicalElectrons + charge + sumBondOrders < 3):
                        atom.decrementLonePairs()
                        atom.incrementRadical()
                        atom.incrementRadical()
                        addedToPairs[atom] += 1
                if atomTypes['Val6'] in atom.atomType.generic: # Oxygen, sulfur
                    while(atom.radicalElectrons + charge + sumBondOrders < 2):
                        atom.decrementLonePairs()
                        atom.incrementRadical()
                        atom.incrementRadical()
                        addedToPairs[atom] += 1
                if atomTypes['Val7'] in atom.atomType.generic: # Chlorine
                    while(atom.radicalElectrons + charge + sumBondOrders < 1):
                        atom.decrementLonePairs()
                        atom.incrementRadical()
                        atom.incrementRadical()
                        addedToPairs[atom] += 1
        # Rebuild derived molecular properties after mutating electron counts.
        saturatedStruct.updateConnectivityValues()
        saturatedStruct.sortVertices()
        saturatedStruct.updateAtomTypes()
        saturatedStruct.updateLonePairs()
        saturatedStruct.updateMultiplicity()
        return saturatedStruct, addedToPairs
    def removeHBonding(self, saturatedStruct, addedToRadicals, addedToPairs, soluteData):
        """
        Undo the saturation applied for group-additivity estimation and apply
        the radical-group H-bonding corrections to `soluteData`.

        `addedToRadicals` maps atoms to the (H atom, bond) pairs added during
        saturation; `addedToPairs` maps atoms to the number of lone pairs that
        were converted to radicals. Returns the updated `soluteData`.
        """
        # Remove hydrogen bonds and restore the radical
        for atom in addedToRadicals:
            for H, bond in addedToRadicals[atom]:
                saturatedStruct.removeBond(bond)
                saturatedStruct.removeAtom(H)
                atom.incrementRadical()
        # Change transformed lone pairs back
        for atom in addedToPairs:
            if addedToPairs[atom] > 0:
                # NOTE(review): range(1, n) runs n-1 times, so one fewer pair
                # is restored than was converted in transformLonePairs —
                # confirm whether this off-by-one is intentional.
                # NOTE(review): decrementRadical/incrementLonePairs are called
                # on the structure rather than on `atom` — confirm the target.
                for pair in range(1, addedToPairs[atom]):
                    saturatedStruct.decrementRadical()
                    saturatedStruct.decrementRadical()
                    saturatedStruct.incrementLonePairs()
        # Update Abraham 'A' H-bonding parameter for unsaturated struct
        for atom in saturatedStruct.atoms:
            # Iterate over heavy (non-hydrogen) atoms
            if atom.isNonHydrogen() and atom.radicalElectrons > 0:
                # NOTE(review): range(1, n) applies the correction n-1 times,
                # so mono-radicals (n=1) get no correction — verify intended.
                for electron in range(1, atom.radicalElectrons):
                    # Get solute data for radical group; being absent from the
                    # radical group database is not an error.
                    try:
                        self.__addGroupSoluteData(soluteData, self.groups['radical'], saturatedStruct, {'*':atom})
                    except KeyError: pass
        return soluteData
def estimateSoluteViaGroupAdditivity(self, molecule):
"""
Return the set of Abraham solute parameters corresponding to a given
:class:`Molecule` object `molecule` by estimation using the Platts' group
additivity method. If no group additivity values are loaded, a
:class:`DatabaseError` is raised.
"""
# For thermo estimation we need the atoms to already be sorted because we
# iterate over them; if the order changes during the iteration then we
# will probably not visit the right atoms, and so will get the thermo wrong
molecule.sortVertices()
# Create the SoluteData object with the intercepts from the Platts groups
soluteData = SoluteData(
S = 0.277,
B = 0.071,
E = 0.248,
L = 0.13,
A = 0.003
)
addedToRadicals = {} # Dictionary of key = atom, value = dictionary of {H atom: bond}
addedToPairs = {} # Dictionary of key = atom, value = # lone pairs changed
saturatedStruct = molecule.copy(deep=True)
# Convert lone pairs to radicals, then saturate with H.
# Change lone pairs to radicals based on valency
if sum([atom.lonePairs for atom in saturatedStruct.atoms]) > 0: # molecule contains lone pairs
saturatedStruct, addedToPairs = self.transformLonePairs(saturatedStruct)
# Now saturate radicals with H
if sum([atom.radicalElectrons for atom in saturatedStruct.atoms]) > 0: # radical species
addedToRadicals = saturatedStruct.saturate()
# Saturated structure should now have no unpaired electrons, and only "expected" lone pairs
# based on the valency
for atom in saturatedStruct.atoms:
# Iterate over heavy (non-hydrogen) atoms
if atom.isNonHydrogen():
# Get initial solute data from main group database. Every atom must
# be found in the main abraham database
try:
self.__addGroupSoluteData(soluteData, self.groups['abraham'], saturatedStruct, {'*':atom})
except KeyError:
logging.error("Couldn't find in main abraham database:")
logging.error(saturatedStruct)
logging.error(saturatedStruct.toAdjacencyList())
raise
# Get solute data for non-atom centered groups (being found in this group
# database is optional)
try:
self.__addGroupSoluteData(soluteData, self.groups['nonacentered'], saturatedStruct, {'*':atom})
except KeyError: pass
soluteData = self.removeHBonding(saturatedStruct, addedToRadicals, addedToPairs, soluteData)
return soluteData
def __addGroupSoluteData(self, soluteData, database, molecule, atom):
"""
Determine the Platts group additivity solute data for the atom `atom`
in the structure `structure`, and add it to the existing solute data
`soluteData`.
"""
node0 = database.descendTree(molecule, atom, None)
if node0 is None:
raise KeyError('Node not found in database.')
# It's possible (and allowed) that items in the tree may not be in the
# library, in which case we need to fall up the tree until we find an
# ancestor that has an entry in the library
node = node0
while node is not None and node.data is None:
node = node.parent
if node is None:
raise KeyError('Node has no parent with data in database.')
data = node.data
comment = node.label
while isinstance(data, basestring) and data is not None:
for entry in database.entries.values():
if entry.label == data:
data = entry.data
comment = entry.label
break
comment = '{0}({1})'.format(database.label, comment)
# This code prints the hierarchy of the found node; useful for debugging
#result = ''
#while node is not None:
# result = ' -> ' + node + result
# node = database.tree.parent[node]
#print result[4:]
# Add solute data for each atom to the overall solute data for the molecule.
soluteData.S += data.S
soluteData.B += data.B
soluteData.E += data.E
soluteData.L += data.L
soluteData.A += data.A
soluteData.comment += comment + "+"
return soluteData
def calcH(self, soluteData, solventData):
"""
Returns the enthalpy of solvation, at 298K, in J/mol
"""
# Use Mintz parameters for solvents. Multiply by 1000 to go from kJ->J to maintain consistency
delH = 1000*((soluteData.S*solventData.s_h)+(soluteData.B*solventData.b_h)+(soluteData.E*solventData.e_h)+(soluteData.L*solventData.l_h)+(soluteData.A*solventData.a_h)+solventData.c_h)
return delH
def calcG(self, soluteData, solventData):
"""
Returns the Gibbs free energy of solvation, at 298K, in J/mol
"""
# Use Abraham parameters for solvents to get log K
logK = (soluteData.S*solventData.s_g)+(soluteData.B*solventData.b_g)+(soluteData.E*solventData.e_g)+(soluteData.L*solventData.l_g)+(soluteData.A*solventData.a_g)+solventData.c_g
# Convert to delG with units of J/mol
delG = -8.314*298*2.303*logK
return delG
def calcS(self, delG, delH):
"""
Returns the entropy of solvation, at 298K, in J/mol/K
"""
delS = (delH-delG)/298
return delS
def getSolvationCorrection(self, soluteData, solventData):
"""
Given a soluteData and solventData object, calculates the enthalpy, entropy,
and Gibbs free energy of solvation at 298 K. Returns a SolvationCorrection
object
"""
correction = SolvationCorrection(0.0, 0.0, 0.0)
correction.enthalpy = self.calcH(soluteData, solventData)
correction.gibbs = self.calcG(soluteData, solventData)
correction.entropy = self.calcS(correction.gibbs, correction.enthalpy)
return correction
|
Agriculture is an important economic sector in the Vernon area. There are over 350 farms with a total farm area of 23,733 hectares, generating an estimated $17-million in annual gross farm receipts. The larger North Okanagan Regional District consists of approximately 1,167 farms with a total area of 84,339 hectares and estimated annual farm receipts of $26.2-million. The North Okanagan has 65,124 hectares (8.7%) located in the Agricultural Land Reserve.
Agriculture in the Vernon area is diverse, leading in cattle ranching, animal production and fruit farming. However, greenhouse, nursery and floriculture production, vegetable and melon farming, poultry and egg production, sheep and goat farming are also fast-growing activities locally.
The Vernon area has a diverse food-processing sector that has developed in partnership with the region’s agricultural base. There are an estimated 14 food-processing firms in the area. These companies supply local markets through farm gate sales and target export markets around the world.
Innovation continues to create new opportunities in the agriculture and food-processing sector. One example is the establishment of vineyards in the Vernon area with new grape varieties. Growth in Metro Vancouver is creating increased demand for agricultural land in the Lower Mainland, making it desirable to relocate some farming and food-processing activities to areas where agricultural and industrial land is more affordable and readily available.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.