text stringlengths 0 1.05M | meta dict |
|---|---|
"""A tool for adding a new cluster / constellation from photographs."""
import os
import cv2
import numpy
import mel.lib.common
import mel.lib.image
def setup_parser(parser):
    """Register this tool's command-line arguments on *parser*."""
    mel.lib.common.add_context_detail_arguments(parser)
    parser.add_argument(
        "destination",
        help="New path to create and store the constellation to.",
        type=str,
        default=None,
    )
    parser.add_argument(
        "moles",
        help="Names of the moles to store.",
        type=str,
        default=None,
        nargs="+",
    )
def process_args(args):
    """Interactively build a constellation from the context/detail images.

    Guides the user through marking each mole, reviewing montages, and
    then writes the resulting images under ``args.destination``.
    """
    # TODO: validate destination path up-front
    # TODO: validate mole names up-front
    context_image, detail_image = mel.lib.common.process_context_detail_args(
        args
    )
    montage_size = 1024
    mole_size = 512
    # print out the dimensions of the images
    print("{}: {}".format(args.context, context_image.shape))
    print("{}: {}".format(args.detail, detail_image.shape))
    # display the context image in a reasonably sized window
    window_name = "display"
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    window_width = 800
    window_height = 600
    cv2.resizeWindow(window_name, window_width, window_height)
    # get the user to mark the mole positions
    context_mole_pos, detail_mole_pos = mel.lib.common.user_mark_moles(
        window_name, context_image, detail_image, len(args.moles)
    )
    # Put a box around moles on context image
    mel.lib.common.box_moles(context_image, context_mole_pos, thickness=50)
    # Connect moles on cluster detail image
    cluster_detail_image = numpy.copy(detail_image)
    mel.lib.common.connect_moles(cluster_detail_image, detail_mole_pos)
    # Combine context image with cluster detail image to make montage
    cluster_montage_image = mel.lib.image.montage_horizontal(
        50, context_image, cluster_detail_image
    )
    cluster_montage_image = mel.lib.common.shrink_to_max_dimension(
        cluster_montage_image, montage_size
    )
    # Let user review montage
    mel.lib.common.user_review_image(window_name, cluster_montage_image)
    # Point to moles on individual detail images
    mole_images = []
    for mole in detail_mole_pos:
        indicated_image = numpy.copy(detail_image)
        mel.lib.common.indicate_mole(indicated_image, mole)
        indicated_image = mel.lib.common.shrink_to_max_dimension(
            indicated_image, mole_size
        )
        mel.lib.common.user_review_image(window_name, indicated_image)
        mole_images.append(indicated_image)
    # No more interaction, close all windows
    cv2.destroyAllWindows()
    # Write the images
    #
    # TODO: try to determine the date from the original filename if possible
    # and use that in ISO 8601 format.
    #
    mel.lib.common.overwrite_image(
        args.destination,
        mel.lib.common.determine_filename_for_ident(args.context, args.detail),
        cluster_montage_image,
    )
    for index, mole in enumerate(args.moles):
        mole_dir = os.path.join(args.destination, mole)
        mel.lib.common.overwrite_image(
            mole_dir,
            mel.lib.common.determine_filename_for_ident(args.detail),
            mole_images[index],
        )
    # TODO: optionally remove the original images
# -----------------------------------------------------------------------------
# Copyright (C) 2015-2018 Angelos Evripiotis.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| {
"repo_name": "aevri/mel",
"path": "mel/cmd/addcluster.py",
"copies": "1",
"size": "4081",
"license": "apache-2.0",
"hash": -4081191757329977000,
"line_mean": 31.3888888889,
"line_max": 79,
"alpha_frac": 0.654006371,
"autogenerated": false,
"ratio": 3.693212669683258,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9847219040683257,
"avg_score": 0,
"num_lines": 126
} |
"""A tool for adding a single mole from photographs."""
import cv2
import mel.lib.common
import mel.lib.image
def setup_parser(parser):
    """Register this tool's command-line arguments on *parser*."""
    mel.lib.common.add_context_detail_arguments(parser)
    parser.add_argument(
        "destination",
        help="New path to create and store the mole to.",
        type=str,
        default=None,
    )
def process_args(args):
    """Interactively capture a single mole and write its montage image.

    Shows the context and detail images, has the user mark the mole,
    builds a reviewable montage, and writes it to ``args.destination``.
    """
    # TODO: validate destination path up-front
    # TODO: validate mole names up-front
    context_image, detail_image = mel.lib.common.process_context_detail_args(
        args
    )
    # TODO: extract this choice to a common place
    montage_size = 1024
    # display the context image in a reasonably sized window
    # TODO: extract this choice to a common place
    window_name = "display"
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    window_width = 800
    window_height = 600
    cv2.resizeWindow(window_name, window_width, window_height)
    # get the user to mark the mole positions
    context_mole_pos, detail_mole_pos = mel.lib.common.user_mark_moles(
        window_name, context_image, detail_image, 1
    )
    # Put a box around mole on context image
    # TODO: extract the thickness choice to a common place
    mel.lib.common.box_moles(context_image, context_mole_pos, thickness=50)
    # Point to mole on detail image
    mel.lib.common.indicate_mole(detail_image, detail_mole_pos[0])
    # Combine context image with detail image to make montage
    montage_image = mel.lib.image.montage_horizontal(
        50, context_image, detail_image
    )
    montage_image = mel.lib.common.shrink_to_max_dimension(
        montage_image, montage_size
    )
    # Let user review montage
    mel.lib.common.user_review_image(window_name, montage_image)
    # No more interaction, close all windows
    cv2.destroyAllWindows()
    # Write the images
    mel.lib.common.overwrite_image(
        args.destination,
        mel.lib.common.determine_filename_for_ident(args.context, args.detail),
        montage_image,
    )
    # TODO: optionally remove the original images
# -----------------------------------------------------------------------------
# Copyright (C) 2015-2018 Angelos Evripiotis.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| {
"repo_name": "aevri/mel",
"path": "mel/cmd/addsingle.py",
"copies": "1",
"size": "2842",
"license": "apache-2.0",
"hash": 3103429904296938500,
"line_mean": 30.2307692308,
"line_max": 79,
"alpha_frac": 0.6632653061,
"autogenerated": false,
"ratio": 3.7345597897503287,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48978250958503283,
"avg_score": null,
"num_lines": null
} |
"""A tool for compressing tile images."""
import io
import logging
import subprocess
logger = logging.getLogger(__name__)
class Compressor:
    """Abstract base class for data compressors."""

    def compress(self, data):
        """Return the compressed form of *data*.

        Subclasses must override this; an exception is raised if
        something has gone wrong during compression.
        """
        raise NotImplementedError('This method should be overridden.')
class CommandLineCompressor(Compressor):
    """
    A compressor which executes an external command, feeding the input data
    to the command's stdin and reading the compressed result from its stdout.
    """

    def __init__(self, args):
        """Remember and log the command line to execute.

        :param args: the command and its arguments, as a list suitable
            for ``subprocess``.
        """
        logger.info('Initialising command-line compressor: %s', args)
        # Command line handed to subprocess on every compress() call.
        self.args = args

    def compress(self, data):
        """Run the command with *data* on stdin and return its stdout.

        Raises ``subprocess.CalledProcessError`` if the command exits
        with a non-zero status.
        """
        logger.debug('Executing: %s', self.args)
        return subprocess.check_output(self.args, input=data)
class Pngquant(CommandLineCompressor):
    """A compressor for PNGs."""

    def __init__(self, quality='50-75'):
        # '-' makes pngquant read stdin and write stdout.
        command = ['pngquant', '--quality', quality, '--speed', '1', '-']
        super().__init__(command)
class Jpegoptim(CommandLineCompressor):
    """A compressor for JPEGs."""

    def __init__(self, max_quality='75'):
        # '-' makes jpegoptim read stdin and write stdout.
        command = ['jpegoptim', '--max', max_quality, '-']
        super().__init__(command)
| {
"repo_name": "thomasleese/cartographer",
"path": "cartographer/compressors.py",
"copies": "1",
"size": "1318",
"license": "mit",
"hash": 4937737060334779000,
"line_mean": 24.3461538462,
"line_max": 79,
"alpha_frac": 0.593323217,
"autogenerated": false,
"ratio": 4.30718954248366,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 52
} |
"""A tool for converting my novel to other formats.
Only supports :- or >-delimited body text, as well as ;- or <-delimited
preformatted body text.
"""
import click
from .votl import tree_from_file
from .output.html import html_from_tree
from .output.markdown import markdown_from_tree, MarkdownError
from .output.text import words_from_tree
from sys import stderr
def usage(name):
    """Print a usage message.

    Params:
        name (str): Name of the file to be run
    """
    lines = (
        "usage: {} command file".format(name),
        "",
        "Valid commands:",
        "",
        "html: Convert to html",
        "markdown: Convert to markdown. Custom text objects not supported,"
        " yet.",
        "words: Dump all paragraphs of body text in plain (colon-separated)"
        " body objects.",
    )
    print("\n".join(lines))
@click.group()
def unikko():
    """Top-level command group; the subcommands below attach to this."""
    pass
@unikko.command(help="Convert OTL to HTML")
@click.argument("input", type=click.File("r"))
def html(input):
    """Render the parsed OTL tree as HTML on stdout."""
    parsed = tree_from_file(input)
    if not parsed:
        return
    print(html_from_tree(parsed))
@unikko.command(help="Convert OTL to markdown")
@click.argument("input", type=click.File("r"))
def markdown(input):
    """Render the parsed OTL tree as markdown, reporting conversion errors."""
    parsed = tree_from_file(input)
    if not parsed:
        return
    try:
        print(markdown_from_tree(parsed))
    except MarkdownError as e:
        print("Error generating markdown:", str(e), file=stderr)
@unikko.command(help="Convert OTL body text to plain text (for word counting)")
@click.argument("input", type=click.File("r"))
def words(input):
    """Dump the tree's body text as plain text on stdout."""
    parsed = tree_from_file(input)
    if not parsed:
        return
    print(words_from_tree(parsed))
if __name__ == "__main__":
    # Allow running this module directly as a script.
    unikko()
| {
"repo_name": "fennekki/unikko",
"path": "unikko/unikko.py",
"copies": "1",
"size": "1751",
"license": "bsd-2-clause",
"hash": 4728297130163150000,
"line_mean": 24.75,
"line_max": 79,
"alpha_frac": 0.6219303255,
"autogenerated": false,
"ratio": 3.6555323590814197,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9774739373034579,
"avg_score": 0.0005446623093681917,
"num_lines": 68
} |
"""A tool for downloading historical data"""
import argparse
import asyncio
import logging
from datetime import datetime, timedelta
from functools import reduce
from async_v20 import OandaClient
# Echo the client library's INFO-level logging to stderr so progress shows.
logger = logging.getLogger('async_v20')
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
# Command-line interface; dates are parsed as '-'-separated integers later.
parser = argparse.ArgumentParser()
parser.add_argument('--from-time', help='The date from when to get the date')
parser.add_argument('--to-time', help='Get the required data up to this date', )
parser.add_argument('--instrument', help='The instrument of the data to get')
parser.add_argument('--granularity', help='The width of the candle to get')
parser.add_argument('--out-file', help='The destination of the data')
parser.add_argument('--time-out', help='The request time out period', default=30)
# Minutes spanned by one maximum-size (5000 candle) request at each
# granularity, e.g. 'S5': 5000 candles * 5 seconds = 25000 s ~= 416 minutes.
# Used by create_requests() to step the from_time between requests.
granularity_to_minutes = {
    'S5': 416,
    'S10': 833,
    'S15': 1250,
    'S30': 2500,
    'M1': 5000,
    'M2': 10000,
    'M4': 20000,
    'M5': 25000,
    'M10': 50000,
    'M15': 75000,
    'M30': 150000,
    'H1': 300000,
    'H2': 600000,
    'H3': 900000,
    'H4': 1200000,
    'H6': 1800000,
    'H8': 2400000,
    'H12': 3600000,
    'D': 7200000,
    'W': 50400000,
    'M': 201600000
}
def create_requests(client,
                    instrument_name,
                    granularity,
                    from_time,
                    to_time,
                    price='MBA',
                    count=5000):
    """Yield candle-request coroutines covering [from_time, to_time).

    Each request asks for *count* candles, so successive requests step
    forward by the span those candles cover at *granularity*.

    :param client: OandaClient used to build the requests
    :param instrument_name: instrument to download candles for
    :param granularity: candle width; a key of ``granularity_to_minutes``
    :param from_time: datetime of the first candle wanted
    :param to_time: datetime at which to stop requesting
    :param price: price components to request
    :param count: candles per request
    """
    change = timedelta(minutes=granularity_to_minutes[granularity])
    while from_time < to_time:
        yield client.get_candles(instrument_name, price=price, granularity=granularity, count=count,
                                 from_time=from_time)
        from_time = from_time + change
async def worker(coroutines, data_frames):
    """Drain requests from the shared iterator, appending each DataFrame.

    Several workers share one *coroutines* iterator, so each request is
    awaited by exactly one worker.
    """
    for coroutine in coroutines:
        rsp = await coroutine
        if not rsp.status == 200:
            raise ConnectionError(f'Failed Downloading candles. {rsp.json()}')
        df = rsp.candles.dataframe(datetime_format='UNIX')
        df.index = df.time
        data_frames.append(df)
async def get_data(client, instrument_name, granularity, from_time, to_time):
    """Download all candles for the range and return one sorted DataFrame."""
    data_frames = []
    coroutines = create_requests(client, instrument_name, granularity, from_time, to_time)
    # Five workers share the single generator, so requests run concurrently
    # without any request being issued twice.
    await asyncio.gather(*[worker(coroutines, data_frames) for _ in range(5)])
    # NOTE(review): DataFrame.append was removed in pandas 2.0; this line
    # assumes an older pandas — consider pandas.concat(data_frames). TODO confirm.
    df = reduce(lambda x, y: x.append(y), data_frames)
    df.sort_index(inplace=True)
    return df
async def execute():
    """Parse the CLI arguments, download the candles and write them as CSV."""
    namespace = parser.parse_args()
    from_time = datetime(*map(int, namespace.from_time.split('-')))
    to_time = datetime(*map(int, namespace.to_time.split('-')))
    async with OandaClient(rest_timeout=int(namespace.time_out)) as client:
        df = await get_data(client, namespace.instrument,
                            namespace.granularity, from_time, to_time)
        df.to_csv(namespace.out_file)
# Drive the async entry point to completion on the default event loop.
loop = asyncio.get_event_loop()
# NOTE(review): this alias is never used below; presumably left over.
run = loop.run_until_complete
loop.run_until_complete(execute())
| {
"repo_name": "jamespeterschinner/async_v20",
"path": "bin/candle_data.py",
"copies": "1",
"size": "3154",
"license": "mit",
"hash": 3919775900819370500,
"line_mean": 29.9215686275,
"line_max": 100,
"alpha_frac": 0.6347495244,
"autogenerated": false,
"ratio": 3.492801771871539,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9624785619161706,
"avg_score": 0.000553135421966638,
"num_lines": 102
} |
'''A tool for encrypting and decrypting data with the AWS KMS'''
from __future__ import print_function
import sys
import os
from argparse import ArgumentParser
from .kms import (
get_client,
EncryptionError,
)
from . import files
def parse_kv(kv_string):
    """Parse a 'key=value,key=value' string into a dict.

    Only the first '=' in each pair separates key from value, so values
    may themselves contain '=' characters (e.g. base64 padding).

    :param kv_string: comma-separated key=value pairs
    :return: dict mapping each key to its value
    """
    values = {}
    for kv in kv_string.split(','):
        key, _, value = kv.partition('=')
        values[key] = value
    return values
def pack(args):
    """Encrypt args.source_path into '<source_path>.kt' using KMS."""
    kms_client = get_client(profile=args.profile, region=args.region)
    context = (
        parse_kv(args.encryption_context) if args.encryption_context else None
    )
    files.pack(kms_client, args.key,
               args.source_path, args.source_path + '.kt', context)
def unpack(args):
    """Decrypt args.source_path into the current directory."""
    kms_client = get_client(profile=args.profile, region=args.region)
    context = (
        parse_kv(args.encryption_context) if args.encryption_context else None
    )
    try:
        files.unpack(kms_client, args.source_path, '.', context)
    except EncryptionError as e:
        print(e, file=sys.stderr)
        exit(1)
def cli():
    """Entry point: parse the command line and dispatch to pack/unpack."""
    # Arguments shared by every subcommand.
    common_args = ArgumentParser(add_help=False, description=__doc__)
    common_args.add_argument('--profile', help='AWS client profile')
    common_args.add_argument('--region', help='AWS region')
    common_args.add_argument('-c', '--encryption-context',
                             help='key=val,key=val')
    ap = ArgumentParser()
    # Require a subcommand: without this, invoking with no arguments would
    # crash with AttributeError on the missing 'func' default.
    sp = ap.add_subparsers(dest='command', required=True)
    pack_ap = sp.add_parser('pack', help='Store KMS-encrypted data',
                            parents=(common_args,))
    pack_ap.add_argument('key',
                         help='The master key to use. Pass a key ID or '
                         'alias/<alias-name>.')
    pack_ap.add_argument('source_path')
    pack_ap.set_defaults(func=pack)
    unpack_ap = sp.add_parser('unpack', help='Retrieve KMS-encrypted data',
                              parents=(common_args,))
    unpack_ap.add_argument('source_path')
    unpack_ap.set_defaults(func=unpack)
    args = ap.parse_args()
    if not os.path.exists(args.source_path):
        ap.exit(1, 'File not found: {}\n'.format(args.source_path))
    # Strip a trailing slash so derived paths like '<path>.kt' stay sane.
    if args.source_path.endswith("/"):
        args.source_path = args.source_path[:-1]
    args.func(args)
| {
"repo_name": "slank/kmstool",
"path": "kmstool/cli.py",
"copies": "2",
"size": "2277",
"license": "mit",
"hash": -4298569366250346000,
"line_mean": 29.7702702703,
"line_max": 75,
"alpha_frac": 0.6095740009,
"autogenerated": false,
"ratio": 3.6142857142857143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5223859715185715,
"avg_score": null,
"num_lines": null
} |
"A tool for generating HTML reports."
import datetime
from itertools import chain
import click
from yattag import Doc
from cosmic_ray.work_db import WorkDB, use_db
from cosmic_ray.work_item import TestOutcome
from cosmic_ray.tools.survival_rate import kills_count, survival_rate
@click.command()
@click.option("--only-completed/--not-only-completed", default=False)
@click.option("--skip-success/--include-success", default=False)
@click.argument("session-file", type=click.Path(dir_okay=False, readable=True, exists=True))
def report_html(only_completed, skip_success, session_file):
    """Print an HTML formatted report of test results."""
    with use_db(session_file, WorkDB.Mode.open) as db:
        report = _generate_html_report(db, only_completed, skip_success)
    print(report.getvalue())
# TODO: Redo this with jinja?
def _generate_html_report(db, only_completed, skip_success):
    """Build the full HTML report document.

    Args:
        db: the WorkDB to report on.
        only_completed: if True, report only completed work items.
        skip_success: if True, omit successfully-killed mutants.

    Returns:
        A yattag Doc holding the complete HTML document.
    """
    # pylint: disable=too-many-statements
    doc, tag, text = Doc().tagtext()
    doc.asis("<!DOCTYPE html>")
    with tag("html", lang="en"):
        with tag("head"):
            # Bootstrap CSS from CDN, pinned with subresource integrity.
            doc.stag("meta", charset="utf-8")
            doc.stag("meta", name="viewport", content="width=device-width, initial-scale=1, shrink-to-fit=no")
            doc.stag(
                "link",
                rel="stylesheet",
                href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css",
                integrity="sha384-ggOyR0iXCbMQv3Xipma34MD+dH/1fQ784/j6cY/iJTQUOhcWr7x9JvoRxT2MZw1T",
                crossorigin="anonymous",
            )
            with tag("title"):
                text("Cosmic Ray Report")
        with tag("body"):
            with tag("div", klass="container"):
                with tag("h1"):
                    with tag("p", klass="text-dark"):
                        text("Cosmic Ray Report")
            # NOTE(review): 'all_items' is never used below —
            # _generate_job_list re-reads db.completed_work_items itself,
            # so 'only_completed' currently has no effect. TODO confirm.
            all_items = db.completed_work_items
            if not only_completed:
                incomplete = ((item, None) for item in db.pending_work_items)
                all_items = chain(all_items, incomplete)
            with tag("div", klass="container"):
                # Summary info
                _generate_summary(doc, db)
                # Job list
                _generate_job_list(doc, db, skip_success)
            # jQuery, Popper and Bootstrap JS required by the collapse widgets.
            with tag("script"):
                doc.attr(src="https://code.jquery.com/jquery-3.3.1.slim.min.js")
                doc.attr(("integrity", "sha384-q8i/X+965DzO0rT7abK41JStQIAqVgRVzpbzo5smXKp4YfRvH+8abtTE1Pi6jizo"))
                doc.attr(("crossorigin", "anonymous"))
            with tag("script"):
                doc.attr(src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js")
                doc.attr(("integrity", "sha384-UO2eT0CpHqdSJQ6hJty5KVphtPhzWj9WO1clHTMGa3JDZwrnQq4sF86dIHNDz0W1"))
                doc.attr(("crossorigin", "anonymous"))
            with tag("script"):
                doc.attr(src="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js")
                doc.attr(("integrity", "sha384-JjSmVgyd0p3pXB1rRibZUAYoIIy6OrQ6VrjIEaFf/nJGzIxFDsf4x0xIM+B07jRM"))
                doc.attr(("crossorigin", "anonymous"))
            # Inline script that shows/hides the expand/collapse-all buttons
            # depending on the state of the per-job collapse panels.
            with tag("script", type="text/javascript"):
                doc.asis(
                    "$('div.job_list___sub_multi_collapse').on('shown.bs.collapse',"
                    " function () {"
                    "     correct_behavior_functional_buttons();"
                    " });"
                    "$('div.job_list___sub_multi_collapse').on('hidden.bs.collapse',"
                    " function () {"
                    "     correct_behavior_functional_buttons();"
                    " });"
                    "function correct_behavior_functional_buttons() {"
                    "    var expand = false;"
                    "    var collapse = false;"
                    "    $('a.job_list___sub_multi_heading').each(function(index) {"
                    "        if ($(this).attr('aria-expanded') == 'false') {"
                    "            expand = true;"
                    "            return false;"
                    "        };"
                    "    });"
                    "    $('a.job_list___sub_multi_heading').each(function(index) {"
                    "        if ($(this).attr('aria-expanded') == 'true') {"
                    "            collapse = true;"
                    "            return false;"
                    "        };"
                    "    });"
                    "    if (expand) {"
                    "        $('div#job_item_expand_all').css('display', 'inline-block');"
                    "    } else {"
                    "        $('div#job_item_expand_all').css('display', 'none');"
                    "    };"
                    "    if (collapse) {"
                    "        $('div#job_item_collapse_all').css('display', 'inline-block');"
                    "    } else {"
                    "        $('div#job_item_collapse_all').css('display', 'none');"
                    "    };"
                    "    };"
                    "correct_behavior_functional_buttons();"
                )
    return doc
def _generate_job_list(doc, db, skip_success):
    """Append the collapsible per-job list section to *doc*.

    Args:
        doc: the yattag Doc being built.
        db: the WorkDB whose completed work items are listed.
        skip_success: forwarded to each work-item card.
    """
    doc, tag, text = doc.tagtext()
    with tag("div", klass="mb-1", id="job_list___accordion"):
        with tag("div", klass="card"):
            # Clickable header that toggles the whole job-list panel.
            with tag(
                "a",
                ("data-toggle", "collapse"),
                ("data-target", "#job_list___collapse_1"),
                ("aria-expanded", "false"),
                ("aria-controls", "job_list___collapse_1"),
                href="#",
            ):
                with tag("div", klass="card-header", id="job_list___heading_1"):
                    with tag("button", klass="btn btn-outline-dark"):
                        with tag("h4", klass="m-0"):
                            text("Job list")
            with tag(
                "div",
                ("aria-labelledby", "job_list___heading_1"),
                ("data-parent", "#job_list___accordion"),
                klass="collapse",
                id="job_list___collapse_1",
            ):
                with tag("div", klass="card-body"):
                    # Expand-all / collapse-all buttons; their visibility is
                    # managed by the inline script in the report footer.
                    with tag("div", klass="text-right mb-1"):
                        with tag("div", klass="mx-1", id="job_item_expand_all"):
                            with tag(
                                "a",
                                href="#",
                                onclick="$('div.job_list___sub_multi_collapse').collapse('show');",
                            ):
                                with tag("button", klass="btn btn-outline-dark"):
                                    with tag("span"):
                                        text("Expand All")
                        with tag("div", klass="mx-1", id="job_item_collapse_all"):
                            with tag(
                                "a",
                                href="#",
                                onclick="$('div.job_list___sub_multi_collapse').collapse('hide');",
                            ):
                                with tag("button", klass="btn btn-outline-dark"):
                                    with tag("span"):
                                        text("Collapse All")
                    # Job item
                    # NOTE(review): only completed work items are listed here;
                    # pending items are never shown. TODO confirm intent.
                    all_items = db.completed_work_items
                    for index, (work_item, result) in enumerate(all_items, start=1):
                        _generate_work_item_card(doc, index, work_item, result, skip_success)
def _generate_work_item_card(doc, index, work_item, result, skip_success):
    """Append one collapsible card describing a single work item to *doc*.

    Args:
        doc: the yattag Doc being built.
        index: 1-based position of the item, used for element ids.
        work_item: the work item whose mutations are shown.
        result: the item's result, or None for a pending item.
        skip_success: if True, killed (non-incompetent) mutants are omitted.
    """
    doc, tag, text = doc.tagtext()
    # Pick the Bootstrap alert level: info for incompetent, success for
    # killed, danger for survivors.
    if result is not None:
        if result.is_killed:
            if result.test_outcome == TestOutcome.INCOMPETENT:
                level = "info"
            else:
                level = "success"
                if skip_success:
                    return
        else:
            level = "danger"
    # NOTE(review): when result is None, 'level' is never assigned but is
    # used below, so a pending item would raise UnboundLocalError here.
    # The only caller passes completed items, which masks this. TODO confirm.
    with tag("div", klass="mb-1", id="job_list___sub_accordion_{}".format(index)):
        with tag("div", klass="card"):
            # Clickable card header toggling this item's details panel.
            with tag(
                "a",
                ("data-toggle", "collapse"),
                ("data-target", "#job_list___sub_collapse_{}_1".format(index)),
                ("aria-expanded", "false"),
                ("aria-controls", "job_list___sub_collapse_{}_1".format(index)),
                href="#",
                klass="job_list___sub_multi_heading",
            ):
                with tag(
                    "div",
                    ("role", "alert"),
                    klass="card-header alert-{}".format(level),
                    id="job_list___sub_heading_{}_1".format(index),
                ):
                    with tag("button", klass="btn btn-outline-{}".format(level)):
                        with tag("span", klass="job_id"):
                            text("{} : Job ID {}".format(index, work_item.job_id))
            with tag(
                "div",
                ("aria-labelledby", "job_list___sub_heading_{}_1".format(index)),
                ("data-parent", "#job_list___sub_accordion_{}".format(index)),
                klass="collapse job_list___sub_multi_collapse",
                id="job_list___sub_collapse_{}_1".format(index),
            ):
                with tag("div", klass="card-body"):
                    with tag("div", klass="work-item"):
                        # Outcome summary for this item.
                        with tag(
                            "div",
                            klass="alert alert-{} test-outcome".format(level),
                            role="alert",
                        ):
                            if result is not None:
                                if not result.is_killed:
                                    with tag("p"):
                                        text("SURVIVED")
                                with tag("p"):
                                    text("worker outcome: {}".format(result.worker_outcome))
                                with tag("p"):
                                    text("test outcome: {}".format(result.test_outcome))
                            else:
                                with tag("p"):
                                    text("No result")
                        # One entry per mutation, linking into PyCharm.
                        for mutation in work_item.mutations:
                            with tag("pre", klass="location"):
                                with tag(
                                    "a",
                                    href=pycharm_url(str(mutation.module_path), mutation.start_pos[0]),
                                    klass="text-secondary",
                                ):
                                    with tag("button", klass="btn btn-outline-dark"):
                                        text(
                                            "{}, start pos: {}, end pos: {}".format(
                                                mutation.module_path,
                                                mutation.start_pos,
                                                mutation.end_pos,
                                            )
                                        )
                            with tag("pre"):
                                text("operator: {}, occurrence: {}".format(mutation.operator_name, mutation.occurrence))
                        # Diff and captured output, when available.
                        if result is not None:
                            if result.diff:
                                with tag("div", klass="alert alert-secondary"):
                                    with tag("pre", klass="diff"):
                                        text(result.diff)
                            if result.output:
                                with tag("div", klass="alert alert-secondary"):
                                    with tag("pre", klass="diff"):
                                        text(result.output)
def _generate_summary(doc, db):
    """Append the collapsible summary section (counts, rates) to *doc*.

    Args:
        doc: the yattag Doc being built.
        db: the WorkDB providing the item and result counts.
    """
    doc, tag, text = doc.tagtext()
    num_items = db.num_work_items
    num_complete = db.num_results
    with tag("div", klass="mb-1", id="summary_info___accordion"):
        with tag("div", klass="card"):
            # Clickable header; this panel starts expanded ("collapse show").
            with tag(
                "a",
                ("data-toggle", "collapse"),
                ("data-target", "#summary_info___collapse_1"),
                ("aria-expanded", "true"),
                ("aria-controls", "summary_info___collapse_1"),
                href="#",
            ):
                with tag("div", klass="card-header", id="summary_info___heading_1"):
                    with tag("button", klass="btn btn-outline-dark"):
                        with tag("h4", klass="m-0"):
                            text("Summary info")
            with tag(
                "div",
                ("aria-labelledby", "summary_info___heading_1"),
                ("data-parent", "#summary_info___accordion"),
                klass="collapse show",
                id="summary_info___collapse_1",
            ):
                with tag("div", klass="card-body"):
                    with tag("p"):
                        text("Date time: {}".format(datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
                    with tag("p"):
                        text("Total jobs: {}".format(num_items))
                    # Guard against division by zero when nothing completed.
                    if num_complete > 0:
                        with tag("p"):
                            text("Complete: {} ({:.2f}%)".format(num_complete, num_complete / num_items * 100))
                        with tag("p"):
                            num_killed = kills_count(db)
                            text("Surviving mutants: {} ({:.2f}%)".format(num_complete - num_killed, survival_rate(db)))
                    else:
                        with tag("p"):
                            text("No jobs completed")
def pycharm_url(filename, line_number):
    """Get a URL for opening a file in Pycharm."""
    return f"pycharm://open?file={filename}&line={line_number}"
| {
"repo_name": "sixty-north/cosmic-ray",
"path": "src/cosmic_ray/tools/html.py",
"copies": "1",
"size": "13851",
"license": "mit",
"hash": -2530627157612315600,
"line_mean": 43.6806451613,
"line_max": 120,
"alpha_frac": 0.4289942964,
"autogenerated": false,
"ratio": 4.357030512739855,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0014798281379808825,
"num_lines": 310
} |
"""A tool for randomly changing words in a Twitter profile.
Use requires creating an application via apps.twitter.com and generating
a Consumer Key, Consumer Secret, Access Token, and Access Token Secret.
The application also requires read and write permission.
Class:
ProfileBot - handles api access and profile updating
"""
import tweepy
import random
class ProfileBot:
    """A tool for randomly changing words in a Twitter profile.

    Methods:
        new_profile - Updates the Twitter profile with a single word
            replaced by a randomly chosen new word. No length checking is
            performed, so make sure it fits.

    Attributes:
        api: Tweepy api wrapper object
    """

    def __init__(self, consumer_key, consumer_secret, access_token,
                 access_token_secret):
        """Initialise the bot with Twitter api auth tokens

        Arguments (all values from apps.twitter.com):
            consumer_key - Twitter consumer key
            consumer_secret - Twitter consumer secret
            access_token - Twitter app access token
            access_token_secret - Twitter app access token secret

        Postcondition:
            A Tweepy api wrapper object is created using the access tokens
        """
        handler = tweepy.OAuthHandler(consumer_key, consumer_secret)
        handler.set_access_token(access_token, access_token_secret)
        self.api = tweepy.API(handler)

    def new_profile(self, profile, oldword, wordlist):
        """Update profile text, replacing *oldword* with a random pick.

        Arguments:
            profile - string with the base profile text
            oldword - string with the word to be replaced
            wordlist - List of potential replacement strings,
                one will be chosen at random

        Postcondition:
            The Twitter profile has been changed to the new string with a
            randomly chosen replacement word. Currently unable to
            determine success or failure - this is missing from Tweepy
            documentation.
        """
        # Substitute a randomly picked replacement into the base text.
        replacement = random.choice(wordlist)
        updated = profile.replace(oldword, replacement)
        # Push the new description to Twitter.
        self.api.update_profile(description=updated)
if __name__ == '__main__':
    import argparse

    # use the first line of the doc string as the program description
    parser = argparse.ArgumentParser(description=__doc__.splitlines()[0])
    # All four credential values come from apps.twitter.com.
    parser.add_argument("--consumer_key", required=True,
                        help="Twitter consumer key")
    parser.add_argument("--consumer_secret", required=True,
                        help="Twitter consumer secret")
    parser.add_argument("--access_token", required=True,
                        help="Twitter app access token")
    parser.add_argument("--access_token_secret", required=True,
                        help="Twitter app access token secret")
    parser.add_argument("--profile", required=True,
                        help="Base profile string, for substitution")
    parser.add_argument("--replace", default="WORD",
                        help="Word to be replaced")
    # 'with' is a Python keyword, so store this option under 'wordlist'.
    parser.add_argument("--with", dest="wordlist", nargs="+", required=True,
                        help="List of possible replacements")
    args = parser.parse_args()
    # create the bot (initialising the auth method)
    bot = ProfileBot(args.consumer_key, args.consumer_secret,
                     args.access_token, args.access_token_secret)
    # update the profile
    bot.new_profile(args.profile, args.replace, args.wordlist)
| {
"repo_name": "mymsy/ProfileBot",
"path": "profilebot.py",
"copies": "1",
"size": "3631",
"license": "cc0-1.0",
"hash": 549323904157862460,
"line_mean": 37.2210526316,
"line_max": 76,
"alpha_frac": 0.6381162214,
"autogenerated": false,
"ratio": 4.777631578947369,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.591574780034737,
"avg_score": null,
"num_lines": null
} |
"""A Tool for using yaml files to create templates for fpdf
"""
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))

# Read the long description from the README so the package index shows it.
with open(path.join(here, 'README.rst'), encoding='utf-8') as readme:
    long_description = readme.read()

setup(
    name='yafte',
    version='0.7.0',
    description='Using yaml for fpdf templates',
    long_description=long_description,
    url='https://github.com/m42e/yamlfpdftemplate',
    download_url='https://github.com/m42e/yamlfpdftemplate/archive/v0.7.0.tar.gz',
    author='Matthias Bilger',
    author_email='matthias@bilger.info',
    license='MIT',
    classifiers=[
        'Topic :: Text Processing',
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.5',
    ],
    keywords='template yaml fpdf',
    packages=find_packages(exclude=['example']),
    install_requires=['pyyaml', 'fpdf'],
)
| {
"repo_name": "m42e/yamlfpdftemplate",
"path": "setup.py",
"copies": "1",
"size": "1035",
"license": "mit",
"hash": 6682284324862429000,
"line_mean": 30.3636363636,
"line_max": 84,
"alpha_frac": 0.6463768116,
"autogenerated": false,
"ratio": 3.484848484848485,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4631225296448485,
"avg_score": null,
"num_lines": null
} |
""" A tool to convert from Zimbra dicts to Python dicts
"Zimbra dicts" means lists in the following form::
[
{
"n": "key",
"_content": "value"
}
]
"""
def zimbra_to_python(zimbra_dict, key_attribute="n",
                     content_attribute="_content"):
    """
    Converts single level Zimbra dicts to a standard python dict

    :param zimbra_dict: The dictionary in Zimbra-Format
    :param key_attribute: name of the key field in each entry
    :param content_attribute: name of the value field in each entry
    :return: A native python dict
    """
    return {
        entry[key_attribute]: entry[content_attribute]
        for entry in zimbra_dict
    }
def get_value(haystack, needle, key_attribute="n",
              content_attribute="_content"):
    """ Fetch a value from a zimbra-like json dict (keys are "n", values are
    "_content")

    This function may be slightly faster than zimbra_to_python(haystack)[
    needle], because it doesn't necessarily iterate over the complete list.

    :param haystack: The list in zimbra-dict format
    :param needle: the key to search for
    :return: the value or None, if the key is not found
    """
    match = next(
        (entry for entry in haystack if entry[key_attribute] == needle),
        None,
    )
    return None if match is None else match[content_attribute]
| {
"repo_name": "Zimbra-Community/python-zimbra",
"path": "pythonzimbra/tools/dict.py",
"copies": "3",
"size": "1257",
"license": "bsd-2-clause",
"hash": 3820945885725757000,
"line_mean": 22.2777777778,
"line_max": 76,
"alpha_frac": 0.6221161496,
"autogenerated": false,
"ratio": 3.7299703264094957,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5852086476009495,
"avg_score": null,
"num_lines": null
} |
"""A tool to create an externally-hosted APK definition JSON from an APK.
For more information see README.md.
"""
import argparse
import base64
from distutils import spawn
import hashlib
import json
import logging
import os.path
import re
import subprocess
import sys
import tempfile
import zipfile
# Enable basic logging to stderr.
# Flip to logging.DEBUG for verbose troubleshooting output:
# logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.WARNING)
# Module-level logger for this tool.
_log = logging.getLogger(__name__)
class MissingPrerequisiteError(Exception):
    """An exception thrown to indicate that a prerequisite check failed.

    See `AbstractApkParser` `check_prerequisites`.
    """
class AbstractApkParser(object):
    """Provides an abstract parser that parses an APK file.

    Takes an APK file and returns a dictionary of attributes describing
    that APK. Sub-classes should override the `parse` method.

    Attributes:
        apk_file (str): The path to the APK file being parsed.
    """

    def __init__(self, apk_file):
        """Create an APK parser with a given APK file.

        Args:
            apk_file: The path to the APK file to parse.
        """
        self.apk_file = apk_file

    def check_prerequisites(self):
        """Validates all pre-requisites of the parser.

        For example, a tool expected in the system PATH that is not
        available. This does not guarantee that the parser will not fail,
        but provides an early indication of definite problems.

        Raises:
            MissingPrerequisiteError: If any problems are encountered.
        """
        if not os.path.exists(self.apk_file):
            raise MissingPrerequisiteError("APK file does not exist - "
                                           + str(self.apk_file))

    def parse(self):
        """Parse the APK. Abstract method that must be overridden.

        Returns:
            A dictionary of elements this parser has extracted from the
            APK, and their values.
        """
        # Fixed the typo in the original message ("overriden").
        raise NotImplementedError("Abstract method not overridden")
class AaptParser(AbstractApkParser):
    """Parses the APK using the AOSP "aapt" tool.

    Specifically, runs the "aapt dump badging" command, and performs basic
    parsing of the output to extract fields that we need.
    """

    # Matches e.g.  package: name='x' versionCode='1' versionName='1.0'
    PACKAGE_MATCH_REGEX = re.compile(
        r"\s*package:\s*name='(.*)'\s*"
        r"versionCode='(\d+)'\s*versionName='(.+)'\s*")
    APPLICATION_REGEX = re.compile(
        r"\s*application:\s*label='(.*)'\s*icon='(.*)'\s*")
    APPLICATION_LABEL = re.compile(r"\s*application-label:\s*'(.*)'\s*")
    SDK_VERSION_REGEX = re.compile(r"\s*sdkVersion:\s*'(.*)'\s*")
    MAX_SDK_VERSION_REGEX = re.compile(r"\s*maxSdkVersion:\s*'(.*)'\s*")
    USES_FEATURE_REGEX = re.compile(r"\s*uses-feature:\s+name='(.*)'\s*")
    # Old uses-permission format:
    #   uses-permission:'android.permission.VIBRATE'
    # New uses-permission format:
    #   uses-permission: name='android.permission.VIBRATE'
    #   uses-permission: name='android.permission.WRITE_EXTERNAL_STORAGE'
    #       maxSdkVersion='18'
    USES_PERMISSION_REGEX_OLD = re.compile(r"\s*uses-permission:\s*'(.*)'\s*")
    USES_PERMISSION_REGEX_NEW = re.compile(
        r"\s*uses-permission:\s*name='(.*?)'\s*(?:maxSdkVersion='(.*)'\s*|)")

    def __init__(self, apk_file):
        """Locate aapt up-front so check_prerequisites can report on it."""
        super(AaptParser, self).__init__(apk_file)
        self.aapt_exe = self.locate_aapt_exe()

    def check_prerequisites(self):
        """Raises MissingPrerequisiteError if aapt is not on the PATH."""
        super(AaptParser, self).check_prerequisites()
        if self.aapt_exe is None:
            raise MissingPrerequisiteError("Couldn't find the aapt binary on "
                                           "system\'s PATH. This binary is part of "
                                           "the Android developer\'s SDK. Please "
                                           "ensure it is available on the PATH.")

    def locate_aapt_exe(self):
        """Return the path to aapt, or None when it is not on the PATH."""
        return spawn.find_executable("aapt")

    def run_aapt(self):
        """Run "aapt dump --values badging" and return its output lines."""
        return subprocess.check_output(
            "\"" + self.aapt_exe + "\" dump --values badging " + self.apk_file,
            stderr=subprocess.STDOUT,
            shell=True).decode('utf-8').splitlines()

    def parse(self):
        """Extract package, version, SDK, feature and permission fields.

        Returns:
            A dict with keys such as "package_name", "version_code",
            "version_name", "minimum_sdk", "maximum_sdk",
            "application_label", "icon_filename", "uses_feature" and
            "uses_permission", for whichever fields aapt reported.
        """
        output = {}
        for line in self.run_aapt():
            # Each regex is tried against every line; at most one matches.
            matches = self.PACKAGE_MATCH_REGEX.match(line)
            if matches:
                _log.info("Matched package")
                output["package_name"] = matches.group(1)
                output["version_code"] = matches.group(2)
                output["version_name"] = matches.group(3)
            matches = self.SDK_VERSION_REGEX.match(line)
            if matches:
                output["minimum_sdk"] = matches.group(1)
            matches = self.MAX_SDK_VERSION_REGEX.match(line)
            if matches:
                output["maximum_sdk"] = matches.group(1)
            matches = self.APPLICATION_LABEL.match(line)
            if matches:
                output["application_label"] = matches.group(1)
            matches = self.APPLICATION_REGEX.match(line)
            if matches:
                # In the case that the explicit "application-label" field is not found
                # in the aapt output, we grab it from the "application" field.
                # (More recent versions of aapt only provide localized versions of
                # application-label in the form "application-label-xx[-XX]".)
                if "application_label" not in output:
                    output["application_label"] = matches.group(1)
                output["icon_filename"] = matches.group(2)
            matches = self.USES_FEATURE_REGEX.match(line)
            if matches:
                output.setdefault("uses_feature", []).append(matches.group(1))
            matches = self.USES_PERMISSION_REGEX_OLD.match(line)
            if matches:
                output.setdefault("uses_permission", []).append({"name": matches.group(1)})
            matches = self.USES_PERMISSION_REGEX_NEW.match(line)
            if matches:
                new_permission = {"name": matches.group(1)}
                try:
                    if matches.group(2) is not None:
                        new_permission.update({"maxSdkVersion": matches.group(2)})
                except IndexError:
                    # No maxSdkVersion - that's OK, it's not mandatory.
                    # NOTE(review): group 2 always exists in the compiled
                    # pattern, so this handler looks unreachable - confirm.
                    pass
                output.setdefault("uses_permission", []).append(new_permission)
        return output
class FileParser(AbstractApkParser):
    """Parses properties of the APK file as a system file.

    Produces the file size plus base64-encoded SHA-1 and SHA-256 digests.
    """

    # Hash in fixed-size chunks so large APKs are never held in memory
    # (the original read the entire file into memory, twice).
    _CHUNK_SIZE = 1024 * 1024

    def parse(self):
        """Return file_size / file_sha1_base64 / file_sha256_base64."""
        sha1 = hashlib.sha1()
        sha256 = hashlib.sha256()
        # Single pass over the file feeds both digests.
        with open(self.apk_file, "rb") as f:
            for chunk in iter(lambda: f.read(self._CHUNK_SIZE), b""):
                sha1.update(chunk)
                sha256.update(chunk)
        return {
            "file_size": os.path.getsize(self.apk_file),
            "file_sha1_base64":
                base64.b64encode(sha1.digest()).decode('ascii'),
            "file_sha256_base64":
                base64.b64encode(sha256.digest()).decode('ascii'),
        }
class JarFileValidationParser(AbstractApkParser):
    """Parser that validates that the APK is a valid zip-aligned JAR.

    Note:
        This parser doesn't actually return any information from the APK,
        (it returns an empty dictionary), but it produces errors if the
        APK fails validation.
    """

    def __init__(self, apk_file):
        """Locate the validation tools up-front."""
        super(JarFileValidationParser, self).__init__(apk_file)
        self.zipalign_exe = self.locate_zipalign_exe()
        self.jarsigner_exe = self.locate_jarsigner_exe()

    def locate_zipalign_exe(self):
        """Return the path to zipalign, or None when not on the PATH."""
        return spawn.find_executable("zipalign")

    def locate_jarsigner_exe(self):
        """Return the path to jarsigner, or None when not on the PATH."""
        return spawn.find_executable("jarsigner")

    def check_prerequisites(self):
        """Raise MissingPrerequisiteError if either tool is missing."""
        super(JarFileValidationParser, self).check_prerequisites()
        if self.zipalign_exe is None:
            raise MissingPrerequisiteError("Couldn't find zipalign binary in "
                                           "system's PATH. This binary is needed "
                                           "to validate the APK.")
        if self.jarsigner_exe is None:
            raise MissingPrerequisiteError("Couldn't find jarsigner binary in "
                                           "system's PATH. This binary is needed "
                                           "to validate the APK.")

    def parse(self):
        """Validate zip alignment and JAR signature; returns an empty dict.

        Raises:
            Exception: when zipalign or jarsigner report a failure.
        """
        # Bug fix: quote the APK path like the executables are quoted, so
        # paths containing spaces do not break the shell command line.
        quoted_apk = "\"" + self.apk_file + "\""
        # Validate that the zip is correctly aligned.
        try:
            subprocess.check_call(
                "\"" + self.zipalign_exe + "\" -c 4 " + quoted_apk,
                shell=True)
        except subprocess.CalledProcessError as e:
            raise Exception("Error: Zip alignment is incorrect", e)
        # Validate that the jar is signed correctly.
        with open(os.devnull, "w") as dev_null:
            try:
                subprocess.check_call(
                    "\"" + self.jarsigner_exe + "\" -verify " + quoted_apk,
                    stdout=dev_null,
                    shell=True)
            except subprocess.CalledProcessError as e:
                raise Exception("Error: JAR signature doesn't validate correctly", e)
        # No new data parsed from APK, return an empty dictionary.
        return {}
class IconParser(AbstractApkParser):
    """Parses the icon from the file as base64 encoded binary.

    Attributes:
        icon_filename (str): Filename of the icon within the APK.
    """

    def __init__(self, apk_file, icon_filename):
        """Remember which member of the APK zip holds the icon."""
        super(IconParser, self).__init__(apk_file)
        self.icon_filename = icon_filename

    def check_prerequisites(self):
        """Also require that an icon filename was discovered upstream."""
        super(IconParser, self).check_prerequisites()
        if self.icon_filename is None:
            raise MissingPrerequisiteError("Couldn't find icon in APK")

    def parse(self):
        """Return {"icon_base64": ...} for the icon stored in the APK."""
        # Bug fix: use a context manager so the ZipFile handle is closed
        # (the original left it open).
        with zipfile.ZipFile(self.apk_file) as jar_zip:
            icon_file_bytes = jar_zip.read(self.icon_filename)
        return {
            "icon_base64":
                base64.b64encode(icon_file_bytes).decode('ascii'),
        }
class CertificateParser(AbstractApkParser):
    """Parses the signing certificate chain from the APK.

    Extracts the META-INF signing files and converts the single expected
    .RSA (PKCS#7 DER) blob to PEM via the openssl command-line tool,
    collecting each certificate's base64 body.
    """

    def __init__(self, apk_file):
        """Locate openssl up-front so check_prerequisites can report on it."""
        super(CertificateParser, self).__init__(apk_file)
        self.openssl_exe = self.locate_openssl_exe()

    def locate_openssl_exe(self):
        """Return the path to openssl, or None when not on the PATH."""
        return spawn.find_executable("openssl")

    def openssl_convert_rsa_cert(self, infile_path, outfile_path):
        """Dump the DER PKCS#7 blob at infile_path as PEM certs to outfile_path."""
        subprocess.check_call("\"" + self.openssl_exe + "\" pkcs7 -in "
                              + infile_path
                              + " -print_certs -inform DER -out "
                              + outfile_path, shell=True)

    def check_prerequisites(self):
        # We use a command line tool rather than the openssl library to
        # simplify install for developers not experienced with Python.
        super(CertificateParser, self).check_prerequisites()
        if self.openssl_exe is None:
            raise MissingPrerequisiteError("Couldn't find openssl commandline tool "
                                           "in system PATH.")

    def parse(self):
        """Return {"certificate_base64": [...]} with the PEM cert bodies.

        NOTE(review): the ZipFile, the mkdtemp directory and the
        delete=False temporary file are never cleaned up - confirm that
        leaving them behind is acceptable for this one-shot CLI tool.
        """
        output = {}
        jar_zip = zipfile.ZipFile(self.apk_file)
        # Find all the files in the JAR manifest (which includes the certificate)
        # and extract into a temporary directory.
        temp_dir = tempfile.mkdtemp()
        for ii in jar_zip.namelist():
            _log.info(ii)
        _log.info("Found " + str(len(jar_zip.namelist())) + " files in the APK")
        manifest_files = [ii for ii in jar_zip.namelist()
                          if ii.startswith("META-INF")]
        for f in manifest_files:
            _log.info("Found file in manfiest folder: " + f)
        jar_zip.extractall(path=temp_dir, members=manifest_files)
        # Look at each file to try and find the RSA certificate file (we expect
        # only one, and throw an error if there are multiple).
        with tempfile.NamedTemporaryFile(
                mode="r+b", delete=False) as temp_out_file:
            _log.info("Writing to temporary output file" + temp_out_file.name)
            for path, _, files in os.walk(temp_dir):
                for f in files:
                    if not f.endswith(".RSA"):
                        continue
                    if output.get("certificate_base64") is not None:
                        raise Exception("Multiple RSA keys - APK should only be signed "
                                        "by a single jarsigner.")
                    # Found the RSA certificate. Use the openssl commandline tool to
                    # extract details.
                    _log.info("Found RSA file: " + os.path.join(path, f))
                    self.openssl_convert_rsa_cert(os.path.join(path, f),
                                                  temp_out_file.name)
                    # Mark the output to indicate we have found a certificate, so that
                    # even if we fail to parse it properly it is counted.
                    output["certificate_base64"] = []
                    # Certificates have been dumped, in order (if a cert chain existed)
                    # in base64 format, which is what we need.
                    # State machine over the PEM lines: collect the base64
                    # body between BEGIN and END markers.
                    certificate = None
                    for ii in temp_out_file:
                        ii = ii.decode('ascii').strip()
                        if re.match(r".*-+BEGIN\s+CERTIFICATE-+.*", ii):
                            _log.debug("Begin certificate line")
                            certificate = ""
                        elif re.match(r".*-+END\s+CERTIFICATE-+.*", ii):
                            _log.debug("End certificate line")
                            output["certificate_base64"].append(certificate)
                            certificate = None
                        elif certificate is not None:
                            certificate += ii
                        else:
                            _log.debug("Skipping non-cert line: " + ii)
        return output
class DataExtractor(object):
    """Class to extract the required information from the apk and hosting url.
    """

    def __init__(self, apk, externallyhostedurl):
        # apk: path to the APK file to describe.
        # externallyhostedurl: URL the APK will be served from; copied
        # verbatim into the output dict.
        self.apk = apk
        self.externallyhostedurl = externallyhostedurl

    def parse(self):
        """Parse the given apk and hosting url.

        This extracts the required information and returns a dict which needs to
        be formatted as JSON to form the file that can be uploaded in the developer
        console."""
        if not os.path.exists(self.apk):
            raise MissingPrerequisiteError("Could not find APK " + self.apk)
        # Create a list of parser that provides all the data we need for our
        # externally-hosted APK JSON.
        parsers = [AaptParser(self.apk), FileParser(self.apk),
                   CertificateParser(self.apk), JarFileValidationParser(self.apk)]
        # Validate the system setup before doing any real work.
        for parser in parsers:
            parser.check_prerequisites()
        # Parse the APK, merging each parser's fields into one dict.
        apk_properties = {}
        for parser in parsers:
            apk_properties.update(parser.parse())
        # Also add the icon (this relies on "icon_filename" parsed by
        # AaptParser above).
        icon_parser = IconParser(self.apk, apk_properties.get("icon_filename"))
        icon_parser.check_prerequisites()
        apk_properties.update(icon_parser.parse())
        # Add in the externally-hosted URL.
        apk_properties["externally_hosted_url"] = self.externallyhostedurl
        return apk_properties
def main():
    """Print an externally-hosted APK JSON definition to stdout."""
    # Declare the two mandatory command-line flags.
    flag_parser = argparse.ArgumentParser(
        description="For a given APK, create an externally hosted APK "
                    "definition file.")
    flag_parser.add_argument("--apk", dest="apk_file_name", required=True)
    flag_parser.add_argument("--externallyHostedUrl",
                             dest="externally_hosted_url", required=True)
    options = flag_parser.parse_args()
    extractor = DataExtractor(options.apk_file_name,
                              options.externally_hosted_url)
    print(json.dumps(extractor.parse(), indent=1))


if __name__ == "__main__":
    main()
| {
"repo_name": "google/play-work",
"path": "externally-hosted-apks/externallyhosted.py",
"copies": "1",
"size": "14819",
"license": "apache-2.0",
"hash": 3748343366583263700,
"line_mean": 34.7084337349,
"line_max": 88,
"alpha_frac": 0.6385721034,
"autogenerated": false,
"ratio": 3.89052244683644,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.502909455023644,
"avg_score": null,
"num_lines": null
} |
"""A tool to create events for Sam."""
###############################################################################
# pylint: disable=global-statement
#
# TODO: [ ]
#
###############################################################################
# standard library imports
import datetime
import logging
# related third party imports
# application specific imports
__version__ = "1.3.9"

# Initialize the module-level logger.
LOGGER = logging.getLogger(__name__)

# Set constants.
# INITIALIZED becomes True once initialize() has run; stop() clears it.
INITIALIZED = False
# Input/output queues, injected by _init().
INPUT = None
OUTPUT = None
# Keyword -> registered handler mapping, refreshed via update_keywords();
# consulted by eEvent.trigger() to skip unused keywords.
FUNC_KEYWORDS = {}
# Monotonically increasing counter used to build each event's uid.
EVENT_ID = 0

LOGGER.debug("I was imported.")
def _parse_keyword(keyword):
"""Parse a keyword into substrings.
e.g.: "system.onstart" gets parsed into:
* "system.*"
* "*.onstart" and
* "system.onstart".
This allows plugins to register to wildcards, for example every message
that begins with "system".
"""
result = []
words = keyword.split(".")
for i in range(len(words)):
for j in range(i+1, len(words)+1):
pre = "" if i == 0 else "*."
post = "" if j == len(words) else ".*"
result.append(pre + ".".join(words[i:j]) + post)
return result
class eEvent(object):
    """An event for Samantha.

    Each event stores information about who triggered it, what type it is
    and of course a keyword + optional data.
    """

    def __init__(self, sender_id, keyword, event_type="trigger",
                 data=None, ttl=0):
        """Initialize a new event.

        Args:
            sender_id: identifier of the component creating the event.
            keyword: dotted keyword, e.g. "system.onstart".
            event_type: "trigger" or "request".
            data: optional payload attached to the event.
            ttl: lifetime in seconds; values <= 0 mean the event never
                expires.

        Raises:
            ValueError: if event_type is neither "trigger" nor "request".
        """
        global EVENT_ID
        self.uid = "e_{:04}".format(EVENT_ID)
        EVENT_ID += 1
        self.sender_id = sender_id
        self.keyword = keyword
        self.parsed_kw_list = _parse_keyword(self.keyword)
        self.creation_time = datetime.datetime.now()
        ttl = int(ttl)
        if ttl > 0:
            self.expiration_time = (self.creation_time +
                                    datetime.timedelta(0, ttl))
        else:
            self.expiration_time = None
        LOGGER.debug("[UID: %s] Building new event (%s): '%s' from %s",
                     self.uid, event_type, keyword, sender_id)
        if event_type in ["trigger", "request"]:
            self.event_type = event_type
        else:
            # Bug fix: the original assigned a "trigger" fallback and then
            # raised anyway, so the fallback (and the message promising it)
            # was unreachable. Fail fast with an accurate message instead.
            raise ValueError("{} is an invalid type. Allowed are 'trigger' "
                             "and 'request'.".format(event_type))
        self.data = data
        self.result = None

    @property
    def expired(self):
        """True once the event's TTL has elapsed (never, without a TTL)."""
        if self.expiration_time is None:
            return False
        return self.expiration_time < datetime.datetime.now()

    def trigger(self):
        """Put the current event into the input queue.

        Events whose keyword no plugin registered for are skipped; events
        are also skipped (with a warning) while the module is not
        initialized, unless the sender id starts with "core".
        """
        kw_in_use = any(kw in FUNC_KEYWORDS for kw in self.parsed_kw_list)
        if not kw_in_use:
            LOGGER.debug("Skipping event '%s' from %s because the keyword is "
                         "not in use.", self.keyword, self.sender_id)
            return
        if INITIALIZED or self.sender_id.startswith("core"):
            INPUT.put(self)
            LOGGER.debug("[UID: %s] Added the event to the queue. INPUT "
                         "currently holds %d items.",
                         self.uid, INPUT.qsize())
        else:
            LOGGER.warning("This module is not initialized correctly. This "
                           "means that booting wasn't successful, or that "
                           "the Server is about to stop.")
def _init(queue_in, queue_out):
    """Wire this module up to the server's input/output queues."""
    global INPUT, OUTPUT
    LOGGER.info("Initializing...")
    INPUT, OUTPUT = queue_in, queue_out
    LOGGER.info("Initialisation complete.")
    return True
def update_keywords(func_keywords):
    """Update the global variable FUNC_KEYWORDS.

    Replaces the keyword mapping that eEvent.trigger() consults to decide
    whether any handler is registered for an event's keyword.
    """
    global FUNC_KEYWORDS
    FUNC_KEYWORDS = func_keywords
def stop():
    """Stop the module.

    Clears the INITIALIZED flag so trigger() stops queueing non-"core"
    events. Always returns True.
    """
    global INITIALIZED
    LOGGER.info("Exiting...")
    INITIALIZED = False
    LOGGER.info("Exited.")
    return True
def initialize(queue_in, queue_out):
    """Initialize the module when not yet initialized."""
    global INITIALIZED
    if INITIALIZED:
        LOGGER.info("Already initialized!")
    else:
        INITIALIZED = _init(queue_in, queue_out)
| {
"repo_name": "Sirs0ri/PersonalAssistant",
"path": "samantha/tools/eventbuilder.py",
"copies": "1",
"size": "4453",
"license": "mit",
"hash": -6466909075252849000,
"line_mean": 27.1835443038,
"line_max": 80,
"alpha_frac": 0.5430047159,
"autogenerated": false,
"ratio": 4.244995233555767,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5287999949455767,
"avg_score": null,
"num_lines": null
} |
"""A tool to inspect the binary size of a built binary file.
This script prints out a tree of symbols and their corresponding sizes, using
Linux's nm functionality.
Usage:
python binary_size.py -- \
--target=/path/to/your/target/binary \
[--nm_command=/path/to/your/custom/nm] \
[--max_depth=10] [--min_size=1024] \
[--color] \
To assist visualization, pass in '--color' to make the symbols color coded to
green, assuming that you have a xterm connection that supports color.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import subprocess
import sys
class Trie(object):
    """A simple class that represents a Trie."""

    def __init__(self, name):
        """Initializes a Trie object.

        Args:
            name: the accumulated symbol-name prefix this node represents.
        """
        self.name = name
        # Total size (bytes) of all symbols sharing this prefix; filled in
        # by GetSymbolTrie.
        self.size = 0
        # Maps the next character of the symbol name to the child node.
        self.dictionary = {}
def GetSymbolTrie(target, nm_command, max_depth):
    """Gets a symbol trie with the passed in target.

    Args:
        target: the target binary to inspect.
        nm_command: the command to run nm.
        max_depth: the maximum depth to create the trie.

    Returns:
        A Trie whose nodes accumulate symbol sizes by name prefix.
    """
    # Run nm to get a dump on the strings.
    # Bug fix: universal_newlines=True makes the pipes text-mode, so the
    # later `out.split('\n')` works on Python 3 (Popen otherwise returns
    # bytes, which cannot be split with a str separator).
    proc = subprocess.Popen(
        [nm_command, '--radix=d', '--size-sort', '--print-size', target],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
        universal_newlines=True)
    nm_out, _ = proc.communicate()
    if proc.returncode != 0:
        print('NM command failed. Output is as follows:')
        print(nm_out)
        sys.exit(1)
    # Run c++filt to get proper symbols.
    proc = subprocess.Popen(['c++filt'],
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            universal_newlines=True)
    out, _ = proc.communicate(input=nm_out)
    if proc.returncode != 0:
        print('c++filt failed. Output is as follows:')
        print(out)
        sys.exit(1)
    # Splits the output to size and function name.
    data = []
    for line in out.split('\n'):
        if line:
            content = line.split(' ')
            if len(content) < 4:
                # This is a line not representing symbol sizes. skip.
                continue
            data.append([int(content[1]), ' '.join(content[3:])])
    # Insert every symbol into the trie one character at a time, adding
    # its size to each prefix node along the way.
    symbol_trie = Trie('')
    for size, name in data:
        curr = symbol_trie
        for c in name:
            if c not in curr.dictionary:
                curr.dictionary[c] = Trie(curr.name + c)
            curr = curr.dictionary[c]
            curr.size += size
            if len(curr.name) > max_depth:
                break
    symbol_trie.size = sum(t.size for t in symbol_trie.dictionary.values())
    return symbol_trie
def MaybeAddColor(s, color):
    """Wrap *s* in the xterm green escape sequence when *color* is set."""
    return '\033[92m{0}\033[0m'.format(s) if color else s
def ReadableSize(num):
    """Convert a byte count into a human-readable size string."""
    value = num
    for unit in ('B', 'KB', 'MB', 'GB'):
        if abs(value) <= 1024.0:
            return '%3.2f%s' % (value, unit)
        value /= 1024.0
    return '%.1f TB' % (value,)
# Note(jiayq): I know, I know, this is a recursive function, but it is
# convenient to write.
def PrintTrie(trie, prefix, max_depth, min_size, color):
    """Prints the symbol trie in a readable manner.

    Args:
        trie: the Trie node to print.
        prefix: string prepended to each printed line (tree drawing).
        max_depth: depth at which to stop descending.
        min_size: suppress nodes whose size is not above this many bytes.
        color: when True, print symbol names in xterm green.
    """
    if len(trie.name) == max_depth or not trie.dictionary.keys():
        # If we are reaching a leaf node or the maximum depth, we will print the
        # result.
        if trie.size > min_size:
            print('{0}{1} {2}'.format(
                prefix,
                MaybeAddColor(trie.name, color),
                ReadableSize(trie.size)))
    elif len(trie.dictionary.keys()) == 1:
        # There is only one child in this dictionary, so we will just delegate
        # to the downstream trie to print stuff.
        # Bug fix: dict.values() is a view and not subscriptable on
        # Python 3; next(iter(...)) replaces the original values()[0].
        PrintTrie(
            next(iter(trie.dictionary.values())),
            prefix, max_depth, min_size, color)
    elif trie.size > min_size:
        print('{0}{1} {2}'.format(
            prefix,
            MaybeAddColor(trie.name, color),
            ReadableSize(trie.size)))
        # Recurse into children, largest first.
        keys_with_sizes = [
            (k, trie.dictionary[k].size) for k in trie.dictionary.keys()]
        keys_with_sizes.sort(key=lambda x: x[1])
        for k, _ in keys_with_sizes[::-1]:
            PrintTrie(
                trie.dictionary[k], prefix + ' |', max_depth, min_size, color)
def main(argv):
    """Entry point: parse flags and print the symbol-size tree.

    Args:
        argv: command-line arguments, excluding the program name.

    Raises:
        RuntimeError: on non-Linux platforms, or when --target is missing.
    """
    # nm/c++filt invocation and output format assume a Linux toolchain.
    if not sys.platform.startswith('linux'):
        raise RuntimeError('Currently this tool only supports Linux.')
    parser = argparse.ArgumentParser(
        description="Tool to inspect binary size.")
    parser.add_argument(
        '--max_depth', type=int, default=10,
        help='The maximum depth to print the symbol tree.')
    parser.add_argument(
        '--min_size', type=int, default=1024,
        help='The mininum symbol size to print.')
    parser.add_argument(
        '--nm_command', type=str, default='nm',
        help='The path to the nm command that the tool needs.')
    parser.add_argument(
        '--color', action='store_true',
        help='If set, use ascii color for output.')
    parser.add_argument(
        '--target', type=str,
        help='The binary target to inspect.')
    args = parser.parse_args(argv)
    if not args.target:
        raise RuntimeError('You must specify a target to inspect.')
    symbol_trie = GetSymbolTrie(
        args.target, args.nm_command, args.max_depth)
    PrintTrie(symbol_trie, '', args.max_depth, args.min_size, args.color)

if __name__ == '__main__':
    main(sys.argv[1:])
| {
"repo_name": "xzturn/caffe2",
"path": "caffe2/python/binarysize.py",
"copies": "3",
"size": "5666",
"license": "apache-2.0",
"hash": -2218679456037026300,
"line_mean": 33.5487804878,
"line_max": 80,
"alpha_frac": 0.5861277797,
"autogenerated": false,
"ratio": 3.762284196547145,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00007527853056308341,
"num_lines": 164
} |
# A tool to interpret a DNA string in FASTA format (Python 2 script,
# Rosalind "ORF" problem).
DNA = {}

# RNA codon -> amino acid table; the three stop codons map to ''.
# NOTE(review): this table is never used below - confirm whether the
# translation step is missing or the table is leftover.
codon = {'UUU': 'F',
         'CUU': 'L',
         'AUU': 'I',
         'GUU': 'V',
         'UUC': 'F',
         'CUC': 'L',
         'AUC': 'I',
         'GUC': 'V',
         'UUA': 'L',
         'CUA': 'L',
         'AUA': 'I',
         'GUA': 'V',
         'UUG': 'L',
         'CUG': 'L',
         'AUG': 'M',
         'GUG': 'V',
         'UCU': 'S',
         'CCU': 'P',
         'ACU': 'T',
         'GCU': 'A',
         'UCC': 'S',
         'CCC': 'P',
         'ACC': 'T',
         'GCC': 'A',
         'UCA': 'S',
         'CCA': 'P',
         'ACA': 'T',
         'GCA': 'A',
         'UCG': 'S',
         'CCG': 'P',
         'ACG': 'T',
         'GCG': 'A',
         'UAU': 'Y',
         'CAU': 'H',
         'AAU': 'N',
         'GAU': 'D',
         'UAC': 'Y',
         'CAC': 'H',
         'AAC': 'N',
         'GAC': 'D',
         'UAA': '',
         'CAA': 'Q',
         'AAA': 'K',
         'GAA': 'E',
         'UAG': '',
         'CAG': 'Q',
         'AAG': 'K',
         'GAG': 'E',
         'UGU': 'C',
         'CGU': 'R',
         'AGU': 'S',
         'GGU': 'G',
         'UGC': 'C',
         'CGC': 'R',
         'AGC': 'S',
         'GGC': 'G',
         'UGA': '',
         'CGA': 'R',
         'AGA': 'R',
         'GGA': 'G',
         'UGG': 'W',
         'CGG': 'R',
         'AGG': 'R',
         'GGG': 'G'}

# Read the FASTA file: a ">name" line starts a record, other lines are
# appended to the current record's sequence.
with open("rosalind_orf.txt") as f:
    for line in f:
        if line[0] == '>':
            s = line.split()
            current_dna = s[0].replace('>','')
            DNA[current_dna] = ''
        else:
            DNA[current_dna] += line

# Remove the embedded newlines from each sequence.
for entry in DNA:
    DNA[entry] = DNA[entry].replace('\n','')

# Collect candidate start and stop codon positions on the forward strand.
# NOTE(review): starts and stops accumulate across ALL entries, mixing
# positions from different sequences - confirm this is intended.
starts = []
stops = []
for entry in DNA:
    for i, letter in enumerate(DNA[entry]):
        try:
            # NOTE(review): an ORF begins at the start codon "ATG";
            # matching "TAG" here looks like a bug - confirm.
            if DNA[entry][i:i+3] == "TAG":
                starts.append(i)
        except IndexError:
            # NOTE(review): slicing never raises IndexError, so this
            # handler (and the ones below) is dead code.
            pass
    for i, letter in enumerate(DNA[entry]):
        try:
            if DNA[entry][i:i+3] in ("TAA","TAG","TGA"):
                stops.append(i)
        except IndexError:
            pass

print starts, stops

# Pair each start with the first in-frame stop after it (same frame:
# i % 3 == j % 3); the stored span is half-open over the stop codon.
strings = []
for i in starts:
    for j in stops:
        if i != j and i < j and i % 3 == j % 3:
            strings.append((i,j + 3))
            break

print strings

# Build the reverse complement to repeat the scan on the other strand.
comp = { 'A' : 'T',
         'T' : 'A',
         'G' : 'C',
         'C' : 'G'}
t = ''
# NOTE(review): t is overwritten each iteration, so only the last entry's
# reverse complement survives - confirm single-record input is assumed.
for entry in DNA:
    s = DNA[entry][::-1]
    t = [comp[x] for x in s]

print t

# Same start/stop scan on the reverse-complement strand.
tstarts = []
tstops = []
t = ''.join(t)
for i, letter in enumerate(t):
    try:
        if t[i:i+3] == "TAG":
            tstarts.append(i)
    except IndexError:
        pass
for i, letter in enumerate(t):
    try:
        if t[i:i+3] in ("TAA","TAG","TGA"):
            tstops.append(i)
    except IndexError:
        pass

print tstarts, tstops

tstrings = []
for i in tstarts:
    for j in tstops:
        if i != j and i < j and i % 3 == j % 3:
            tstrings.append((i,j + 3))
            break
| {
"repo_name": "Zhyll/rosalind",
"path": "rosalind_orf.py",
"copies": "1",
"size": "2423",
"license": "mit",
"hash": -6564483075173655000,
"line_mean": 14.6322580645,
"line_max": 56,
"alpha_frac": 0.4271564177,
"autogenerated": false,
"ratio": 2.341062801932367,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3268219219632367,
"avg_score": null,
"num_lines": null
} |
# A tool to interpret a DNA string in FASTA format (Python 2 script,
# Rosalind "CONS" problem): prints a consensus string and profile matrix.
DNA = {}

# Read the FASTA file: a ">name" line starts a record, other lines are
# appended to the current record's sequence.
with open("rosalind_cons.txt") as f:
    for line in f:
        if line[0] == '>':
            s = line.split()
            current_dna = s[0].replace('>','')
            DNA[current_dna] = ''
        else:
            DNA[current_dna] += line

# Strip embedded newlines. NOTE(review): l keeps the length of the LAST
# entry only - the problem guarantees equal-length sequences; confirm.
for entry in DNA:
    DNA[entry] = DNA[entry].replace('\n','')
    l = len(DNA[entry])

# mat holds the per-position base counts (the profile matrix); con
# accumulates the consensus base for each position.
mat = {'A':[],'C':[],'G':[],'T':[]}
con = []
counter = {}
for i in range(l):
    # Count each base across all sequences at position i.
    counter['A'] = 0
    counter['C'] = 0
    counter['G'] = 0
    counter['T'] = 0
    maxi = 0
    app = ''
    for entry in DNA:
        if DNA[entry][i] == 'A':
            counter['A'] += 1
        if DNA[entry][i] == 'C':
            counter['C'] += 1
        if DNA[entry][i] == 'G':
            counter['G'] += 1
        if DNA[entry][i] == 'T':
            counter['T'] += 1
    # The consensus base is the most frequent base at this position
    # (ties broken by dict iteration order).
    for c in counter:
        if counter[c] > maxi:
            maxi = counter[c]
            app = c
    con.append(app)
    mat['A'].append(str(counter['A']))
    mat['C'].append(str(counter['C']))
    mat['G'].append(str(counter['G']))
    mat['T'].append(str(counter['T']))

# Emit the consensus string followed by the four profile rows.
print ''.join(con)
print 'A:', ' '.join(mat['A'])
print 'C:', ' '.join(mat['C'])
print 'G:', ' '.join(mat['G'])
print 'T:', ' '.join(mat['T'])
| {
"repo_name": "Zhyll/rosalind",
"path": "rosalind7.py",
"copies": "1",
"size": "1124",
"license": "mit",
"hash": -7722205258055666000,
"line_mean": 17.4262295082,
"line_max": 47,
"alpha_frac": 0.5097864769,
"autogenerated": false,
"ratio": 2.4172043010752686,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.34269907779752684,
"avg_score": null,
"num_lines": null
} |
# A tool to locate and extract the Windows 10 Spotlight lockscreen images. Filters files using [Pillow](https://python-pillow.org/), a Python Imaging Library (PIL) fork.
import os
import shutil
import glob
from PIL import Image
def invalidResolution(filePath):
    """Return True when the image is portrait-oriented or smaller than
    1920x1080 (i.e. not usable as a desktop wallpaper).

    Args:
        filePath: path to an image file readable by Pillow.
    """
    # Bug fix: use a context manager so the underlying file handle is
    # released promptly (Image.open lazily keeps the file open).
    with Image.open(filePath) as image:
        width, height = image.size
    return width < height or width < 1920 or height < 1080
# NOTE(review): the username is hard-coded; getpass.getuser() would make
# this portable across accounts - confirm.
username = "Ajay"
# Windows Spotlight caches lockscreen images (extensionless) here.
assetDirectory = "C:/Users/{0}/AppData/Local/Packages/Microsoft.Windows.ContentDeliveryManager_cw5n1h2txyewy/LocalState/Assets/".format(username)
saveDirectory = "C:/Users/{0}/Desktop/Wallpapers/".format(username)
fileExtension = ".png"
# 1 KB = 1024 bytes; files under 100 KB are treated as icons/thumbnails.
byteConversion = 1024
minimumFileSizeKilobyte = 100

os.chdir(assetDirectory)
assetFiles = glob.glob("*")

if len(assetFiles) > 0:
    if not os.path.exists(saveDirectory):
        os.makedirs(saveDirectory)
    for assetFile in assetFiles:
        oldFilePath = assetDirectory + assetFile
        newFilePath = saveDirectory + assetFile + fileExtension
        # Copy only large-enough files, then discard any copy that fails
        # the landscape/minimum-resolution check.
        if os.stat(oldFilePath).st_size / byteConversion > minimumFileSizeKilobyte:
            shutil.copyfile(oldFilePath, newFilePath)
            if invalidResolution(newFilePath):
                os.remove(newFilePath)
| {
"repo_name": "AjayAujla/PythonUtilities",
"path": "Windows10SpotlightLockscreenImages.py",
"copies": "1",
"size": "1335",
"license": "mit",
"hash": 2381925169967214000,
"line_mean": 35.0810810811,
"line_max": 169,
"alpha_frac": 0.661423221,
"autogenerated": false,
"ratio": 4.045454545454546,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5206877766454546,
"avg_score": null,
"num_lines": null
} |
# A tool to setup the Python registry.
class error(Exception):
    """Generic error raised by the registry-setup helpers in this module."""
    pass
import sys # at least we can count on this!
def FileExists(fname):
    """Check if a file exists. Returns true or false.

    Args:
        fname: path to test.
    """
    import os
    # os.path.exists wraps the same os.stat probe the original used, and
    # also drops the unused exception-details variable.
    return os.path.exists(fname)
def IsPackageDir(path, packageName, knownFileName):
    """Given a path, a ni package name, and possibly a known file name in
    the root of the package, see if this path is good.
    """
    import os
    # With no marker file, probe the package directory itself via ".".
    marker = "." if knownFileName is None else knownFileName
    return FileExists(os.path.join(path, packageName, marker))
def IsDebug():
    """Return "_d" if we're running a debug version.

    This is to be used within DLL names when locating them.
    """
    # The "imp" module this originally used was deprecated and removed in
    # Python 3.12; importlib.machinery exposes the same suffix list.
    import importlib.machinery
    if '_d.pyd' in importlib.machinery.EXTENSION_SUFFIXES:
        return '_d'
    return ''
def FindPackagePath(packageName, knownFileName, searchPaths):
    """Find a package.

    Given a ni style package name, check the package is registered.
    First place looked is the registry for an existing entry. Then
    the searchPaths are searched.
    """
    import regutil, os
    registered = regutil.GetRegisteredNamedPath(packageName)
    if registered and IsPackageDir(registered, packageName, knownFileName):
        # The currently registered path already holds the package.
        return registered, None
    # Otherwise walk the supplied search paths.
    for candidate in searchPaths:
        if IsPackageDir(candidate, packageName, knownFileName):
            resolved = os.path.abspath(candidate)
            return resolved, resolved
    raise error("The package %s can not be located" % packageName)
def FindHelpPath(helpFile, helpDesc, searchPaths):
    """Locate the directory containing *helpFile*.

    Checks the registered Windows Help location first, then each search
    path and its "Help" subdirectory. Raises `error` when not found.
    """
    # See if the current registry entry is OK
    import os, win32api, win32con
    try:
        key = win32api.RegOpenKey(win32con.HKEY_LOCAL_MACHINE, "Software\\Microsoft\\Windows\\Help", 0, win32con.KEY_ALL_ACCESS)
        try:
            try:
                path = win32api.RegQueryValueEx(key, helpDesc)[0]
                if FileExists(os.path.join(path, helpFile)):
                    return os.path.abspath(path)
            except win32api.error:
                pass # no registry entry.
        finally:
            # Always release the registry handle.
            key.Close()
    except win32api.error:
        # The Help key itself could not be opened; fall back to searching.
        pass
    for pathLook in searchPaths:
        if FileExists(os.path.join(pathLook, helpFile)):
            return os.path.abspath(pathLook)
        # Also check a "Help" subdirectory of each search path.
        pathLook = os.path.join(pathLook, "Help")
        if FileExists(os.path.join(pathLook, helpFile)):
            return os.path.abspath(pathLook)
    raise error("The help file %s can not be located" % helpFile)
def FindAppPath(appName, knownFileName, searchPaths):
    """Find an application.

    First place looked is the registry for an existing entry. Then
    the searchPaths are searched.
    """
    # Check the currently registered path first.
    import regutil, string, os
    registered = regutil.GetRegisteredNamedPath(appName)
    if registered:
        firstPath = registered.split(";")[0]
        if FileExists(os.path.join(firstPath, knownFileName)):
            # The currently registered one is good - nothing to change.
            return None
    # Fall back to scanning the supplied search paths.
    for candidate in searchPaths:
        if FileExists(os.path.join(candidate, knownFileName)):
            return os.path.abspath(candidate)
    raise error("The file %s can not be located for application %s" % (knownFileName, appName))
def FindPythonExe(exeAlias, possibleRealNames, searchPaths):
    """Find an exe.

    Returns the full path to the .exe, and a boolean indicating if the current
    registered entry is OK. We don't trust the already registered version even
    if it exists - it may be wrong (ie, for a different Python version)
    """
    import win32api, regutil, string, os, sys
    if possibleRealNames is None:
        possibleRealNames = exeAlias
    # Look first in Python's home.
    found = os.path.join(sys.prefix, possibleRealNames)
    if not FileExists(found): # for developers
        # Source checkouts keep binaries under PCBuild[/amd64].
        if "64 bit" in sys.version:
            found = os.path.join(sys.prefix, "PCBuild", "amd64", possibleRealNames)
        else:
            found = os.path.join(sys.prefix, "PCBuild", possibleRealNames)
    if not FileExists(found):
        # Last resort: scan the search paths (may prompt via a dialog).
        found = LocateFileName(possibleRealNames, searchPaths)
    registered_ok = 0
    try:
        registered = win32api.RegQueryValue(regutil.GetRootKey(), regutil.GetAppPathsKey() + "\\" + exeAlias)
        registered_ok = found==registered
    except win32api.error:
        # No App Paths entry yet - report not-OK so it gets registered.
        pass
    return found, registered_ok
def QuotedFileName(fname):
    """Given a filename, return a quoted version if necessary.

    A name containing a space is wrapped in double quotes; any other name
    is returned unchanged.
    """
    # The original imported regutil and string here but used neither; a
    # membership test also replaces the index()+ValueError idiom.
    if " " in fname:
        return '"%s"' % fname
    return fname
def LocateFileName(fileNamesString, searchPaths):
    """Locate a file name, anywhere on the search path.

    If the file can not be located, prompt the user to find it for us
    (using a common OpenFile dialog)

    Raises KeyboardInterrupt if the user cancels.

    :param fileNamesString: one or more candidate names, ";"-separated.
    :param searchPaths: directories to probe for each candidate name.
    :return: absolute path to the located file.
    """
    import regutil, string, os
    fileNames = fileNamesString.split(";")
    for path in searchPaths:
        for fileName in fileNames:
            try:
                retPath = os.path.join(path, fileName)
                os.stat(retPath)
                break
            except os.error:
                retPath = None
        if retPath:
            break
    else:
        # for/else: no search path matched - fall back to asking the user.
        fileName = fileNames[0]
        try:
            import win32ui, win32con
        except ImportError:
            raise error("Need to locate the file %s, but the win32ui module is not available\nPlease run the program again, passing as a parameter the path to this file." % fileName)
        # Display a common dialog to locate the file.
        flags=win32con.OFN_FILEMUSTEXIST
        ext = os.path.splitext(fileName)[1]
        filter = "Files of requested type (*%s)|*%s||" % (ext,ext)
        dlg = win32ui.CreateFileDialog(1,None,fileName,flags,filter,None)
        dlg.SetOFNTitle("Locate " + fileName)
        if dlg.DoModal() != win32con.IDOK:
            raise KeyboardInterrupt("User cancelled the process")
        retPath = dlg.GetPathName()
    return os.path.abspath(retPath)
def LocatePath(fileName, searchPaths):
    """Like LocateFileName, but returns the containing directory only."""
    import os
    located = LocateFileName(fileName, searchPaths)
    return os.path.abspath(os.path.dirname(located))
def LocateOptionalPath(fileName, searchPaths):
    """Like LocatePath, but returns None if the user cancels."""
    try:
        return LocatePath(fileName, searchPaths)
    except KeyboardInterrupt:
        # User cancelled the locate dialog - treat as "not found".
        return None
def LocateOptionalFileName(fileName, searchPaths = None):
    """Like LocateFileName, but returns None if the user cancels."""
    try:
        return LocateFileName(fileName, searchPaths)
    except KeyboardInterrupt:
        # User cancelled the locate dialog - treat as "not found".
        return None
def LocatePythonCore(searchPaths):
    """Locate and validate the core Python directories. Returns a list
    of paths that should be used as the core (ie, un-named) portion of
    the Python path.
    """
    import os, regutil

    registered = regutil.GetRegisteredNamedPath(None)
    candidates = registered.split(";") if registered else [os.path.abspath(".")]

    def _find(marker):
        # Look for a directory holding `marker`: first among the currently
        # registered paths, then (interactively) via the search paths.
        for candidate in candidates:
            if FileExists(os.path.join(candidate, marker)):
                return candidate
        if searchPaths is not None:
            return LocatePath(marker, searchPaths)
        return None

    # The standard library is identified by os.py ...
    libPath = _find("os.py")
    if libPath is None:
        raise error("The core Python library could not be located.")

    # ... and the binary core by a known .pyd (debug builds use a _d suffix).
    corePath = _find("unicodedata%s.pyd" % IsDebug())
    if corePath is None:
        raise error("The core Python path could not be located.")

    installPath = os.path.abspath(os.path.join(libPath, ".."))
    return installPath, [libPath, corePath]
def FindRegisterPackage(packageName, knownFile, searchPaths, registryAppName = None):
    """Find and Register a package.

    Assumes the core registry setup correctly.
    In addition, if the location located by the package is already
    in the **core** path, then an entry is registered, but no path.
    (no other paths are checked, as the application whose path was used
    may later be uninstalled. This should not happen with the core)
    """
    # (unused "string" import removed)
    import regutil
    if not packageName:
        raise error("A package name must be supplied")
    corePaths = regutil.GetRegisteredNamedPath(None).split(";")
    if not searchPaths:
        searchPaths = corePaths
    registryAppName = registryAppName or packageName
    try:
        pathLook, pathAdd = FindPackagePath(packageName, knownFile, searchPaths)
        if pathAdd is not None:
            # Already on the core path?  Register the name with no path.
            if pathAdd in corePaths:
                pathAdd = ""
            regutil.RegisterNamedPath(registryAppName, pathAdd)
        return pathLook
    except error as details:
        print("*** The %s package could not be registered - %s" % (packageName, details))
        print("*** Please ensure you have passed the correct paths on the command line.")
        print("*** - For packages, you should pass a path to the packages parent directory,")
        print("*** - and not the package directory itself...")
def FindRegisterApp(appName, knownFiles, searchPaths):
    """Find and register an application's paths.

    Assumes the core registry setup correctly.  knownFiles may be a single
    file name or a list of them; one path is registered per file found.
    """
    # (unused "string" import removed)
    import regutil
    # Accept a bare string as shorthand for a one-element list.
    if isinstance(knownFiles, str):
        knownFiles = [knownFiles]
    paths = []
    try:
        for knownFile in knownFiles:
            pathLook = FindAppPath(appName, knownFile, searchPaths)
            if pathLook:
                paths.append(pathLook)
    except error as details:
        print("*** ", details)
        return
    regutil.RegisterNamedPath(appName, ";".join(paths))
def FindRegisterPythonExe(exeAlias, searchPaths, actualFileNames = None):
    """Find and Register a Python exe (not necessarily *the* python.exe).

    Assumes the core registry setup correctly.
    """
    # (unused "string" import removed)
    import regutil
    fname, ok = FindPythonExe(exeAlias, actualFileNames, searchPaths)
    if not ok:
        # Registration missing or pointing at the wrong exe - (re)register.
        regutil.RegisterPythonExe(fname, exeAlias)
    return fname
def FindRegisterHelpFile(helpFile, searchPaths, helpDesc = None ):
    """Locate helpFile and register it with the Windows help system.

    Failure to locate the file is reported on stdout but is not fatal.
    """
    import regutil
    try:
        helpPath = FindHelpPath(helpFile, helpDesc, searchPaths)
    except error as details:
        print("*** ", details)
        return
    regutil.RegisterHelpFile(helpFile, helpPath, helpDesc)
def SetupCore(searchPaths):
    """Setup the core Python information in the registry.
    This function makes no assumptions about the current state of sys.path.
    After this function has completed, you should have access to the standard
    Python library, and the standard Win32 extensions
    """
    import sys
    # Extend sys.path up-front so the imports below can be satisfied even
    # from a broken installation.
    for path in searchPaths:
        sys.path.append(path)
    import os
    import regutil, win32api,win32con
    installPath, corePaths = LocatePythonCore(searchPaths)
    # Register the core Pythonpath.
    print(corePaths)
    regutil.RegisterNamedPath(None, ';'.join(corePaths))
    # Register the install path.
    hKey = win32api.RegCreateKey(regutil.GetRootKey() , regutil.BuildDefaultPythonKey())
    try:
        # Core Paths.
        win32api.RegSetValue(hKey, "InstallPath", win32con.REG_SZ, installPath)
    finally:
        win32api.RegCloseKey(hKey)
    # Register the win32 core paths.
    # NOTE(review): win32paths is computed but never used below - looks like
    # legacy code; confirm before removing.
    win32paths = os.path.abspath( os.path.split(win32api.__file__)[0]) + ";" + \
                 os.path.abspath( os.path.split(LocateFileName("win32con.py;win32con.pyc", sys.path ) )[0] )
    # Python has builtin support for finding a "DLLs" directory, but
    # not a PCBuild. Having it in the core paths means it is ignored when
    # an EXE not in the Python dir is hosting us - so we add it as a named
    # value
    check = os.path.join(sys.prefix, "PCBuild")
    if "64 bit" in sys.version:
        check = os.path.join(check, "amd64")
    if os.path.isdir(check):
        regutil.RegisterNamedPath("PCBuild",check)
def RegisterShellInfo(searchPaths):
    """Registers key parts of the Python installation with the Windows Shell.
    Assumes a valid, minimal Python installation exists
    (ie, SetupCore() has been previously successfully run)
    """
    import regutil, win32con
    suffix = IsDebug()
    # Set up a pointer to the .exe's
    exePath = FindRegisterPythonExe("Python%s.exe" % suffix, searchPaths)
    # Associate .py files with the interpreter: class, "Run" verb, icon.
    regutil.SetRegistryDefaultValue(".py", "Python.File", win32con.HKEY_CLASSES_ROOT)
    regutil.RegisterShellCommand("Open", QuotedFileName(exePath)+" \"%1\" %*", "&Run")
    regutil.SetRegistryDefaultValue("Python.File\\DefaultIcon", "%s,0" % exePath, win32con.HKEY_CLASSES_ROOT)
    # Register whichever documentation files can be found.
    FindRegisterHelpFile("Python.hlp", searchPaths, "Main Python Documentation")
    FindRegisterHelpFile("ActivePython.chm", searchPaths, "Main Python Documentation")
    # We consider the win32 core, as it contains all the win32 api type
    # stuff we need.
    # FindRegisterApp("win32", ["win32con.pyc", "win32api%s.pyd" % suffix], searchPaths)
# Command-line help text; %s is replaced by this script's name.
usage = """\
regsetup.py - Setup/maintain the registry for Python apps.
Run without options, (but possibly search paths) to repair a totally broken
python registry setup. This should allow other options to work.
Usage: %s [options ...] paths ...
-p packageName -- Find and register a package. Looks in the paths for
a sub-directory with the name of the package, and
adds a path entry for the package.
-a appName -- Unconditionally add an application name to the path.
A new path entry is create with the app name, and the
paths specified are added to the registry.
-c -- Add the specified paths to the core Pythonpath.
If a path appears on the core path, and a package also
needs that same path, the package will not bother
registering it. Therefore, By adding paths to the
core path, you can avoid packages re-registering the same path.
-m filename -- Find and register the specific file name as a module.
Do not include a path on the filename!
--shell -- Register everything with the Win95/NT shell.
--upackage name -- Unregister the package
--uapp name -- Unregister the app (identical to --upackage)
--umodule name -- Unregister the module
--description -- Print a description of the usage.
--examples -- Print examples of usage.
""" % sys.argv[0]
# Longer explanation printed for --description.
description="""\
If no options are processed, the program attempts to validate and set
the standard Python path to the point where the standard library is
available. This can be handy if you move Python to a new drive/sub-directory,
in which case most of the options would fail (as they need at least string.py,
os.py etc to function.)
Running without options should repair Python well enough to run with
the other options.
paths are search paths that the program will use to seek out a file.
For example, when registering the core Python, you may wish to
provide paths to non-standard places to look for the Python help files,
library files, etc.
See also the "regcheck.py" utility which will check and dump the contents
of the registry.
"""
# Sample invocations printed for --examples.
examples="""\
Examples:
"regsetup c:\\wierd\\spot\\1 c:\\wierd\\spot\\2"
Attempts to setup the core Python. Looks in some standard places,
as well as the 2 wierd spots to locate the core Python files (eg, Python.exe,
python14.dll, the standard library and Win32 Extensions.
"regsetup -a myappname . .\subdir"
Registers a new Pythonpath entry named myappname, with "C:\\I\\AM\\HERE" and
"C:\\I\\AM\\HERE\subdir" added to the path (ie, all args are converted to
absolute paths)
"regsetup -c c:\\my\\python\\files"
Unconditionally add "c:\\my\\python\\files" to the 'core' Python path.
"regsetup -m some.pyd \\windows\\system"
Register the module some.pyd in \\windows\\system as a registered
module. This will allow some.pyd to be imported, even though the
windows system directory is not (usually!) on the Python Path.
"regsetup --umodule some"
Unregister the module "some". This means normal import rules then apply
for that module.
"""
if __name__=='__main__':
    if len(sys.argv)>1 and sys.argv[1] in ['/?','-?','-help','-h']:
        print(usage)
    elif len(sys.argv)==1 or not sys.argv[1][0] in ['/','-']:
        # No args, or useful args.
        # Repair mode: rebuild the core registration from sys.path plus any
        # extra paths given on the command line.
        searchPath = sys.path[:]
        for arg in sys.argv[1:]:
            searchPath.append(arg)
        # Good chance we are being run from the "regsetup.py" directory.
        # Typically this will be "\somewhere\win32\Scripts" and the
        # "somewhere" and "..\Lib" should also be searched.
        searchPath.append("..\\Build")
        searchPath.append("..\\Lib")
        searchPath.append("..")
        searchPath.append("..\\..")
        # for developers:
        # also search somewhere\lib, ..\build, and ..\..\build
        searchPath.append("..\\..\\lib")
        searchPath.append("..\\build")
        if "64 bit" in sys.version:
            searchPath.append("..\\..\\pcbuild\\amd64")
        else:
            searchPath.append("..\\..\\pcbuild")
        print("Attempting to setup/repair the Python core")
        SetupCore(searchPath)
        RegisterShellInfo(searchPath)
        FindRegisterHelpFile("PyWin32.chm", searchPath, "Pythonwin Reference")
        # Check the registry.
        print("Registration complete - checking the registry...")
        import regcheck
        regcheck.CheckRegistry()
    else:
        # Option mode: non-option args are search paths for the options.
        searchPaths = []
        import getopt, string
        opts, args = getopt.getopt(sys.argv[1:], 'p:a:m:c',
            ['shell','upackage=','uapp=','umodule=','description','examples'])
        for arg in args:
            searchPaths.append(arg)
        # Note: deliberately independent "if"s - several options may be
        # combined in one invocation.
        for o,a in opts:
            if o=='--description':
                print(description)
            if o=='--examples':
                print(examples)
            if o=='--shell':
                print("Registering the Python core.")
                RegisterShellInfo(searchPaths)
            if o=='-p':
                print("Registering package", a)
                FindRegisterPackage(a,None,searchPaths)
            if o in ['--upackage', '--uapp']:
                import regutil
                print("Unregistering application/package", a)
                regutil.UnregisterNamedPath(a)
            if o=='-a':
                import regutil
                path = ";".join(searchPaths)
                print("Registering application", a,"to path",path)
                regutil.RegisterNamedPath(a,path)
            if o=='-c':
                if not len(searchPaths):
                    raise error("-c option must provide at least one additional path")
                import win32api, regutil
                currentPaths = regutil.GetRegisteredNamedPath(None).split(";")
                oldLen = len(currentPaths)
                for newPath in searchPaths:
                    if newPath not in currentPaths:
                        currentPaths.append(newPath)
                if len(currentPaths)!=oldLen:
                    print("Registering %d new core paths" % (len(currentPaths)-oldLen))
                    regutil.RegisterNamedPath(None,";".join(currentPaths))
                else:
                    print("All specified paths are already registered.")
| {
"repo_name": "int19h/PTVS",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/win32/scripts/regsetup.py",
"copies": "7",
"size": "20020",
"license": "apache-2.0",
"hash": -1263399927776883500,
"line_mean": 37.5741811175,
"line_max": 182,
"alpha_frac": 0.6448051948,
"autogenerated": false,
"ratio": 4.037104254890099,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0053829933093316506,
"num_lines": 519
} |
# A tool to setup the Python registry.
class error(Exception):
    """Exception raised for all anticipated regsetup failures."""
import sys # at least we can count on this!
def FileExists(fname):
    """Check if a file exists.  Returns 1 (true) or 0 (false)."""
    import os
    try:
        os.stat(fname)
        return 1
    except os.error:
        # stat failed - missing file or inaccessible path.  The exception
        # value was previously bound to an unused name; dropping it also
        # makes the clause valid on both Python 2 and 3.
        return 0
def IsPackageDir(path, packageName, knownFileName):
    """Given a path, a ni package name, and possibly a known file name in
    the root of the package, see if this path is good.
    """
    import os
    if knownFileName is None:
        # Nothing specific to probe for - just check the directory itself.
        knownFileName = "."
    probe = os.path.join(path, packageName, knownFileName)
    return FileExists(probe)
def IsDebug():
    """Return "_d" if we're running a debug version.
    This is to be used within DLL names when locating them.
    """
    import imp
    suffixes = [info[0] for info in imp.get_suffixes()]
    return '_d' if '_d.pyd' in suffixes else ''
def FindPackagePath(packageName, knownFileName, searchPaths):
    """Find a package.
    Given a ni style package name, check the package is registered.
    First place looked is the registry for an existing entry. Then
    the searchPaths are searched.

    Returns (path, pathToRegister) - pathToRegister is None when the
    existing registration is already good.
    """
    import regutil, os
    pathLook = regutil.GetRegisteredNamedPath(packageName)
    if pathLook and IsPackageDir(pathLook, packageName, knownFileName):
        return pathLook, None # The currently registered one is good.
    # Search down the search paths.
    for pathLook in searchPaths:
        if IsPackageDir(pathLook, packageName, knownFileName):
            # Found it
            ret = os.path.abspath(pathLook)
            return ret, ret
    # Call form of raise - valid on both Python 2 and 3.
    raise error("The package %s can not be located" % packageName)
def FindHelpPath(helpFile, helpDesc, searchPaths):
    """Locate the directory containing helpFile.

    The existing registration under Windows\\Help is checked first, then
    each search path and its "Help" subdirectory.  Raises error when the
    file can not be found anywhere.
    """
    # See if the current registry entry is OK
    import os, win32api, win32con
    try:
        key = win32api.RegOpenKey(win32con.HKEY_LOCAL_MACHINE, "Software\\Microsoft\\Windows\\Help", 0, win32con.KEY_ALL_ACCESS)
        try:
            try:
                path = win32api.RegQueryValueEx(key, helpDesc)[0]
                if FileExists(os.path.join(path, helpFile)):
                    return os.path.abspath(path)
            except win32api.error:
                pass # no registry entry.
        finally:
            key.Close()
    except win32api.error:
        pass
    for pathLook in searchPaths:
        if FileExists(os.path.join(pathLook, helpFile)):
            return os.path.abspath(pathLook)
        # Also try a "Help" subdirectory of each search path.
        pathLook = os.path.join(pathLook, "Help")
        if FileExists(os.path.join( pathLook, helpFile)):
            return os.path.abspath(pathLook)
    # Call form of raise - valid on both Python 2 and 3.
    raise error("The help file %s can not be located" % helpFile)
def FindAppPath(appName, knownFileName, searchPaths):
    """Find an application.
    First place looked is the registry for an existing entry. Then
    the searchPaths are searched.

    Returns None when the existing registration is already good, the
    absolute path to register otherwise.
    """
    # Look in the first path.
    import regutil, string, os
    regPath = regutil.GetRegisteredNamedPath(appName)
    if regPath:
        pathLook = string.split(regPath,";")[0]
    if regPath and FileExists(os.path.join(pathLook, knownFileName)):
        return None # The currently registered one is good.
    # Search down the search paths.
    for pathLook in searchPaths:
        if FileExists(os.path.join(pathLook, knownFileName)):
            # Found it
            return os.path.abspath(pathLook)
    # Call form of raise - valid on both Python 2 and 3.
    raise error("The file %s can not be located for application %s" % (knownFileName, appName))
def FindPythonExe(exeAlias, possibleRealNames, searchPaths):
    """Find an exe.
    Returns the full path to the .exe, and a boolean indicating if the current
    registered entry is OK. We don't trust the already registered version even
    if it exists - it may be wrong (ie, for a different Python version)
    """
    import win32api, regutil, string, os, sys
    if possibleRealNames is None:
        possibleRealNames = exeAlias
    # Look first in Python's home.
    found = os.path.join(sys.prefix, possibleRealNames)
    if not FileExists(found): # for developers
        # NOTE(review): no PCBuild\amd64 fallback here for 64-bit source
        # builds - confirm whether 64-bit developer setups need one.
        found = os.path.join(sys.prefix, "PCBuild", possibleRealNames)
    if not FileExists(found):
        # Last resort - search the supplied paths / prompt the user.
        found = LocateFileName(possibleRealNames, searchPaths)
    registered_ok = 0
    try:
        registered = win32api.RegQueryValue(regutil.GetRootKey(), regutil.GetAppPathsKey() + "\\" + exeAlias)
        registered_ok = found==registered
    except win32api.error:
        # No existing registration - caller will create one.
        pass
    return found, registered_ok
def QuotedFileName(fname):
    """Given a filename, return a quoted version if necessary.

    A name containing a space must be quoted for use on a command line.
    """
    # Unused "regutil" import removed; a membership test replaces the
    # string.index()/ValueError dance (works on Python 2 and 3).
    if " " in fname:
        return '"%s"' % fname
    return fname
def LocateFileName(fileNamesString, searchPaths):
    """Locate a file name, anywhere on the search path.

    fileNamesString may hold several alternative names separated by ";".
    If the file can not be located, prompt the user to find it for us
    (using a common OpenFile dialog).
    Raises KeyboardInterrupt if the user cancels.
    """
    # Unused "regutil" import removed; str.split, "!=", and the call form
    # of raise are all valid on both Python 2 and 3.
    import os
    fileNames = fileNamesString.split(";")
    for path in searchPaths:
        for fileName in fileNames:
            try:
                retPath = os.path.join(path, fileName)
                os.stat(retPath)
                break
            except os.error:
                retPath = None
        if retPath:
            break
    else:
        # Nothing found - fall back to asking the user.
        fileName = fileNames[0]
        try:
            import win32ui, win32con
        except ImportError:
            raise error("Need to locate the file %s, but the win32ui module is not available\nPlease run the program again, passing as a parameter the path to this file." % fileName)
        # Display a common dialog to locate the file.
        flags = win32con.OFN_FILEMUSTEXIST
        ext = os.path.splitext(fileName)[1]
        file_filter = "Files of requested type (*%s)|*%s||" % (ext, ext)
        dlg = win32ui.CreateFileDialog(1, None, fileName, flags, file_filter, None)
        dlg.SetOFNTitle("Locate " + fileName)
        if dlg.DoModal() != win32con.IDOK:
            raise KeyboardInterrupt("User cancelled the process")
        retPath = dlg.GetPathName()
    return os.path.abspath(retPath)
def LocatePath(fileName, searchPaths):
    """Like LocateFileName, but returns the containing directory only."""
    import os
    located = LocateFileName(fileName, searchPaths)
    return os.path.abspath(os.path.dirname(located))
def LocateOptionalPath(fileName, searchPaths):
    """Like LocatePath, but returns None if the user cancels."""
    try:
        return LocatePath(fileName, searchPaths)
    except KeyboardInterrupt:
        # User cancelled the locate dialog - treat as "not found".
        return None
def LocateOptionalFileName(fileName, searchPaths = None):
    """Like LocateFileName, but returns None if the user cancels."""
    try:
        return LocateFileName(fileName, searchPaths)
    except KeyboardInterrupt:
        # User cancelled the locate dialog - treat as "not found".
        return None
def LocatePythonCore(searchPaths):
    """Locate and validate the core Python directories. Returns a list
    of paths that should be used as the core (ie, un-named) portion of
    the Python path.
    """
    import string, os, regutil
    currentPath = regutil.GetRegisteredNamedPath(None)
    if currentPath:
        presearchPaths = string.split(currentPath, ";")
    else:
        presearchPaths = [os.path.abspath(".")]
    # The standard library is identified by its os.py.
    libPath = None
    for path in presearchPaths:
        if FileExists(os.path.join(path, "os.py")):
            libPath = path
            break
    if libPath is None and searchPaths is not None:
        libPath = LocatePath("os.py", searchPaths)
    if libPath is None:
        # Call form of raise - valid on both Python 2 and 3.
        raise error("The core Python library could not be located.")
    # The binary core is identified by a known .pyd (debug builds use _d).
    corePath = None
    suffix = IsDebug()
    for path in presearchPaths:
        if FileExists(os.path.join(path, "unicodedata%s.pyd" % suffix)):
            corePath = path
            break
    if corePath is None and searchPaths is not None:
        corePath = LocatePath("unicodedata%s.pyd" % suffix, searchPaths)
    if corePath is None:
        raise error("The core Python path could not be located.")
    installPath = os.path.abspath(os.path.join(libPath, ".."))
    return installPath, [libPath, corePath]
def FindRegisterPackage(packageName, knownFile, searchPaths, registryAppName = None):
    """Find and Register a package.
    Assumes the core registry setup correctly.
    In addition, if the location located by the package is already
    in the **core** path, then an entry is registered, but no path.
    (no other paths are checked, as the application whose path was used
    may later be uninstalled. This should not happen with the core)
    """
    import regutil, string
    if not packageName:
        # Call form of raise - valid on both Python 2 and 3.
        raise error("A package name must be supplied")
    corePaths = string.split(regutil.GetRegisteredNamedPath(None),";")
    if not searchPaths:
        searchPaths = corePaths
    registryAppName = registryAppName or packageName
    try:
        pathLook, pathAdd = FindPackagePath(packageName, knownFile, searchPaths)
        if pathAdd is not None:
            # Already on the core path?  Register the name with no path.
            if pathAdd in corePaths:
                pathAdd = ""
            regutil.RegisterNamedPath(registryAppName, pathAdd)
        return pathLook
    except error as details:
        # Parenthesised single-expression prints produce identical output
        # under Python 2's print statement and Python 3's print function.
        print("*** The %s package could not be registered - %s" % (packageName, details))
        print("*** Please ensure you have passed the correct paths on the command line.")
        print("*** - For packages, you should pass a path to the packages parent directory,")
        print("*** - and not the package directory itself...")
def FindRegisterApp(appName, knownFiles, searchPaths):
    """Find and Register a package.
    Assumes the core registry setup correctly.
    """
    import regutil, string
    # A single file name may be passed instead of a list.
    if type(knownFiles)==type(''):
        knownFiles = [knownFiles]
    paths=[]
    try:
        for knownFile in knownFiles:
            pathLook = FindAppPath(appName, knownFile, searchPaths)
            if pathLook:
                paths.append(pathLook)
    except error, details:
        print "*** ", details
        return
    regutil.RegisterNamedPath(appName, string.join(paths,";"))
def FindRegisterPythonExe(exeAlias, searchPaths, actualFileNames = None):
    """Find and Register a Python exe (not necessarily *the* python.exe).
    Assumes the core registry setup correctly.
    """
    # (unused "string" import removed)
    import regutil
    fname, ok = FindPythonExe(exeAlias, actualFileNames, searchPaths)
    if not ok:
        # Registration missing or pointing at the wrong exe - (re)register.
        regutil.RegisterPythonExe(fname, exeAlias)
    return fname
def FindRegisterHelpFile(helpFile, searchPaths, helpDesc = None ):
    # Locate helpFile and register it with the Windows help system;
    # failure to locate it is reported but is not fatal.
    import regutil
    try:
        pathLook = FindHelpPath(helpFile, helpDesc, searchPaths)
    except error, details:
        print "*** ", details
        return
    # print "%s found at %s" % (helpFile, pathLook)
    regutil.RegisterHelpFile(helpFile, pathLook, helpDesc)
def SetupCore(searchPaths):
    """Setup the core Python information in the registry.
    This function makes no assumptions about the current state of sys.path.
    After this function has completed, you should have access to the standard
    Python library, and the standard Win32 extensions
    """
    import sys
    # Extend sys.path up-front so the imports below can be satisfied even
    # from a broken installation.
    for path in searchPaths:
        sys.path.append(path)
    import string, os
    import regutil, win32api,win32con
    installPath, corePaths = LocatePythonCore(searchPaths)
    # Register the core Pythonpath.
    print corePaths
    regutil.RegisterNamedPath(None, string.join(corePaths,";"))
    # Register the install path.
    hKey = win32api.RegCreateKey(regutil.GetRootKey() , regutil.BuildDefaultPythonKey())
    try:
        # Core Paths.
        win32api.RegSetValue(hKey, "InstallPath", win32con.REG_SZ, installPath)
    finally:
        win32api.RegCloseKey(hKey)
    # Register the win32 core paths.
    # NOTE(review): win32paths is computed but never used below - looks like
    # legacy code; confirm before removing.
    win32paths = os.path.abspath( os.path.split(win32api.__file__)[0]) + ";" + \
                 os.path.abspath( os.path.split(LocateFileName("win32con.py;win32con.pyc", sys.path ) )[0] )
    # Python has builtin support for finding a "DLLs" directory, but
    # not a PCBuild. Having it in the core paths means it is ignored when
    # an EXE not in the Python dir is hosting us - so we add it as a named
    # value
    check = os.path.join(sys.prefix, "PCBuild")
    if os.path.isdir(check):
        regutil.RegisterNamedPath("PCBuild",check)
def RegisterShellInfo(searchPaths):
    """Registers key parts of the Python installation with the Windows Shell.
    Assumes a valid, minimal Python installation exists
    (ie, SetupCore() has been previously successfully run)
    """
    import regutil, win32con
    suffix = IsDebug()
    # Set up a pointer to the .exe's
    exePath = FindRegisterPythonExe("Python%s.exe" % suffix, searchPaths)
    # Associate .py files with the interpreter: class, "Run" verb, icon.
    regutil.SetRegistryDefaultValue(".py", "Python.File", win32con.HKEY_CLASSES_ROOT)
    regutil.RegisterShellCommand("Open", QuotedFileName(exePath)+" \"%1\" %*", "&Run")
    regutil.SetRegistryDefaultValue("Python.File\\DefaultIcon", "%s,0" % exePath, win32con.HKEY_CLASSES_ROOT)
    # Register whichever documentation files can be found.
    FindRegisterHelpFile("Python.hlp", searchPaths, "Main Python Documentation")
    FindRegisterHelpFile("ActivePython.chm", searchPaths, "Main Python Documentation")
    # We consider the win32 core, as it contains all the win32 api type
    # stuff we need.
    # FindRegisterApp("win32", ["win32con.pyc", "win32api%s.pyd" % suffix], searchPaths)
# Command-line help text; %s is replaced by this script's name.
usage = """\
regsetup.py - Setup/maintain the registry for Python apps.
Run without options, (but possibly search paths) to repair a totally broken
python registry setup. This should allow other options to work.
Usage: %s [options ...] paths ...
-p packageName -- Find and register a package. Looks in the paths for
a sub-directory with the name of the package, and
adds a path entry for the package.
-a appName -- Unconditionally add an application name to the path.
A new path entry is create with the app name, and the
paths specified are added to the registry.
-c -- Add the specified paths to the core Pythonpath.
If a path appears on the core path, and a package also
needs that same path, the package will not bother
registering it. Therefore, By adding paths to the
core path, you can avoid packages re-registering the same path.
-m filename -- Find and register the specific file name as a module.
Do not include a path on the filename!
--shell -- Register everything with the Win95/NT shell.
--upackage name -- Unregister the package
--uapp name -- Unregister the app (identical to --upackage)
--umodule name -- Unregister the module
--description -- Print a description of the usage.
--examples -- Print examples of usage.
""" % sys.argv[0]
# Longer explanation printed for --description.
description="""\
If no options are processed, the program attempts to validate and set
the standard Python path to the point where the standard library is
available. This can be handy if you move Python to a new drive/sub-directory,
in which case most of the options would fail (as they need at least string.py,
os.py etc to function.)
Running without options should repair Python well enough to run with
the other options.
paths are search paths that the program will use to seek out a file.
For example, when registering the core Python, you may wish to
provide paths to non-standard places to look for the Python help files,
library files, etc.
See also the "regcheck.py" utility which will check and dump the contents
of the registry.
"""
# Sample invocations printed for --examples.
examples="""\
Examples:
"regsetup c:\\wierd\\spot\\1 c:\\wierd\\spot\\2"
Attempts to setup the core Python. Looks in some standard places,
as well as the 2 wierd spots to locate the core Python files (eg, Python.exe,
python14.dll, the standard library and Win32 Extensions.
"regsetup -a myappname . .\subdir"
Registers a new Pythonpath entry named myappname, with "C:\\I\\AM\\HERE" and
"C:\\I\\AM\\HERE\subdir" added to the path (ie, all args are converted to
absolute paths)
"regsetup -c c:\\my\\python\\files"
Unconditionally add "c:\\my\\python\\files" to the 'core' Python path.
"regsetup -m some.pyd \\windows\\system"
Register the module some.pyd in \\windows\\system as a registered
module. This will allow some.pyd to be imported, even though the
windows system directory is not (usually!) on the Python Path.
"regsetup --umodule some"
Unregister the module "some". This means normal import rules then apply
for that module.
"""
if __name__=='__main__':
    if len(sys.argv)>1 and sys.argv[1] in ['/?','-?','-help','-h']:
        print usage
    elif len(sys.argv)==1 or not sys.argv[1][0] in ['/','-']:
        # No args, or useful args.
        # Repair mode: rebuild the core registration from sys.path plus any
        # extra paths given on the command line.
        searchPath = sys.path[:]
        for arg in sys.argv[1:]:
            searchPath.append(arg)
        # Good chance we are being run from the "regsetup.py" directory.
        # Typically this will be "\somewhere\win32\Scripts" and the
        # "somewhere" and "..\Lib" should also be searched.
        searchPath.append("..\\Build")
        searchPath.append("..\\Lib")
        searchPath.append("..")
        searchPath.append("..\\..")
        # for developers:
        # also search somewhere\lib, ..\build, and ..\..\build
        searchPath.append("..\\..\\lib")
        searchPath.append("..\\build")
        searchPath.append("..\\..\\pcbuild")
        print "Attempting to setup/repair the Python core"
        SetupCore(searchPath)
        RegisterShellInfo(searchPath)
        FindRegisterHelpFile("PyWin32.chm", searchPath, "Pythonwin Reference")
        # Check the registry.
        print "Registration complete - checking the registry..."
        import regcheck
        regcheck.CheckRegistry()
    else:
        # Option mode: non-option args become search paths for the options.
        searchPaths = []
        import getopt, string
        opts, args = getopt.getopt(sys.argv[1:], 'p:a:m:c',
            ['shell','upackage=','uapp=','umodule=','description','examples'])
        for arg in args:
            searchPaths.append(arg)
        # Note: deliberately independent "if"s - several options may be
        # combined in one invocation.
        for o,a in opts:
            if o=='--description':
                print description
            if o=='--examples':
                print examples
            if o=='--shell':
                print "Registering the Python core."
                RegisterShellInfo(searchPaths)
            if o=='-p':
                print "Registering package", a
                FindRegisterPackage(a,None,searchPaths)
            if o in ['--upackage', '--uapp']:
                import regutil
                print "Unregistering application/package", a
                regutil.UnregisterNamedPath(a)
            if o=='-a':
                import regutil
                path = string.join(searchPaths,";")
                print "Registering application", a,"to path",path
                regutil.RegisterNamedPath(a,path)
            if o=='-c':
                if not len(searchPaths):
                    raise error, "-c option must provide at least one additional path"
                import win32api, regutil
                currentPaths = string.split(regutil.GetRegisteredNamedPath(None),";")
                oldLen = len(currentPaths)
                for newPath in searchPaths:
                    if newPath not in currentPaths:
                        currentPaths.append(newPath)
                if len(currentPaths)<>oldLen:
                    print "Registering %d new core paths" % (len(currentPaths)-oldLen)
                    regutil.RegisterNamedPath(None,string.join(currentPaths,";"))
                else:
                    print "All specified paths are already registered."
| {
"repo_name": "chvrga/outdoor-explorer",
"path": "java/play-1.4.4/python/Lib/site-packages/win32/scripts/regsetup.py",
"copies": "4",
"size": "20267",
"license": "mit",
"hash": 1516416816643635000,
"line_mean": 37.6614481409,
"line_max": 182,
"alpha_frac": 0.6316672423,
"autogenerated": false,
"ratio": 4.09765467044076,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006322575011280066,
"num_lines": 511
} |
# A tool to use for the analysis and gathering of scaled intensity data
# from a single macromolecular crystal. This will be both a module (for
# use in xia2) and an application in its own right, AMI.
#
# Example usage:
#
# ami hklin1 PEAK.HKL hklin2 INFL.HKL hklin3 LREM.HKL HKLOUT merged.mtz << eof
# drename file 1 pname demo xname only dname peak
# drename file 2 pname demo xname only dname infl
# drename file 3 pname demo xname only dname lrem
# solvent 0.53
# symm P43212
# reindex h,k,l
# cell 55.67 55.67 108.92 90.0 90.0 90.0
# anomalous on
# eof
#
# should also allow for a HKLREF.
import math
import os
from xia2.Wrappers.CCP4.CCP4Factory import CCP4Factory
class AnalyseMyIntensities:
# FIXME retire this entire class...
def __init__(self):
    # Directory handed to each CCP4 wrapper; defaults to the cwd.
    self._working_directory = os.getcwd()
    # Factory producing CCP4 program wrappers (eg. Mtzdump).
    self._factory = CCP4Factory()
    # Best (smallest) resolution seen across input files; 0.0 = unset.
    self._resolution = 0.0
# admin functions
def set_working_directory(self, working_directory):
    # Keep the factory in step so wrapped programs run in the same place.
    self._working_directory = working_directory
    self._factory.set_working_directory(working_directory)
def get_working_directory(self):
    # Accessor for the directory set in __init__/set_working_directory.
    return self._working_directory
def compute_average_cell(self, hklin_list):
if len(hklin_list) == 0:
raise RuntimeError("no input reflection files to compute cell from")
cell_a = 0.0
cell_b = 0.0
cell_c = 0.0
cell_alpha = 0.0
cell_beta = 0.0
cell_gamma = 0.0
n_input = 0
sg = None
for hklin in hklin_list:
mtzdump = self._factory.Mtzdump()
mtzdump.set_hklin(hklin)
mtzdump.dump()
resolution = min(mtzdump.get_resolution_range())
if resolution < self._resolution or self._resolution == 0:
self._resolution = resolution
datasets = mtzdump.get_datasets()
reflections = mtzdump.get_reflections()
if len(datasets) > 1:
raise RuntimeError("more than one dataset in %s" % hklin)
info = mtzdump.get_dataset_info(datasets[0])
if not sg:
sg = info["spacegroup"]
elif sg != info["spacegroup"]:
raise RuntimeError("inconsistent spacegroup")
# check that this u/c is in agreement with the others -
# allow 10% grace (!)
if n_input == 0:
cell_a = info["cell"][0] * reflections
cell_b = info["cell"][1] * reflections
cell_c = info["cell"][2] * reflections
cell_alpha = info["cell"][3] * reflections
cell_beta = info["cell"][4] * reflections
cell_gamma = info["cell"][5] * reflections
n_input += reflections
else:
if math.fabs(n_input * info["cell"][0] - cell_a) / cell_a > 0.1:
raise RuntimeError("inconsistent unit cell")
if math.fabs(n_input * info["cell"][1] - cell_b) / cell_b > 0.1:
raise RuntimeError("inconsistent unit cell")
if math.fabs(n_input * info["cell"][2] - cell_c) / cell_c > 0.1:
raise RuntimeError("inconsistent unit cell")
if math.fabs(n_input * info["cell"][3] - cell_alpha) / cell_alpha > 0.1:
raise RuntimeError("inconsistent unit cell")
if math.fabs(n_input * info["cell"][4] - cell_beta) / cell_beta > 0.1:
raise RuntimeError("inconsistent unit cell")
if math.fabs(n_input * info["cell"][5] - cell_gamma) / cell_gamma > 0.1:
raise RuntimeError("inconsistent unit cell")
cell_a += info["cell"][0] * reflections
cell_b += info["cell"][1] * reflections
cell_c += info["cell"][2] * reflections
cell_alpha += info["cell"][3] * reflections
cell_beta += info["cell"][4] * reflections
cell_gamma += info["cell"][5] * reflections
n_input += reflections
cell_a /= n_input
cell_b /= n_input
cell_c /= n_input
cell_alpha /= n_input
cell_beta /= n_input
cell_gamma /= n_input
average_cell = (cell_a, cell_b, cell_c, cell_alpha, cell_beta, cell_gamma)
return average_cell, sg
| {
"repo_name": "xia2/xia2",
"path": "src/xia2/Modules/AnalyseMyIntensities.py",
"copies": "1",
"size": "4353",
"license": "bsd-3-clause",
"hash": -7295326909630316000,
"line_mean": 35.5798319328,
"line_max": 88,
"alpha_frac": 0.5605329658,
"autogenerated": false,
"ratio": 3.6858594411515666,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4746392406951567,
"avg_score": null,
"num_lines": null
} |
"""A tool used to orient joints with common orientations.
The tool mostly assumes the X axis is the primary axis and joints always rotate forward on the Z axis.
Usage:
import cmt.rig.orientjoints
cmt.rig.orientjoints.OrientJointsWindow()
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import logging
import maya.cmds as cmds
import maya.api.OpenMaya as OpenMaya
import cmt.rig.skeleton as skeleton
reload(skeleton)
log = logging.getLogger(__name__)
MESSAGE_ATTRIBUTE = "cmt_jointOrient"
ORIENT_GROUP = "cmt_orient_grp"
class OrientJointsWindow(object):
    """Main window of the joint-orient tool.

    Builds the UI and dispatches button callbacks to the module-level
    orientation functions, which operate on the currently selected joints.
    """

    def __init__(self):
        name = "cmt_orientjoints"
        # Recreate the window from scratch so stale state/prefs never leak in.
        if cmds.window(name, exists=True):
            cmds.deleteUI(name, window=True)
        if cmds.windowPref(name, exists=True):
            cmds.windowPref(name, remove=True)
        self.window = cmds.window(
            name, title="CMT Orient Joints", widthHeight=(358, 380)
        )
        cmds.columnLayout(adjustableColumn=True)
        margin_width = 4
        cmds.frameLayout(
            bv=False, label="Operations", collapsable=True, mw=margin_width
        )
        cmds.rowColumnLayout(numberOfColumns=2, adj=1)
        self.insert_joint_field = cmds.intField(minValue=1, value=1)
        cmds.button(label="Insert Joints", c=self.insert_joints)
        cmds.setParent("..")
        cmds.gridLayout(numberOfColumns=3, cellWidthHeight=(116, 30))
        cmds.button(label="Left", c=self.set_left)
        cmds.button(label="Center", c=self.set_center)
        cmds.button(label="Right", c=self.set_right)
        cmds.setParent("..")
        cmds.setParent("..")
        cmds.frameLayout(
            bv=False, label="Quick Actions", collapsable=True, mw=margin_width
        )
        cmds.gridLayout(numberOfColumns=2, cellWidthHeight=(175, 65))
        cmds.button(label="Make Planar Orientation", command=self.make_planar)
        # A no-arg partial is redundant; pass the function directly.
        cmds.button(
            label="Project to Planar Position", command=make_position_planar
        )
        cmds.button(label="Align Up With Child", command=self.align_with_child)
        cmds.button(label="Zero Orient", command=self.zero_orient)
        cmds.button(label="Orient to World", command=self.orient_to_world)
        # Offset rows: a nudge-left button, the amount field, a nudge-right
        # button, for each of the X, Y and Z orient axes.
        cmds.rowColumnLayout(numberOfColumns=4)
        height = 20
        label_width = 60
        icon_left = "nudgeLeft.png"
        icon_right = "nudgeRight.png"
        cmds.text(label="Offset X", align="right", width=label_width)
        cmds.iconTextButton(
            style="iconOnly",
            image1=icon_left,
            label="spotlight",
            h=height,
            w=height,
            c=partial(self.offset_orient_x, direction=-1),
        )
        self.offset_x = cmds.floatField(value=90.0)
        cmds.iconTextButton(
            style="iconOnly",
            image1=icon_right,
            label="spotlight",
            h=height,
            w=height,
            c=partial(self.offset_orient_x, direction=1),
        )
        cmds.text(label="Offset Y", align="right", width=label_width)
        cmds.iconTextButton(
            style="iconOnly",
            image1=icon_left,
            label="spotlight",
            h=height,
            w=height,
            c=partial(self.offset_orient_y, direction=-1),
        )
        self.offset_y = cmds.floatField(value=90.0)
        cmds.iconTextButton(
            style="iconOnly",
            image1=icon_right,
            label="spotlight",
            h=height,
            w=height,
            c=partial(self.offset_orient_y, direction=1),
        )
        cmds.text(label="Offset Z", align="right", width=label_width)
        cmds.iconTextButton(
            style="iconOnly",
            image1=icon_left,
            label="spotlight",
            h=height,
            w=height,
            c=partial(self.offset_orient_z, direction=-1),
        )
        self.offset_z = cmds.floatField(value=90.0)
        cmds.iconTextButton(
            style="iconOnly",
            image1=icon_right,
            label="spotlight",
            h=height,
            w=height,
            c=partial(self.offset_orient_z, direction=1),
        )
        cmds.setParent("..")
        cmds.setParent("..")
        cmds.setParent("..")
        cmds.frameLayout(
            bv=False, label="Manual Orient", collapsable=True, mw=margin_width
        )
        cmds.columnLayout(adj=True)
        cmds.rowLayout(numberOfColumns=2, cw2=(150, 150))
        self.reorient_children = cmds.checkBox(
            label="Reorient children", value=True, align="left"
        )
        self.reset_orientation = cmds.checkBox(
            label="Reset orientation", value=True, align="left"
        )
        cmds.setParent("..")
        cmds.gridLayout(numberOfColumns=2, cellWidthHeight=(175, 65))
        cmds.button(label="Template Joints", command=self.template_joints)
        cmds.button(label="Rebuild Joints", command=rebuild_joints)
        cmds.setParent("..")
        cmds.setParent("..")
        cmds.setParent("..")
        cmds.showWindow(self.window)

    def insert_joints(self, *args):
        """Insert the requested number of joints into the selected chain."""
        joint_count = cmds.intField(self.insert_joint_field, q=True, v=True)
        skeleton.insert_joints(joint_count=joint_count)

    def template_joints(self, dummy):
        """Template the selected joints using the checkbox options."""
        reorient_children = cmds.checkBox(
            self.reorient_children, query=True, value=True
        )
        reset_orientation = cmds.checkBox(
            self.reset_orientation, query=True, value=True
        )
        template_joints(
            reorient_children=reorient_children, reset_orientation=reset_orientation
        )

    def make_planar(self, *args):
        joints = cmds.ls(sl=True, type="joint") or []
        make_planar(joints)

    def zero_orient(self, *args):
        joints = cmds.ls(sl=True, type="joint") or []
        zero_orient(joints)

    def align_with_child(self, *args):
        joints = cmds.ls(sl=True, type="joint") or []
        align_with_child(joints)

    def orient_to_world(self, *args):
        joints = cmds.ls(sl=True, type="joint") or []
        orient_to_world(joints)

    def offset_orient_x(self, direction):
        """Offset the selected joints' orient X by +/- the field amount."""
        joints = cmds.ls(sl=True, type="joint") or []
        amount = cmds.floatField(self.offset_x, q=True, value=True) * direction
        offset_orient(joints, amount, Axis.x)

    def offset_orient_y(self, direction):
        """Offset the selected joints' orient Y by +/- the field amount."""
        joints = cmds.ls(sl=True, type="joint") or []
        amount = cmds.floatField(self.offset_y, q=True, value=True) * direction
        offset_orient(joints, amount, Axis.y)

    def offset_orient_z(self, direction):
        """Offset the selected joints' orient Z by +/- the field amount."""
        joints = cmds.ls(sl=True, type="joint") or []
        amount = cmds.floatField(self.offset_z, q=True, value=True) * direction
        offset_orient(joints, amount, Axis.z)

    def set_left(self, *args):
        self.set_side(1)

    def set_center(self, *args):
        self.set_side(0)

    def set_right(self, *args):
        self.set_side(2)

    def set_side(self, side):
        """Set the .side attribute on the selection and all its descendents.

        :param side: Side value to assign (0=center, 1=left, 2=right).
        """
        nodes = cmds.ls(sl=True)
        for n in nodes:
            # listRelatives returns None when a node has no descendents;
            # fall back to an empty list so leaf nodes do not raise.
            hierarchy = cmds.listRelatives(n, ad=True) or []
            hierarchy.append(n)
            for node in hierarchy:
                attr = "{}.side".format(node)
                if cmds.objExists(attr):
                    cmds.setAttr(attr, side)
class Axis:
    # Suffixes matching Maya's jointOrientX/Y/Z attribute names; used to
    # build attribute strings like "{joint}.jointOrient{axis}".
    x = "X"
    y = "Y"
    z = "Z"
def make_planar(joints):
    """Aim each joint's X axis at its first child using the joint's parent
    as the world-up object, so the joint lies in the parent/child plane.

    Joints without a parent or without children are skipped with a warning.
    @param joints: List of joints to orient.
    """
    for joint in joints:
        parent = cmds.listRelatives(joint, parent=True, path=True)
        if not parent:
            log.warning(
                "Cannot make %s planar because it does not have a parent.", joint
            )
            continue
        # Unparent the children so the rotation does not move them.
        children = _unparent_children(joint)
        if not children:
            log.warning(
                "Cannot make %s planar because it does not have any children.", joint
            )
            continue
        # Aim X at the first child with Y up toward the parent, then delete
        # the temporary constraint and bake the rotation with makeIdentity.
        cmds.delete(
            cmds.aimConstraint(
                children[0],
                joint,
                aim=(1, 0, 0),
                u=(0, 1, 0),
                worldUpType="object",
                worldUpObject=parent[0],
            )
        )
        cmds.makeIdentity(joint, apply=True)
        _reparent_children(joint, children)
    if joints:
        cmds.select(joints)
def make_position_planar(*args):
    """Project selected joints onto the plane defined by the first three
    selected joints.

    Selection order: three joints defining the plane, then the joints to
    move onto that plane.
    @raises RuntimeError: If fewer than four joints are selected.
    """
    sel = cmds.ls(sl=True, type="joint")
    if len(sel) <= 3:
        raise RuntimeError(
            "Select 3 joints to make a plane and then additional joints to move onto that plane."
        )
    # Plane normal from the first three selected joints.
    a, b, c = [get_position(sel[i]) for i in range(3)]
    ab = (b - a).normal()
    ac = (c - a).normal()
    normal = (ab ^ ac).normal()
    joints = sel[3:]
    for joint in joints:
        children = _unparent_children(joint)
        # Move the joint along the normal by its signed distance to the plane.
        p = get_position(joint)
        pa = a - p
        dot = pa * normal
        p = p + (normal * dot)
        cmds.xform(joint, ws=True, t=(p.x, p.y, p.z))
        _reparent_children(joint, children)
    if sel:
        cmds.select(sel)
def align_with_child(joints):
    """Aligns the up axis of the given joints with their respective child joint.
    Joints without children are left unchanged.
    @param joints: List of joints to orient.
    """
    for joint in joints:
        # Unparent the children so the rotation does not move them.
        children = _unparent_children(joint)
        if children:
            # Aim X at the first child with Y following the child's own
            # rotation, then delete the temporary constraint and bake the
            # rotation with makeIdentity.
            cmds.delete(
                cmds.aimConstraint(
                    children[0],
                    joint,
                    aim=(1, 0, 0),
                    upVector=(0, 1, 0),
                    worldUpType="objectrotation",
                    worldUpVector=(0, 1, 0),
                    worldUpObject=children[0],
                )
            )
            cmds.makeIdentity(joint, apply=True)
        _reparent_children(joint, children)
    if joints:
        cmds.select(joints)
def zero_orient(joints):
    """Zero out the jointOrient of each joint, unparenting the children
    first so they are not moved by the change.
    @param joints: Joints to modify.
    """
    for joint in joints:
        children = _unparent_children(joint)
        cmds.setAttr("{0}.jointOrient".format(joint), 0, 0, 0)
        _reparent_children(joint, children)
    if joints:
        cmds.select(joints)
def orient_to_world(joints):
    """Orients the given joints with the world.
    @param joints: Joints to orient.
    """
    for joint in joints:
        children = _unparent_children(joint)
        parent = cmds.listRelatives(joint, parent=True, path=True)
        # Remember the short name: parenting can change the returned path.
        orig_joint = joint.split("|")[-1]
        if parent:
            # Temporarily move to world so the orient is world-aligned.
            joint = cmds.parent(joint, world=True)[0]
        cmds.joint(joint, e=True, oj="none", zso=True)
        if parent:
            joint = cmds.parent(joint, parent)[0]
            joint = cmds.rename(joint, orig_joint)
        _reparent_children(joint, children)
    if joints:
        cmds.select(joints)
def offset_orient(joints, amount, axis):
    """Offsets the orient by the given amount
    @param joints: Joints to orient.
    @param amount: Amount to offset by (degrees).
    @param axis: Which axis X, Y or Z (see the Axis class).
    """
    for joint in joints:
        # Unparent first so the orient change does not move the children.
        children = _unparent_children(joint)
        attribute = "{0}.jointOrient{1}".format(joint, axis)
        orient = cmds.getAttr(attribute)
        orient += amount
        cmds.setAttr(attribute, orient)
        _reparent_children(joint, children)
    if joints:
        cmds.select(joints)
def _unparent_children(joint):
    """Helper function to unparent any children of the given joint.
    @param joint: Joint whose children to unparent.
    @return: A list of the unparented children (their new world-level paths).
    """
    children = cmds.listRelatives(joint, children=True, path=True) or []
    return [cmds.parent(child, world=True)[0] for child in children]
def _reparent_children(joint, children):
    """Helper function to reparent any children of the given joint.
    @param joint: Joint whose children to reparent.
    @param children: List of transforms to reparent (as returned by
        _unparent_children).
    """
    for child in children:
        cmds.parent(child, joint)
def template_joints(joints=None, reorient_children=True, reset_orientation=True):
    """Template joints and attach orient manipulators to them.

    @param joints: Joints to template; defaults to the selected joints.
    @param reorient_children: If True, also process all descendent joints.
    @param reset_orientation: If True, freeze and re-run the xyz/yup orient
        on each joint before creating its manipulator.
    @raises RuntimeError: If no joints are given or selected.
    """
    if joints is None:
        joints = cmds.ls(sl=True, type="joint")
    if not joints:
        raise RuntimeError("No joint selected to orient.")
    if reorient_children:
        # Pass the joints explicitly and guard against a None return: the
        # original relied on the current selection and crashed with
        # extend(None) when there were no joint descendents.
        children = (
            cmds.listRelatives(
                joints, fullPath=True, allDescendents=True, type="joint"
            )
            or []
        )
        joints.extend(children)
    red, green, blue = create_shaders()
    orient_group = cmds.createNode("transform", name=ORIENT_GROUP)
    manips = []
    for joint in joints:
        if reset_orientation:
            cmds.makeIdentity(joint, apply=True)
            cmds.joint(
                joint,
                edit=True,
                orientJoint="xyz",
                secondaryAxisOrient="yup",
                children=False,
                zeroScaleOrient=True,
            )
        if not cmds.listRelatives(joint, children=True):
            # Tip joint: no child to aim at, so just zero its orient.
            zero_orient([joint])
            continue
        group, manip = create_orient_manipulator(joint, blue)
        manips.append(manip)
        cmds.parent(group, orient_group)
        cmds.parentConstraint(joint, group)
        # Template the joint so it is visible but unselectable while the
        # manipulators are being used.
        cmds.setAttr(joint + ".template", 1)
    cmds.select(manips)
def create_shaders():
    """
    Creates the red/green/blue shaders.
    @return: (Red, green, blue material nodes)
    """
    red = cmds.shadingNode("lambert", asShader=True)
    cmds.setAttr("{0}.color".format(red), 1, 0, 0, type="double3")
    cmds.setAttr("{0}.ambientColor".format(red), 1, 0, 0, type="double3")
    green = cmds.shadingNode("lambert", asShader=True)
    cmds.setAttr("{0}.color".format(green), 0, 1, 0, type="double3")
    cmds.setAttr("{0}.ambientColor".format(green), 0, 1, 0, type="double3")
    blue = cmds.shadingNode("lambert", asShader=True)
    cmds.setAttr("{0}.color".format(blue), 0, 0, 1, type="double3")
    cmds.setAttr("{0}.ambientColor".format(blue), 0, 0, 1, type="double3")
    # Mostly transparent so manipulators do not obscure the joints.
    t = 0.9
    for node in [red, green, blue]:
        cmds.setAttr("{0}.transparency".format(node), t, t, t, type="double3")
    return red, green, blue
def create_orient_manipulator(joint, material):
    """Create an arrow-curve manipulator for a joint, sized from the joint
    display scale/radius (or the distance to the first child, if any).

    @param joint: Joint to build the manipulator for.
    @param material: Shader node (currently unused; shader assignment is
        commented out below).
    @return: (group transform, curve shape)
    """
    joint_scale = cmds.jointDisplayScale(query=True)
    joint_radius = cmds.getAttr("{0}.radius".format(joint))
    radius = joint_scale * joint_radius
    children = cmds.listRelatives(joint, children=True, path=True)
    if children:
        # Scale the manipulator to the distance to the first child.
        p1 = cmds.xform(joint, q=True, ws=True, t=True)
        p1 = OpenMaya.MPoint(*p1)
        p2 = cmds.xform(children[0], q=True, ws=True, t=True)
        p2 = OpenMaya.MPoint(*p2)
        radius = p1.distanceTo(p2)
    arrow_cvs = [
        [-1, 0, 0],
        [-1, 2, 0],
        [-2, 2, 0],
        [0, 4, 0],
        [2, 2, 0],
        [1, 2, 0],
        [1, 0, 0],
        [-1, 0, 0],
    ]
    arrow_cvs = [[x[0] * radius, x[1] * radius, x[2] * radius] for x in arrow_cvs]
    shape = cmds.curve(name="{0}_zForward".format(joint), degree=1, point=arrow_cvs)
    # shape = cmds.sphere(n='{0}_zForward'.format(joint), p=(0, 0, 0), ax=(0, 0, -1), ssw=0, esw=180, r=radius, d=3, ut=0, tol=0.01, s=8, nsp=4, ch=0)[0]
    # cmds.setAttr('{0}.sz'.format(shape), 0)
    # cmds.select(shape)
    # cmds.hyperShade(assign=material)
    group = cmds.createNode("transform", name="{0}_grp".format(shape))
    cmds.parent(shape, group)
    cmds.makeIdentity(shape, apply=True)
    # Message-connect the joint so rebuild_joints can find it later.
    cmds.addAttr(shape, longName=MESSAGE_ATTRIBUTE, attributeType="message")
    cmds.connectAttr(
        "{0}.message".format(joint), "{0}.{1}".format(shape, MESSAGE_ATTRIBUTE)
    )
    # Lock everything except rx so the user can only rotate around X.
    for attr in ["tx", "ty", "tz", "ry", "rz", "v"]:
        cmds.setAttr("{0}.{1}".format(shape, attr), lock=True, keyable=False)
    return group, shape
def get_position(node):
    """Return the world-space translation of *node* as an MPoint."""
    p = cmds.xform(node, q=True, ws=True, t=True)
    return OpenMaya.MPoint(p)
def create_arrow(jointName):
    """Create an arrow curve scaled to the joint's display size.

    @param jointName: Joint used for naming and for the radius/scale.
    @return: The group containing the arrow curve, pivot at the origin.
    """
    curve = cmds.curve(
        name="%s_ForwardDirection" % jointName,
        degree=1,
        point=[
            (-1, 0, 0),
            (-1, 2, 0),
            (-2, 2, 0),
            (0, 4, 0),
            (2, 2, 0),
            (1, 2, 0),
            (1, 0, 0),
            (-1, 0, 0),
        ],
    )
    group = cmds.group()
    cmds.xform(objectSpace=True, pivots=(0, 0, 0))
    # Scale the group by display-scale * joint radius.
    jointScale = cmds.jointDisplayScale(query=True)
    jointRadius = cmds.getAttr("%s.radius" % jointName)
    jointScale *= jointRadius
    cmds.xform(scale=(jointScale, jointScale, jointScale))
    return group
def rebuild_joints(*args):
    """Apply the manipulators created by template_joints back to the joints:
    bake each manipulator's X rotation into the joint orient, untemplate
    the joints, delete the manipulator group, and select the joints.
    """
    if not cmds.objExists(ORIENT_GROUP):
        return
    nodes = cmds.listRelatives(ORIENT_GROUP, ad=True, path=True) or []
    joints = []
    for node in nodes:
        attrs = cmds.listAttr(node, ud=True) or []
        if MESSAGE_ATTRIBUTE not in attrs:
            continue
        # Follow the message connection back to the joint this manip drives.
        joint = cmds.listConnections(
            "{0}.{1}".format(node, MESSAGE_ATTRIBUTE), d=False
        )[0]
        joints.append(joint)
        rotation = cmds.getAttr("{0}.rx".format(node))
        children = cmds.listRelatives(joint, children=True, shapes=False, path=True)
        if children:
            # First unparent children so change in joint orient does not affect children
            children = [cmds.parent(child, world=True)[0] for child in children]
            # Add rotation offset to joint orient
            orient_x = cmds.getAttr("{0}.jointOrientX".format(joint))
            orient_x += rotation
            # Normalise into the [-180, 180] range.
            while orient_x > 180.0:
                orient_x -= 360.0
            while orient_x < -180.0:
                orient_x += 360.0
            cmds.setAttr("{0}.jointOrientX".format(joint), orient_x)
            # Reparent child
            for child in children:
                cmds.parent(child, joint)
        else:
            # tip joint, just zero out joint orient
            cmds.setAttr("%s.jointOrientX" % joint, 0)
            cmds.setAttr("%s.jointOrientY" % joint, 0)
            cmds.setAttr("%s.jointOrientZ" % joint, 0)
        # Untemplate
        cmds.setAttr("{0}.template".format(joint), 0)
    # Delete arrow group
    cmds.delete(ORIENT_GROUP)
    cmds.select(joints)
| {
"repo_name": "chadmv/cmt",
"path": "scripts/cmt/rig/orientjoints.py",
"copies": "1",
"size": "17952",
"license": "mit",
"hash": 8369973373625696000,
"line_mean": 31.7591240876,
"line_max": 153,
"alpha_frac": 0.579823975,
"autogenerated": false,
"ratio": 3.566858732366382,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4646682707366382,
"avg_score": null,
"num_lines": null
} |
"""A tool used to run Python scripts on disk."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import logging
import os
import runpy
from PySide2.QtCore import *
from PySide2.QtWidgets import *
from maya.app.general.mayaMixin import MayaQWidgetBaseMixin
from cmt.ui.widgets.filepathwidget import FilePathWidget
from cmt.ui.stringcache import StringCache
logger = logging.getLogger(__name__)
def run_script(file_path, init_globals=None):
    """Execute the code at *file_path* as if it were the ``__main__`` module.

    The path may refer to a Python source file, a compiled bytecode file,
    or a valid sys.path entry containing a __main__ module.

    :param file_path: Filesystem location of the script to run.
    :param init_globals: Optional dict used to pre-populate the module's
        globals.
    """
    globals_dict = init_globals if init_globals is not None else {}
    resolved_path = os.path.realpath(file_path)
    # Same logger object as the module-level one (getLogger is idempotent).
    logging.getLogger(__name__).info("Running {}".format(resolved_path))
    runpy.run_path(resolved_path, globals_dict, "__main__")
_win = None
def show():
    """Show the Run Script window, replacing any previous instance.

    Keeps a module-level reference (_win) so the window is not garbage
    collected while open.
    """
    global _win
    if _win:
        _win.close()
    _win = RunScriptWindow()
    _win.show()
class RunScriptWindow(MayaQWidgetBaseMixin, QMainWindow):
    """The RunScriptWindow allows the user to browse for and run Python scripts on disk.

    Left pane: recently run scripts.  Right pane: a file tree rooted at a
    user-chosen directory, filtered to .py files.  Double-click or use the
    context menu to run a script.
    """

    def __init__(self, parent=None):
        super(RunScriptWindow, self).__init__(parent)
        self.setWindowTitle("Run Script")
        self.resize(800, 600)
        main_widget = QWidget()
        self.setCentralWidget(main_widget)
        main_layout = QVBoxLayout()
        main_widget.setLayout(main_layout)
        self.file_model = QFileSystemModel(self)
        # Root-directory picker; the name acts as its persistence key.
        self.root_path = FilePathWidget(
            "Root: ",
            FilePathWidget.directory,
            name="cmt.runscript.rootpath",
            parent=self,
        )
        self.root_path.path_changed.connect(self.set_root_path)
        main_layout.addWidget(self.root_path)
        splitter = QSplitter(self)
        main_layout.addWidget(splitter)
        splitter.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.recent_list = RecentList(self)
        splitter.addWidget(self.recent_list)
        self.file_tree_view = QTreeView()
        # Show files and directories (no . / ..); non-matching names are
        # hidden rather than greyed out (setNameFilterDisables(False)).
        self.file_model.setFilter(QDir.NoDotAndDotDot | QDir.Files | QDir.AllDirs)
        self.file_model.setReadOnly(True)
        self.file_model.setNameFilters(["*.py"])
        self.file_model.setNameFilterDisables(False)
        self.file_tree_view.setModel(self.file_model)
        # Hide the extra model columns, leaving just the name column.
        self.file_tree_view.setColumnHidden(1, True)
        self.file_tree_view.setColumnHidden(2, True)
        self.file_tree_view.setContextMenuPolicy(Qt.CustomContextMenu)
        self.file_tree_view.customContextMenuRequested.connect(
            self.on_file_tree_context_menu
        )
        self.file_tree_view.doubleClicked.connect(self.on_file_tree_double_clicked)
        splitter.addWidget(self.file_tree_view)
        splitter.setSizes([200, 400])
        self.set_root_path(self.root_path.path)

    def set_root_path(self, path):
        """Re-root the file tree at the given directory."""
        index = self.file_model.setRootPath(path)
        self.file_tree_view.setRootIndex(index)

    def on_file_tree_double_clicked(self, index):
        """Run the double-clicked entry if it is a Python file."""
        path = self.file_model.fileInfo(index).absoluteFilePath()
        if not os.path.isfile(path) or not path.endswith(".py"):
            return
        self.run_script(path)

    def on_file_tree_context_menu(self, pos):
        """Show a Run Script context menu for .py entries in the tree."""
        index = self.file_tree_view.indexAt(pos)
        if not index.isValid():
            return
        path = self.file_model.fileInfo(index).absoluteFilePath()
        if not os.path.isfile(path) or not path.endswith(".py"):
            return
        self.create_context_menu(path, self.file_tree_view.mapToGlobal(pos))

    def create_context_menu(self, path, pos):
        """Build and exec the context menu for *path* at global *pos*."""
        menu = QMenu()
        menu.addAction(
            QAction("Run Script", self, triggered=partial(self.run_script, path))
        )
        menu.exec_(pos)

    def run_script(self, path):
        """Record *path* in the recents list, then execute it."""
        self.recent_list.recents.push(path)
        run_script(path)
class RecentList(QListView):
    """List view providing quick access to recently run scripts."""

    def __init__(self, parent=None):
        super(RecentList, self).__init__(parent)
        # Bounded MRU cache of script paths; doubles as the list model.
        self.recents = StringCache("cmt.runscript.recents", max_values=20)
        self.setModel(self.recents)
        self.setContextMenuPolicy(Qt.CustomContextMenu)
        self.customContextMenuRequested.connect(self.on_recents_context_menu)
        self.setEditTriggers(QAbstractItemView.NoEditTriggers)
        self.doubleClicked.connect(self.on_recents_double_clicked)

    def on_recents_double_clicked(self, index):
        """Run the double-clicked recent script."""
        path = self.recents.data(index, Qt.DisplayRole)
        run_script(path)

    def on_recents_context_menu(self, pos):
        """Show a Run Script context menu for the item under the cursor."""
        index = self.indexAt(pos)
        path = self.recents.data(index, Qt.DisplayRole)
        menu = QMenu()
        menu.addAction(QAction("Run Script", self, triggered=partial(run_script, path)))
        menu.exec_(self.mapToGlobal(pos))
| {
"repo_name": "chadmv/cmt",
"path": "scripts/cmt/pipeline/runscript.py",
"copies": "1",
"size": "5142",
"license": "mit",
"hash": -7214788095317972000,
"line_mean": 32.8289473684,
"line_max": 88,
"alpha_frac": 0.6637495138,
"autogenerated": false,
"ratio": 3.646808510638298,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4810558024438298,
"avg_score": null,
"num_lines": null
} |
"""A tool written in Python and Elementary to provide a GUI for configuring Unix users and groups"""
import elementary
import evas
#Import our internal parts
from optionsWindow import *
from UserManager import *
from TimeManager import *
from TaskManager import *
from ScreenSetup import *
class Eccess(object):
    """Top-level GUI object: one window hosting a Naviframe that pages
    between the tool panels (options, users, time, tasks, screen setup)."""

    def __init__( self ):
        self.mainWindow = elementary.StandardWindow("eCcess", "eCcess - System Tool")
        # Quit the toolkit main loop when the window is closed.
        self.mainWindow.callback_delete_request_add(lambda o: elementary.exit())
        self.nf = elementary.Naviframe(self.mainWindow)
        self.nf.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
        self.nf.size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL)
        # Scroller experiment left disabled:
        #self.scroller = elementary.Scroller(self.mainWindow)
        #self.scroller.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_FILL)
        #self.scroller.size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL)
        #self.scroller.content_set(self.nf)
        #self.scroller.policy_set(0, 1)
        self.nf.show()
        self.mainWindow.resize_object_add(self.nf)
        #self.scroller.show()

    def launch( self, launchto=False ):
        """Show the window, then optionally jump straight to one panel
        ("users", "time" or "taskmanager")."""
        self.mainWindow.resize(800, 400)
        self.mainWindow.show()
        self.options_spawn()
        if launchto == "users":
            self.users_groups_spawn()
        elif launchto == "time":
            self.time_date_spawn()
        elif launchto == "taskmanager":
            self.task_manager_spawn()

    def options_spawn( self, bt=False ):
        # Push the main options panel onto the Naviframe.
        self.nf.item_simple_push(optionsWindow(self))

    def users_groups_spawn( self, bt=False ):
        print "Users and groups CB"
        self.nf.item_simple_push(UserManager(self))

    def time_date_spawn( self, bt=False ):
        print "Times and date CB"
        self.nf.item_simple_push(TimeManager(self))

    def task_manager_spawn( self, bt=False ):
        print "Task manager CB"
        self.nf.item_simple_push(TaskManager(self))

    def screen_setup_spawn( self, bt=False ):
        print "Screen setup CB"
        self.nf.item_simple_push(ScreenSetup(self))
if __name__ == "__main__":
    elementary.init()
    GUI = Eccess()
    # NOTE(review): `sys` is never imported in this file; presumably it
    # arrives via one of the star imports above -- verify.
    if len(sys.argv) == 1:
        GUI.launch()
    else:
        # Jump straight to the panel named on the command line.
        GUI.launch(sys.argv[1])
    elementary.run()
    elementary.shutdown()
| {
"repo_name": "JeffHoogland/eccess",
"path": "opt/eccess/eccess.py",
"copies": "1",
"size": "2339",
"license": "bsd-3-clause",
"hash": 5365136896935670000,
"line_mean": 33.9104477612,
"line_max": 100,
"alpha_frac": 0.6460025652,
"autogenerated": false,
"ratio": 3.3224431818181817,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9416091442059817,
"avg_score": 0.01047086099167284,
"num_lines": 67
} |
# A top-down merge sort
def mergesort(list_to_sort):
    """Sort a list with top-down merge sort and return a new sorted list.

    The input list is not modified.
    :param list_to_sort: List of mutually comparable items.
    :return: A new list of the same items in ascending order.
    """
    # A list of length 0 or 1 is sorted by definition
    if len(list_to_sort) < 2:
        return list_to_sort
    # Split the list into left and right halves.  Use floor division so the
    # index stays an int: the original `/` produces a float under Python 3
    # and breaks the slicing.
    midpoint = len(list_to_sort) // 2
    # Recursively sort each half exactly once; the original sorted each
    # half a second time, doing redundant (already-sorted) work.
    left = mergesort(list_to_sort[:midpoint])
    right = mergesort(list_to_sort[midpoint:])
    # Then merge them
    return merge(left, right)


def merge(left, right):
    """Merge two sorted lists into one sorted list.

    :param left: Sorted list.
    :param right: Sorted list.
    :return: A new sorted list containing all items from both inputs.
    """
    res = list()
    i = j = 0
    # Take the smaller head item from either list until one is exhausted.
    # Index-based iteration replaces the original per-item slicing, which
    # made the merge quadratic.  Ties take the right item, exactly as the
    # original `left[0] < right[0]` comparison did.
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            res.append(left[i])
            i += 1
        else:
            res.append(right[j])
            j += 1
    # Whatever remains in either list is already sorted and not smaller
    # than anything in res, so it can be appended wholesale.
    res.extend(left[i:])
    res.extend(right[j:])
    return res
if __name__ == '__main__':
    # Quick smoke test.  The parenthesized print call works on both
    # Python 2 (single-argument form) and Python 3, unlike the original
    # Python-2-only print statement.
    test_sort = [1, 0, 9, 2, 8, 3, 7, 4, 6, 5]
    print(mergesort(test_sort))
"repo_name": "ross-t/python-ds",
"path": "Sorting/mergesort.py",
"copies": "1",
"size": "1046",
"license": "mit",
"hash": 833755693386460700,
"line_mean": 25.8461538462,
"line_max": 83,
"alpha_frac": 0.6692160612,
"autogenerated": false,
"ratio": 2.804289544235925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8803231374866847,
"avg_score": 0.03405484611381563,
"num_lines": 39
} |
# A topic is root that data is attached to. It is the equivalent of a source in searchlight/solink and acts as a table which has columns(Fields) and rows(Feeds).
#
class Topic():
    """Client wrapper for the Sensit ``/api/topics`` endpoints.

    A topic is a root that data is attached to.  It is the equivalent of a
    source in searchlight/solink and acts as a table which has columns
    (Fields) and rows (Feeds).
    """

    def __init__(self, client):
        # HTTP client used to issue the requests (get/post/put/delete).
        self.client = client

    # All methods default `options` to None instead of the original `{}`
    # to avoid the mutable-default-argument pitfall; behaviour for every
    # caller is unchanged.

    def list(self, options=None):
        """List topics.  GET /api/topics

        Requires authorization of **read_any_data**, or
        **read_application_data**.

        :param options: Optional dict; 'query' supplies the request body.
        :return: The client's response.
        """
        options = options if options is not None else {}
        body = options['query'] if 'query' in options else {}
        response = self.client.get('/api/topics', body, options)
        return response

    def find(self, options=None):
        """Fetch a single topic.  GET /api/topics/:id

        Requires authorization of **read_any_data**, or
        **read_application_data**.

        :param options: Optional dict; 'query' supplies the request body.
        :return: The client's response.
        """
        options = options if options is not None else {}
        body = options['query'] if 'query' in options else {}
        response = self.client.get('/api/topics/:id', body, options)
        return response

    def create(self, topic, options=None):
        """Create a topic.  POST /api/topics

        Requires authorization of **manage_any_data**, or
        **manage_application_data**.

        :param topic: A hash containing the name/id of the topic (required)
            and a description of the topic.
        :param options: Optional dict; 'body' supplies extra request-body
            fields (note: that dict is mutated to carry the topic).
        :return: The client's response.
        """
        options = options if options is not None else {}
        body = options['body'] if 'body' in options else {}
        body['topic'] = topic
        response = self.client.post('/api/topics', body, options)
        return response

    def update(self, topic, options=None):
        """Update a topic.  PUT /api/topics/:id

        Requires authorization of **manage_any_data**, or
        **manage_application_data**.

        :param topic: A hash containing the name/id of the topic (required)
            and a description of the topic.
        :param options: Optional dict; 'body' supplies extra request-body
            fields (note: that dict is mutated to carry the topic).
        :return: The client's response.
        """
        options = options if options is not None else {}
        body = options['body'] if 'body' in options else {}
        body['topic'] = topic
        response = self.client.put('/api/topics/:id', body, options)
        return response

    def delete(self, options=None):
        """Delete a topic.  DELETE /api/topics/:id

        Requires authorization of **manage_any_data**, or
        **manage_application_data**.

        :param options: Optional dict; 'body' supplies the request body.
        :return: The client's response.
        """
        options = options if options is not None else {}
        body = options['body'] if 'body' in options else {}
        response = self.client.delete('/api/topics/:id', body, options)
        return response
| {
"repo_name": "cwadding/sensit-python",
"path": "sensit/api/topic.py",
"copies": "1",
"size": "1878",
"license": "mit",
"hash": 5147568133228531000,
"line_mean": 29.7868852459,
"line_max": 161,
"alpha_frac": 0.6719914803,
"autogenerated": false,
"ratio": 3.3180212014134276,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9156652921738445,
"avg_score": 0.06667195199499641,
"num_lines": 61
} |
""" A top-level experimental script that run 100 iterations of
the Simple example (see simfMRI.exp_examples.Simple()). """
from simfMRI.exp_examples import Simple
from simfMRI.analysis.plot import hist_t_all_models
from simfMRI.runclass import Run
class RunSimple100(Run):
    """Configuration for a 100-iteration run of the Simple experiment."""

    def __init__(self):
        # Older Run base classes may not define __init__; tolerate that.
        try:
            Run.__init__(self)
        except AttributeError:
            pass

        # Experiment class instantiated for every iteration.
        self.BaseClass = Simple

        # -- User-configurable globals
        self.nrun = 100
        self.TR = 2
        self.ISI = 2
        self.model_conf = "simple.ini"
        self.savedir = "testdata"
        self.ntrial = 60

        # -- Optional globals
        self.ncore = None

        # -- Misc: go() fills this with per-iteration RandomState instances.
        self.prngs = None
if __name__ == "__main__":
    sim = RunSimple100()
    # Run serially; per-iteration results are stored on the instance.
    sim.go(parallel=False)
    # Persist the accumulated results under <savedir>/simple100 (hdf5).
    results_name = "simple{0}".format(sim.nrun)
    sim.save_results(results_name)
    # Plot t histograms for every model; each figure is autosaved.
    hist_t_all_models(sim.savedir, results_name+".hdf5", results_name)
| {
"repo_name": "parenthetical-e/simfMRI",
"path": "bin/simple100.py",
"copies": "1",
"size": "1454",
"license": "bsd-2-clause",
"hash": 7356698313269691000,
"line_mean": 26.4339622642,
"line_max": 70,
"alpha_frac": 0.5522696011,
"autogenerated": false,
"ratio": 3.9086021505376345,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49608717516376344,
"avg_score": null,
"num_lines": null
} |
""" A top-level experimental script that run 100 iterations of
the TwoCond example (see simfMRI.exp_examples.TwoCond()). """
from simfMRI.exp_examples import TwoCond
from simfMRI.analysis.plot import hist_t_all_models
from simfMRI.runclass import Run
class RunTwoCond100(Run):
    """Configuration for a 100-iteration run of the TwoCond experiment."""

    def __init__(self):
        # Older Run base classes may not define __init__; tolerate that.
        try:
            Run.__init__(self)
        except AttributeError:
            pass

        # Experiment class instantiated for every iteration.
        self.BaseClass = TwoCond

        # -- User-configurable globals
        self.nrun = 100
        self.TR = 2
        self.ISI = 2
        self.model_conf = "twocond.ini"
        self.savedir = "testdata"
        self.ntrial = 60

        # -- Optional globals
        self.ncore = None

        # -- Misc: go() fills this with per-iteration RandomState instances.
        self.prngs = None
if __name__ == "__main__":
    sim = RunTwoCond100()
    # Run serially; per-iteration results are stored on the instance.
    sim.go(parallel=False)
    # Persist the accumulated results under <savedir>/twocond100 (hdf5).
    results_name = "twocond{0}".format(sim.nrun)
    sim.save_results(results_name)
    # Plot t histograms for every model; each figure is autosaved.
    hist_t_all_models(sim.savedir, results_name+".hdf5", results_name)
| {
"repo_name": "parenthetical-e/simfMRI",
"path": "bin/twocond100.py",
"copies": "1",
"size": "1463",
"license": "bsd-2-clause",
"hash": -697689086135735200,
"line_mean": 26.6037735849,
"line_max": 70,
"alpha_frac": 0.5550239234,
"autogenerated": false,
"ratio": 3.8398950131233596,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9777580121565859,
"avg_score": 0.02346776299150006,
"num_lines": 53
} |
""" A top-level experimental script that run 100 iterations (on 2 cores) of
the RW example (see simfMRI.exp_examples.RW()). """
import functools
from simfMRI.exp_examples import RW
from simfMRI.analysis.plot import hist_t_all_models
from simfMRI.runclass import Run
class RunRW100(Run):
    """Configuration for a 100-iteration run of the RW experiment."""

    def __init__(self):
        # Older Run base classes may not define __init__; tolerate that.
        try:
            Run.__init__(self)
        except AttributeError:
            pass

        # Experiment class instantiated for every iteration.  partial()
        # normalises the signature so that, like the other examples, it can
        # be called as BaseClass(ntrial, TR=..., ISI=..., prng=prng).
        self.BaseClass = functools.partial(RW, behave="random")

        # -- User-configurable globals
        self.nrun = 100
        self.TR = 2
        self.ISI = 2
        self.model_conf = "rw.ini"
        self.savedir = "testdata"
        self.ntrial = 60

        # -- Optional globals
        self.ncore = 2

        # -- Misc: go() fills this with per-iteration RandomState instances.
        self.prngs = None
if __name__ == "__main__":
sim = RunRW100()
sim.go(parallel=False)
## Results get stored internally.
# Writing the results to a hdf5
results_name = "rw{0}".format(sim.nrun)
sim.save_results(results_name)
# And plot all the models
# (each is autosaved).
hist_t_all_models(sim.savedir, results_name+".hdf5", results_name)
| {
"repo_name": "parenthetical-e/simfMRI",
"path": "bin/rw100.py",
"copies": "1",
"size": "1654",
"license": "bsd-2-clause",
"hash": 8175993088069738000,
"line_mean": 27.5172413793,
"line_max": 76,
"alpha_frac": 0.5501813785,
"autogenerated": false,
"ratio": 3.776255707762557,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.970062636246994,
"avg_score": 0.025162144758523398,
"num_lines": 58
} |
""" A top-level experimental script that run 100 iterations (on 2 cores) of
the Simple example (see simfMRI.exp_examples.Simple()). """
from simfMRI.exp_examples import Simple
from simfMRI.analysis.plot import hist_t_all_models
from simfMRI.runclass import Run
class RunSimple100(Run):
    """An example of a 100 iteration Simple experimental Run()."""
    def __init__(self):
        # Tolerate base __init__ implementations that touch attributes
        # this subclass has not defined yet.
        try:
            Run.__init__(self)
        except AttributeError:
            pass
        # Experiment class instantiated on each iteration, i.e. called
        # as BaseClass().
        self.BaseClass = Simple
        # User globals.
        self.nrun = 100
        self.ntrial = 60
        self.TR = 2
        self.ISI = 2
        self.model_conf = "simple.ini"
        self.savedir = "testdata"
        # Optional globals.
        self.ncore = 2
        # Populated by go(): one RandomState() instance per iteration.
        self.prngs = None
if __name__ == "__main__":
sim = RunSimple100()
sim.go(parallel=True)
## Results get stored internally.
# Writing the results to a hdf5
results_name = "parallel{0}".format(sim.nrun)
sim.save_results(results_name)
# And plot all the models
# (each is autosaved).
hist_t_all_models(sim.savedir, results_name+".hdf5", results_name)
| {
"repo_name": "parenthetical-e/simfMRI",
"path": "bin/parallel100.py",
"copies": "1",
"size": "1465",
"license": "bsd-2-clause",
"hash": -2906993530622706000,
"line_mean": 26.641509434,
"line_max": 76,
"alpha_frac": 0.55221843,
"autogenerated": false,
"ratio": 3.885941644562334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4938160074562334,
"avg_score": null,
"num_lines": null
} |
# A top-level interface to the whole of xia2, for data processing & analysis.
import glob
import itertools
import logging
import math
import os
import platform
import sys
import h5py
from dials.util import Sorry
from xia2.Handlers.Citations import Citations
from xia2.Handlers.Environment import df
from xia2.XIA2Version import Version
logger = logging.getLogger("xia2.Applications.xia2_main")
def check_environment():
    """Check the environment we are running in..."""
    # NOTE(review): 0x02070000 is Python 2.7.0; this guard predates
    # Python-3-only support and could likely be tightened.
    if sys.hexversion < 0x02070000:
        raise RuntimeError("Python versions older than 2.7 are not supported")
    import cctbx
    executable = sys.executable
    # cctbx package root is three path components above cctbx/__init__.py.
    cctbx_dir = os.sep.join(cctbx.__file__.split(os.sep)[:-3])
    # to help wrapper code - print process id...
    logger.debug("Process ID: %d", os.getpid())
    logger.info("Environment configuration...")
    logger.info("Python => %s", executable)
    logger.info("CCTBX => %s", cctbx_dir)
    # CCP4 environment variables must be set and free of stray whitespace.
    ccp4_keys = ["CCP4", "CCP4_SCR"]
    for k in ccp4_keys:
        v = os.getenv(k)
        if not v:
            raise RuntimeError("%s not defined - is CCP4 set up?" % k)
        if not v == v.strip():
            raise RuntimeError('spaces around "%s"' % v)
        logger.info(f"{k} => {v}")
    from xia2.Handlers.Flags import Flags
    logger.info("Starting directory: %s", Flags.get_starting_directory())
    logger.info("Working directory: %s", os.getcwd())
    logger.info("Free space: %.2f GB", df() / math.pow(2, 30))
    hostname = platform.node().split(".")[0]
    logger.info("Host: %s", hostname)
    logger.info("Contact: xia2.support@gmail.com")
    logger.info(Version)
    # temporary workaround to bug in pointless...
    if " " in os.getcwd():
        raise RuntimeError(
            "Space in working directory " "(https://github.com/xia2/xia2/issues/114)"
        )
def check_hdf5_master_files(master_files):
    """Check the input HDF5 master files look a little bit like HDF5 master
    files and not just the data files: if the latter then sys.exit() with a
    helpful message suggesting nearby candidate master files."""
    bad = []
    for filename in master_files:
        try:
            with h5py.File(filename, "r") as fh:
                # A bare data file exposes /data but lacks the /entry group.
                data_only = b"/data" in fh and b"/entry" not in fh
        except OSError:
            # Unreadable as HDF5 at all: also not a usable master file.
            bad.append(filename)
        else:
            if data_only:
                bad.append(filename)
    if not bad:
        return
    # Suggest plausible master files sitting next to the offending inputs.
    search_dirs = {os.path.split(name)[0] for name in bad}
    masters = itertools.chain.from_iterable(
        glob.glob(os.path.join(d, "*_master.h5")) for d in search_dirs
    )
    nxss = itertools.chain.from_iterable(
        glob.glob(os.path.join(d, "*.nxs")) for d in search_dirs
    )
    message = (
        "Provided input files not master files:\n "
        + "\n ".join(bad)
        + "\ndo you mean one of:\n "
        + "\n ".join(itertools.chain.from_iterable((masters, nxss)))
    )
    sys.exit(message)
def get_command_line():
    """Parse the xia2 command line; synthesise an automatic.xinfo when none
    was supplied, and return the populated CommandLine handler."""
    from xia2.Handlers.CommandLine import CommandLine
    CommandLine.print_command_line()
    if not CommandLine.get_xinfo():
        # write an xinfo file then
        xinfo = os.path.join(os.getcwd(), "automatic.xinfo")
        argv = CommandLine.get_argv()
        if not CommandLine.get_directory():
            # Treat any argument that is an existing directory as an image
            # directory.
            directories = []
            for arg in argv:
                if os.path.isdir(arg):
                    directories.append(os.path.abspath(arg))
            if not directories and not CommandLine.get_hdf5_master_files():
                raise Sorry(
                    "No image directory found in command line arguments. "
                    "Run xia2 without any options for command line help."
                )
        else:
            directories = CommandLine.get_directory()
        directories = [os.path.abspath(d) for d in directories]
        from xia2.Applications.xia2setup import write_xinfo
        # Fail early if supplied HDF5 files are data files, not master files.
        check_hdf5_master_files(CommandLine.get_hdf5_master_files())
        if CommandLine.get_template() or CommandLine.get_hdf5_master_files():
            write_xinfo(
                xinfo,
                directories,
                template=CommandLine.get_template(),
                hdf5_master_files=CommandLine.get_hdf5_master_files(),
            )
        else:
            write_xinfo(xinfo, directories)
        CommandLine.set_xinfo(xinfo)
    return CommandLine
def write_citations():
    """Log the programs used with their citations and write the BibTeX
    versions to xia2-citations.bib in the working directory."""
    # Lazy %-style logger args: formatting is skipped when the level is off.
    logger.info("XIA2 used... %s", ", ".join(Citations.get_programs()))
    logger.info("Here are the appropriate citations (BIBTeX in xia2-citations.bib.)")
    for citation in Citations.get_citations_acta():
        logger.info(citation)
    # and write the bibtex versions; 'with' guarantees the handle is closed
    # even if a write fails (the original leaked it on error).
    with open("xia2-citations.bib", "w") as out:
        for citation in Citations.get_citations():
            out.write("%s\n" % citation)
def help():
    """Print out some help for xia2."""
    # All usage text goes straight to stdout; ordering is user-visible.
    sys.stdout.write("%s\n" % Version)
    # FIXME also needs to make reference to Phil input
    # FIXME ideally should move all command-line functionality over to Phil...
    # FIXME these should also be generated in automatic way #42
    sys.stdout.write("An expert system for automated reduction of X-Ray\n")
    sys.stdout.write("diffraction data from macromolecular crystals\n")
    sys.stdout.write(
        """
Command-line options to xia2:
[pipeline=XXX] select processing pipeline, with XXX one of:
  3d    XDS, XSCALE
  3dii  XDS, XSCALE, using all images for autoindexing
  dials DIALS, scaling with DIALS
  dials-aimless DIALS, scaling with AIMLESS
"""
    )
    sys.stdout.write("[xinfo=foo.xinfo] or [/path/to/images]\n\n")
    sys.stdout.write("[d_min=2.8] (say, applies to all sweeps)\n")
    sys.stdout.write("[nproc=4] run on 4 processors (automatic)\n")
    sys.stdout.write("[space_group=C2] (for example)\n")
    sys.stdout.write("[unit_cell=50,50,50,90,90,90] (for example)\n")
    sys.stdout.write("[reverse_phi=True]\n")
    sys.stdout.write(
        "[mosflm_beam_centre=x,y] (in mm, following the MOSFLM convention, applies to all sweeps)\n"
    )
    sys.stdout.write("[dials.fast_mode=True] for very fast processing\n")
    sys.stdout.write("[atom=se] (say) - this is for xia2setup\n")
    sys.stdout.write("[project=foo] (say) - this is for xia2setup\n")
    sys.stdout.write("[crystal=bar] (say) - this is for xia2setup\n\n")
    sys.stdout.write("Sensible command lines:\n")
    sys.stdout.write("xia2 (pipeline=dials|3d|..) xinfo=foo.xinfo\n")
    sys.stdout.write("xia2 project=foo crystal=bar (pipeline=dials|3d|..) /data/path\n")
    sys.stdout.write("xia2 image=/data/path/segment_1_0001.cbf:1:900\n")
| {
"repo_name": "xia2/xia2",
"path": "src/xia2/Applications/xia2_main.py",
"copies": "1",
"size": "6647",
"license": "bsd-3-clause",
"hash": 3900493567937999000,
"line_mean": 30.8038277512,
"line_max": 100,
"alpha_frac": 0.6229878141,
"autogenerated": false,
"ratio": 3.478283621140764,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9600209407510891,
"avg_score": 0.00021240554597453613,
"num_lines": 209
} |
# In-memory registry: each entry is [codigo, nome, nacionalidade, idade].
# Codes are supplied by the caller.
atores = []


def CadastrarAtor(cod_ator, nome, nacionalidade, idade):
    """Register a new actor record in the module-level registry."""
    atores.append([cod_ator, nome, nacionalidade, idade])
    print(" \n \n ========= Ator cadastrado ============")


def BuscarAtor(cod_ator):
    """Return (and print) the record with the given code, or None."""
    registro = next((r for r in atores if r[0] == cod_ator), None)
    if registro is not None:
        print(registro)
    return registro


def BuscarAtores():
    """Print and return the full registry."""
    print(atores)
    return atores


def RemoverAtor(cod_ator):
    """Delete the record with the given code; return True on success."""
    for posicao, registro in enumerate(atores):
        if registro[0] == cod_ator:
            del atores[posicao]
            print("\n \n ========== Ator removido ==========")
            return True
    print("\n \n ========= Ator não encontrado ===========")
    return False


def RemoverTodosAtores():
    """Empty the registry and return the (now empty) list."""
    global atores
    atores = []
    print('\n \n ========== Atores removidos =========== ')
    return atores


def IniciarAtores():
    """Seed the registry with three sample actors."""
    CadastrarAtor(1, "Messias", "Brasileiro", 21)
    CadastrarAtor(2, "Ygor", "Brasileiro", 20)
    CadastrarAtor(3, "Guilherme", "Brasileiro", 18)
| {
"repo_name": "ygorclima/apd",
"path": "Ator/ControllerAtor.py",
"copies": "1",
"size": "1162",
"license": "apache-2.0",
"hash": -8402252687898851000,
"line_mean": 23.1875,
"line_max": 62,
"alpha_frac": 0.5469422911,
"autogenerated": false,
"ratio": 2.8455882352941178,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8643138954596806,
"avg_score": 0.04987831435946224,
"num_lines": 48
} |
from __future__ import division, print_function  # For Python 2 compatibility

import cmath
import math

import numpy as np
class Polynomial(object):
    """A univariate polynomial stored as coefficients, constant term first."""

    def __init__(self, *coeffs):
        """Creates a Polynomial with the coefficients, starting with the constant"""
        # Fix: np.Array does not exist; np.array is the constructor.
        self.coeffs = np.array(coeffs)
        self.order = len(self.coeffs) - 1

    def coeff(self, k):
        """-> coefficient of the `k`th order term"""
        return self.coeffs[k]

    def val(self, v):
        """-> evaluate P(v)"""
        return sum(c * v ** k for k, c in enumerate(self.coeffs))

    def roots(self):
        """-> roots of the polynomial (supports orders 0 through 2).

        Raises ZeroDivisionError for a non-zero constant (no roots) and
        ArithmeticError for order > 2.
        """
        if self.order == 0:
            if self.coeffs[0] != 0:
                raise ZeroDivisionError("Wut, %f == 0 ?" % self.coeffs[0])
            else:
                return float('inf')
        elif self.order == 1:
            b, a = self.coeffs
            return -float(b) / a
        elif self.order == 2:
            c, b, a = self.coeffs
            dis = b * b - 4 * a * c
            if dis >= 0:
                disqrt = math.sqrt(dis)   # real roots (fix: math was never imported)
            else:
                disqrt = cmath.sqrt(dis)  # complex conjugate pair
            return (-b - disqrt) / 2. / a, (-b + disqrt) / 2. / a
        else:
            raise ArithmeticError("Dunno how to solve :(")

    def add(self, other):
        """-> self + other"""
        return sum_poly(self, other)

    def mul(self, other):
        """-> self * other, by coefficient convolution (automatic FOILing)."""
        prod = []
        # Let p be the longer coefficient sequence, q the shorter one.
        if self.order >= other.order:
            p, q = self.coeffs, other.coeffs
            min_order, max_order = other.order, self.order
        else:
            p, q = other.coeffs, self.coeffs
            min_order, max_order = self.order, other.order
        # Fix: xrange is Python 2 only; range works on both.
        # c_k for k = 0 .. min_order
        for i in range(1, min_order + 2):
            prod.append(sum(a * b for a, b in zip(p, reversed(q[:i]))))
        # c_k for k = min_order + 1 .. min_order + max_order
        for j in range(1, max_order + 1):
            prod.append(sum(a * b for a, b in zip(p[j:], reversed(q))))
        # Fix: coefficients must be splatted; Polynomial(prod) would create a
        # degenerate polynomial whose single "coefficient" is a list.
        return Polynomial(*prod)

    def __add__(self, other):
        return self.add(other)

    def __mul__(self, other):
        return self.mul(other)

    def __str__(self):
        return 'Polynomial(%r)' % list(self.coeffs)

    def __repr__(self):
        return self.__str__()
def sum_poly(*polys):
    """Sum any number of Polynomials of possibly different orders.

    Coefficients are stored constant-first, so shorter coefficient sequences
    must be zero-padded at the HIGH-order end. (The original padded the front,
    which silently shifted every coefficient to a higher power, and also
    concatenated a plain list with a numpy array, which does not concatenate.)
    """
    max_order = max(p.order for p in polys)
    # list(...) both copies and makes the zero padding a plain concatenation.
    padded = [list(p.coeffs) + [0] * (max_order - p.order) for p in polys]
    sums = [sum(cs) for cs in zip(*padded)]
    # Splat: Polynomial takes coefficients as *args, not a single list.
    return Polynomial(*sums)
| {
"repo_name": "vulpicastor/pymisc",
"path": "polynomial.py",
"copies": "1",
"size": "3874",
"license": "mit",
"hash": 3485827588143269400,
"line_mean": 36.25,
"line_max": 93,
"alpha_frac": 0.5978316985,
"autogenerated": false,
"ratio": 3.7684824902723735,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9704147075391203,
"avg_score": 0.032433422676234204,
"num_lines": 104
} |
# A toy example to use python to control the game.
from unrealcv import client
from unrealcv.util import read_npy, read_png
import matplotlib.pyplot as plt
import numpy as np
# Usage text shown once the client connection succeeds.
help_message = '''
A demo showing how to control a game using python
a, d: rotate camera to left and right.
q, e: move camera up and down.
left, right, up, down: move around
'''
# Free the 's' key: matplotlib binds it to "save figure" by default.
plt.rcParams['keymap.save'] = ''
def main():
    """Show a live UnrealCV camera view and drive it from the keyboard.

    Connects to a running UnrealCV game (returns early with a hint if none
    is running), then maps key presses to camera rotation/translation and
    refreshes the displayed frame after every move.
    """
    loc = None  # camera location [x, y, z]; set after connecting
    rot = None  # camera rotation; set after connecting
    fig, ax = plt.subplots()
    img = np.zeros((480, 640, 4))
    ax.imshow(img)
    def onpress(event):
        """Apply one key press: update the pose, push it to the game, redraw."""
        rot_offset = 10 # Rotate 10 degrees for each key press
        loc_offset = 10 # Move 10.0 units for each key press
        if event.key == 'a': rot[1] -= rot_offset
        if event.key == 'd': rot[1] += rot_offset
        if event.key == 'q': loc[2] += loc_offset # Move up
        if event.key == 'e': loc[2] -= loc_offset # Move down
        if event.key == 'w': loc[1] -= loc_offset
        if event.key == 's': loc[1] += loc_offset
        if event.key == 'up': loc[1] -= loc_offset
        if event.key == 'down': loc[1] += loc_offset
        if event.key == 'left': loc[0] -= loc_offset
        if event.key == 'right': loc[0] += loc_offset
        cmd = 'vset /camera/0/rotation %s' % ' '.join([str(v) for v in rot])
        client.request(cmd)
        cmd = 'vset /camera/0/location %s' % ' '.join([str(v) for v in loc])
        client.request(cmd)
        res = client.request('vget /camera/0/lit png')
        img = read_png(res)
        ax.imshow(img)
        fig.canvas.draw()
    client.connect()
    if not client.isconnected():
        # Fix: these were Python-2 print statements (a syntax error on
        # Python 3); the call form works on both.
        print('UnrealCV server is not running. Run the game from http://unrealcv.github.io first.')
        return
    else:
        print(help_message)
    init_loc = [float(v) for v in client.request('vget /camera/0/location').split(' ')]
    init_rot = [float(v) for v in client.request('vget /camera/0/rotation').split(' ')]
    loc = init_loc; rot = init_rot
    fig.canvas.mpl_connect('key_press_event', onpress)
    plt.title('Keep this window in focus, it will be used to receive key press event')
    plt.axis('off')
    plt.show() # Add event handler
# Script entry point.
if __name__ == '__main__':
    main()
| {
"repo_name": "unrealcv/unrealcv",
"path": "examples/interactive_control.py",
"copies": "1",
"size": "2271",
"license": "mit",
"hash": 1570428569355378700,
"line_mean": 30.5416666667,
"line_max": 98,
"alpha_frac": 0.5887274328,
"autogenerated": false,
"ratio": 3.2723342939481266,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43610617267481266,
"avg_score": null,
"num_lines": null
} |
# A toy example to use python to control the game.
import sys
sys.path.append('..')
from unrealcv import client
import matplotlib.pyplot as plt
import numpy as np
# Usage text shown once the client connection succeeds.
help_message = '''
A demo showing how to control a game using python
a, d: rotate camera to left and right.
q, e: move camera up and down.
'''
# Free the 's' key: matplotlib binds it to "save figure" by default.
plt.rcParams['keymap.save'] = ''
def onpress(event):
    """Handle a key press: nudge the shared camera pose and send it to the game.

    Reads/mutates the module-level `loc` and `rot` lists set up by main().
    """
    # Fix: `print event.key` was a Python-2 statement (syntax error on 3).
    print(event.key)
    if event.key == 'a':
        rot[1] += 1
    if event.key == 'd':
        rot[1] -= 1
    if event.key == 'q':
        loc[2] += 1
    if event.key == 'e':
        loc[2] -= 1
    cmd = 'vset /camera/0/rotation %s' % ' '.join([str(v) for v in rot])
    client.request(cmd)
    cmd = 'vset /camera/0/location %s' % ' '.join([str(v) for v in loc])
    client.request(cmd)
# Camera pose shared with onpress(); initialised inside main().
loc = None
rot = None
def main():
    """Connect to UnrealCV and wire keyboard control into a matplotlib window."""
    client.connect()
    if not client.isconnected():
        # Fix: these were Python-2 print statements (a syntax error on
        # Python 3); the call form works on both.
        print('UnrealCV server is not running. Run the game from http://unrealcv.github.io first.')
        return
    else:
        print(help_message)
    init_loc = [float(v) for v in client.request('vget /camera/0/location').split(' ')]
    init_rot = [float(v) for v in client.request('vget /camera/0/rotation').split(' ')]
    global rot, loc
    loc = init_loc; rot = init_rot
    image = np.zeros((300, 300))
    fig, ax = plt.subplots()
    fig.canvas.mpl_connect('key_press_event', onpress)
    ax.imshow(image)
    plt.title('Keep this window in focus, used to receive key press event')
    plt.axis('off')
    plt.show() # Add event handler
# Script entry point.
if __name__ == '__main__':
    main()
| {
"repo_name": "qiuwch/unrealcv",
"path": "client/examples/interactive-control.py",
"copies": "2",
"size": "1540",
"license": "mit",
"hash": -5199665551667525000,
"line_mean": 25.5517241379,
"line_max": 98,
"alpha_frac": 0.6064935065,
"autogenerated": false,
"ratio": 3.2285115303983227,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9797607672340114,
"avg_score": 0.007479472911641695,
"num_lines": 58
} |
"""A Trade is a contract signed between two Counterparties at a given datetime.
The contract referred to is typically an instance of an Asset.
In addition to the claims described within the contract, there may optionally
be an initial settlement of a Bullet payment.
"""
from __future__ import absolute_import, division, print_function
from .assets import BulletPayment
class Trade(object):
    """Base class of all Financial Trades.

    A Trade is a contract signed between two Counterparties on some date.

    Attributes
    ----------
    contract: Asset
        The Asset on which the Trade is based.
    date: datetime, optional
        Date on which trade was made.
    counterparty: Counterparty, optional
        The Counterparty with whom the trade is made.
    settlement: BulletPayment, optional
        Initial payment made to settle the trade, if any.
    """

    def __init__(self, contract, date=None, counterparty=None,
                 settlement_dt=None, settlement_amt=None, settlement_ccy=None):
        """
        Parameters
        ----------
        contract: Asset
            The Asset on which the Trade is based.
        date: datetime, optional
            Date on which trade was made.
        counterparty: Counterparty, optional
            The Counterparty with whom the trade is made.
        settlement_dt: date, optional
            Date on which a BulletPayment is made to settle trade.
        settlement_amt: float, optional
            Amount of the BulletPayment made to settle trade.
        settlement_ccy: str, optional
            Currency of the BulletPayment; defaults to the contract's
            currency when that is unambiguous.
        """
        self.contract = contract
        self.date = date
        self.counterparty = counterparty
        if settlement_dt is None and settlement_amt is None:
            self.settlement = None
        else:
            # Either both of dt/amt must be given, or neither.
            # Fix: the two diagnostic messages were swapped in the original.
            assert settlement_dt, \
                'settlement_amt provided, but not settlement_dt'
            assert settlement_amt, \
                'settlement_dt provided, but not settlement_amt'
            if settlement_ccy is None:
                # Check if currency is unambiguous
                try:
                    settlement_ccy = contract.currency
                except AttributeError:
                    raise AttributeError('settlement_ccy must be set.')
            self.settlement = BulletPayment(dt_payment=settlement_dt,
                                            currency=settlement_ccy,
                                            notional=settlement_amt)

    @property
    def contract_type(self):
        """Type of the underlying contract."""
        return type(self.contract)

    @classmethod
    def with_settlement_contract(cls, date, underlying_contract,
                                 settlement_contract=None, counterparty=None):
        """Create a Trade given an explicit settlement contract.

        Fix: the original assigned to *class* attributes (mutating the class
        for every caller) and returned None; it now builds and returns a
        proper instance with the settlement attached directly.
        """
        trade = cls(underlying_contract, date=date, counterparty=counterparty)
        trade.settlement = settlement_contract
        return trade

    # TODO Check naming convention of classmethods
    # TODO Design question - Should I include asset-specific constructors?
    @classmethod
    def bullet_payment(cls, dt_payment, currency="USD", notional=1.0,
                       dt_trade=None, counterparty=None, settlement_dt=None,
                       settlement_amt=None, settlement_ccy="USD"):
        """Create Trade of a BulletPayment."""
        payment_contract = BulletPayment(dt_payment, currency, notional)
        return cls(payment_contract, dt_trade, counterparty,
                   settlement_dt, settlement_amt, settlement_ccy)
class Counterparty(object):
    """A party with whom a Trade is made. Placeholder; not yet implemented."""
    # TODO Define Counterparty
    pass
class Portfolio(object):
    """A Portfolio of Trades and/or subportfolios.

    Attributes
    ----------
    trades: list of Trade
        Direct holdings.
    subportfolios: dict
        Mapping of name to nested Portfolio.
    """

    def __init__(self, trades=None, subportfolios=None):
        # Defensive copies: never alias caller-owned containers.
        self.trades = [] if trades is None else list(trades)
        self.subportfolios = {} if subportfolios is None else dict(subportfolios)

    @classmethod
    def of_trades(cls, trades):
        """Construct a Portfolio holding only direct trades."""
        return cls(trades=trades)

    @classmethod
    def of_subportfolios(cls, subportfolios):
        """Construct a Portfolio holding only named sub-portfolios."""
        return cls(subportfolios=subportfolios)
| {
"repo_name": "caseyclements/pennies",
"path": "pennies/trading/trades.py",
"copies": "1",
"size": "4310",
"license": "apache-2.0",
"hash": -6813016562903712000,
"line_mean": 33.7580645161,
"line_max": 79,
"alpha_frac": 0.6220417633,
"autogenerated": false,
"ratio": 4.331658291457287,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5453700054757287,
"avg_score": null,
"num_lines": null
} |
"""A Trained Supervised Model."""
import time
from datetime import datetime
import numpy as np
import pandas as pd
import healthcareai.common.database_writers
import healthcareai.common.file_io_utilities as hcai_io
import healthcareai.common.helpers as hcai_helpers
import healthcareai.common.model_eval as hcai_model_evaluation
import healthcareai.common.top_factors as hcai_factors
import healthcareai.common.database_connections as hcai_db
import healthcareai.common.database_validators as hcai_dbval
from healthcareai.common.healthcareai_error import HealthcareAIError
class TrainedSupervisedModel(object):
    """
    The meta-object that is created when training supervised models.
    This object contains
        - trained estimator
        - trained linear estimator used for row level factor analysis
        - column metadata including transformed feature columns, grain & predicted column
        - the fit data preparation pipeline used for transforming new data for prediction
        - calculated metrics
        - test set actuals, predicted values/probabilities, predicted classes
    """
    def __init__(self,
                 model,
                 feature_model,
                 fit_pipeline,
                 model_type,
                 column_names,
                 grain_column,
                 prediction_column,
                 test_set_predictions,
                 test_set_class_labels,
                 test_set_actual,
                 metric_by_name,
                 original_column_names=None,
                 categorical_column_info=None,
                 training_time=None):
        """
        Create an instance of a TrainedSupervisedModel.
        Args:
            model (sklearn.base.BaseEstimator): The fit scikit learn algorithm for prediction
            feature_model (sklearn.base.BaseEstimator): The fit scikit learn algorithm for feature importance
            fit_pipeline (sklearn.pipeline.Pipeline): A fit pipeline for use on cleaning new raw data
            model_type (str): 'classification' or 'regression'
            column_names (list): List of column names used as features
            grain_column (str): Grain column (not used as a feature).
            prediction_column (str): The name of the prediction column
            test_set_predictions (list): y_prediction number (either probability of class or value)
            test_set_class_labels (list): y_prediction class label if classification
            test_set_actual (list): y_test
            metric_by_name (dict): Metrics by name
            original_column_names (list): List of column names used as features before running the data preparation
                pipeline (e.g. before dummification)
            categorical_column_info (dict): A dictionary mapping the name of each (pre-dummified) categorical column
                to a pandas.Series containing whose index consists of the different levels of the category and whose
                values consist of the frequencies with which these levels occur in the training data
            training_time (float): The time in seconds it took to train the model
        """
        self.model = model
        self.feature_model = feature_model
        self.fit_pipeline = fit_pipeline
        self.column_names = column_names
        # Stored privately; exposed read-only via the model_type property.
        self._model_type = model_type
        self.grain_column = grain_column
        self.prediction_column = prediction_column
        self.test_set_predictions = test_set_predictions
        self.test_set_class_labels = test_set_class_labels
        self.test_set_actual = test_set_actual
        # Stored privately; exposed read-only via the metrics property.
        self._metric_by_name = metric_by_name
        self.original_column_names = original_column_names
        self.categorical_column_info = categorical_column_info
        self.train_time = training_time
    @property
    def algorithm_name(self):
        """Model name extracted from the class type."""
        model = hcai_helpers.extract_estimator_from_meta_estimator(self.model)
        name = type(model).__name__
        return name
    @property
    def is_classification(self):
        """
        Return True if trainer is set up for classification.
        Easy check to consolidate magic strings in all the model type switches.
        """
        return self.model_type == 'classification'
    @property
    def is_regression(self):
        """
        Return True if trainer is set up for regression.
        Easy check to consolidate magic strings in all the model type switches.
        """
        return self.model_type == 'regression'
    @property
    def best_hyperparameters(self):
        """Best hyperparameters found if model is a meta estimator."""
        return hcai_helpers.get_hyperparameters_from_meta_estimator(self.model)
    @property
    def model_type(self):
        """Model type: 'regression' or 'classification'."""
        return self._model_type
    @property
    def binary_classification_scores(self):
        # TODO low priority, but test this
        """Return the probability scores of the first class for a binary classification model."""
        if self.is_regression:
            raise HealthcareAIError('ROC/PR plots are not used to evaluate regression models.')
        # NOTE(review): assumes test_set_predictions is an (n, 2) array of
        # class probabilities and column 1 is the positive class — confirm.
        predictions = np.squeeze(self.test_set_predictions[:, 1])
        return predictions
    @property
    def metrics(self):
        """Return the metrics that were calculated when the model was trained."""
        return self._metric_by_name
    def save(self, filename=None, debug=True):
        """
        Save this object to a pickle file with the given file name.
        Args:
            filename (str): Optional filename override. Defaults to `timestamp_<MODEL_TYPE>_<ALGORITHM_NAME>.pkl`. For
                example: `2017-05-27T09-12-30_regression_LinearRegression.pkl`
            debug (bool): Print debug output to console by default
        """
        if filename is None:
            time_string = time.strftime("%Y-%m-%dT%H-%M-%S")
            filename = '{}_{}_{}.pkl'.format(time_string, self.model_type, self.algorithm_name)
        hcai_io.save_object_as_pickle(self, filename)
        if debug:
            print('Trained {} model saved as {}'.format(self.algorithm_name, filename))
    def make_predictions(self, dataframe):
        """
        Given a new dataframe, apply data transformations and return a dataframe of predictions.
        Args:
            dataframe (pandas.core.frame.DataFrame): Raw prediction dataframe
        Returns:
            pandas.core.frame.DataFrame: A dataframe containing the grain id and predicted values
        """
        # Run the raw dataframe through the preparation process
        prepared_dataframe = self.prepare_and_subset(dataframe)
        # make predictions returning probabity of a class or value of regression
        if self.is_classification:
            # Only save the prediction of one of the two classes
            y_predictions = self.model.predict_proba(prepared_dataframe)[:, 1]
        elif self.is_regression:
            y_predictions = self.model.predict(prepared_dataframe)
        else:
            raise HealthcareAIError('Model type appears to be neither regression or classification.')
        # Create a new dataframe with the grain column from the original dataframe
        results = pd.DataFrame()
        # Column vector must exist in order to add it to results.
        if self.grain_column is not None:
            results[self.grain_column] = dataframe[self.grain_column].values
        results['Prediction'] = y_predictions
        return results
    def prepare_and_subset(self, dataframe):
        """
        Prepare and subset the raw data using the pipeline saved during training.
        Run the raw dataframe through the saved pipeline and return a dataframe that contains only the columns that were
        in the original model.
        This prevents any unexpected changes to incoming columns from interfering with the predictions.
        Args:
            dataframe (pandas.core.frame.DataFrame): Raw prediction dataframe
        Returns:
            pandas.core.frame.DataFrame: A dataframe that has been run through the pipeline and subsetted to only the
            columns the model expects.
        """
        # We want to be able to make predictions on new data (without labels) so don't want to insist that the
        # prediction column be present in the new data. To get around this, add the prediction columns filled with
        # NaNs. This column should be dropped when the dataframe is run through the pipeline.
        if self.prediction_column not in dataframe.columns.values \
                and self.prediction_column in self.original_column_names:
            # NOTE(review): np.NaN was removed in NumPy 2.0; np.nan is the
            # portable spelling — confirm the pinned numpy version.
            dataframe[self.prediction_column] = np.NaN
        try:
            # Raise an error here if any of the columns the model expects are not in the prediction dataframe
            df2 = dataframe.copy()
            if self.original_column_names is not None:
                df2 = df2[self.original_column_names]
            # Change the dtype of the categorical columns in the prediction dataframe to 'category' with levels
            # determined by the training data before running the data preparation pipeline
            if self.categorical_column_info is not None:
                for column in self.categorical_column_info:
                    col_categories = self.categorical_column_info[column].index
                    # NOTE(review): Series.astype('category', categories=...) was
                    # removed in newer pandas; CategoricalDtype is the modern
                    # route — confirm the pinned pandas version.
                    df2[column] = df2[column].astype('category', categories=col_categories)
                    # Check whether the prediction data contains categories not present in the training set and print
                    # a message warning that these new values will be dropped and imputed
                    new_values = {v for v in dataframe[column].unique() if not (v in col_categories or pd.isnull(v))}
                    if len(new_values) > 0:
                        category_message = """Column {} contains levels not seen in the training set. These levels have
                        been removed and will be imputed or the corresponding rows dropped.\nNew levels: {}"""
                        print(category_message.format(column, new_values))
            # Run the saved data preparation pipeline
            prepared_dataframe = self.fit_pipeline.transform(df2)
            # Subset the dataframe to only columns that were saved from the original model training
            prepared_dataframe = prepared_dataframe[self.column_names]
        except KeyError as ke:
            required_columns = self.column_names
            found_columns = list(dataframe.columns)
            # If a pre-dummified dataset is expected as the input, list the pre-dummified columns instead of the dummies
            if not self.original_column_names is None:
                required_columns = self.original_column_names
            error_message = """One or more of the columns that the saved trained model needs is not in the dataframe.\n
            Please compare these lists to see which field(s) is/are missing. Note that you can pass in extra fields,\n
            which will be ignored, but you must pass in all the required fields.\n
            Required fields: {}
            Given fields: {}
            Likely missing field(s): {}
            """.format(required_columns, found_columns, ke)
            raise HealthcareAIError(error_message)
        return prepared_dataframe
def make_factors(self, dataframe, number_top_features=3):
"""
Given a prediction dataframe, build and return a list of the top k features in dataframe format.
Args:
dataframe (pandas.core.frame.DataFrame): Raw prediction dataframe
number_top_features (int): Number of top features per row
Returns:
pandas.core.frame.DataFrame: A dataframe containing the grain id and factors
"""
# Run the raw dataframe through the preparation process
prepared_dataframe = self.prepare_and_subset(dataframe)
# Create a new dataframe. If grain column exists, add the grain
# column from the original dataframe; otherwise,
# just create a new empty dataframe.
if self.grain_column is not None:
results = dataframe[[self.grain_column]]
else:
results = pd.DataFrame()
# Create a list of column names
reason_col_names = ['Factor{}TXT'.format(i) for i in range(1, number_top_features + 1)]
# Get a 2 dimensional list of all the factors
top_features = hcai_factors.top_k_features(prepared_dataframe, self.feature_model, k=number_top_features)
# Verify that the number of factors matches the number of rows in the original dataframe.
if len(top_features) != len(dataframe):
raise HealthcareAIError('Warning! The number of predictions does not match the number of rows.')
# Create a dataframe from the column names and top features
reasons_df = pd.DataFrame(top_features, columns=reason_col_names, index=dataframe.index)
# Join the top features and results dataframes
results = pd.concat([results, reasons_df], axis=1, join_axes=[dataframe.index])
# results.set_index(keys=self.grain_column, inplace=True)
return results
def make_predictions_with_k_factors(self, dataframe, number_top_features=3):
    """
    Create a dataframe with predictions and factors.

    Given a prediction dataframe, build and return a dataframe with the grain column, the predictions and the top k
    features.

    Args:
        dataframe (pandas.core.frame.DataFrame): Raw prediction dataframe
        number_top_features (int): Number of top features per row

    Returns:
        pandas.core.frame.DataFrame: Predictions with factors and grain column
    """
    # TODO This runs the raw dataframe through the pipeline twice (once for
    # factors, once for predictions); consider combining for efficiency.
    factors = self.make_factors(dataframe, number_top_features=number_top_features)
    predictions = self.make_predictions(dataframe)

    # Sanity check: one prediction per input row.
    if len(predictions) != len(dataframe):
        raise HealthcareAIError('Warning! The number of predictions does not match the number of rows.')

    # Attach the prediction values alongside the factor columns.
    factors['Prediction'] = predictions['Prediction'].values

    return factors
def make_original_with_predictions_and_factors(self, dataframe, number_top_features=3):
    """
    Create a dataframe containing the original data, predictions and factors.

    Given a prediction dataframe, build and return a dataframe with the all the original columns, the predictions,
    and the top k features.

    Args:
        dataframe (pandas.core.frame.DataFrame): Raw prediction dataframe
        number_top_features (int): Number of top features per row

    Returns:
        pandas.core.frame.DataFrame:
    """
    # TODO This runs the raw dataframe through the pipeline twice; consider combining.
    predictions_and_factors = self.make_predictions_with_k_factors(
        dataframe,
        number_top_features=number_top_features)

    # Drop the target column so the joined result does not duplicate it.
    original_without_target = dataframe.drop([self.prediction_column], axis=1)

    # Join the original data with predictions and factors.
    return pd.concat([original_without_target, predictions_and_factors], axis=1)
def create_catalyst_dataframe(self, dataframe):
    """
    Create a Health Catalyst specific dataframe of predictions.

    Given a prediction dataframe, build and return a dataframe with the health catalyst specific column names, the
    predictions, and the top 3 features.

    Args:
        dataframe (pandas.core.frame.DataFrame): Raw prediction dataframe

    Returns:
        pandas.core.frame.DataFrame:
    """
    # Catalyst SAMs expect exactly three factor columns.
    sam_df = self.make_predictions_with_k_factors(dataframe, number_top_features=3)

    # Add the catalyst-specific bookkeeping columns expected by the SAM.
    sam_df['BindingID'] = 0
    sam_df['BindingNM'] = 'Python'
    sam_df['LastLoadDTS'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]

    return sam_df
def predict_to_catalyst_sam(self, dataframe, server, database, table, schema=None, predicted_column_name=None):
    """
    Given a dataframe you want predictions on, make predictions and save them to a catalyst-specific EDW table.

    Args:
        dataframe (pandas.core.frame.DataFrame): Raw prediction dataframe
        server (str): the target server name
        database (str): the database name
        table (str): the destination table name
        schema (str): the optional schema
        predicted_column_name (str): optional predicted column name (defaults to PredictedProbNBR or
        PredictedValueNBR)
    """
    # Make predictions in specific format
    sam_df = self.create_catalyst_dataframe(dataframe)

    # Resolve the destination column name: use the caller's choice, or fall
    # back to the catalyst default for this model type.
    if predicted_column_name is not None:
        final_column_name = predicted_column_name
    elif self.is_classification:
        final_column_name = 'PredictedProbNBR'
    elif self.is_regression:
        final_column_name = 'PredictedValueNBR'
    else:
        final_column_name = None
    sam_df.rename(columns={'Prediction': final_column_name}, inplace=True)

    try:
        engine = hcai_db.build_mssql_engine_using_trusted_connections(server, database)
        healthcareai.common.database_writers.write_to_db_agnostic(engine, table, sam_df, schema=schema)
    except HealthcareAIError as hcaie:
        # Run validation and alert user
        hcai_dbval.validate_catalyst_prediction_sam_connection(server, table, self.grain_column, self.prediction_column)
        raise HealthcareAIError(hcaie.message)
def predict_to_sqlite(self,
                      prediction_dataframe,
                      database,
                      table,
                      prediction_generator,
                      predicted_column_name=None):
    """
    Given a dataframe you want predictions on, make predictions and save them to an sqlite table.

    Args:
        prediction_dataframe (pandas.core.frame.DataFrame): Raw prediction dataframe
        database (str): database file name
        table (str): table name
        prediction_generator (method): one of the trained supervised model prediction methods
        predicted_column_name (str): optional predicted column name (defaults to PredictedProbNBR or
        PredictedValueNBR)

    Raises:
        HealthcareAIError: If prediction_generator is not a bound method.
    """
    import inspect

    # validate inputs: require a bound method (e.g. tsm.make_predictions).
    # inspect.ismethod() is the robust equivalent of the former fragile
    # type(...).__name__ != 'method' string comparison.
    if not inspect.ismethod(prediction_generator):
        raise HealthcareAIError(
            'Use of this method requires a prediction generator from a trained supervised model')

    # Get predictions from given generator
    sam_df = prediction_generator(prediction_dataframe)

    # Rename prediction column to default based on model type or given one
    if predicted_column_name is None:
        if self.is_classification:
            predicted_column_name = 'PredictedProbNBR'
        elif self.is_regression:
            predicted_column_name = 'PredictedValueNBR'
    sam_df.rename(columns={'Prediction': predicted_column_name}, inplace=True)

    engine = hcai_db.build_sqlite_engine(database)
    healthcareai.common.database_writers.write_to_db_agnostic(engine, table, sam_df)
def roc_plot(self):
    """Return a plot of the ROC curve of the holdout set from model training."""
    self.validate_classification()
    tsm_classification_comparison_plots(self, plot_type='ROC')
def roc(self, print_output=True):
    """
    Print out ROC details and return them with cutoffs.

    Note this is a simple subset of TrainedSupervisedModel.metrics()

    Args:
        print_output (bool): True (default) to print a table of output.

    Returns:
        dict: A subset of TrainedSupervisedModel.metrics() that are ROC specific
    """
    self.validate_classification()
    all_metrics = self._metric_by_name

    # Pull out only the ROC-related metrics, preserving key order.
    roc_keys = (
        'roc_auc',
        'best_roc_cutoff',
        'best_true_positive_rate',
        'best_false_positive_rate',
        'roc_thresholds',
        'true_positive_rates',
        'false_positive_rates',
    )
    roc = {key: all_metrics[key] for key in roc_keys}

    if print_output:
        print(('\nReceiver Operating Characteristic (ROC):\n'
               ' Area under curve (ROC AUC): {:0.2f}\n'
               ' Ideal ROC cutoff is {:0.2f}, yielding TPR of {:0.2f} and FPR of {:0.2f}').format(
            roc['roc_auc'],
            roc['best_roc_cutoff'],
            roc['best_true_positive_rate'],
            roc['best_false_positive_rate']))
        print('|--------------------------------|')
        print('| ROC |')
        print('| Threshhold | TPR | FPR |')
        print('|--------------|--------|--------|')
        table_rows = zip(
            roc['roc_thresholds'],
            roc['true_positive_rates'],
            roc['false_positive_rates'])
        for threshold, tpr, fpr in table_rows:
            # Flag the ideal cutoff row.
            flag = '***' if threshold == roc['best_roc_cutoff'] else ' '
            print('| {} {:03.2f} | {:03.2f} | {:03.2f} |'.format(flag, threshold, tpr, fpr))
        print('|--------------------------------|')
        print('| *** Ideal cutoff |')
        print('|--------------------------------|')

    return roc
def pr_plot(self):
    """Return a plot of the PR curve of the holdout set from model training."""
    self.validate_classification()
    tsm_classification_comparison_plots(self, plot_type='PR')
def pr(self, print_output=True):
    """
    Print out PR details and return them with cutoffs.

    Note this is a simple subset of TrainedSupervisedModel.metrics()

    Args:
        print_output (bool): True (default) to print a table of output.

    Returns:
        dict: A subset of TrainedSupervisedModel.metrics() that are PR specific
    """
    self.validate_classification()
    all_metrics = self._metric_by_name

    # Pull out only the precision-recall metrics, preserving key order.
    pr_keys = (
        'pr_auc',
        'best_pr_cutoff',
        'best_precision',
        'best_recall',
        'pr_thresholds',
        'precisions',
        'recalls',
    )
    pr = {key: all_metrics[key] for key in pr_keys}

    if print_output:
        print(('\nPrecision-Recall:\n'
               ' Area under Precision Recall curve (PR AUC): {:0.2f}\n'
               ' Ideal PR cutoff is {:0.2f}, yielding precision of {:04.3f} and recall of {:04.3f}').format(
            pr['pr_auc'],
            pr['best_pr_cutoff'],
            pr['best_precision'],
            pr['best_recall']))
        print('|---------------------------------|')
        print('| Precision-Recall Thresholds |')
        print('| Threshhold | Precision | Recall |')
        print('|------------|-----------|--------|')
        table_rows = zip(pr['pr_thresholds'], pr['precisions'], pr['recalls'])
        for threshold, precision, recall in table_rows:
            # Flag the ideal cutoff row.
            flag = '***' if threshold == pr['best_pr_cutoff'] else ' '
            print('| {} {:03.2f} | {:03.2f} | {:03.2f} |'.format(flag, threshold, precision, recall))
        print('|---------------------------------|')
        print('| *** Ideal cutoff |')
        print('|---------------------------------|')

    return pr
def validate_classification(self):
    """Raise an error unless this model is a classification model.

    Run this on any method that only makes sense for classification.
    """
    # TODO add binary check and rename to validate_binary_classification
    if self.model_type != 'classification':
        raise HealthcareAIError('This function only runs on a binary classification model.')
def print_training_results(self):
    """
    Print metrics, stats and hyperparameters of a trained supervised model.

    This includes the model name, training time, hyperparameters, and performance metrics.
    """
    name = self.algorithm_name
    metrics = self.metrics

    print('{} Training Results:'.format(name))
    print('- Training time:')
    print(' Trained the {} model in {} seconds'.format(name,
    round(self.train_time, 2)))

    # Fall back to a friendly message when no hyperparameter search ran.
    hyperparameters = ('N/A: No hyperparameter search was performed'
                       if self.best_hyperparameters is None
                       else self.best_hyperparameters)
    print('- Best hyperparameters found were:\n {}'.format(hyperparameters))

    if self._model_type == 'classification':
        print('- {} performance metrics:\n Accuracy: {:03.2f}\n ROC AUC: {:03.2f}\n PR AUC: {:03.2f}'.format(
            name,
            metrics['accuracy'],
            metrics['roc_auc'],
            metrics['pr_auc']))
    elif self._model_type == 'regression':
        print('- {} performance metrics:\n Mean Squared Error (MSE): {}\n Mean Absolute Error (MAE): {}'.format(
            name,
            metrics['mean_squared_error'],
            metrics['mean_absolute_error']))
def get_estimator_from_trained_supervised_model(trained_supervised_model):
    """
    Given an instance of a TrainedSupervisedModel, return the main estimator, regardless of random search.

    Args:
        trained_supervised_model (TrainedSupervisedModel):

    Returns:
        sklearn.base.BaseEstimator: The scikit learn estimator

    Raises:
        HealthcareAIError: If the input is not a TrainedSupervisedModel.
    """
    # Validate input is a TSM
    if not isinstance(trained_supervised_model, TrainedSupervisedModel):
        raise HealthcareAIError('This requires an instance of a TrainedSupervisedModel')
    # If tsm.model is a meta estimator (e.g. from a random search), the helper
    # extracts best_estimator_; otherwise it returns tsm.model unchanged.
    # (Removed a dead, no-op design-notes string literal that shadowed this logic.)
    return hcai_helpers.extract_estimator_from_meta_estimator(trained_supervised_model.model)
def tsm_classification_comparison_plots(trained_supervised_models, plot_type='ROC', save=False):
    """
    Given a single or list of trained supervised models, plot a ROC or PR curve for each one.

    Args:
        plot_type (str): 'ROC' (default) or 'PR'
        trained_supervised_models (TrainedSupervisedModel): a single or iterable containing TrainedSupervisedModels
        save (bool): Save the plot to a file
    """
    # Input validation and dispatch via a lookup table.
    plotters = {
        'ROC': hcai_model_evaluation.roc_plot_from_thresholds,
        'PR': hcai_model_evaluation.pr_plot_from_thresholds,
    }
    plotter = plotters.get(plot_type)
    if plotter is None:
        raise HealthcareAIError('Please choose either plot_type=\'ROC\' or plot_type=\'PR\'')

    metrics_by_model = {}
    try:
        # Assume an iterable of models first (EAFP).
        for index, model in enumerate(trained_supervised_models):
            if not isinstance(model, TrainedSupervisedModel):
                raise HealthcareAIError('One of the objects in the list is not a TrainedSupervisedModel ({})'
                                        .format(model))
            label = "{}: {}".format(index + 1, model.algorithm_name)
            metrics_by_model[label] = model.metrics
    except TypeError:
        # input is not iterable (assume single TSM)
        if not isinstance(trained_supervised_models, TrainedSupervisedModel):
            raise HealthcareAIError('Input is not a TrainedSupervisedModel ({})'.format(trained_supervised_models))
        metrics_by_model[trained_supervised_models.algorithm_name] = trained_supervised_models.metrics

        # TODO so, you could check for different GUIDs that could be saved in each TSM!
        # The assumption here is that each TSM was trained on the same train test split,
        # which happens when instantiating SupervisedModelTrainer

    # Plot with the selected plotter
    plotter(metrics_by_model, save=save, debug=False)
def plot_rf_features_from_tsm(trained_supervised_model, x_train, feature_limit=15, save=False):
    """
    Given an instance of a TrainedSupervisedModel, the x_train data, display or save a feature importance graph.

    Args:
        trained_supervised_model (TrainedSupervisedModel):
        x_train (numpy.array): A 2D numpy array that was used for training
        feature_limit (int): The maximum number of features to plot
        save (bool): True to save the plot, false to display it in a blocking thread
    """
    # Unwrap any meta-estimator so the plot sees the underlying model.
    estimator = get_estimator_from_trained_supervised_model(trained_supervised_model)
    hcai_model_evaluation.plot_random_forest_feature_importance(
        estimator,
        x_train,
        trained_supervised_model.column_names,
        feature_limit=feature_limit,
        save=save)
| {
"repo_name": "HealthCatalyst/healthcareai-py",
"path": "healthcareai/trained_models/trained_supervised_model.py",
"copies": "2",
"size": "30435",
"license": "mit",
"hash": 8925085812735228000,
"line_mean": 44.0888888889,
"line_max": 124,
"alpha_frac": 0.6211927058,
"autogenerated": false,
"ratio": 4.500887311446317,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6122080017246317,
"avg_score": null,
"num_lines": null
} |
"""A training dojo for algorithms."""
from copy import deepcopy
import itertools
from time import time
__all__ = ["Dojo", "TimingDojo", "FitnessDojo"]
INFINITY = float("inf")
class Dojo(object):
    """Abstract base for algorithm-competition test beds.

    Holds the competing algorithms, the environment they operate on, and the
    number of runs used to average measurements.
    """

    def __init__(self, algorithms, environ, runs=100):
        self.algorithms = list(algorithms)
        self.environ = environ
        self.runs = runs

    def train(self, *args):
        """Execute the algorithms and compare their performance.

        Subclasses must override this.
        """
        raise NotImplementedError("not implemented")
class TimingDojo(Dojo):
    """A dojo that ranks competing algorithms by average wall-clock time."""

    def train(self, *args):
        """Time each algorithm and return the one with the lowest average.

        Each algorithm is timed once per permutation of the algorithm list
        (varying execution order); only its best average survives.
        """
        orderings = list(itertools.permutations(self.algorithms))
        best_times = {}
        for candidate in itertools.chain(*orderings):
            total = 0
            for _ in range(self.runs):
                # Fresh environment copy so runs do not contaminate each other.
                sandbox = deepcopy(self.environ)
                begin = time()
                candidate(sandbox, *args)
                total += time() - begin
            average = total / self.runs
            if best_times.get(candidate, INFINITY) > average:
                best_times[candidate] = average
        return min(best_times, key=best_times.get)
class FitnessDojo(Dojo):
    """A dojo that ranks competing algorithms by the fitness value each returns."""

    def __init__(self, algorithms, environ, runs=100, cmpfunc=min):
        super(FitnessDojo, self).__init__(algorithms, environ, runs)
        # cmpfunc picks the winner from the score dict (min by default).
        self.cmpfunc = cmpfunc

    def train(self, *args):
        """Score each algorithm and return the winner chosen by ``cmpfunc``."""
        orderings = list(itertools.permutations(self.algorithms))
        scores = {}
        for candidate in itertools.chain(*orderings):
            total = 0
            for _ in range(self.runs):
                # Each run gets a fresh environment copy.
                total += candidate(deepcopy(self.environ), *args)
            average = total / self.runs
            if scores.get(candidate, INFINITY) > average:
                scores[candidate] = average
        return self.cmpfunc(scores, key=scores.get)
| {
"repo_name": "NiclasEriksen/rpg_procgen",
"path": "utils/dojo.py",
"copies": "1",
"size": "2222",
"license": "cc0-1.0",
"hash": 5032576229112518000,
"line_mean": 31.1641791045,
"line_max": 79,
"alpha_frac": 0.5715571557,
"autogenerated": false,
"ratio": 4.297872340425532,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5369429496125532,
"avg_score": null,
"num_lines": null
} |
# A transformation is a dictionary of semantic checksums,
# representing the input pins, together with celltype and subcelltype
# The checksum of a transformation is the hash of the JSON buffer of this dict.
# A job consists of a transformation together with all relevant entries
# from the semantic-to-syntactic checksum cache
class HardCancelError(Exception):
    """Marker exception for a hard-cancelled transformation job."""

    def __str__(self):
        # Render as the bare class name (no message).
        return type(self).__name__
from seamless.core.protocol.serialize import serialize
import sys
def log(*args, **kwargs):
    """Print to stderr; accepts the same arguments as print()."""
    print(*args, **kwargs, file=sys.stderr)
import json
import ast
import functools
import asyncio
import time
import traceback
from copy import deepcopy
from ...get_hash import get_dict_hash, get_hash
"""
TODO: offload exceptions (as text) to database (also allow them to be cleared in database?)
TODO: do the same with stdout, stderr
TODO: add some metadata to the above? (when and where it was executed)
"""
# Keep transformations alive for 20 secs after the last ref has expired,
# but only if they have been running locally for at least 20 secs,
# else, keep them alive for 1 sec
TF_KEEP_ALIVE_MIN = 1.0    # seconds: short keep-alive for quick jobs
TF_KEEP_ALIVE_MAX = 20.0   # seconds: long keep-alive for long-running jobs
TF_ALIVE_THRESHOLD = 20.0  # seconds of local runtime needed to earn the long keep-alive
import logging
logger = logging.getLogger("seamless")
def print_info(*args):
    """Log a space-joined message at INFO level."""
    logger.info(" ".join(str(arg) for arg in args))
def print_warning(*args):
    """Log a space-joined message at WARNING level."""
    logger.warning(" ".join(str(arg) for arg in args))
def print_debug(*args):
    """Log a space-joined message at DEBUG level."""
    logger.debug(" ".join(str(arg) for arg in args))
def print_error(*args):
    """Log a space-joined message at ERROR level."""
    logger.error(" ".join(str(arg) for arg in args))
class RemoteTransformer:
    """Stand-in transformer for a transformation requested by a remote peer."""

    # Class-level defaults; the cache may flip these per instance.
    debug = False
    python_debug = False
    _exception_to_clear = None

    def __init__(self, tf_checksum, peer_id):
        self.tf_checksum = tf_checksum
        self.peer_id = peer_id
        # Signalled (via put) on progress/prelim/decref events so the peer
        # waiting in remote_wait wakes up.
        self.queue = asyncio.Queue()
class DummyTransformer:
    """Minimal transformer placeholder that just records progress/prelim state."""

    # Class-level defaults; the cache may flip these per instance.
    _status_reason = None
    _exception_to_clear = None
    debug = False
    python_debug = False

    def __init__(self, tf_checksum):
        self.tf_checksum = tf_checksum
        # Filled in by the cache's progress/prelim callbacks.
        self.progress = None
        self.prelim = None
def tf_get_buffer(transformation):
    """Serialize a transformation dict to its canonical JSON buffer (bytes).

    Checksums are hex-encoded; the bookkeeping keys __compilers__,
    __languages__ and __meta__ are excluded, so they do not affect the
    transformation's identity.
    """
    assert isinstance(transformation, dict)
    serializable = {}
    for key, value in transformation.items():
        if key in ("__compilers__", "__languages__", "__meta__"):
            continue
        if key in ("__output__", "__as__"):
            serializable[key] = value
        elif key == "__env__":
            # __env__ holds a bare checksum.
            serializable[key] = value.hex()
        else:
            # Ordinary pin: (celltype, subcelltype, checksum).
            celltype, subcelltype, checksum = value
            serializable[key] = celltype, subcelltype, checksum.hex()
    content = json.dumps(serializable, sort_keys=True, indent=2) + "\n"
    return content.encode()
def syntactic_is_semantic(celltype, subcelltype):
    """Return True if the syntactic checksum doubles as the semantic checksum.

    Only code/config celltypes (cson, yaml, python) need a separate
    semantic representation; subcelltype is currently unused.
    """
    syntactic_only = ("cson", "yaml", "python")
    return celltype not in syntactic_only
async def syntactic_to_semantic(
    checksum, celltype, subcelltype, codename
):
    """Convert a syntactic checksum into its semantic checksum.

    For semantic celltypes this is the identity. cson/yaml buffers are
    converted to "plain"; python code is parsed and the checksum of its AST
    dump is used, so formatting/comment changes do not change semantics.

    Raises:
        CacheMissError: if the buffer is found neither locally nor remotely.
        TypeError: for an unhandled celltype.
    """
    assert checksum is None or isinstance(checksum, bytes)
    if syntactic_is_semantic(celltype, subcelltype):
        return checksum
    # EAFP: try the local buffer cache first, fall back to remote retrieval.
    try:
        buffer = get_buffer(checksum)
    except CacheMissError:
        buffer = await get_buffer_remote(
            checksum,
            None
        )
        if buffer is None:
            raise CacheMissError(checksum.hex()) from None
        # Cache the remotely-fetched buffer for next time.
        buffer_cache.cache_buffer(checksum, buffer)
    if celltype in ("cson", "yaml"):
        semantic_checksum = await convert(
            checksum, buffer, celltype, "plain"
        )
    elif celltype == "python":
        value = await deserialize(buffer, checksum, "python", False)
        tree = ast.parse(value, filename=codename)
        dump = ast.dump(tree).encode()
        semantic_checksum = await calculate_checksum(dump)
        buffer_cache.cache_buffer(semantic_checksum, dump)
    else:
        raise TypeError(celltype)
    return semantic_checksum
class TransformationCache:
active = True
_destroyed = False
def __init__(self):
    """Initialize all in-memory cache mappings (no I/O)."""
    self.transformations = {} # tf-checksum-to-transformation
    self.debug = set() # set of debug tf-checksums
    self.python_debug = set() # set of python-debug tf-checksums
    self.transformation_results = {} # tf-checksum-to-(result-checksum, prelim)
    self.transformation_exceptions = {} # tf-checksum-to-exception
    self.transformation_logs = {} # tf-checksum-to-stdout/stderr-logs (max 10k each)
    self.transformation_jobs = {} # tf-checksum-to-job
    self.rev_transformation_jobs = {} # job-to-tf-checksum
    self.job_progress = {}  # job-id-to-last-reported-progress
    self.transformer_to_transformations = {} # 1:1, transformations as tf-checksums
    self.transformations_to_transformers = {} # 1:list, transformations as tf-checksums
    self.remote_transformers = {}  # (tf-checksum, peer-id)-to-RemoteTransformer
    self.syntactic_to_semantic_checksums = {} #(checksum,celltype,subcelltype)-to-checksum
    self.semantic_to_syntactic_checksums = {} #(checksum,celltype,subcelltype)-to-list-of-checksums
@staticmethod
def syntactic_to_semantic(
    checksum, celltype, subcelltype, codename
):
    """Synchronous wrapper around the module-level syntactic_to_semantic coroutine.

    NOTE(review): uses asyncio.get_event_loop().run_until_complete, which
    raises if the event loop is already running — presumably only invoked
    from synchronous (non-loop) contexts; confirm.
    """
    future = asyncio.ensure_future(
        syntactic_to_semantic(
            checksum, celltype, subcelltype, codename
        )
    )
    asyncio.get_event_loop().run_until_complete(future)
    return future.result()
def register_transformer(self, transformer):
    """Register a local transformer; it starts with no current transformation."""
    assert isinstance(transformer, Transformer)
    assert transformer not in self.transformer_to_transformations
    self.transformer_to_transformations[transformer] = None
def cancel_transformer(self, transformer, void_error):
    """Detach *transformer* from its current transformation.

    NOTE(review): the transformation is decreffed only when void_error is
    falsy — confirm that void-error cancellation is intended to keep the
    reference alive.
    """
    assert isinstance(transformer, Transformer)
    assert transformer in self.transformer_to_transformations
    tf_checksum = self.transformer_to_transformations.get(transformer)
    if tf_checksum is not None:
        transformation = self.transformations[tf_checksum]
        if not void_error:
            self.decref_transformation(transformation, transformer)
        self.transformer_to_transformations[transformer] = None
def destroy_transformer(self, transformer):
    """Remove a transformer entirely, decreffing its current transformation."""
    assert isinstance(transformer, Transformer)
    current_checksum = self.transformer_to_transformations.pop(transformer)
    if current_checksum is not None:
        self.decref_transformation(self.transformations[current_checksum], transformer)
async def build_transformation(self, transformer, celltypes, inputpin_checksums, outputpin):
    """Build the transformation dict for *transformer* from its input pin checksums.

    Returns (transformation, build_exception). build_exception is non-None
    when a syntactic-to-semantic conversion of an input pin failed; the
    partially-built transformation is still returned in that case.
    """
    assert isinstance(transformer, Transformer)
    cachemanager = transformer._get_manager().cachemanager
    outputname, celltype, subcelltype = outputpin
    transformation = {"__output__": outputpin}
    as_ = {}
    root = transformer._root()
    if root._compilers is not None:
        transformation["__compilers__"] = root._compilers
    if root._languages is not None:
        transformation["__languages__"] = root._languages
    meta = {
        "transformer_path": transformer.path,
    }
    if transformer.meta is not None:
        meta.update(transformer.meta)
    # A "META" input pin overrides/extends the transformer's own metadata.
    if "META" in inputpin_checksums:
        checksum = inputpin_checksums["META"]
        await cachemanager.fingertip(checksum)
        inp_metabuf = buffer_cache.get_buffer(checksum)
        if inp_metabuf is None:
            raise CacheMissError("META")
        inp_meta = json.loads(inp_metabuf)
        meta.update(inp_meta)
    metabuf = await serialize(meta, "plain")
    meta_checksum = get_hash(metabuf)
    buffer_cache.cache_buffer(meta_checksum, metabuf)
    transformation["__meta__"] = meta_checksum
    if transformer.env is not None:
        envbuf = await serialize(transformer.env, "plain")
        env_checksum = get_hash(envbuf)
        buffer_cache.cache_buffer(env_checksum, envbuf)
        transformation["__env__"] = env_checksum
    transformation_build_exception = None
    for pinname, checksum in inputpin_checksums.items():
        if pinname == "META":
            continue
        await cachemanager.fingertip(checksum)
        pin = transformer._pins[pinname]
        celltype, subcelltype = celltypes[pinname]
        if pin.as_ is not None:
            as_[pinname] = pin.as_
        if checksum is None:
            sem_checksum = None
        else:
            # Map the syntactic checksum to its semantic equivalent,
            # memoizing both directions and mirroring to the database.
            key = (checksum, celltype, subcelltype)
            sem_checksum = self.syntactic_to_semantic_checksums.get(key)
            if sem_checksum is None:
                codename = str(pin)
                if not syntactic_is_semantic(celltype, subcelltype):
                    try:
                        sem_checksum = await syntactic_to_semantic(
                            checksum, celltype, subcelltype,
                            codename
                        )
                    except Exception as exc:
                        # Remember the failure and stop building; the caller
                        # decides how to surface it.
                        transformation_build_exception = exc
                        break
                    self.syntactic_to_semantic_checksums[key] = sem_checksum
                    semkey = (sem_checksum, celltype, subcelltype)
                    if semkey in self.semantic_to_syntactic_checksums:
                        semsyn = self.semantic_to_syntactic_checksums[semkey]
                    else:
                        semsyn = database_cache.sem2syn(semkey)
                        if semsyn is None:
                            semsyn = []
                        self.semantic_to_syntactic_checksums[semkey] = semsyn
                    semsyn.append(checksum)
                    database_sink.sem2syn(semkey, semsyn)
                else:
                    # Syntactic checksum already is the semantic one.
                    sem_checksum = checksum
        transformation[pinname] = celltype, subcelltype, sem_checksum
    if len(as_):
        transformation["__as__"] = as_
    return transformation, transformation_build_exception
async def update_transformer(self,
    transformer, celltypes, inputpin_checksums, outputpin
):
    """Build (or reuse) the transformation for *transformer* and run its job.

    Increfs the transformation; when there is no final cached result, a job
    is launched and awaited (shielded, so cancelling this coroutine does not
    kill the shared job).
    """
    assert isinstance(transformer, Transformer)
    transformation, transformation_build_exception = \
        await self.build_transformation(
            transformer, celltypes, inputpin_checksums, outputpin
        )
    result = await self.incref_transformation(
        transformation, transformer,
        transformation_build_exception=transformation_build_exception
    )
    if result is not None:
        tf_checksum, result_checksum, prelim = result
        # Run only when there is no final cached result yet.
        if result_checksum is None or prelim:
            job = self.run_job(transformation, tf_checksum)
            if job is not None:
                await asyncio.shield(job.future)
async def remote_wait(self, tf_checksum, peer_id):
    """Block until the matching remote transformer is signalled, then drain its queue."""
    registered = self.remote_transformers.get((tf_checksum, peer_id))
    if registered is None:
        return
    # Wait for at least one signal...
    await registered.queue.get()
    # ...then drain any further queued signals without blocking.
    while True:
        try:
            registered.queue.get_nowait()
        except asyncio.QueueEmpty:
            break
async def incref_transformation(self, transformation, transformer, *, transformation_build_exception):
    """Attach *transformer* to *transformation*, increffing all its buffers.

    Returns (tf_checksum, result_checksum, prelim) — result_checksum may be
    None when there is no cached result — or None when the build failed or
    the remote transformer was already registered.
    """
    assert isinstance(transformer, (Transformer, RemoteTransformer, DummyTransformer))
    if isinstance(transformer, RemoteTransformer):
        key = transformer.tf_checksum, transformer.peer_id
        if key in self.remote_transformers:
            return
        self.remote_transformers[key] = transformer
    from ..manager.tasks.transformer_update import TransformerResultUpdateTask
    tf_buffer = tf_get_buffer(transformation)
    tf_checksum = await calculate_checksum(tf_buffer)
    #print("INCREF", tf_checksum.hex(), transformer)
    if tf_checksum not in self.transformations:
        # First ref: register the transformation and incref every
        # semantic input buffer plus any cached result.
        tf = []
        buffer_cache.incref_buffer(tf_checksum, tf_buffer, False)
        self.transformations_to_transformers[tf_checksum] = tf
        self.transformations[tf_checksum] = transformation
        if tf_checksum in self.transformation_results:
            result_checksum, prelim = self.transformation_results[tf_checksum]
            buffer_cache.incref(result_checksum, False)
        for pinname in transformation:
            if pinname in ("__compilers__", "__languages__", "__as__", "__meta__"):
                continue
            if pinname == "__output__":
                continue
            if pinname == "__env__":
                sem_checksum = transformation[pinname]
            else:
                celltype, subcelltype, sem_checksum = transformation[pinname]
            buffer_cache.incref(sem_checksum, (pinname == "__env__"))
    else:
        tf = self.transformations_to_transformers[tf_checksum]
    # Propagate per-transformer debug flags to the transformation.
    if transformer.debug:
        if tf_checksum not in self.debug:
            self.debug.add(tf_checksum)
            self.clear_exception(transformer)
    if transformer.python_debug:
        if tf_checksum not in self.python_debug:
            self.python_debug.add(tf_checksum)
            self.clear_exception(transformer)
    if transformer._exception_to_clear:
        self.clear_exception(tf_checksum=tf_checksum)
        transformer._exception_to_clear = False
    if isinstance(transformer, (RemoteTransformer, DummyTransformer)):
        old_tf_checksum = None
    else:
        old_tf_checksum = self.transformer_to_transformations[transformer]
    if old_tf_checksum != tf_checksum:
        # Re-point the transformer and decref whatever it referenced before.
        if isinstance(transformer, Transformer):
            self.transformer_to_transformations[transformer] = tf_checksum
        tf.append(transformer)
        if old_tf_checksum is not None:
            #print("INCREF WITH OLD", tf_checksum.hex(), old_tf_checksum.hex())
            old_transformation = self.transformations[old_tf_checksum]
            self.decref_transformation(old_transformation, transformer)
    if transformation_build_exception is not None:
        self.transformation_exceptions[tf_checksum] = transformation_build_exception
        transformers = self.transformations_to_transformers[tf_checksum]
        self._set_exc(transformers, transformation_build_exception)
        return
    result_checksum, prelim = self._get_transformation_result(tf_checksum)
    if result_checksum is not None:
        if isinstance(transformer, Transformer):
            #print("CACHE HIT", transformer, result_checksum.hex())
            manager = transformer._get_manager()
            manager._set_transformer_checksum(
                transformer,
                result_checksum,
                False,
                prelim=prelim
            )
            TransformerResultUpdateTask(manager, transformer).launch()
    return tf_checksum, result_checksum, prelim
def decref_transformation(self, transformation, transformer):
    """Detach *transformer* from *transformation*; schedule destruction when unreferenced.

    The transformation is kept alive for TF_KEEP_ALIVE_MIN seconds (or
    TF_KEEP_ALIVE_MAX when its job has run longer than TF_ALIVE_THRESHOLD)
    before destroy_transformation fires.
    """
    assert isinstance(transformer, (Transformer, RemoteTransformer, DummyTransformer))
    if isinstance(transformer, RemoteTransformer):
        # Signal the waiting peer (best-effort) and unregister it.
        try:
            transformer.queue.put_nowait(None)
        except asyncio.QueueFull:
            pass
        key = transformer.tf_checksum, transformer.peer_id
        self.remote_transformers.pop(key, None)
    tf_buffer = tf_get_buffer(transformation)
    tf_checksum = calculate_checksum_sync(tf_buffer)
    #print("DECREF", tf_checksum.hex(), transformer)
    assert tf_checksum in self.transformations
    if not isinstance(transformer, DummyTransformer):
        dummy = False
        transformers = self.transformations_to_transformers[tf_checksum]
        assert transformer in transformers
        transformers.remove(transformer)
    else:
        dummy = True
        transformers = []
    # Drop the debug flag once no remaining transformer wants it.
    debug = any([tf.debug for tf in transformers])
    if not debug:
        self.debug.discard(tf_checksum)
    if not len(transformers):
        delay = TF_KEEP_ALIVE_MIN
        job = self.transformation_jobs.get(tf_checksum)
        if job is not None and job.start is not None and \
           time.time() - job.start > TF_ALIVE_THRESHOLD:
            delay = TF_KEEP_ALIVE_MAX
        tempref = functools.partial(self.destroy_transformation, transformation, dummy)
        temprefmanager.add_ref(tempref, delay, on_shutdown=True)
def destroy_transformation(self, transformation, dummy):
    """Tear down a transformation: decref all buffers and cancel its job.

    Invoked via temprefmanager after the keep-alive delay; aborts when a new
    transformer re-registered the transformation in the meantime.
    """
    tf_buffer = tf_get_buffer(transformation)
    tf_checksum = calculate_checksum_sync(tf_buffer)
    if not dummy:
        if tf_checksum in self.transformations_to_transformers:
            if len(self.transformations_to_transformers[tf_checksum]):
                return  # A new transformer was registered in the meantime
        else:
            return
    if tf_checksum not in self.transformations:
        print("WARNING: cannot destroy unknown transformation %s" % tf_checksum.hex())
        return
    self.transformations.pop(tf_checksum)
    if not dummy:
        self.transformations_to_transformers.pop(tf_checksum)
    # Release every semantic input buffer (mirrors incref_transformation).
    for pinname in transformation:
        if pinname in ("__output__", "__languages__", "__compilers__", "__as__", "__meta__"):
            continue
        if pinname == "__env__":
            env_checksum = transformation["__env__"]
            buffer_cache.decref(env_checksum)
            continue
        celltype, subcelltype, sem_checksum = transformation[pinname]
        buffer_cache.decref(sem_checksum)
    buffer_cache.decref(tf_checksum)
    if tf_checksum in self.transformation_results:
        result_checksum, result_prelim = self.transformation_results[tf_checksum]
        buffer_cache.decref(result_checksum)
        # Preliminary results are not worth keeping once unreferenced.
        if result_prelim:
            self.transformation_results.pop(tf_checksum)
    job = self.transformation_jobs.get(tf_checksum)
    if job is not None:
        if job.future is not None:
            job._cancelled = True
            if job.remote_futures is not None:
                for fut in job.remote_futures:
                    fut.cancel()
            job.future.cancel()
def run_job(self, transformation, tf_checksum):
    """Launch (or reuse) the TransformationJob for *tf_checksum*.

    Returns the job, an existing job for the same checksum, or None when a
    recorded exception short-circuits execution.
    """
    transformers = self.transformations_to_transformers[tf_checksum]
    # A previously recorded exception voids the transformers instead of re-running.
    if tf_checksum in self.transformation_exceptions:
        exc = self.transformation_exceptions[tf_checksum]
        self._set_exc(transformers, exc)
        return
    for transformer in self.transformations_to_transformers[tf_checksum]:
        transformer._status_reason = StatusReasonEnum.EXECUTING
    # Deduplicate: one job per transformation checksum.
    existing_job = self.transformation_jobs.get(tf_checksum)
    if existing_job is not None:
        return existing_job
    if not len(transformers):
        codename = "<Unknown>"
    else:
        codename = str(transformers[-1])
    tfs = []
    for transformer in transformers:
        if isinstance(transformer,
            (RemoteTransformer, DummyTransformer)
        ):
            continue
        tfs.append(transformer._format_path())
    if len(tfs):
        tftxt = ",".join(tfs)
        print_info("Executing transformer: {}".format(tftxt))
    debug = tf_checksum in self.debug
    python_debug = tf_checksum in self.python_debug
    # Build the semantic->syntactic cache slice this job needs.
    semantic_cache = {}
    for k,v in transformation.items():
        if k in ("__compilers__", "__languages__", "__meta__"):
            continue
        if k in ("__output__", "__as__"):
            continue
        if k == "__env__":
            continue
        celltype, subcelltype, sem_checksum = v
        if syntactic_is_semantic(celltype, subcelltype):
            continue
        semkey = (sem_checksum, celltype, subcelltype)
        try:
            checksums = self.semantic_to_syntactic_checksums[semkey]
        except KeyError:
            raise KeyError(sem_checksum.hex(), celltype, subcelltype) from None
        semantic_cache[semkey] = checksums
    job = TransformationJob(
        tf_checksum, codename,
        transformation, semantic_cache,
        debug, python_debug
    )
    job.execute(
        self.prelim_callback,
        self.progress_callback
    )
    self.transformation_jobs[tf_checksum] = job
    self.rev_transformation_jobs[id(job)] = tf_checksum
    return job
def progress_callback(self, job, progress):
    """Record *progress* for *job* and fan it out to its transformers."""
    job_id = id(job)
    self.job_progress[job_id] = progress
    checksum = self.rev_transformation_jobs[job_id]
    for tf in self.transformations_to_transformers[checksum]:
        if isinstance(tf, RemoteTransformer):
            # Wake the remote listener; a full queue means one is pending.
            try:
                tf.queue.put_nowait(None)
            except asyncio.QueueFull:
                pass
        elif isinstance(tf, DummyTransformer):
            tf.progress = progress
        else:
            tf._get_manager()._set_transformer_progress(tf, progress)
def prelim_callback(self, job, prelim_checksum):
    """Propagate a preliminary result checksum of *job* to its transformers."""
    if prelim_checksum is None:
        return
    checksum = self.rev_transformation_jobs[id(job)]
    for tf in self.transformations_to_transformers[checksum]:
        if isinstance(tf, RemoteTransformer):
            # Nudge the remote listener queue; full queue is fine.
            try:
                tf.queue.put_nowait(None)
            except asyncio.QueueFull:
                pass
        if isinstance(tf, DummyTransformer):
            tf.prelim = prelim_checksum
    # Store as a *preliminary* (prelim=True) transformation result.
    self.set_transformation_result(checksum, prelim_checksum, True)
def _hard_cancel(self, job):
    """Forcibly cancel *job*, flagging it as hard-cancelled first."""
    if self._destroyed:
        return
    fut = job.future
    assert fut is not None
    if fut.done():
        return
    # Setting the exception on the future directly does not work;
    # flag the job instead so job_done() can tell hard from soft cancel.
    job._hard_cancelled = True
    remote = job.remote_futures
    if remote is not None:
        for remote_fut in remote:
            remote_fut.cancel()
    fut.cancel()
def _set_exc(self, transformers, exc):
    """Void every local transformer with a status derived from *exc*."""
    # TODO: offload to provenance? unless hard-canceled
    # The status reason depends only on the exception type; compute once.
    if isinstance(exc, SeamlessInvalidValueError):
        reason = StatusReasonEnum.INVALID
    elif isinstance(exc, SeamlessUndefinedError):
        reason = StatusReasonEnum.UNDEFINED
    else:
        reason = StatusReasonEnum.ERROR
    for tf in list(transformers):
        # Remote/dummy transformers carry no local manager state to void.
        if isinstance(tf, (RemoteTransformer, DummyTransformer)):
            continue
        tf._get_manager().cancel_transformer(tf, void=True, reason=reason)
def job_done(self, job, _):
    """Future-done callback: record the job's outcome (result, exception
    or cancellation) and release/notify the attached transformers."""
    if self._destroyed:
        return
    future = job.future
    # A hard cancel overrides an ordinary cancellation.
    cancelled = (future.cancelled() or job._cancelled) and not job._hard_cancelled
    tf_checksum = self.rev_transformation_jobs.pop(id(job))
    self.job_progress.pop(id(job), None)
    #print("/RUN JOB!",len(self.rev_transformation_jobs), cancelled)
    if tf_checksum in self.transformations:
        self.transformation_jobs[tf_checksum] = None
    else:
        self.transformation_jobs.pop(tf_checksum)
        return # transformation was destroyed
    transformation = self.transformations[tf_checksum]
    transformers = self.transformations_to_transformers[tf_checksum]
    #print("DONE!", tf_checksum.hex(), transformers, cancelled)
    # Remote and dummy transformers are released now; real transformers
    # stay attached and are updated via set_transformation_result below.
    for transformer in list(transformers):
        if isinstance(transformer,RemoteTransformer):
            try:
                transformer.queue.put_nowait(None)
            except asyncio.QueueFull:
                pass
            self.decref_transformation(transformation, transformer)
        if isinstance(transformer, DummyTransformer):
            self.decref_transformation(transformation, transformer)
    if cancelled:
        return
    if job._hard_cancelled:
        exc = HardCancelError()
        print_debug("Hard cancel:", job.codename)
    else:
        exc = future.exception()
        if exc is None:
            result_checksum, logs = future.result()
            self.transformation_logs[tf_checksum] = logs
            # A job that finished without producing a result is "undefined".
            if result_checksum is None:
                exc = SeamlessUndefinedError()
    if exc is not None and job.remote:
        # Consume the future's exception so asyncio does not report it
        # as never-retrieved.
        try:
            future.result()
        except:
            pass
        """
        if not isinstance(exc, HardCancelError) and not job._hard_cancelled and not 1:
            print("!" * 80)
            print("! Transformer remote exception", job.codename)
            print("!" * 80)
            import traceback
            traceback.print_exc()
            print("!" * 80)
        """
    transformers = self.transformations_to_transformers[tf_checksum]
    if exc is not None:
        if isinstance(exc,SeamlessTransformationError):
            # Strip a redundant "module.SeamlessTransformationError" prefix
            # from the message and re-wrap the exception.
            exc_str = None
            if len(exc.args):
                exc_str = exc.args[0]
            if exc_str is not None:
                h = SeamlessTransformationError.__module__
                h += "." + SeamlessTransformationError.__name__
                if exc_str.startswith(h):
                    exc_str = exc_str[len(h)+1:].lstrip().rstrip("\n")
                exc = SeamlessTransformationError(exc_str)
        self.transformation_exceptions[tf_checksum] = exc
        self._set_exc(transformers, exc)
        return
    self.set_transformation_result(tf_checksum, result_checksum, False)
def set_transformation_result(self, tf_checksum, result_checksum, prelim):
    """Store *result_checksum* for *tf_checksum* and push it to all local
    transformers.

    prelim=True marks a preliminary result that may later be replaced;
    a final (non-prelim) result is also persisted to the database sink.
    """
    from ..manager.tasks.transformer_update import (
        TransformerResultUpdateTask
    )
    if tf_checksum in self.transformation_results:
        old_result_checksum, old_prelim = self.transformation_results[tf_checksum]
        # A final result is immutable; only preliminary ones get replaced.
        if not old_prelim:
            return # transformation result was already set by something else
        # Replacing a preliminary result: release its buffer refcount.
        buffer_cache.decref(old_result_checksum)
    self.transformation_results[tf_checksum] = result_checksum, prelim
    buffer_cache.incref(result_checksum, False)
    if not prelim:
        database_sink.set_transformation_result(tf_checksum, result_checksum)
    transformers = self.transformations_to_transformers[tf_checksum]
    for transformer in transformers:
        # Remote/dummy transformers are updated through their own channels.
        if isinstance(transformer, (RemoteTransformer, DummyTransformer)):
            continue
        manager = transformer._get_manager()
        if result_checksum is not None:
            manager._set_transformer_checksum(
                transformer,
                result_checksum,
                False,
                prelim=prelim
            )
            TransformerResultUpdateTask(manager, transformer).launch()
        else:
            # No checksum at all: the transformer becomes void/undefined.
            manager.cancel_transformer(
                transformer,
                void=True,
                reason=StatusReasonEnum.UNDEFINED
            )
def _get_transformation_result(self, tf_checksum):
    """Return (result_checksum, prelim); fall back to the database on a miss."""
    checksum, prelim = self.transformation_results.get(tf_checksum, (None, None))
    if checksum is not None:
        return checksum, prelim
    # Anything served by the database is final by definition: prelim=False.
    return database_cache.get_transformation_result(tf_checksum), False
async def serve_semantic_to_syntactic(self, sem_checksum, celltype, subcelltype, peer_id):
    """Resolve a semantic checksum to its syntactic checksums.

    Lookup order: identity (when syntax == semantics), the in-memory
    cache, the database, then remote peers. Returns None if all fail.
    """
    from ...communion_client import communion_client_manager

    def checked(candidates):
        # Sanity check: every checksum must be raw bytes.
        for cand in candidates:
            assert isinstance(cand, bytes), candidates
        return candidates

    if syntactic_is_semantic(celltype, subcelltype):
        return checked([sem_checksum])
    key = (sem_checksum, celltype, subcelltype)
    hit = self.semantic_to_syntactic_checksums.get(key)
    if hit is not None:
        return checked(hit)
    hit = database_cache.sem2syn(key)
    if hit is not None:
        self.semantic_to_syntactic_checksums[key] = hit
        return checked(hit)
    remote = communion_client_manager.remote_semantic_to_syntactic
    hit = await remote(sem_checksum, celltype, subcelltype, peer_id)
    if hit is not None:
        # Cache locally and persist the remote answer.
        self.semantic_to_syntactic_checksums[key] = hit
        database_sink.sem2syn(key, hit)
        return checked(hit)
    return None
async def serve_get_transformation(self, tf_checksum, remote_peer_id):
    """Return the transformation dict for *tf_checksum*, decoding its
    buffer (fetched locally, then remotely) when not already known."""
    assert isinstance(tf_checksum, bytes)
    transformation = self.transformations.get(tf_checksum)
    if transformation is not None:
        return transformation
    try:
        buf = get_buffer(tf_checksum)
    except CacheMissError:
        # NOT remote_peer_id! The submitting peer may hold a buffer we need!
        buf = await get_buffer_remote(tf_checksum, None)
    if buf is None:
        return None
    transformation = json.loads(buf)
    # Hex-encoded checksums in the JSON are converted back to raw bytes.
    for key, value in transformation.items():
        if key == "__env__":
            transformation[key] = bytes.fromhex(value)
        elif key not in ("__output__", "__as__"):
            if value[-1] is not None:
                value[-1] = bytes.fromhex(value[-1])
    return transformation
async def serve_transformation_status(self, tf_checksum, peer_id):
    """Report the status of a transformation to a communion peer.

    Return codes (first tuple element):
      3 = final result available, 2 = running (progress, prelim result),
      1 = runnable here, 0 = failed (traceback text),
      -1 = not runnable (hash pattern / deep job),
      -2 = input buffers unobtainable, -3 = transformation unknown.
    """
    assert isinstance(tf_checksum, bytes)
    from ...communion_client import communion_client_manager
    result_checksum, prelim = self._get_transformation_result(tf_checksum)
    if result_checksum is not None:
        if not prelim:
            return 3, result_checksum
    running_job = self.transformation_jobs.get(tf_checksum)
    if running_job is not None:
        progress = self.job_progress.get(id(running_job))
        return 2, progress, result_checksum
    exc = self.transformation_exceptions.get(tf_checksum)
    if exc is not None:
        # NOTE(review): the `etype` keyword of format_exception was removed
        # in Python 3.10 — confirm the supported interpreter versions.
        exc_list = traceback.format_exception(
            value=exc,
            etype=type(exc),
            tb=exc.__traceback__
        )
        exc_str = "".join(exc_list)
        return 0, exc_str
    # Not known locally: ask remote peers before inspecting it ourselves.
    result = await communion_client_manager.remote_transformation_status(
        tf_checksum, peer_id
    )
    if result is not None:
        return result
    transformation = await self.serve_get_transformation(
        tf_checksum,
        remote_peer_id=peer_id
    )
    if transformation is None:
        return -3, None
    if "__hash_pattern__" in transformation:
        return -1, None
    # Check that at least one syntactic buffer is obtainable per input.
    # NOTE(review): "__env__" is not skipped here, although its value is
    # bytes (see serve_get_transformation) and would not unpack into a
    # (celltype, subcelltype, checksum) triple — confirm it cannot occur.
    remote = communion_client_manager.remote_buffer_status
    for key, value in transformation.items():
        if key in ("__output__", "__as__"):
            continue
        celltype, subcelltype, sem_checksum = value
        if syntactic_is_semantic(celltype, subcelltype):
            syn_checksums = [sem_checksum]
        else:
            syn_checksums = await self.serve_semantic_to_syntactic(
                sem_checksum, celltype, subcelltype,
                peer_id = None
            )
        if syn_checksums is None:
            syn_checksums = []
        for syn_checksum in syn_checksums:
            if buffer_cache.buffer_check(syn_checksum):
                break
            curr_sub_result = await remote(
                syn_checksum, peer_id=None
            )
            if curr_sub_result:
                break
        else:
            # No obtainable buffer for this input: cannot run anywhere.
            return -2, None
    # Seamless instances do not accept deep transformation jobs
    # Otherwise, Seamless instances never return -1 (not runnable), although supervisors may
    return 1, None
def clear_exception(self, transformer=None, *, tf_checksum=None):
    """Clear a recorded transformation exception and re-activate its
    transformers.

    Exactly one of *transformer* / *tf_checksum* must be given.
    """
    from ..manager.tasks.transformer_update import TransformerUpdateTask
    from ...communion_client import communion_client_manager
    from ..manager.unvoid import unvoid_transformer
    if transformer is None:
        assert tf_checksum is not None
    else:
        assert tf_checksum is None
        tf_checksum = self.transformer_to_transformations.get(transformer)
        if tf_checksum is None:
            # Transformer not mapped yet; remember to clear once it is.
            transformer._exception_to_clear = True
            return
    exc = self.transformation_exceptions.pop(tf_checksum, None)
    if exc is None:
        return
    # Ask every connected transformation peer to clear it as well.
    clients = communion_client_manager.clients["transformation"]
    for client in clients:
        coro = client.clear_exception(tf_checksum)
        fut = asyncio.ensure_future(coro)
        client.future_clear_exception = fut
    for tf in self.transformations_to_transformers[tf_checksum]:
        if isinstance(tf, RemoteTransformer):
            key = tf.tf_checksum, tf.peer_id
            try:
                tf.queue.put_nowait(None)
            except asyncio.QueueFull:
                pass
            self.remote_transformers.pop(key, None)
            continue
        if isinstance(tf, DummyTransformer):
            continue
        # Local transformer: un-void it and schedule a fresh update task.
        unvoid_transformer(tf, tf._get_manager().livegraph)
        TransformerUpdateTask(tf._get_manager(), tf).launch()
def hard_cancel(self, transformer=None, *, tf_checksum=None):
    """Public entry point for a hard cancel, by transformer or by checksum.

    Exactly one of *transformer* / *tf_checksum* must be given.
    """
    if transformer is None:
        assert tf_checksum is not None
    else:
        assert tf_checksum is None
        tf_checksum = self.transformer_to_transformations.get(transformer)
        if tf_checksum is None:
            return
    running = self.transformation_jobs.get(tf_checksum)
    if running is None:
        return
    self._hard_cancel(running)
async def run_transformation_async(self, tf_checksum):
    """Run a transformation to completion and return its result checksum.

    A DummyTransformer tracks progress and preliminary results, which are
    polled and logged while the job runs. Raises the recorded exception on
    failure, CacheMissError if the transformation cannot be obtained.
    """
    from . import CacheMissError
    result_checksum, prelim = self._get_transformation_result(tf_checksum)
    if result_checksum is not None and not prelim:
        return result_checksum
    transformation = await self.serve_get_transformation(tf_checksum, None)
    if transformation is None:
        raise CacheMissError
    # Pre-resolve all semantic->syntactic checksums the job will need.
    for k,v in transformation.items():
        if k in ("__output__", "__as__"):
            continue
        if k == "__env__":
            continue
        celltype, subcelltype, sem_checksum = v
        if syntactic_is_semantic(celltype, subcelltype):
            continue
        await self.serve_semantic_to_syntactic(
            sem_checksum, celltype, subcelltype,
            None
        )
    transformer = DummyTransformer(tf_checksum)
    async def incref_and_run():
        # Attach the dummy transformer; only start a job when there is no
        # usable (final) cached result.
        result = await self.incref_transformation(
            transformation, transformer,
            transformation_build_exception=None
        )
        if result is not None:
            tf_checksum, result_checksum, prelim = result
            if result_checksum is None or prelim:
                job = self.run_job(transformation, tf_checksum)
                if job is not None:
                    # Shield: cancelling this coroutine must not kill the job.
                    await asyncio.shield(job.future)
    coro = incref_and_run()
    fut = asyncio.ensure_future(coro)
    last_result_checksum = None
    last_progress = None
    fut_done_time = None
    # Poll loop: log progress/prelim updates until the job has finished.
    while 1:
        if fut.done():
            if fut_done_time is None:
                fut_done_time = time.time()
            else:
                # Give the callbacks ~2s to deliver a result before failing.
                if time.time() - fut_done_time > 2:
                    fut.result()
                    raise Exception("Transformation finished, but didn't trigger a result or exception")
        if transformer._status_reason == StatusReasonEnum.EXECUTING:
            # The job slot is reset to None once the job is done.
            if self.transformation_jobs.get(tf_checksum) is None:
                break
        if transformer.prelim != last_result_checksum \
            or transformer.progress != last_progress:
            last_progress = transformer.progress
            last_result_checksum = transformer.prelim
            if last_result_checksum is None:
                log(last_progress)
            else:
                log(last_progress, last_result_checksum.hex())
        await asyncio.sleep(0.05)
    if tf_checksum in self.transformation_exceptions:
        raise self.transformation_exceptions[tf_checksum]
    result_checksum, prelim = self._get_transformation_result(tf_checksum)
    assert not prelim
    return result_checksum
def run_transformation(self, tf_checksum):
    """Synchronous wrapper around run_transformation_async."""
    loop = asyncio.get_event_loop()
    task = asyncio.ensure_future(self.run_transformation_async(tf_checksum))
    loop.run_until_complete(task)
    return task.result()
def destroy(self):
    """Best-effort cleanup at Seamless shutdown: report leaked transformer
    registrations and cancel any still-running job futures."""
    # only called when Seamless shuts down
    leftover = self.transformer_to_transformations
    if leftover:
        log("TransformationCache, transformer_to_transformations: %d undestroyed" % len(leftover))
    for pending in self.transformation_jobs.values():
        if pending is None:
            continue
        fut = pending.future
        if fut is None:
            continue
        try:
            fut.cancel()
        except:
            pass
# Module-level singleton used throughout Seamless.
transformation_cache = TransformationCache()

# NOTE(review): these imports sit at the *bottom* of the module, presumably
# to break circular-import cycles with the modules imported here — confirm
# before moving them to the top.
from .tempref import temprefmanager
from .buffer_cache import buffer_cache
from ..protocol.get_buffer import get_buffer, get_buffer_remote, CacheMissError
from ..protocol.conversion import convert
from ..protocol.deserialize import deserialize
from ..protocol.calculate_checksum import calculate_checksum, calculate_checksum_sync
from .database_client import database_cache, database_sink
from ..transformation import TransformationJob, SeamlessTransformationError
from ..status import SeamlessInvalidValueError, SeamlessUndefinedError, StatusReasonEnum
from ..transformer import Transformer | {
"repo_name": "sjdv1982/seamless",
"path": "seamless/core/cache/transformation_cache.py",
"copies": "1",
"size": "39655",
"license": "mit",
"hash": 3741561023461508600,
"line_mean": 40.5246073298,
"line_max": 108,
"alpha_frac": 0.5888034296,
"autogenerated": false,
"ratio": 4.557522123893805,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5646325553493805,
"avg_score": null,
"num_lines": null
} |
"""A Transform takes a list of `Column` and returns a namedtuple of `Column`."""
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
from abc import abstractproperty
import collections
import inspect
from .column import Column
from .column import TransformedColumn
def _make_list_of_column(x):
"""Converts `x` into a list of `Column` if possible.
Args:
x: a `Column`, a list of `Column` or `None`.
Returns:
`x` if it is a list of Column, `[x]` if `x` is a `Column`, `[]` if x is
`None`.
Raises:
TypeError: `x` is not a `Column` a list of `Column` or `None`.
"""
if x is None:
return []
elif isinstance(x, Column):
return [x]
elif isinstance(x, (list, tuple)):
for i, y in enumerate(x):
if not isinstance(y, Column):
raise TypeError(
"Expected a tuple or list of Columns; entry %s has type %s." %
(i, type(y).__name__))
return list(x)
raise TypeError("Expected a Column or list of Column; got %s" %
type(x).__name__)
def _make_tuple_of_string(x):
"""Converts `x` into a list of `str` if possible.
Args:
x: a `str`, a list of `str`, a tuple of `str`, or `None`.
Returns:
`x` if it is a tuple of str, `tuple(x)` if it is a list of str,
`(x)` if `x` is a `str`, `()` if x is `None`.
Raises:
TypeError: `x` is not a `str`, a list or tuple of `str`, or `None`.
"""
if x is None:
return ()
elif isinstance(x, str):
return (x,)
elif isinstance(x, (list, tuple)):
for i, y in enumerate(x):
if not isinstance(y, str):
raise TypeError(
"Expected a tuple or list of strings; entry %s has type %s." %
(i, type(y).__name__))
return x
raise TypeError("Expected a string or list of strings or tuple of strings; " +
"got %s" % type(x).__name__)
def parameter(func):
    """Tag *func* as a `@parameter` getter and expose it as a property.

    Note that all `@parameter`s are automatically `@property`s as well.

    Args:
      func: the getter function to tag and wrap

    Returns:
      A read-only `property` whose getter carries ``is_parameter = True``,
      which `Transform.parameters` discovers via introspection.
    """
    setattr(func, "is_parameter", True)
    prop = property(func)
    return prop
class Transform(object):
    """A function from a list of `Column` to a namedtuple of `Column`.

    Transforms map zero or more columns of a DataFrame to new columns.
    """
    # NOTE(review): Python 2-style metaclass declaration; it is silently
    # ignored on Python 3, where the class would not actually be abstract —
    # confirm the supported Python versions.
    __metaclass__ = ABCMeta

    def __init__(self):
        # Lazily-built namedtuple type; see `return_type`.
        self._return_type = None

    @abstractproperty
    def name(self):
        """Name of the transform."""
        raise NotImplementedError()

    def parameters(self):
        """A dict of names to values of properties marked with `@parameter`."""
        # Properties created by the `parameter` decorator carry
        # `is_parameter` on their getter; collect them by introspection.
        property_param_names = [name
                                for name, func in inspect.getmembers(type(self))
                                if (hasattr(func, "fget") and hasattr(
                                    getattr(func, "fget"), "is_parameter"))]
        return {name: getattr(self, name) for name in property_param_names}

    @abstractproperty
    def input_valency(self):
        """The number of `Column`s that the `Transform` should expect as input.

        `None` indicates that the transform can take a variable number of
        inputs.

        This function should depend only on `@parameter`s of this `Transform`.

        Returns:
          The number of expected inputs.
        """
        raise NotImplementedError()

    @property
    def output_names(self):
        """The names of `Column`s output by the `Transform`.

        This function should depend only on `@parameter`s of this `Transform`.

        Returns:
          A tuple of names of outputs provided by this Transform.
        """
        return _make_tuple_of_string(self._output_names)

    @abstractproperty
    def _output_names(self):
        """The names of `Column`s output by the `Transform`.

        This function should depend only on `@parameter`s of this `Transform`.

        Returns:
          Names of outputs provided by this Transform, as a string, tuple,
          or list.
        """
        raise NotImplementedError()

    @property
    def return_type(self):
        """Provides a namedtuple type which will be used for output.

        A Transform generates one or many outputs, named according to
        _output_names. This method creates (and caches) a namedtuple type
        using those names as the keys. The Transform output is then
        generated by instantiating an object of this type with
        corresponding values.

        Note this output type is used both for `__call__`, in which case
        the values are `TransformedColumn`s, and for `apply_transform`, in
        which case the values are `Tensor`s.

        Returns:
          A namedtuple type fixing the order and names of the outputs of
          this transform.
        """
        if self._return_type is None:
            # TODO(soergel): pylint 3 chokes on this, but it is legit and preferred.
            # return_type_name = "%sReturnType" % type(self).__name__
            return_type_name = "ReturnType"
            self._return_type = collections.namedtuple(return_type_name,
                                                       self.output_names)
        return self._return_type

    def _check_output_tensors(self, output_tensors):
        """Helper for `build(...)`; verifies the output of `_build_transform`.

        Args:
          output_tensors: value returned by a call to `_build_transform`.

        Raises:
          TypeError: `transform_output` is not a list.
          ValueError: `transform_output` does not match `output_names`.
        """
        if not isinstance(output_tensors, self.return_type):
            raise TypeError(
                "Expected a NamedTuple of Tensors with elements %s; got %s." %
                (self.output_names, type(output_tensors).__name__))

    def __call__(self, input_columns=None):
        """Apply this `Transform` to the provided `Column`s, producing 'Column's.

        Args:
          input_columns: None, a `Column`, or a list of input `Column`s,
            acting as positional arguments.

        Returns:
          A namedtuple of the output Columns.

        Raises:
          ValueError: `input_columns` does not have expected length
        """
        input_columns = _make_list_of_column(input_columns)
        if len(input_columns) != self.input_valency:
            raise ValueError("Expected %s input Columns but received %s." %
                             (self.input_valency, len(input_columns)))
        output_columns = [TransformedColumn(input_columns, self, output_name)
                          for output_name in self.output_names]
        # pylint: disable=not-callable
        return self.return_type(*output_columns)

    def apply_transform(self, input_columns, cache=None):
        """Apply this `Transform` to the provided `Column`s, producing 'Tensor's.

        Args:
          input_columns: None, a `Column`, or a list of input `Column`s,
            acting as positional arguments.
          cache: a dict from Column reprs to Tensors.

        Returns:
          A namedtuple of the output Tensors.

        Raises:
          ValueError: `input_columns` does not have expected length
        """
        # pylint: disable=not-callable
        if cache is None:
            cache = {}
        if len(input_columns) != self.input_valency:
            raise ValueError("Expected %s input Columns but received %s." %
                             (self.input_valency, len(input_columns)))
        input_tensors = [input_column.build(cache)
                         for input_column in input_columns]
        # Note we cache each output individually, not just the entire output
        # tuple. This allows using the graph as the cache, since it can
        # sensibly cache only individual Tensors.
        output_reprs = [TransformedColumn.make_repr(input_columns, self,
                                                    output_name)
                        for output_name in self.output_names]
        output_tensors = [cache.get(output_repr) for output_repr in output_reprs]
        if None in output_tensors:
            # At least one output is missing from the cache: rebuild all of
            # them and refresh the cache.
            result = self._apply_transform(input_tensors)
            for output_name, output_repr in zip(self.output_names, output_reprs):
                cache[output_repr] = getattr(result, output_name)
        else:
            result = self.return_type(*output_tensors)
        self._check_output_tensors(result)
        return result

    @abstractmethod
    def _apply_transform(self, input_tensors):
        """Applies the transformation to the `transform_input`.

        Args:
          input_tensors: a list of Tensors representing the input to
            the Transform.

        Returns:
          A namedtuple of Tensors representing the transformed output.
        """
        raise NotImplementedError()

    def __str__(self):
        return self.name

    def __repr__(self):
        parameters_sorted = ["%s: %s" % (repr(k), repr(v))
                             for k, v in sorted(self.parameters().items())]
        parameters_joined = ", ".join(parameters_sorted)
        return "%s({%s})" % (self.name, parameters_joined)
| {
"repo_name": "ivano666/tensorflow",
"path": "tensorflow/contrib/learn/python/learn/dataframe/transform.py",
"copies": "2",
"size": "9408",
"license": "apache-2.0",
"hash": -9087812181506814000,
"line_mean": 31.7804878049,
"line_max": 80,
"alpha_frac": 0.6444515306,
"autogenerated": false,
"ratio": 4.001701403658018,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005716618865239976,
"num_lines": 287
} |
"""A transient mempty value to serve as a placeholder when any monoidal value
can be used.
"""
from ..abc import Monoid
from ..utils.internal import Instance
from ..funcs.monoid import mconcat, mempty, mappend
__all__ = ('Mempty',)
class _Mempty(Monoid):
    """A class that acts as a transient mempty value, similar to the Haskell
    implementation. This class should be used as a singleton, placeholder
    value for operations that can expect any Monoidal value. However, Python
    isn't intuitive in the same ways as Haskell is and needs some help
    understanding. Mempty isn't so much the *absence* of a value as just
    sitting around waiting for a monoid to be provided.
    """
    __slots__ = ()
    # _Mempty is its own mempty value
    mempty = Instance()
    # Singleton storage; __new__ below always returns this one instance.
    _inst = None

    def __new__(cls):
        if cls._inst is None:
            cls._inst = Monoid.__new__(cls)
        return cls._inst

    def __repr__(self):
        return 'Mempty'

    # mempty is always false
    __bool__ = __nonzero__ = lambda s: False

    def mappend(self, other):
        """When mempty is used to mappend something, it simply becomes
        the other value if it is monoidal. Otherwise, a TypeError is raised.
        In doing so, the transient mempty disappears.
        """
        return other

    @staticmethod
    def mconcat(*monoids, **kwargs):
        """Rather than rely on the mconcat provided by Monoid, the transient
        mempty will attempt to construct a mconcat call via the
        pynads.funcs.mconcat function. This implementation of mconcat
        will also filter all instances of mempty out instead of relying
        on the _Mempty._reflected_mappend method.
        """
        monoids = [m for m in monoids if m is not Mempty]
        if not monoids:
            return Mempty
        return mconcat(*monoids, **kwargs)

    def _reflected_mappend(self, other):
        """It's possible that a Mempty will end up in the middle of a list
        of monoids. Rather than blowing up, Mempty will attempt to discover
        the appropriate mempty value to return. If a mempty can't be
        determined, a TypeError is raised instead. Unlike other calls
        that delegate to pynads.utils.monoid.get_generic_mempty in some way,
        there's not a good way to provide optional keyword arguments
        since this method will actually be invoked by dunder method calls
        like __add__ or __or__.
        """
        return mappend(other, mempty(other))

    # All binary-operator hooks fold into _reflected_mappend so the
    # placeholder vanishes on first combination.
    __add__ = __iadd__ = __radd__ = \
        __or__ = __ior__ = __ror__ = _reflected_mappend

    # should work for mappends on sequences and mappings
    __iter__ = lambda _: iter(())


# The module-level singleton placeholder.
Mempty = _Mempty()
| {
"repo_name": "justanr/pynads",
"path": "pynads/concrete/mempty.py",
"copies": "1",
"size": "2706",
"license": "mit",
"hash": -6841951416203960000,
"line_mean": 35.08,
"line_max": 77,
"alpha_frac": 0.6459719143,
"autogenerated": false,
"ratio": 3.973568281938326,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5119540196238326,
"avg_score": null,
"num_lines": null
} |
"""A translation of an example from the Java Tutorial
http://java.sun.com/docs/books/tutorial/
This example converts between metric and english units
"""
from java import awt
from java.applet import Applet
from java.awt.event import ActionListener, ItemListener, AdjustmentListener
from pawt import GridBag
# Per measurement system: a display label plus (unit name, multiplier)
# pairs, where the multiplier converts that unit to meters.
# NOTE(review): 1613.0 m per mile looks off (a statute mile is ~1609.34 m)
# — confirm against the original Java Tutorial example.
basicUnits = [['Metric System', [('Centimeters', 0.01),
                                 ('Meters', 1.0),
                                 ('Kilometers', 1000.0)]],
              ['U.S. System', [('Inches', 0.0254),
                               ('Feet', 0.305),
                               ('Yards', 0.914),
                               ('Miles', 1613.0)]]
              ]
class SimpleBorder:
    """Mixin painting a one-pixel rectangular border with 5px insets."""

    def paint(self, g):
        width, height = self.size.width, self.size.height
        g.drawRect(0, 0, width - 1, height - 1)

    def getInsets(self):
        return awt.Insets(5, 5, 5, 5)
class Converter(Applet, SimpleBorder):
    """Applet hosting one ConversionPanel per unit system, kept in sync."""

    def init(self, unitSets=basicUnits):
        self.setLayout(awt.GridLayout(2, 0, 5, 5))
        self.panels = []
        for title, unit_table in unitSets:
            panel = ConversionPanel(title, unit_table, self)
            self.panels.append(panel)
            self.add(panel)

    def convert(self, master):
        """Push the value of *master* to all other panels, rescaled."""
        value = master.getValue()
        factor = master.getMultiplier()
        for panel in self.panels:
            if panel is not master:
                panel.setValue(factor / panel.getMultiplier() * value)
class ConversionPanel(awt.Panel, SimpleBorder,
                      ActionListener, AdjustmentListener, ItemListener):
    """One unit system: text field, unit chooser and slider, kept in sync."""

    # Slider range and paging step.
    max, block = 10000, 100

    def __init__(self, title, units, controller):
        self.units = units
        self.controller = controller
        bag = GridBag(self, fill='HORIZONTAL')
        caption = awt.Label(title, awt.Label.CENTER)
        bag.addRow(caption)
        self.text = awt.TextField('0', 10, actionListener=self)
        bag.add(self.text, weightx=1.0)
        self.chooser = awt.Choice(itemListener=self)
        for unit_name, _factor in units:
            self.chooser.add(unit_name)
        bag.addRow(self.chooser)
        self.slider = awt.Scrollbar(awt.Scrollbar.HORIZONTAL,
                                    maximum=self.max+10,
                                    blockIncrement=self.block,
                                    adjustmentListener=self)
        bag.add(self.slider)

    def getMultiplier(self):
        selected = self.chooser.selectedIndex
        return self.units[selected][1]

    def getValue(self):
        try:
            return float(self.text.getText())
        except:
            # Unparseable text counts as zero.
            return 0.0

    def actionPerformed(self, e):
        # Text edited: move the slider and propagate to other panels.
        self.setSlider(self.getValue())
        self.controller.convert(self)

    def itemStateChanged(self, e):
        # Unit changed: re-propagate the current value.
        self.controller.convert(self)

    def adjustmentValueChanged(self, e):
        # Slider moved: mirror it into the text field and propagate.
        self.text.setText(str(e.getValue()))
        self.controller.convert(self)

    def setValue(self, v):
        self.text.setText(str(v))
        self.setSlider(v)

    def setSlider(self, f):
        # Clamp into [0, self.max] before assigning to the scrollbar.
        clamped = min(max(f, 0), self.max)
        self.slider.value = int(clamped)
# Standalone entry point: run the applet inside a pawt test frame.
if __name__ == '__main__':
    import pawt
    pawt.test(Converter())
| {
"repo_name": "tunneln/CarnotKE",
"path": "jyhton/Demo/applet/deprecated/Converter.py",
"copies": "12",
"size": "2651",
"license": "apache-2.0",
"hash": -6001551510396182000,
"line_mean": 22.4601769912,
"line_max": 75,
"alpha_frac": 0.67710298,
"autogenerated": false,
"ratio": 3.068287037037037,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9745390017037037,
"avg_score": null,
"num_lines": null
} |
# A translation of calc_fitness.pl into python! For analysis of Tn-Seq.
# This script requires BioPython, which in turn has a good number of dependencies (some optional but very helpful).
# How to install BioPython and a list of its dependencies can be found here: http://biopython.org/DIST/docs/install/Installation.html
# K. McCoy
# python ~/Bio/calc_fitness.py -ef .0 -el .10 -cutoff 0 -wig ./wiggle-file.wig -t1 /ExpOut/160804_NS500751_0017_AHF2KLBGXY/Out/Maps/19F-SDMMT2Vanc0.1.map -t2 /ExpOut/160804_NS500751_0017_AHF2KLBGXY/Out/Maps/19F-SDMMT2NoAb.map -ref /data1/NextSeq/Refs/NC_012469.gbk -out output.csv -expansion 300 -normalize foo
import re
##### ARGUMENTS #####
def print_usage():
    """Print the full flag reference (shown when required flags are missing)."""
    # NOTE: Python 2 print statements — this script targets Python 2.
    print "\n" + "You are missing one or more required flags. A complete list of flags accepted by calc_fitness is as follows:" + "\n\n"
    print "\033[1m" + "Required" + "\033[0m" + "\n"
    print "-ref" + "\t\t" + "The name of the reference genome file, in GenBank format." + "\n"
    print "-t1" + "\t\t" + "The name of the bowtie mapfile from time 1." + "\n"
    print "-t2" + "\t\t" + "The name of the bowtie mapfile from time 2." + "\n"
    print "-out" + "\t\t" + "Name of a file to enter the .csv output." + "\n"
    print "\n"
    print "\033[1m" + "Optional" + "\033[0m" + "\n"
    print "-expansion" + "\t\t" + "Expansion factor (default: 250)" + "\n"
    print "-d" + "\t\t" + "All reads being analyzed are downstream of the transposon" + "\n"
    print "-reads1" + "\t\t" + "The number of reads to be used to calculate the correction factor for time 0." + "\n\t\t" + "(default counted from bowtie output)" + "\n"
    print "-reads2" + "\t\t" + "The number of reads to be used to calculate the correction factor for time 6." + "\n\t\t" + "(default counted from bowtie output)" + "\n"
    print "-cutoff" + "\t\t" + "Discard any positions where the average of counted transcripts at time 0 and time 1 is below this number (default 0)" + "\n"
    print "-cutoff2" + "\t\t" + "Discard any positions within the normalization genes where the average of counted transcripts at time 0 and time 1 is below this number (default 0)" + "\n"
    print "-strand" + "\t\t" + "Use only the specified strand (+ or -) when counting transcripts (default: both)" + "\n"
    print "-normalize" + "\t" + "A file that contains a list of genes that should have a fitness of 1" + "\n"
    print "-maxweight" + "\t" + "The maximum weight a transposon gene can have in normalization calculations" + "\n"
    print "-multiply" + "\t" + "Multiply all fitness scores by a certain value (e.g., the fitness of a knockout). You should normalize the data." + "\n"
    print "-ef" + "\t\t" + "Exclude insertions that occur in the first N amount (%) of gene--becuase may not affect gene function." + "\n"
    print "-el" + "\t\t" + "Exclude insertions in the last N amount (%) of the gene--considering truncation may not affect gene function." + "\n"
    print "-wig" + "\t\t" + "Create a wiggle file for viewing in a genome browser. Provide a filename." + "\n"
    print "-uncol" + "\t\t" + "Use if reads were uncollapsed when mapped." + "\n"
    print "\n"
# ----- COMMAND-LINE ARGUMENTS -----
# All options are stored as plain strings (no type=); numeric options are
# converted with float()/int() at each point of use below.
import argparse
parser = argparse.ArgumentParser()
# Required inputs (presence is checked manually below, not via required=True).
parser.add_argument("-ref", action="store", dest="ref_genome")
parser.add_argument("-t1", action="store", dest="mapfile1")
parser.add_argument("-t2", action="store", dest="mapfile2")
parser.add_argument("-out", action="store", dest="outfile")
parser.add_argument("-out2", action="store", dest="outfile2")
# Optional tuning parameters; defaults are filled in further down.
parser.add_argument("-expansion", action="store", dest="expansion_factor")
parser.add_argument("-d", action="store", dest="downstream")
parser.add_argument("-reads1", action="store", dest="reads1")
parser.add_argument("-reads2", action="store", dest="reads2")
parser.add_argument("-cutoff", action="store", dest="cutoff")
parser.add_argument("-cutoff2", action="store", dest="cutoff2")
parser.add_argument("-strand", action="store", dest="usestrand")
parser.add_argument("-normalize", action="store", dest="normalize")
parser.add_argument("-maxweight", action="store", dest="max_weight")
parser.add_argument("-multiply", action="store", dest="multiply")
parser.add_argument("-ef", action="store", dest="exclude_first")
parser.add_argument("-el", action="store", dest="exclude_last")
parser.add_argument("-wig", action="store", dest="wig")
parser.add_argument("-uncol", action="store", dest="uncol")
arguments = parser.parse_args()
# The reference genome, both mapfiles and the main output file are mandatory;
# bail out with the usage text if any is missing.
if (not arguments.ref_genome or not arguments.mapfile1 or not arguments.mapfile2 or not arguments.outfile):
    print_usage()
    quit()
# Sets the default value of the expansion factor to 250, which is a trivial placeholder number.
if (not arguments.expansion_factor):
    arguments.expansion_factor = 250
# 75 is similarly trivial (cap on a normalization gene's weight; see NORMALIZATION below).
if (not arguments.max_weight):
    arguments.max_weight = 75
# Sets the default value of cutoff to 0; cutoff exists to discard positions with a low
# number of counted transcripts, because fitnesses calculated from them may not be very
# accurate, by the same reasoning that studies with low sample sizes are inaccurate.
if (not arguments.cutoff):
    arguments.cutoff = 0
# Sets the default value of cutoff2 to 10; cutoff2 exists to discard positions within
# normalization genes with a low number of counted transcripts, because fitnesses
# calculated from them similarly may not be very accurate.
# This only has an effect if it's larger than cutoff, since the normalization step
# references a list of insertions already affected by cutoff.
if (not arguments.cutoff2):
    arguments.cutoff2 = 10
# By default, reads from both strands are counted.
if (not arguments.usestrand):
    arguments.usestrand = "both"
##### PARSING THE REFERENCE GENOME #####
def get_time():
    """Return the current wall-clock time as a datetime.time object (for log lines)."""
    from datetime import datetime
    now = datetime.now()
    return now.time()
print "\n" + "Starting: " + str(get_time()) + "\n"
from Bio import SeqIO
import os.path
# Parse the GenBank reference; the loop leaves refname/features bound to the
# values from the LAST record in the file (normally there is only one).
handle = open(arguments.ref_genome, "rU")
for record in SeqIO.parse(handle, "genbank"):
    refname = record.id
    features = record.features
handle.close()
# Makes a dictionary out of each feature that's a gene - with its gene name, start
# location, end location, and strand as keys to their values. Then makes a list out of
# all those dictionaries for ease of accessing later on.
feat_name = "gene"
# Special case: TVO references annotate loci as CDS features instead of gene features.
if re.search("TVO", arguments.ref_genome):
    feat_name = "CDS"
feature_list = []
for feature in features:
    if feature.type == feat_name:
        gene = feature.qualifiers["locus_tag"]
        strand = feature.location.strand
        start = float(feature.location.start)
        end = float(feature.location.end)
        # Exclude_first and exclude_last are used here to exclude whatever percentage of
        # the genes you like from calculations; e.g. a value of 0.1 for exclude_last would
        # exclude the last 10% of all genes!
        # This can be useful because insertions at the very start or end of genes often
        # don't actually break its function.
        if (arguments.exclude_first):
            start += (end - start) * float(arguments.exclude_first)
        if (arguments.exclude_last):
            end -= (end - start) * float(arguments.exclude_last)
        feature_dictionary = {"gene": gene, "start": start, "end": end, "strand": strand}
        feature_list.append(feature_dictionary)
print "Done generating feature lookup: " + str(get_time()) + "\n"
##### PARSING THE MAPFILES #####
# This variant of the script only supports collapsed bowtie map input.
if (arguments.uncol):
    sys.exit("This script must use collapsed map input!!")
with open(arguments.mapfile1) as file:
    r1 = file.readlines()
with open(arguments.mapfile2) as file:
    r2 = file.readlines()
# When called, goes through each line of the mapfile to find the strand (+/Watson or
# -/Crick), count, and position of the read. It may be helpful to look at how the
# mapfiles are formatted to understand how this code finds them.
# Returns (plus_counts, minus_counts): dictionaries keyed by insertion position with
# insertion counts as values.  NOTE: each dict also carries two sentinel entries that
# live alongside the position keys -- "total" (sum of all counts) and "sites" (number
# of mapfile lines tallied), so len() of either dict is sites-plus-two sentinels.
def read_mapfile(reads):
    # NOTE(review): plus_total/minus_total are never updated or returned -- dead locals.
    plus_total = 0
    minus_total = 0
    plus_counts = {"total": 0, "sites": 0}
    minus_counts = {"total": 0, "sites": 0}
    for read in reads:
        # Uncollapsed input was already rejected before this function runs; this is a
        # defensive re-check.
        if (arguments.uncol):
            sys.exit("Not supposed to happen!")
        else:
            # Collapsed mapfile fields: count, strand, position (whitespace-separated).
            count = float(read.split()[0])
            strand = read.split()[1]
            position = float(read.split()[2])
        # If for some reason you want to skip all reads from one of the strands - for
        # example, if you wanted to compare the two strands - that's done here.
        if arguments.usestrand != "both" and strand != arguments.usestrand:
            continue
        # Makes dictionaries for the + & - strands, with each insert position as a key and
        # the number of insertions there as its corresponding value.
        if (strand == "+"):
            sequence_length = float(read.split()[3])
            # Plus-strand reads map upstream of the insertion unless -d was given, so the
            # position must be shifted by the read length (minus the fake "TA").
            if arguments.downstream:
                position += 0
            # The -2 in "(sequence_length - 2)" comes from a fake "TA" in the read; see how
            # the libraries are constructed for further on this
            else:
                position += (sequence_length - 2)
            plus_counts["total"] += count
            plus_counts["sites"] += 1
            if position in plus_counts:
                plus_counts[position] += count
            else:
                plus_counts[position] = count
        else:
            minus_counts["total"] += count
            minus_counts["sites"] += 1
            if position in minus_counts:
                minus_counts[position] += count
            else:
                minus_counts[position] = count
    print "Map Counts: " + str(len(plus_counts)) + " " + str(len(minus_counts))
    #print "Map elts: " + str(plus_counts.items()[1:10])
    #print "Map 147: " + str(plus_counts.get(147, -1)) + " " + str(minus_counts.get(147, -1))
    return (plus_counts, minus_counts)
# Calls read_mapfile(reads) to parse arguments.reads1 and arguments.reads2 (your reads
# from t1 and t2).
print "args.downstream : " + str(arguments.downstream)
(plus_ref_1, minus_ref_1) = read_mapfile(r1)
print "Read first file: " + str(get_time()) + "\n"
(plus_ref_2, minus_ref_2) = read_mapfile(r2)
print "Read second file: " + str(get_time()) + "\n"
# The lines below are just printed for reference; "total" and "sites" are the sentinel
# entries maintained by read_mapfile() inside each position dictionary.
print "Reads:" + "\n"
print "1: + " + str(plus_ref_1["total"]) + " - " + str(minus_ref_1["total"]) + "\n"
print "2: + " + str(plus_ref_2["total"]) + " - " + str(minus_ref_2["total"]) + "\n"
print "Sites:" + "\n"
print "1: + " + str(plus_ref_1["sites"]) + " - " + str(minus_ref_1["sites"]) + "\n"
print "2: + " + str(plus_ref_2["sites"]) + " - " + str(minus_ref_2["sites"]) + "\n"
##### FITNESS CALCULATIONS #####
# If reads1 and reads2 weren't specified in the command line, sets them as the total
# number of reads (found in read_mapfile())
if not arguments.reads1:
    arguments.reads1 = plus_ref_1["total"] + minus_ref_1["total"]
if not arguments.reads2:
    arguments.reads2 = plus_ref_2["total"] + minus_ref_2["total"]
# Calculates the correction factors for reads from t1 and t2; cfactor1 and cfactor2 are
# the number of reads from t1 and t2 respectively divided by total, which is the average
# number of reads between the two.
# This is used later on to correct for pipetting errors, or any other error that would
# cause unequal amounts of DNA from t1 and t2 to be sequenced so that an unequal amount
# of reads is produced
total = (float(arguments.reads1) + float(arguments.reads2))/2
cfactor1 = float(arguments.reads1)/total
cfactor2 = float(arguments.reads2)/total
print "Cfactor 1: " + str(cfactor1) + "\n"
print "Cfactor 2: " + str(cfactor2) + "\n"
import math
import csv
# Output header row; each subsequent row describes one insertion site.  w is written
# twice: column 11 is the raw fitness W and column 12 becomes the normalized fitness nW
# if normalization is requested later.
results = [["position", "strand", "count_1", "count_2", "ratio", "mt_freq_t1", "mt_freq_t2", "pop_freq_t1", "pop_freq_t2", "gene", "D", "W", "nW"]]
genic = 0
total_inserts = 0
# The genome length is taken from the third whitespace-separated field of the first
# line of the reference file (the GenBank LOCUS header line).
with open(arguments.ref_genome, "r") as file:
    firstline = file.readline()
    genomelength = firstline.split()[2]
i = 0
# NOTE(review): the count dictionaries are keyed by float positions while i here is an
# int; equal int/float values hash identically in Python, so the lookups still match.
while i < float(genomelength):
    # At each possible location for an insertion in the genome, counts the number of
    # actual insertions at t1 and which strand(s) the corresponding reads came from.
    c1 = 0
    if i in plus_ref_1:
        c1 = float(plus_ref_1[i])
        strand = "+/"
        if i in minus_ref_1:
            c1 += float(minus_ref_1[i])
            strand = "b/"
    elif i in minus_ref_1:
        c1 = float(minus_ref_1[i])
        strand = "-/"
    # If there were no insertions at a certain location at t1 just continues to the next
    # location; there can't be any comparison to make between t1 and t2 if there are no
    # t1 insertions!
    else:
        i += 1
        continue
    # At each location where there was an insertion at t1, counts the number of
    # insertions at t2 and which strand(s) the corresponding reads came from.
    c2 = 0
    if i in plus_ref_2:
        c2 = float(plus_ref_2[i])
        if i in minus_ref_2:
            c2 += float(minus_ref_2[i])
            strand += "b"
        else:
            strand += "+"
    elif i in minus_ref_2:
        c2 = float(minus_ref_2[i])
        strand += "-"
    # Corrects with cfactor1 and cfactor2
    c1 /= cfactor1
    if c2 != 0:
        c2 /= cfactor2
        ratio = c2/c1
    else:
        c2 = 0
        ratio = 0
    # Passes by all insertions with a number of reads smaller than the cutoff, as they
    # may lead to inaccurate fitness calculations.
    if (c1 + c2)/2 < float(arguments.cutoff):
        i += 1
        continue
    # Calculates each insertion's frequency within the populations at t1 and t2.
    mt_freq_t1 = c1/total
    mt_freq_t2 = c2/total
    pop_freq_t1 = 1 - mt_freq_t1
    pop_freq_t2 = 1 - mt_freq_t2
    # Calculates each insertion's fitness! This is from the fitness equation
    # log((freq of mutation @ t2 / freq of mutation @ t1)*expansion factor) /
    # log((freq of population without the mutation @ t2 / same @ t1)*expansion factor)
    w = 0
    if mt_freq_t2 != 0:
        top_w = math.log(mt_freq_t2*(float(arguments.expansion_factor)/mt_freq_t1))
        bot_w = math.log(pop_freq_t2*(float(arguments.expansion_factor)/pop_freq_t1))
        w = top_w/bot_w
    # Checks which gene locus the insertion falls within, and records that.
    gene = ''
    for feature_dictionary in feature_list:
        if feature_dictionary["start"] <= i and i <= feature_dictionary["end"]:
            gene = "".join(feature_dictionary["gene"])
            genic += 1
            break
    total_inserts += 1
    # Writes all relevant information on each insertion and its fitness to a csv file:
    # the location of the insertion, its strand, c1, c2, etc.
    # w is written twice, because the second w will be normalized if normalization is
    # called for, thus becoming nW.
    row = [i, strand, c1, c2, ratio, mt_freq_t1, mt_freq_t2, pop_freq_t1, pop_freq_t2, gene, arguments.expansion_factor, w, w]
    results.append(row)
    i += 1
with open(arguments.outfile, "wb") as csvfile:
    writer = csv.writer(csvfile)
    writer.writerows(results)
print "Done comparing mapfiles " + str(get_time()) + "\n"
print "Genic: " + str(genic) + "\n"
print "Total: " + str(total_inserts) + "\n"
##### NORMALIZATION #####
# If making a WIG file is requested in the arguments, starts a string to be added to and
# then written to the WIG file with a typical WIG file header.
# The header is just in a typical WIG file format; if you'd like to look into this more
# UCSC has notes on formatting WIG files on their site.
if (arguments.wig):
    wigstring = "track type=wiggle_0 name=" + arguments.wig + "\n" + "variableStep chrom=" + refname + "\n"
# Takes normalization genes (which should all be predicted or known to have fitness
# values of exactly 1.0, like transposons for example) and uses them to normalize the
# fitnesses of all insertion locations
if (arguments.normalize):
    with open(arguments.normalize) as file:
        transposon_genes = file.read().splitlines()
    print "Normalize genes loaded" + "\n"
    blank_ws = 0
    # NOTE(review): `sum` and `list` (below) shadow the Python builtins of the same name.
    sum = 0
    count = 0
    weights = []   # per-insertion weight = capped average read count
    scores = []    # per-insertion raw fitness W
    for list in results:
        # Column 9 is the gene name, column 11 the raw fitness W; only insertions that
        # fall inside a listed normalization gene contribute.
        if list[9] != '' and list[9] in transposon_genes: # and list[11]:
            c1 = list[2]
            c2 = list[3]
            score = list[11]
            avg = (c1 + c2)/2
            # Skips over those insertion locations with too few insertions - their
            # fitness values are less accurate because they're based on such small
            # insertion numbers.  NOTE(review): only c1 is compared against cutoff2 here,
            # not the t1/t2 average -- confirm intended.
            if float(c1) >= float(arguments.cutoff2):
                # Sets a max weight, to prevent insertion location scores with huge
                # weights from unbalancing the normalization.
                if (avg >= float(arguments.max_weight)):
                    avg = float(arguments.max_weight)
                # Tallies how many w values are 0 within the blank_ws value; you might
                # get many transposon genes with a w value of 0 if a bottleneck occurs,
                # for example, which is especially common with in vivo experiments.
                # This is used later by aggregate.py
                if score == 0:
                    blank_ws += 1
                sum += score
                count += 1
                weights.append(avg)
                scores.append(score)
                ##print str(list[9]) + " " + str(score) + " " + str(c1)
    # Counts and removes all "blank" fitness values of normalization genes - those that
    # = 0 - because they most likely don't really have a fitness value of 0, and you
    # just happened to not get any reads from that location at t2.
    blank_count = 0
    original_count = len(scores)
    curr_count = original_count
    i = 0
    while i < curr_count:
        w_value = scores[i]
        if w_value == 0:
            blank_count += 1
            # Remove in lockstep from both parallel lists, then re-check this index.
            weights.pop(i)
            scores.pop(i)
            i -= 1
            curr_count = len(scores)
        i += 1
    # If no normalization genes can pass the cutoff, normalization cannot occur, so this
    # ends the script and advises the user to lower cutoff and/or cutoff2.
    if len(scores) == 0:
        print 'ERROR: The normalization genes do not have enough reads to pass cutoff and/or cutoff2; please lower one or both of those arguments.' + "\n"
        quit()
    pc_blank_normals = float(blank_count) / float(original_count)
    with open(arguments.outfile2, "w") as f:
        f.write("# blank out of " + str(original_count) + ": " + str(pc_blank_normals) + "\n")
        f.write("blanks: " + str(pc_blank_normals) + "\n" + "total: " + str(total) + "\n" + "refname: " + refname + "\n")
        # Echo each contributing normalization-gene insertion for later inspection.
        for list in results:
            if list[9] != '' and list[9] in transposon_genes:
                c1 = list[2]
                if float(c1) >= float(arguments.cutoff2):
                    f.write(str(list[9]) + " " + str(list[11]) + " " + str(c1) + "\n")
        average = sum / count
        # Weighted average of the normalization scores, weighted by (capped) read counts.
        i = 0
        weighted_sum = 0
        weight_sum = 0
        while i < len(weights):
            weighted_sum += weights[i]*scores[i]
            weight_sum += weights[i]
            i += 1
        weighted_average = weighted_sum/weight_sum
        f.write("Normalization step:" + "\n")
        f.write("Regular average: " + str(average) + "\n")
        f.write("Weighted Average: " + str(weighted_average) + "\n")
        f.write("Total Insertions: " + str(count) + "\n")
        old_ws = 0
        new_ws = 0
        wcount = 0
        # Rescale every insertion's fitness by the weighted average; skip the header row
        # (its column 11 is the literal string 'W').
        for list in results:
            if list[11] == 'W':
                continue
            new_w = float(list[11])/weighted_average
            # Sometimes you want to multiply all the fitness values by a constant; this
            # does that.  For example you might multiply all the values by a constant for
            # a genetic interaction screen - where Tn-Seq is performed as usual except
            # there's one background knockout all the mutants share.
            if arguments.multiply:
                new_w *= float(arguments.multiply)
            if float(list[11]) > 0:
                old_ws += float(list[11])
                new_ws += new_w
                wcount += 1
            list[12] = new_w
            if (arguments.wig):
                wigstring += str(list[0]) + " " + str(new_w) + "\n"
        old_w_mean = old_ws / wcount
        new_w_mean = new_ws / wcount
        f.write("Old W Average: " + str(old_w_mean) + "\n")
        f.write("New W Average: " + str(new_w_mean) + "\n")
    # Rewrite the csv so the nW column now carries the normalized fitnesses.
    with open(arguments.outfile, "wb") as csvfile:
        writer = csv.writer(csvfile)
        writer.writerows(results)
if (arguments.wig):
    if (arguments.normalize):
        # wigstring was already filled during normalization above.
        with open(arguments.wig, "wb") as wigfile:
            wigfile.write(wigstring)
    else:
        # No normalization: emit the raw W values instead.
        for list in results:
            wigstring += str(list[0]) + " " + str(list[11]) + "\n"
        with open(arguments.wig, "wb") as wigfile:
            wigfile.write(wigstring)
# `````````````
# `````````````
# ``@@@@@@@@@``
# ``@@@@@@@@@```
# ``@@@@@@@@@``
# ``@@@@@@@@@``
# ``@@@@@@@@@``
# ``@@@@@@@@@``
# ```@@@@@@@@#``
# ```@@@@@@@@#``
# ```@@@@@@@@+``
# ```@@@@@@@@'``
# ```@@@@@@@@;``
# ```@@@@@@@@;``
# ```@@@@@@@@:``
# ```@@@@@@@@,``
# ``.@@@@@@@@.``
# ``.@@@@@@@@```
# ``.@@@@@@@@```
# ``.@@@@@@@@```
# ``.@@@@@@@@``
# ``,@@@@@@@@``
# ``,@@@@@@@@``
# ``.@@@@@@@@``
# ```@@@@@@@@``
# ``:@@@@@@@@``
# ``:@@@@@@@@``
# ``:@@@@@@@@``
# ``:@@@@@@@@``
# ``'@@@@@@@@``
# ``;@@@@@@@@``
# ``:@@@@@@@@``
# ``:@@@@@@@@``
# ``:@@@@@@@@``
# ``;@@@@@@@#``
# ````+@@@@@@@#`````
# ```````#@@@@@@@#``````
# `````.,@@@@@@@@@...````
# ``@@@@@@@@@@@@@@@@@@;``
# ``@@@@@@@@@@@@@@@@@@;``
# ```````````````````````
# `````````````````````
# ``````.```````
# ````@.''```
# ```# `;```
# ``.+ @```
# ```@ ````,+```
# ```;;````` @```
# ```@ ``````,@```
# ```,+```..```@```
# ```@ ``....```@```
# ```+' ``....```#'``
# ```@```......`` @```
# ```'+```......```'@```
# ```@ ``........```@```
# ```'#```........````@```
# ```@ ``..........```#,``
# ```'#```...........`` @```
# ```@``.............```.+```
# ```:#```.............`` #```
# ``````` ```@ ```.......#......``.@```
# `````````` ```:@```#`......@......```@```
# ``````#@@@`` ```@ `.`:.......@.......`` @```
# ```.#@###@`` ```:@``..`+`....`@.......```@,``
# ```'@####@``` ```@````..@@@@@@@@#,`..#```` @```
# ```#####@@``` ``;@ ,`.,@@. `@@..#..```''``
# ``:####@#```` ```@``@`@@ @@:...`` @```
# ```@#####```` ``,@``.@, ,@`...``:@```
# ``.####@``` ```@.` @` @....``@```
# ``####@``` ``,@ @.` @`.````@```
# ``@##@```` ```@, @: ;# `@..```@.``
# ```@##```` ``.@`,@ @@, #...`` @```
# ```@#@``` ```@, # `@@@ @`.```;'``
# ```##:`` ``.@ +, .@@@ ,'..`` @```
# ``.##``` ```@, @ `@@@ @`.```,+```
# `````@##``` ```@`'. @@@ :...```@``` ``````````
# ````````````````````````````````````````##@``` ```@:`@ @@@ #...`` #``` `````````````````
# ```````````````````````````````````````.###@``` ```@ `, .@@@++'++#@@'` #`..```#``` ````````````'@@@@@.````
# `````+@####################@@@@@@@@@@@@#####@``` ```#;`,. `@#...,.,,,,,,..;@, @....`` @``````````````+@@@########@.``
# `+@##########################################,```` ```````````````@```@ +@,.,,,,,,,,,,,,,,,,,@ @....```#`````````'@@@##############@```
# `@###########################################@``````````````````````````````````````+'``.,'.#.,,,,,,,,,,,,,,,,,,,,.++@......`` @````+@@@#######@+``````'###'``
# ``:@@########@@@@@@@@@@@@@@@@@@@@@@#@@@@@@@##@``````````````````````````......,`,,.,@ ```.##.,.,,,,,,,,,,,,,,,,,,,,.##......`` :.+@@#######@@:``````````###@```
# ````````````````````````````````,#########@###@@@@#################################@'```...@.,,,,,,,,,,,,,,,,,,,,,,,#.........`'@######@@+```````````````@##```
# ```````````````````````````````@#########@#########################################```.....@:,,,,,,,,,,,,,,,,,,..;@..........`@####@@:```````` ```@##@``
# `````@@####@@@##########@@@@@@@@@@@@@@@@@@@@@@@@@@@@#+@+```......@#.,,,,,,,,,,,,,,,,.##..........`` #@#````````` ```##@```
# ``.#@######@####:```````````````````````````````````@ ``.......@:#,,,,,,,,,,,,,,,;@@`............`` @`````` ```@##:``
# ``:########@###@```````````````````````````````````#;```......+..`##,.,,,,,,,,.#@#..'............`` @```` ``;##@```
# ```@@####@@##@'```` ````@ ``.......'.....@@#+;:;'#@@;`...#`............`` @``` ```@##```
# ```````````````` ```@,```.............'..:''':.@`......:............```@.`` ``@###```
# `````````````` ``.@```..............#........'`....................```@``` ``.##@``````
# ```@.``..............`#........,.....................```@.`` ```@#+,``````
# ``.@``.,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,.................```@``` ````+#####@````
# ```@````......,........................,....,,.,.,,,,,..` @,`` ```;@@######````
# ``.@```.......,......................`......,...........```@``` ```+#########@````
# ```@```...........`@@+...............+@@`....,...........```@:`` ``;@#########@'```
# ``.@ ``............@@@@@`.........,@@@@@.....,............```@``` ``@#########@#@@```
# ```@.```............@@@@@@@`.....'@@@@@@@.....,............```#'`` ``@###@###@#@#@@@``
# ``.@```.............@@@@@@@@@..+#@@@@@@@@.....,.............`` #``` ``@#@@@@##@#@#@@@``
# ```@````.............@@@@@@@@@@@@@@@@@@@@@.....,.............```'#``` ``'#@@@@##@#@@@@@,`
# ``.@ ``.........,....@@@@@@@@@',##@@@@@@@@`....,..............```@``` ```@@@@@##@@'#@@@@`
# ```@.```.........,....@@@@@@@#`...`#@@@@@@@`....,..............```.@``` ``#@@@@##;```@@@@`
# ``.@ ``.....,,,,,,,.,.@@@@@#.,,,,,,,.#@@@@@,,,,,,,,,,,,:,,,,,,,,.``@``` ``#@@@@#.````@@@@`
# ```@. ``...............@@@;......,......#@@@`...........,.........```@``` ``#@@@;``````@@@@`
# ```@```................@,........,........+#`...........,.........```@.`` ``#@@@;``````@@@@`
# ```@.``...........................,.........`............,..........```@``` ``#@@@'`` ``#@@;`
# ``.@ ``................,..........,......................,..........```#:`` ``#@@@'`` ```#@``
# ```@,``............................,......................,...........`` @``` ``+@@@'`` `````
# ``.@```............................,......................,...........```#+`` ``;@@@+`` ``
# ```@,``.............................,......................,............```@``` ``'@@@+``
# ``.@```.........,...................,......................,............```'#``` ``;@@@+``
# ```@:```.........,...................,......................,.............`` @``` ``:@@@+``
# ```@`..,,,,,,,,,,,,,,,,,,..,.........,......................,.............```'@``` ``;@@@#``
# ``+'```...,.................,....,,,,,,,,,,,,,,,,,,,,,,,,,,,,,..........,...``@``` ``;@@@#``
# ```@ ``....,.................,.......................................,.......``;@``` ``:@@@#``
# ``'#```....,.................,.......................................,.......```@``` ``:@@@@``
# ```@```.....,.................,.......................................,........``;#`` ``:@@@@``
# ```@ ``.....,.................,.......................................,........`` @``` ``:@@@@``
# ``@````...............................................................,........`` @.`` ``;@@@@``
# ``@ ```..............................................................,........`` .#`` ``'@@@@``
# ``# ``````.`.```````````````..````````````````````..`````````````````.`````````` @`` ``'@@@@``
# ``. `````````````````````````````````````````````````````````````````````````` .;`` ``'@@@@``
# ``@;` `` ` ` ` ` ```` ` ````` ` ` `,+@``` ``+@@@@``
# `````:;'++##@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@+.```` ``+@@@@``
# ```````````````````````````+##@``````````````````@#@``````````````````````````````` ``+@@@@``
# `````````````````````````@##@``````````````````@##;```````````````````````````` ``+@@@@``
# ````###,```` ````+##@``` ``+@@@@``
# ``,###``` ``.@##``` ``'@@@@``
# ``###@`` ```@##``` ``'@@@@``
# ```@##@`` ``@##+`` ``'@@@@``
# ```###.`` ``:##@`` ``'@@@@``
# ``:###``` ```##@``` ``'@@@@``
# ``@##@`` ```@##``` ``'@@@@``
# ```@##'`` ``@###`` ``'@@@@``
# ```@##``` ```##@`` ``'@@@@``
# ``,###``` ```@#@``` ``'@@@@``
# ``####`` ``@##.`` ``'@@@@``
# ``@##@`` ``;##@`` ``'@@@@``
# `````````@##@`` ```##@```` ``;@@@@``
# ``````````````@##;`` ```###`````````````` ``;@@@@``
# `````````.,;.```###``` ``@##:`````````````` ``;@@@@``
# `````#@#########@@##``` ``###@@@@@@###@#@'``` ``;@@@@``
# ```@@###############@`` ``,################`` ``;@@@@``
# ``'@################+`` ```###############+`` ``;@@@@``
# `````````````````````` ``###########@#,```` ``.@@@@``
# ````````````````````` ``````````````````` ```@@@.`
# ```````````````` ```````
#
#
| {
"repo_name": "jsa-aerial/aerobio",
"path": "Scripts/calc_fitness.py",
"copies": "1",
"size": "43828",
"license": "mit",
"hash": 8200735867038334000,
"line_mean": 57.672021419,
"line_max": 313,
"alpha_frac": 0.3074746737,
"autogenerated": false,
"ratio": 4.81414762741652,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005720866370296478,
"num_lines": 747
} |
"""ATRCalculator
ATR: Average True Range.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:average_true_range_atr
"""
class ATRCalculator(object):
    """Incrementally compute the Average True Range (ATR) of a price series.

    Feed ticks one at a time via __call__; each tick is a mapping with
    "High", "Low" and "Close" entries.  While fewer than window_size ticks
    have been seen, the ATR is the plain mean of the true ranges so far;
    afterwards it is updated with Wilder-style exponential smoothing.
    """

    def __init__(self, window_size=10):
        self.window_size = window_size
        self.tr_list = []       # true ranges collected during the warm-up phase
        self.last_tick = None   # previous tick, needed for the close-gap terms
        self.atr = None

    def __call__(self, tick):
        """Consume one tick and return the updated ATR."""
        high_low = tick["High"] - tick["Low"]
        # Compare against None explicitly: a truth test on an array-like tick
        # would raise "truth value of an array ... is ambiguous".
        if self.last_tick is None:
            # First tick ever: the true range degenerates to the day's range.
            true_range = high_low
        else:
            prev_close = self.last_tick["Close"]
            high_gap = abs(tick["High"] - prev_close)
            low_gap = abs(tick["Low"] - prev_close)
            true_range = max(high_low, high_gap, low_gap)
        # A true range of 0.0 is possible (e.g. extremely low-volume tickers),
        # so no assertion is made here.
        if len(self.tr_list) < self.window_size:
            # Warm-up: simple average of everything seen so far.
            self.tr_list.append(true_range)
            self.atr = sum(self.tr_list) / len(self.tr_list)
        else:
            # Steady state: equivalent to (atr*(n-1) + tr)/n, written incrementally.
            self.atr += (true_range - self.atr) / self.window_size
        self.last_tick = tick.copy()
        return self.atr
| {
"repo_name": "dyno/LMK",
"path": "lmk/calculator/ATRCalculator.py",
"copies": "1",
"size": "1499",
"license": "mit",
"hash": 2169116107196417300,
"line_mean": 37.4358974359,
"line_max": 118,
"alpha_frac": 0.5670446965,
"autogenerated": false,
"ratio": 3.1557894736842105,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4222834170184211,
"avg_score": null,
"num_lines": null
} |
# A tree data structure which stores a list of degrees and can quickly retrieve the min degree element,
# or modify any of the degrees, each in logarithmic time. It works by creating a binary tree with the
# given elements in the leaves, where each internal node stores the min of its two children.
import math
class MinTree:
    # Complete binary tree stored flat in self.nodes: the root is index 0 and the
    # children of node i are 2*i+1 and 2*i+2.  The leaves hold the degrees (padded
    # with +inf up to the next power of two) and every internal node holds the min
    # of its two children, so getMin/changeVal/setVal run in O(log n).
    def __init__(self, degrees):
        self.height = int(math.ceil(math.log(len(degrees), 2)))
        self.numLeaves = 2 ** self.height
        self.numBranches = self.numLeaves - 1
        self.n = self.numBranches + self.numLeaves
        # Pad unused leaves with +inf so they never win a min comparison.
        self.nodes = [float('inf')] * self.n
        for i in range(len(degrees)):
            self.nodes[self.numBranches + i] = degrees[i]
        # Build internal nodes bottom-up.
        for i in reversed(range(self.numBranches)):
            self.nodes[i] = min(self.nodes[2 * i + 1], self.nodes[2 * i + 2])
    # @profile
    def getMin(self):
        # Walk from the root toward the smaller child (ties go left) and return
        # (leaf index, min value).
        cur = 0
        for i in range(self.height):
            cur = (2 * cur + 1) if self.nodes[2 * cur + 1] <= self.nodes[2 * cur + 2] else (2 * cur + 2)
        # print "found min at %d: %d" % (cur, self.nodes[cur])
        return (cur - self.numBranches, self.nodes[cur])
    # @profile
    def changeVal(self, idx, delta):
        # Add delta to leaf idx, then propagate updated minima toward the root,
        # stopping early as soon as a parent already holds the correct min.
        cur = self.numBranches + idx
        self.nodes[cur] += delta
        for i in range(self.height):
            cur = (cur - 1) // 2
            nextParent = min(self.nodes[2 * cur + 1], self.nodes[2 * cur + 2])
            if self.nodes[cur] == nextParent:
                break
            self.nodes[cur] = nextParent
    def getVal(self, idx):
        # Return the current value stored at leaf idx.
        cur = self.numBranches + idx
        return self.nodes[cur]
    def setVal(self, idx, val):
        # Overwrite leaf idx with val and propagate, as in changeVal.
        cur = self.numBranches + idx
        self.nodes[cur] = val
        for i in range(self.height):
            cur = (cur - 1) // 2
            nextParent = min(self.nodes[2 * cur + 1], self.nodes[2 * cur + 2])
            if self.nodes[cur] == nextParent:
                break
            self.nodes[cur] = nextParent
    def dump(self):
        # Debug dump: print the tree one level per line (Python 2 print statements).
        print "numLeaves: %d, numBranches: %d, n: %d, nodes: " % (self.numLeaves, self.numBranches, self.n)
        cur = 0
        for i in range(self.height + 1):
            for j in range(2 ** i):
                print self.nodes[cur],
                cur += 1
            print ''
| {
"repo_name": "shenghua-liu/HoloScope",
"path": "mytools/MinTree.py",
"copies": "1",
"size": "2293",
"license": "apache-2.0",
"hash": 7149728313982103000,
"line_mean": 39.2280701754,
"line_max": 107,
"alpha_frac": 0.5542956825,
"autogenerated": false,
"ratio": 3.474242424242424,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9516629405730322,
"avg_score": 0.002381740202420285,
"num_lines": 57
} |
# A tree is either () or (left, val, right).
# Sample tree used by the doctest-style examples at the bottom of this file.
T = (((),'a',()), 'b', ( ((),'c',()), 'd', ()))
# A zipper is a 'point' in the tree: the focused subtree plus enough context
# to rebuild everything around it.
# zipper = (context, tree)
# context = ('top', _, _, _)
#         | ('left', context, val, right) meaning a hole (*, val, right)
#         | ('right', context, left, val) meaning a hole (left, val, *)
def make_zipper(tree):
    """Create a zipper focused on the root of `tree` (context tagged 'top')."""
    top_context = ('top', None, None, None)
    return top_context, tree
# NOTE: these definitions use Python 2 tuple parameter unpacking, which was
# removed in Python 3 (PEP 3113).
def at_top(((tag, _1, _2, _3), tree)): return tag == 'top'  # focus at the root?
def get_tree((_, tree)): return tree  # the subtree under the focus
def replace_tree((context, _), tree): return context, tree  # swap the focused subtree
def left((context, (left, val, right))): return ('left', context, val, right), left  # descend to left child
def right((context, (left, val, right))): return ('right', context, left, val), right  # descend to right child
def up(((tag, context, x, y), tree)):
    # Re-plug the focused subtree into the hole recorded by the context, moving
    # the focus one level up.  Calling up() at the top is a programming error
    # (the assert fires).
    if tag == 'left':
        val, right = x, y
        return context, (tree, val, right)
    elif tag == 'right':
        left, val = x, y
        return context, (left, val, tree)
    else:
        assert False
# Doctest-style examples: each '## expr' line is an expression and the '#. ...'
# line below it is the expected value.
z = make_zipper(T)
## z
#. (('top', None, None, None), (((), 'a', ()), 'b', (((), 'c', ()), 'd', ())))
## at_top(z)
#. True
## at_top(right(z))
#. False
## at_top(up(right(z)))
#. True
## get_tree(z)
#. (((), 'a', ()), 'b', (((), 'c', ()), 'd', ()))
## get_tree(right(z))
#. (((), 'c', ()), 'd', ())
## get_tree(left(right(z)))
#. ((), 'c', ())
## get_tree(replace_tree(left(right(z)), ()))
#. ()
## get_tree(up(up(replace_tree(left(right(z)), ()))))
#. (((), 'a', ()), 'b', ((), 'd', ()))
| {
"repo_name": "JaDogg/__py_playground",
"path": "reference/sketchbook/zippers/treezip.py",
"copies": "1",
"size": "1477",
"license": "mit",
"hash": -5634698662520971000,
"line_mean": 27.4038461538,
"line_max": 85,
"alpha_frac": 0.4861205146,
"autogenerated": false,
"ratio": 2.7351851851851854,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8578986826481462,
"avg_score": 0.028463774660744737,
"num_lines": 52
} |
"""A tree-searching virtual machine, searching branch and match
implementation."""
from treepace.relations import Descendant
import treepace.trees
from treepace.utils import ReprMixin, IPythonDotMixin
from treepace.replace import ReplaceError
class SearchMachine(ReprMixin):
    """A tree-searching virtual machine."""

    def __init__(self, node, instructions, variables, relation=Descendant):
        """Initialize the VM with one starting branch and the given variables."""
        initial_branch = SearchBranch(node, instructions[:], self, relation)
        self.branches = [initial_branch]
        self.machine_vars = variables

    def search(self):
        """Run every branch until no instructions remain; return the matches."""
        while any(b.instructions for b in self.branches):
            next_generation = []
            for branch in self.branches:
                if not branch.instructions:
                    # Finished branch: carry it over unchanged.
                    next_generation.append(branch)
                    continue
                outcome = branch.instructions.pop(0).execute(branch)
                if outcome is None:
                    # Instruction mutated the branch in place; keep it.
                    next_generation.append(branch)
                else:
                    # Instruction forked (or pruned) the branch into a list.
                    next_generation.extend(outcome)
            self.branches = next_generation
        return [b.match for b in self.branches]

    def __str__(self):
        """Return the machine state in a form of a string."""
        return "branches: %s, vars: %s" % (self.branches, self.machine_vars)
class SearchBranch(ReprMixin):
    """The search process can 'divide' itself into multiple branches."""

    def __init__(self, node, instructions, vm, relation):
        """A branch carries its current group numbers, a match (subtree list
        with the results so far), a context node, the current relation and the
        remaining instruction list."""
        self.node = node
        self.relation = relation
        self.instructions = instructions
        self.vm = vm
        self.groups = {0}
        self.match = Match([treepace.trees.Subtree()])

    def copy(self):
        """Return an independent duplicate; mutating it leaves this branch intact."""
        clone = SearchBranch(self.node, list(self.instructions), self.vm,
            self.relation)
        clone.groups = set(self.groups)
        clone.match = self.match.copy()
        return clone

    def __str__(self):
        """Return the branch information as a string."""
        fmt = "groups: %s, match: %s, node: %s, relation: %s, instructions: %s"
        group_strs = list(map(str, self.match.groups()))
        instr_strs = list(map(str, self.instructions))
        return fmt % (self.groups, group_strs, self.node, self.relation.name,
            instr_strs)
class Match(ReprMixin, IPythonDotMixin):
    """A match is a list of groups; each group is one subtree."""

    def __init__(self, groups):
        """Initialize a match with a list of subtrees."""
        self._subtrees = groups

    def group(self, number=0):
        """Return the given group; group 0 is the whole match."""
        return self._subtrees[number]

    def groups(self):
        """Return the list of all groups."""
        return self._subtrees

    def copy(self):
        """Return a new Match holding copies of all subtrees."""
        return Match([subtree.copy() for subtree in self._subtrees])

    @staticmethod
    def check_disjoint(matches):
        """Raise ReplaceError if any node appears in two or more of the
        given matches."""
        node_sets = [m.group().nodes for m in matches]
        combined = set().union(*node_sets)
        if len(combined) < sum(len(nodes) for nodes in node_sets):
            raise ReplaceError("Overlapping matches")

    def __str__(self):
        """Return a string containing all groups (subtrees)."""
        return str([str(subtree) for subtree in self._subtrees])

    def _repr_dot_(self):
        from treepace.formats import DotText
        return self.group().main_tree().save(DotText, match=self)
| {
"repo_name": "sulir/treepace",
"path": "treepace/search.py",
"copies": "1",
"size": "4032",
"license": "mit",
"hash": 2949209456118498000,
"line_mean": 38.145631068,
"line_max": 79,
"alpha_frac": 0.6083829365,
"autogenerated": false,
"ratio": 4.470066518847006,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5578449455347007,
"avg_score": null,
"num_lines": null
} |
# A tree viewer to use for debugging purposes, especially for debugging likelihood
# calculations and MCMC moves involving trees. Shows a graphical representation of
# the tree as it is laid out in memory.
#
# Features:
# o Background is shown in color_plot_background (colors defined below)
# o Initially, node numbers are used to identify nodes, but pressing 'n' toggles
# the display of node names rather than numbers
# o Nodes that are selected are shown in color_selected_node and the edges of selected
# nodes are shown in color_selected_edge (this is useful for showing, for example,
# which edges were modified by a Larget-Simon move)
# o Nodes that are not selected are shown in color_unselected_node and their edges are
# shown in color_unselected_edge
# o The label of the node currently serving as the likelihood root is shown in
# color_likelihood_root, whereas other nodes are shown in color_unselected_node
# o Parental and filial conditional likelihood array status is indicated by colored
# circles at the two ends of each edge.
# Key to colors:
# color_undefined_cla: CLA status could not be determined (e.g. no TipData or
# InternalData structures)
# color_valid_cla: CLA is valid
# color_valid_cached_dot: CLA is valid but currently cached
# color_invalid_cla: CLA is invalid and will be recalculated upon next use
# color_invalid_cached_dot: CLA is invalid and cached
from phycas import *
#from threading import *
import Tkinter
#from tkFileDialog import askopenfilename
import tkFont
import math
# Useful colors (for others, see http://www.mindspring.com/~squicker/colors.html):
white = '#ffffff'
black = '#000000'
red = '#ff0000'
magenta = '#ff00ff'
maroon = '#800000'
green = '#00ff00'
dkgreen = '#008000'
teal = '#008080'
cyan = '#00ffff'
blue = '#0000ff'
purple = '#800080'
navy = '#000080'
midnight = '#00009C'
gray = '#808080'
silver = '#c0c0c0'
brown = '#5C3317'
olive = '#808000'
yellow = '#ffff00'
# The values of these variables determine the color scheme.
# Alternative (dark-background) scheme, kept for reference:
#color_plot_background = midnight
#color_undefined_cla = silver
#color_valid_cla = green
#color_valid_cached_dot = dkgreen
#color_invalid_cla = red
#color_invalid_cached_dot = maroon
#color_selected_node = yellow
#color_selected_edge = yellow
#color_unselected_node = silver
#color_unselected_edge = silver
#color_likelihood_root = magenta
# Active (light-background) scheme:
color_plot_background = silver
color_undefined_cla = gray          # CLA status could not be determined
color_valid_cla = green             # CLA is valid
color_valid_cached_dot = black      # CLA is valid but currently cached
color_invalid_cla = red             # CLA invalid; recalculated on next use
color_invalid_cached_dot = black    # CLA is invalid and cached
color_selected_node = yellow
color_selected_edge = yellow
color_unselected_node = black
color_unselected_edge = black
color_likelihood_root = magenta
# The text displayed in the Help | About dialog box (shown verbatim).
helptext = """
Knobs on ends of edges represent conditional likelihood arrays (CLAs)
Valid CLAs are GREEN
Invalid CLAs are RED
Undefined CLAs are GRAY
Dotted CLAs are cached
Likelihood root node is PINK
Keyboard shortcuts:
h - opens this help dialog box
n - toggles between node numbers and names
q - quits application normally (returns 1)
c - quits application abnormally (returns 0)
k - increases site
j - decreases site
g - go to site
Currently, edges are NOT shown proportional to their lengths.
"""
class TreeCanvas(Tkinter.Canvas):
def __init__(self, parent, tree, width, height):
    """Create the drawing surface and initialize all plotting state.

    :parent: the enclosing widget (the TreeViewer frame)
    :tree: the tree object to be displayed
    :width/height: initial canvas size in pixels
    """
    #self.tree_mutex = tree_lock
    Tkinter.Canvas.__init__(self, master=parent, bg=color_plot_background, width=width, height=height)
    self.frame = parent
    self.tree = tree
    self.use_edgelens = False # by default, all edges are drawn as equal in length (call useEdgelens() to change this)
    self.tree_modified = True
    self.tree_height = 0.0
    self.tree_width = 0.0
    self.xscaler = 1.0
    self.yscaler = 1.0
    self.plot_margin = 20
    # variables associated with the tree being displayed
    #self.default_tree_topology = '(A,(((B,C)U,(D,(E,F)W)V)T,G)S,(H,(I,(J,K)Z,L)Y)X)R'
    #self.tree = Phylogeny.Tree()
    #self.tree.buildFromString(self.default_tree_topology)
    self.likelihood_root_nodenum = 0
    # variables associated with showing nodes
    self.use_node_names = False
    # variables associated with showing status of conditional likelihood arrays
    self.CLA_radius = 2 # radius of circle plotted for each CLA
    # font-related
    self.font = tkFont.Font(family='Courier', size=12)
    fm = self.font.metrics()
    self.font_height = fm['ascent'] + fm['descent']
    self.font_Mwidth = self.font.measure('M')
    self.nodename_text_color = color_unselected_node
    # redraw whenever the canvas is resized
    self.bind("<Configure>", self.resizeEvent)
# I am wrapping Canvas calls in these display* functions in order to later make
# it easier to draw to a PDF file rather than the screen
def displayText(self, x, y, text, font, color):
    """Draw *text* at canvas point (x, y) in the given font and color."""
    Tkinter.Canvas.create_text(self, x, y, text=text, font=font, fill=color)

def displayLine(self, x0, y0, x, y, color, thickness):
    """Draw a line segment from (x0, y0) to (x, y)."""
    Tkinter.Canvas.create_line(self, x0, y0, x, y, fill=color, width=thickness)

def displayFilledOval(self, x0, y0, x, y, color):
    """Draw an oval filled and outlined with *color* in the bounding box."""
    Tkinter.Canvas.create_oval(self, x0, y0, x, y, fill=color, outline=color)

def displayFilledRectangle(self, x, y, width, height, color):
    """Draw a filled rectangle.

    NOTE(review): Tk's create_rectangle treats the last two coordinates
    as the opposite corner, not as a size; callers here pass corners.
    """
    Tkinter.Canvas.create_rectangle(self, x, y, width, height, fill=color)
def xtranslate(self, x):
    """Map a tree-space x coordinate to a canvas pixel column."""
    pixels = self.xscaler * x
    return int(self.left + pixels)

def ytranslate(self, y):
    """Map a tree-space y coordinate to a canvas pixel row (y grows upward)."""
    pixels = self.yscaler * y
    return int(self.bottom - pixels)
def plotNode(self, xval, yval, number, label, node_color=color_unselected_node):
    """Draw a node label at tree coordinates (xval, yval).

    The likelihood root node is highlighted with color_likelihood_root.
    Labels are suppressed while a specific site is being viewed
    (self.frame.site is not None).
    """
    x = self.xtranslate(xval)
    y = self.ytranslate(yval)
    # and/or idiom: likelihood root wins over the caller-supplied color
    color = (number == self.likelihood_root_nodenum and color_likelihood_root or node_color)
    if self.frame.site is None:
        self.displayText(x, y, text=str(label), font=self.font, color=color)
def plotEdge(self, parent_x, parent_y, child_x, child_y,
             thickness=1,
             nodenum_radius=10, cla_radius=3, cached_radius=1,
             edge_color=color_unselected_edge,
             parental_color=color_undefined_cla,
             parental_cached_color=color_undefined_cla,
             filial_color=color_undefined_cla,
             filial_cached_color=color_undefined_cla,
             state_list=[], times_list=[]):
    """Draw one parent-to-child edge in tree coordinates.

    With no site selected, a CLA marker (outer circle plus inner cached
    dot) is drawn at each end of the edge.  With a site selected, the
    states in state_list are drawn along the edge at the relative
    positions given by times_list instead.

    NOTE(review): the mutable list defaults are shared across calls;
    they are only read here, so this is safe, but do not mutate them.
    """
    x0 = self.xtranslate(parent_x)
    y0 = self.ytranslate(parent_y)
    x = self.xtranslate(child_x)
    y = self.ytranslate(child_y)
    # Leave space at ends for node identifier
    if x == x0:
        # vertical edge: atan would divide by zero, so pick +/- 90 degrees
        if y > y0:
            theta = math.pi/2.0
        else:
            theta = -math.pi/2.0
    else:
        theta = math.atan(float(y - y0)/float(x - x0))
        if x < x0:
            # atan covers only (-pi/2, pi/2); flip into the correct half-plane
            theta += math.pi
    dx = float(nodenum_radius)*math.cos(theta)
    dy = float(nodenum_radius)*math.sin(theta)
    # Draw the edge itself
    self.displayLine(x0+dx, y0+dy, x-dx, y-dy, edge_color, thickness)
    if self.frame.site is None:
        # Draw the parental CLA marker
        self.displayFilledOval(x0+dx-cla_radius, y0+dy-cla_radius, x0+dx+cla_radius, y0+dy+cla_radius, color=parental_color)
        self.displayFilledOval(x0+dx-cached_radius, y0+dy-cached_radius, x0+dx+cached_radius, y0+dy+cached_radius, color=parental_cached_color)
        # Draw the filial CLA marker
        self.displayFilledOval(x-dx-cla_radius, y-dy-cla_radius, x-dx+cla_radius, y-dy+cla_radius, color=filial_color)
        self.displayFilledOval(x-dx-cached_radius, y-dy-cached_radius, x-dx+cached_radius, y-dy+cached_radius, color=filial_cached_color)
    else:
        # If state_list not empty, draw states of univents along the edge
        #          tan(theta) = (y - y0)/(x - x0)
        #    x, y  r = sqrt[(x-x0)^2 + (y - y0)^2]
        #      /|  dy = r*sin(theta)
        #   1 / |  dx = r*cos(theta)
        #    /  |  Example: 3 univents shown, indexed s = 0, 1 and 2
        # 1 /   |  r_s = (s+1)*r/(n+1)
        #  /    |  r_0 = 1*r/4 = r/4
        # 0     |  r_1 = 2*r/4 = r/2
        # /_____|  r_2 = 3*r/4
        # x0, y0
        nstates = len(state_list)
        if nstates > 0:
            xdiff = float(x - x0)
            ydiff = float(y - y0)
            r = math.sqrt(xdiff*xdiff + ydiff*ydiff)
            if xdiff == 0.0:
                theta = math.pi/2.0
            else:
                theta = math.acos(xdiff/r)
            for i,(s,t) in enumerate(zip(state_list, times_list)):
                #r_s = r*float(i)/float(nstates - 1)
                # place each univent at its relative time t along the edge
                r_s = r*float(t)
                dx_s = r_s*math.cos(theta)
                dy_s = r_s*math.sin(theta)
                display_string = '%s' % (self.frame.lookup_state[s])
                self.displayText(x0 + dx_s, y0 - dy_s, text=display_string, font=self.font, color=yellow)
def getEdgeLen(self, nd):
    """Return nd's edge length when edge lengths are in use, else 1.0.

    Fixed: the old ``and ... or`` idiom wrongly returned 1.0 for a
    zero-length edge (0.0 is falsy); the conditional expression does not.
    """
    return nd.getEdgeLen() if self.use_edgelens else 1.0
def drawTree(self):
    """Lay out and draw the entire tree on the canvas.

    First pass (postorder) computes x, y layout coordinates for every
    node; second pass (preorder) draws the edges, CLA markers and labels.
    """
    # The angle at which edges are drawn is theta. If v is the length of an
    # edge, then dx = v cos(theta) and dy = v sin(theta).
    #
    #  X      Y
    #  \     /|   This is a drawing of one pair of tips (X, Y)
    #   \   / |   and their parent (Z). The length of the
    #    \ v/ dy  right child's edge is v. The distance between
    #     \/ theta| sibling distance (the intersibling distance,
    #      \____ |  or isd) is the horizontal distance between
    #      Z  dx    X and Y. The isd determines theta.
    #
    # The plot area is w pixels wide and h pixels high, so we need to find
    # the angle that would allow us to draw a tree that has the same ratio
    # of w to h. The width of the tree when plotted using a given theta is
    # equal to (left_sum + right_sum) * cos(theta), where left_sum is the
    # sum of all edge lengths from the subroot (only child of root) to the
    # subroot's leftmost descendant, and right_sum is the sum of all edge
    # lengths from the subroot node to its rightmost descendant (which will
    # always be the tree's first postorder node). Likewise, the height of
    # the tree when plotted will be the subroot's edge length plus
    # longest_path * sin(theta), where longest_path is the sum of the edge
    # lengths in the path from the subroot to the furthest leaf.
    # acquire will block other threads while tree is drawn (this prevents trying
    # to drawing a tree that is in the process of being modified - probably not a
    # good idea)
    #if self.tree_mutex:
    #    self.tree_mutex.acquire()
    # Do a postorder traversal to gather information
    self.tree_width = float(self.tree.getNTips() - 2)
    x = self.tree_width
    nd = self.tree.getFirstPostorder()
    while not nd.isRoot():
        if nd.isTip():
            # tips sit on the baseline (y = 0), spaced one unit apart
            nd.setY(0.0)
            nd.setX(x)
            x -= 1.0
        else:
            # internal nodes sit midway between their leftmost and
            # rightmost children (rightmost x was stored below)
            lchild_x = nd.getLeftChild().getX()
            rchild_x = nd.getX()
            nd.setX((lchild_x + rchild_x)/2.0)
        # If nd is rightmost child of its parent, initialize parent's x and y values
        parent = nd.getParent()
        if not nd.getRightSib():
            parent.setX(nd.getX())
            parent.setY(0.0)
        # Make sure that parent's y equals total distance from parent to
        # furthest tip in the lineage that includes nd
        height_thru_nd = self.getEdgeLen(nd)
        if not nd.isTip():
            height_thru_nd += nd.getY()
        if height_thru_nd > parent.getY():
            parent.setY(height_thru_nd)
        nd = nd.getNextPostorder()
    # Do a preorder traversal to draw the tree
    root = self.tree.getFirstPreorder()
    # NOTE: after the postorder loop above, nd is the root, so this
    # advances to the subroot (the root's only child)
    nd = nd.getNextPreorder()
    # Compute scaling factors for x and y axis based on tree height and
    # the width and height of the plot area
    self.tree_height = nd.getY() + self.getEdgeLen(nd)
    self.yscaler = self.usableh/self.tree_height
    self.xscaler = self.usablew/self.tree_width
    # Draw the root node
    x = root.getLeftChild().getX()
    y = 0.0
    # NOTE(review): 'id' shadows the builtin of the same name here
    id = self.use_node_names and root.getNodeName() or root.getNodeNumber()
    color = root.isSelected() and color_selected_node or color_unselected_node
    self.plotNode(x, y, root.getNodeNumber(), id, color)
    while nd:
        parent = nd.getParent()
        x0 = parent.getX()
        y0 = self.tree_height - parent.getY()
        nd.setY(parent.getY() - self.getEdgeLen(nd))
        x = nd.getX()
        y = self.tree_height - nd.getY()
        color = nd.isSelected() and color_selected_edge or color_unselected_edge
        par_color, par_dot_color, fil_color, fil_dot_color = self.checkCLAstatus(nd)
        univent_states = []
        univent_times = []
        if self.frame.site is not None:
            if nd.isTip():
                nd_data = nd.getTipData()
            else:
                nd_data = nd.getInternalData()
            if nd_data and nd_data.getNumUnivents(self.frame.site) > 0:
                univent_states = nd_data.getUniventStates(self.frame.site)
                univent_times = nd_data.getUniventTimes(self.frame.site)
        self.plotEdge(parent_x=x0, parent_y=y0, child_x=x, child_y=y,
                      thickness=nd.isSelected() and 3 or 1,
                      nodenum_radius=2*self.font_Mwidth,
                      cla_radius=4, cached_radius=2,
                      edge_color=color,
                      parental_color=par_color, parental_cached_color=par_dot_color,
                      filial_color=fil_color, filial_cached_color=fil_dot_color,
                      state_list=univent_states, times_list=univent_times)
        id = self.use_node_names and nd.getNodeName() or nd.getNodeNumber()
        color = nd.isSelected() and color_selected_node or color_unselected_node
        self.plotNode(x, y, nd.getNodeNumber(), id, color)
        nd = nd.getNextPreorder()
    # Release the lock so other threads can play with the tree
    #if self.tree_mutex:
    #    self.tree_mutex.release()
def checkCLAstatus(self, nd):
    """Return the four CLA status colors for the ends of nd's edge.

    :nd: a tree node (tip or internal)
    :return: (parental_color, parental_cached_color, filial_color,
        filial_cached_color).  Entries stay color_undefined_cla when the
        node has no TipData/InternalData structure; tips never have a
        filial CLA.

    Improvements: the duplicated valid/cached color logic is factored
    into a helper, and the local previously named ``id`` (shadowing the
    builtin) is renamed.
    """
    def cla_colors(valid, cached):
        # Map (validity, cached) flags to (outer circle, inner dot) colors.
        if valid:
            return (color_valid_cla,
                    color_valid_cached_dot if cached else color_valid_cla)
        return (color_invalid_cla,
                color_invalid_cached_dot if cached else color_invalid_cla)

    parental_color = color_undefined_cla
    parental_cached_color = color_undefined_cla
    filial_color = color_undefined_cla
    filial_cached_color = color_undefined_cla
    if nd.isTip():
        td = nd.getTipData()
        if td:
            parental_color, parental_cached_color = cla_colors(
                td.parentalCLAValid(), td.parentalCLACached())
    else:
        ndata = nd.getInternalData()  # renamed from 'id' (shadowed builtin)
        if ndata:
            parental_color, parental_cached_color = cla_colors(
                ndata.parentalCLAValid(), ndata.parentalCLACached())
            filial_color, filial_cached_color = cla_colors(
                ndata.filialCLAValid(), ndata.filialCLACached())
    return parental_color, parental_cached_color, filial_color, filial_cached_color
def resizeEvent(self, event):
    """Tk <Configure> callback: adapt plot geometry to the new size and redraw."""
    self.resize(event.width, event.height, self.plot_margin)
    self.repaint()
def resize(self, new_width, new_height, new_margin):
    """Recompute the usable plotting rectangle for a new canvas size.

    :new_width/new_height: total canvas size in pixels
    :new_margin: border left empty on all four sides
    :raises ValueError: if the node-label font does not fit in the margin

    Fixed: the check was an ``assert``, which disappears under
    ``python -O``; it is now an explicit raise.
    """
    self.plotw = new_width
    self.ploth = new_height
    self.plotm = new_margin
    if not self.font_height < new_margin:
        raise ValueError('height of font too large for specified plot margin')
    #self.offset = new_margin/2
    self.usablew = self.plotw - 2*self.plotm
    self.usableh = self.ploth - 2*self.plotm
    self.left = self.plotm
    self.top = self.plotm
    self.right = self.plotw - self.plotm
    self.bottom = self.ploth - self.plotm
    self.hcenter = self.plotw/2
    self.vcenter = self.ploth/2
def repaint(self):
    """Clear the canvas to the background color and redraw the tree."""
    self.displayFilledRectangle(0, 0, self.plotw, self.ploth, color=color_plot_background)
    self.drawTree()
def reset(self):
    """Force a garbage-collection pass, then repaint the tree."""
    import gc # garbage collector
    gc.collect() # should capture return value, which is number of unreachable objects found
    self.repaint()
def useEdgelens(self):
    """Switch to drawing edges proportional to their lengths, if available."""
    # Can only honor this request if tree has edge lengths
    self.use_edgelens = self.tree.hasEdgeLens()
    if self.use_edgelens:
        self.repaint()
#class TreeViewer(Frame,Thread):
class TreeViewer(Tkinter.Frame):
def __init__(self, tree, msg, site, parent=None):
    """Build the viewer UI: menu bar, tree canvas and status bar.

    :tree: the tree object to display
    :msg: window title prefix
    :site: 1-based site number to show; 0 or less means no site
        (stored internally 0-based as self.site, or None)
    :parent: optional parent Tk widget
    """
    self.lookup_state = ['A','C','G','T']
    self.site = None
    if site > 0:
        self.site = site - 1  # convert to 0-based indexing
    self.window_title_prefix = msg
    self.exit_code = 1 # user can choose a normal quit (returns 1) or a cancel quit (returns 0)
    #Thread.__init__(self)
    Tkinter.Frame.__init__(self, parent)
    self.pack(expand=Tkinter.YES, fill=Tkinter.BOTH)
    # set the window title
    self.setTitle()
    # always position the main window 50 pixels from top and 50 pixels from left
    self.winfo_toplevel().geometry("+%d+%d" % (50, 50))
    # create a frame to hold the menu buttons
    menuf = Tkinter.Frame(self)
    menuf.pack(expand=Tkinter.NO, fill=Tkinter.X)
    # create the File menu button
    self.filemb = Tkinter.Menubutton(menuf, text='File', relief=Tkinter.RAISED, anchor=Tkinter.W, borderwidth=0)
    self.filemb.pack(expand=Tkinter.NO, fill=Tkinter.X, side=Tkinter.LEFT)
    self.filemb.menu = Tkinter.Menu(self.filemb, tearoff=0)
    self.filemb['menu'] = self.filemb.menu
    self.filemb.menu.add_command(label='Quit', command=self.quit)
    # create the Options menu button
    self.samplemb = Tkinter.Menubutton(menuf, text='Options', relief=Tkinter.RAISED, anchor=Tkinter.W, borderwidth=0)
    self.samplemb.pack(expand=Tkinter.NO, fill=Tkinter.X, side=Tkinter.LEFT)
    self.samplemb.menu = Tkinter.Menu(self.samplemb, tearoff=0)
    self.samplemb['menu'] = self.samplemb.menu
    self.samplemb.menu.add_command(label='Toggle node numbers', command=self.toggleNodeNamesNumbers)
    # create the Help menu button
    self.helpmb = Tkinter.Menubutton(menuf, text='Help', relief=Tkinter.RAISED, anchor=Tkinter.W, borderwidth=0)
    self.helpmb.pack(expand=Tkinter.YES, fill=Tkinter.X, side=Tkinter.LEFT)
    self.helpmb.menu = Tkinter.Menu(self.helpmb, tearoff=0)
    self.helpmb['menu'] = self.helpmb.menu
    self.helpmb.menu.add_command(label='About', command=self.helpAbout)
    # create the canvas, 2/3 of the screen in each dimension
    canvasw = int(0.67*self.winfo_screenwidth())
    canvash = int(0.67*self.winfo_screenheight())
    #self.plotter = TreeCanvas(parent=self, tree=tree, tree_lock=mutex, width=canvasw, height=canvash)
    self.plotter = TreeCanvas(parent=self, tree=tree, width=canvasw, height=canvash)
    self.plotter.pack(side=Tkinter.TOP, expand=Tkinter.YES, fill=Tkinter.BOTH)
    # create the status label
    self.status_label = Tkinter.Label(self, justify=Tkinter.LEFT, relief=Tkinter.SUNKEN, height=1, anchor=Tkinter.W, text='Ready')
    self.status_label.pack(side=Tkinter.TOP, expand=Tkinter.NO, fill=Tkinter.X)
    # bind some keys to the application (doesn't matter which widget has focus when you use bind_all)
    # See http://www.goingthewongway.com/2007/08/24/tkinter-keyboard-bindings-in-python/ for a list of
    # key codes and symbols, or enable keybdShowKeycode below, which will show you the key codes
    if False:
        # diagnostic mode: report the keycode of any key pressed
        self.bind_all("<Key>", self.keybdShowKeycode)
    else:
        self.bind_all("<KeyPress-n>", self.keybdToggleNodeNamesNumbers)
        self.bind_all("<KeyPress-h>", self.keybdHelpAbout)
        self.bind_all("<KeyPress-q>", self.keybdQuit)
        self.bind_all("<KeyPress-c>", self.keybdEscapeQuit)
        self.bind_all("<Right>", self.keybdIncrSite)
        self.bind_all("<Left>", self.keybdDecrSite)
        self.bind_all("<Next>", self.keybdSitePlusHundred)
        self.bind_all("<Prior>", self.keybdSiteMinusHundred)
        self.bind_all("<KeyPress-g>", self.keybdGoToSite)
        #self.bind_all("<Shift-KeyPress-N>", self.keybdManySteps)
    # configure event is bound only to the main frame
    #self.bind("<Configure>", self.resizing)
def setTitle(self):
    """Set the toplevel window title, appending the current site if any."""
    title = self.window_title_prefix
    if self.site is not None:
        title += ' (site = %d)' % self.site
    self.winfo_toplevel().title(title)
def keybdShowKeycode(self, event):
    """Diagnostic handler: show the keycode/keysym of the key just pressed."""
    import tkMessageBox # askokcancel, askquestion, showinfo
    keyinfo = "event.keycode = %d\nevent.keysym = %s" % (event.keycode, event.keysym)
    tkMessageBox.showinfo('Info about the key you pressed', keyinfo)
    # returning "break" stops Tk from propagating the event further
    return "break"
def keybdToggleNodeNamesNumbers(self, event):
    """Keyboard 'n': flip between node numbers and node names."""
    self.toggleNodeNamesNumbers()

def toggleNodeNamesNumbers(self):
    """Flip the plotter's label mode, report it in the status bar, redraw."""
    showing_names = not self.plotter.use_node_names
    self.plotter.use_node_names = showing_names
    if showing_names:
        self.status_label.config(text='now showing node names')
    else:
        self.status_label.config(text='now showing node numbers')
    self.plotter.repaint()
def keybdHelpAbout(self, event):
    """Keyboard 'h': open the About dialog."""
    self.helpAbout()

def helpAbout(self):
    """Show the module-level helptext in a message box."""
    import tkMessageBox # askokcancel, askquestion, showinfo
    tkMessageBox.showinfo('About the Phycas TreeViewer', helptext)
def keybdIncrSite(self, event):
    """Move to the next site; from the 'no site' state start at site 0."""
    self.site = 0 if self.site is None else self.site + 1
    self.setTitle()
    self.plotter.repaint()
def keybdDecrSite(self, event):
    """Move to the previous site; site 0 returns to the 'no site' state.

    Fixed: with no site selected, ``self.site -= 1`` raised a TypeError
    (None - 1); the event is now ignored in that state.
    """
    if self.site is None:
        return
    if self.site == 0:
        self.site = None
    else:
        self.site -= 1
    self.setTitle()
    self.plotter.repaint()
def keybdSitePlusHundred(self, event):
    """Jump 100 sites forward; from the 'no site' state go to site 100."""
    self.site = 100 if self.site is None else self.site + 100
    self.setTitle()
    self.plotter.repaint()
def keybdSiteMinusHundred(self, event):
    """Jump 100 sites back; at or below site 100, return to 'no site'.

    Fixed: on Python 3, comparing ``None <= 100`` raised a TypeError
    when no site was selected; None now maps explicitly to 'no site'
    (the behavior Python 2's None ordering produced implicitly).
    """
    if self.site is None or self.site <= 100:
        self.site = None
    else:
        self.site -= 100
    self.setTitle()
    self.plotter.repaint()
def keybdGoToSite(self, event):
    """Keyboard 'g': prompt for a site number and jump to it."""
    import tkSimpleDialog
    answer = tkSimpleDialog.askinteger('Go to site','Enter site (0 or larger)',minvalue=0)
    # answer is None when the user cancelled the dialog
    if answer is not None:
        self.site = answer
        self.setTitle()
        self.plotter.repaint()
def keybdQuit(self, event):
    """Keyboard 'q': normal quit (exit_code stays 1)."""
    self.close()

def keybdEscapeQuit(self, event):
    """Keyboard 'c': abnormal quit -- run() will report exit code 0."""
    self.exit_code = 0
    self.close()

def close(self):
    """Leave the Tk mainloop and destroy the widget tree."""
    self.quit()
    self.destroy()
def refresh(self, message):
    """Show *message* in the status bar, then redraw the tree."""
    status = self.status_label
    status.config(text=message)
    self.plotter.repaint()
def setLikelihoodRoot(self, like_root_node_num):
    """Record which node is the likelihood root; negative means none."""
    valid = like_root_node_num >= 0
    self.plotter.likelihood_root_nodenum = like_root_node_num if valid else None
def run(self):
    """Enter the Tk event loop; return 1 (normal quit) or 0 (escape quit)."""
    Tkinter.mainloop()
    return self.exit_code
if __name__ == '__main__':
    # Build a small demo tree and show it; run() blocks until the window closes.
    newick = '(A,(((B,C)U,(D,(E,F)W)V)T,G)S,(H,(I,(J,K)Z,L)Y)X)R'
    t = Phylogeny.Tree()
    t.buildFromString(newick)
    # Bug fix: TreeViewer.__init__ requires a 'site' argument; the old call
    # omitted it and raised a TypeError.  site=0 means no particular site
    # is selected (stored internally as None).
    TreeViewer(tree=t, msg='Showing default tree', site=0).run()
| {
"repo_name": "plewis/phycas",
"path": "src/python/treeviewer/TreeViewer.py",
"copies": "1",
"size": "26124",
"license": "mit",
"hash": 5712189553416356000,
"line_mean": 41.616639478,
"line_max": 147,
"alpha_frac": 0.5864339305,
"autogenerated": false,
"ratio": 3.6187837650644132,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4705217695564413,
"avg_score": null,
"num_lines": null
} |
"""A tree with operator nodes and numeric value leaves."""
from __future__ import print_function
import re
# Recognized operator symbols and the token-classification patterns.
OP_NAMES = ('+', '-', '*', '/')
# Raw strings avoid Python's invalid-escape-sequence deprecation warnings;
# inside a character class only '-' needs escaping.
RE_OPS = re.compile(r'^[+\-*/]$')
RE_NUM = re.compile(r'^\d+$')
class TreeError(Exception):
    """The base class for all tree errors in this module."""


class OperatorNameError(TreeError):
    """Raised if an operator is given a bad name."""


class NumValueError(TreeError):
    """Raised if the Num object value is not a valid number."""


class NotNumError(TreeError):
    """Raised if the object doesn't have a numeric value."""


class NotAnOp(TreeError):
    """Raised when a node expected to be an operator is not one."""
class Op(object):
    """An arithmetic operator."""

    def __init__(self, name):
        """Check the symbol against OP_NAMES and record it."""
        if name not in OP_NAMES:
            raise OperatorNameError('Name {0} not an op name.'.format(name))
        self.name = name
        self.type = 'op'

    def op(self, val_a, val_b):
        """Apply this operator to the two values (coerced to int)."""
        num_a = int(val_a)
        num_b = int(val_b)
        if self.name == '+':
            return num_a + num_b
        if self.name == '-':
            return num_a - num_b
        if self.name == '*':
            return num_a * num_b
        if self.name == '/':
            return num_a / num_b
        raise OperatorNameError('Name not found {0}.'.format(self.name))

    def __repr__(self):
        """Represent the operator by its symbol."""
        return self.name
class Num(object):
    """A number."""

    def __init__(self, val):
        """Coerce *val* to int; raise NumValueError when that fails."""
        self.type = 'num'
        try:
            self.val = int(val)
        except ValueError:
            raise NumValueError('Value {0} is not a valid number.'.format(val))

    def __repr__(self):
        """Return the decimal string for this number."""
        return '{0}'.format(self.val)
class Node(object):
    """A node that can be an operator or a numeric value."""

    def __init__(self, obj):
        """Wrap *obj* (an Op or Num) and start with no children."""
        self.obj = obj
        self.left = None
        self.right = None

    def type(self):
        """Return the wrapped object's type string ('op' or 'num')."""
        return self.obj.type

    def val(self):
        """Return the numeric value; raise NotNumError for non-numbers."""
        if self.obj.type != 'num':
            raise NotNumError('Not a number {0}.'.format(self.obj))
        return self.obj.val

    def op(self, val_a, val_b):
        """Delegate the arithmetic to the wrapped operator object."""
        return self.obj.op(val_a, val_b)

    def __repr__(self):
        """Return a str repr based on the object type."""
        return repr(self.obj)
class Tree(object):
    """A tree of nodes with an operator and left and right sides.

    The left and right can be Nums or each another Tree.
    """

    def __init__(self):
        """Initialize the root which is usually filled in later."""
        self.root = None

    def _get_val(self, value):
        """Turn a value into a Num Node if needed."""
        return value if isinstance(value, Node) else Node(Num(value))

    def make_branch(self, op_name, left_val, right_val):
        """Make a branch: an op node with left and right operand nodes."""
        branch = Node(Op(op_name))
        branch.left = self._get_val(left_val)
        branch.right = self._get_val(right_val)
        return branch

    def _eval_node(self, node):
        """Recursively walk the tree from a node, evaluating each op.

        We assume that the given node has an op with a left and right.
        """
        if node.type() != 'op':
            raise NotAnOp('This node {0} is not an operator.'.format(node))

        def side_value(child):
            # Recurse into sub-operators; leaves yield their number directly.
            return self._eval_node(child) if child.type() == 'op' else child.val()

        return node.op(side_value(node.left), side_value(node.right))

    def eval(self):
        """Evaluate the tree starting at root."""
        return self._eval_node(self.root)

    def parse(self, input_str):
        """Parse input_str and populate the tree, ready to eval.

        Args:
            input_str: str. A str containing an RPN expression, e.g.,
                '2 3 + 4 5 + *'.
        """
        parser = Parser()
        parser.parse(input_str)
        self.root = parser.stack[0]

    def _rpn_str(self, node, template):
        """Render the subtree at *node*, formatting each op via template."""
        if node.type() != 'op':
            raise NotAnOp('This node {0} is not an operator.'.format(node))

        def side_str(child):
            return self._rpn_str(child, template) if child.type() == 'op' else repr(child)

        return template.format(side_str(node.left), side_str(node.right), repr(node))

    def rpn_str(self):
        """Render the tree in postfix (RPN) order."""
        return self._rpn_str(self.root, '{0} {1} {2}')

    def infix_str(self):
        """Render the tree in parenthesized infix order."""
        return self._rpn_str(self.root, '({0} {2} {1})')

    def __repr__(self):
        """A Tree prints in its RPN form."""
        return self.rpn_str()
class Parser(object):
    """Parse a str in RPN to make a tree."""

    def __init__(self):
        """Initialize the stack."""
        self.stack = []
        self.input = []

    def split_input(self, input_str):
        """Split the input str (e.g. '2 3 + 4 5 + *') into tokens."""
        self.input = input_str.split()

    def tokenize(self):
        """Shift the next token onto the stack as a Node.

        Returns:
            'op' or 'num' according to the token type.
        """
        token = self.input.pop(0)
        if RE_OPS.search(token):
            self.stack.append(Node(Op(token)))
            return 'op'
        if RE_NUM.search(token):
            self.stack.append(Node(Num(token)))
            return 'num'

    def interpret(self):
        """Interpret the bottom elements on the stack.

        If the bottom node is an op, create a branch with the op and two
        preceeding nodes on the stack as it's left and right.
        """
        if self.stack[-1].type() != 'op':
            template = 'The bottom item on the stack is not an operator {0}'
            raise NotAnOp(template.format(self.stack[-1]))
        op_node = self.stack.pop()
        op_node.right = self._reduce_side()
        op_node.left = self._reduce_side()
        self.stack.append(op_node)

    def _reduce_side(self):
        """Pop one operand, first collapsing it recursively when it is an
        unresolved operator and further operands remain below it."""
        side = self.stack.pop()
        if side.type() == 'op' and len(self.stack) > 1:
            self.stack.append(side)
            self.interpret()
            side = self.stack.pop()
        return side

    def parse(self, input_str):
        """Parse the input_str and leave a full tree on the stack.

        One Node object is left on the internal stack which is the root of
        the entire tree.

        Args:
            input_str: str. A str containing an RPN expression, e.g.,
                '2 3 + 4 5 + *'.
        """
        self.split_input(input_str)
        while self.input:
            self.tokenize()
        self.interpret()
| {
"repo_name": "kmggh/walk_expression_tree",
"path": "tree.py",
"copies": "1",
"size": "6788",
"license": "artistic-2.0",
"hash": -1034100279446858900,
"line_mean": 22.0884353741,
"line_max": 74,
"alpha_frac": 0.5989982322,
"autogenerated": false,
"ratio": 3.436962025316456,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4535960257516456,
"avg_score": null,
"num_lines": null
} |
''' a Triangle
'''
import math
import collections
import itertools
from . import Polygon, Point, Segment, Circle
from .constants import Epsilon, Half_Pi, nearly_eq, Sqrt_3
from .exceptions import *
class Triangle(Polygon):
'''a pythonic Triangle
Implements a Triangle object in the XY plane having three
non-coincident vertices and three intersecting edges.
Vertices are labeled; 'A', 'B' and 'C'.
Edges are labeled; 'AB', 'BC' and 'AC'.
The length of edges opposite each vertex are labeled:
'a' for the side opposite vertex A.
'b' for the side opposite vertex B.
'c' for the side opposite vertex C.
Interior angles in radians are labeled:
'alpha' for CAB
'beta' for ABC
'gamma' for BCA
Usage:
>>> a = Triangle()
>>> b = Triangle(A,B,C) # A,B,C are Points or Point equivalents
>>> c = Triangle([p,q,r]) # p,q,r are Points or Point equivalents
>>> d = Triangle([x,y,z],[x,y,z],[x,y,z])
'''
@classmethod
def withAngles(cls, origin=None, base=1, alpha=None,
               beta=None, gamma=None, inDegrees=False):
    '''
    :origin: optional Point
    :base: optional float, length of the base edge
    :alpha: optional float, interior angle at vertex A
    :beta: optional float, interior angle at vertex B
    :gamma: optional float, interior angle at vertex C
    :inDegrees: optional boolean, angles are degrees if True, else radians
    :return: Triangle initialized with points comprising the triangle
             with the specified angles.

    Not yet implemented.
    '''
    raise NotImplementedError("withAngles")
@classmethod
def withSides(cls, origin=None, a=1, b=1, c=1):
    '''
    :origin: optional Point
    :a: optional float describing length of the side opposite A
    :b: optional float describing length of the side opposite B
    :c: optional float describing length of the side opposite C
    :return: Triangle initialized with points comprising the triangle
             with the specified side lengths.

    If only 'a' is specified, an equilateral triangle is returned.

    Not yet implemented.
    '''
    raise NotImplementedError("withSides")
@classmethod
def unit(cls, scale=1):
    '''Return a Triangle built from Point.units(scale).

    NOTE(review): presumably the canonical unit points scaled by
    :scale: -- verify against Point.units.
    '''
    return cls(Point.units(scale))
def __init__(self, *args, **kwds):
    '''
    :args: iterable of Point or Point equivalents
    :kwds: named Points where recognized names are 'A', 'B' and 'C'.

    If A is an iterable containing Point or Point equivalent objects
    it will be used to initialize up to three points in the triangle.
    '''
    # Vertices not supplied by the caller default to the right triangle
    # (0,0), (1,0), (0,1).
    kwds['defaults'] = Point(), Point(1, 0), Point(0, 1)
    super().__init__(*args, **kwds)
    # A triangle must have exactly three vertices.
    if len(self) != 3:
        raise ValueError(len(self))
# Vertex-pair accessors: each property XY returns self.pairs('XY') (the two
# named vertices) and assigning an iterable of two points sets both vertices.
@property
def AB(self):
    return self.pairs('AB')

@AB.setter
def AB(self, iterable):
    self.A, self.B = iterable

@property
def BA(self):
    return self.pairs('BA')

@BA.setter
def BA(self, iterable):
    self.B, self.A = iterable

@property
def BC(self):
    return self.pairs('BC')

@BC.setter
def BC(self, iterable):
    self.B, self.C = iterable

@property
def CB(self):
    return self.pairs('CB')

@CB.setter
def CB(self, iterable):
    self.C, self.B = iterable

@property
def AC(self):
    return self.pairs('AC')

@AC.setter
def AC(self, iterable):
    self.A, self.C = iterable

@property
def CA(self):
    return self.pairs('CA')

@CA.setter
def CA(self, iterable):
    self.C, self.A = iterable

@property
def ABC(self):
    # All three vertices as a list, in order.
    return [self.A, self.B, self.C]

@ABC.setter
def ABC(self, iterable):
    self.A, self.B, self.C = iterable
@property
def ccw(self):
    '''
    Result of A.ccw(B,C), float.

    See Point.ccw
    '''
    return self.A.ccw(self.B, self.C)

@property
def isCCW(self):
    '''
    True if ABC has a counter-clockwise rotation, boolean.
    '''
    return self.A.isCCW(self.B, self.C)
@property
def area(self):
'''
Area of the triangle, float.
Performance note: computed via Triangle.ccw (subtractions and
multiplications and a divison).
'''
return abs(self.ccw) / 2
@property
def heronsArea(self):
'''
Heron's forumla for computing the area of a triangle, float.
Performance note: contains a square root.
'''
s = self.semiperimeter
return math.sqrt(s * ((s - self.a) * (s - self.b) * (s - self.c)))
@property
def inradius(self):
    '''
    The radius of the triangle's incircle, float.
    '''
    # r = area / semiperimeter, written here as 2*area / perimeter.
    return (self.area * 2) / self.perimeter

@property
def circumcenter(self):
    '''
    The intersection of the median perpendicular bisectors, Point.

    The center of the circumscribed circle, which is the circle that
    passes through all vertices of the triangle.

    https://en.wikipedia.org/wiki/Circumscribed_circle#Cartesian_coordinates_2

    BUG: only finds the circumcenter in the XY plane
    '''
    # For right triangles the circumcenter is the hypotenuse midpoint.
    if self.isRight:
        return self.hypotenuse.midpoint
    # The formula below assumes A sits at the origin, so translate first.
    if self.A.isOrigin:
        t = self
    else:
        t = Triangle(self.A - self.A, self.B - self.A, self.C - self.A)
    if not t.A.isOrigin:
        raise ValueError('failed to translate {} to origin'.format(t))
    # presumably Point * Point is componentwise, making d equal to
    # 2 * (Bx*Cy - By*Cx) — confirm against Point.__mul__.
    BmulC = t.B * t.C.yx
    d = 2 * (BmulC.x - BmulC.y)
    bSqSum = sum((t.B ** 2).xy)
    cSqSum = sum((t.C ** 2).xy)
    # Translate the computed center back by adding A's coordinates.
    x = (((t.C.y * bSqSum) - (t.B.y * cSqSum)) / d) + self.A.x
    y = (((t.B.x * cSqSum) - (t.C.x * bSqSum)) / d) + self.A.y
    return Point(x, y)

@property
def circumradius(self):
    '''
    Distance from the circumcenter to all the verticies in
    the Triangle, float.
    '''
    # R = (a * b * c) / (4 * area)
    return (self.a * self.b * self.c) / (self.area * 4)

@property
def circumcircle(self):
    '''
    A circle whose center is equidistant from all the
    vertices of the triangle, Circle.
    '''
    return Circle(self.circumcenter, self.circumradius)
@property
def orthocenter(self):
    '''
    The intersection of the altitudes of the triangle, Point.

    NOTE(review): not yet implemented; always raises NotImplementedError.
    '''
    raise NotImplementedError('orthocenter')

@property
def hypotenuse(self):
    '''
    The longest edge of the triangle, Segment.
    '''
    # edges() presumably yields the three Segments — defined elsewhere.
    return max(self.edges(),key=lambda s:s.length)
@property
def alpha(self):
    '''
    The angle described by angle CAB in radians, float.
    '''
    return Segment(self.CA).radiansBetween(Segment(self.BA))

@property
def beta(self):
    '''
    The angle described by angle ABC in radians, float.
    '''
    return Segment(self.AB).radiansBetween(Segment(self.CB))

@property
def gamma(self):
    '''
    The angle described by angle BCA in radians, float.
    '''
    return Segment(self.BC).radiansBetween(Segment(self.AC))

@property
def angles(self):
    '''
    A list of the interior angles of the triangle, list of floats.
    '''
    return [self.alpha, self.beta, self.gamma]
@property
def a(self):
    '''
    The length of line segment BC, opposite vertex A, float.
    '''
    return abs(self.B.distance(self.C))

@property
def b(self):
    '''
    The length of line segment AC, opposite vertex B, float.
    '''
    return abs(self.A.distance(self.C))

@property
def c(self):
    '''
    The length of line segment AB, opposite vertex C, float.
    '''
    return abs(self.A.distance(self.B))

@property
def sides(self):
    '''
    A list of edge lengths [a, b, c], list of floats.
    '''
    return [self.a, self.b, self.c]

@property
def altitudes(self):
    '''
    A list of the altitudes of each vertex [AltA, AltB, AltC], list of
    floats.

    An altitude is the shortest distance from a vertex to the side
    opposite of it.
    '''
    # Each altitude h_x satisfies area = x * h_x / 2, so h_x = 2*area / x.
    A = self.area * 2
    return [A / self.a, A / self.b, A / self.c]
@property
def isEquilateral(self):
    '''
    True iff all side lengths are equal, boolean.
    '''
    return self.a == self.b == self.c

@property
def isIsosceles(self):
    '''
    True iff at least two side lengths are equal, boolean.
    '''
    return (self.a == self.b) or (self.a == self.c) or (self.b == self.c)
@property
def isScalene(self):
    '''
    True iff all three side lengths differ pairwise, boolean.
    '''
    # The chained `a != b != c` only compared adjacent pairs, so a
    # triangle with sides (1, 2, 1) was wrongly reported as scalene;
    # all three pairs must be checked.
    return self.a != self.b and self.b != self.c and self.a != self.c
@property
def isRight(self):
    '''
    True if one angle measures 90 degrees (Pi/2 radians), boolean.
    '''
    # nearly_eq and Half_Pi presumably come from this module —
    # a float-tolerant comparison against pi/2.
    return any([nearly_eq(v,Half_Pi) for v in self.angles])

@property
def isObtuse(self):
    '''
    True if one angle measures greater than 90 degrees (Pi/2 radians),
    boolean.
    '''
    return any([v > Half_Pi for v in self.angles])

@property
def isAcute(self):
    '''
    True iff all angles measure less than 90 degrees (Pi/2 radians),
    boolean.
    '''
    return all([v < Half_Pi for v in self.angles])
def congruent(self, other):
    '''
    A congruent B

    True iff the multiset of angles of 'A' equals the multiset of
    angles of 'B' and likewise for the side lengths, boolean.

    Sorted lists are compared rather than sets: a set collapses
    duplicate values, so isosceles triangles with angles {x, x, y}
    and {x, y, y} (and matching sides) previously compared as
    congruent even though they are different triangles.
    '''
    if sorted(self.angles) != sorted(other.angles):
        return False
    return sorted(self.sides) == sorted(other.sides)
| {
"repo_name": "JnyJny/Geometry",
"path": "Geometry/triangle2.py",
"copies": "1",
"size": "9993",
"license": "mit",
"hash": 7130236080199315000,
"line_mean": 23.0795180723,
"line_max": 82,
"alpha_frac": 0.5595917142,
"autogenerated": false,
"ratio": 3.8039588884659308,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4863550602665931,
"avg_score": null,
"num_lines": null
} |
"""A triangle widget."""
from typing import Optional
from kivy.graphics import Triangle as KivyTriangle
from kivy.graphics.context_instructions import Color, Rotate, Scale
from kivy.properties import ListProperty, NumericProperty
from mpfmc.uix.widget import Widget
from mpfmc.core.utils import center_of_points_list
MYPY = False
if MYPY: # pragma: no cover
from mpfmc.core.mc import MpfMc
class Triangle(Widget):

    """A triangle widget.

    Draws a filled triangle from three (x, y) points, with color,
    rotation and scale applied around the centroid of the points.
    """

    widget_type_name = 'Triangle'
    # Properties the animation system is allowed to animate.
    animation_properties = ('points', 'color', 'opacity', 'rotation', 'scale')

    def __init__(self, mc: "MpfMc", config: dict, key: Optional[str] = None, **kwargs) -> None:
        """Set up the widget, bind redraw triggers, and draw once.

        :param mc: the media controller instance.
        :param config: widget configuration dict.
        :param key: optional widget key.
        """
        del kwargs
        super().__init__(mc=mc, config=config, key=key)

        # The points in this widget are always relative to the bottom left corner
        self.anchor_pos = ("left", "bottom")

        # Bind to all properties that when changed need to force
        # the widget to be redrawn
        self.bind(color=self._draw_widget,
                  points=self._draw_widget,
                  rotation=self._draw_widget,
                  scale=self._draw_widget)

        self._draw_widget()

    def _draw_widget(self, *args) -> None:
        """Establish the drawing instructions for the widget."""
        del args

        if self.canvas is None:
            return

        # TODO: allow user to set rotation/scale origin
        center = center_of_points_list(self.points)
        self.canvas.clear()

        # Scale and rotation both pivot around the centroid of the points.
        with self.canvas:
            Color(*self.color)
            Scale(self.scale, origin=center)
            Rotate(angle=self.rotation, origin=center)
            KivyTriangle(points=self.points)

    #
    # Properties
    #

    points = ListProperty([0, 0, 50, 100, 100, 0])
    '''The list of points to use to draw the widget in (x1, y1, x2, y2,
    x3, y3) format.

    :attr:`points` is a :class:`~kivy.properties.ListProperty`.
    '''

    rotation = NumericProperty(0)
    '''Rotation angle value of the widget.

    :attr:`rotation` is an :class:`~kivy.properties.NumericProperty` and defaults to
    0.
    '''

    scale = NumericProperty(1.0)
    '''Scale value of the widget.

    :attr:`scale` is an :class:`~kivy.properties.NumericProperty` and defaults to
    1.0.
    '''


# Widget classes this module exports for registration.
widget_classes = [Triangle]
| {
"repo_name": "missionpinball/mpf_mc",
"path": "mpfmc/widgets/triangle.py",
"copies": "1",
"size": "2317",
"license": "mit",
"hash": 6829097092122274000,
"line_mean": 27.256097561,
"line_max": 95,
"alpha_frac": 0.6262408287,
"autogenerated": false,
"ratio": 3.842454394693201,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4968695223393201,
"avg_score": null,
"num_lines": null
} |
"""A trie data structure implemented as a class."""
from collections import OrderedDict
class Node(object):
    """Node object to build a trie.

    Holds a link to the parent node, an ordered mapping of child
    characters to Nodes, and an end-of-word flag.
    """

    def __init__(self, prev=None, end=False):
        """Init node object.

        :prev: parent Node (None for the root).
        :end: True if a stored word terminates at this node.
        """
        self.prev = prev
        self.children = OrderedDict()
        self.end = end


class Trie(object):
    """
    Trie class.

    insert(string): adds a string to the trie.
    contains(string): returns true if string is in the trie, else false.
    size: property returning the number of words in the trie. 0 if empty.
    remove(string): will remove the string from the trie. Exception otherwise.
    traversal(start): generator of child characters depth-first.
    """

    def __init__(self):
        """Initialize the Trie class."""
        self.root = Node()
        self._size = 0

    def insert(self, string):
        """Insert string into the trie.

        Re-inserting a word already present is a no-op, so duplicates
        no longer inflate the word count.
        """
        current_node = self.root
        for char in string:
            if char not in current_node.children:
                current_node.children[char] = Node(prev=current_node)
            current_node = current_node.children[char]
        if not current_node.end:
            current_node.end = True
            self._size += 1

    def contains(self, string):
        """Return a boolean, true if the string is present, else false."""
        curr_node = self.root
        for char in string:
            if char in curr_node.children:
                curr_node = curr_node.children[char]
            else:
                return False
        return curr_node.end

    @property
    def size(self):
        """Return the number of strings in the trie."""
        return self._size

    def remove(self, string):
        """Remove a string from the trie. Exception if string is absent.

        :raises ValueError: if the string was never stored as a word
            (including when it exists only as a prefix of longer words —
            previously that case silently corrupted the size counter).
        """
        curr_node = self.root
        for char in string:
            if char in curr_node.children:
                curr_node = curr_node.children[char]
            else:
                raise ValueError("That word is not in this Trie.")
        if not curr_node.end:
            raise ValueError("That word is not in this Trie.")
        if curr_node.children or curr_node is self.root:
            # The word is a prefix of longer words (or the empty string);
            # just clear the terminal flag.
            curr_node.end = False
        else:
            # Leaf word: prune the chain of single-child, non-terminal
            # ancestors that existed only to spell this word.
            i = 1
            while (curr_node.prev is not self.root
                   and not curr_node.prev.end
                   and len(curr_node.prev.children) < 2):
                curr_node = curr_node.prev
                i += 1
            del curr_node.prev.children[string[-i]]
            curr_node.prev = None
        self._size -= 1

    def __len__(self):
        """Allow use of len() function."""
        # size is a property; the original called it like a method,
        # which raised TypeError ("'int' object is not callable").
        return self.size

    def traversal(self, start=None):
        """Yield child characters depth-first from a starting prefix.

        NOTE(review): despite the original docstring's claim of whole
        "tokens (words)", this yields individual characters in
        depth-first order; that observable behavior is preserved.

        :raises ValueError: if start is not a path present in the trie.
        """
        start_node = self.root
        if start is not None:
            for char in start:
                if char in start_node.children:
                    start_node = start_node.children[char]
                else:
                    raise ValueError("That string is not in this Trie.")
        trav_list = [(key, node) for key, node in reversed(start_node.children.items())]
        while trav_list:
            key, node = trav_list.pop()
            yield key
            trav_list.extend((k, n) for k, n in reversed(node.children.items()))
| {
"repo_name": "CCallahanIV/data-structures",
"path": "src/trie.py",
"copies": "2",
"size": "3333",
"license": "mit",
"hash": 508292811872656800,
"line_mean": 32.33,
"line_max": 114,
"alpha_frac": 0.5550555056,
"autogenerated": false,
"ratio": 4.1506849315068495,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.570574043710685,
"avg_score": null,
"num_lines": null
} |
# A trie implementation in Python
class Node(object):
    """
    Trie node implementation
    """

    def __init__(self, char):
        self.char = char        # character stored at this node
        self.children = []      # child Node objects
        self.complete = False   # True if an added word ends here
        self.counter = 1        # number of added words passing through


def add(root, word):
    """
    Adding a word into the tree
    """
    node = root
    for char in word:
        found_in_child = False
        # BUG FIX: the original looped over the undefined name `children`,
        # raising NameError; it must walk the current node's children.
        for child in node.children:
            if child.char == char:
                child.counter += 1
                node = child
                found_in_child = True
                break
        if not found_in_child:
            new_node = Node(char)
            node.children.append(new_node)
            node = new_node
    node.complete = True


def find_prefix(root, prefix):
    """
    Check and return
    1. If the prefix exists in any of the words we added so far
    2. If yes then how many words actually have the prefix
    """
    node = root
    # If the root node has no children, then return False.
    # Because it means we are trying to search in an empty trie
    if not root.children:
        return False, 0
    for char in prefix:
        char_not_found = True
        # Search through all the children of the present `node`
        for child in node.children:
            if child.char == char:
                # We found the char existing in the child.
                char_not_found = False
                # Assign node as the child containing the char and break
                node = child
                break
        # Return False anyway when we did not find a char.
        if char_not_found:
            return False, 0
    # Well, we are here means we have found the prefix. Return true to indicate that
    # And also the counter of the last node. This indicates how many words have this
    # prefix
    return True, node.counter
"repo_name": "paulmorio/grusData",
"path": "datastructures/trie.py",
"copies": "1",
"size": "1538",
"license": "mit",
"hash": -2632932881910173000,
"line_mean": 23.4285714286,
"line_max": 81,
"alpha_frac": 0.6827048114,
"autogenerated": false,
"ratio": 3.272340425531915,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4455045236931915,
"avg_score": null,
"num_lines": null
} |
"""a trigram algorithm that generates text using a book-sized file as input."""
import io
import string
import re
import random
import sys
def main(file_path, num_words):
    '''Call the primary functions of this module.

    :file_path: path to the input text file.
    :num_words: minimum number of words to generate (str or int).
    '''
    num_words = int(num_words)
    data = input_file(file_path)
    sentences = split_data(data)
    # NOTE(review): these two prints look like leftover debugging output.
    print(sentences)
    book_dic = build_dic(sentences)
    print(book_dic)
    book = build_book(book_dic, num_words)
    # Capitalize the very first letter of the generated text.
    first_letter_of_book = book[0].upper()
    book = first_letter_of_book + book[1:]
    # book = capitalize_sentences(book)
    print(book)
def build_dic(sentences):
    '''Build the trigram dictionary from a list of sentences.

    :sentences: iterable of sentence strings (text already split on '.').
    :return: dict mapping "word1 word2" keys to lists of follower words;
        a pair that ends its sentence maps to '.' so generation can
        terminate sentences.
    '''
    dic = {}
    for sentence in sentences:
        # Splitting the text on '.' leaves an empty trailing string; the
        # original indexed sentence[0] unconditionally (IndexError) behind
        # an always-true check (`type(...) is not 'int'` compares a type
        # object to a str literal, which can never be equal).
        if not sentence:
            continue
        # Lowercase the first letter so sentence-initial words merge with
        # their mid-sentence occurrences.
        sentence = sentence[0].lower() + sentence[1:]
        working_sentence = remove_punc(sentence)
        word_array = split_words(working_sentence)
        for i in range(len(word_array) - 1):
            key_word = word_array[i] + ' ' + word_array[i + 1]
            # Index comparison replaces the fragile `is not` identity test
            # against word_array[-1], which relied on object identity of
            # strings rather than position.
            if i + 1 < len(word_array) - 1:
                value_word = word_array[i + 2]
            else:
                # This pair ends the sentence.
                value_word = '.'
            dic = add_to_dic(dic, key_word, value_word)
    return dic
def build_book(dic, num_words):
    '''Add the generated sentences to the book.

    :dic: trigram dict mapping "word1 word2" keys to follower-word lists.
    :num_words: minimum number of words the finished book must contain.
    :return: the generated text as a single string.
    '''
    book = ''
    # Seed with a random two-word key so there are always two words to key on.
    words_to_add = select_rand_key(dic)
    book = add_to_book(book, words_to_add)
    while len(book.split()) < num_words:
        # The last two words of the book form the lookup key for the next word.
        last_two = book.split()[-2:]
        last_two_string = last_two[0] + ' ' + last_two[1]
        # NOTE(review): assumes every derived pair exists as a key in dic;
        # a missing pair would raise KeyError inside get_random_value.
        new_word = get_random_value(dic, last_two_string)
        book = add_to_book(book, new_word)
        if new_word == '.':
            # Sentence ended; restart from a fresh random key.
            book = add_to_book(book, select_rand_key(dic))
    return book
def input_file(path):
    '''Open and read a given file.

    :path: path to a text file.
    :return: the entire file contents as one string.
    '''
    # A context manager guarantees the file is closed even if read() raises;
    # the original's explicit close() leaked the handle on error.
    with io.open(path) as file:
        return file.read()
def split_data(data):
    '''Break the raw text into sentence chunks at each period.'''
    return data.split('.')
def remove_punc(sentences):
    '''Replace every punctuation character in the text with a space.'''
    # One C-level pass over the string: map each character in
    # string.punctuation to a single space.
    table = str.maketrans(string.punctuation, ' ' * len(string.punctuation))
    return sentences.translate(table)
def split_words(sentence):
    '''Split a sentence into a list of whitespace-separated words.'''
    return sentence.split()
def add_to_dic(dic, key, value):
    '''Append a value to the list stored under a two-word key.

    :dic: the trigram dictionary (mutated in place).
    :key: two-word key string.
    :value: follower word to record.
    :return: the same dict, returned for caller convenience.
    '''
    # setdefault does one lookup instead of the original
    # `key in dic.keys()` membership test plus a second indexing.
    dic.setdefault(key, []).append(value)
    return dic
def select_rand_key(dic):
    '''Return one key chosen at random from the dictionary.'''
    return random.sample(list(dic), 1)[0]
def add_to_book(book, words):
    '''Append words to the book: space-separated, except a period which
    attaches directly to the preceding word.'''
    if not book:
        return words
    separator = '' if words == '.' else ' '
    return book + separator + words
def get_random_value(dic, key):
    '''Pick one follower word at random from the list stored under key.'''
    return random.choice(dic[key])
# def capitalize_sentences(book):
# updated_book = ''
# sentences = book.split('.')
# for sentence in sentences:
# sentence_start = sentence[1].upper()
# sentence = sentence_start + sentence[2:] + '. '
# updated_book += sentence
# return updated_book
if __name__ == '__main__':
    # usage: python trigrams.py <input_file_path> <word_count>
    main(sys.argv[1], sys.argv[2])
| {
"repo_name": "midfies/trigrams",
"path": "src/trigrams.py",
"copies": "1",
"size": "3399",
"license": "mit",
"hash": -1698165840514505700,
"line_mean": 26.4112903226,
"line_max": 79,
"alpha_frac": 0.5966460724,
"autogenerated": false,
"ratio": 3.529595015576324,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4626241087976324,
"avg_score": null,
"num_lines": null
} |
"""A trivia cog that uses Open Trivia Database."""
import os
import html
import asyncio
import time
import datetime
import random
import math
import aiohttp
import discord
from discord.ext import commands
from __main__ import send_cmd_help
from .utils import checks
from .utils.dataIO import dataIO
SAVE_FILEPATH = "data/KeaneCogs/quiz/quiz.json"
class Quiz:
    """Play a kahoot-like trivia game with questions from Open Trivia Database."""

    def __init__(self, bot):
        self.bot = bot
        # Persistent per-server state (OTDB session tokens) loaded from disk.
        self.save_file = dataIO.load_json(SAVE_FILEPATH)
        # channel.id -> {"Start": datetime, "Started": bool,
        #                "Players": {player.id: score},
        #                "Answers": {player.id: {"Choice", "Time"}}}
        self.playing_channels = {}
        # Seconds players have to join before the game starts (or is cancelled).
        self.timeout = 20
        self.game_tasks = []
        self.starter_task = bot.loop.create_task(self.start_loop())

    @commands.group(pass_context=True, no_pm=True)
    async def quiz(self, ctx):
        """Play a kahoot-like trivia game with questions from Open Trivia Database.

        In this game, you will compete with other players to correctly answer each
        question as quickly as you can. You have 10 seconds to type the answer
        choice before time runs out. Only your first answer will be registered.

        The longer you take to say the right answer, the fewer points you get.
        If you get it wrong, you get no points.
        """
        if ctx.invoked_subcommand is None:
            await send_cmd_help(ctx)

    @quiz.command(name="play", pass_context=True)
    async def quiz_play(self, ctx):
        """Create or join a quiz game."""
        channel = ctx.message.channel
        player = ctx.message.author
        if channel.id not in self.playing_channels:
            # First player in this channel creates the pending game.
            self.playing_channels[channel.id] = {"Start":datetime.datetime.utcnow(),
                                                 "Started":False,
                                                 "Players":{player.id:0},
                                                 "Answers":{}
                                                 }
            return await self.bot.say("{} is starting a quiz game! It will start "
                                      "in 20 seconds. Use `{}quiz play` to join."
                                      .format(player.display_name, ctx.prefix))
        channelinfo = self.playing_channels[channel.id]
        if player.id in channelinfo["Players"]:
            await self.bot.say("You are already in the game.")
        elif channelinfo["Started"]:
            await self.bot.say("A quiz game is already underway.")
        else:
            channelinfo["Players"][player.id] = 0
            await self.bot.say("{} joined the game.".format(player.display_name))

    async def start_loop(self):
        """Starts quiz games when the timeout period ends."""
        while True:
            await asyncio.sleep(1)
            # list() so channels can be popped while iterating.
            for channelid in list(self.playing_channels):
                channelinfo = self.playing_channels[channelid]
                since_start = (datetime.datetime.utcnow() - channelinfo["Start"]).total_seconds()
                if since_start > self.timeout and not channelinfo["Started"]:
                    if len(channelinfo["Players"]) > 1:
                        channel = self.bot.get_channel(channelid)
                        self.game_tasks.append(self.bot.loop.create_task(self.game(channel)))
                        channelinfo["Started"] = True
                    else:
                        # Nobody joined within the timeout; cancel the game.
                        await self.bot.send_message(self.bot.get_channel(channelid),
                                                    "Nobody else joined the quiz game.")
                        self.playing_channels.pop(channelid)

    async def on_message(self, message):
        # Record the FIRST a/b/c/d answer from each participating player.
        authorid = message.author.id
        channelid = message.channel.id
        choice = message.content.lower()
        if channelid in self.playing_channels:
            channelinfo = self.playing_channels[channelid]
            if (authorid in channelinfo["Players"]
                    and authorid not in channelinfo["Answers"]
                    and choice in {"a", "b", "c", "d"}):
                channelinfo["Answers"][authorid] = {"Choice":choice,
                                                    "Time":time.perf_counter()}

    async def game(self, channel):
        """Runs a quiz game on a channel."""
        self.add_server(channel.server)
        try:
            category = await self.category_selector()
            category_name = await self.category_name(category)
            response = await self.get_questions(channel.server, category=category)
        except RuntimeError:
            await self.bot.send_message(channel, "An error occurred in retrieving questions. "
                                                 "Please try again.")
            self.playing_channels.pop(channel.id)
            raise
        channelinfo = self.playing_channels[channel.id]

        # Introduction
        intro = ("Welcome to the quiz game! Your category is {}.\n"
                 "Remember to answer correctly as quickly as you can. "
                 "Only your first answer will be registered by the game. "
                 "You have 10 seconds per question.\n"
                 "The game will begin shortly.".format(category_name))
        await self.bot.send_message(channel, intro)
        await asyncio.sleep(4)

        # Question and Answer
        afk_questions = 0
        for index, dictionary in enumerate(response["results"]):
            answers = [dictionary["correct_answer"]] + dictionary["incorrect_answers"]

            # Display question and countdown
            if len(answers) == 2:  # true/false question
                answers = ["True", "False", "", ""]
            else:
                answers = [html.unescape(answer) for answer in answers]
                random.shuffle(answers)
            message = "```\n"
            message += "{} ({}/20)\n".format(html.unescape(dictionary["question"]), index + 1)
            message += "A. {}\n".format(answers[0])
            message += "B. {}\n".format(answers[1])
            message += "C. {}\n".format(answers[2])
            message += "D. {}\n".format(answers[3])
            message += "```"
            message_obj = await self.bot.send_message(channel, message)
            await self.bot.add_reaction(message_obj, "0⃣")
            channelinfo["Answers"].clear()  # clear the previous question's answers
            start_time = time.perf_counter()
            # Keycap reactions act as a visible 10-second countdown.
            numbers = ["1⃣", "2⃣", "3⃣", "4⃣", "5⃣", "6⃣", "7⃣", "8⃣", "9⃣", "🔟"]
            for i in range(10):
                # Stop early once every player has answered.
                if len(channelinfo["Answers"]) == len(channelinfo["Players"]):
                    break
                await asyncio.sleep(1)
                await self.bot.add_reaction(message_obj, numbers[i])

            # Organize answers
            # NOTE(review): this is an alias of channelinfo["Answers"],
            # not a copy — answers arriving before the next clear() still
            # mutate it; confirm whether a dict(...) copy was intended.
            user_answers = channelinfo["Answers"]
            answerdict = {["a", "b", "c", "d"][num]: answers[num] for num in range(4)}

            # Check for AFK: three questions in a row with fewer than two
            # answers cancels the game.
            if len(user_answers) < 2:
                afk_questions += 1
                if afk_questions == 3:
                    await self.bot.send_message(channel, "The game has been cancelled due "
                                                         "to lack of participation.")
                    self.playing_channels.pop(channel.id)
                    return
            else:
                afk_questions = 0

            # Find and display correct answer
            correct_letter = ""
            for letter, answer in answerdict.items():
                if answer == html.unescape(dictionary["correct_answer"]):
                    correct_letter = letter
                    break
            assert answerdict[correct_letter] == html.unescape(dictionary["correct_answer"])
            message = "Correct answer:```{}. {}```".format(correct_letter.upper(),
                                                           answerdict[correct_letter])
            await self.bot.send_message(channel, message)

            # Sort player IDs by answer time
            playerids = sorted(user_answers,
                               key=lambda playerid: user_answers[playerid]["Time"])

            # Assign scores: faster correct answers earn more points.
            first = True
            for playerid in playerids:
                if user_answers[playerid]["Choice"] == correct_letter:
                    time_taken = user_answers[playerid]["Time"] - start_time
                    assert time_taken > 0
                    # the 20 in the formula below is 2 * 10s (max answer time)
                    points = round(1000 * (1 - (time_taken / 20)))
                    # The first correct answer gets a bonus 250 points
                    if first:
                        points += 250
                        first = False
                    channelinfo["Players"][playerid] += points

            # Display top 5 players and their points
            message = self.scoreboard(channel)
            await self.bot.send_message(channel, "Scoreboard:\n" + message)
            await asyncio.sleep(4)
            if index < 19:
                await self.bot.send_message(channel, "Next question...")
                await asyncio.sleep(1)

        # Ending and Results
        await self.end_game(channel)

    async def end_game(self, channel):
        """Ends a quiz game: announce the winner, pay out credits, clean up."""
        # non-linear credit earning .0002x^{2.9} where x is score/100
        # leaderboard with credits earned
        channelinfo = self.playing_channels[channel.id]
        idlist = sorted(channelinfo["Players"],
                        key=(lambda idnum: channelinfo["Players"][idnum]),
                        reverse=True)
        winner = channel.server.get_member(idlist[0])
        await self.bot.send_message(channel, "Game over! {} won!".format(winner.mention))
        bank = self.bot.get_cog("Economy").bank
        leaderboard = "```py\n"
        max_credits = self.calculate_credits(channelinfo["Players"][idlist[0]])
        end_len = len(str(max_credits)) + 1  # the 1 is for a space between a max length name and the score
        rank_len = len(str(len(channelinfo["Players"])))
        rank = 1
        no_account = False
        for playerid in idlist:
            player = channel.server.get_member(playerid)
            account_exists = bank.account_exists(player)  # how does this know what server it's called in???
            # Truncate long names so the fixed-width leaderboard lines up;
            # players without a bank account get a trailing "*" marker.
            if account_exists:
                if len(player.display_name) > 25 - rank_len - end_len:
                    name = player.display_name[:22 - rank_len - end_len] + "..."
                else:
                    name = player.display_name
            else:
                if len(player.display_name) > 24 - rank_len - end_len:
                    name = player.display_name[:21 - rank_len - end_len] + "...*"
                else:
                    name = player.display_name + "*"
            leaderboard += str(rank)
            leaderboard += " " * (1 + rank_len - len(str(rank)))
            leaderboard += name
            creds = self.calculate_credits(channelinfo["Players"][playerid])
            creds_str = str(creds)
            leaderboard += " " * (26 - rank_len - 1 - len(name) - len(creds_str))
            leaderboard += creds_str + "\n"
            if account_exists:
                bank.deposit_credits(player, creds)
            else:
                no_account = True
            rank += 1
        if not no_account:
            leaderboard += "```"
        else:
            leaderboard += ("* because you do not have a bank account, "
                            "you did not get to keep the credits you won.```\n")
        await self.bot.send_message(channel, "Credits earned:\n" + leaderboard)
        self.playing_channels.pop(channel.id)

    def scoreboard(self, channel):
        """Returns a scoreboard string to be sent to the text channel."""
        channelinfo = self.playing_channels[channel.id]
        scoreboard = "```py\n"
        idlist = sorted(channelinfo["Players"],
                        key=(lambda idnum: channelinfo["Players"][idnum]),
                        reverse=True)
        max_score = channelinfo["Players"][idlist[0]]
        end_len = len(str(max_score)) + 1
        rank = 1
        # Only the top five players are shown between questions.
        for playerid in idlist[:5]:
            player = channel.server.get_member(playerid)
            if len(player.display_name) > 24 - end_len:
                name = player.display_name[:21 - end_len] + "..."
            else:
                name = player.display_name
            scoreboard += str(rank) + " " + name
            score_str = str(channelinfo["Players"][playerid])
            scoreboard += " " * (24 - len(name) - len(score_str))
            scoreboard += score_str + "\n"
            rank += 1
        scoreboard += "```"
        return scoreboard

    def calculate_credits(self, score):
        """Calculates credits earned from a score.

        Polynomial growth for low scores, switching to exponential above
        the crossover point; the result is rounded to an int.
        """
        adjusted = score / 100
        if adjusted < 156.591:
            result = .0002 * (adjusted**2.9)
        else:
            result = (.6625 * math.exp(.0411 * adjusted)) + 50
        return round(result)

    # OpenTriviaDB API functions
    async def get_questions(self, server, category=None, difficulty=None):
        """Gets questions, resetting a token or getting a new one if necessary.

        Retries up to three times on recoverable token errors (codes 3/4);
        raises RuntimeError on unrecoverable codes or exhausted retries.
        """
        parameters = {"amount": 20}
        if category:
            parameters["category"] = category
        if difficulty:
            parameters["difficulty"] = difficulty
        for _ in range(3):
            parameters["token"] = await self.get_token(server)
            async with aiohttp.get("https://opentdb.com/api.php",
                                   params=parameters) as response:
                response_json = await response.json()
            response_code = response_json["response_code"]
            if response_code == 0:
                return response_json
            elif response_code == 1:
                raise RuntimeError("Question retrieval unsuccessful. Response "
                                   "code from OTDB: 1")
            elif response_code == 2:
                raise RuntimeError("Question retrieval unsuccessful. Response "
                                   "code from OTDB: 2")
            elif response_code == 3:
                # Token expired. Obtain new one.
                print("Response code from OTDB: 3")
                self.save_file["Servers"][server.id]["Token"] = ""
                dataIO.save_json(SAVE_FILEPATH, self.save_file)
            elif response_code == 4:
                # Token empty. Reset it.
                print("Response code from OTDB: 4")
                await self.reset_token(server)
        raise RuntimeError("Failed to retrieve questions.")

    async def get_token(self, server):
        """Gets the provided server's token, or generates
        and saves one if one doesn't exist."""
        if self.save_file["Servers"][server.id]["Token"]:
            token = self.save_file["Servers"][server.id]["Token"]
        else:
            async with aiohttp.get("https://opentdb.com/api_token.php",
                                   params={"command": "request"}) as response:
                response_json = await response.json()
            token = response_json["token"]
            self.save_file["Servers"][server.id]["Token"] = token
            dataIO.save_json(SAVE_FILEPATH, self.save_file)
        return token

    async def reset_token(self, server):
        """Resets the provided server's token."""
        token = self.save_file["Servers"][server.id]["Token"]
        async with aiohttp.get("https://opentdb.com/api_token.php",
                               params={"command": "reset", "token": token}) as response:
            response_code = (await response.json())["response_code"]
        if response_code != 0:
            raise RuntimeError("Token reset was unsuccessful. Response code from "
                               "OTDB: {}".format(response_code))

    async def category_selector(self):
        """Chooses a random category that has enough questions.

        Tries up to ten random category ids (9-32) and requires at least
        40 questions so a 20-question game has headroom.
        """
        for _ in range(10):
            category = random.randint(9, 32)
            async with aiohttp.get("https://opentdb.com/api_count.php",
                                   params={"category": category}) as response:
                response_json = await response.json()
            assert response_json["category_id"] == category
            if response_json["category_question_count"]["total_question_count"] > 39:
                return category
        raise RuntimeError("Failed to select a category.")

    async def category_name(self, idnum):
        """Finds a category's name from its number."""
        async with aiohttp.get("https://opentdb.com/api_category.php") as response:
            response_json = await response.json()
        for cat_dict in response_json["trivia_categories"]:
            if cat_dict["id"] == idnum:
                return cat_dict["name"]
        raise RuntimeError("Failed to find category's name.")

    # Other functions
    def add_server(self, server):
        """Adds the server to the file if it isn't already in it."""
        if server.id not in self.save_file["Servers"]:
            self.save_file["Servers"][server.id] = {"Token": ""}
            dataIO.save_json(SAVE_FILEPATH, self.save_file)
        return

    def __unload(self):
        # Cancel the starter loop and any in-flight games when the cog unloads.
        self.starter_task.cancel()
        for task in self.game_tasks:
            task.cancel()
def dir_check():
    """Creates a folder and save file for the cog if they don't exist."""
    if not os.path.exists("data/KeaneCogs/quiz"):
        print("Creating data/KeaneCogs/quiz folder...")
    # exist_ok avoids the check-then-create race the original had between
    # os.path.exists and os.makedirs (another loader could create it first).
    os.makedirs("data/KeaneCogs/quiz", exist_ok=True)
    if not dataIO.is_valid_json(SAVE_FILEPATH):
        print("Creating default quiz.json...")
        dataIO.save_json(SAVE_FILEPATH, {"Servers": {}})
def setup(bot):
    """Creates a Quiz object (cog entry point called by the bot loader)."""
    dir_check()
    bot.add_cog(Quiz(bot))
| {
"repo_name": "keanemind/Keane-Cogs",
"path": "quiz/quiz.py",
"copies": "1",
"size": "18172",
"license": "mit",
"hash": 5391725456922098000,
"line_mean": 42.7325301205,
"line_max": 107,
"alpha_frac": 0.5393685603,
"autogenerated": false,
"ratio": 4.324279247081249,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5363647807381249,
"avg_score": null,
"num_lines": null
} |
"""A trivial base class to avoid circular imports for isinstance checks."""
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.training.checkpointable import base as checkpointable_lib
class CheckpointableDataStructureBase(checkpointable_lib.CheckpointableBase):
  """Base class for data structures which contain checkpointable objects.

  Exists so isinstance checks can target checkpointable data structures
  without importing the concrete implementations (avoids circular imports,
  per the module docstring).
  """
  pass
| {
"repo_name": "dendisuhubdy/tensorflow",
"path": "tensorflow/python/training/checkpointable/data_structures_base.py",
"copies": "3",
"size": "1122",
"license": "apache-2.0",
"hash": 5882976357665193000,
"line_mean": 40.5555555556,
"line_max": 80,
"alpha_frac": 0.7237076649,
"autogenerated": false,
"ratio": 4.734177215189874,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6957884880089873,
"avg_score": null,
"num_lines": null
} |
#A trivial demonstration of the RecurrentSig layer from iisignature_recurrent_keras.py
#relies on keras 2
import os
#os.environ["THEANO_FLAGS"]="floatX=float32,device=cpu,optimizer=fast_compile"
#os.environ["THEANO_FLAGS"]="floatX=float32,device=cpu,mode=DebugMode"
#os.environ["THEANO_FLAGS"]="floatX=float32,device=gpu0,force_device=True,cxx=g++-4.8,nvcc.flags=-D_FORCE_INLINES,nvcc.compiler_bindir=/usr/bin/g++-4.8"
#os.environ["THEANO_FLAGS"]="floatX=float32,device=gpu0,force_device=True,cxx=g++-4.8,nvcc.flags=-D_FORCE_INLINES,nvcc.compiler_bindir=/usr/bin/g++-4.8,base_compiledir=/run/user/1001/theano"
os.environ["THEANO_FLAGS"]="floatX=float32,device=cpu,force_device=True,mode=DebugMode,DebugMode.check_finite=False"
# NOTE: the assignment below overrides the one above -- only the last
# THEANO_FLAGS value takes effect.
os.environ["THEANO_FLAGS"]="floatX=float32,device=cpu,force_device=True"
#os.environ["THEANO_FLAGS"]="floatX=float32,device=cpu,force_device=True,mode=NanGuardMode,exception_verbosity=high,NanGuardMode.inf_is_error=False,NanGuardMode.big_is_error=False,NanGuardMode.action=warn,optimizer=fast_compile"
os.environ["KERAS_BACKEND"]="theano"
# likewise, this override means the script actually runs on tensorflow
os.environ["KERAS_BACKEND"]="tensorflow"
import numpy, sys
#add the parent directory, so we find our iisignature build if it was built --inplace
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from iisignature_recurrent_keras import RecurrentSig
import keras.models, keras.layers.recurrent, keras.layers.core
from keras.layers.recurrent import SimpleRNN, LSTM
m=keras.models.Sequential()
#a few possible networks here.
#using relu with RecurrentSig sometimes gets Nans
m.add(RecurrentSig(5,sig_level=2,input_shape=(None,3),return_sequences=False, use_signatures = True, output_signatures = False, activation="tanh",train_time_lapse=True))
#m.add(RecurrentSig(5,input_shape=(5,3),return_sequences=True, use_signatures = True, output_signatures = False, activation="relu"))
#m.add(RecurrentSig(6,return_sequences=False,activation="relu"))
#m.add(LSTM(5,input_shape=(5,3),return_sequences=False))
#m.add(LSTM(5,input_shape=(5,3),return_sequences=True))
#m.add(LSTM(6,return_sequences=False))
#m.add(keras.layers.core.Flatten(input_shape=(5,3)))
#m.add(keras.layers.core.Dense(1000,activation="relu"))
# single sigmoid output: target lies in [0, 1]
m.add(keras.layers.core.Dense(1, activation="sigmoid"))
op = keras.optimizers.Adam(lr=0.0002, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
m.compile(loss='mse', optimizer=op)#metrics = accuracy
m.summary()
#The task here for the network to learn is very easy - the average of two of the inputs
x = numpy.random.uniform(size=(2311,5,3))
y = (x[:,1,1] + x[:,3,2])/2 # The output is a number between 0 and 1, so matches sigmoid activation of the top layer
# hold out the last 311 samples for evaluation
testx = x[2000:,:,:]
testy = y[2000:]
x=x[:2000,:,:]
y=y[:2000]
#a=numpy.random.uniform(size=(3,5,3))
#print (m.predict(a))
m.fit(x,y,epochs=10,shuffle=0)
print (m.evaluate(testx,testy,verbose=0))
| {
"repo_name": "bottler/iisignature",
"path": "examples/demo_rnn.py",
"copies": "1",
"size": "2853",
"license": "mit",
"hash": 5196889545030173000,
"line_mean": 48.1896551724,
"line_max": 228,
"alpha_frac": 0.7549947424,
"autogenerated": false,
"ratio": 2.7888563049853374,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8873115214206213,
"avg_score": 0.03414716663582498,
"num_lines": 58
} |
#A trivial demonstration of the RecurrentSig layer from iisignature_recurrent_torch.py
#No assertion is made that this model is a good idea, or that this code is idiomatic pytorch.
import numpy as np, sys, os, itertools
import torch
from torch.autograd import Variable
import torch.nn as nn
#add the parent directory, so we find our iisignature build if it was built --inplace
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from iisignature_recurrent_torch import RecurrentSig
criterion = nn.MSELoss()
#The task here for the network to learn is very easy - the average of two of the inputs
x = np.random.uniform(size=(2311,5,3))
y = (x[:,1,1] + x[:,3,2])/2 # The output is a number between 0 and 1, so matches sigmoid activation of the top layer
# hold out the last 311 samples for evaluation
testx = x[2000:,:,:]
testy = y[2000:]
x=x[:2000,:,:]
y=y[:2000]
# RecurrentSig over 3-dim input with 5 hidden units, plus a linear readout
rnn=RecurrentSig(3,5,sig_level=2,use_signatures = False, output_signatures = False, train_time_lapse=False)
finalLayer=nn.Linear(5,1)
# one optimizer over the parameters of both modules
optimizer=torch.optim.Adam(itertools.chain(rnn.parameters(),finalLayer.parameters()),lr=0.0001)
minibatch_size = 32
def train(x_batch, y_batch):
    """Run one optimisation step on a single minibatch.

    Uses the module-level ``rnn``, ``finalLayer``, ``optimizer`` and
    ``criterion``.  Returns the final-layer output and the scalar loss.
    """
    batch_n = y_batch.shape[0]
    state = rnn.initHidden(batch_n)
    optimizer.zero_grad()
    inputs = Variable(torch.FloatTensor(x_batch))
    # unroll the RNN over the 5 time steps
    out = None
    for step in range(5):
        out, state = rnn(inputs[:, step, :], state)
    out = finalLayer(out)
    targets = Variable(torch.FloatTensor(y_batch))
    loss = criterion(out, targets)
    loss.backward()
    optimizer.step()
    return out, loss.data[0]
def predict(x):
    """Forward a batch through the RNN and final layer (no training)."""
    state = rnn.initHidden(x.shape[0])
    inputs = Variable(torch.FloatTensor(x))
    out = None
    for step in range(5):
        out, state = rnn(inputs[:, step, :], state)
    return finalLayer(out)
def evaluate(x, y):
    """Return the MSE loss of the model's predictions on (x, y)."""
    targets = Variable(torch.FloatTensor(y))
    return criterion(predict(x), targets)
# training loop: nb_epoch passes of minibatch SGD, then report test loss
nb_epoch=2
for i in range(nb_epoch*x.shape[0]//minibatch_size):
    # NOTE(review): indices are drawn from testx's range (311 rows) but used
    # to index the training arrays -- presumably x.shape[0] was intended, so
    # only the first 311 training rows are ever sampled; confirm.
    indices = np.random.randint(testx.shape[0],size=minibatch_size)
    output,loss=train(x[indices],y[indices])
    #print (loss)
#a=np.random.uniform(size=(3,5,3))
#print (a, predict(a).data)
print (evaluate(testx,testy).data)
| {
"repo_name": "bottler/iisignature",
"path": "examples/demo_rnn_torch.py",
"copies": "1",
"size": "2144",
"license": "mit",
"hash": -3351368772519757300,
"line_mean": 31,
"line_max": 116,
"alpha_frac": 0.7056902985,
"autogenerated": false,
"ratio": 3.098265895953757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4303956194453757,
"avg_score": null,
"num_lines": null
} |
"""A trivial spell checking API using Flask and TextBlob.
This app wraps a very simple JSON interface around TextBlob and provides very
basic spell checking and correction support (english only for now).
"""
# third-party imports
from flask import Flask
from flask import jsonify
from flask import request
from textblob import TextBlob
from textblob import Word
# define Flask app that does all the magic
app = Flask(__name__)
@app.route('/correction')
def correction():
    """Return TextBlob's best-guess spelling correction of the query text.

    urls should take the form '/correction?text=some%20textt%20to%20corect'

    data returned will be a JSON object that looks like:

        {text: "some text to correct"}
    """
    raw_text = request.args.get('text', '')
    blob = TextBlob(raw_text)
    corrected = blob.correct()
    return jsonify(text=unicode(corrected))
@app.route('/spellcheck')
def spellcheck():
    """Spell-check each whitespace-separated word of the query text.

    urls should take the form '/spellcheck?text=my%20speeling%20is%20quite%20badd'

    data returned will be a JSON object mapping each input word to a list of
    (candidate, confidence) pairs, e.g.:

        {
            "is": [["is", 1]],
            "speeling": [["spelling", 0.5], ["speeding", 0.25], ["peeling", 0.25]]
        }
    """
    raw_text = request.args.get('text', '')
    # duplicate words collapse to one key, same as the original loop
    words = {w: Word(w).spellcheck() for w in raw_text.split()}
    return jsonify(**words)
if __name__ == '__main__':
    # app runs in debug mode, turn this off if you're deploying
    # (debug mode enables the interactive debugger -- not safe for production)
    app.run(debug=True)
| {
"repo_name": "paddycarey/speelchecker",
"path": "app.py",
"copies": "1",
"size": "1916",
"license": "mit",
"hash": 8282732685870773000,
"line_mean": 29.4126984127,
"line_max": 82,
"alpha_frac": 0.6299582463,
"autogenerated": false,
"ratio": 3.698841698841699,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48287999451416985,
"avg_score": null,
"num_lines": null
} |
"""ATS input converter, moves SEB from the monolithic version in 0.87
and earlier to a newer, modularized version in 0.88."""
import sys, os

# Make amanzi_xml importable when AMANZI_SRC_DIR is set; otherwise assume it
# is already available on sys.path.
try:
    amanzi_xml = os.path.join(os.environ["AMANZI_SRC_DIR"], "tools","amanzi_xml")
except KeyError:
    pass
else:
    if amanzi_xml not in sys.path:
        sys.path.append(amanzi_xml)

import copy
from amanzi_xml.utils import search as asearch
from amanzi_xml.utils import io as aio
from amanzi_xml.utils import errors as aerrors
from amanzi_xml.common import parameter, parameter_list
def add_snow_mesh(xml):
    """Add an aliased 'snow' mesh (aliasing 'surface') and a matching vis list."""
    mesh_list = asearch.child_by_name(xml, "mesh")
    snow_mesh = mesh_list.sublist("snow")
    snow_mesh.append(parameter.StringParameter("mesh type", "aliased"))
    alias_params = snow_mesh.sublist("aliased parameters")
    alias_params.append(parameter.StringParameter("alias", "surface"))

    vis_list = asearch.child_by_name(xml, "visualization")
    surf_vis = asearch.child_by_name(vis_list, "surface")
    snow_vis = vis_list.sublist("snow")
    snow_vis.append(parameter.StringParameter("file name base", "visdump_snow"))
    # copy all surface vis settings except the file name, which differs above
    for entry in surf_vis:
        if entry.get('name') != "file name base":
            snow_vis.append(copy.copy(entry))
def eval_snow_swe():
    """Build the snow-swe (snow water equivalent) evaluator parameter list."""
    swe_list = parameter_list.ParameterList("snow-swe")
    swe_list.append(parameter.StringParameter(
        "field evaluator type", "multiplicative evaluator"))
    swe_list.append(parameter.ArrayStringParameter(
        "evaluator dependencies",
        ["snow-depth", "snow-density", "snow-cell_volume"]))
    # unit conversion factor for the product
    swe_list.append(parameter.DoubleParameter("coefficient", 1.e-3))
    return swe_list
def eval_snow_frac_areas():
    """Build the surface-fractional_areas evaluator parameter list."""
    frac_list = parameter_list.ParameterList("surface-fractional_areas")
    frac_list.append(parameter.StringParameter(
        "field evaluator type", "surface balance area fractions"))
    return frac_list
def eval_snow_source_sink(dc=None):
    """Build the snow-source_sink evaluator list.

    dc, when given, is a 'debug cells' parameter carried over from the old
    monolithic SEB PK.
    """
    src_list = parameter_list.ParameterList("snow-source_sink")
    src_list.append(parameter.StringParameter(
        "field evaluator type", "surface balance"))
    src_list.append(parameter.BoolParameter("save diagnostic data", True))
    if dc is not None:
        src_list.append(dc)
    verbose = src_list.sublist("verbose object")
    verbose.append(parameter.StringParameter("verbosity level", "high"))
    return src_list
def eval_longwave():
    """Build the incoming longwave radiation evaluator parameter list."""
    lw_list = parameter_list.ParameterList("surface-incoming_longwave_radiation")
    lw_list.append(parameter.StringParameter(
        "field evaluator type", "incoming longwave radiation"))
    return lw_list
def eval_albedo():
    """Build the subgrid albedos evaluator parameter list."""
    albedo_list = parameter_list.ParameterList("surface-subgrid_albedos")
    albedo_list.append(parameter.StringParameter(
        "field evaluator type", "albedo"))
    return albedo_list
def evals(xml, dc):
    """Add the new snow/surface field evaluators to state->field evaluators.

    dc is an optional 'debug cells' parameter (or None) harvested from the
    old monolithic SEB PK by pks(); it is forwarded into the snow source
    evaluator.
    """
    eval_list = asearch.find_by_path(xml, ["state","field evaluators"])
    # only add incoming longwave radiation if it is not already present
    try:
        lw = asearch.child_by_name(eval_list, "surface-incoming_longwave_radiation")
    except aerrors.MissingXMLError:
        eval_list.append(eval_longwave())
    eval_list.append(eval_snow_frac_areas())
    eval_list.append(eval_snow_swe())
    eval_list.append(eval_snow_source_sink(dc))
    eval_list.append(eval_albedo())
    # move snow precipitation into the snow domain, if present
    try:
        snow_precip = asearch.child_by_name(eval_list, "surface-precipitation_snow")
    except aerrors.MissingXMLError:
        pass
    else:
        snow_precip.setName("snow-precipitation")
def pks(xml):
    """Update PK types and source-term flags for the modular SEB scheme.

    Renames the monolithic 'surface balance implicit' PK to the new
    'surface balance implicit subgrid' PK (rebuilt on the snow domain), and
    toggles the 'source term' options of the flow/energy PKs that now
    receive the SEB sources.

    Returns the old SEB PK's 'debug cells' parameter if one was found,
    otherwise None.
    """
    # update the PK tree entry to the new PK type name
    pk_tree = asearch.child_by_namePath(xml, "cycle driver/PK tree")
    for pk_type in asearch.gen_by_path(pk_tree, "PK type"):
        if pk_type.getValue() == 'surface balance implicit':
            pk_type.setValue('surface balance implicit subgrid')

    # Initialize up front: previously dc was only bound inside the SEB
    # branch, so 'return dc' raised NameError when no 'surface balance
    # implicit' PK was present in the input.
    dc = None
    pks = asearch.child_by_name(xml, "PKs")
    # Iterate over a snapshot: the SEB branch below pops the old PK and
    # appends the new one, which would otherwise mutate the container
    # while it is being iterated.
    for pk in list(pks):
        pk_type = asearch.child_by_name(pk, "PK type")
        if pk_type.get('value') == 'permafrost flow':
            # subsurface flow no longer carries the SEB source
            try:
                source_term = asearch.child_by_name(pk, "source term")
            except aerrors.MissingXMLError:
                pass
            else:
                source_term.setValue(False)

        elif pk_type.get('value') == 'overland flow with ice':
            # surface flow now receives a (non-differentiable) SEB source
            try:
                source_term = asearch.child_by_name(pk, "source term")
            except aerrors.MissingXMLError:
                pk.append(parameter.BoolParameter("source term", True))
            else:
                source_term.setValue(True)
            try:
                source_is_diff = asearch.child_by_name(pk, "source term is differentiable")
            except aerrors.MissingXMLError:
                pk.append(parameter.BoolParameter("source term is differentiable", False))
            else:
                source_is_diff.setValue(False)

        elif pk_type.get('value') == 'three-phase energy':
            # subsurface energy no longer carries the SEB source
            try:
                source_term = asearch.child_by_name(pk, "source term")
            except aerrors.MissingXMLError:
                pass
            else:
                source_term.setValue(False)

        elif pk_type.get('value') == 'surface energy':
            # surface energy receives a differentiable, finite-differenced source
            try:
                source_term = asearch.child_by_name(pk, "source term")
            except aerrors.MissingXMLError:
                pk.append(parameter.BoolParameter("source term", True))
            else:
                source_term.setValue(True)
            try:
                source_is_diff = asearch.child_by_name(pk, "source term is differentiable")
            except aerrors.MissingXMLError:
                pk.append(parameter.BoolParameter("source term is differentiable", True))
            else:
                source_is_diff.setValue(True)
            try:
                source_fd = asearch.child_by_name(pk, "source term finite difference")
            except aerrors.MissingXMLError:
                pk.append(parameter.BoolParameter("source term finite difference", True))
            else:
                source_fd.setValue(True)

        elif pk_type.get('value') == 'surface balance implicit':
            # rebuild the monolithic SEB PK as the new subgrid PK on the
            # snow domain, keeping its name, IC, verbosity and debug cells
            pk_seb = parameter_list.ParameterList(pk.get('name'))
            pk_seb.append(parameter.StringParameter('PK type', 'surface balance implicit subgrid'))
            pk_seb.append(parameter.StringParameter('layer name', 'snow'))
            pk_seb.append(parameter.StringParameter('domain name', 'snow'))
            pk_seb.append(parameter.StringParameter('primary variable key', 'snow-depth'))
            pk_seb.append(parameter.StringParameter('conserved quantity key', 'snow-swe'))
            pk_seb.append(parameter.StringParameter('source key', 'snow-source_sink'))
            pk_seb.append(parameter.BoolParameter('source term is differentiable', False))
            pk_seb.append(pk.pop('initial condition'))
            try:
                pk_seb.append(pk.pop('verbose object'))
            except aerrors.MissingXMLError:
                pass
            try:
                dc = pk.pop('debug cells')
            except aerrors.MissingXMLError:
                dc = None
            else:
                pk_seb.append(copy.copy(dc))

            # trivial preconditioner + NKA linear solver for the new PK
            pc = pk_seb.sublist('preconditioner')
            pc.append(parameter.StringParameter('preconditioner type', 'identity'))
            ls = pk_seb.sublist('linear solver')
            ls.append(parameter.StringParameter('iterative method', 'nka'))
            nka = ls.sublist('nka parameters')
            nka.append(parameter.DoubleParameter('error tolerance', 1.e-6))
            nka.append(parameter.IntParameter('maximum number of iterations', 10))

            # replace the old PK entry with the rebuilt one
            pks.pop(pk.get('name'))
            pks.append(pk_seb)
    return dc
def update_seb(xml):
    """Apply the full monolithic-SEB to modular-SEB conversion to xml."""
    add_snow_mesh(xml)
    debug_cells = pks(xml)
    evals(xml, debug_cells)
| {
"repo_name": "amanzi/ats-dev",
"path": "tools/input_converters/seb_monolithic_to_evals.py",
"copies": "2",
"size": "7489",
"license": "bsd-3-clause",
"hash": 5480035926781949000,
"line_mean": 37.2091836735,
"line_max": 124,
"alpha_frac": 0.6235812525,
"autogenerated": false,
"ratio": 3.7388916625062407,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.536247291500624,
"avg_score": null,
"num_lines": null
} |
# at some point try
# from keras.utils import plot_model
# plot_model(model, to_file='model.png')
import matplotlib.lines as mlines
import warnings
import theano.sandbox.cuda.basic_ops as sbcuda
import numpy as np
import load_data
import realtime_augmentation as ra
import time
import sys
import json
from datetime import timedelta
import os
import matplotlib.pyplot as plt
from termcolor import colored
import functools
from custom_for_keras import input_generator
from custom_for_keras import sliced_accuracy_mean, sliced_accuracy_std, rmse,\
lr_function
from ellipse_fit import get_ellipse_kaggle_par
from custom_keras_model_and_fit_capsels import kaggle_winsol
# from custom_keras_model_x_cat_x_maxout import kaggle_x_cat_x_maxout\
# as kaggle_winsol
starting_time = time.time()
cut_fraktion = 0.8
copy_to_ram_beforehand = False
debug = True
get_winsol_weights = False
BATCH_SIZE = 16 # keep in mind
NUM_INPUT_FEATURES = 3
EPOCHS = 300
GEN_BUFFER_SIZE = 2
included_flipped = True
USE_BLENDED_PREDICTIONS = False
PRED_BLENDED_PATH = 'predictions/final/blended/blended_predictions.npy.gz'
if debug:
print os.path.isfile(PRED_BLENDED_PATH)
TRAIN_LOSS_SF_PATH = 'try_ROC.txt'
# TRAIN_LOSS_SF_PATH = "trainingNmbrs_keras_modular_includeFlip_and_37relu.txt"
# TARGET_PATH = "predictions/final/try_convnet.csv"
WEIGHTS_PATH = "analysis/final/try_start_with_noMaxout_inBetween.h5"
TXT_OUTPUT_PATH = 'try_ROC.txt'
IMAGE_OUTPUT_PATH = "img_ROC"
postfix = ''
NUM_ELLIPSE_PARAMS = 2
ELLIPSE_FIT = False
# ELLIPSE_FIT = WEIGHTS_PATH.find('ellipse') >= 0
# if ELLIPSE_FIT:
# postfix = '_ellipse'
DONT_LOAD_WEIGHTS = False
input_sizes = [(69, 69), (69, 69)]
PART_SIZE = 45
N_INPUT_VARIATION = 2
# set to True if the prediction and evaluation should be done when the
# prediction file already exists
REPREDICT_EVERYTIME = True
# TODO built this as functions, not with the if's
DO_TRAIN = True
DO_VALID = True # disable this to not bother with the validation set evaluation
DO_VALID_CORR = False # not implemented yet
VALID_CORR_OUTPUT_FILTER = np.zeros((37))
VALID_CORR_OUTPUT_FILTER[2] = 1 # star or artifact
VALID_CORR_OUTPUT_FILTER[3] = 1 # edge on yes
VALID_CORR_OUTPUT_FILTER[4] = 1 # edge on no
VALID_CORR_OUTPUT_FILTER[5] = 1 # bar feature yes
VALID_CORR_OUTPUT_FILTER[7] = 1 # spiral arms yes
VALID_CORR_OUTPUT_FILTER[14] = 1 # anything odd? no
VALID_CORR_OUTPUT_FILTER[18] = 1 # ring
VALID_CORR_OUTPUT_FILTER[19] = 1 # lence
VALID_CORR_OUTPUT_FILTER[20] = 1 # disturbed
VALID_CORR_OUTPUT_FILTER[21] = 1 # irregular
VALID_CORR_OUTPUT_FILTER[22] = 1 # other
VALID_CORR_OUTPUT_FILTER[23] = 1 # merger
VALID_CORR_OUTPUT_FILTER[24] = 1 # dust lane
N_Corr_Filter_Images = np.sum(VALID_CORR_OUTPUT_FILTER)
DO_VALIDSTUFF_ON_TRAIN = True
DO_TEST = False # disable this to not generate predictions on the testset
output_names = ["smooth", "featureOrdisk", "NoGalaxy", "EdgeOnYes", "EdgeOnNo", "BarYes", "BarNo", "SpiralYes", "SpiralNo", "BulgeNo", "BulgeJust", "BulgeObvious", "BulgDominant", "OddYes", "OddNo", "RoundCompletly", "RoundBetween", "RoundCigar",
"Ring", "Lense", "Disturbed", "Irregular", "Other", "Merger", "DustLane", "BulgeRound", "BlulgeBoxy", "BulgeNo2", "SpiralTight", "SpiralMedium", "SpiralLoose", "Spiral1Arm", "Spiral2Arm", "Spiral3Arm", "Spiral4Arm", "SpiralMoreArms", "SpiralCantTell"]
question_slices = [slice(0, 3), slice(3, 5), slice(5, 7), slice(7, 9),
slice(9, 13), slice(13, 15), slice(15, 18), slice(18, 25),
slice(25, 28), slice(28, 31), slice(31, 37)]
question_requierement = [None] * len(question_slices)
question_requierement[1] = question_slices[0].start + 1
question_requierement[2] = question_slices[1].start + 1
question_requierement[3] = question_slices[1].start + 1
question_requierement[4] = question_slices[1].start + 1
question_requierement[6] = question_slices[0].start
question_requierement[9] = question_slices[4].start
question_requierement[10] = question_slices[4].start
print 'Question requirements: %s' % question_requierement
spiral_or_ellipse_cat = [[(0, 1), (1, 1), (3, 0)], [(0, 1), (1, 0)]]
target_filename = os.path.basename(WEIGHTS_PATH).replace(".h5", ".npy.gz")
if get_winsol_weights:
target_filename = os.path.basename(WEIGHTS_PATH).replace(".pkl", ".npy.gz")
target_path_valid = os.path.join(
"predictions/final/augmented/valid", target_filename)
target_path_test = os.path.join(
"predictions/final/augmented/test", target_filename)
if copy_to_ram_beforehand:
ra.myLoadFrom_RAM = True
import copy_data_to_shm
y_train = np.load("data/solutions_train.npy")
ra.y_train = y_train
# split training data into training + a small validation set
ra.num_train = y_train.shape[0]
# integer division, is defining validation size
ra.num_valid = ra.num_train // 10
ra.num_train -= ra.num_valid
# training num check for EV usage
if ra.num_train != 55420:
print "num_train = %s not %s" % (ra.num_train, 55420)
ra.num_train = ra.num_train - ra.num_train % BATCH_SIZE
ra.y_valid = ra.y_train[ra.num_train:]
ra.y_train = ra.y_train[:ra.num_train]
load_data.num_train = y_train.shape[0]
load_data.train_ids = np.load("data/train_ids.npy")
ra.load_data.num_train = load_data.num_train
ra.load_data.train_ids = load_data.train_ids
ra.valid_ids = load_data.train_ids[ra.num_train:]
ra.train_ids = load_data.train_ids[:ra.num_train]
train_ids = load_data.train_ids
test_ids = load_data.test_ids
num_train = ra.num_train
num_test = len(test_ids)
num_valid = ra.num_valid
y_valid = ra.y_valid
y_train = ra.y_train
valid_ids = ra.valid_ids
train_ids = ra.train_ids
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train + num_valid)
test_indices = np.arange(num_test)
N_TRAIN = num_train
N_VALID = num_valid
print("validation sample contains %s images. \n" %
(ra.num_valid))
print 'initiate winsol class'
winsol = kaggle_winsol(BATCH_SIZE=BATCH_SIZE,
NUM_INPUT_FEATURES=NUM_INPUT_FEATURES,
PART_SIZE=PART_SIZE,
input_sizes=input_sizes,
LOSS_PATH=TRAIN_LOSS_SF_PATH,
WEIGHTS_PATH=WEIGHTS_PATH,
include_flip=included_flipped)
layer_formats = winsol.layer_formats
layer_names = layer_formats.keys()
print "Build model"
if debug:
print("input size: %s x %s x %s x %s" %
(input_sizes[0][0],
input_sizes[0][1],
NUM_INPUT_FEATURES,
BATCH_SIZE))
if ELLIPSE_FIT:
winsol.init_models_ellipse(input_shape=NUM_ELLIPSE_PARAMS)
else:
# winsol.init_models(final_units=37, loss='mean_squared_error',
# extra_metrics=[
# sliced_accuracy_mean, sliced_accuracy_std])
winsol.init_models()
if debug:
winsol.print_summary(postfix=postfix)
print winsol.models.keys()
if not DONT_LOAD_WEIGHTS:
if get_winsol_weights:
print "Import weights from run with original kaggle winner solution"
if not winsol.getWinSolWeights(debug=True, path=WEIGHTS_PATH):
raise UserWarning('Importing of the winsol weights did not work')
else:
print "Load model weights"
winsol.load_weights(path=WEIGHTS_PATH, postfix=postfix)
winsol.WEIGHTS_PATH = ((WEIGHTS_PATH.split('.', 1)[0] + '_next.h5'))
print "Set up data loading"
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(
3.0, target_size=input_sizes[1])
+ ra.build_augmentation_transform(rotation=45)
]
num_input_representations = len(ds_transforms)
augmentation_params = {
'zoom_range': (1.0 / 1.3, 1.3),
'rotation_range': (0, 360),
'shear_range': (0, 0),
'translation_range': (-4, 4),
'do_flip': True,
}
def create_data_gen():
    """Build the augmented training-data generator.

    Chains realtime augmentation -> brightness augmentation -> a buffered
    multiprocess generator -> the keras input adapter, sized for EPOCHS+1
    passes over the training set.
    """
    augmented_data_gen = ra.realtime_augmented_data_gen(
        num_chunks=N_TRAIN / BATCH_SIZE * (EPOCHS + 1),
        chunk_size=BATCH_SIZE,
        augmentation_params=augmentation_params,
        ds_transforms=ds_transforms,
        target_sizes=input_sizes)

    post_augmented_data_gen = ra.post_augment_brightness_gen(
        augmented_data_gen, std=0.5)

    train_gen = load_data.buffered_gen_mp(
        post_augmented_data_gen, buffer_size=GEN_BUFFER_SIZE)

    input_gen = input_generator(train_gen)
    return input_gen
# # may need doubling the generator,can be done with
# itertools.tee(iterable, n=2)
input_gen = create_data_gen()
def input_gen_data():
    """Yield only the inputs from the global input generator, dropping targets."""
    for data, y in input_gen:
        yield data
def create_valid_gen():
    """Build the fixed (deterministic) augmentation generator for validation."""
    data_gen_valid = ra.realtime_fixed_augmented_data_gen(
        valid_indices,
        'train',
        ds_transforms=ds_transforms,
        chunk_size=N_VALID,
        target_sizes=input_sizes)
    return data_gen_valid
print "Preprocess validation data upfront"
start_time = time.time()
xs_valid = [[] for _ in xrange(num_input_representations)]
for data, length in create_valid_gen():
for x_valid_list, x_chunk in zip(xs_valid, data):
x_valid_list.append(x_chunk[:length])
xs_valid = [np.vstack(x_valid) for x_valid in xs_valid]
# move the colour dimension up
xs_valid = [x_valid.transpose(0, 3, 1, 2) for x_valid in xs_valid]
if ELLIPSE_FIT:
validation_data = ([], y_valid)
for x in xs_valid[0]:
validation_data[0].append(
get_ellipse_kaggle_par(x, num_par=NUM_ELLIPSE_PARAMS)
)
validation_data = (np.asarray(validation_data[0]), validation_data[1])
else:
validation_data = (
[xs_valid[0], xs_valid[1]], y_valid)
validation_data = (
[np.asarray(xs_valid[0]), np.asarray(xs_valid[1])], validation_data[1])
t_val = (time.time() - start_time)
print " took %.2f seconds" % (t_val)
if debug:
print("Free GPU Mem before first step %s MiB " %
(sbcuda.cuda_ndarray.cuda_ndarray.mem_info()[0] / 1024. / 1024.))
def save_exit():
# winsol.save()
print "Done!"
print ' run for %s' % timedelta(seconds=(time.time() - start_time))
exit()
sys.exit(0)
if USE_BLENDED_PREDICTIONS:
predictions = load_data.load_gz(PRED_BLENDED_PATH)
if debug:
print os.path.isfile(PRED_BLENDED_PATH)
print type(predictions)
print predictions
# print len(predictions)
print np.shape(predictions)
elif not REPREDICT_EVERYTIME and os.path.isfile(
target_path_valid) and os.path.isfile(TRAIN_LOSS_SF_PATH):
print 'Loading validation predictions from %s and loss from %s ' % (
target_path_valid, TRAIN_LOSS_SF_PATH)
predictions = load_data.load_gz(target_path_valid)
else:
try:
print ''
print 'Re-evalulating and predicting'
if DO_TRAIN:
# evalHist = winsol.evaluate_gen(
# input_gen, num_events=num_train, postfix='')
# # validation_data[0], y_valid=y_valid, postfix=postfix)
# winsol.save_loss(modelname='model_norm_metrics', postfix=postfix)
# evalHist = winsol.load_loss(
# modelname='model_norm_metrics', postfix=postfix)
print ''
predictions = winsol.predict_gen(
input_gen_data(), num_events=num_train, postfix=postfix)
print "Write predictions to %s" % target_path_valid
load_data.save_gz(target_path_valid, predictions)
except KeyboardInterrupt:
print "\ngot keyboard interuption"
save_exit()
except ValueError, e:
print "\ngot value error, could be the end of the generator in the fit"
print e
save_exit()
# evalHist = winsol.load_loss(modelname='model_norm_metrics', postfix=postfix)
if np.shape(predictions) != np.shape(y_train):
raise ValueError('prediction and validation set have different shapes, %s to %s ' % (
np.shape(predictions), np.shape(y_train)))
# FIXME add this counts decision tree dependent
n_global_cat_pred = [0] * len(output_names)
n_global_cat_valid = [0] * len(output_names)
n_global_cat_agrement = [0] * len(output_names)
n_sliced_cat_pred = [0] * len(output_names)
n_sliced_cat_valid = [0] * len(output_names)
n_sliced_cat_agrement = [0] * len(output_names)
n_sliced_cat_pred_wreq = [0] * len(output_names)
n_sliced_cat_valid_wreq = [0] * len(output_names)
n_sliced_cat_agrement_wreq = [0] * len(output_names)
n_sliced_cat_pred_wcut = [0] * len(output_names)
n_sliced_cat_valid_wcut = [0] * len(output_names)
n_sliced_cat_agrement_wcut = [0] * len(output_names)
for i in range(len(predictions)):
argpred = np.argmax(predictions[i])
argval = np.argmax(y_train[i])
n_global_cat_pred[argpred] += 1
n_global_cat_valid[argval] += 1
if argval == argpred:
n_global_cat_agrement[argval] += 1
c = 0
last_pred = [None]
last_val = [None]
for slice in question_slices:
sargpred = np.argmax(predictions[i][slice])
cutpred = predictions[i][slice][sargpred] / \
sum(predictions[i][slice]) > cut_fraktion
sargval = np.argmax(y_train[i][slice])
cutval = y_train[i][slice][sargval] / \
sum(y_train[i][slice]) > cut_fraktion
n_sliced_cat_pred[sargpred + slice.start] += 1
if cutpred:
n_sliced_cat_pred_wcut[sargpred + slice.start] += 1
n_sliced_cat_valid[sargval + slice.start] += 1
if cutval:
n_sliced_cat_valid_wcut[sargval + slice.start] += 1
if sargval == sargpred:
n_sliced_cat_agrement[sargval + slice.start] += 1
if cutpred:
n_sliced_cat_agrement_wcut[sargval + slice.start] += 1
if slice == question_slices[0]:
n_sliced_cat_pred_wreq[sargpred + slice.start] += 1
n_sliced_cat_valid_wreq[sargval + slice.start] += 1
last_pred += [sargpred + slice.start]
last_val += [sargval + slice.start]
if sargval == sargpred:
n_sliced_cat_agrement_wreq[sargval + slice.start] += 1
else:
sargpred_req = None
sargval_req = None
if not np.argmax(predictions[i][0:3]) == 2:
if question_requierement[c] in last_pred:
sargpred_req = sargpred
n_sliced_cat_pred_wreq[sargpred + slice.start] += 1
last_pred += [sargpred + slice.start]
if question_requierement[c] in last_val:
sargval_req = sargval
n_sliced_cat_valid_wreq[sargval + slice.start] += 1
last_val += [sargval + slice.start]
if sargpred_req == sargval_req and sargpred_req != None:
n_sliced_cat_agrement_wreq[sargval_req + slice.start] += 1
c += 1
def P_base(n_pred, n_agree):
    """Precision: agreements over predictions, or 0. when nothing was predicted."""
    if not n_pred:
        return 0.
    return float(n_agree) / float(n_pred)
def P_i(i, n_pred, n_agree):
    """Precision for class index i given per-class prediction/agreement counts."""
    return P_base(n_pred[i], n_agree[i])
P = functools.partial(P_i, n_pred=n_sliced_cat_pred,
n_agree=n_sliced_cat_agrement)
def R_base(n_pred, n_agree, n_false_neg):
    """Recall: agreements over (predictions + false negatives), 0. when both empty."""
    if not (n_pred or n_false_neg):
        return 0.
    return float(n_agree) / float(n_pred + n_false_neg)
def R_i(i, sli, n_pred, n_agree):
    """Recall for class i within its question slice sli.

    False negatives are the other classes in the slice that were predicted
    but not agreed.  Warns and returns None if i is outside sli.
    """
    if i >= sli.start and i < sli.stop:
        false_neg = sum(n_pred[sli]) - n_pred[i] - (
            sum(n_agree[sli]) - n_agree[i])
        return R_base(n_pred[i], n_agree[i], false_neg)
    else:
        warnings.warn('question number %i is not in slice %s' % (i, sli))
def R_i_slices(i, slices, n_pred, n_agree):
    """Recall for class i, first locating the question slice containing i.

    The for/else warns (and the function returns None) when i falls into
    none of the given slices.
    """
    for sli in slices:
        if i >= sli.start and i < sli.stop:
            return R_i(i, sli, n_pred, n_agree)
        else:
            continue
    else:
        warnings.warn('question number %i is not in one of the slices' % (i))
R = functools.partial(R_i_slices, slices=question_slices,
n_pred=n_sliced_cat_pred, n_agree=n_sliced_cat_agrement)
# def R(i):
# for slice in question_slices:
# if i >= slice.start and i < slice.stop:
# false_neg = sum(n_sliced_cat_pred[slice]) - n_sliced_cat_pred[i] - (
# sum(n_sliced_cat_agrement[slice]) - n_sliced_cat_agrement[i])
# return float(n_sliced_cat_agrement[i]) / float(
# n_sliced_cat_agrement[i] + false_neg)
def P_wcut(i):
    """Precision for class i using the fraction-cut counters (module globals)."""
    return (float(n_sliced_cat_agrement_wcut[i]) / float(
        n_sliced_cat_pred_wcut[i])) if n_sliced_cat_pred_wcut[i] else 0.
def R_wcut(i):
    """Recall for class i using the fraction-cut counters (module globals).

    Returns None implicitly if i falls in no question slice (no warning,
    unlike R_i_slices).
    """
    for slice in question_slices:
        if i >= slice.start and i < slice.stop:
            false_neg = sum(n_sliced_cat_pred_wcut[slice]) -\
                n_sliced_cat_pred_wcut[i] - (
                    sum(n_sliced_cat_agrement_wcut[slice]) -
                    n_sliced_cat_agrement_wcut[i])
            return float(n_sliced_cat_agrement_wcut[i]) / float(
                n_sliced_cat_agrement_wcut[i] + false_neg) if (
                    n_sliced_cat_agrement_wcut[i] + false_neg) else 0.
def P_wreq(i):
    """Precision for class i using the tree-requirement counters (module globals)."""
    return (float(n_sliced_cat_agrement_wreq[i]) / float(
        n_sliced_cat_pred_wreq[i])) if n_sliced_cat_pred_wreq[i] else 0.
def R_wreq(i):
    """Recall for class i using the tree-requirement counters (module globals).

    Returns None implicitly if i falls in no question slice.
    """
    for slice in question_slices:
        if i >= slice.start and i < slice.stop:
            false_neg = sum(n_sliced_cat_pred_wreq[slice]) -\
                n_sliced_cat_pred_wreq[i] - (
                    sum(n_sliced_cat_agrement_wreq[slice]) -
                    n_sliced_cat_agrement_wreq[i])
            return float(n_sliced_cat_agrement_wreq[i]) / float(
                n_sliced_cat_agrement_wreq[i] + false_neg) if (
                    n_sliced_cat_agrement_wreq[i] + false_neg) else 0.
output_dic = {}
output_dic_short_hand_names = {'rmse': 'rmse',
'rmse/mean': 'rmse/mean',
# 'global categorized prediction': 'pred',
# 'global categorized valid': 'val',
# 'global categorized agree': 'agree',
'slice categorized prediction': 'qPred',
'slice categorized valid': 'qVal',
'slice categorized agree': 'qAgree',
'precision': 'P',
'recall': 'R',
# 'slice categorized prediction including tree requierement': 'qPred_req',
# 'slice categorized valid including tree requieremnet': 'qVal_req',
# 'slice categorized agree including tree requirement': 'qAgree_req',
# 'precision including tree requierement': 'P_req',
# 'recall including tree requierement': 'R_req'
}
# rmse_valid = evalHist['rmse'][-1]
rmse_augmented = np.sqrt(np.mean((y_train - predictions)**2))
# print " MSE (last iteration):\t%.6f" % float(rmse_valid)
# print ' sliced acc. (last iteration):\t%.4f' % float(evalHist['sliced_accuracy_mean'][-1])
# print ' categorical acc. (last iteration):\t%.4f' %
# float(evalHist['categorical_accuracy'][-1])
print " RMSE (augmented):\t%.6f RMSE/mean: %.2f " % (float(rmse_augmented),
float(rmse_augmented) / float(np.mean(
y_train)))
print " mean P (augmented):\t%.3f mean R (augmented):\t%.3f " % (
np.mean([P(i) for i in range(VALID_CORR_OUTPUT_FILTER.shape[0])]),
np.mean([R(i) for i in range(VALID_CORR_OUTPUT_FILTER.shape[0])]))
print " mean P (with Cut):\t%.3f mean R (with Cut):\t%.3f ,\t cut is on %s, mean cut eff. %.2f" % (
np.mean([P_wcut(i) for i in range(VALID_CORR_OUTPUT_FILTER.shape[0])]),
np.mean([R_wcut(i) for i in range(VALID_CORR_OUTPUT_FILTER.shape[0])]),
cut_fraktion,
np.mean([float(n_sliced_cat_pred_wcut[i]) / float(
n_sliced_cat_pred[i]) if n_sliced_cat_pred[i] else 0.
for i in range(VALID_CORR_OUTPUT_FILTER.shape[0])]))
P_wcut_mean_noEmpty = []
for i in range(VALID_CORR_OUTPUT_FILTER.shape[0]):
if n_sliced_cat_pred_wcut[i]:
P_wcut_mean_noEmpty.append(P_wcut(i))
P_wcut_mean_noEmpty = np.mean(P_wcut_mean_noEmpty)
R_wcut_mean_noEmpty = []
for i in range(VALID_CORR_OUTPUT_FILTER.shape[0]):
if n_sliced_cat_pred_wcut[i]:
R_wcut_mean_noEmpty.append(R_wcut(i))
R_wcut_mean_noEmpty = np.mean(R_wcut_mean_noEmpty)
cut_eff_noEmpty = []
for i in range(VALID_CORR_OUTPUT_FILTER.shape[0]):
if n_sliced_cat_pred[i]:
cut_eff_noEmpty.append(float(n_sliced_cat_pred_wcut[i]) / float(
n_sliced_cat_pred[i]))
cut_eff_noEmpty = np.mean(cut_eff_noEmpty)
print " without zero entry classes:\n mean P (with Cut):\t%.3f mean R (with Cut):\t%.3f" % (
P_wcut_mean_noEmpty,
R_wcut_mean_noEmpty)
print 'mean cut eff, without zero uncuted pred. %.2f' % (cut_eff_noEmpty)
print " MSE output wise (augmented): P(recision), R(ecall)"
qsc = 0
for i in xrange(0, VALID_CORR_OUTPUT_FILTER.shape[0]):
oneMSE = np.sqrt(np.mean((y_train.T[i] - predictions.T[i])**2))
if not str(qsc) in output_dic.keys():
output_dic[str(qsc)] = {}
output_dic[str(qsc)][output_names[i]] = {'rmse': float(oneMSE),
'rmse/mean': float(oneMSE / np.mean(y_train.T[i])),
# 'global categorized prediction': n_global_cat_pred[i],
# 'global categorized valid': n_global_cat_valid[i],
# 'global categorized agree': n_global_cat_agrement[i],
'slice categorized prediction': n_sliced_cat_pred[i],
'slice categorized valid': n_sliced_cat_valid[i],
'slice categorized agree': n_sliced_cat_agrement[i],
'precision': P(i),
'recall': R(i),
# 'slice categorized prediction including tree requierement': n_sliced_cat_pred_wreq[i],
# 'slice categorized valid including tree requieremnet': n_sliced_cat_valid_wreq[i],
# 'slice categorized agree including tree requirement': n_sliced_cat_agrement_wreq[i],
# 'precision including tree requierement': P_wreq(i),
# 'recall including tree requierement': R_wreq(i)
}
if i in [slice.start for slice in question_slices]:
print '----------------------------------------------------'
qsc += 1
if P(i) < 0.5: # oneMSE / np.mean(y_valid.T[i]) > 1.2 * rmse_augmented / np.mean(
# y_valid):
print colored(" output % s ( % s): \t%.6f RMSE/mean: % .2f \t N sliced pred., valid, agree % i, % i, % i, P % .3f, R % .3f, wCut(eff.%.2f): pred., valid, agree % i, % i, % i, P % .3f, R % .3f" % (
output_names[i], i, oneMSE, oneMSE /
np.mean(y_train.T[i]),
# n_global_cat_pred[i], n_global_cat_valid[i],
# n_global_cat_agrement[i],
n_sliced_cat_pred[i], n_sliced_cat_valid[i], n_sliced_cat_agrement[i],
P(i), R(i),
float(n_sliced_cat_pred_wcut[i]) / float(
n_sliced_cat_pred[i]) if n_sliced_cat_pred[i] else 0.,
n_sliced_cat_pred_wcut[i], n_sliced_cat_valid_wcut[i],
n_sliced_cat_agrement_wcut[i],
P_wcut(i), R_wcut(i)
),
'red')
elif P(i) > 0.9: # oneMSE / np.mean(y_valid.T[i]) < 0.8 * rmse_augmented / np.mean(
# y_valid):
print colored(" output % s ( % s): \t%.6f RMSE/mean: % .2f \t N sliced pred., valid, agree % i, % i, % i, P % .3f, R % .3f, wCut(eff.%.2f): pred., valid, agree % i, % i, % i, P % .3f, R % .3f " % (
output_names[i], i, oneMSE, oneMSE / np.mean(y_train.T[i]),
# n_global_cat_pred[i], n_global_cat_valid[i],
# n_global_cat_agrement[i],
n_sliced_cat_pred[i], n_sliced_cat_valid[i], n_sliced_cat_agrement[i],
P(i), R(i),
float(n_sliced_cat_pred_wcut[i]) / float(
n_sliced_cat_pred[i]) if n_sliced_cat_pred[i] else 0.,
n_sliced_cat_pred_wcut[i], n_sliced_cat_valid_wcut[i],
n_sliced_cat_agrement_wcut[i],
P_wcut(i), R_wcut(i)
),
'green')
else:
print (" output % s ( % s): \t%.6f RMSE/mean: % .2f \t N sliced pred., valid, agree % i, % i, % i, P % .3f, R % .3f, wCut(eff.%.2f): pred., valid, agree % i, % i, % i, P % .3f, R % .3f " %
(output_names[i], i, oneMSE, oneMSE / np.mean(y_train.T[i]),
# n_global_cat_pred[i], n_global_cat_valid[i],
# n_global_cat_agrement[i],
n_sliced_cat_pred[i], n_sliced_cat_valid[i],
n_sliced_cat_agrement[i],
P(i), R(i),
float(n_sliced_cat_pred_wcut[i]) / float(
n_sliced_cat_pred[i]) if n_sliced_cat_pred[i] else 0.,
n_sliced_cat_pred_wcut[i], n_sliced_cat_valid_wcut[i],
n_sliced_cat_agrement_wcut[i],
P_wcut(i), R_wcut(i)
)
)
with open(TXT_OUTPUT_PATH, 'a+') as f:
json.dump(output_dic_short_hand_names, f)
f.write('\n')
json.dump(output_dic, f)
f.write('\n')
imshow_c = functools.partial(
plt.imshow, interpolation='none') # , vmin=0.0, vmax=1.0)
imshow_g = functools.partial(
plt.imshow, interpolation='none', cmap=plt.get_cmap('gray')) # , vmin=0.0, vmax=1.0)
def try_different_cut_fraktion(cut_fraktions=map(lambda x: float(x) / 20.,
                                                 range(8, 20)),
                               figname='different_cuts.eps'):
    """Scan confidence-cut fractions and plot efficiency/precision/recall.

    For each candidate cut value, a per-question answer is only counted
    when the winning probability fraction inside its question slice exceeds
    the cut.  The efficiency, significance, precision and recall curves are
    saved to *figname*; a precision-recall curve is saved to
    'ROC_train_test.eps'.

    NOTE(review): relies on module globals defined earlier in this script
    (predictions, y_train, question_slices, output_names, P_i, R_i_slices,
    n_sliced_cat_pred, n_sliced_cat_agrement, VALID_CORR_OUTPUT_FILTER,
    debug, plt, mlines) -- it cannot run standalone.
    """
    print
    print 'Testing different fraction cuts:'
    cut_fraktions = cut_fraktions
    # One counter list per cut value; each list has one slot per output.
    n_wcut_pred = []
    n_wcut_valid = []
    n_wcut_agree = []
    for _ in cut_fraktions:
        n_wcut_pred.append([0] * len(output_names))
        n_wcut_valid.append([0] * len(output_names))
        n_wcut_agree.append([0] * len(output_names))
    for i in range(len(predictions)):
        for slic in question_slices:
            # Winning answer and its probability fraction within the
            # question slice, for both prediction and ground truth.
            sargpred = np.argmax(predictions[i][slic])
            q_frak_pred = predictions[i][slic][sargpred] / \
                sum(predictions[i][slic])
            sargval = np.argmax(y_train[i][slic])
            q_frak_valid = y_train[i][slic][sargval] / \
                sum(y_train[i][slic])
            for j, cut_val in enumerate(cut_fraktions):
                # NOTE(review): 'valid' counts are binned by sargpred and
                # 'pred' counts by sargval -- looks swapped; confirm intent.
                if q_frak_valid > cut_val:
                    n_wcut_valid[j][sargpred + slic.start] += 1
                if q_frak_pred > cut_val:
                    n_wcut_pred[j][sargval + slic.start] += 1
                    if sargval == sargpred:
                        n_wcut_agree[j][sargval + slic.start] += 1
    Ps_no_zero = []
    Rs_no_zero = []
    effs = []
    signigicance = []  # agree/sqrt(pred-agree)
    effs_sig = []
    # Mean precision / recall over all outputs, one entry per cut value.
    Ps = [np.mean([P_i(i, param[0], param[1]) for i in range(
        VALID_CORR_OUTPUT_FILTER.shape[0])]) for param in zip(n_wcut_pred,
                                                              n_wcut_agree)]
    Rs = [np.mean([R_i_slices(i, slices=question_slices, n_pred=param[0],
                              n_agree=param[1]) for i in range(
        VALID_CORR_OUTPUT_FILTER.shape[0])])
        for param in zip(n_wcut_pred, n_wcut_agree)]
    if debug:
        print n_sliced_cat_pred[0:3]
        print n_wcut_pred[0][0:3]

    def _ePReS(n_pred, n_agree):
        # Helper: mean efficiency, precision, recall, significance and
        # agreement efficiency for one cut, skipping empty counters.
        eff_mean = []
        eff_mean_s = []
        P_wcut_mean_noEmpty = []
        R_wcut_mean_noEmpty = []
        signi = []
        for i in range(VALID_CORR_OUTPUT_FILTER.shape[0]):
            if n_sliced_cat_pred[i]:
                # Efficiency is measured relative to the loosest cut.
                eff_mean.append(float(n_pred[i]) / float(
                    n_wcut_pred[0][i]))
            if n_sliced_cat_agrement[i] and n_wcut_agree[0][i]:
                eff_mean_s.append(float(n_agree[i]) / float(
                    n_wcut_agree[0][i]))
            if n_pred[i]:
                P_wcut_mean_noEmpty.append(P_i(i, n_pred, n_agree))
                R_wcut_mean_noEmpty.append(R_i_slices(
                    i, question_slices, n_pred, n_agree))
            if n_agree[i]:
                signi.append(
                    float(n_agree[i]) / np.sqrt(float(n_pred[i]
                                                      - n_agree[i])))
        return (np.mean(eff_mean),
                np.mean(P_wcut_mean_noEmpty),
                np.mean(R_wcut_mean_noEmpty),
                np.mean(signi),
                np.mean(eff_mean_s))

    for p, a in zip(n_wcut_pred, n_wcut_agree):
        _e, _P, _R, _s, _es = _ePReS(p, a)
        Ps_no_zero.append(_P)
        Rs_no_zero.append(_R)
        effs.append(_e)
        effs_sig.append(_es)
        signigicance.append(_s)
    if debug:
        print 'cut_fraktions'
        print cut_fraktions
        print 'effs'
        print effs
        print 'effs_sig'
        print effs_sig
        print 'signigicance / 120'
        print [s / 120. for s in signigicance]
        print 'Ps'
        print Ps
        print 'Rs'
        print Rs
        print 'Ps_no_zero'
        print Ps_no_zero
        print 'Rs_no_zero'
        print Rs_no_zero
    # Top panel: all metrics as a function of the cut value.
    plots = []
    label_h = []
    plt.subplot(211)
    plots.append(plt.plot(cut_fraktions, effs, 'r-', label="eff"))
    label_h.append(mlines.Line2D([], [], color='red', label='eff'))
    plots.append(plt.plot(
        cut_fraktions, effs_sig, 'b-', label="eff sig"))
    label_h.append(mlines.Line2D([], [], color='blue', label='eff sig'))
    plots.append(plt.plot(cut_fraktions, [
        s / 120. for s in signigicance], 'g-', label="signif/120"))
    label_h.append(mlines.Line2D([], [], color='green', label='signif/120'))
    plots.append(plt.plot(cut_fraktions, Ps_no_zero, 'ro', label="Ps no zero"))
    label_h.append(mlines.Line2D([], [], color='red', marker='o',
                                 markersize=15, linewidth=0, label='P no 0.'))
    plots.append(plt.plot(cut_fraktions, Rs_no_zero, 'bo', label="Rs no zero"))
    label_h.append(mlines.Line2D([], [], color='blue', marker='o',
                                 markersize=15, linewidth=0, label='R no 0.'))
    plots.append(plt.plot(cut_fraktions, Ps, 'r.', label="Ps"))
    label_h.append(mlines.Line2D([], [], color='red', marker='.',
                                 markersize=15, linewidth=0, label='P'))
    plots.append(plt.plot(cut_fraktions, Rs, 'b.', label="Rs"))
    label_h.append(mlines.Line2D([], [], color='blue', marker='.',
                                 markersize=15, linewidth=0, label='R'))
    plt.legend(handles=label_h, bbox_to_anchor=(
        0, -0.1), loc=2, borderaxespad=0.)
    plt.xlabel('frac')
    plt.ylabel('Cut Value')
    # plt.show()
    plt.savefig(figname)
    # Second figure: precision vs. recall, with and without empty outputs.
    plots = []
    label_h = []
    plt.subplot(121)
    plots.append(plt.plot(Rs_no_zero, Ps_no_zero, 'r-', label="no zero"))
    label_h.append(mlines.Line2D([], [], color='red', label='no zero'))
    plots.append(plt.plot(Rs, Ps, 'b-', label=""))
    label_h.append(mlines.Line2D([], [], color='blue', label='with zero'))
    plt.legend(handles=label_h, bbox_to_anchor=(
        1.05, 1), loc=2, borderaxespad=0.)
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.savefig('ROC_train_test.eps')
def x_category_precision(predictions=predictions, y_valid=y_train,
conditions=spiral_or_ellipse_cat):
counts = [[0, 0, 0]]
for _ in conditions:
counts += [[0, 0, 0]]
# if debug:
# print np.shape(counts)
for i, p in enumerate(predictions):
predicted = [True] * (len(conditions) + 1)
valid = [True] * (len(conditions) + 1)
for j, cond in enumerate(conditions):
for sup_cond in cond:
predicted[j] *= (np.argmax(
p[question_slices[sup_cond[0]]]) == sup_cond[1])
valid[j] *= (np.argmax(
y_valid[i][question_slices[sup_cond[0]]]) == sup_cond[1])
if predicted[j]:
counts[j][0] += 1
if valid[j]:
counts[j][1] += 1
if predicted[j] and valid[j]:
counts[j][2] += 1
predicted[-1] = (np.sum(predicted[0:-1]) == 0)
valid[-1] = (np.sum(valid[0:-1]) == 0)
if predicted[-1]:
counts[-1][0] += 1
if valid[-1]:
counts[-1][1] += 1
if predicted[-1] and valid[-1]:
counts[-1][2] += 1
if np.sum(predicted) != 1 or np.sum(valid) != 1:
raise UserWarning(
'conditions in x_category_precision were not exclusive for image %s') % i
# if debug and not i % 1000:
# print predicted
# print valid
# print counts
# if debug:
# print counts
P_s = [(float(c[2]) / c[0]) for c in counts]
R_s = [(float(c[2]) / (2 * c[2] + sum(
[d[0] - d[2] for d in counts]) - c[0]))for c in counts]
print
print 'mean P:\t %.3f' % np.mean(P_s)
print 'mean R:\t %.3f' % np.mean(R_s)
for i, c in enumerate(counts):
print 'condition %s: \t pred,val,agree: %.3f \t P: %.3f R: %.3f' % (
i, c, P_s[i], R_s[i])
def valid_scatter():
    """Write one target-vs-prediction scatter plot per output.

    Plots are saved as JPEGs under IMAGE_OUTPUT_PATH/ValidScatter, one per
    output, each with a linear least-squares fit line and the RMSE in a
    text box.  Relies on module globals: predictions, y_valid,
    output_names, VALID_CORR_OUTPUT_FILTER, IMAGE_OUTPUT_PATH.
    """
    print 'Do scatter plots'
    print ' they will be saved in the folder %s ' % IMAGE_OUTPUT_PATH
    if not os.path.isdir(IMAGE_OUTPUT_PATH):
        os.mkdir(IMAGE_OUTPUT_PATH)
    # plt.gray()
    os.chdir(IMAGE_OUTPUT_PATH)
    if not os.path.isdir("ValidScatter"):
        os.mkdir("ValidScatter")
    os.chdir("ValidScatter")
    for i in xrange(0, VALID_CORR_OUTPUT_FILTER.shape[0]):
        y = predictions.T[i]
        x = y_valid.T[i]
        fig, ax = plt.subplots()
        # First-degree polynomial fit = straight regression line.
        fit = np.polyfit(x, y, deg=1)
        ax.plot(x, fit[0] * x + fit[1], color='red')
        ax.scatter(x, y)
        plt.ylabel('prediction')
        plt.xlabel('target')
        plt.title("valid %s" % (output_names[i]))
        oneMSE = np.sqrt(np.mean((y_valid.T[i] - predictions.T[i])**2))
        # NOTE(review): the text position (60, .025) is in data
        # coordinates -- it may fall outside the visible axes; confirm.
        plt.text(60, .025, 'RMSE: %s , RMSE/mean: %s ' %
                 (oneMSE, oneMSE / np.mean(y_valid.T[i])))
        plt.savefig("validScatter_%s_%s.jpg" % (i, output_names[i]))
        plt.close()
    # Undo the two chdir calls above.
    os.chdir("../..")
def normalize_img(img):
    """Linearly rescale *img* so its values span [0, 1].

    NOTE: a constant image divides by zero (numpy emits inf/nan),
    exactly as the original behaved.
    """
    lo = np.amin(img)
    span = np.amax(img) - lo
    return (img - lo) / span
def _img_wall(img, norm=False):
dim = len(np.shape(img))
shape = np.shape(img)
n_board_side = int(np.ceil(np.sqrt(shape[0])))
n_board_square = int(n_board_side**2)
if dim == 3:
img_w = shape[1]
img_h = shape[2]
wall = np.zeros((n_board_side * img_w + n_board_side + 1,
n_board_side * img_h + n_board_side + 1))
elif dim == 4:
img_w = shape[2]
img_h = shape[3]
wall = np.zeros((shape[1], n_board_side * img_w + n_board_side + 1,
n_board_side * img_h + n_board_side + 1))
else:
raise TypeError(
'Wrong dimension %s of the input' % dim)
pos = [0, 0]
for i in img:
if pos[0] >= n_board_side:
pos[0] = 0
pos[1] = pos[1] + 1
x0 = pos[0] * (img_w + 1) + 1
x1 = (pos[0] + 1) * img_w + pos[0] + 1
y0 = pos[1] * (img_h + 1) + 1
y1 = (pos[1] + 1) * img_h + pos[1] + 1
i_ = normalize_img(i) if norm else i
if dim == 3:
wall[x0:x1, y0:y1] = i_
else:
wall[:, x0:x1, y0:y1] = i_
pos[0] = pos[0] + 1
return wall
def highest_conv_activation(img_nr=None, img_id=None, layername='conv_0',
                            n_highest=5,
                            order='both', path_base='highest_activations',
                            verbose=1):
    """Find the most strongly activated filters of a layer for one image.

    Either *img_nr* (index into valid_ids) or *img_id* selects the image;
    the id wins when both are given.  'global' ranks filters by their mean
    activation map, 'local' by single peak positions; 'both' does both.
    Results are dumped to '<path_base>_<img_id>.json'.

    Relies on module globals: valid_ids, validation_data, winsol, debug.
    """
    if img_nr and img_id:
        print 'Warning: got image number and id, will use id.'
        img_nr = list(valid_ids).index(img_id)
    elif img_id:
        img_nr = list(valid_ids).index(img_id)
    elif img_nr:
        img_id = valid_ids[img_nr]
    if verbose:
        print 'highest activations in layer %s of image %s (%s)' % (layername,
                                                                    img_id, img_nr)
    # Two network inputs: the 0-degree and 45-degree rotated image stacks.
    input_img = [np.asarray([validation_data[0][0][img_nr]]),
                 np.asarray([validation_data[0][1][img_nr]])]
    save_dic = {}
    if order == 'both' or order == 'global':
        global_max = []
        l_out = np.asarray(winsol.get_layer_output(
            layer=layername, input_=input_img))
        if debug:
            print np.shape(l_out)
        if verbose:
            print '\t global'
        # Mean over the spatial axes gives one score per filter channel.
        l_out = np.mean(l_out, axis=(2, 3))
        if debug:
            print np.shape(l_out)
        for i in range(n_highest):
            # Repeatedly take the argmax and zero it out to get the top-n.
            max_ch = np.argmax(l_out)
            val = l_out[0, max_ch]
            l_out[0, max_ch] = 0.
            global_max.append((max_ch, float(val)))
            if verbose:
                print '\t filter %i, with mean activation %.3f'\
                    % global_max[-1]
        save_dic['global'] = global_max
    if order == 'both' or order == 'local':
        local_max = []
        l_out = np.asarray(winsol.get_layer_output(
            layer=layername, input_=input_img))
        if debug:
            print np.shape(l_out)
        if verbose:
            print '\t local:'
        for i in range(n_highest):
            # Integer division (Python 2 '/') decodes channel and row from
            # the flat argmax index.
            max_ch = np.argmax(l_out[0]) / l_out.shape[2] / l_out.shape[3]
            x = np.argmax(l_out[0, max_ch]) / l_out.shape[3]
            y = np.argmax(l_out[0, max_ch, x])
            val = l_out[0, max_ch, x, y]
            l_out[0, max_ch, x, y] = 0.
            # Report the peak position as a fraction of the map size.
            x = float(x) / float(l_out.shape[2])
            y = float(y) / float(l_out.shape[3])
            local_max.append((max_ch, x, y, float(val)))
            if verbose:
                print '\t filter %i at %.2f %.2f, with activation %.3f'\
                    % local_max[-1]
        save_dic['local'] = local_max
    with open(path_base + '_' + str(img_id) + '.json', 'w') as f:
        json.dump(save_dic, f)
def print_filters(image_nr=0, norm=False):
if not os.path.isdir(IMAGE_OUTPUT_PATH):
os.mkdir(IMAGE_OUTPUT_PATH)
print "Print filtered"
image_nr = image_nr
if type(image_nr) == int:
input_img = [np.asarray([validation_data[0][0][image_nr]]),
np.asarray([validation_data[0][1][image_nr]])]
elif image_nr == 'ones':
input_img = [np.ones(shape=(np.asarray(
[validation_data[0][0][0]]).shape)), np.ones(
shape=(np.asarray([validation_data[0][0][0]]).shape))]
elif image_nr == 'zeros':
input_img = [np.zeros(shape=(np.asarray(
[validation_data[0][0][0]]).shape)), np.zeroes(
shape=(np.asarray([validation_data[0][0][0]]).shape))]
print ' getting outputs'
intermediate_outputs = {}
for n in layer_names:
intermediate_outputs[n] = np.asarray(winsol.get_layer_output(
n, input_=input_img))
intermediate_outputs[n] = intermediate_outputs[n][0]
if layer_formats[n] <= 0:
board_side = int(np.ceil(np.sqrt(len(intermediate_outputs[n]))))
board_square = int(board_side**2)
intermediate_outputs[n] = np.append(
intermediate_outputs[n], [0] * (board_square - len(
intermediate_outputs[n])))
intermediate_outputs[n] = np.reshape(
intermediate_outputs[n], (board_side, board_side))
os.chdir(IMAGE_OUTPUT_PATH)
intermed_out_dir = 'intermediate_outputs'
if norm:
intermed_out_dir += '_norm'
if not os.path.isdir(intermed_out_dir):
os.mkdir(intermed_out_dir)
os.chdir(intermed_out_dir)
print ' output images will be saved at %s/%s' % (IMAGE_OUTPUT_PATH,
intermed_out_dir)
print ' plotting outputs'
if type(image_nr) == int:
imshow_c(np.transpose(input_img[0][0], (1, 2, 0)))
plt.savefig('input_fig_%s_rotation_0.jpg' % (image_nr))
plt.close()
imshow_c(np.transpose(input_img[1][0], (1, 2, 0)))
plt.savefig('input_fig_%s_rotation_45.jpg' % (image_nr))
plt.close()
for i in range(len(input_img[0][0])):
imshow_g(input_img[0][0][i])
plt.savefig('input_fig_%s_rotation_0_dim_%s.jpg' % (image_nr, i))
plt.close()
for i in range(len(input_img[1][0])):
imshow_g(input_img[1][0][i])
plt.savefig('input_fig_%s_rotation_45_dim_%s.jpg' %
(image_nr, i))
plt.close()
for n in layer_names:
if layer_formats[n] > 0:
imshow_g(_img_wall(intermediate_outputs[n], norm))
if not norm:
plt.colorbar()
plt.savefig('output_fig_%s_%s.jpg' %
(image_nr, n))
plt.close()
else:
imshow_g(normalize_img(
intermediate_outputs[n]) if norm else intermediate_outputs[n])
if not norm:
plt.colorbar()
plt.savefig('output_fig_%s_%s.jpg' %
(image_nr, n))
plt.close()
os.chdir('../..')
def print_weights(norm=False):
    """Save images of the convolution kernels and biases of each layer.

    Only layers with layer_formats == 1 (4-D convolution weights) are
    rendered; kernels are tiled per output channel via _img_wall.  Images
    go below IMAGE_OUTPUT_PATH/weights[_normalized].

    Relies on module globals: winsol, layer_formats, imshow_g,
    IMAGE_OUTPUT_PATH, _img_wall.
    """
    if not os.path.isdir(IMAGE_OUTPUT_PATH):
        os.mkdir(IMAGE_OUTPUT_PATH)
    os.chdir(IMAGE_OUTPUT_PATH)
    weights_out_dir = 'weights'
    if norm:
        weights_out_dir += '_normalized'
    if not os.path.isdir(weights_out_dir):
        os.mkdir(weights_out_dir)
    os.chdir(weights_out_dir)
    print 'Printing weights'
    for name in layer_formats:
        if layer_formats[name] == 1:
            w, b = winsol.get_layer_weights(layer=name)
            # Move the output-channel axis first so iteration below walks
            # one kernel board per channel.
            w = np.transpose(w, (3, 0, 1, 2))
            w = _img_wall(w, norm)
            b = _img_wall(b, norm)
        # elif layer_formats[name] == 0:
        #     w, b = winsol.get_layer_weights(layer=name)
        #     w = _img_wall(w, norm)
        #     b = _img_wall(b, norm)
        else:
            continue
        for i in range(len(w)):
            imshow_g(w[i])
            if not norm:
                plt.colorbar()
            plt.savefig('weight_layer_%s_kernel_channel_%s.jpg' % (name, i))
            plt.close()
        imshow_g(b)
        if not norm:
            plt.colorbar()
        plt.savefig('weight_layer_%s_bias.jpg' % (name))
        plt.close()
    os.chdir('../..')
def get_best_id(category_name, n=1):
    """Return the image id(s) with the highest prediction for one output.

    Builds a structured numpy array of (img_nr, one field per output
    name), sorts it by *category_name* and returns the single best image
    id (n == 1) or the slice of top entries.

    NOTE(review): for n > 1 the slice [-1 - n:] yields n + 1 ids rather
    than n -- confirm whether that is intended.

    Relies on module globals: output_names, predictions, valid_ids.
    """
    dtype = []
    dtype.append(('img_nr', int))
    for q in output_names:
        dtype.append((q, float))
    # Debug output: dtype length vs. prediction row shape.
    print len(dtype)
    print len(predictions[0])
    print type(predictions[0])
    print len(tuple(np.append(np.array(valid_ids[0]), predictions[0])))
    predictions_dtyped = np.array([], dtype=dtype)
    for id, line in zip(valid_ids, predictions):
        predictions_dtyped = np.append(
            predictions_dtyped, np.asarray(
                tuple(np.append(np.array(id), line)), dtype=dtype))
    return np.sort(predictions_dtyped, order=category_name)['img_nr'][
        -1] if n == 1 else np.sort(predictions_dtyped, order=category_name)[
        'img_nr'][
        -1 - n: len(predictions_dtyped['img_nr'])]
# --- script entry: run the cut-fraction scan and exit ---
# The commented calls below are alternative analyses kept for reference.
# x_category_precision(predictions=predictions, y_valid=y_valid)
# # print_weights(norm=True)
# print_weights(norm=True)
# valid_scatter()
# print_filters(2, norm=True)
# #print_filters(3, norm=True)
# highest_conv_activation(img_id=get_best_id('RoundCompletly'))
# highest_conv_activation(img_id=get_best_id('Spiral3Arm'))
# print_filters(list(valid_ids).index(get_best_id('RoundCompletly')))
# print_filters(list(valid_ids).index(get_best_id('Spiral3Arm')))
# print
# print
# print 'RoundCompletly:'
# for id in get_best_id('RoundCompletly', 5):
#     print 'predicted with %.3f' % predictions[list(valid_ids).index(id)][
#         output_names.index('RoundCompletly')]
#     highest_conv_activation(img_id=id)
# print
# print
# print 'Spiral3Arm:'
# for id in get_best_id('Spiral3Arm', 5):
#     print 'predicted with %.3f' % predictions[list(valid_ids).index(id)][
#         output_names.index('Spiral3Arm')]
#     highest_conv_activation(img_id=id)
# print
try_different_cut_fraktion(figname='cuts_train_data.eps')
# print_weights()
# print_weights(True)
save_exit()
| {
"repo_name": "garbersc/keras-galaxies",
"path": "predict_convnet_on_train_data.py",
"copies": "1",
"size": "44757",
"license": "bsd-3-clause",
"hash": 297940264379752000,
"line_mean": 35.3287337662,
"line_max": 267,
"alpha_frac": 0.5589963581,
"autogenerated": false,
"ratio": 3.0261663286004055,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9083113707453517,
"avg_score": 0.00040979584937758646,
"num_lines": 1232
} |
# at some point try
# from keras.utils import plot_model
# plot_model(model, to_file='model.png')
import theano.sandbox.cuda.basic_ops as sbcuda
import numpy as np
import load_data
import realtime_augmentation as ra
import time
import sys
import glob
import json
from datetime import timedelta
import os
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from termcolor import colored
import functools
# Module configuration: where weight-history snapshots are read from and
# where the generated animation images are written.
debug = True
weights_dir = 'weight_history'
output_dir = 'img_weight_hist'
# Convenience wrappers around plt.imshow: a colour and a grayscale variant,
# both with interpolation disabled so individual pixels stay visible.
imshow_c = functools.partial(
    plt.imshow, interpolation='none')  # , vmin=0.0, vmax=1.0)
imshow_g = functools.partial(
    plt.imshow, interpolation='none', cmap=plt.get_cmap('gray'))  # , vmin=0.0, vmax=1.0)
def normalize_img(img):
    """Linearly rescale *img* so its values span [0, 1].

    NOTE: a constant image divides by zero (numpy emits inf/nan),
    exactly as the original behaved.
    """
    lo = np.amin(img)
    span = np.amax(img) - lo
    return (img - lo) / span
def _img_wall(img, norm=False):
dim = len(np.shape(img))
shape = np.shape(img)
n_board_side = int(np.ceil(np.sqrt(shape[0])))
n_board_square = int(n_board_side**2)
if dim == 3:
img_w = shape[1]
img_h = shape[2]
wall = np.zeros((n_board_side * img_w + n_board_side + 1,
n_board_side * img_h + n_board_side + 1))
elif dim == 4:
img_w = shape[2]
img_h = shape[3]
wall = np.zeros((shape[1], n_board_side * img_w + n_board_side + 1,
n_board_side * img_h + n_board_side + 1))
else:
raise TypeError(
'Wrong dimension %s of the input' % dim)
pos = [0, 0]
for i in img:
if pos[0] >= n_board_side:
pos[0] = 0
pos[1] = pos[1] + 1
x0 = pos[0] * (img_w + 1) + 1
x1 = (pos[0] + 1) * img_w + pos[0] + 1
y0 = pos[1] * (img_h + 1) + 1
y1 = (pos[1] + 1) * img_h + pos[1] + 1
i_ = normalize_img(i) if norm else i
if dim == 3:
wall[x0:x1, y0:y1] = i_
else:
wall[:, x0:x1, y0:y1] = i_
pos[0] = pos[0] + 1
return wall
def print_weights(norm=False):
    """Render the dense-output weight history as one animated GIF per channel.

    Loads the sequence of saved .npy weight snapshots from weights_dir,
    reshapes each dense weight matrix into three square boards (one per
    channel), and writes an ArtistAnimation GIF per channel into
    output_dir via imagemagick.

    NOTE(review): *norm* only selects the output directory name here; the
    frames themselves are not normalized -- confirm whether that is
    intended.  Relies on module globals: weights_dir, output_dir, debug,
    plt, animation.
    """
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    weights_history_paths = glob.glob(
        os.path.join(weights_dir, "weights_of_dense_output_*.npy")
    )
    if debug:
        print len(weights_history_paths)
    # for i, n in enumerate(weights_history_paths):
    #     if n.find('weights_of_conv_0_') < 0:
    #         weights_history_paths.remove(n)
    print 'There are %s weight files selected' % len(weights_history_paths)
    if not len(weights_history_paths):
        print weights_history_paths
        raise Warning('No weight files found in ' + weights_dir)
    # Rebuild the common filename prefix so snapshots can be loaded in
    # epoch order as <base_path><j>.npy.
    base_path_l = weights_history_paths[0].split('_')[0:-1]
    base_path = ''
    for s in base_path_l:
        base_path = base_path + s + '_'
    weights_out_dir = 'weights'
    if norm:
        weights_out_dir += '_normalized'
    if not os.path.isdir(weights_out_dir):
        os.mkdir(weights_out_dir)
    print 'Printing weights'
    weight_imgs = []
    bias_imgs = []
    figs = []  # plt.figure()
    axs = []  # fig.add_subplot(111)
    for j in range(0, len(weights_history_paths), 1):
        # if j > 126:  # BAD, BAD, BAD
        #     break
        w, b = np.load(base_path + str(j) + '.npy')
        # w = np.transpose(w, (3, 0, 1, 2))
        if weights_history_paths[j].find('dense') >= 0:
            if j == 0:
                print w.shape
                print w.shape[0]
                print b.shape
            # Pad each weight column with zeros to the next square, then
            # reshape into 3 boards of board_side x board_side.
            board_side = int(np.ceil(np.sqrt(w.shape[0])))
            board_square = int(board_side**2)
            w = np.transpose(w, (1, 0))
            wn = []
            for wi in w:
                wn.append(np.append(
                    wi, [0] * (board_square - len(wi))))
            w = np.reshape(
                np.array(wn), (3, board_side, board_side,))
            # w = _img_wall(w, norm)
            # b = _img_wall(b, norm)
        for i in range(len(w)):
            # First snapshot (j == 0) creates the figure/axis per channel;
            # later snapshots append animation frames to it.
            if j:
                weight_imgs[i].append(
                    (axs[i].imshow(w[i], interpolation='none',
                                   cmap=plt.get_cmap('gray')),
                     axs[i].set_title('')))
            else:
                figs.append(plt.figure())
                axs.append(figs[-1].add_subplot(111))
                weight_imgs.append([(axs[i].imshow(w[i], interpolation='none',
                                                   cmap=plt.get_cmap('gray')),
                                     axs[i].set_title(''))])
        # if not norm:
        #     plt.colorbar()
        if not j:
            fig_b = plt.figure()
            ax_b = fig_b.add_subplot(111)
        # bias_imgs.append((ax_b.imshow(b, interpolation='none',
        #                               cmap=plt.get_cmap('gray')),
        #                   axs[i].set_title('')))
        # if not norm:
        #     plt.colorbar()
    im_ani = []
    print np.shape(weight_imgs)
    print type(weight_imgs[0][0])
    for i, k in enumerate(weight_imgs):
        im_ani.append(animation.ArtistAnimation(figs[i], k, interval=50,
                                                repeat_delay=None,
                                                ))
        out_path = output_dir + '/' + \
            base_path_l[0].split('/')[-1] + 'channel_' + str(i) + '_w.gif'
        im_ani[i].save(out_path, writer='imagemagick')
        print 'saved gif to %s' % out_path
    # im_ani_b = animation.ArtistAnimation(fig_b, bias_imgs, interval=50,
    #                                      repeat_delay=None,
    #                                      )
    # out_path = output_dir + '/' + base_path_l[0].split('/')[-1] + '_b.gif'
    # im_ani_b.save(out_path, writer='imagemagick')
    # print 'saved gif to %s' % out_path
    # plt.show()
# Script entry point: render the weight-history animations.
# print_weights(norm=True)
print_weights(norm=False)
print 'Done!'
| {
"repo_name": "garbersc/keras-galaxies",
"path": "ensembled_weights.py",
"copies": "1",
"size": "5847",
"license": "bsd-3-clause",
"hash": 2974064421774337000,
"line_mean": 29.7736842105,
"line_max": 89,
"alpha_frac": 0.5023088763,
"autogenerated": false,
"ratio": 3.2573816155988857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9259398094238067,
"avg_score": 0.00005847953216374269,
"num_lines": 190
} |
"""AT-specific Form helpers."""
from __future__ import unicode_literals
import re
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.translation import ugettext_lazy as _
from .at_states import STATE_CHOICES
re_ssn = re.compile(r'^\d{4} \d{6}')
class ATZipCodeField(RegexField):
    """Form field validating an Austrian postal code.

    Accepts exactly four digits, the first of which must be non-zero.
    """
    default_error_messages = {
        'invalid': _('Enter a zip code in the format XXXX.'),
    }

    def __init__(self, max_length=None, min_length=None, *args, **kwargs):
        postcode_pattern = r'^[1-9]{1}\d{3}$'
        super(ATZipCodeField, self).__init__(
            postcode_pattern, max_length, min_length, *args, **kwargs)
class ATStateSelect(Select):
    """A ``Select`` widget that uses a list of AT states as its choices."""

    def __init__(self, attrs=None):
        # The Bundesland choices are defined in the at_states module.
        super(ATStateSelect, self).__init__(attrs, choices=STATE_CHOICES)
class ATSocialSecurityNumberField(Field):
    """
    Validates an Austrian social security number ('XXXX XXXXXX').

    The first group is a three-digit serial number followed by a
    one-digit checksum; the second group usually encodes the holder's
    birth date, but may differ when the serial counter overflowed.
    Checksum rules per
    http://de.wikipedia.org/wiki/Sozialversicherungsnummer#.C3.96sterreich
    """
    default_error_messages = {
        'invalid': _('Enter a valid Austrian Social Security Number in XXXX XXXXXX format.'),
    }

    # Mod-11 checksum weights: three for the serial digits followed by six
    # for the date digits.
    _CHECK_WEIGHTS = (3, 7, 9, 5, 8, 4, 2, 1, 6)

    def clean(self, value):
        value = super(ATSocialSecurityNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return ""
        # NOTE(review): re_ssn has no trailing anchor, so extra characters
        # after the ten digits are not rejected here -- confirm intent.
        if not re_ssn.search(value):
            raise ValidationError(self.error_messages['invalid'])
        sqnr, date = value.split(" ")
        sqnr, check = sqnr[:3], sqnr[3]
        # Serial numbers below 100 are never issued.
        if int(sqnr) < 100:
            raise ValidationError(self.error_messages['invalid'])
        checksum = sum(
            int(digit) * weight
            for digit, weight in zip(sqnr + date, self._CHECK_WEIGHTS)) % 11
        if checksum != int(check):
            raise ValidationError(self.error_messages['invalid'])
        return '%s%s %s' % (sqnr, check, date)
| {
"repo_name": "thor/django-localflavor",
"path": "localflavor/at/forms.py",
"copies": "3",
"size": "2571",
"license": "bsd-3-clause",
"hash": 6484882523526741000,
"line_mean": 34.2191780822,
"line_max": 93,
"alpha_frac": 0.6285492026,
"autogenerated": false,
"ratio": 3.6781115879828326,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00046809543498419886,
"num_lines": 73
} |
"""attach comments to files
Revision ID: 254ac5fc3941
Revises: 50344aecd1c2
Create Date: 2015-04-13 15:52:07.104397
"""
# revision identifiers, used by Alembic.
revision = '254ac5fc3941'
down_revision = '50344aecd1c2'
import sys
import warnings
from alembic import op
import sqlalchemy as sa
from gertty.dbsupport import sqlite_alter_columns, sqlite_drop_columns
def upgrade():
    """Add comment.file_key and link existing comments to 'file' rows.

    Comments previously referenced a revision and a raw file path; this
    migration points each comment at a row in the 'file' table instead,
    inserting missing file rows on the fly, then drops the obsolete
    'revision_key' and 'file' columns.
    """
    # Suppress SQLAlchemy/Alembic warnings raised while altering the table.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        op.add_column('comment', sa.Column('file_key', sa.Integer()))
    sqlite_alter_columns('comment', [
        sa.Column('file_key', sa.Integer(), sa.ForeignKey('file.key'))
    ])
    update_query = sa.text('update comment set file_key=:file_key where key=:key')
    file_query = sa.text('select f.key from file f where f.revision_key=:revision_key and f.path=:path')
    file_insert_query = sa.text('insert into file (key, revision_key, path, old_path, status, inserted, deleted) '
                                ' values (NULL, :revision_key, :path, NULL, NULL, NULL, NULL)')
    conn = op.get_bind()
    countres = conn.execute('select count(*) from comment')
    comments = countres.fetchone()[0]
    comment_res = conn.execute('select p.name, c.number, c.status, r.key, r.number, m.file, m.key '
                               'from project p, change c, revision r, comment m '
                               'where m.revision_key=r.key and r.change_key=c.key and '
                               'c.project_key=p.key order by p.name')
    count = 0
    for (pname, cnumber, cstatus, rkey, rnumber, mfile, mkey) in comment_res.fetchall():
        count += 1
        # Progress indicator: '\r' overwrites the same terminal line.
        sys.stdout.write('Comment %s / %s\r' % (count, comments))
        sys.stdout.flush()
        # Find the file row for this comment's (revision, path); create it
        # if it does not exist yet.
        file_res = conn.execute(file_query, revision_key=rkey, path=mfile)
        file_key = file_res.fetchone()
        if not file_key:
            conn.execute(file_insert_query, revision_key=rkey, path=mfile)
            file_res = conn.execute(file_query, revision_key=rkey, path=mfile)
            file_key = file_res.fetchone()
        fkey = file_key[0]
        file_res = conn.execute(update_query, file_key=fkey, key=mkey)
    # The path and revision now live on the linked file row.
    sqlite_drop_columns('comment', ['revision_key', 'file'])
    print
def downgrade():
    # One-way migration: the dropped comment columns are not restored.
    pass
| {
"repo_name": "aspiers/gertty",
"path": "gertty/alembic/versions/254ac5fc3941_attach_comments_to_files.py",
"copies": "1",
"size": "2276",
"license": "apache-2.0",
"hash": -9217671783624476000,
"line_mean": 34.5625,
"line_max": 114,
"alpha_frac": 0.6221441125,
"autogenerated": false,
"ratio": 3.3818722139673105,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9473005303857478,
"avg_score": 0.00620220452196641,
"num_lines": 64
} |
"""Attach devices to a ticket."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
from SoftLayer.CLI import helpers
@click.command()
@click.argument('identifier', type=int)
@click.option('--hardware', 'hardware_identifier',
              help="The identifier for hardware to attach")
@click.option('--virtual', 'virtual_identifier',
              help="The identifier for a virtual server to attach")
@environment.pass_env
def cli(env, identifier, hardware_identifier, virtual_identifier):
    """Attach devices to a ticket."""
    tickets = SoftLayer.TicketManager(env.client)
    # Exactly one of --hardware / --virtual must be supplied.
    if hardware_identifier and virtual_identifier:
        raise exceptions.ArgumentError("Cannot attach hardware and a virtual server at the same time")
    if not hardware_identifier and not virtual_identifier:
        raise exceptions.ArgumentError("Must have a hardware or virtual server identifier to attach")
    if hardware_identifier:
        hw_manager = SoftLayer.HardwareManager(env.client)
        hw_id = helpers.resolve_id(
            hw_manager.resolve_ids, hardware_identifier, 'hardware')
        tickets.attach_hardware(identifier, hw_id)
    else:
        vs_manager = SoftLayer.VSManager(env.client)
        vs_id = helpers.resolve_id(
            vs_manager.resolve_ids, virtual_identifier, 'VS')
        tickets.attach_virtual_server(identifier, vs_id)
| {
"repo_name": "softlayer/softlayer-python",
"path": "SoftLayer/CLI/ticket/attach.py",
"copies": "3",
"size": "1425",
"license": "mit",
"hash": -7774346059918943000,
"line_mean": 39.7142857143,
"line_max": 102,
"alpha_frac": 0.7235087719,
"autogenerated": false,
"ratio": 4.178885630498534,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011959525270014513,
"num_lines": 35
} |
"""Attached Files Utilities.
"""
#
# Copyright (c) 2009 shinGETsu Project.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $Id$
#
import mimetypes
import imghdr
__version__ = '$Revision$'
__all__ = ['is_valid_image']
# For Unittest
_imghdr = imghdr
def is_valid_image(mimetype, path):
    """Return True if the image file at *path* matches *mimetype*.

    The actual type is sniffed from the file's content (not its name).
    A JPEG file is also accepted for the legacy IE type 'image/pjpeg'.
    Falsy paths and unrecognised image data yield False.

    NOTE(review): the stdlib imghdr module is deprecated and removed in
    Python 3.13 -- a replacement will eventually be needed.
    """
    if not path:
        return False
    detected = _imghdr.what(path)
    if not detected:
        return False
    guessed, _unused = mimetypes.guess_type('test.' + detected)
    if mimetype == guessed:
        return True
    return (guessed == 'image/jpeg') and (mimetype == 'image/pjpeg')
| {
"repo_name": "shingetsu/saku-ex",
"path": "shingetsu/attachutil.py",
"copies": "1",
"size": "1936",
"license": "bsd-2-clause",
"hash": 2969965270698690000,
"line_mean": 34.8518518519,
"line_max": 76,
"alpha_frac": 0.7267561983,
"autogenerated": false,
"ratio": 4.110403397027601,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 54
} |
"""Attaches a disk volume to a virtual machine."""
from baseCmd import *
from baseResponse import *
class attachVolumeCmd(baseCmd):
    """Request object for the asynchronous 'attachVolume' API call."""
    typeInfo = {}

    def __init__(self):
        # The attach operation is executed asynchronously by the backend.
        self.isAsync = "true"
        # (required) the ID of the disk volume
        self.id = None
        self.typeInfo['id'] = 'uuid'
        # (required) the ID of the virtual machine
        self.virtualmachineid = None
        self.typeInfo['virtualmachineid'] = 'uuid'
        # (optional) device ID to map the volume to within the guest OS;
        # when omitted the next available deviceId is chosen.  For Linux:
        # 0=/dev/xvda, 1=/dev/xvdb, 2=/dev/xvdc, 4=/dev/xvde, 5=/dev/xvdf,
        # 6=/dev/xvdg, 7=/dev/xvdh, 8=/dev/xvdi, 9=/dev/xvdj
        self.deviceid = None
        self.typeInfo['deviceid'] = 'long'
        self.required = ["id", "virtualmachineid", ]
class attachVolumeResponse (baseResponse):
    typeInfo = {}

    # (attribute, CloudStack wire type) for every scalar field of the
    # response; all of them start out as None on a fresh instance.
    _FIELD_TYPES = (
        ('id', 'string'),                          # ID of the disk volume
        ('account', 'string'),                     # owning account
        ('attached', 'date'),                      # when attached to a VM
        ('chaininfo', 'string'),                   # chain info of the volume
        ('created', 'date'),                       # creation date
        ('destroyed', 'boolean'),                  # destroyed or not
        ('deviceid', 'long'),                      # device id on the user VM
        ('diskBytesReadRate', 'long'),
        ('diskBytesWriteRate', 'long'),
        ('diskIopsReadRate', 'long'),
        ('diskIopsWriteRate', 'long'),
        ('diskofferingdisplaytext', 'string'),
        ('diskofferingid', 'string'),
        ('diskofferingname', 'string'),
        ('displayvolume', 'boolean'),              # shown to end user or not
        ('domain', 'string'),
        ('domainid', 'string'),
        ('hypervisor', 'string'),
        ('isextractable', 'boolean'),
        ('isodisplaytext', 'string'),
        ('isoid', 'string'),
        ('isoname', 'string'),
        ('maxiops', 'long'),
        ('miniops', 'long'),
        ('name', 'string'),
        ('path', 'string'),
        ('project', 'string'),
        ('projectid', 'string'),
        ('provisioningtype', 'string'),
        ('quiescevm', 'boolean'),                  # quiesce VM for snapshot
        ('serviceofferingdisplaytext', 'string'),  # root-disk offering
        ('serviceofferingid', 'string'),
        ('serviceofferingname', 'string'),
        ('size', 'long'),                          # volume size
        ('snapshotid', 'string'),                  # source snapshot, if any
        ('state', 'string'),
        ('status', 'string'),
        ('storage', 'string'),                     # hosting primary storage
        ('storageid', 'string'),                   # admin-only
        ('storagetype', 'string'),                 # shared or local
        ('templatedisplaytext', 'string'),
        ('templateid', 'string'),                  # -1 when built from ISO
        ('templatename', 'string'),
        ('type', 'string'),                        # ROOT or DATADISK
        ('virtualmachineid', 'string'),
        ('vmdisplayname', 'string'),
        ('vmname', 'string'),
        ('vmstate', 'string'),
        ('zoneid', 'string'),
        ('zonename', 'string'),
    )

    def __init__(self):
        for attr, wire_type in self._FIELD_TYPES:
            setattr(self, attr, None)
            self.typeInfo[attr] = wire_type
        # resource tags associated with the volume
        self.tags = []
        # bookkeeping for the latest async job acting on this object
        self.jobid = None
        self.typeInfo['jobid'] = ''
        self.jobstatus = None
        self.typeInfo['jobstatus'] = ''

    class tags:
        """One resource tag attached to the volume."""
        def __init__(self):
            # account / customer / domain(+id) / key / project(+id) /
            # resource(+type) / value — all unset until filled from a response
            for attr in ('account', 'customer', 'domain', 'domainid', 'key',
                         'project', 'projectid', 'resourceid', 'resourcetype',
                         'value'):
                setattr(self, attr, None)
| {
"repo_name": "MissionCriticalCloud/marvin",
"path": "marvin/cloudstackAPI/attachVolume.py",
"copies": "1",
"size": "8913",
"license": "apache-2.0",
"hash": -3640416543725088300,
"line_mean": 41.4428571429,
"line_max": 320,
"alpha_frac": 0.593627286,
"autogenerated": false,
"ratio": 4.141728624535316,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5235355910535316,
"avg_score": null,
"num_lines": null
} |
"""Attaches an ISO to a virtual machine."""
from baseCmd import *
from baseResponse import *
class attachIsoCmd (baseCmd):
    typeInfo = {}

    def __init__(self):
        # The attach operation runs asynchronously on the CloudStack side.
        self.isAsync = "true"
        # (Required) ID of the ISO file to attach.
        self.id = None
        self.typeInfo['id'] = 'uuid'
        # (Required) ID of the virtual machine to attach the ISO to.
        self.virtualmachineid = None
        self.typeInfo['virtualmachineid'] = 'uuid'
        self.required = ["id", "virtualmachineid"]
class attachIsoResponse (baseResponse):
    typeInfo = {}

    # (attribute, CloudStack wire type) for every scalar field of the
    # response; all of them start out as None on a fresh instance.
    _FIELD_TYPES = (
        ('id', 'string'),                       # ID of the virtual machine
        ('account', 'string'),                  # owning account
        ('cpunumber', 'integer'),               # number of CPUs
        ('cpuspeed', 'integer'),                # speed of each CPU
        ('cpuused', 'string'),                  # current CPU usage
        ('created', 'date'),                    # VM creation date
        ('details', 'map'),                     # VM details as key/value pairs
        ('diskioread', 'long'),
        ('diskiowrite', 'long'),
        ('diskkbsread', 'long'),
        ('diskkbswrite', 'long'),
        ('diskofferingid', 'string'),
        ('diskofferingname', 'string'),
        ('displayname', 'string'),              # falls back to the VM name
        ('displayvm', 'boolean'),               # shown to end user or not
        ('domain', 'string'),
        ('domainid', 'string'),
        ('forvirtualnetwork', 'boolean'),
        ('group', 'string'),
        ('groupid', 'string'),
        ('guestosid', 'string'),
        ('haenable', 'boolean'),                # high availability enabled
        ('hostid', 'string'),
        ('hostname', 'string'),
        ('hypervisor', 'string'),
        ('instancename', 'string'),             # returned to ROOT admin only
        ('isdynamicallyscalable', 'boolean'),
        ('isodisplaytext', 'string'),
        ('isoid', 'string'),
        ('isoname', 'string'),
        ('keypair', 'string'),                  # ssh key-pair
        ('memory', 'integer'),                  # allocated memory
        ('name', 'string'),
        ('networkkbsread', 'long'),
        ('networkkbswrite', 'long'),
        ('ostypeid', 'long'),
        ('password', 'string'),
        ('passwordenabled', 'boolean'),
        ('project', 'string'),
        ('projectid', 'string'),
        ('publicip', 'string'),                 # via static NAT rule
        ('publicipid', 'string'),
        ('rootdeviceid', 'long'),
        ('rootdevicetype', 'string'),
        ('serviceofferingid', 'string'),
        ('serviceofferingname', 'string'),
        ('servicestate', 'string'),             # service state from LB rule
        ('state', 'string'),
        ('templatedisplaytext', 'string'),
        ('templateid', 'string'),               # -1 when built from an ISO
        ('templatename', 'string'),
        ('userid', 'string'),                   # deploying user
        ('username', 'string'),
        ('vgpu', 'string'),
        ('zoneid', 'string'),
        ('zonename', 'string'),
    )

    def __init__(self):
        for attr, wire_type in self._FIELD_TYPES:
            setattr(self, attr, None)
            self.typeInfo[attr] = wire_type
        # list-valued sub-object fields
        self.affinitygroup = []
        self.nic = []
        self.securitygroup = []
        self.tags = []
        # bookkeeping for the latest async job acting on this object
        self.jobid = None
        self.typeInfo['jobid'] = ''
        self.jobstatus = None
        self.typeInfo['jobstatus'] = ''

    class affinitygroup:
        """One affinity group the VM belongs to."""
        def __init__(self):
            for attr in ('id', 'account', 'description', 'domain', 'domainid',
                         'name', 'project', 'projectid', 'type',
                         'virtualmachineIds'):
                setattr(self, attr, None)

    class nic:
        """One network interface attached to the VM."""
        def __init__(self):
            for attr in ('id', 'broadcasturi', 'deviceid', 'gateway',
                         'ip6address', 'ip6cidr', 'ip6gateway', 'ipaddress',
                         'isdefault', 'isolationuri', 'macaddress', 'netmask',
                         'networkid', 'networkname', 'secondaryip',
                         'traffictype', 'type', 'virtualmachineid'):
                setattr(self, attr, None)

    class egressrule:
        """One egress security-group rule.

        The generator flattens the rule's tag fields (customer, key, ...)
        onto the rule object itself in addition to the ``tags`` list.
        """
        def __init__(self):
            for attr in ('account', 'cidr', 'endport', 'icmpcode', 'icmptype',
                         'protocol', 'ruleid', 'securitygroupname',
                         'startport',
                         # flattened tag fields
                         'customer', 'domain', 'domainid', 'key', 'project',
                         'projectid', 'resourceid', 'resourcetype', 'value'):
                setattr(self, attr, None)
            # resource tags associated with this rule
            self.tags = []

    class ingressrule:
        """One ingress security-group rule (same shape as egressrule)."""
        def __init__(self):
            for attr in ('account', 'cidr', 'endport', 'icmpcode', 'icmptype',
                         'protocol', 'ruleid', 'securitygroupname',
                         'startport',
                         # flattened tag fields
                         'customer', 'domain', 'domainid', 'key', 'project',
                         'projectid', 'resourceid', 'resourcetype', 'value'):
                setattr(self, attr, None)
            # resource tags associated with this rule
            self.tags = []

    class securitygroup:
        """One security group the VM is a member of.

        As with the rule classes, the generator flattens rule and tag
        fields onto the group object alongside the rule/tag lists.
        """
        def __init__(self):
            for attr in ('id', 'account', 'description', 'domain', 'domainid',
                         'name', 'project', 'projectid', 'virtualmachinecount',
                         'virtualmachineids',
                         # flattened rule fields
                         'cidr', 'endport', 'icmpcode', 'icmptype', 'protocol',
                         'ruleid', 'securitygroupname', 'startport',
                         # flattened tag fields
                         'customer', 'key', 'resourceid', 'resourcetype',
                         'value',
                         # async job bookkeeping
                         'jobid', 'jobstatus'):
                setattr(self, attr, None)
            self.egressrule = []
            self.ingressrule = []
            self.tags = []

    # NOTE(review): the generated original defined several identical,
    # mutually shadowing ``tags`` classes; only the last binding survives,
    # so a single definition is behaviorally equivalent.
    class tags:
        """One resource tag attached to the VM."""
        def __init__(self):
            for attr in ('account', 'customer', 'domain', 'domainid', 'key',
                         'project', 'projectid', 'resourceid', 'resourcetype',
                         'value'):
                setattr(self, attr, None)
| {
"repo_name": "MissionCriticalCloud/marvin",
"path": "marvin/cloudstackAPI/attachIso.py",
"copies": "1",
"size": "24235",
"license": "apache-2.0",
"hash": -2402080652488110600,
"line_mean": 37.6523125997,
"line_max": 131,
"alpha_frac": 0.5714462554,
"autogenerated": false,
"ratio": 4.3975684993649065,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5469014754764907,
"avg_score": null,
"num_lines": null
} |
"""Attach genomes to a clinical report using the new flexible family report nomenclature.
"""
import csv
import simplejson as json
import os
import requests
from requests.auth import HTTPBasicAuth
import sys
import argparse
# Load environment variables for request authentication parameters
# Fail fast at import time if either credential is missing, so the
# script never issues an unauthenticated request.
if "FABRIC_API_PASSWORD" not in os.environ:
    sys.exit("FABRIC_API_PASSWORD environment variable missing")
if "FABRIC_API_LOGIN" not in os.environ:
    sys.exit("FABRIC_API_LOGIN environment variable missing")
FABRIC_API_LOGIN = os.environ['FABRIC_API_LOGIN']
FABRIC_API_PASSWORD = os.environ['FABRIC_API_PASSWORD']
# Base URL may be overridden (e.g. for staging); defaults to production.
FABRIC_API_URL = os.environ.get('FABRIC_API_URL', 'https://api.fabricgenomics.com')
# HTTP Basic credentials reused for every API request in this script.
auth = HTTPBasicAuth(FABRIC_API_LOGIN, FABRIC_API_PASSWORD)
def add_genomes_to_clinical_report(clinical_report_id,
                                   proband,
                                   family_1,
                                   family_2,
                                   family_3,
                                   family_4,
                                   score_indels,
                                   project_id,
                                   hpo_terms):
    """Attach proband/family genomes to an existing clinical report.

    Each genome argument is a dict describing one family member; a slot
    passed as None, or whose dict has no 'genome_id', is sent as None
    (i.e. treated as "not supplied").  Issues a PUT to /reports/<id>
    and returns the parsed JSON response.

    Fix vs. original: the original called ``member.get('genome_id')``
    unconditionally and raised AttributeError when a slot was already
    None; None slots are now handled gracefully.
    """
    url = "{}/reports/{}".format(FABRIC_API_URL, clinical_report_id)

    def _normalize(member):
        # Drop the slot entirely when no genome was supplied for it.
        if member is None or member.get('genome_id') is None:
            return None
        return member

    data_payload = {'proband': _normalize(proband),
                    'family_1': _normalize(family_1),
                    'family_2': _normalize(family_2),
                    'family_3': _normalize(family_3),
                    'family_4': _normalize(family_4),
                    # Always score against the full background population.
                    'background': 'FULL',
                    'score_indels': score_indels,
                    'project_id': project_id,
                    # The API expects HPO terms as a JSON-encoded string.
                    'hpo_terms': json.dumps(hpo_terms) if hpo_terms else None}
    sys.stdout.write("Attaching genomes to clinical report...\n")
    result = requests.put(url, auth=auth, data=json.dumps(data_payload))
    return result.json()
def main():
    """Parse command line arguments and launch a flexible family report."""
    parser = argparse.ArgumentParser(
        # Fix: 'flexibly' -> 'flexible' in the user-facing help text.
        description='Launch a flexible family report.')
    parser.add_argument('--proband_genome_id', metavar='proband_genome_id', type=int)
    parser.add_argument('--proband_sex', metavar='proband_sex', type=str, choices=['m', 'f'])
    # The four optional family members all take the same set of flags.
    for i in range(1, 5):
        prefix = 'family_{}'.format(i)
        parser.add_argument('--{}_genome_id'.format(prefix),
                            metavar='{}_genome_id'.format(prefix), type=int)
        parser.add_argument('--{}_sex'.format(prefix),
                            metavar='{}_sex'.format(prefix),
                            type=str, choices=['m', 'f'])
        parser.add_argument('--{}_affected'.format(prefix),
                            metavar='{}_affected'.format(prefix),
                            type=int, choices=[0, 1], default=None)
        parser.add_argument('--{}_relationship'.format(prefix),
                            metavar='{}_relationship'.format(prefix),
                            type=str,
                            choices=['mother', 'father', 'sibling', 'other'])
        parser.add_argument('--{}_label'.format(prefix),
                            metavar='{}_label'.format(prefix), type=str)
    parser.add_argument('--indels', metavar='score_indels', type=int, choices=[0, 1], default=0)
    parser.add_argument('clinical_report_id', metavar='clinical_report_id')
    parser.add_argument('--project_id', metavar='project_id', type=int)
    parser.add_argument('--hpo', metavar='hpo_terms')
    args = parser.parse_args()

    proband = {
        'genome_id': args.proband_genome_id,
        'sex': args.proband_sex
    }
    # Build the four family-member dicts from their per-prefix arguments.
    families = []
    for i in range(1, 5):
        prefix = 'family_{}'.format(i)
        affected = getattr(args, prefix + '_affected')
        families.append({
            'genome_id': getattr(args, prefix + '_genome_id'),
            'sex': getattr(args, prefix + '_sex'),
            # argparse restricts affected to 0/1/None; map 0/1 to bool.
            'affected': bool(affected) if affected in [0, 1] else None,
            'relationship': getattr(args, prefix + '_relationship'),
            'label': getattr(args, prefix + '_label')
        })
    score_indels = bool(args.indels)
    clinical_report_id = args.clinical_report_id
    project_id = args.project_id
    # HPO terms arrive as a single comma-separated string.
    hpo_terms = args.hpo or None
    if hpo_terms is not None:
        hpo_terms = hpo_terms.split(',')

    family_report_json = add_genomes_to_clinical_report(
        clinical_report_id,
        proband,
        families[0],
        families[1],
        families[2],
        families[3],
        score_indels,
        project_id,
        hpo_terms)

    # Confirm launched report data
    sys.stdout.write("\n")
    if "clinical_report" not in family_report_json:
        # Fix: parenthesized print so the module also runs on Python 3
        # (identical output under Python 2 for a single argument).
        print(family_report_json)
        sys.exit("Failed to launch. Check report parameters for correctness.")
    clinical_report = family_report_json['clinical_report']
    sys.stdout.write('Launched Family Report:\n'
                     'id: {}\n'
                     'test_type: {}\n'
                     'accession_id: {}\n'
                     'created_on: {}\n'
                     'created_by: {}\n'
                     'status: {}\n'
                     'filter_id: {}\n'
                     'panel_id: {}\n'
                     'hpo_terms: {}\n'
                     'filter_name: {}\n'
                     'workspace_id: {}\n'
                     'sample_collected_date: {}\n'
                     'sample_received_date: {}\n'
                     'include_cosmic: {}\n'
                     'vaast_report_id: {}\n'
                     'members: {}\n'
                     'genome_id: {}\n'
                     'version: {}\n'
                     .format(clinical_report.get('id', 'Missing'),
                             clinical_report.get('test_type', 'Missing'),
                             clinical_report.get('accession_id', 'Missing'),
                             clinical_report.get('created_on', 'Missing'),
                             clinical_report.get('created_by', 'Missing'),
                             clinical_report.get('status', 'Missing'),
                             clinical_report.get('filter_id', 'Missing'),
                             clinical_report.get('panel_id', 'Missing'),
                             clinical_report.get('hpo_terms', 'Missing'),
                             clinical_report.get('filter_name', 'Missing'),
                             clinical_report.get('workspace_id', 'Missing'),
                             clinical_report.get('sample_collected_date', 'Missing'),
                             clinical_report.get('sample_received_date', 'Missing'),
                             clinical_report.get('include_cosmic', 'Missing'),
                             clinical_report.get('vaast_report_id', 'Missing'),
                             json.dumps(clinical_report.get('members', '{}'), indent=1),
                             clinical_report.get('genome_id', 'Missing'),
                             clinical_report.get('version', 'Missing')))


if __name__ == "__main__":
    main()
| {
"repo_name": "Omicia/omicia_api_examples",
"path": "python/ClinicalReportLaunchers/add_genomes_to_flexible_family.py",
"copies": "1",
"size": "9237",
"license": "mit",
"hash": -3217978379049963500,
"line_mean": 44.2794117647,
"line_max": 143,
"alpha_frac": 0.565984627,
"autogenerated": false,
"ratio": 3.630896226415094,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4696880853415094,
"avg_score": null,
"num_lines": null
} |
""" Attachments """
from email.encoders import encode_base64
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from future.moves.urllib.parse import quote_plus
from .util import unicode_header
class Attachment(object):
    """ File attachment information.

    Instances are passed to the [`Message`](#message) object at construction
    time.

    :param filename: Name of the attached file
    :type filename: str|unicode|None
    :param data: Raw contents of the file
    :type data: str|bytes|None
    :param content_type: File mimetype
    :type content_type: str|None
    :param disposition: Content-Disposition: 'attachment', 'inline', ...
    :type disposition: str|None
    :param headers: Extra headers to set on the attachment part
    :type headers: dict|None
    """

    def __init__(self, filename, data, content_type='application/octet-stream', disposition='attachment', headers=None):
        self.filename = filename
        self.data = data
        self.content_type = content_type
        self.disposition = disposition
        self.headers = headers if headers else {}

        # Inline attachments get a Content-ID so HTML bodies can reference
        # them as "cid:<filename>".
        if self.disposition == 'inline':
            self.headers.setdefault('Content-ID', '<{}>'.format(quote_plus(filename)))

    def _build_mime_object(self):
        """ Construct the underlying MIME part (base64-encoded payload)

        :rtype: email.mime.base.MIMEBase
        """
        mime = MIMEBase(*self.content_type.split('/'))
        mime.set_payload(self.data)
        encode_base64(mime)
        return mime

    def _mime(self):
        """ Assemble the complete MIME object for this attachment

        :return: MIMEBase
        :rtype: email.mime.base.MIMEBase
        """
        mime = self._build_mime_object()

        # Content-Disposition, with the filename as a proper unicode header
        mime.add_header('Content-Disposition', self.disposition, filename=unicode_header(self.filename))

        # Extra headers
        for name, value in self.headers.items():
            mime[name] = value

        return mime
class ImageAttachment(Attachment):
    """ Image attachment.

    * The Content-Type is guessed from the image data itself
    * Supports 'inline' images: images embedded in the email. Useful for templates.

    Once an 'inline' image is created, its filename is used for 'Content-ID', which allows to reference it in the HTML body:

    ```python
    from mailem import Message, Attachment, ImageAttachment

    msg = Message(
        ['test@example.com'],
        'Hello',
        '<img src="cid:flowers.jpg" />',  # Referenced with "cid:<filename>"
        attachments=[
            ImageAttachment('flowers.jpg', open('flowers.jpg').read(), 'inline')
        ]
    )
    ```

    Arguments:

    :param filename: Image attachment filename. Will also become 'Content-ID' when inlined.
    :type filename: str|unicode|None
    :param data: The raw file data
    :type data: str|None
    """

    def __init__(self, filename, data, disposition='attachment', headers=None):
        # Pass content_type=None: the type is detected from the data instead.
        super(ImageAttachment, self).__init__(filename, data, None, disposition, headers)

    def _build_mime_object(self):
        # MIMEImage sniffs the image subtype from the raw bytes.
        return MIMEImage(self.data)
| {
"repo_name": "kolypto/py-mailem",
"path": "mailem/attachment.py",
"copies": "1",
"size": "3197",
"license": "bsd-2-clause",
"hash": 4700480686617296000,
"line_mean": 30.3431372549,
"line_max": 128,
"alpha_frac": 0.6324679387,
"autogenerated": false,
"ratio": 4.151948051948052,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011716873069976793,
"num_lines": 102
} |
"""Attachment utils."""
from pathlib import Path
from uuid import uuid4
from blobstash.docstore.error import DocStoreError
from blobstash.filetree import FileTreeClient
# Format of the pointer string that ties a document to a FileTree node ref.
_FILETREE_POINTER_FMT = "@filetree/ref:{}"
# Prefix for the FileTree filesystems backing directory attachments.
_FILETREE_ATTACHMENT_FS_PREFIX = "_filetree:docstore"
class Attachment:
    """A file stored in FileTree and tied to its document through a pointer."""

    def __init__(self, pointer, node):
        self.pointer = pointer
        self.node = node

    def __repr__(self):
        template = "blobstash.docstore.attachment.Attachment(pointer={!r}, node={!r})"
        return template.format(self.pointer, self.node)
def add_attachment(client, path):
    """Upload the file or directory at *path* to FileTree and return the
    resulting Attachment (pointer + node)."""
    target = Path(path)
    if target.is_file():
        # Single file: upload it directly as a node.
        with open(target.absolute(), "rb") as fileobj:
            node = FileTreeClient(client=client).fput_node(
                target.name, fileobj, content_type=None
            )
    else:
        # Directory: create a dedicated FS and upload the whole tree.
        fs = FileTreeClient(client=client).fs(
            uuid4().hex, prefix=_FILETREE_ATTACHMENT_FS_PREFIX
        )
        fs.upload(path)
        node = fs.node()
    return Attachment(_FILETREE_POINTER_FMT.format(node.ref), node)
def fadd_attachment(client, name, fileobj, content_type=None):
    """Upload *fileobj*'s content as an attachment named *name* and return
    the resulting Attachment (pointer + node)."""
    tree = FileTreeClient(client=client)
    node = tree.fput_node(name, fileobj, content_type)
    return Attachment(_FILETREE_POINTER_FMT.format(node.ref), node)
def fget_attachment(client, attachment):
    """Return a fileobj (caller must close it) with the attachment's content.

    :raises DocStoreError: when the attachment is a directory node.
    """
    if attachment.node.is_dir():
        raise DocStoreError(
            "cannot get a fileobj for a directory, please use get_attachment instead"
        )
    return FileTreeClient(client=client).fget_node(attachment.node)
def get_attachment(client, attachment, path):
    """Download the attachment (file or directory node) to *path*."""
    node = attachment.node
    if node.is_file():
        # Plain file node: fetch it directly.
        FileTreeClient(client=client).get_node(node, path)
    else:
        # Directory node: open the FS at its ref and download the tree.
        FileTreeClient(client=client).fs(
            ref=node.ref, prefix=_FILETREE_ATTACHMENT_FS_PREFIX
        ).download(path)
| {
"repo_name": "tsileo/blobstash-python-docstore",
"path": "blobstash/docstore/attachment.py",
"copies": "1",
"size": "2286",
"license": "mit",
"hash": -8760938269791560000,
"line_mean": 31.6571428571,
"line_max": 113,
"alpha_frac": 0.6666666667,
"autogenerated": false,
"ratio": 3.778512396694215,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49451790633942144,
"avg_score": null,
"num_lines": null
} |
"""Attach signals to this app's models."""
# -*- coding: utf-8 -*-
import json
import logging
import channels.layers
from asgiref.sync import async_to_sync
from django.db.models.signals import post_save
from django.dispatch import receiver
from .models import Job, Log
logger = logging.getLogger(__name__) # pylint: disable=C0103
def send_message(event):
    '''
    Call back function to send message to the browser

    :param event: dict with a 'text' key holding the payload to forward.
    '''
    message = event['text']
    channel_layer = channels.layers.get_channel_layer()
    # Send message to WebSocket
    # NOTE(review): channel layers' send() normally takes a channel name as
    # its first argument; here it is invoked with only text_data. Confirm
    # the context in which this callback runs makes that signature valid.
    async_to_sync(channel_layer.send)(text_data=json.dumps(
        message
    ))
@receiver(post_save, sender=Job, dispatch_uid='update_job_status_listeners')
def update_job_status_listeners(sender, instance, **kwargs):
    '''
    Push the job's new status to the owner's browser group when a Job
    is saved.
    '''
    logger.debug("Job modified: {} :: status = {}.".format(
        instance, instance.status))

    # Every browser session of the job's owner listens on this group.
    group_name = 'job-user-{}'.format(instance.owner.username)

    payload = {
        'job_id': instance.id,
        'title': instance.title,
        'status': instance.status,
        'modified': instance.modified.isoformat(),
    }

    async_to_sync(channels.layers.get_channel_layer().group_send)(
        group_name,
        {'type': 'send_message', 'text': payload},
    )
@receiver(post_save, sender=Log, dispatch_uid='update_job_log_listeners')
def update_job_log_listeners(sender, instance, **kwargs):
    '''
    Push the new log entry to the job's browser group when a Log is saved.
    '''
    logger.debug("Log modified: {} :: content = {}.".format(
        instance, instance.content))

    # One group per job; everyone watching this job's log gets the update.
    group_name = 'job-log-{}'.format(instance.job.id)

    payload = {
        'log_id': instance.id,
        'time': instance.time.isoformat(),
        'content': instance.content,
        'stream': instance.stream,
    }

    async_to_sync(channels.layers.get_channel_layer().group_send)(
        group_name,
        {'type': 'send_message', 'text': payload},
    )
| {
"repo_name": "ornl-ndav/django-remote-submission",
"path": "django_remote_submission/signals.py",
"copies": "1",
"size": "2175",
"license": "isc",
"hash": -3667463687333967400,
"line_mean": 24,
"line_max": 76,
"alpha_frac": 0.6179310345,
"autogenerated": false,
"ratio": 3.7760416666666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9893972701166667,
"avg_score": 0,
"num_lines": 87
} |
# Attack agent thread. This performs the attack based on instructions from the main thread.
import time, threading, Queue
import attacks.syn2
import attacks.icmpflood
import attacks.httpflood
class Attacker(threading.Thread):
    """Worker thread that polls an instruction queue and runs attacks.

    Instructions are dicts keyed by an 'id' field: ins['id'] names the key
    under which the attack parameters ('attack', 'target', 'port', 'rate',
    ...) are stored.
    """
    def __init__(self, Q):
        # Q: queue of attack instructions shared with the main thread.
        threading.Thread.__init__(self) # Required for thread class
        self.instruction_q = Q
    def run(self):
        # Poll forever; the thread is expected to run as a daemon.
        while 1:
            self.poll_queue()
    def poll_queue(self):
        """Wait up to 50 ms for one instruction and dispatch it."""
        try:
            self.instruction = self.instruction_q.get(True, 0.05)
            self.attack_handler(self.instruction)
            #time.sleep(1)
        except Queue.Empty:
            # No instruction available yet; try again on the next poll.
            pass
    def attack_handler(self, ins):
        """Run the attack described by *ins* (blocks until it returns)."""
        #print "Attack type: ", ins
        id = ins['id']
        if ins[id]['attack'] == 'SYNFLOOD':
            print "Performing SYN FLOOD at rate: ", ins[id]['rate']
            Att = attacks.syn2.SYNFLOOD(self.instruction_q, ins[id]['target'], ins[id]['port'], ins[id]['rate'])
            Att.main()
        elif ins[id]['attack'] == 'ICMPFLOOD':
            print "Performing ICMP flood at rate: ", ins[id]['rate']
            Att = attacks.icmpflood.ICMP_FLOOD(self.instruction_q, ins[id]['target'], ins[id]['port'], ins[id]['rate'], ins[id]['pktsize'])
            Att.main()
        elif ins[id]['attack'] == 'HTTPFLOOD':
            print "Performing HTTP GET Flood"
            # NOTE(review): http_main looks like a function; confirm its
            # return value actually exposes a main() method.
            Att = attacks.httpflood.http_main(self.instruction_q, ins[id]['target'], ins[id]['port'], ins[id]['rate'], ins[id]['get'], ins[id]['processes'], ins[id]['connections'])
            Att.main()
        elif ins[id]['attack'] == 'ABC':
            print "ABC attack"
        else:
            print "Unknown attack"
| {
"repo_name": "mikeberkelaar/controlleddos",
"path": "Attack_Agent/Xattacker.py",
"copies": "1",
"size": "1697",
"license": "apache-2.0",
"hash": -2745641035464261600,
"line_mean": 32.94,
"line_max": 180,
"alpha_frac": 0.5715969358,
"autogenerated": false,
"ratio": 3.602972399150743,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4674569334950743,
"avg_score": null,
"num_lines": null
} |
"""attack functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def parameters(max_epsilon, image_factor, image_pixels,
               manual_alpha=None):
    """Compute the perturbation budget (eps) and step size (alpha).

    Images for the inception classifier are normalized to the [-1, 1]
    interval, so eps (a pixel difference) lives in [0, 2]; image_factor
    (typically 2.0/255.0) renormalizes max_epsilon from the [0, 255]
    scale. eps keeps a 5% safety margin below the allowed maximum.

    :param manual_alpha: when given, used as alpha verbatim; otherwise
        alpha is scaled from an (alpha=1.0, eps=16.0) default and by
        sqrt(image_pixels).
    :return: (eps, alpha) tuple
    """
    eps = image_factor * (max_epsilon * 0.95)
    if manual_alpha is not None:
        return eps, manual_alpha
    # Scale the default step linearly with eps, then by sqrt of the
    # pixel count (the L2 norm of an all-ones image).
    alpha = (1.0 * eps / 16.0) * np.sqrt(image_pixels)
    return eps, alpha
def adv_graph_sm(x_input, target_class_input, x_min, x_max,
                 predicted_labels, logits_weighted,
                 num_classes, alpha,
                 real_class=None, debug=False):
    """Build the adversarial update graph for a single model.

    Takes one normalized gradient step of size ``alpha`` on x_input toward
    the target class (and, when ``real_class`` is given, also away from the
    real class), clipped into the [x_min, x_max] box.

    :param logits_weighted: iterable of (logits, weight) pairs whose
        weighted cross-entropy losses are summed into the target loss.
    :param real_class: when None (pure targeted mode), steps for examples
        already predicted as the target are damped by ``decay_factor``.
    :param debug: when True, dbg_msg carries intermediate tensors.
    :return: (x_adv, dbg_msg); dbg_msg is None unless debug is True.
    """
    decay_factor = 0.9
    n_dims = len(x_input.get_shape().as_list())
    if real_class is None:
        # Examples already classified as the target keep only a
        # (1 - decay_factor) fraction of the step.
        done_adv = tf.equal(target_class_input, predicted_labels)
        decay_adv = tf.reshape((1 - tf.to_float(done_adv) * decay_factor),
                               [-1] + [1] * (n_dims - 1))
    else:
        # done_adv = tf.not_equal(real_class, predicted_labels)
        done_adv = 0
        decay_adv = 1 - done_adv
    # label_smoothing?
    one_hot_target_class = tf.one_hot(target_class_input, num_classes)
    loss_target = 0
    for logits, w in logits_weighted:
        loss_target += tf.nn.softmax_cross_entropy_with_logits(
            labels=one_hot_target_class, logits=logits) * w
    if real_class is not None:
        # Also push the prediction away from the known real class.
        one_hot_real_class = tf.one_hot(real_class, num_classes)
        for logits, w in logits_weighted:
            loss_target -= tf.nn.softmax_cross_entropy_with_logits(
                labels=one_hot_real_class, logits=logits) * w
    # rm_axes = [1,2,3]
    # Reduce over every axis except the leading batch axis.
    rm_axes = list(range(1, n_dims))
    grad_adv = tf.gradients(loss_target, x_input)[0]
    grad_norm = tf.rsqrt(tf.reduce_sum(grad_adv * grad_adv, rm_axes,
                                       keep_dims=True))
    # First pass: L2-normalized descent step, clipped into the valid box.
    new = x_input - alpha * grad_adv * grad_norm * decay_adv
    x_adv = tf.clip_by_value(new, x_min, x_max)
    # keep the gradients whose x is still inside epsilon
    grad_filtered = grad_adv * tf.to_float(tf.equal(new, x_adv))
    norm_filtered = tf.rsqrt(tf.reduce_sum(grad_filtered * grad_filtered,
                                           rm_axes, keep_dims=True))
    # Second pass: renormalize using only the unclipped components so the
    # effective step size inside the box stays close to alpha.
    new = x_input - alpha * grad_adv * norm_filtered * decay_adv
    x_adv = tf.clip_by_value(new, x_min, x_max)
    if debug:
        # Index [batch, 0, 0, ...] to report one norm value per example.
        slice_i = (slice(None),) + tuple([0] * (n_dims - 1))
        grad_normx = 1 / grad_norm[slice_i]
        filtered_grad_max = tf.reduce_max(norm_filtered, rm_axes)
        dbg_msg = [grad_adv, loss_target, done_adv, grad_normx, filtered_grad_max]
    else:
        dbg_msg = None
    return x_adv, dbg_msg
def adv_graph(x_input, target_class_input, x_ref,
              m_predicted_labels, m_logits_weighted,
              num_classes, alpha, eps,
              m_real_class=None, debug=False):
    """Build the multi-model adversarial graph: average the per-model
    updates from adv_graph_sm and clip the result into the epsilon box
    around x_ref (intersected with the valid [-1, 1] range).

    :return: (x_adv, m_dbg_msg) with one debug entry per model.
    """
    x_max = tf.clip_by_value(x_ref + eps, -1.0, 1.0)
    x_min = tf.clip_by_value(x_ref - eps, -1.0, 1.0)

    sum_x_adv = 0
    m_dbg_msg = []
    for idx, pred_labels in enumerate(m_predicted_labels):
        real = m_real_class[idx] if m_real_class is not None else None
        x_adv, dbg = adv_graph_sm(x_input, target_class_input, x_min, x_max,
                                  pred_labels, m_logits_weighted[idx],
                                  num_classes, alpha,
                                  real_class=real, debug=debug)
        sum_x_adv += x_adv
        m_dbg_msg.append(dbg)

    x_adv = tf.clip_by_value(sum_x_adv / len(m_predicted_labels), x_min, x_max)
    return x_adv, m_dbg_msg
| {
"repo_name": "huschen/kaggle_nips17_adversarial",
"path": "submission_code/targeted/attack.py",
"copies": "1",
"size": "3952",
"license": "mit",
"hash": 1468147415904231200,
"line_mean": 32.2100840336,
"line_max": 78,
"alpha_frac": 0.6067813765,
"autogenerated": false,
"ratio": 3.0707070707070705,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41774884472070706,
"avg_score": null,
"num_lines": null
} |
"""attack graph functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.slim.nets import inception
from lib_adv import inception_resnet_v2
from lib_adv import utils
# from tensorflow.contrib import slim
slim = tf.contrib.slim
def parameters(max_epsilon, image_factor, image_pixels, norm_ord,
               manual_alpha=None, min_num_iter=20):
    """Compute the perturbation budget (eps) and step size (alpha).

    Images are normalized to the [-1, 1] interval, so image_factor
    (typically 2.0/255.0) renormalizes max_epsilon from the [0, 255]
    pixel scale.

    :param norm_ord: 1 for an L1 norm factor (pixel count); any other
        value uses L2 (sqrt of the pixel count).
    :param manual_alpha: when given, used as alpha verbatim.
    :param min_num_iter: alpha scales inversely with the iteration count.
    :return: (eps, alpha) tuple
    """
    eps = image_factor * max_epsilon
    if manual_alpha is not None:
        return eps, manual_alpha
    # Defaults calibrated at alpha=1.0 for eps=16.0 over 20 iterations.
    alpha = 1.0 * eps / 16.0 * 20.0 / min_num_iter
    norm_factor = image_pixels if norm_ord == 1 else np.sqrt(image_pixels)
    return eps, alpha * norm_factor
def inception_models(x_input, num_classes, resnet=True):
    """Build an Inception classifier graph.

    :param resnet: True for inception_resnet_v2, False for inception_v3.
    :return: (logits, end_points) from the slim model.
    """
    if not resnet:
        with slim.arg_scope(inception.inception_v3_arg_scope()):
            return inception.inception_v3(
                x_input, num_classes=num_classes, is_training=False)
    with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
        return inception_resnet_v2.inception_resnet_v2(
            x_input, num_classes=num_classes, is_training=False)
def graph_base(x_input, num_classes, model_names, targeted=True):
    """Build a classifier graph for each model under its own variable scope.

    :return: (target_labels, m_pred_labels, m_logits_weighted). In targeted
        mode target_labels is None; otherwise it is the least-likely class
        according to the last model built.
    """
    m_pred_labels = []
    m_logits_weighted = []
    logits = None
    for model_name in model_names:
        with tf.variable_scope(model_name):
            is_resnet = 'incpt_resnet' in model_name
            logits, end_points = inception_models(x_input, num_classes, is_resnet)
            preds = tf.cast(tf.argmax(end_points['Predictions'], 1),
                            tf.int32)
            # Main logits at full weight, auxiliary logits at 0.4.
            m_pred_labels.append(preds)
            m_logits_weighted.append([(logits, 1), (end_points['AuxLogits'], 0.4)])
    if targeted:
        target_labels = None
    else:
        # Untargeted: aim at the least-likely class of the last model.
        target_labels = tf.cast(tf.argmin(logits, axis=1), tf.int32)
    return target_labels, m_pred_labels, m_logits_weighted
def vector_norm(t, norm_ord, keep_dims=True):
    """Per-example norm of t, reducing over all non-batch axes.

    :param norm_ord: 1 for L1; any other value computes L2.
    """
    # Reduce over every axis except the leading batch axis.
    reduce_axes = list(range(1, len(t.get_shape().as_list())))
    if norm_ord == 1:
        return tf.reduce_sum(tf.abs(t), reduce_axes, keep_dims=keep_dims)
    return tf.sqrt(tf.reduce_sum(t * t, reduce_axes, keep_dims=keep_dims))
def graph_adv_sm(x_input, num_classes, dx_min, dx_max,
                 target_labels,
                 predicted_labels, logits_weighted,
                 alpha, norm_ord, debug=False):
    """Build the per-model adversarial delta for one step.

    Computes a normalized gradient step of size ``alpha`` toward
    ``target_labels`` and clips it into the [dx_min, dx_max] headroom box.

    :param logits_weighted: iterable of (logits, weight) pairs whose
        weighted cross-entropy losses are summed into the target loss.
    :param norm_ord: norm order passed to vector_norm (1 or L2).
    :param debug: when True, dbg_msg carries intermediate tensors.
    :return: (delta_x, dbg_msg); dbg_msg is None unless debug is True.
    """
    # label_smoothing?
    one_hot_target_labels = tf.one_hot(target_labels, num_classes)
    loss_target = 0
    for logits, w in logits_weighted:
        loss_target += tf.nn.softmax_cross_entropy_with_logits(
            labels=one_hot_target_labels, logits=logits) * w
    # Negated gradient: descend the loss toward the target class.
    grad_adv = -tf.gradients(loss_target, x_input)[0]
    g_norm = vector_norm(grad_adv, norm_ord)
    grad_normed = alpha * grad_adv / g_norm
    grad_nclip = tf.clip_by_value(grad_normed, dx_min, dx_max)
    # keep the gradients whose x is still inside epsilon
    grad_filtered = grad_adv * tf.to_float(tf.equal(grad_nclip, grad_normed))
    g_fnorm = vector_norm(grad_filtered, norm_ord)
    # Renormalize by the unclipped components only, so the effective step
    # size inside the box stays close to alpha; clip again afterwards.
    delta_raw = alpha * grad_adv / g_fnorm
    delta_x = tf.clip_by_value(delta_raw, dx_min, dx_max)
    if debug:
        grad_applied = delta_x
        done_adv = tf.equal(target_labels, predicted_labels)
        dbg_msg = [grad_adv, grad_applied, loss_target, done_adv]
    else:
        dbg_msg = None
    return delta_x, dbg_msg
def graph_adv(x_input, num_classes, x_ref, target_labels,
              m_pred_labels, m_logits_weighted,
              alpha, eps, norm_ord, debug=False):
    """Average the per-model deltas from graph_adv_sm and clip the result
    into the epsilon box around x_ref (intersected with [-1, 1]).

    :return: (x_adv, m_dbg_msg) with one debug entry per model.
    """
    utils.logger.debug('v2.3, mode=%d' % norm_ord)
    x_min = tf.clip_by_value(x_ref - eps, -1.0, 1.0)
    x_max = tf.clip_by_value(x_ref + eps, -1.0, 1.0)
    # Remaining headroom for a step taken from the current iterate.
    dx_min = x_min - x_input
    dx_max = x_max - x_input

    sum_delta_x = 0
    m_dbg_msg = []
    for preds, logits_weighted in zip(m_pred_labels, m_logits_weighted):
        delta_x, dbg = graph_adv_sm(x_input, num_classes,
                                    dx_min, dx_max, target_labels,
                                    preds, logits_weighted,
                                    alpha, norm_ord, debug=debug)
        sum_delta_x += delta_x
        m_dbg_msg.append(dbg)

    # if norm_ord == 2: divied by np.sqrt(num_models)
    scale_delta_x = sum_delta_x / len(m_pred_labels)
    x_adv = tf.clip_by_value(scale_delta_x + x_input, x_min, x_max)
    return x_adv, m_dbg_msg
| {
"repo_name": "huschen/kaggle_nips17_adversarial",
"path": "models_targeted_attacks/target_mng/lib_adv/attack.py",
"copies": "1",
"size": "4876",
"license": "mit",
"hash": -3411833694992636400,
"line_mean": 31.5066666667,
"line_max": 79,
"alpha_frac": 0.6406890894,
"autogenerated": false,
"ratio": 3.062814070351759,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4203503159751759,
"avg_score": null,
"num_lines": null
} |
# Attack master/manager
import threading, time, Queue # Python
import Xtcpconnector
import Xattacker # Own classes
# Statics
SERVER_IP = "145.100.102.108"
SERVER_PORT = 55555
class Agent():
    """Holds an input queue and drains messages from it."""

    def __init__(self, Q):
        self.bla = "bla"
        self.in_Q = Q

    def slave(self):
        """Poll the input queue forever, recording each received message."""
        while True:
            try:
                message = self.in_Q.get(True, 0.05)
            except Queue.Empty:
                # Nothing arrived within the timeout; poll again.
                continue
            self.MSG = message
            if message is None:
                # Nothing actionable in a None message; keep polling.
                continue
if __name__ == '__main__':
    # Wire the TCP listener to the attack worker: messages received from
    # the control server are forwarded, one at a time, to the attacker.
    ALL_THREADS = []
    # maxsize=1 so that only the most recent instruction is kept.
    attack_q = Queue.Queue(maxsize=1)
    instruction_q = Queue.Queue(maxsize=1)
    print "1.Starting TCP connector"
    LISTENER = Xtcpconnector.TCPclient(attack_q, SERVER_IP, SERVER_PORT)
    ALL_THREADS.append(LISTENER)
    LISTENER.daemon = True
    LISTENER.start()
    #LISTENER.join()
    print "      TCP connector started"
    print "2. Starting attack agent"
    attack_slave = Xattacker.Attacker(instruction_q)
    ALL_THREADS.append(attack_slave)
    attack_slave.daemon = True
    attack_slave.start()
    print "    Attack agent started\n"
    # Forwarding loop: move each message from attack_q to instruction_q,
    # discarding the stale instruction if the worker has not consumed it.
    while 1:
        try:
            data = attack_q.get(True, 0.05)
            try:
                instruction_q.put((data))
            except Queue.Full:
                bogus = instruction_q.get(False)
                instruction_q.put((data))
        except Queue.Empty:
            pass
        #time.sleep(50)
    print "Done"
| {
"repo_name": "mikeberkelaar/controlleddos",
"path": "Attack_Agent/attack_agent.py",
"copies": "1",
"size": "1444",
"license": "apache-2.0",
"hash": -3160076877471000000,
"line_mean": 21.9206349206,
"line_max": 72,
"alpha_frac": 0.5657894737,
"autogenerated": false,
"ratio": 3.5831265508684864,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46489160245684863,
"avg_score": null,
"num_lines": null
} |
"""Attack of the Grothons from Planet #25"""
# Imports.
from bs4 import BeautifulSoup
from textwrap import TextWrapper
# Classes.
class Engine(object):
    """Drives the game loop: runs scenes and follows their transitions."""

    def __init__(self, scene_map):
        self.scene_map = scene_map

    def play(self):
        """Run scenes forever, starting from the map's opening scene."""
        scene = self.scene_map.opening_scene()
        while True:
            print('-' * 70)
            # Each scene returns the name of the scene to run next.
            scene = self.scene_map.next_scene(scene.enter())
class Scene(object):
    """Base class for scenes; subclasses must override enter()."""

    def enter(self):
        # Reaching this means a subclass forgot to implement enter().
        print("Not yet configured. Subclass it and implement enter().")
        exit(1)
# The central corridor scene.
class Central(Scene):
    """Central corridor scene.

    Commands: 'attack' ends the game; 'outsmart' then 'forward' leads to
    the armory. Anything else replays this scene.
    """
    # Class attribute; assigning self.success in enter() shadows it per
    # instance so the return-visit text is shown afterwards.
    success = False
    def enter(self):
        if self.success == False:
            print WRAP.fill(XML.scene.central.init.string)
        else:
            print WRAP.fill(XML.scene.central.back.string)
        action = raw_input('> ')
        if action == 'attack':
            print WRAP.fill(XML.scene.central.die.string)
            exit(1)
        elif action == 'outsmart':
            print WRAP.fill(XML.scene.central.live.string)
            self.success = True
            direction = raw_input('> ')
            if direction == 'forward':
                return 'armory'
        else:
            print WRAP.fill(XML.scene.central.error.string)
        # Fall through: unknown input (or non-'forward' direction) retries.
        print '\n'
        return 'central'
# The armory scene.
class Armory(Scene):
    """Armory scene.

    Commands: 'sneak' ends the game; 'smite' then 'forward'/'retreat'
    leads to the bridge / central corridor. Anything else replays.
    """
    # Class attribute; set per instance once the scene has been beaten.
    success = False
    def enter(self):
        if self.success == False:
            print WRAP.fill(XML.scene.armory.init.string)
        else:
            print WRAP.fill(XML.scene.armory.back.string)
        action = raw_input('> ')
        if action == 'sneak':
            print WRAP.fill(XML.scene.armory.die.string)
            exit(1)
        elif action == 'smite':
            print WRAP.fill(XML.scene.armory.live.string)
            self.success = True
            direction = raw_input('> ')
            if direction == 'forward':
                return 'bridge'
            elif direction == 'retreat':
                return 'central'
        else:
            print WRAP.fill(XML.scene.armory.error.string)
        # Fall through: unknown input (or unknown direction) retries.
        print '\n'
        return 'armory'
class Bridge(Scene):
    """Bridge scene.

    Commands: 'strike' ends the game; 'quaff' then 'forward'/'retreat'
    leads to the pod / armory. Anything else replays.
    """
    # Class attribute; set per instance once the scene has been beaten.
    success = False
    def enter(self):
        if self.success == False:
            print WRAP.fill(XML.scene.bridge.init.string)
        else:
            print WRAP.fill(XML.scene.bridge.back.string)
        action = raw_input('> ')
        if action == 'strike':
            print WRAP.fill(XML.scene.bridge.die.string)
            exit(1)
        elif action == 'quaff':
            print WRAP.fill(XML.scene.bridge.live.string)
            self.success = True
            direction = raw_input('> ')
            if direction == 'forward':
                return 'pod'
            elif direction == 'retreat':
                return 'armory'
        else:
            print WRAP.fill(XML.scene.bridge.error.string)
        # Fall through: unknown input (or unknown direction) retries.
        print '\n'
        return 'bridge'
class Pod(Scene):
    """Escape pod scene.

    Commands: 'wet' ends the game; 'attack' then 'forward'/'retreat'
    leads to the planet / bridge. Anything else replays.
    """
    # Class attribute; set per instance once the scene has been beaten.
    success = False
    def enter(self):
        if self.success == False:
            print WRAP.fill(XML.scene.pod.init.string)
        else:
            print WRAP.fill(XML.scene.pod.back.string)
        action = raw_input('> ')
        if action == 'wet':
            print WRAP.fill(XML.scene.pod.die.string)
            exit(1)
        elif action == 'attack':
            print WRAP.fill(XML.scene.pod.live.string)
            self.success = True
            direction = raw_input('> ')
            if direction == 'forward':
                return 'planet'
            elif direction == 'retreat':
                return 'bridge'
        else:
            print WRAP.fill(XML.scene.pod.error.string)
        # Fall through: unknown input (or unknown direction) retries.
        print '\n'
        return 'pod'
class Planet(Scene):
    """Final scene on the planet.

    Commands: 'open' and 'helmet' both end the game (death and victory
    respectively). Anything else replays.
    """
    def enter(self):
        print WRAP.fill(XML.scene.planet.init.string)
        action = raw_input('> ')
        if action == 'open':
            print WRAP.fill(XML.scene.planet.die.string)
            exit(1)
        elif action == 'helmet':
            # Victory also terminates the program.
            print WRAP.fill(XML.scene.planet.live.string)
            exit(1)
        else:
            print WRAP.fill(XML.scene.planet.error.string)
        print '\n'
        return 'planet'
class Map(object):
    """Maps scene names to scene instances and tracks the starting scene."""

    # One shared instance per scene, keyed by the name scenes return.
    scenes = {
        'central': Central(),
        'armory': Armory(),
        'bridge': Bridge(),
        'pod': Pod(),
        'planet': Planet()
    }

    def __init__(self, start_scene):
        self.start_scene = start_scene

    def next_scene(self, scene_name):
        """Return the scene registered under *scene_name* (None if unknown)."""
        return self.scenes.get(scene_name)

    def opening_scene(self):
        """Return the scene the game starts in."""
        return self.next_scene(self.start_scene)
# Have BeautifulSoup parse the game text file.
# FIX: the original passed open('gothons_text.xml') directly, leaking the
# file handle until garbage collection; a with-block closes it promptly.
with open('gothons_text.xml') as _gothons_file:
    XML = BeautifulSoup(_gothons_file)

# Set up text formatting.
WRAP = TextWrapper()
WRAP.initial_indent = '* '
WRAP.fix_sentence_endings = True

# Build the map, hand it to the engine, and run the game loop.
a_map = Map('central')
a_game = Engine(a_map)
a_game.play()
| {
"repo_name": "OzBonus/LPTHW",
"path": "Gothons/gothons_game.py",
"copies": "1",
"size": "4962",
"license": "unlicense",
"hash": -622556965240809000,
"line_mean": 21.4524886878,
"line_max": 70,
"alpha_frac": 0.53808948,
"autogenerated": false,
"ratio": 3.8554778554778553,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9886380750436599,
"avg_score": 0.0014373170082512645,
"num_lines": 221
} |
ATTACK_PATTERN = {
'response': {
"id": "attack-pattern--01a5a209-b94c-450b-b7f9-946497d91055",
"name": "ATTACK_PATTERN 1",
"type": "attack-pattern",
"modified": "2020-05-13T22:50:51.258Z",
"created": "2017-05-31T21:30:44.329Z",
"description": "Adversaries may abuse Windows Management Instrumentation (WMI) to achieve execution.",
"x_mitre_platforms": [
"Windows"
],
"external_references": [
{
"url": "https://attack.mitre.org/techniques/T1047",
"source_name": "mitre-attack",
"external_id": "T1047"
},
{
"description": "Wikipedia. (2016, June 12). Server Message Block. Retrieved June 12, 2016.",
"source_name": "Wikipedia SMB",
"url": "https://en.wikipedia.org/wiki/Server_Message_Block"
},
{
"description": "Microsoft. (2003, March 28). What Is RPC?. Retrieved June 12, 2016.",
"source_name": "TechNet RPC",
"url": "https://technet.microsoft.com/en-us/library/cc787851.aspx"
},
],
"kill_chain_phases": [
{
"phase_name": "defense-evasion",
"kill_chain_name": "mitre-attack"
},
{
"phase_name": "privilege-escalation",
"kill_chain_name": "mitre-attack"
}
]
},
'map_result': {
'stixid': 'attack-pattern--01a5a209-b94c-450b-b7f9-946497d91055',
'firstseenbysource': '2017-05-31T21:30:44.329Z',
'killchainphases': ['Defense Evasion', 'Privilege Escalation'],
'modified': "2020-05-13T22:50:51.258Z",
'description': "Adversaries may abuse Windows Management Instrumentation (WMI) to achieve execution.",
'operatingsystemrefs': ['Windows'],
'mitreid': 'T1047',
'publications': [{'link': "https://en.wikipedia.org/wiki/Server_Message_Block",
'title': "Wikipedia. (2016, June 12). Server Message Block. Retrieved June 12, 2016.",
'source': 'Wikipedia SMB'},
{'link': "https://technet.microsoft.com/en-us/library/cc787851.aspx",
'title': 'Microsoft. (2003, March 28). What Is RPC?. Retrieved June 12, 2016.',
"source": 'TechNet RPC'}],
'tags': ['T1047']
},
'indicator': ([{'fields': {'description': 'Adversaries may abuse Windows Management '
'Instrumentation (WMI) to achieve execution.',
'firstseenbysource': '2017-05-31T21:30:44.329Z',
'killchainphases': ['Defense Evasion', 'Privilege Escalation'],
'mitreid': 'T1047',
'modified': '2020-05-13T22:50:51.258Z',
'operatingsystemrefs': ['Windows'],
'publications': [{'link': 'https://en.wikipedia.org/wiki/Server_Message_Block',
'source': 'Wikipedia SMB',
'title': 'Wikipedia. (2016, June 12). Server '
'Message Block. Retrieved June 12, '
'2016.'},
{'link': 'https://technet.microsoft.com/en-us/library/cc787851.aspx',
'source': 'TechNet RPC',
'title': 'Microsoft. (2003, March 28). What Is '
'RPC?. Retrieved June 12, 2016.'}],
'stixid': 'attack-pattern--01a5a209-b94c-450b-b7f9-946497d91055',
'tags': ['T1047']},
'rawJSON': {'created': '2017-05-31T21:30:44.329Z',
'description': 'Adversaries may abuse Windows Management '
'Instrumentation (WMI) to achieve execution.',
'external_references': [{'external_id': 'T1047',
'source_name': 'mitre-attack',
'url': 'https://attack.mitre.org/techniques/T1047'},
{'description': 'Wikipedia. (2016, June '
'12). Server Message '
'Block. Retrieved June '
'12, 2016.',
'source_name': 'Wikipedia SMB',
'url': 'https://en.wikipedia.org/wiki/Server_Message_Block'},
{'description': 'Microsoft. (2003, '
'March 28). What Is '
'RPC?. Retrieved June '
'12, 2016.',
'source_name': 'TechNet RPC',
'url': 'https://technet.microsoft.com/en-us/library/cc787851.aspx'}],
'id': 'attack-pattern--01a5a209-b94c-450b-b7f9-946497d91055',
'kill_chain_phases': [{'kill_chain_name': 'mitre-attack',
'phase_name': 'defense-evasion'},
{'kill_chain_name': 'mitre-attack',
'phase_name': 'privilege-escalation'}],
'modified': '2020-05-13T22:50:51.258Z',
'name': 'ATTACK_PATTERN 1',
'type': 'attack-pattern',
'x_mitre_platforms': ['Windows']},
'score': 2,
'type': 'Attack Pattern',
'value': 'ATTACK_PATTERN 1'}],
[],
{'attack-pattern--01a5a209-b94c-450b-b7f9-946497d91055': 'ATTACK_PATTERN 1'})
}
STIX_ATTACK_PATTERN = {
'response': {
"id": "attack-pattern--01a5a209-b94c-450b-b7f9-946497d91055",
"name": "ATTACK_PATTERN 1",
"type": "attack-pattern",
"modified": "2020-05-13T22:50:51.258Z",
"created": "2017-05-31T21:30:44.329Z",
"description": "Adversaries may abuse Windows Management Instrumentation (WMI) to achieve execution.",
"x_mitre_platforms": [
"Windows"
],
"external_references": [
{
"url": "https://attack.mitre.org/techniques/T1047",
"source_name": "mitre-attack",
"external_id": "T1047"
},
{
"description": "Wikipedia. (2016, June 12). Server Message Block. Retrieved June 12, 2016.",
"source_name": "Wikipedia SMB",
"url": "https://en.wikipedia.org/wiki/Server_Message_Block"
},
{
"description": "Microsoft. (2003, March 28). What Is RPC?. Retrieved June 12, 2016.",
"source_name": "TechNet RPC",
"url": "https://technet.microsoft.com/en-us/library/cc787851.aspx"
},
],
"kill_chain_phases": [
{
"phase_name": "defense-evasion",
"kill_chain_name": "mitre-attack"
},
{
"phase_name": "privilege-escalation",
"kill_chain_name": "mitre-attack"
}
]
},
'map_result': {
'stixid': 'attack-pattern--01a5a209-b94c-450b-b7f9-946497d91055',
'firstseenbysource': '2017-05-31T21:30:44.329Z',
'stixkillchainphases': ['Defense Evasion', 'Privilege Escalation'],
'modified': "2020-05-13T22:50:51.258Z",
'stixdescription': "Adversaries may abuse Windows Management Instrumentation (WMI) to achieve execution.",
'operatingsystemrefs': ['Windows'],
'mitreid': 'T1047',
'publications': [{'link': "https://en.wikipedia.org/wiki/Server_Message_Block",
'title': "Wikipedia. (2016, June 12). Server Message Block. Retrieved June 12, 2016.",
'source': 'Wikipedia SMB'},
{'link': "https://technet.microsoft.com/en-us/library/cc787851.aspx",
'title': 'Microsoft. (2003, March 28). What Is RPC?. Retrieved June 12, 2016.',
"source": 'TechNet RPC'}],
'tags': ['T1047']
}
}
COURSE_OF_ACTION = {
'response': {
"id": "course-of-action--02f0f92a-0a51-4c94-9bda-6437b9a93f22",
"name": "COURSE_OF_ACTION 1",
"type": "course-of-action",
"description": "Prevent files from having a trailing space after the extension.",
"modified": "2019-07-25T11:46:32.010Z",
"external_references": [
{
"external_id": "T1151",
"source_name": "mitre-attack",
"url": "https://attack.mitre.org/mitigations/T1151"
}
],
"created": "2018-10-17T00:14:20.652Z"
},
'map_result': {'description': 'Prevent files from having a trailing space after the '
'extension.',
'firstseenbysource': '2018-10-17T00:14:20.652Z',
'mitreid': 'T1151',
'modified': '2019-07-25T11:46:32.010Z',
'publications': [],
'stixid': 'course-of-action--02f0f92a-0a51-4c94-9bda-6437b9a93f22',
'tags': ['T1151']},
'indicator': ([{'fields': {'description': 'Prevent files from having a trailing space after '
'the extension.',
'firstseenbysource': '2018-10-17T00:14:20.652Z',
'mitreid': 'T1151',
'modified': '2019-07-25T11:46:32.010Z',
'publications': [],
'stixid': 'course-of-action--02f0f92a-0a51-4c94-9bda-6437b9a93f22',
'tags': ['T1151']},
'rawJSON': {'created': '2018-10-17T00:14:20.652Z',
'description': 'Prevent files from having a trailing space '
'after the extension.',
'external_references': [{'external_id': 'T1151',
'source_name': 'mitre-attack',
'url': 'https://attack.mitre.org/mitigations/T1151'}],
'id': 'course-of-action--02f0f92a-0a51-4c94-9bda-6437b9a93f22',
'modified': '2019-07-25T11:46:32.010Z',
'name': 'COURSE_OF_ACTION 1',
'type': 'course-of-action'},
'score': 0,
'type': 'Course of Action',
'value': 'COURSE_OF_ACTION 1'}],
[],
{'course-of-action--02f0f92a-0a51-4c94-9bda-6437b9a93f22': 'COURSE_OF_ACTION 1'})
}
INTRUSION_SET = {
'response': {
"external_references": [
{
"external_id": "G0066",
"source_name": "mitre-attack",
"url": "https://attack.mitre.org/groups/G0066"
},
{
"description": "(Citation: Security Affairs Elderwood Sept 2012)",
"source_name": "Elderwood"
},
],
"description": "[Elderwood](https://attack.mitre.org/groups/G0066)",
"modified": "2021-03-02T22:40:11.097Z",
"created": "2018-04-18T17:59:24.739Z",
"aliases": [
"Elderwood",
"Elderwood Gang",
"Beijing Group",
"Sneaky Panda"
],
"id": "intrusion-set--03506554-5f37-4f8f-9ce4-0e9f01a1b484",
"name": "INTRUSION_SET 1",
"type": "intrusion-set"
},
'map_result': {'aliases': ['Elderwood', 'Elderwood Gang', 'Beijing Group', 'Sneaky Panda'],
'description': '[Elderwood](https://attack.mitre.org/groups/G0066)',
'firstseenbysource': '2018-04-18T17:59:24.739Z',
'mitreid': 'G0066',
'modified': '2021-03-02T22:40:11.097Z',
'publications': [{'link': None,
'source': 'Elderwood',
'title': '(Citation: Security Affairs Elderwood Sept '
'2012)'}],
'stixid': 'intrusion-set--03506554-5f37-4f8f-9ce4-0e9f01a1b484',
'tags': ['G0066']},
"indicator": ([{'fields': {'aliases': ['Elderwood',
'Elderwood Gang',
'Beijing Group',
'Sneaky Panda'],
'description': '[Elderwood](https://attack.mitre.org/groups/G0066)',
'firstseenbysource': '2018-04-18T17:59:24.739Z',
'mitreid': 'G0066',
'modified': '2021-03-02T22:40:11.097Z',
'publications': [{'link': None,
'source': 'Elderwood',
'title': '(Citation: Security Affairs '
'Elderwood Sept 2012)'}],
'stixid': 'intrusion-set--03506554-5f37-4f8f-9ce4-0e9f01a1b484',
'tags': ['G0066']},
'rawJSON': {'aliases': ['Elderwood',
'Elderwood Gang',
'Beijing Group',
'Sneaky Panda'],
'created': '2018-04-18T17:59:24.739Z',
'description': '[Elderwood](https://attack.mitre.org/groups/G0066)',
'external_references': [{'external_id': 'G0066',
'source_name': 'mitre-attack',
'url': 'https://attack.mitre.org/groups/G0066'},
{'description': '(Citation: Security '
'Affairs Elderwood Sept '
'2012)',
'source_name': 'Elderwood'}],
'id': 'intrusion-set--03506554-5f37-4f8f-9ce4-0e9f01a1b484',
'modified': '2021-03-02T22:40:11.097Z',
'name': 'INTRUSION_SET 1',
'type': 'intrusion-set'},
'score': 3,
'type': 'Intrusion Set',
'value': 'INTRUSION_SET 1'}],
[],
{'intrusion-set--03506554-5f37-4f8f-9ce4-0e9f01a1b484': 'INTRUSION_SET 1'})
}
MALWARE = {
'response': {
"description": "[Wiarp](https://attack.mitre.org/software/S0206)",
"external_references": [
{
"external_id": "S0206",
"source_name": "mitre-attack",
"url": "https://attack.mitre.org/software/S0206"
},
{
"description": "Zhou, R. (2012, May 15). Backdoor.Wiarp. Retrieved February 22, 2018.",
"source_name": "Symantec Wiarp May 2012",
"url": "https://www.symantec.com/security_response/writeup.jsp?docid=2012-051606-1005-99"
}
],
"x_mitre_platforms": [
"Windows"
],
"x_mitre_aliases": [
"Wiarp"
],
"modified": "2021-01-06T19:32:28.378Z",
"created": "2018-04-18T17:59:24.739Z",
"labels": [
"malware"
],
"id": "malware--039814a0-88de-46c5-a4fb-b293db21880a",
"name": "MALWARE 1",
"type": "malware"
},
'map_result': {'aliases': ['Wiarp'],
'description': '[Wiarp](https://attack.mitre.org/software/S0206)',
'firstseenbysource': '2018-04-18T17:59:24.739Z',
'mitreid': 'S0206',
'modified': '2021-01-06T19:32:28.378Z',
'operatingsystemrefs': ['Windows'],
'publications': [
{'link': 'https://www.symantec.com/security_response/writeup.jsp?docid=2012-051606-1005-99',
'source': 'Symantec Wiarp May 2012',
'title': 'Zhou, R. (2012, May 15). Backdoor.Wiarp. '
'Retrieved February 22, 2018.'}],
'stixid': 'malware--039814a0-88de-46c5-a4fb-b293db21880a',
'tags': ['S0206', 'malware']},
"indicator": ([{'fields': {'aliases': ['Wiarp'],
'description': '[Wiarp](https://attack.mitre.org/software/S0206)',
'firstseenbysource': '2018-04-18T17:59:24.739Z',
'mitreid': 'S0206',
'modified': '2021-01-06T19:32:28.378Z',
'operatingsystemrefs': ['Windows'],
'publications': [{
'link': 'https://www.symantec.com/security_response/writeup.jsp?docid=2012-051606-1005-99',
'source': 'Symantec Wiarp May 2012',
'title': 'Zhou, R. (2012, May 15). '
'Backdoor.Wiarp. Retrieved February '
'22, 2018.'}],
'stixid': 'malware--039814a0-88de-46c5-a4fb-b293db21880a',
'tags': ['S0206', 'malware']},
'rawJSON': {'created': '2018-04-18T17:59:24.739Z',
'description': '[Wiarp](https://attack.mitre.org/software/S0206)',
'external_references': [{'external_id': 'S0206',
'source_name': 'mitre-attack',
'url': 'https://attack.mitre.org/software/S0206'},
{'description': 'Zhou, R. (2012, May '
'15). Backdoor.Wiarp. '
'Retrieved February 22, '
'2018.',
'source_name': 'Symantec Wiarp May '
'2012',
'url': 'https://www.symantec.com/security_response/writeup.jsp?docid=2012-051606-1005-99'}],
'id': 'malware--039814a0-88de-46c5-a4fb-b293db21880a',
'labels': ['malware'],
'modified': '2021-01-06T19:32:28.378Z',
'name': 'MALWARE 1',
'type': 'malware',
'x_mitre_aliases': ['Wiarp'],
'x_mitre_platforms': ['Windows']},
'score': 3,
'type': 'Malware',
'value': 'MALWARE 1'}],
[],
{'malware--039814a0-88de-46c5-a4fb-b293db21880a': 'MALWARE 1'})
}
STIX_MALWARE = {
'response': {
"description": "[Wiarp](https://attack.mitre.org/software/S0206)",
"external_references": [
{
"external_id": "S0206",
"source_name": "mitre-attack",
"url": "https://attack.mitre.org/software/S0206"
},
{
"description": "Zhou, R. (2012, May 15). Backdoor.Wiarp. Retrieved February 22, 2018.",
"source_name": "Symantec Wiarp May 2012",
"url": "https://www.symantec.com/security_response/writeup.jsp?docid=2012-051606-1005-99"
}
],
"x_mitre_platforms": [
"Windows"
],
"x_mitre_aliases": [
"Wiarp"
],
"modified": "2021-01-06T19:32:28.378Z",
"created": "2018-04-18T17:59:24.739Z",
"labels": [
"malware"
],
"id": "malware--039814a0-88de-46c5-a4fb-b293db21880a",
"name": "MALWARE 1",
"type": "malware"
},
'map_result': {'stixaliases': ['Wiarp'],
'stixdescription': '[Wiarp](https://attack.mitre.org/software/S0206)',
'firstseenbysource': '2018-04-18T17:59:24.739Z',
'mitreid': 'S0206',
'modified': '2021-01-06T19:32:28.378Z',
'operatingsystemrefs': ['Windows'],
'publications': [
{'link': 'https://www.symantec.com/security_response/writeup.jsp?docid=2012-051606-1005-99',
'source': 'Symantec Wiarp May 2012',
'title': 'Zhou, R. (2012, May 15). Backdoor.Wiarp. '
'Retrieved February 22, 2018.'}],
'stixid': 'malware--039814a0-88de-46c5-a4fb-b293db21880a',
'tags': ['S0206', 'malware']}
}
TOOL = {
'response': {
"name": "TOOL 1",
"type": "tool",
"description": "[PowerSploit](https://attack.mitre.org/software/S0194)",
"external_references": [
{
"external_id": "S0194",
"source_name": "mitre-attack",
"url": "https://attack.mitre.org/software/S0194"
}
],
"x_mitre_platforms": [
"Windows"
],
"x_mitre_aliases": [
"PowerSploit"
],
"modified": "2021-02-09T14:00:16.093Z",
"created": "2018-04-18T17:59:24.739Z",
"labels": [
"tool"
],
"id": "tool--13cd9151-83b7-410d-9f98-25d0f0d1d80d"
},
'map_result': {'aliases': ['PowerSploit'],
'description': '[PowerSploit](https://attack.mitre.org/software/S0194)',
'firstseenbysource': '2018-04-18T17:59:24.739Z',
'mitreid': 'S0194',
'modified': '2021-02-09T14:00:16.093Z',
'operatingsystemrefs': ['Windows'],
'publications': [],
'stixid': 'tool--13cd9151-83b7-410d-9f98-25d0f0d1d80d',
'tags': ['S0194', 'tool']},
"indicator": ([{'fields': {'aliases': ['PowerSploit'],
'description': '[PowerSploit](https://attack.mitre.org/software/S0194)',
'firstseenbysource': '2018-04-18T17:59:24.739Z',
'mitreid': 'S0194',
'modified': '2021-02-09T14:00:16.093Z',
'operatingsystemrefs': ['Windows'],
'publications': [],
'stixid': 'tool--13cd9151-83b7-410d-9f98-25d0f0d1d80d',
'tags': ['S0194', 'tool']},
'rawJSON': {'created': '2018-04-18T17:59:24.739Z',
'description': '[PowerSploit](https://attack.mitre.org/software/S0194)',
'external_references': [{'external_id': 'S0194',
'source_name': 'mitre-attack',
'url': 'https://attack.mitre.org/software/S0194'}],
'id': 'tool--13cd9151-83b7-410d-9f98-25d0f0d1d80d',
'labels': ['tool'],
'modified': '2021-02-09T14:00:16.093Z',
'name': 'TOOL 1',
'type': 'tool',
'x_mitre_aliases': ['PowerSploit'],
'x_mitre_platforms': ['Windows']},
'score': 2,
'type': 'Tool',
'value': 'TOOL 1'}],
[],
{'tool--13cd9151-83b7-410d-9f98-25d0f0d1d80d': 'TOOL 1'})
}
STIX_TOOL = {
'response': {
"name": "TOOL 1",
"type": "tool",
"description": "[PowerSploit](https://attack.mitre.org/software/S0194)",
"external_references": [
{
"external_id": "S0194",
"source_name": "mitre-attack",
"url": "https://attack.mitre.org/software/S0194"
}
],
"x_mitre_platforms": [
"Windows"
],
"x_mitre_aliases": [
"PowerSploit"
],
"modified": "2021-02-09T14:00:16.093Z",
"created": "2018-04-18T17:59:24.739Z",
"labels": [
"tool"
],
"id": "tool--13cd9151-83b7-410d-9f98-25d0f0d1d80d"
},
'map_result': {'stixaliases': ['PowerSploit'],
'stixdescription': '[PowerSploit](https://attack.mitre.org/software/S0194)',
'firstseenbysource': '2018-04-18T17:59:24.739Z',
'mitreid': 'S0194',
'modified': '2021-02-09T14:00:16.093Z',
'operatingsystemrefs': ['Windows'],
'publications': [],
'stixid': 'tool--13cd9151-83b7-410d-9f98-25d0f0d1d80d',
'tags': ['S0194', 'tool']},
}
# Maps STIX object IDs to display names; used to resolve the
# source_ref / target_ref IDs of relationship fixtures into entity names.
ID_TO_NAME = {
    "attack-pattern--707399d6-ab3e-4963-9315-d9d3818cd6a0": "entity b",
    "malware--6a21e3a4-5ffe-4581-af9a-6a54c7536f44": "entity a"
}
# Fixture for a STIX relationship object: 'response' is the raw object as
# returned by the feed; 'indicator' is the expected entity-to-entity
# relationship built from it (refs resolved to names, cf. ID_TO_NAME).
RELATION = {
    'response': {
        "type": "relationship",
        "description": " [Explosive](https://attack.mitre.org/software/S0569)",
        "source_ref": "malware--6a21e3a4-5ffe-4581-af9a-6a54c7536f44",
        "created": "2021-04-27T01:56:35.810Z",
        "relationship_type": "uses",
        "modified": "2021-04-27T01:56:35.810Z",
        "target_ref": "attack-pattern--707399d6-ab3e-4963-9315-d9d3818cd6a0",
    },
    "indicator": [{'entityA': 'entity a',
                   'entityAFamily': 'Indicator',
                   'entityAType': 'Malware',
                   'entityB': 'entity b',
                   'entityBFamily': 'Indicator',
                   'entityBType': 'Attack Pattern',
                   'fields': {'description': ' '
                                             '[Explosive](https://attack.mitre.org/software/S0569)',
                              'firstseenbysource': '2021-04-27T01:56:35.810Z',
                              'lastseenbysource': '2021-04-27T01:56:35.810Z'},
                   'name': 'uses',
                   'reverseName': 'used-by',
                   'type': 'IndicatorToIndicator'}]
}
| {
"repo_name": "demisto/content",
"path": "Packs/FeedMitreAttackv2/Integrations/FeedMitreAttackv2/test_data/mitre_test_data.py",
"copies": "1",
"size": "28200",
"license": "mit",
"hash": 5206370522935797000,
"line_mean": 50.9337016575,
"line_max": 149,
"alpha_frac": 0.4203900709,
"autogenerated": false,
"ratio": 3.8688434627520922,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.47892335336520925,
"avg_score": null,
"num_lines": null
} |
# String-formatting tutorial snippets.  NOTE(review): this file mixes
# Python 2 print statements with Python 3 print() calls, so it is not
# runnable as-is under either interpreter; it is reference material.
attack_power = 100

# Print keyword only available python 2 and lower.
print "Attack Power:", attack_power
print "Attack Power: {} points".format(attack_power)
print "Attack Power: {attack_power} points".format(attack_power=100)
print "Attack Power: %s" % (attack_power)  # python 1 and 2... won't work on 3

# Print as a built-in function
print("Attack Power:", attack_power)
print("Attack Power: {} points".format(attack_power))
# positional fields: {0} is the 0th argument, {1} the 1st, ...
# NOTE(review): `percent_to_hit` is never defined and the bare `...` is
# illustrative pseudo-code — this line would not run as written.
print("Attack Power: {0} points".format(attack_power, percent_to_hit, ...))
print("Attack Power: {attack_power} points".format(attack_power=100))

print "Attack Power".lower()       # attack power
print "Attack Power".upper()       # ATTACK POWER
print "Attack Power".capitalize()  # Attack power
# NOTE(review): str.join takes a single iterable, so this call raises
# TypeError; it would need ":".join(["Attack Power", "..."]).
print ":".join("Attack Power", "{}".format(attack_power))  # Attack Power : 100
print "Attack " + "Power"  # Attack Power

for character in "Attack Power":
    print character  # A t t a c k P o w e r <--- each on its own line
# A
# t
# t

ap_string = "Attack Power"
if "attack power" == ap_string.lower():
    pass
| {
"repo_name": "LearnPythonAndMakeGames/BasicPythonTutorialSeries",
"path": "basic_tutorials/strings.py",
"copies": "1",
"size": "1126",
"license": "apache-2.0",
"hash": 2859369721345135000,
"line_mean": 33.1212121212,
"line_max": 79,
"alpha_frac": 0.6394316163,
"autogenerated": false,
"ratio": 3.254335260115607,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43937668764156074,
"avg_score": null,
"num_lines": null
} |
# Attacks when we multiple RSA Public keys available
# * Attack 1: Same e. Different N. (N_i, N_j) != 1 for some i, j
import sys
import daedmath
class MultiKey:
def __init__(self, keys):
if not keys:
print "ERROR: No Keys Loaded"
sys.exit(2)
self.keys = keys
def hack(self):
# First Attack: Check if any two keys have (N_i, N_j) != 1
print "[*] Factoring of 2 keys using GCD"
flag = False
for i in range(len(self.keys)):
for j in range(i + 1, len(self.keys)):
gcd = daedmath.euclid(self.keys[i].n, self.keys[j].n)
if gcd != 1:
flag = True
print " [*] Success"
for k in [i, j]:
self.keys[k].p = gcd
self.keys[k].q = self.keys[k].n / gcd
self.keys[k].phin = self.keys[k].n + 1 - self.keys[k].p - self.keys[k].q
self.keys[k].d = self.keys[k].phin / self.keys[k].e
self.keys[k].MakePrivateKey()
if not flag:
print "[X] Failed"
| {
"repo_name": "sushant94/daedalus",
"path": "rsa_multiple_keys.py",
"copies": "1",
"size": "1152",
"license": "mit",
"hash": -3723282704392148000,
"line_mean": 36.1612903226,
"line_max": 96,
"alpha_frac": 0.4696180556,
"autogenerated": false,
"ratio": 3.4491017964071857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9406976420015167,
"avg_score": 0.002348686398403725,
"num_lines": 31
} |
"""Attempt #1 at organizing neuron models
- We specify types of neurons using subclasses of Neuron
- This includes things like LIF vs HH and also Float vs Fixed, Rate vs Spiking
- We build a NeuronPool object which actually has code for running neurons
- We keep a list of known Neuron types around so if we're asked for just
a Rate neuron, we can pick the first on on the list that matches
"""
import numpy as np
"""
Neuron type specifications
"""
class Neuron(object):
    """Base marker class for neuron type specifications."""
    pass

class LIF(Neuron):
    """Leaky integrate-and-fire parameters (times in seconds)."""
    def __init__(self, tau_rc=0.02, tau_ref=0.002):
        # tau_rc: membrane RC time constant; tau_ref: refractory period.
        self.tau_rc = tau_rc
        self.tau_ref = tau_ref

class Rate(Neuron):
    """Marker: rate-mode neurons."""
    pass

class Spiking(Neuron):
    """Marker: spiking neurons."""
    pass

class Fixed(Neuron):
    """Marker: fixed-point arithmetic implementation."""
    pass

class Izhikevich(Neuron):
    """Izhikevich model parameters."""
    def __init__(self, a=0.02, b=0.2, c=-65, d=8):
        self.a = a
        self.b = b
        self.c = c
        self.d = d
"""
Base class for neuron pools
Pass in a list of neuron_types to set parameters
"""
class NeuronPool:
    """Base class for neuron pools.

    Copies the public instance attributes of each neuron-type object in
    neuron_types onto self (e.g. tau_rc, tau_ref), then calls make() to
    allocate per-neuron state.  Subclasses implement make() and step().
    """
    def __init__(self, n_neurons, neuron_types=None):
        if neuron_types is None:
            # NOTE(review): subclasses in this file declare `neuron_type`
            # (singular); no `neuron_types` class attribute is defined
            # anywhere visible, so this fallback would raise
            # AttributeError — confirm intent.
            neuron_types = self.neuron_types
        for n in neuron_types:
            # Copy every non-underscore attribute from the spec instance.
            for key, value in n.__dict__.items():
                if not key.startswith('_'):
                    setattr(self, key, value)
        self.make(n_neurons)
    def make(self, n_neurons):
        raise NotImplementedError('NeuronPools must provide "make"')
    def step(self, dt, J):
        raise NotImplementedError('NeuronPools must provide "step"')
"""
Various neuron models
"""
class LIFRatePool(NeuronPool):
    """LIF neurons simulated in rate mode (stateless between steps)."""
    neuron_type = [LIF, Rate]
    def make(self, n_neurons):
        # Rate mode keeps no per-neuron state; nothing to allocate.
        pass
    def step(self, dt, J):
        # LIF steady-state rate 1/(tau_ref + tau_rc*ln(1 + 1/(J-1))),
        # valid for J > 1; neurons at or below threshold fire at 0.
        # Silence divide warnings for the J <= 1 entries fixed up below.
        old = np.seterr(divide='ignore', invalid='ignore')
        try:
            r = 1.0 / (self.tau_ref + self.tau_rc * np.log1p(1.0 / (J-1)))
            r[J <= 1] = 0
        finally:
            # Restore the caller's floating-point error settings.
            np.seterr(**old)
        return r * dt  # multiply by dt to do rate per timestep
class LIFSpikingPool(NeuronPool):
    """Spiking LIF neurons with floating-point state."""
    neuron_type = [LIF, Spiking]
    def make(self, n_neurons):
        # Membrane voltage (threshold normalized to 1) and remaining
        # refractory time (seconds), one entry per neuron.
        self.voltage = np.zeros(n_neurons)
        self.refractory_time = np.zeros(n_neurons)
    def step(self, dt, J):
        # Euler step of dV/dt = (J - V) / tau_rc.
        dv = (dt / self.tau_rc) * (J - self.voltage)
        self.voltage += dv
        self.voltage[self.voltage < 0] = 0
        self.refractory_time -= dt
        # Scale the voltage for partially refractory neurons: factor is
        # 0 while fully refractory, 1 once fully recovered.
        self.voltage *= (1-self.refractory_time / dt).clip(0, 1)
        spiked = self.voltage > 1
        # Estimate where within the step each spike occurred so the
        # refractory period starts at the spike time, not at step end.
        overshoot = (self.voltage[spiked > 0] - 1) / dv[spiked > 0]
        spiketime = dt * (1 - overshoot)
        self.voltage[spiked > 0] = 0
        self.refractory_time[spiked > 0] = self.tau_ref + spiketime
        return spiked
class LIFFixedPool(NeuronPool):
    """Spiking LIF neurons in 16.16 fixed-point arithmetic.

    Voltages are integers scaled by 0x10000, so the firing threshold of
    1.0 is stored as 0x10000.
    """
    neuron_type = [LIF, Spiking, Fixed]
    def make(self, n_neurons):
        # BUG FIX: numpy dtype codes are byte-counted ('i4') or named
        # ('int32'); 'i32' raises "data type not understood", so these
        # arrays could never be allocated.
        self.voltage = np.zeros(n_neurons, dtype='int32')
        # 'u8' is an 8-byte unsigned integer; refractory_time counts
        # remaining refractory *steps*, not seconds.
        self.refractory_time = np.zeros(n_neurons, dtype='u8')
        self.dt = None
        # 16-bit LFSR used as a cheap pseudo-random source below.
        self.lfsr = 1
    def step(self, dt, J):
        if self.dt != dt:
            # Cache dt-derived constants; recomputed only when dt changes.
            self.dt = dt
            self.dt_over_tau_rc = int(dt * 0x10000 / self.tau_rc)
            self.ref_steps = int(self.tau_ref / dt)
        J = np.asarray(J * 0x10000, dtype='int32')
        # dv = (J - V) * dt/tau_rc in 16.16 fixed point.
        dv = ((J - self.voltage) * self.dt_over_tau_rc) >> 16
        dv[self.refractory_time > 0] = 0
        self.refractory_time[self.refractory_time > 0] -= 1
        self.voltage += dv
        self.voltage[self.voltage < 0] = 0
        spiked = self.voltage > 0x10000
        self.refractory_time[spiked > 0] = self.ref_steps
        # randomly adjust the refractory period to account for overshoot
        for i in np.where(spiked > 0)[0]:
            p = ((self.voltage[i] - 0x10000) << 16) / dv[i]
            if self.lfsr < p:
                self.refractory_time[i] -= 1
            # Galois LFSR update (taps 0xB400).
            self.lfsr = (self.lfsr >> 1) ^ (-(self.lfsr & 0x1) & 0xB400)
        self.voltage[spiked > 0] = 0
        return spiked
class IzhikevichPool(NeuronPool):
    """Izhikevich model neurons.

    v is the membrane potential, u the recovery variable; the model's
    equations are per-millisecond, hence the *1000 factor applied to
    the second-based dt used elsewhere in this file.
    """
    neuron_type = [Izhikevich, Spiking]
    def make(self, n_neurons):
        # Start every neuron at the reset potential c, with matching u.
        self.v = np.zeros(n_neurons) + self.c
        self.u = self.b * self.v
    def step(self, dt, J):
        # dv/dt = 0.04 v^2 + 5 v + 140 - u + J; du/dt = a (b v - u).
        dv = (0.04 * self.v ** 2 + 5 * self.v + 140 - self.u + J) * 1000
        du = (self.a * (self.b * self.v - self.u)) * 1000
        self.v += dv * dt
        self.u += du * dt
        spiked = self.v >= 30
        # Spike reset: v <- c, u <- u + d.
        self.v[spiked > 0] = self.c
        self.u[spiked > 0] = self.u[spiked > 0] + self.d
        return spiked
"""
List of known neuron models, in order of preference
"""
# Registered pool implementations in order of preference: create()
# returns the first entry whose neuron_type covers all requested types.
neuron_models = [
    LIFSpikingPool,
    LIFRatePool,
    LIFFixedPool,
    IzhikevichPool,
]
"""
Create a pool of neurons, given the required type specifications
"""
import inspect
def create(n_neurons, neuron_type):
    """Build a NeuronPool of n_neurons matching the given type specs.

    neuron_type may be a single Neuron class/instance or a list of them;
    classes are instantiated with default parameters.  The first model in
    neuron_models whose neuron_type list contains every requested type's
    class is returned.

    Raises Exception if no registered model matches.
    """
    # Accept a bare spec as well as a list of specs.
    try:
        len(neuron_type)
    except TypeError:
        neuron_type = [neuron_type]
    # Normalize classes to instances.  FIX: build a new list instead of
    # writing back into the caller's list (the original mutated its
    # argument in place), and avoid shadowing the builtin `type`.
    specs = [spec() if inspect.isclass(spec) else spec
             for spec in neuron_type]
    # Pick the first registered model compatible with every spec.
    for model in neuron_models:
        for spec in specs:
            if spec.__class__ not in model.neuron_type:
                break
        else:
            return model(n_neurons, specs)
    raise Exception('Could not find suitable neuron model')
if __name__ == '__main__':
    # Demo: build one pool per implementation and plot tuning curves.
    spiking = create(100, [LIF, Spiking])
    rate = create(100, [LIF, Rate])
    fixed = create(100, [LIF, Fixed])
    iz = create(100, [Izhikevich])
    #iz = create(100, [Izhikevich(a=0.02, b=0.2, c=-50, d=2)])

    # 100 neurons, each driven by a different constant input current J,
    # simulated for T seconds at timestep dt.
    J = np.linspace(-2, 10, 100)
    dt = 0.001
    T = 1

    spiking_data = []
    rate_data = []
    iz_data = []
    fixed_data = []
    v = []
    for i in range(int(T/dt)):
        spiking_data.append(spiking.step(dt, J))
        rate_data.append(rate.step(dt, J))
        iz_data.append(iz.step(dt, J))
        fixed_data.append(fixed.step(dt, J))
        # Trace the last fixed-point neuron's voltage for the lower plot.
        v.append(fixed.voltage[-1])

    # Summed response over the run divided by T gives the tuning curve.
    rate_tuning = np.sum(rate_data, axis=0)/T
    spiking_tuning = np.sum(spiking_data, axis=0)/T
    iz_tuning = np.sum(iz_data, axis=0)/T
    fixed_tuning = np.sum(fixed_data, axis=0)/T

    import pylab
    pylab.subplot(2, 1, 1)
    pylab.plot(J, rate_tuning)
    pylab.plot(J, spiking_tuning)
    pylab.plot(J, iz_tuning)
    pylab.plot(J, fixed_tuning, linewidth=4)
    pylab.subplot(2, 1, 2)
    pylab.plot(v)
    #pylab.plot(np.array(fixed_data)[:,-1])
    pylab.show()
| {
"repo_name": "ctn-waterloo/neuron_models",
"path": "v1-attributes.py",
"copies": "1",
"size": "6523",
"license": "mit",
"hash": -2887944705496027000,
"line_mean": 23.7083333333,
"line_max": 78,
"alpha_frac": 0.5756553733,
"autogenerated": false,
"ratio": 3.113603818615752,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41892591919157524,
"avg_score": null,
"num_lines": null
} |
"""Attempt #2 at organizing neuron models
- We specify types of neurons using subclasses of Neuron
- This includes things like LIF vs HH and also Float vs Fixed, Rate vs Spiking
- We build a NeuronPool object which actually has code for running neurons
- We keep a list of known Neuron types around so if we're asked for just
a Rate neuron, we can pick the first on on the list that matches
- Configuration of parameters is done via descriptors
"""
import numpy as np
import weakref
"""
Neuron type specifications
"""
class FloatParameter(object):
    """Descriptor holding a per-instance float parameter.

    Values live in a WeakKeyDictionary keyed by the owning instance, so
    parameter storage never keeps an instance alive.  Reading from an
    instance with no stored value yields ``default``; assignments are
    checked against the optional ``min``/``max`` bounds and coerced to
    float before being stored.
    """
    def __init__(self, default, min=None, max=None):
        self.data = weakref.WeakKeyDictionary()
        self.default = float(default)
        self.min = min
        self.max = max

    def __get__(self, instance, owner):
        # Fall back to the shared default when no value was ever set.
        return self.data.get(instance, self.default)

    def __set__(self, instance, value):
        # Enforce each optional bound before storing.
        if self.min is not None:
            if value < self.min:
                raise AttributeError('parameter value must be >=%g' % self.min)
        if self.max is not None:
            if value > self.max:
                raise AttributeError('parameter value must be <=%g' % self.max)
        self.data[instance] = float(value)
class Neuron(object):
    """Base class for neuron type specifications.

    Keyword arguments become attribute assignments; __setattr__ rejects
    any name not already known on the instance (i.e. not a declared
    parameter), so typos in parameter names fail loudly at construction.
    """
    def __init__(self, **kwargs):
        for name in kwargs:
            setattr(self, name, kwargs[name])

    def __setattr__(self, key, value):
        if key in dir(self):
            super(Neuron, self).__setattr__(key, value)
        else:
            raise AttributeError('Unknown parameter "%s"' % key)
class LIF(Neuron):
    """Leaky integrate-and-fire parameters (times in seconds)."""
    # tau_rc: membrane RC time constant; tau_ref: refractory period.
    tau_rc = FloatParameter(0.02, min=0)
    tau_ref = FloatParameter(0.002, min=0)

class Rate(Neuron):
    """Marker: rate-mode neurons."""
    pass

class Spiking(Neuron):
    """Marker: spiking neurons."""
    pass

class Fixed(Neuron):
    """Marker: fixed-point arithmetic implementation."""
    pass

class Izhikevich(Neuron):
    """Izhikevich model parameters."""
    a = FloatParameter(0.02)
    b = FloatParameter(0.2)
    c = FloatParameter(-65)
    d = FloatParameter(8)
"""
Base class for neuron pools
Pass in a list of neuron_types to set parameters
"""
class NeuronPool:
    """Base class for neuron pools.

    Copies every public attribute of each neuron-type instance in
    neuron_types onto self — using dir()/getattr so descriptor-backed
    parameters (FloatParameter) are picked up too — then calls make().
    Subclasses implement make() and step().
    """
    def __init__(self, n_neurons, neuron_types=None):
        if neuron_types is None:
            # NOTE(review): subclasses in this file declare `neuron_type`
            # (singular); no `neuron_types` attribute is defined anywhere
            # visible, so this fallback would raise AttributeError —
            # confirm intent.
            neuron_types = self.neuron_types
        for n in neuron_types:
            for key in dir(n):
                if not key.startswith('_'):
                    setattr(self, key, getattr(n, key))
        self.make(n_neurons)
    def make(self, n_neurons):
        raise NotImplementedError('NeuronPools must provide "make"')
    def step(self, dt, J):
        raise NotImplementedError('NeuronPools must provide "step"')
"""
Various neuron models
"""
class LIFRatePool(NeuronPool):
    """LIF neurons simulated in rate mode (stateless between steps)."""
    neuron_type = [LIF, Rate]
    def make(self, n_neurons):
        # Rate mode keeps no per-neuron state; nothing to allocate.
        pass
    def step(self, dt, J):
        # LIF steady-state rate 1/(tau_ref + tau_rc*ln(1 + 1/(J-1))),
        # valid for J > 1; neurons at or below threshold fire at 0.
        # Silence divide warnings for the J <= 1 entries fixed up below.
        old = np.seterr(divide='ignore', invalid='ignore')
        try:
            r = 1.0 / (self.tau_ref + self.tau_rc * np.log1p(1.0 / (J-1)))
            r[J <= 1] = 0
        finally:
            # Restore the caller's floating-point error settings.
            np.seterr(**old)
        return r * dt  # multiply by dt to do rate per timestep
class LIFSpikingPool(NeuronPool):
    """Spiking LIF neurons with floating-point state."""
    neuron_type = [LIF, Spiking]
    def make(self, n_neurons):
        # Membrane voltage (threshold normalized to 1) and remaining
        # refractory time (seconds), one entry per neuron.
        self.voltage = np.zeros(n_neurons)
        self.refractory_time = np.zeros(n_neurons)
    def step(self, dt, J):
        # Euler step of dV/dt = (J - V) / tau_rc.
        dv = (dt / self.tau_rc) * (J - self.voltage)
        self.voltage += dv
        self.voltage[self.voltage < 0] = 0
        self.refractory_time -= dt
        # Scale the voltage for partially refractory neurons: factor is
        # 0 while fully refractory, 1 once fully recovered.
        self.voltage *= (1-self.refractory_time / dt).clip(0, 1)
        spiked = self.voltage > 1
        # Estimate where within the step each spike occurred so the
        # refractory period starts at the spike time, not at step end.
        overshoot = (self.voltage[spiked > 0] - 1) / dv[spiked > 0]
        spiketime = dt * (1 - overshoot)
        self.voltage[spiked > 0] = 0
        self.refractory_time[spiked > 0] = self.tau_ref + spiketime
        return spiked
class LIFFixedPool(NeuronPool):
    """Spiking LIF neurons in 16.16 fixed-point arithmetic.

    Voltages are integers scaled by 0x10000, so the firing threshold of
    1.0 is stored as 0x10000.
    """
    neuron_type = [LIF, Spiking, Fixed]
    def make(self, n_neurons):
        # BUG FIX: numpy dtype codes are byte-counted ('i4') or named
        # ('int32'); 'i32' raises "data type not understood", so these
        # arrays could never be allocated.
        self.voltage = np.zeros(n_neurons, dtype='int32')
        # 'u8' is an 8-byte unsigned integer; refractory_time counts
        # remaining refractory *steps*, not seconds.
        self.refractory_time = np.zeros(n_neurons, dtype='u8')
        self.dt = None
        # 16-bit LFSR used as a cheap pseudo-random source below.
        self.lfsr = 1
    def step(self, dt, J):
        if self.dt != dt:
            # Cache dt-derived constants; recomputed only when dt changes.
            self.dt = dt
            self.dt_over_tau_rc = int(dt * 0x10000 / self.tau_rc)
            self.ref_steps = int(self.tau_ref / dt)
        J = np.asarray(J * 0x10000, dtype='int32')
        # dv = (J - V) * dt/tau_rc in 16.16 fixed point.
        dv = ((J - self.voltage) * self.dt_over_tau_rc) >> 16
        dv[self.refractory_time > 0] = 0
        self.refractory_time[self.refractory_time > 0] -= 1
        self.voltage += dv
        self.voltage[self.voltage < 0] = 0
        spiked = self.voltage > 0x10000
        self.refractory_time[spiked > 0] = self.ref_steps
        # randomly adjust the refractory period to account for overshoot
        for i in np.where(spiked > 0)[0]:
            p = ((self.voltage[i] - 0x10000) << 16) / dv[i]
            if self.lfsr < p:
                self.refractory_time[i] -= 1
            # Galois LFSR update (taps 0xB400).
            self.lfsr = (self.lfsr >> 1) ^ (-(self.lfsr & 0x1) & 0xB400)
        self.voltage[spiked > 0] = 0
        return spiked
class IzhikevichPool(NeuronPool):
    """Izhikevich model neurons.

    v is the membrane potential, u the recovery variable; the model's
    equations are per-millisecond, hence the *1000 factor applied to
    the second-based dt used elsewhere in this file.
    """
    neuron_type = [Izhikevich, Spiking]
    def make(self, n_neurons):
        # Start every neuron at the reset potential c, with matching u.
        self.v = np.zeros(n_neurons) + self.c
        self.u = self.b * self.v
    def step(self, dt, J):
        # dv/dt = 0.04 v^2 + 5 v + 140 - u + J; du/dt = a (b v - u).
        dv = (0.04 * self.v ** 2 + 5 * self.v + 140 - self.u + J) * 1000
        du = (self.a * (self.b * self.v - self.u)) * 1000
        self.v += dv * dt
        self.u += du * dt
        spiked = self.v >= 30
        # Spike reset: v <- c, u <- u + d.
        self.v[spiked > 0] = self.c
        self.u[spiked > 0] = self.u[spiked > 0] + self.d
        return spiked
"""
List of known neuron models, in order of preference
"""
# Registered pool implementations in order of preference: create()
# returns the first entry whose neuron_type covers all requested types.
neuron_models = [
    LIFSpikingPool,
    LIFRatePool,
    LIFFixedPool,
    IzhikevichPool,
]
"""
Create a pool of neurons, given the required type specifications
"""
import inspect
def create(n_neurons, neuron_type):
    """Build a NeuronPool matching the requested neuron type(s).

    `neuron_type` may be a single Neuron class/instance or a sequence of
    them.  The first entry of `neuron_models` whose `neuron_type` list
    covers every requested type is instantiated.

    Raises Exception if no registered model satisfies the request.
    """
    # make sure it's a list
    try:
        len(neuron_type)
    except TypeError:
        neuron_type = [neuron_type]
    # copy, so the caller's sequence is not mutated when classes are
    # replaced by instances below (also makes tuples acceptable)
    neuron_type = list(neuron_type)
    # make sure elements in the list are instances, not classes
    # (loop variable renamed so the builtin `type` is not shadowed)
    for i, spec in enumerate(neuron_type):
        if inspect.isclass(spec):
            neuron_type[i] = spec()
    # look through the list of neuron models to see if we can find a match
    for model in neuron_models:
        for spec in neuron_type:
            if spec.__class__ not in model.neuron_type:
                break
        else:
            return model(n_neurons, neuron_type)
    raise Exception('Could not find suitable neuron model')
if __name__ == '__main__':
    # Demo: build one pool per specification and compare tuning curves.
    spiking = create(100, [LIF, Spiking])
    rate = create(100, [LIF, Rate])
    fixed = create(100, [LIF, Fixed])
    iz = create(100, [Izhikevich])
    #iz = create(100, [Izhikevich(a=0.02, b=0.2, c=-50, d=2)])
    # sweep 100 input currents and simulate T seconds of activity
    J = np.linspace(-2, 10, 100)
    dt = 0.001
    T = 1
    spiking_data = []
    rate_data = []
    iz_data = []
    fixed_data = []
    v = []
    for i in range(int(T/dt)):
        spiking_data.append(spiking.step(dt, J))
        rate_data.append(rate.step(dt, J))
        iz_data.append(iz.step(dt, J))
        fixed_data.append(fixed.step(dt, J))
        v.append(fixed.voltage[-1])  # membrane trace of one fixed-point neuron
    # average activity over the run -> tuning curves
    rate_tuning = np.sum(rate_data, axis=0)/T
    spiking_tuning = np.sum(spiking_data, axis=0)/T
    iz_tuning = np.sum(iz_data, axis=0)/T
    fixed_tuning = np.sum(fixed_data, axis=0)/T
    import pylab
    pylab.subplot(2, 1, 1)
    pylab.plot(J, rate_tuning)
    pylab.plot(J, spiking_tuning)
    pylab.plot(J, iz_tuning)
    pylab.plot(J, fixed_tuning, linewidth=4)
    pylab.subplot(2, 1, 2)
    pylab.plot(v)
    #pylab.plot(np.array(fixed_data)[:,-1])
    pylab.show()
| {
"repo_name": "ctn-waterloo/neuron_models",
"path": "v2-parameters.py",
"copies": "1",
"size": "7491",
"license": "mit",
"hash": 3800150249164001300,
"line_mean": 24.7422680412,
"line_max": 78,
"alpha_frac": 0.5857695902,
"autogenerated": false,
"ratio": 3.221935483870968,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9279856319653448,
"avg_score": 0.005569750883504087,
"num_lines": 291
} |
"""Attempt #3 at organizing neuron models
- We specify types of neurons using subclasses of Neuron
- This includes things like LIF vs HH and also Float vs Fixed, Rate vs Spiking
- We build a NeuronPool object which actually has code for running neurons
- We keep a list of known Neuron types around so if we're asked for just
  a Rate neuron, we can pick the first one on the list that matches
- Configuration of parameters is done via descriptors
- NeuronPools use multiple inheritance off neuron types
- build() step is delayed until after constructor, as we don't want that
to happen until build time
"""
import numpy as np
import weakref
"""
Neuron type specifications
"""
class FloatParameter(object):
    """Data descriptor for a float parameter with optional bounds.

    Values are stored per owner instance in a WeakKeyDictionary, so the
    descriptor does not keep owner instances alive.
    """
    def __init__(self, default, min=None, max=None):
        self.default = float(default)
        self.min = min
        self.max = max
        self.data = weakref.WeakKeyDictionary()
    def __get__(self, instance, owner):
        # Class-level access (instance is None) must return the default:
        # WeakKeyDictionary cannot take None as a key (this guard exists
        # in later revisions of this descriptor).
        if instance is None:
            return self.default
        return self.data.get(instance, self.default)
    def __set__(self, instance, value):
        if self.min is not None and value < self.min:
            raise AttributeError('parameter value must be >=%g' % self.min)
        if self.max is not None and value > self.max:
            raise AttributeError('parameter value must be <=%g' % self.max)
        self.data[instance] = float(value)
class Neuron(object):
    """Base class for neuron type markers; rejects unknown parameters."""
    def __init__(self, **kwargs):
        self._allow_new_attributes = False
        for name in kwargs:
            setattr(self, name, kwargs[name])
    def __setattr__(self, key, value):
        permitted = (key.startswith('_')
                     or self._allow_new_attributes
                     or key in dir(self))
        if not permitted:
            raise AttributeError('Unknown parameter "%s"' % key)
        super(Neuron, self).__setattr__(key, value)
class LIF(Neuron):
    # Leaky integrate-and-fire parameters: membrane time constant and
    # absolute refractory period (seconds).
    tau_rc = FloatParameter(0.02, min=0)
    tau_ref = FloatParameter(0.002, min=0)
class Rate(Neuron):
    # Marker: rate-based (non-spiking) implementation requested.
    pass
class Spiking(Neuron):
    # Marker: spiking implementation requested.
    pass
class Fixed(Neuron):
    # Marker: fixed-point arithmetic implementation requested.
    pass
class Izhikevich(Neuron):
    # Izhikevich model parameters.
    a = FloatParameter(0.02)
    b = FloatParameter(0.2)
    c = FloatParameter(-65)
    d = FloatParameter(8)
"""
Base class for neuron pools
Pass in a list of neuron_types to set parameters
"""
class NeuronPool(Neuron):
    """Base class for runnable pools; copies every public parameter from
    the given neuron type instances onto the pool instance."""
    def __init__(self, neuron_types=None):
        self._allow_new_attributes = False
        # the old default of None crashed the loop below; treat it as
        # "no type specifications"
        if neuron_types is None:
            neuron_types = []
        for n in neuron_types:
            for key in dir(n):
                if not key.startswith('_'):
                    setattr(self, key, getattr(n, key))
        self._allow_new_attributes = True
    def build(self, n_neurons):
        # required override is build(); the old message said "make"
        raise NotImplementedError('NeuronPools must provide "build"')
    def step(self, dt, J):
        raise NotImplementedError('NeuronPools must provide "step"')
"""
Various neuron models
"""
class LIFRatePool(NeuronPool, LIF, Rate):
    """Analytic LIF rate response; stateless, so build() does nothing."""
    def build(self, n_neurons):
        pass
    def step(self, dt, J):
        # Silence divide/log warnings: any entry with J <= 1 is
        # overwritten with zero below anyway.
        err_state = np.seterr(divide='ignore', invalid='ignore')
        try:
            rate = 1.0 / (self.tau_ref + self.tau_rc * np.log1p(1.0 / (J-1)))
            rate[J <= 1] = 0
        finally:
            np.seterr(**err_state)
        # scale by dt so the return value is spikes-per-timestep
        return rate * dt
class LIFSpikingPool(NeuronPool, LIF, Spiking):
    def build(self, n_neurons):
        # membrane voltage (threshold at 1.0) and remaining refractory
        # time (seconds), per neuron
        self.voltage = np.zeros(n_neurons)
        self.refractory_time = np.zeros(n_neurons)
    def step(self, dt, J):
        # forward-Euler membrane update
        dv = (dt / self.tau_rc) * (J - self.voltage)
        self.voltage += dv
        self.voltage[self.voltage < 0] = 0  # voltage cannot go negative
        self.refractory_time -= dt
        # scale dv by the fraction of this timestep spent non-refractory
        self.voltage *= (1-self.refractory_time / dt).clip(0, 1)
        spiked = self.voltage > 1
        # linearly interpolate where inside the timestep each spike fell
        overshoot = (self.voltage[spiked > 0] - 1) / dv[spiked > 0]
        spiketime = dt * (1 - overshoot)
        self.voltage[spiked > 0] = 0
        self.refractory_time[spiked > 0] = self.tau_ref + spiketime
        return spiked
class LIFFixedPool(NeuronPool, LIF, Spiking, Fixed):
    """Fixed-point (16.16) spiking LIF model (0x10000 == 1.0)."""
    def build(self, n_neurons):
        # dtype was 'i32', which numpy rejects ("data type 'i32' not
        # understood"); 'int32' is the valid spelling.
        self.voltage = np.zeros(n_neurons, dtype='int32')
        # 'u8' = 8-byte unsigned integer; counts down refractory steps
        self.refractory_time = np.zeros(n_neurons, dtype='u8')
        self.dt = None   # forces recomputation of dt-derived constants
        self.lfsr = 1    # LFSR state for pseudo-random overshoot rounding
    def step(self, dt, J):
        if self.dt != dt:
            self.dt = dt
            self.dt_over_tau_rc = int(dt * 0x10000 / self.tau_rc)
            self.ref_steps = int(self.tau_ref / dt)
        J = np.asarray(J * 0x10000, dtype='int32')
        dv = ((J - self.voltage) * self.dt_over_tau_rc) >> 16
        dv[self.refractory_time > 0] = 0   # frozen while refractory
        self.refractory_time[self.refractory_time > 0] -= 1
        self.voltage += dv
        self.voltage[self.voltage < 0] = 0
        spiked = self.voltage > 0x10000
        self.refractory_time[spiked > 0] = self.ref_steps
        # randomly adjust the refractory period to account for overshoot
        for i in np.where(spiked > 0)[0]:
            p = ((self.voltage[i] - 0x10000) << 16) / dv[i]
            if self.lfsr < p:
                self.refractory_time[i] -= 1
            self.lfsr = (self.lfsr >> 1) ^ (-(self.lfsr & 0x1) & 0xB400)
        self.voltage[spiked > 0] = 0
        return spiked
class IzhikevichPool(NeuronPool, Izhikevich, Spiking):
    # Izhikevich two-variable spiking model, forward-Euler integration.
    def build(self, n_neurons):
        self.v = np.zeros(n_neurons) + self.c
        self.u = self.b * self.v
    def step(self, dt, J):
        # the *1000 rescales the canonical ms-based equations to seconds
        dv = (0.04 * self.v ** 2 + 5 * self.v + 140 - self.u + J) * 1000
        du = (self.a * (self.b * self.v - self.u)) * 1000
        self.v += dv * dt
        self.u += du * dt
        spiked = self.v >= 30  # spike threshold
        self.v[spiked > 0] = self.c                        # reset potential
        self.u[spiked > 0] = self.u[spiked > 0] + self.d   # recovery kick
        return spiked
"""
List of known neuron models, in order of preference
"""
neuron_models = [
LIFSpikingPool,
LIFRatePool,
LIFFixedPool,
IzhikevichPool,
]
"""
Create a pool of neurons, given the required type specifications
"""
import inspect
def create(n_neurons, neuron_type):
    """Instantiate and build the first registered pool class that
    subclasses every requested neuron type.

    `neuron_type` may be a single type/instance or a sequence of them.
    Raises Exception when no registered model matches.
    """
    # make sure it's a list
    try:
        len(neuron_type)
    except TypeError:
        neuron_type = [neuron_type]
    # copy, so the caller's sequence is not mutated when classes are
    # replaced with instances below
    neuron_type = list(neuron_type)
    # make sure elements in the list are instances, not classes
    # (loop variable renamed so the builtin `type` is not shadowed)
    for i, spec in enumerate(neuron_type):
        if inspect.isclass(spec):
            neuron_type[i] = spec()
    # look through the list of neuron models to see if we can find a match
    for model in neuron_models:
        for spec in neuron_type:
            if not issubclass(model, spec.__class__):
                break
        else:
            n = model(neuron_type)
            n.build(n_neurons)
            return n
    raise Exception('Could not find suitable neuron model')
if __name__ == '__main__':
    # Demo: build each kind of pool and plot its tuning curve.
    default = create(100, [])
    spiking = create(100, [LIF, Spiking])
    rate = create(100, [LIF, Rate])
    fixed = create(100, [LIF, Fixed])
    iz = create(100, [Izhikevich])
    #iz = create(100, [Izhikevich(a=0.02, b=0.2, c=-50, d=2)])
    # sweep 100 input currents and simulate T seconds of activity
    J = np.linspace(-2, 10, 100)
    dt = 0.001
    T = 1
    default_data = []
    spiking_data = []
    rate_data = []
    iz_data = []
    fixed_data = []
    v = []
    for i in range(int(T/dt)):
        default_data.append(default.step(dt, J))
        spiking_data.append(spiking.step(dt, J))
        rate_data.append(rate.step(dt, J))
        iz_data.append(iz.step(dt, J))
        fixed_data.append(fixed.step(dt, J))
        v.append(fixed.voltage[-1])  # membrane trace of one fixed-point neuron
    # average activity over the run -> tuning curves
    default_tuning = np.sum(default_data, axis=0)/T
    spiking_tuning = np.sum(spiking_data, axis=0)/T
    rate_tuning = np.sum(rate_data, axis=0)/T
    iz_tuning = np.sum(iz_data, axis=0)/T
    fixed_tuning = np.sum(fixed_data, axis=0)/T
    import pylab
    pylab.subplot(2, 1, 1)
    pylab.plot(J, default_tuning, label='default')
    pylab.plot(J, spiking_tuning, label='spiking LIF')
    pylab.plot(J, rate_tuning, label='rate LIF')
    pylab.plot(J, iz_tuning, label='Iz')
    pylab.plot(J, fixed_tuning, label='fixed LIF')
    pylab.legend(loc='best')
    pylab.subplot(2, 1, 2)
    pylab.plot(v)
    #pylab.plot(np.array(fixed_data)[:,-1])
    pylab.show()
| {
"repo_name": "ctn-waterloo/neuron_models",
"path": "v3-mixins.py",
"copies": "1",
"size": "7999",
"license": "mit",
"hash": -7366375373327847000,
"line_mean": 26.1152542373,
"line_max": 78,
"alpha_frac": 0.5913239155,
"autogenerated": false,
"ratio": 3.2635658914728682,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4354889806972868,
"avg_score": null,
"num_lines": null
} |
#attempt #3
def treeScanner(directory):
    """Scan a notebook directory tree into Folder/File objects.

    Walks `directory` with os.walk, wraps every directory in a Folder
    and every file in a File, attaches files to their parent Folder,
    and promotes any folder containing a dated markdown entry to an
    'experiment' folder.  Returns the list of Folder objects.
    """
    import os
    import re
    from datetime import date
    from markdown_processor import processMD

    # captures year, month, day from names like 2014-01-31[-title].md
    date_pattern = re.compile(r'([0-9]{4})-([0-9]{2})-([0-9]{2})')

    class Folder(object):
        """This is the folder Class
        attributes:
            self.name - the name in the filesystem for the folder
            self.root - the root filesystem path to the folder
            self.files - a list of files within the folder
            self.entries - a list of dated markdown entries
            self.type - the type of folder; default is 'folder'
            self.parent - the name of the containing folder
        """
        def __init__(self, name, root):
            self.name = name
            self.root = root
            self.files = []
            self.entries = []
            self.type = 'folder'
            self.parent = root.split('/')[-1]

        def makeExperiment(self):
            """Classify this folder's files and mark it as an experiment."""
            print(self.name)
            self.status = 'active'
            self.data = []
            for item in self.files:
                if item.type == 'entry':
                    if item.entry_type == 'dated-entry':
                        self.entries.append(item)
                    elif item.entry_type == 'intro':
                        self.intro = item
                    elif item.entry_type == 'conclusions':
                        self.conclusions = item
                        # a conclusions entry means the experiment is done
                        self.status = 'completed'
                    elif item.entry_type == 'todos':
                        self.todos = item
                else:
                    self.data.append(item)
            # guard on emptiness -- the old `is not None` test was always
            # true (self.entries is always a list) and would have crashed
            # on an empty folder when indexing [0] below
            if self.entries:
                self.entries.sort(key=lambda x: x.date)
                self.start = self.entries[0].date
                if self.status == 'completed':
                    self.end = self.entries[-1].date
                else:
                    self.status = 'active'
            self.type = 'experiment'

        def makeProject(self):
            self.experiments = []
            self.type = 'project'

    class File(object):
        """A single file; markdown files are classified into entry types."""
        def __init__(self, name, root):
            self.name = name
            self.root = root
            self.uri = root + '/' + name
            self.parent = root.split('/')[-1]
            self.entry_type = None
            if '.md' in name:
                self.type = 'entry'
                if name == 'intro.md':
                    self.entry_type = 'intro'
                elif name == 'conclusions.md':
                    self.entry_type = 'conclusions'
                elif name == 'todos.md':
                    self.entry_type = 'todos'
                else:
                    match = date_pattern.search(name)
                    if match:
                        # parse the date from the regex groups: the old
                        # name.strip('.md') stripped the *characters*
                        # '.', 'm', 'd' from both ends and could corrupt
                        # names beginning/ending with those letters
                        year = int(match.group(1))
                        month = int(match.group(2))
                        day = int(match.group(3))
                        self.entry_type = 'dated-entry'
                        self.date = date(year, month, day)
                        self.content = processMD(self.uri)
            elif name == 'index.html':
                self.type = 'notebook_page'
            else:
                self.type = 'file'

    # build Folder objects for every directory and File objects for
    # every file found in the walk
    folders = []
    found_files = []
    for root, dirs, files in os.walk(directory):
        parsed_root = root.split('/')
        folders.append(Folder(name=parsed_root[-1],
                              root='/'.join(parsed_root[:-1])))
        for file_name in files:
            found_files.append(File(name=file_name, root=root))

    # attach each file to every folder whose name matches its parent
    for found in found_files:
        for folder in folders:
            if found.parent == folder.name:
                folder.files.append(found)

    # promote folders that contain at least one dated entry
    for folder in folders:
        for item in folder.files:
            if item.type == 'entry' and item.entry_type == 'dated-entry':
                print('TRUE')
                folder.makeExperiment()
                break
    return folders
#scan the folders for projects and experiments
#scan the files for entries and process them accordingly and put them in
#folders = []
#files = []
#entries = []
#projects = []
#experiments = []
# NOTE(review): module-level side effect -- the scan runs at import time
# with a hard-coded, user-specific path.
treeScanner('/Users/christophermackay/Desktop/Coding/lab-notebook/notebook')
| {
"repo_name": "crmackay/lab-notebook-builder",
"path": "bin/builder/dir_scanner_3.py",
"copies": "1",
"size": "5037",
"license": "mit",
"hash": 1712399003461432000,
"line_mean": 30.6855345912,
"line_max": 76,
"alpha_frac": 0.4586063133,
"autogenerated": false,
"ratio": 4.694315004659832,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5652921317959831,
"avg_score": null,
"num_lines": null
} |
"""Attempt #4 at organizing neuron models
- We specify types of neurons using subclasses of Neuron
- This includes things like LIF vs HH and also Float vs Fixed, Rate vs Spiking
- We build a NeuronPool object which actually has code for running neurons
- We keep a list of known Neuron types around so if we're asked for just
  a Rate neuron, we can pick the first one on the list that matches
- Configuration of parameters is done via descriptors
- NeuronPools use multiple inheritance off neuron types
- build() step is delayed until after constructor, as we don't want that
to happen until build time
- We initially construct a dummy class that can be fleshed out with
the actual neuron model. The dummy class would be made by the
initial call to nengo.Ensemble() and it wouldn't get fleshed out with
an actual backend's neural implementation until build time
"""
import numpy as np
import weakref
import inspect
"""
Neuron type specifications
"""
class FloatParameter(object):
    """Data descriptor for a float parameter with optional bounds.

    Values are stored per owner instance in a WeakKeyDictionary, so the
    descriptor does not keep owner instances alive.
    """
    def __init__(self, default, min=None, max=None):
        self.default = float(default)
        self.min = min
        self.max = max
        self.data = weakref.WeakKeyDictionary()
    def __get__(self, instance, owner):
        # Class-level access (instance is None) must return the default:
        # WeakKeyDictionary cannot take None as a key (this guard exists
        # in later revisions of this descriptor).
        if instance is None:
            return self.default
        return self.data.get(instance, self.default)
    def __set__(self, instance, value):
        if self.min is not None and value < self.min:
            raise AttributeError('parameter value must be >=%g' % self.min)
        if self.max is not None and value > self.max:
            raise AttributeError('parameter value must be <=%g' % self.max)
        self.data[instance] = float(value)
class Neuron(object):
    """Base class for neuron type specifications.

    Non-underscore attribute names are rejected unless already known or
    `_allow_new_attributes` is set.
    """
    def __init__(self, **kwargs):
        self._allow_new_attributes = False
        for param_name in kwargs:
            setattr(self, param_name, kwargs[param_name])
    def __setattr__(self, key, value):
        if not key.startswith('_'):
            if not self._allow_new_attributes and key not in dir(self):
                raise AttributeError('Unknown parameter "%s"' % key)
        super(Neuron, self).__setattr__(key, value)
class LIF(Neuron):
    # Leaky integrate-and-fire parameters: membrane time constant and
    # absolute refractory period (seconds).
    tau_rc = FloatParameter(0.02, min=0)
    tau_ref = FloatParameter(0.002, min=0)
class Rate(Neuron):
    # Marker: rate-based (non-spiking) implementation requested.
    pass
class Spiking(Neuron):
    # Marker: spiking implementation requested.
    pass
class Fixed(Neuron):
    # Marker: fixed-point arithmetic implementation requested.
    pass
class Izhikevich(Neuron):
    # Izhikevich model parameters.
    a = FloatParameter(0.02)
    b = FloatParameter(0.2)
    c = FloatParameter(-65)
    d = FloatParameter(8)
"""
Base class for neuron pools
Pass in a list of neuron_types to set parameters
"""
class NeuronPool(Neuron):
    """Base class for runnable pools; copies every public parameter from
    the given neuron type instances onto the pool instance."""
    def __init__(self, neuron_types):
        self._allow_new_attributes = False
        for n in neuron_types:
            for key in dir(n):
                if not key.startswith('_'):
                    setattr(self, key, getattr(n, key))
        self._allow_new_attributes = True
    def build(self, n_neurons):
        # required override is build(); the old message said "make"
        raise NotImplementedError('NeuronPools must provide "build"')
    def step(self, dt, J):
        raise NotImplementedError('NeuronPools must provide "step"')
"""
This is the class that should be created by an Ensemble during model
constructon. A backend's builder can call build() on this, pass in a
list of models it knows about, and get a constructed object.
"""
class NeuronPoolSpecification(NeuronPool):
    """Placeholder pool created at model-definition time.

    Records n_neurons and the requested neuron types; build() later
    selects a concrete pool class from a backend's list and copies the
    (possibly user-modified) parameter values onto it.
    """
    def __init__(self, n_neurons, neuron_types):
        self._allow_new_attributes = True
        self.n_neurons = n_neurons
        # make sure it's a list
        try:
            len(neuron_types)
        except TypeError:
            neuron_types = [neuron_types]
        # copy, so the caller's sequence is not mutated below
        neuron_types = list(neuron_types)
        # make sure elements in the list are instances, not classes
        for i, spec in enumerate(neuron_types):
            if inspect.isclass(spec):
                neuron_types[i] = spec()
        self.neuron_types = neuron_types
        for n in neuron_types:
            for key in dir(n):
                if not key.startswith('_'):
                    setattr(self, key, getattr(n, key))
        self._allow_new_attributes = False
    def build(self, pool_classes):
        """Return an instance of the first pool class that subclasses
        every requested neuron type; raises Exception if none does."""
        for model in pool_classes:
            for spec in self.neuron_types:
                if not issubclass(model, spec.__class__):
                    break
            else:
                n = model(self.neuron_types)
                # copy current (possibly overridden) parameter values onto
                # the new pool; the leftover debug prints were removed
                for key in dir(n):
                    if not key.startswith('_') and not callable(getattr(n, key)):
                        setattr(n, key, getattr(self, key, getattr(n, key)))
                n.build(self.n_neurons)
                return n
        raise Exception('Could not find suitable neuron model')
"""
Various neuron models
"""
class LIFRatePool(NeuronPool, LIF, Rate):
    # Analytic LIF rate response; stateless, so build() has nothing to do.
    def build(self, n_neurons):
        pass
    def step(self, dt, J):
        # suppress divide/log warnings: entries with J <= 1 are zeroed below
        old = np.seterr(divide='ignore', invalid='ignore')
        try:
            r = 1.0 / (self.tau_ref + self.tau_rc * np.log1p(1.0 / (J-1)))
            r[J <= 1] = 0
        finally:
            np.seterr(**old)
        return r * dt  # multiply by dt to do rate per timestep
class LIFSpikingPool(NeuronPool, LIF, Spiking):
    def build(self, n_neurons):
        # membrane voltage (threshold at 1.0) and remaining refractory
        # time (seconds), per neuron
        self.voltage = np.zeros(n_neurons)
        self.refractory_time = np.zeros(n_neurons)
    def step(self, dt, J):
        # forward-Euler membrane update
        dv = (dt / self.tau_rc) * (J - self.voltage)
        self.voltage += dv
        self.voltage[self.voltage < 0] = 0  # voltage cannot go negative
        self.refractory_time -= dt
        # scale dv by the fraction of this timestep spent non-refractory
        self.voltage *= (1-self.refractory_time / dt).clip(0, 1)
        spiked = self.voltage > 1
        # linearly interpolate where inside the timestep each spike fell
        overshoot = (self.voltage[spiked > 0] - 1) / dv[spiked > 0]
        spiketime = dt * (1 - overshoot)
        self.voltage[spiked > 0] = 0
        self.refractory_time[spiked > 0] = self.tau_ref + spiketime
        return spiked
class LIFFixedPool(NeuronPool, LIF, Spiking, Fixed):
    """Fixed-point (16.16) spiking LIF model (0x10000 == 1.0)."""
    def build(self, n_neurons):
        # dtype was 'i32', which numpy rejects ("data type 'i32' not
        # understood"); 'int32' is the valid spelling.
        self.voltage = np.zeros(n_neurons, dtype='int32')
        # 'u8' = 8-byte unsigned integer; counts down refractory steps
        self.refractory_time = np.zeros(n_neurons, dtype='u8')
        self.dt = None   # forces recomputation of dt-derived constants
        self.lfsr = 1    # LFSR state for pseudo-random overshoot rounding
    def step(self, dt, J):
        if self.dt != dt:
            self.dt = dt
            self.dt_over_tau_rc = int(dt * 0x10000 / self.tau_rc)
            self.ref_steps = int(self.tau_ref / dt)
        J = np.asarray(J * 0x10000, dtype='int32')
        dv = ((J - self.voltage) * self.dt_over_tau_rc) >> 16
        dv[self.refractory_time > 0] = 0   # frozen while refractory
        self.refractory_time[self.refractory_time > 0] -= 1
        self.voltage += dv
        self.voltage[self.voltage < 0] = 0
        spiked = self.voltage > 0x10000
        self.refractory_time[spiked > 0] = self.ref_steps
        # randomly adjust the refractory period to account for overshoot
        for i in np.where(spiked > 0)[0]:
            p = ((self.voltage[i] - 0x10000) << 16) / dv[i]
            if self.lfsr < p:
                self.refractory_time[i] -= 1
            self.lfsr = (self.lfsr >> 1) ^ (-(self.lfsr & 0x1) & 0xB400)
        self.voltage[spiked > 0] = 0
        return spiked
class IzhikevichPool(NeuronPool, Izhikevich, Spiking):
    # Izhikevich two-variable spiking model, forward-Euler integration.
    def build(self, n_neurons):
        self.v = np.zeros(n_neurons) + self.c
        self.u = self.b * self.v
    def step(self, dt, J):
        # the *1000 rescales the canonical ms-based equations to seconds
        dv = (0.04 * self.v ** 2 + 5 * self.v + 140 - self.u + J) * 1000
        du = (self.a * (self.b * self.v - self.u)) * 1000
        self.v += dv * dt
        self.u += du * dt
        spiked = self.v >= 30  # spike threshold
        self.v[spiked > 0] = self.c                        # reset potential
        self.u[spiked > 0] = self.u[spiked > 0] + self.d   # recovery kick
        return spiked
"""
List of known neuron models, in order of preference
"""
neuron_models = [
LIFSpikingPool,
LIFRatePool,
LIFFixedPool,
IzhikevichPool,
]
if __name__ == '__main__':
    # Demo: build a pool for each specification and plot tuning curves.
    specs = {
        'default': [],
        'LIF spiking': [LIF, Spiking],
        'LIF rate': [LIF, Rate],
        'LIF fixed': [LIF, Fixed],
        'Iz': [Izhikevich],
        'Iz burst': [Izhikevich(a=0.02, b=0.2, c=-50, d=2)],
    }
    # sweep 100 input currents and simulate T seconds of activity
    J = np.linspace(-2, 10, 100)
    dt = 0.001
    T = 1
    import pylab
    for name, spec in specs.items():
        pool_spec = NeuronPoolSpecification(100, spec)
        # you can change a parameter before build time
        if name=='LIF rate':
            pool_spec.tau_rc = 0.05
        spec_model = pool_spec.build(neuron_models)
        data = []
        for i in range(int(T/dt)):
            data.append(spec_model.step(dt, J))
        tuning = np.sum(data, axis=0)/T  # average activity -> tuning curve
        pylab.plot(J, tuning, label=name)
    pylab.legend(loc='best')
    pylab.show()
| {
"repo_name": "ctn-waterloo/neuron_models",
"path": "v4-fleshout.py",
"copies": "1",
"size": "8657",
"license": "mit",
"hash": -1873103159575087000,
"line_mean": 26.7467948718,
"line_max": 81,
"alpha_frac": 0.5799930692,
"autogenerated": false,
"ratio": 3.5034399028733305,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9555208678941152,
"avg_score": 0.005644858626435808,
"num_lines": 312
} |
"""Attempt #5 at organizing neuron models
- We specify types of neurons using subclasses of Neuron
- This includes things like LIF vs HH and also Float vs Fixed, Rate vs Spiking
- We build a NeuronPool object which actually has code for running neurons
- We keep a list of known Neuron types around so if we're asked for just
  a Rate neuron, we can pick the first one on the list that matches
- Configuration of parameters is done via descriptors
- make() step is delayed until after constructor, as we don't want that
to happen until build time
- We initially construct a dummy class that can be fleshed out with
the actual neuron model. The dummy class would be made by the
initial call to nengo.Ensemble() and it wouldn't get fleshed out with
an actual backend's neural implementation until build time
- We don't want the actual backend's class for running neurons to be
a subclass of NeuronPool, since that's putting a lot of constraints
on it. Instead, we just decorate the classes to indicate what they
support.
"""
import numpy as np
import weakref
import inspect
"""
Neuron type specifications
"""
class FloatParameter(object):
    """Descriptor holding a bounded float parameter, stored per instance
    in a WeakKeyDictionary so owners are not kept alive."""
    def __init__(self, default, min=None, max=None):
        self.default = float(default)
        self.min = min
        self.max = max
        self.data = weakref.WeakKeyDictionary()
    def __get__(self, instance, owner):
        # class-level access returns the declared default
        if instance is None:
            return self.default
        return self.data.get(instance, self.default)
    def __set__(self, instance, value):
        lower, upper = self.min, self.max
        if lower is not None and value < lower:
            raise AttributeError('parameter value must be >=%g' % lower)
        if upper is not None and value > upper:
            raise AttributeError('parameter value must be <=%g' % upper)
        self.data[instance] = float(value)
class Neuron(object):
    """Base class for neuron type markers; rejects unknown parameters."""
    def __init__(self, **kwargs):
        self._allow_new_attributes = False
        for name in kwargs:
            setattr(self, name, kwargs[name])
    def __setattr__(self, key, value):
        acceptable = (key.startswith('_')
                      or self._allow_new_attributes
                      or key in dir(self))
        if not acceptable:
            raise AttributeError('Unknown parameter "%s"' % key)
        super(Neuron, self).__setattr__(key, value)
class LIF(Neuron):
    # Leaky integrate-and-fire parameters: membrane time constant and
    # absolute refractory period (seconds).
    tau_rc = FloatParameter(0.02, min=0)
    tau_ref = FloatParameter(0.002, min=0)
class Rate(Neuron):
    # Marker: rate-based (non-spiking) implementation requested.
    pass
class Spiking(Neuron):
    # Marker: spiking implementation requested.
    pass
class Fixed(Neuron):
    # Marker: fixed-point arithmetic implementation requested.
    pass
class Izhikevich(Neuron):
    # Izhikevich model parameters.
    a = FloatParameter(0.02)
    b = FloatParameter(0.2)
    c = FloatParameter(-65)
    d = FloatParameter(8)
"""
This is the class that should be created by an Ensemble during model
construction. A backend's builder can call build() on this, pass in a
list of models it knows about, and get a constructed object.
"""
class NeuronPoolSpecification(object):
    """Placeholder pool created at model-definition time.

    Stores n_neurons and the requested neuron type instances, exposing
    their parameters as attributes.  build() later selects a concrete
    backend pool class and copies the parameter values onto it.
    """
    def __init__(self, n_neurons, neuron_types):
        self._allow_new_attributes = True
        self.n_neurons = n_neurons
        # make sure it's a list
        try:
            len(neuron_types)
        except TypeError:
            neuron_types = [neuron_types]
        # copy, so the caller's sequence is not mutated below
        neuron_types = list(neuron_types)
        # make sure elements in the list are instances, not classes
        for i, spec in enumerate(neuron_types):
            if inspect.isclass(spec):
                neuron_types[i] = spec()
        self.neuron_types = neuron_types
        for n in neuron_types:
            for key in dir(n):
                if not key.startswith('_'):
                    setattr(self, key, getattr(n, key))
        self._allow_new_attributes = False
    def __setattr__(self, key, value):
        # reject unknown public parameter names once construction is done
        if (not key.startswith('_') and not self._allow_new_attributes
                and key not in dir(self)):
            raise AttributeError('Unknown parameter "%s"' % key)
        super(NeuronPoolSpecification, self).__setattr__(key, value)
    def build(self, pool_classes):
        """Instantiate the first pool class implementing every requested
        neuron type; raises Exception when none matches."""
        for model in pool_classes:
            params = {}
            for spec in self.neuron_types:
                if not spec.__class__ in model.neuron_types:
                    break
            else:
                # collect defaults from the model's declared types, then
                # override with any values set on this specification
                for cls in model.neuron_types:
                    for key in dir(cls):
                        if not key.startswith('_'):
                            params[key] = getattr(self, key, getattr(cls, key))
                n = model()
                for key, value in params.items():
                    setattr(n, key, value)
                return n
        raise Exception('Could not find suitable neuron model')
"""
Backend-specific neuron models
"""
class NeuronPool(object):
    """Interface expected of backend neuron pools."""
    def make(self, n_neurons):
        """Allocate per-neuron state; concrete pools must override."""
        raise NotImplementedError('NeuronPools must provide "make"')
    def step(self, dt, J):
        """Advance one timestep of size dt with input current J."""
        raise NotImplementedError('NeuronPools must provide "step"')
def implements(*neuron_types):
    """Class decorator recording which neuron type classes a pool supports."""
    def decorate(pool_class):
        pool_class.neuron_types = neuron_types
        return pool_class
    return decorate
@implements(LIF, Rate)
class LIFRatePool(NeuronPool):
    # Analytic LIF rate response; stateless, so make() has nothing to do.
    def make(self, n_neurons):
        pass
    def step(self, dt, J):
        # suppress divide/log warnings: entries with J <= 1 are zeroed below
        old = np.seterr(divide='ignore', invalid='ignore')
        try:
            r = 1.0 / (self.tau_ref + self.tau_rc * np.log1p(1.0 / (J-1)))
            r[J <= 1] = 0
        finally:
            np.seterr(**old)
        return r * dt  # multiply by dt to do rate per timestep
@implements(LIF, Spiking)
class LIFSpikingPool(NeuronPool):
    def make(self, n_neurons):
        # membrane voltage (threshold at 1.0) and remaining refractory
        # time (seconds), per neuron
        self.voltage = np.zeros(n_neurons)
        self.refractory_time = np.zeros(n_neurons)
    def step(self, dt, J):
        # forward-Euler membrane update
        dv = (dt / self.tau_rc) * (J - self.voltage)
        self.voltage += dv
        self.voltage[self.voltage < 0] = 0  # voltage cannot go negative
        self.refractory_time -= dt
        # scale dv by the fraction of this timestep spent non-refractory
        self.voltage *= (1-self.refractory_time / dt).clip(0, 1)
        spiked = self.voltage > 1
        # linearly interpolate where inside the timestep each spike fell
        overshoot = (self.voltage[spiked > 0] - 1) / dv[spiked > 0]
        spiketime = dt * (1 - overshoot)
        self.voltage[spiked > 0] = 0
        self.refractory_time[spiked > 0] = self.tau_ref + spiketime
        return spiked
@implements(LIF, Spiking, Fixed)
class LIFFixedPool(NeuronPool):
    """Fixed-point (16.16) spiking LIF model (0x10000 == 1.0)."""
    def make(self, n_neurons):
        # dtype was 'i32', which numpy rejects ("data type 'i32' not
        # understood"); 'int32' is the valid spelling.
        self.voltage = np.zeros(n_neurons, dtype='int32')
        # 'u8' = 8-byte unsigned integer; counts down refractory steps
        self.refractory_time = np.zeros(n_neurons, dtype='u8')
        self.dt = None   # forces recomputation of dt-derived constants
        self.lfsr = 1    # LFSR state for pseudo-random overshoot rounding
    def step(self, dt, J):
        if self.dt != dt:
            self.dt = dt
            self.dt_over_tau_rc = int(dt * 0x10000 / self.tau_rc)
            self.ref_steps = int(self.tau_ref / dt)
        J = np.asarray(J * 0x10000, dtype='int32')
        dv = ((J - self.voltage) * self.dt_over_tau_rc) >> 16
        dv[self.refractory_time > 0] = 0   # frozen while refractory
        self.refractory_time[self.refractory_time > 0] -= 1
        self.voltage += dv
        self.voltage[self.voltage < 0] = 0
        spiked = self.voltage > 0x10000
        self.refractory_time[spiked > 0] = self.ref_steps
        # randomly adjust the refractory period to account for overshoot
        for i in np.where(spiked > 0)[0]:
            p = ((self.voltage[i] - 0x10000) << 16) / dv[i]
            if self.lfsr < p:
                self.refractory_time[i] -= 1
            self.lfsr = (self.lfsr >> 1) ^ (-(self.lfsr & 0x1) & 0xB400)
        self.voltage[spiked > 0] = 0
        return spiked
@implements(Izhikevich, Spiking)
class IzhikevichPool(NeuronPool):
    # Izhikevich two-variable spiking model, forward-Euler integration.
    def make(self, n_neurons):
        self.v = np.zeros(n_neurons) + self.c
        self.u = self.b * self.v
    def step(self, dt, J):
        # the *1000 rescales the canonical ms-based equations to seconds
        dv = (0.04 * self.v ** 2 + 5 * self.v + 140 - self.u + J) * 1000
        du = (self.a * (self.b * self.v - self.u)) * 1000
        self.v += dv * dt
        self.u += du * dt
        spiked = self.v >= 30  # spike threshold
        self.v[spiked > 0] = self.c                        # reset potential
        self.u[spiked > 0] = self.u[spiked > 0] + self.d   # recovery kick
        return spiked
"""
List of known neuron models, in order of preference
"""
neuron_models = [
LIFSpikingPool,
LIFRatePool,
LIFFixedPool,
IzhikevichPool,
]
if __name__ == '__main__':
    # Demo: build a pool for each specification and plot tuning curves.
    specs = {
        'default': [],
        'LIF spiking': [LIF, Spiking],
        'LIF rate': [LIF, Rate],
        'LIF fixed': [LIF, Fixed],
        'Iz': [Izhikevich],
        'Iz burst': [Izhikevich(a=0.02, b=0.2, c=-50, d=2)],
    }
    # sweep 100 input currents and simulate T seconds of activity
    J = np.linspace(-2, 10, 100)
    dt = 0.001
    T = 1
    import pylab
    for name, spec in specs.items():
        pool_spec = NeuronPoolSpecification(100, spec)
        # you can change a parameter before build time
        if name=='LIF rate':
            pool_spec.tau_rc = 0.05
        spec_model = pool_spec.build(neuron_models)
        spec_model.make(pool_spec.n_neurons)
        data = []
        for i in range(int(T/dt)):
            data.append(spec_model.step(dt, J))
        tuning = np.sum(data, axis=0)/T  # average activity -> tuning curve
        pylab.plot(J, tuning, label=name)
    pylab.legend(loc='best')
    pylab.show()
| {
"repo_name": "ctn-waterloo/neuron_models",
"path": "v5-separate.py",
"copies": "1",
"size": "9031",
"license": "mit",
"hash": -2438515772797290500,
"line_mean": 27.0465838509,
"line_max": 79,
"alpha_frac": 0.5854279703,
"autogenerated": false,
"ratio": 3.5429580227540214,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9604300440076918,
"avg_score": 0.0048171105954208015,
"num_lines": 322
} |
"""Attempt #6 at organizing neuron models
- We specify types of neurons using subclasses of Neuron
- This includes things like LIF vs HH and also Float vs Fixed, Rate vs Spiking
- We build a NeuronPool object which actually has code for running neurons
- We keep a list of known Neuron types around so if we're asked for just
  a Rate neuron, we can pick the first one on the list that matches
- Configuration of parameters is done via descriptors
- make() step is delayed until after constructor, as we don't want that
to happen until build time
- We initially construct a dummy class that can be fleshed out with
the actual neuron model. The dummy class would be made by the
initial call to nengo.Ensemble() and it wouldn't get fleshed out with
an actual backend's neural implementation until build time
- We don't want the actual backend's class for running neurons to be
a subclass of NeuronPool, since that's putting a lot of constraints
on it. Instead, we just decorate the classes to indicate what they
support.
"""
import numpy as np
import weakref
import inspect
"""
Neuron type specifications
"""
class FloatParameter(object):
    """Per-instance float parameter descriptor with optional min/max,
    stored in a WeakKeyDictionary so owners are not kept alive."""
    def __init__(self, default, min=None, max=None):
        self.default = float(default)
        self.min = min
        self.max = max
        self.data = weakref.WeakKeyDictionary()
    def __get__(self, instance, owner):
        # class-level access yields the declared default
        return self.default if instance is None else self.data.get(
            instance, self.default)
    def __set__(self, instance, value):
        if self.min is not None:
            if value < self.min:
                raise AttributeError('parameter value must be >=%g' % self.min)
        if self.max is not None:
            if value > self.max:
                raise AttributeError('parameter value must be <=%g' % self.max)
        self.data[instance] = float(value)
class NodeParameter(object):
    """Descriptor marking a parameter that is supplied at runtime through
    a nengo Node; it only carries the dimensionality of that signal.

    Attribute access returns the descriptor itself; assignment is
    forbidden.
    """
    def __init__(self, dimensions):
        # store the requested dimensionality (the original hard-coded 1
        # here, silently ignoring the argument)
        self.dimensions = dimensions
    def __get__(self, instance, owner):
        return self
    def __set__(self, instance, value):
        raise AttributeError('cannot change a NodeParameter')
class NeuronType(object):
    """Base class for neuron type markers; rejects unknown parameters."""
    def __init__(self, **kwargs):
        self._allow_new_attributes = False
        for name in kwargs:
            setattr(self, name, kwargs[name])
    def __setattr__(self, key, value):
        known = (key.startswith('_')
                 or self._allow_new_attributes
                 or key in dir(self))
        if not known:
            raise AttributeError('Unknown parameter "%s"' % key)
        super(NeuronType, self).__setattr__(key, value)
class LIF(NeuronType):
    # Leaky integrate-and-fire parameters: membrane time constant and
    # absolute refractory period (seconds).
    tau_rc = FloatParameter(0.02, min=0)
    tau_ref = FloatParameter(0.002, min=0)
class Rate(NeuronType):
    # Marker: rate-based (non-spiking) implementation requested.
    pass
class Spiking(NeuronType):
    # Marker: spiking implementation requested.
    pass
class Fixed(NeuronType):
    # Marker: fixed-point arithmetic implementation requested.
    pass
class Izhikevich(NeuronType):
    # Izhikevich model parameters.
    a = FloatParameter(0.02)
    b = FloatParameter(0.2)
    c = FloatParameter(-65)
    d = FloatParameter(8)
class GruberSpiny(NeuronType):
    # presumably a dopamine-modulated spiny neuron model (TODO confirm);
    # the dopamine signal is delivered at runtime via a 1-D nengo Node
    dopamine = NodeParameter(dimensions=1)
import nengo
"""
This is the class that should be created by an Ensemble during model
construction. A backend's builder can call build() on this, pass in a
list of models it knows about, and get a constructed object.
"""
class Neurons(object):
    """Ensemble-facing neuron specification, created at model
    construction time.

    Parameters of the requested neuron types are exposed as attributes
    (NodeParameters are replaced by pass-through nengo Nodes).  build()
    later selects a concrete backend pool class and copies the current
    parameter values onto it.
    """
    def __init__(self, n_neurons, neuron_types):
        self._allow_new_attributes = True
        self.n_neurons = n_neurons
        # make sure it's a list
        try:
            len(neuron_types)
        except TypeError:
            neuron_types = [neuron_types]
        # copy, so the caller's sequence is not mutated below
        neuron_types = list(neuron_types)
        # make sure elements in the list are instances, not classes
        for i, spec in enumerate(neuron_types):
            if inspect.isclass(spec):
                neuron_types[i] = spec()
        self.neuron_types = neuron_types
        for n in neuron_types:
            for key in dir(n):
                if not key.startswith('_'):
                    value = getattr(n, key)
                    if isinstance(value, NodeParameter):
                        # node-valued parameters become pass-through Nodes
                        value = nengo.Node(size_in=value.dimensions,
                                           size_out=value.dimensions)
                    setattr(self, key, value)
        self._allow_new_attributes = False
    def __setattr__(self, key, value):
        # reject unknown public parameter names once construction is done
        if (not key.startswith('_') and not self._allow_new_attributes
                and key not in dir(self)):
            raise AttributeError('Unknown parameter "%s"' % key)
        super(Neurons, self).__setattr__(key, value)
    def build(self, pool_classes):
        """Instantiate the first pool class implementing every requested
        neuron type; raises Exception when none matches."""
        for model in pool_classes:
            params = {}
            for spec in self.neuron_types:
                if not spec.__class__ in model.neuron_types:
                    break
            else:
                # defaults from the model's declared types, overridden by
                # any values set on this specification
                for cls in model.neuron_types:
                    for key in dir(cls):
                        if not key.startswith('_'):
                            params[key] = getattr(self, key, getattr(cls, key))
                n = model()
                for key, value in params.items():
                    setattr(n, key, value)
                return n
        raise Exception('Could not find suitable neuron model')
"""
Backend-specific neuron models
"""
def implements(*neuron_types):
    """Class decorator marking which NeuronType classes a pool supports.

    Stores the given types on the decorated class as ``neuron_types`` so
    that Neurons.build() can match requested types against pool classes.
    """
    def decorate(pool_class):
        pool_class.neuron_types = neuron_types
        return pool_class
    return decorate
@implements(LIF, Rate)
class LIFRatePool(object):
    """Rate-mode LIF approximation: steady-state firing rate per timestep."""

    def make(self, n_neurons):
        # The rate model is stateless, so nothing to allocate.
        pass

    def step(self, dt, J):
        # LIF rate: 1 / (tau_ref + tau_rc * ln(1 + 1/(J-1))), defined only
        # for suprathreshold input J > 1; silence numpy warnings for the
        # out-of-range entries and zero them afterwards.
        old = np.seterr(divide='ignore', invalid='ignore')
        try:
            r = 1.0 / (self.tau_ref + self.tau_rc * np.log1p(1.0 / (J-1)))
            r[J <= 1] = 0
        finally:
            np.seterr(**old)
        return r * dt  # multiply by dt to do rate per timestep
@implements(LIF, Spiking)
class LIFSpikingPool(object):
    """Spiking LIF implementation with sub-timestep spike-time correction."""

    def make(self, n_neurons):
        self.voltage = np.zeros(n_neurons)
        self.refractory_time = np.zeros(n_neurons)

    def step(self, dt, J):
        # Euler update of the membrane voltage towards the input current.
        dv = (dt / self.tau_rc) * (J - self.voltage)
        self.voltage += dv
        self.voltage[self.voltage < 0] = 0
        # Scale the update by the fraction of the timestep each neuron
        # spent outside its refractory period.
        self.refractory_time -= dt
        self.voltage *= (1-self.refractory_time / dt).clip(0, 1)
        # Threshold crossing; estimate where within the step the spike
        # occurred (overshoot) to set the refractory time accordingly.
        spiked = self.voltage > 1
        overshoot = (self.voltage[spiked > 0] - 1) / dv[spiked > 0]
        spiketime = dt * (1 - overshoot)
        self.voltage[spiked > 0] = 0
        self.refractory_time[spiked > 0] = self.tau_ref + spiketime
        return spiked
@implements(LIF, Spiking, Fixed)
class LIFFixedPool(object):
    """Spiking LIF in 16.16 fixed-point arithmetic (integer-only update)."""

    def make(self, n_neurons):
        # NOTE(review): 'i32'/'u8' look like nonstandard numpy dtype
        # strings (numpy expects e.g. 'int32'/'uint8') -- confirm they
        # resolve on the numpy version this targets.
        self.voltage = np.zeros(n_neurons, dtype='i32')
        self.refractory_time = np.zeros(n_neurons, dtype='u8')
        self.dt = None  # cached so dt-dependent constants recompute on change
        self.lfsr = 1   # linear-feedback shift register for pseudo-random bits

    def step(self, dt, J):
        # Recompute fixed-point constants when dt changes.
        if self.dt != dt:
            self.dt = dt
            self.dt_over_tau_rc = int(dt * 0x10000 / self.tau_rc)
            self.ref_steps = int(self.tau_ref / dt)
        # Convert input to 16.16 fixed point and integrate.
        J = np.asarray(J * 0x10000, dtype='i32')
        dv = ((J - self.voltage) * self.dt_over_tau_rc) >> 16
        dv[self.refractory_time > 0] = 0
        self.refractory_time[self.refractory_time > 0] -= 1
        self.voltage += dv
        self.voltage[self.voltage < 0] = 0
        # Threshold at 1.0 in 16.16 representation.
        spiked = self.voltage > 0x10000
        self.refractory_time[spiked > 0] = self.ref_steps
        # randomly adjust the refractory period to account for overshoot
        for i in np.where(spiked > 0)[0]:
            p = ((self.voltage[i] - 0x10000) << 16) / dv[i]
            if self.lfsr < p:
                self.refractory_time[i] -= 1
            # Advance the 16-bit LFSR (taps mask 0xB400).
            self.lfsr = (self.lfsr >> 1) ^ (-(self.lfsr & 0x1) & 0xB400)
        self.voltage[spiked > 0] = 0
        return spiked
@implements(Izhikevich, Spiking)
class IzhikevichPool(object):
    """Izhikevich spiking model with (v, u) state pair."""

    def make(self, n_neurons):
        self.v = np.zeros(n_neurons) + self.c
        self.u = self.b * self.v

    def step(self, dt, J):
        # The *1000 factors presumably convert the per-millisecond model
        # equations to per-second so dt (seconds) applies -- confirm units.
        dv = (0.04 * self.v ** 2 + 5 * self.v + 140 - self.u + J) * 1000
        du = (self.a * (self.b * self.v - self.u)) * 1000
        self.v += dv * dt
        self.u += du * dt
        spiked = self.v >= 30
        # Post-spike reset: v -> c, u -> u + d for neurons that spiked.
        self.v[spiked > 0] = self.c
        self.u[spiked > 0] = self.u[spiked > 0] + self.d
        return spiked
@implements(GruberSpiny, Rate)
class GruberSpinyPool(object):
    """Rate-mode Gruber striatal spiny neuron with dopamine modulation."""

    V_reset = -60  # reset membrane potential (presumably mV)

    def make(self, n_neurons):
        self.Vm = np.zeros(n_neurons) + self.V_reset

    def step(self, dt, J):
        # Channel constants (half-activation voltages, slopes, conductances,
        # gas/Faraday constants, calcium concentrations).
        Cm=1; E_K=-90; g_L=.008; VKir2_h=-111; VKir2_c=-11; gbar_Kir2=1.2
        VKsi_h=-13.5; VKsi_c=11.8; gbar_Ksi=.45; R=8.315; F=96480; T=293
        VLCa_h=-35; VLCa_c=6.1; Pbar_LCa=4.2; Ca_o=.002; Ca_i=0.01
        # A backend needs to support some function to get at the values
        # of a Node created by a NodeParameter
        mu = 1.2  # self.dopamine.get_value()
        # Sigmoidal channel activations as functions of membrane voltage.
        L_Kir2 = 1.0/(1 + np.exp(-(self.Vm-VKir2_h)/VKir2_c))
        L_Ksi = 1.0/(1 + np.exp(-(self.Vm-VKsi_h)/VKsi_c))
        L_LCa = 1.0/(1 + np.exp(-(self.Vm-VLCa_h)/VLCa_c))
        P_LCa = Pbar_LCa * L_LCa
        x = np.exp(-2 * self.Vm/1000*F/(R*T))
        # Channel currents; dopamine level mu scales Kir2 and L-type Ca.
        I_Kir2 = gbar_Kir2*L_Kir2*(self.Vm-E_K)
        I_Ksi = gbar_Ksi*L_Ksi*(self.Vm-E_K)
        I_LCa = P_LCa*(4*self.Vm/1000*F*F/(R*T))* ((Ca_i-Ca_o*x)/(1-x))
        I_L = g_L*(self.Vm-E_K)
        dv = -(1.0/Cm)*(mu*(I_Kir2 + I_LCa) + I_Ksi + I_L-J*1000)
        self.Vm += dv*dt
        # Map membrane potential to a firing rate via a soft threshold.
        Vf_h = -55
        Vf_c = 25  # this is published as 2.5
        Vf = -58
        ref_time = 0.05 / (1 + np.exp((self.Vm - Vf_h)/Vf_c))
        rate = np.where(self.Vm > Vf, 1.0/ref_time, 0)
        return rate * dt
"""
List of known neuron models, in order of preference
"""
neuron_models = [
LIFSpikingPool,
LIFRatePool,
LIFFixedPool,
IzhikevichPool,
GruberSpinyPool
]
if __name__ == '__main__':
    # Demo: build each specification, simulate T seconds over a sweep of
    # input currents, and plot the resulting tuning curves.
    specs = {
        'default': [],
        'LIF spiking': [LIF, Spiking],
        'LIF rate': [LIF, Rate],
        'LIF fixed': [LIF, Fixed],
        'Iz': [Izhikevich],
        'Iz burst': [Izhikevich(a=0.02, b=0.2, c=-50, d=2)],
        'Gruber': [GruberSpiny],
    }
    J = np.linspace(-2, 10, 100)  # input currents to sweep
    dt = 0.001                    # timestep
    T = 1                         # total simulated time
    import pylab
    for name, spec in specs.items():
        with nengo.Model():
            pool_spec = Neurons(100, spec)
            # you can change a parameter before build time
            if name=='LIF rate':
                pool_spec.tau_rc = 0.05
            spec_model = pool_spec.build(neuron_models)
            spec_model.make(pool_spec.n_neurons)
            data = []
            for i in range(int(T/dt)):
                data.append(spec_model.step(dt, J))
            # Average response over the run gives the tuning curve.
            tuning = np.sum(data, axis=0)/T
            pylab.plot(J, tuning, label=name)
    pylab.legend(loc='best')
    pylab.show()
| {
"repo_name": "ctn-waterloo/neuron_models",
"path": "v6-dopamine.py",
"copies": "1",
"size": "10786",
"license": "mit",
"hash": -2103051528465433900,
"line_mean": 27.6861702128,
"line_max": 79,
"alpha_frac": 0.5716669757,
"autogenerated": false,
"ratio": 3.3045343137254903,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.437620128942549,
"avg_score": null,
"num_lines": null
} |
""" attempt at a more general-purpose parallel simulation script using the 2D solver.
should do the following: simulate forces in the pore for a given list of ranges of parameter values.
distribute this simulation to a given number of processors.
create a data and metadata file for every range.
#if data file already exists and metadata match, attempt to finish the data file.
if simulation is finished at the end, save plot of the range in same DIR.
"""
from ..tools.protocol import Data, unique_id
from ..tools.utilities import save_dict
from ..tools.mpipool import mpimap
from mpi4py import MPI
from ..dirnames import DATADIR
import numpy, os
#from .calculate_forces import calculate2D
__all__ = ["iterate_in_parallel", "post_iteration", "simulate",
"parallel_output"]
# directory where data are saved
savedir = DATADIR + "/sim/stamps/"
if not os.path.exists(savedir):
    os.makedirs(savedir)

# general functions for running simulations and saving output
# TODO: this is a bit messy.. need class for storing and iterating
# through parameter sets (while holding some fixed)
def iterate_in_parallel(method, nproc=1, iterkeys=None, **params):
    ''' evaluate a given method for a given parameter set.
    params is a dict and some of its values are allowed to be iterable.
    the method is expected to return a dict with the SAME KEYS for every parameter in one iteration.
    an exception ocurring in the method is NOT handled without stopping the iteration.
    '''
    # find the parameters to be iterated through
    iterkeys2 = [key for key in params if hasattr(params[key], "__iter__")]
    if iterkeys is None:
        iterkeys = iterkeys2
    elif set(iterkeys) <= set(iterkeys2):
        # user-specified iterkeys come first, remaining iterable keys after
        for key in iterkeys:
            iterkeys2.remove(key)
        iterkeys = iterkeys + iterkeys2
    else:
        # requested keys are not all iterable -- fall back to detected ones
        print "I'm ignoring your iterkeys."
        iterkeys = iterkeys2
    # create stamp of the input
    stamp = dict(params)
    stamp["iterkeys"] = iterkeys
    stamp["method"] = method.__name__
    # create list of params instances to be mapped
    iterator = combinations(params, iterkeys)
    # create the function to be mapped with
    def f(params): return method(**params)
    # map iterator using mpi4py
    # FIXME: doesn't work if some dolfin function are used, e.g. Function.extrapolate
    if MPI.COMM_WORLD.Get_size() > 1:
        result = mpimap(f, iterator)
    # map iterator using multiprocessing.Pool
    # FIXME: this approach of distributing across multiple processors is inconvenient
    # since a single error kills the whole simulation.
    # (not necessarily, error can be catched and displayed by method)
    # also it's not supposed to be appropriate for HPC architectures
    elif nproc>1:
        from pathos.helpers import mp  # mp = fork of multiprocessing package
        pool = mp.Pool(nproc)
        result = pool.map(f, iterator)
        pool.close()
        pool.join()
    # map in serial
    else:
        result = map(f, iterator)
    return join_dicts(result), stamp
def combinations(dic, iterkeys):
    """Expand a dict of iterables into all single-value combination dicts.

    dic maps keys to iterables and/or scalar values; iterkeys names the
    iterable keys and fixes the expansion order.  Returns a list of dicts,
    one per combination, each holding only scalar values.
    """
    combos = [{k: dic[k] for k in dic if k not in iterkeys}]
    for key in iterkeys:
        expanded = []
        for value in dic[key]:
            for combo in combos:
                extended = dict(combo)
                extended[key] = value
                expanded.append(extended)
        combos = expanded
    return combos
def join_dicts(lst):
    """Merge a list of dicts (possibly containing None) into a dict of lists.

    [{"F": 1.0}, {"F": 2.0}] --> {"F": [1.0, 2.0]}; None entries are
    skipped.  All non-None dicts are assumed to share the same keys (the
    keys of the last one seen are used).
    """
    valid = [d for d in lst if d is not None]
    if not valid:
        return {}
    return {key: [d[key] for d in valid] for key in valid[-1].keys()}
def post_iteration(result, stamp, showplot=False):
    ''' in case method output is a dict, put result of iterate_in_parallel
    into nicer form, save in .dat file and create plots '''
    # create unique id for filenames
    uid = str(unique_id())
    # save stamp to file
    save_dict(stamp, dir=savedir, name=("stamp"+uid))
    # put result and input into form
    #result = join_dicts(result)
    iterkeys = stamp.pop("iterkeys")
    # create combinations only of relevant (iterable) parameters
    input_params = {k:stamp[k] for k in iterkeys}  # can be empty
    input = join_dicts(combinations(input_params, iterkeys))
    # save iterated parameters and result to data file
    # NOTE: result.values()[0] relies on Python 2 dict.values() returning a list
    N = len(result.values()[0])
    data = Data(savedir+"result"+uid+".dat", N=N, overwrite=True)
    data.data["status"][:] = 1
    for key in input:
        data.data[key] = numpy.array(input[key])
    for key in result:
        data.data[key] = numpy.array(result[key])
    data.write()
    # no plot if not at least two different parameter sets
    if len(iterkeys) == 0:
        return
    # create plot for every result column
    # TODO: for the moment i assume that iterkeys[0] is the one to be plotted
    # thus i can use numpy indexing to get the right chunks of the results
    #if plotkey is None:
    plotkey = iterkeys[0]
    iterkeys.remove(plotkey)
    x = stamp.pop(plotkey)
    nx = len(x)
    # create combinations only of relevant (iterable) parameters
    input_params = {k:stamp[k] for k in iterkeys}
    params = combinations(input_params, iterkeys)
    from matplotlib.pyplot import plot, xlabel, ylabel, legend, savefig, show
    import matplotlib.pyplot as plt
    plots = {}
    # for every result column
    for key, rescol in result.items():
        i = 0
        # create new figure
        fig, ax = plt.subplots()
        plots[key] = ax
        # for every non-axis input parameter set held fixed
        for pset in params:
            # get the corresponding chunk of length nx of result column
            chunk = slice(i*nx, (i+1)*nx)
            i += 1
            y = rescol[chunk]
            # create fitting label using the fixed params
            label = ", ".join("%s=%s" % t for t in pset.items())
            # add x,y to plot and label axis with keys
            #print x,y
            plot(x, y, '-x', label=label)
        xlabel(plotkey)
        ylabel(key)
        legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
        savefig(savedir+"plot"+uid+key+".eps", bbox_inches='tight')
        if showplot: show()
    return plots
    #else: close()
# general simulation (for modules with calculate() function)
# optionally, module can also provide post_calculate() function that receives the result
def simulate(name, nproc=1, outputs=None, plot=None,
             write_files=True, **params):
    """Run a named script's calculate() over a parameter sweep.

    Imports nanopores.scripts.<name>, iterates its calculate() in parallel
    and, on the MPI root rank, saves/plots results via the script's own
    post_calculate() if present, else via post_iteration().
    """
    script = __import__("nanopores.scripts."+name, fromlist=["calculate"])
    calculate = script.calculate
    if outputs is not None:
        # restrict the returned dict to the requested output keys
        def f(**x):
            res = calculate(**x)
            return {key:res[key] for key in outputs if key in res}
    else:
        f = calculate
    if plot is not None:
        result, stamp = iterate_in_parallel(f, nproc=nproc, iterkeys=[plot], **params)
    else:
        result, stamp = iterate_in_parallel(f, nproc=nproc, **params)
    # only the MPI root rank writes files
    if MPI.COMM_WORLD.Get_rank() > 0 or not write_files:
        return
    stamp["script"] = name
    print result, stamp
    if hasattr(script, "post_calculate"):
        script.post_calculate(result, stamp)
    else:
        post_iteration(result, stamp, showplot=False)
    return result
def parallel_output(calculate, nproc=1, plot=None, showplot=False, **params):
    """Sweep calculate() over params in parallel; plot on the MPI root rank.

    If plot names a parameter, it becomes the x-axis of the plots.
    Non-root MPI ranks return None without producing output.
    """
    if plot is None:
        result, stamp = iterate_in_parallel(calculate, nproc=nproc, **params)
    else:
        result, stamp = iterate_in_parallel(calculate, nproc=nproc,
                                            iterkeys=[plot], **params)
    if MPI.COMM_WORLD.Get_rank() > 0:
        return
    return post_iteration(result, stamp, showplot=showplot)
# simulation in 2D (script for howorka pore)
#def simulation2D(nproc=1, outputs=None, plot=None, write_files=True, **params):
# if outputs is not None:
# def f(**x):
# res = calculate2D(**x)
# return {key:res[key] for key in outputs}
# else:
# f = calculate2D
# if plot is not None:
# result, stamp = iterate_in_parallel(f, nproc=nproc, iterkeys=[plot], **params)
# else:
# result, stamp = iterate_in_parallel(f, nproc=nproc, **params)
# if MPI.COMM_WORLD.Get_rank() > 0 or not write_files:
# return
# post_iteration(result, stamp, showplot=False)
# return result
| {
"repo_name": "mitschabaude/nanopores",
"path": "nanopores/scripts/simulation2D.py",
"copies": "1",
"size": "8550",
"license": "mit",
"hash": 2843646993336350700,
"line_mean": 36.012987013,
"line_max": 104,
"alpha_frac": 0.6371929825,
"autogenerated": false,
"ratio": 3.8220831470719716,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4959276129571972,
"avg_score": null,
"num_lines": null
} |
"""Attempt at creating an autocomplete class."""
from trie import Trie
class Autocomplete(object):
    """Takes a vocab list on init., provides methods to autocomplete."""

    def __init__(self, vocab, max_completions=5):
        """Store the vocabulary in a trie and remember the completion cap."""
        self.vocab = vocab
        self.max_completions = max_completions
        self._stored_vocab = Trie()
        for item in vocab:
            self._stored_vocab.insert(item)

    def complete_me(self, word):
        """Return up to max_completions vocab words starting with word.

        Returns [] when the prefix is absent from the vocabulary.
        """
        node = self._stored_vocab.root
        # Walk the trie along the prefix; a missing edge means no matches.
        for character in word:
            if character not in node.children:
                return []
            node = node.children[character]
        return self._search(word, node, [])

    def _search(self, word, first_letter, word_suggestions):
        """Depth-first collect completions under first_letter.

        Fix: the max_completions cap used to be checked only between
        siblings, so recursion could overshoot it; checking before every
        append guarantees at most max_completions suggestions.
        """
        if len(word_suggestions) >= self.max_completions:
            return word_suggestions
        # A word ends here at an explicit end marker or at a leaf node.
        if first_letter.end or not first_letter.children:
            word_suggestions.append(word)
        for key, node in first_letter.children.items():
            self._search(word + key, node, word_suggestions)
        return word_suggestions
| {
"repo_name": "pasaunders/code-katas",
"path": "src/autocomplete.py",
"copies": "1",
"size": "1520",
"license": "mit",
"hash": 6772944379606689000,
"line_mean": 35.1904761905,
"line_max": 97,
"alpha_frac": 0.6184210526,
"autogenerated": false,
"ratio": 4.305949008498583,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5424370061098582,
"avg_score": null,
"num_lines": null
} |
# Attempt at implementing autoencoder for MNIST
# Multiple variations of this have been tried, eg. the linear (PCA),
# sigmoidal, and denoising. None of them end up producing local filters
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
import argparse
import sys
import json
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import argparse
# Import data
mnist = input_data.read_data_sets('tmp/tensorflow/mnist/input_data', one_hot=True)

# Create the model: single-hidden-layer autoencoder with tied weights
# (the decoder weight matrix is the transpose of the encoder's W1).
x = tf.placeholder(tf.float32, [None, 784])
W1 = tf.Variable(tf.truncated_normal([784, 500], stddev=.1))
b1 = tf.Variable(tf.zeros([500]))
# Denoising mask zeroing ~30% of inputs.
# NOTE(review): the mask is sampled once at graph-construction time, so the
# same fixed corruption pattern applies to every batch -- confirm intended.
x1 = x * ( numpy.random.rand( 100, 784 ) < .7 )
h = tf.nn.sigmoid( tf.matmul(x1, W1) + b1 )
W2 = tf.transpose( W1 )
b2 = tf.Variable(tf.zeros([784]))
y = tf.matmul(h, W2) + b2

# Define loss and optimizer (reconstruction cross-entropy vs. the input)
y_ = tf.placeholder(tf.float32, [None, 784])
cross_entropy_loss = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(y, y_) )
train_step = tf.train.GradientDescentOptimizer(0.1).minimize( cross_entropy_loss )
cross_entropy_summary = tf.summary.scalar( 'cross_entropy', cross_entropy_loss )

# Reshape the first 100 columns of W1 into 28x28 filter images for TensorBoard.
tf_weights = [ tf.split( 0, 28, W1[:,0:100] ) ]
tf_weights_4d = tf.transpose( tf_weights, [ 3, 1, 2, 0 ] )
weights_images = tf.image_summary( 'weights', tf_weights_4d, 100 )

sess = tf.Session()
saver = tf.train.Saver()

parser = argparse.ArgumentParser()
parser.add_argument("-restore",
    help="restore model from file")
parser.add_argument("-save",
    help="save model to file")
parser.add_argument("-logdir",
    help="logdir")
args = parser.parse_args()

# Initialise fresh variables or restore from a checkpoint.
if ( args.restore is None ):
    sess.run( tf.global_variables_initializer() )
else:
    saver.restore( sess, args.restore )
summary_writer = tf.train.SummaryWriter( args.logdir, sess.graph )
saver = tf.train.Saver()

# Train: each outer iteration runs 100 batches, then logs summaries and
# checkpoints the model (indentation reconstructed -- confirm save/summary
# statements belong inside the outer loop).
step = 0
for _ in range(100000):
    for _ in range(100):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        # Autoencoder: the target equals the (uncorrupted) input batch.
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_xs})
    cross_entropy_summary_str = sess.run( cross_entropy_summary, feed_dict= {x: batch_xs, y_: batch_xs} )
    summary_writer.add_summary( cross_entropy_summary_str, global_step=step)
    wtimgs = sess.run( weights_images, feed_dict= {x: batch_xs, y_: batch_xs} )
    summary_writer.add_summary( wtimgs, global_step=step)
    step = step + 1
    save_path = saver.save( sess, args.save )

# Dump the learned encoder weights for external inspection.
with open('weights.json', 'w') as outfile:
    json.dump(sess.run( W1 ).tolist(), outfile)
| {
"repo_name": "jfrancis71/TensorFlowApps",
"path": "TrainMNISTAutoencoder.py",
"copies": "1",
"size": "2609",
"license": "mit",
"hash": 116172512827372380,
"line_mean": 27.6703296703,
"line_max": 103,
"alpha_frac": 0.6830203143,
"autogenerated": false,
"ratio": 3.0301974448315914,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9148418537062373,
"avg_score": 0.012959844413843825,
"num_lines": 91
} |
# Attempt at replicating the results from 'Recurrent Highway Networks' using keras
# Arxiv paper: https://arxiv.org/abs/1607.03474
# Reference implementation: https://github.com/julian121266/RecurrentHighwayNetworks
#
import time
import numpy as np
import keras.optimizers
from keras.layers import Embedding, Dense, LSTM, TimeDistributed
from keras.models import Sequential
from rhn import RHN
from lstm_ln import LSTM_LN
from lstm_zoneout import LSTM_zoneout
seq_len = 180     # characters per training sequence
batch_size = 128
epochs = 1000
rhn_size = 256    # recurrent hidden-state size
def load(filename):
    """Read a byte-level corpus and map each byte to a dense integer id.

    Returns (data, alphabet_size) where data[i] is the index of the i-th
    byte within the sorted set of unique bytes in the file.
    """
    with open(filename, 'r') as f:
        data = f.read()
    # NOTE: np.fromstring is deprecated in modern numpy (np.frombuffer is
    # the replacement); kept as-is in this Python 2 era script.
    data = np.fromstring(data, dtype=np.uint8)
    unique, data = np.unique(data, return_inverse=True)
    return data, len(unique)
# Load the corpus and derive the alphabet size used for one-hot encoding.
print 'Loading data...'
data, dim = load('text8')
print 'Alphabet size', dim
def batchXY(start_idx, length, slen=seq_len):
    """Build one-hot (X, Y) batches for next-character prediction.

    X holds consecutive slen-long one-hot windows of the corpus; Y is X
    shifted forward by one character (the prediction targets).
    """
    Xs = np.zeros((length, dim), dtype='float32')
    Xs[np.arange(length), data[start_idx:start_idx+length]] = 1
    X, Y = [], []
    for idx in xrange(0, length-slen, slen):  # xrange: Python 2 script
        X.append(Xs[idx:idx+slen, :])
        Y.append(Xs[idx+1:idx+slen+1])
    return np.array(X), np.array(Y)
model_name = 'lstm'
train_lbatch = 18        # number of large training chunks per epoch
lbatch_size = 5*10**6    # characters per large chunk

# Validation set: the chunk right after the training data, longer windows.
validX, validY = batchXY(train_lbatch*lbatch_size, lbatch_size, slen=4096)
print "Valid", np.shape(validX), np.shape(validY)

# Select the recurrent layer implementation by name.
model = Sequential()
input_shape=(None, dim)
if model_name == 'rhn':
    model.add(RHN(rhn_size, 2, return_sequences=True, consume_less='cpu', input_shape=input_shape))
elif model_name == 'lstm-zoneout':
    model.add(LSTM_zoneout(rhn_size, zoneout_c=0.5, zoneout_h=0.05,
        return_sequences=True, consume_less='gpu', input_shape=input_shape))
elif model_name == 'lstm':
    model.add(LSTM(rhn_size, return_sequences=True, consume_less='gpu', input_shape=input_shape))
else:
    raise(Exception('Unknown model %s' % model_name))
# Per-timestep softmax over the alphabet.
model.add(TimeDistributed(Dense(dim, activation='softmax'), input_shape=(None, rhn_size)))
optimizer = keras.optimizers.Adam(lr=0.001, clipnorm=1.0)
print "Compiling model..."
model.compile(loss='categorical_crossentropy',
              optimizer=optimizer,
              metrics=['accuracy'])

# Track (train, valid) bits-per-character after every epoch.
bpcs = []
for epoch_idx in xrange(epochs):
    start_time = time.time()
    for cnt in xrange(train_lbatch):
        X, Y = batchXY(cnt * lbatch_size, lbatch_size)
        model.fit(X, Y, batch_size=batch_size, nb_epoch=1)
    # Recompute the loss on the last batch
    train_loss = model.evaluate(X, Y, batch_size=batch_size)
    loss = model.evaluate(validX, validY, batch_size=batch_size)
    # nats -> bits via division by log(2)
    bpcs.append((train_loss[0]/np.log(2), loss[0]/np.log(2)))
    print epoch_idx, time.time() - start_time
    print bpcs
    model.save('iter%d.h5' % epoch_idx)
| {
"repo_name": "LaurentMazare/deep-models",
"path": "rhn/rhn-text8.py",
"copies": "1",
"size": "2654",
"license": "apache-2.0",
"hash": 8281991883817838000,
"line_mean": 33.0256410256,
"line_max": 97,
"alpha_frac": 0.7049736247,
"autogenerated": false,
"ratio": 2.8910675381263617,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9041242366635525,
"avg_score": 0.01095975923816724,
"num_lines": 78
} |
"""Attempt at totalling up DCP data
Run from `RUN_12Z.sh` for previous day
Run from `RUN_20_AFTER.sh` for current day
"""
import datetime
import sys
import pytz
import numpy as np
from pandas.io.sql import read_sql
from pyiem.util import get_dbconn, utc, logger
LOG = logger()
def workflow(date):
    """Do the necessary work for this date"""
    # hads holds raw DCP observations; iem holds the daily summaries we update.
    pgconn = get_dbconn("hads", user="nobody")
    iem_pgconn = get_dbconn("iem")
    icursor = iem_pgconn.cursor()
    # load up the current obs
    df = read_sql(
        f"""
        WITH dcp as (
            SELECT id, iemid, tzname from stations where network ~* 'DCP'
            and tzname is not null
        ), obs as (
            SELECT iemid, pday from summary_{date.year}
            WHERE day = %s)
        SELECT d.id, d.iemid, d.tzname, coalesce(o.pday, 0) as pday from
        dcp d LEFT JOIN obs o on (d.iemid = o.iemid)
        """,
        iem_pgconn,
        params=(date,),
        index_col="id",
    )
    # Compute local midnight for each distinct station timezone.
    bases = {}
    ts = utc(date.year, date.month, date.day, 12)
    for tzname in df["tzname"].unique():
        base = ts.astimezone(pytz.timezone(tzname))
        bases[tzname] = base.replace(hour=0)
    # retrieve data that is within 12 hours of our bounds
    sts = datetime.datetime(
        date.year, date.month, date.day
    ) - datetime.timedelta(hours=12)
    ets = sts + datetime.timedelta(hours=48)
    obsdf = read_sql(
        f"""
        SELECT distinct station, valid at time zone 'UTC' as utc_valid, value
        from raw{date.year} WHERE valid between %s and %s and
        substr(key, 1, 3) = 'PPH' and value >= 0
        """,
        pgconn,
        params=(sts, ets),
        index_col=None,
    )
    if obsdf.empty:
        LOG.info("%s found no data", date)
        return
    obsdf["utc_valid"] = obsdf["utc_valid"].dt.tz_localize(pytz.UTC)
    # One-minute precipitation accumulation buffer for a full local day.
    precip = np.zeros((24 * 60))
    grouped = obsdf.groupby("station")
    for station in obsdf["station"].unique():
        if station not in df.index:
            continue
        precip[:] = 0
        tz = df.loc[station, "tzname"]
        current_pday = df.loc[station, "pday"]
        for _, row in grouped.get_group(station).iterrows():
            ts = row["utc_valid"].to_pydatetime()
            if ts <= bases[tz]:
                continue
            # Spread each hourly PPH value evenly over the preceding hour.
            t1 = (ts - bases[tz]).total_seconds() / 60.0
            t0 = max([0, t1 - 60.0])
            precip[int(t0) : int(t1)] = row["value"] / 60.0
        pday = np.sum(precip)
        # Skip implausible totals and values already matching the summary.
        if pday > 50 or np.allclose([pday], [current_pday]):
            continue
        iemid = int(df.loc[station, "iemid"])
        icursor.execute(
            f"UPDATE summary_{date.year} "
            "SET pday = %s WHERE iemid = %s and day = %s",
            (pday, iemid, date),
        )
        # No existing row: insert one, then set pday with a guard.
        # NOTE(review): nesting reconstructed from flattened source --
        # confirm both statements belong under the rowcount check.
        if icursor.rowcount == 0:
            LOG.info("Adding record %s[%s] for day %s", station, iemid, date)
            icursor.execute(
                f"INSERT into summary_{date.year} "
                "(iemid, day) VALUES (%s, %s)",
                (iemid, date),
            )
            icursor.execute(
                f"UPDATE summary_{date.year} "
                "SET pday = %s WHERE iemid = %s and day = %s "
                "and %s > coalesce(pday, 0)",
                (pday, iemid, date, pday),
            )
    icursor.close()
    iem_pgconn.commit()
def main(argv):
    """Entry point: run workflow() for the date given as YYYY MM DD
    command-line arguments, or for today when no date is supplied."""
    if len(argv) == 4:
        year, month, day = (int(v) for v in argv[1:4])
        target = datetime.date(year, month, day)
    else:
        target = datetime.date.today()
    workflow(target)


if __name__ == "__main__":
    main(sys.argv)
| {
"repo_name": "akrherz/iem",
"path": "scripts/hads/compute_hads_pday.py",
"copies": "1",
"size": "3548",
"license": "mit",
"hash": -4688594871699963000,
"line_mean": 30.1228070175,
"line_max": 77,
"alpha_frac": 0.5417136415,
"autogenerated": false,
"ratio": 3.3314553990610327,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4373169040561033,
"avg_score": null,
"num_lines": null
} |
# Attempted solution of Riddler at https://fivethirtyeight.com/features/riddler-nation-goes-to-war/
from random import shuffle
# Number of Monte-Carlo games to simulate.
Reps = 10000000
# How many cards go face-down in a tie-break?
CardsDown = 1
# Play the next cards and break any ties. Return True if
# there are more cards to play. Result is 1 if the aces
# player (me) wins the game.
def NextRound():
    """Play one battle of War, including any nested tie-breaks.

    The battle winner takes the shuffled pot.  Sets the global Result
    (0 = game continues, 1 = the aces player wins, 2 = the aces player
    loses) and returns True while the game is still undecided.
    """
    global Me, You, Result, CardsDown
    Pot = []
    while True:
        mine = Me.pop()
        yours = You.pop()
        Pot.extend([mine, yours])
        # Shuffle so the winner receives the pot in random order.
        shuffle(Pot)
        if mine > yours:
            # My card beats yours: I take the pot of played cards.
            Me = Pot + Me
            Result = 1 if len(You) == 0 else 0
            break
        if yours > mine:
            You = Pot + You
            Result = 2 if len(Me) == 0 else 0
            break
        # Tie: each side needs CardsDown face-down cards plus one face-up
        # card for the tie-break; whoever cannot supply them loses now.
        if len(Me) < 1 + CardsDown:
            Result = 2
            break
        if len(You) < 1 + CardsDown:
            Result = 1
            break
        for _ in range(CardsDown):
            Pot.extend([Me.pop(), You.pop()])
    return Result == 0
# You have four of every number from 0 to 11, while I have just four 12s
YourCards = []
for i in range(12):
    YourCards.extend([i]*4)

# Main loop
Accum = 0
for Rep in range(Reps):
    Me = [12,12,12,12]
    You = list(YourCards)
    shuffle(You)
    # Play battles until the game is decided (Result becomes nonzero).
    while NextRound():
        continue
    if Result == 1:
        Accum += 1
# Empirical probability that the all-aces hand wins.
print(1.0*Accum/Reps)
| {
"repo_name": "hectorpefo/hectorpefo.github.io",
"path": "_includes/GameOfWar.py",
"copies": "1",
"size": "1646",
"license": "mit",
"hash": 8296094936817178000,
"line_mean": 21.2432432432,
"line_max": 99,
"alpha_frac": 0.6391251519,
"autogenerated": false,
"ratio": 2.7479131886477464,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8659488424056591,
"avg_score": 0.04550998329823119,
"num_lines": 74
} |
# attempting the classify the charts, after armor/tests/imageToDataTest3.py
# Plan: 1. compute features and store them
# 2. classify
# 3. display
#
# Optional delay so the job can be scheduled to start later.
#sleepTime= 140000
sleepTime =0
import time
print time.asctime()
print 'sleeping now for ', sleepTime, 'seconds'
time.sleep(sleepTime)

import os
import time
import pickle
import numpy as np
from armor import pattern
dbz = pattern.DBZ
dp = pattern.dp
plt = pattern.plt

# Input charts plus output locations for extracted images and features.
inputFolder = dp.defaultImageDataFolder + 'charts2-allinone-/'
imageFolder = dp.root + 'labLogs2/charts2_extracted/'
outputFolder = dp.root + 'labLogs2/charts2_features/'
try:
    os.makedirs(outputFolder)
except:
    print outputFolder, 'exists'

N = 500  # number of random charts to sample
L = os.listdir(inputFolder)
L = [v for v in L if v.startswith('2014') or v.startswith('2013')]
#L = os.listdir(imageFolder)
# Normalise file names to dataTime strings "YYYYMMDD.HHMM".
if 'MOS' in L[0]:
    L = [l[:4] + l[5:7] + l[8:10] + '.' + l[11:15] for l in L]
else:
    L = [l[:-4] for l in L]
L.sort()
print len(L)
print L[:10]

# Draw N random dataTimes (with replacement) and wrap them as DBZ objects.
# The bare R[:10] expressions are REPL leftovers with no effect.
R = np.random.random(N)
R = (R*len(L)).astype(int)
R = [L[v] for v in R]
R[:10]
#R = [l[:4] + l[5:7] + l[8:10] + '.' + l[11:15] for l in R]
R[:10]
R = [dbz(v) for v in R]
R[:10]
"""
##############
# test case
a = R[0]
print a.dataTime
a.loadImage(rawImage=True)
a.show()
#
a.loadImage()
a.show()
#
a1 = a.connectedComponents()
a2 = a.above(51).connectedComponents()
a1.show(block=True)
a2.show(block=True)
# get the components
M1 = a1.matrix.max()
M2 = a2.matrix.max()
components1 = [(a1.matrix==v).sum() for v in range(M1+1)]
components2 = [(a2.matrix==v).sum() for v in range(M2+1)]
#components1 = sorted([(a1.matrix==v).sum() for v in range(M1+1)][1:], reverse=True)
#components2 = sorted([(a2.matrix==v).sum() for v in range(M2+1)][1:], reverse=True)
#components1 = [v for v in components1 if v>=100]
#components2 = [v for v in components2 if v>=10]
print sorted(components1, reverse=True)[1:]
print sorted(components2, reverse=True)[1:]
# get the moments
from armor.geometry import moments as mmt
HuPowers = np.array([2., 4., 6., 6., 12., 8., 12.])
HuPowers = (HuPowers)**-1
moments1 = np.array([mmt.HuMoments(a1.matrix==v)**HuPowers for v in range(len(components1))])
moments2 = np.array([mmt.HuMoments(a2.matrix==v)**HuPowers for v in range(len(components2))])
print moments1
print moments2
# defining the features
numberOfComponents = len([v for v in components1[1:] if v>=100]) # region of at least 100 pixels
volume = a1.matrix.sum() + a2.matrix.sum()
features = { 'dataTime' : a.dataTime,
'globalFeatures' : a1.globalShapeFeatures(lowerThreshold=1, upperThreshold=51,),
'localFeatures' : [a1.levelSet(v).globalShapeFeatures() for v in range(len(components1))], # this includes the "background"
}
pickle.dump(features, open('features_' + a.dataTime +'.pydump','w'))
# end test case
##############################
"""
###########
# later #
###########
# For every chart: segment it, summarise its components and pickle the
# per-chart feature dict.
count = 0
for imageName in L:
    count +=1
    dataTime = imageName
    print dataTime
    # Skip charts whose features were already computed.
    if os.path.exists(outputFolder+'features_' + dataTime +'.pydump'):
        continue
    a=dbz(dataTime)
    a.loadImage()
    a.show()
    # Connected components of the full chart and of the >51 dBZ regions.
    a1 = a.connectedComponents()
    a2 = a.above(51).connectedComponents()
    #if count < 1:
    #    print 'waiting for check'
    #    a1.show(block=True)
    #    print 'waiting for check'
    #    a2.show(block=True)
    #elif count==3:
    #    print 'it runs from now on, no more a1.show(block=True)'
    # get the components
    M1 = a1.matrix.max()
    M2 = a2.matrix.max()
    # Pixel count per labelled component (index 0 is the background).
    components1 = [(a1.matrix==v).sum() for v in range(M1+1)]
    components2 = [(a2.matrix==v).sum() for v in range(M2+1)]
    print sorted(components1, reverse=True)[1:]
    print sorted(components2, reverse=True)[1:]
    # defining the features
    numberOfComponents = len([v for v in components1[1:] if v>=100])  # region of at least 100 pixels
    volume = a1.matrix.sum() + a2.matrix.sum()
    synopsis = "volume: " + str(volume) +'\n'
    synopsis += "major components: " + str(sorted(components1, reverse=True)[1:])
    print synopsis
    features = { 'dataTime'       : a.dataTime,
                 'globalFeatures' : a1.globalShapeFeatures(lowerThreshold=1, upperThreshold=51,),
                 'localFeatures'  : [a1.levelSet(v).globalShapeFeatures() for v in range(len(components1))],
                 'synopsis'       : synopsis ,
               }
    pickle.dump(features, open(outputFolder+'features_' + a.dataTime +'.pydump','w'))
"""
for a in R:
a.imagePath = outputFolder+a.dataTime+'.png'
if os.path.exists(a.imagePath):
continue
a.loadImage()
b = a.copy()
b.loadImage(rawImage=True)
plt.subplot(121)
plt.imshow(b.matrix, origin='lower')
plt.subplot(122)
plt.imshow(a.matrix, origin='lower')
plt.title(a.dataTime)
plt.savefig(a.imagePath)
plt.show(block=False)
print 'sleeping 2 seconds'
time.sleep(2)
if N>=100:
a.matrix=np.array([0]) #free up some memory
"""
| {
"repo_name": "yaukwankiu/armor",
"path": "tests/imageToDataTest4.py",
"copies": "1",
"size": "5132",
"license": "cc0-1.0",
"hash": 3112838748372136000,
"line_mean": 28.6647398844,
"line_max": 147,
"alpha_frac": 0.6102883866,
"autogenerated": false,
"ratio": 2.959630911188005,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4069919297788005,
"avg_score": null,
"num_lines": null
} |
# attempting the classify the charts, after armor/tests/imageToDataTest3.py
# Plan: 1. compute features and store them
# *2. classify
# - basically, put all of the feature vectors in an array and perform k-means or others such as DBSCAN (once i know how to do it)
# 3. display
#
import os
import time
import pickle
import numpy as np
from armor import pattern
dbz = pattern.DBZ
dp = pattern.dp
plt = pattern.plt
# Chart images plus previously computed features; classification output dir.
imageFolder = dp.defaultImageDataFolder + 'charts2-allinone-/'
inputFolder = dp.root + 'labLogs2/charts2_features/'
outputFolder = dp.root + 'labLogs2/charts2_classification_local/'
block=True  # whether chart windows block until manually closed
try:
    os.makedirs(outputFolder)
except:
    print outputFolder, 'exists'

L = os.listdir(inputFolder)
L.sort()
# The next three bare expressions are REPL leftovers with no effect;
# L[0][9:22] previews the dataTime substring of a feature filename.
len(L)
L[:10]
L[0][9:22]
###############
# test case
k = 6 # k for k-means
featureMatrix=0 # initialisation
featureRowToShapeLabel = {}
#featureMatrix = np.array([])
i=0
a = dbz(dataTime=L[i][9:22])
print a.dataTime
a.loadImage(rawImage=True).show()
time.sleep(1)
a.loadImage().show()
a1 = a.connectedComponents()
features = pickle.load(open(inputFolder+L[i],'r'))
lf = features['localFeatures']
#
# constructing the feature matrix
#
for j in range(len(lf)):
# key line below:
#fmRow = np.array([np.log10(lf[j]['volume'])] + (lf[j]['centroid']/10).tolist() + [np.log(v) for v in lf[j]['HuMoments']] + [lf[j]['numberOfComponents']])
fmRow = np.array([(lf[j]['volume'])**.5] + (lf[j]['centroid']/10).tolist() + [np.log(v) for v in lf[j]['HuMoments']] + [lf[j]['numberOfComponents']])
inds = np.where(np.isnan(fmRow))
#fmRow[inds] = -99
fmRow[inds] = 0.
inds = np.where(np.isinf(fmRow))
#fmRow[inds] = -999
fmRow[inds] = 0.
print fmRow
try:
featureMatrix = np.vstack([featureMatrix, fmRow])
featureRowToShapeLabel[len(featureMatrix)-1] = (a.dataTime, j)
print "feature level", len(featureMatrix)-1, ":", a.dataTime, 'shape label', j
except:
featureMatrix = fmRow
featureRowToShapeLabel[0] = (a.dataTime, j)
print "feature level 0:", a.dataTime, 'shape label', j
#
# classification
#
from scipy import cluster
res = cluster.vq.kmeans2(cluster.vq.whiten(featureMatrix), k=k)
#
# display
#
for j in range(k):
print "\n------------------------------------------\n"
print "Cluster:", j
ind = np.where(res[1]==j)
for jj in ind[0]:
dataTime, j1 = featureRowToShapeLabel[jj]
print 'chart:', dataTime, ' / region index:', jj,
if a.dataTime != dataTime:
a = dbz(dataTime=dataTime, name="chart2_"+dataTime).load()
a1 = a.connectedComponents()
if block:
print " ... waiting"
else:
print ''
a1.levelSet(jj).show(block=block)
if not block:
time.sleep(1)
| {
"repo_name": "yaukwankiu/armor",
"path": "tests/imageToDataTest5.py",
"copies": "1",
"size": "2976",
"license": "cc0-1.0",
"hash": 5528325042294674000,
"line_mean": 29.3673469388,
"line_max": 158,
"alpha_frac": 0.59375,
"autogenerated": false,
"ratio": 3.1162303664921467,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42099803664921465,
"avg_score": null,
"num_lines": null
} |
# attempting the classify the charts, after armor/tests/imageToDataTest3.py
# this is the loop version of imageToTest5.py
# Plan: 1. compute features and store them
#      *2. classify
#       - basically, put all of the feature vectors in an array and perform
#         k-means or others such as DBSCAN (once i know how to do it)
#       3. display
#
import os
import time
import pickle
import numpy as np
from armor import pattern
# Short aliases into the armor.pattern module.
dbz = pattern.DBZ
dp = pattern.dp
plt = pattern.plt
# Input/output locations (dp supplies the project root paths).
imageFolder = dp.defaultImageDataFolder + 'charts2-allinone-/'
inputFolder = dp.root + 'labLogs2/charts2_features/'
outputFolder = dp.root + 'labLogs2/charts2_classification_local/'
try:
    os.makedirs(outputFolder)
except:
    # best-effort: the folder already exists
    print outputFolder, 'exists'
# Feature dumps, sorted so charts are processed chronologically.
L = os.listdir(inputFolder)
L.sort()
len(L)          # interactive-session leftovers; no effect when run as a script
L[:10]
L[0][9:22]      # characters 9:22 of a dump filename hold the dataTime stamp
#L = [v[9:22] for v in L]
timeString = str(int(time.time()))  # timestamp used to name the output folder
k = 30 # k for k-means
N = 100 # number of images to be tested
block= False        # whether image windows block until closed
display=False       # whether to show each chart while building features
throttle=0.01       # pause (seconds) between displayed images
featureMatrix=0 # initialisation: scalar sentinel, replaced by the first row
featureRowToShapeLabel = {}  # feature-matrix row index -> (dataTime, region label)
#featureMatrix = np.array([])
for i in range(N):
    print "\n============================================================="
    print 'sample:', i
    a = dbz(dataTime=L[i][9:22])
    print a.dataTime
    a.loadImage(rawImage=True)
    if display:
        a.show()
        time.sleep(throttle)
    # NOTE(review): the two lines below run even when display is False —
    # possibly they were meant to sit under the "if display:" guard; confirm.
    a.loadImage()
    a.show()
    time.sleep(throttle)
    a1 = a.connectedComponents()
    # Pre-computed features for this chart, dumped by an earlier script.
    features = pickle.load(open(inputFolder+L[i],'r'))
    lf = features['localFeatures']
    #
    # constructing the feature matrix
    #
    for j in range(len(lf)):
        # key line below: sqrt volume, scaled centroid, log Hu moments, count
        #fmRow = np.array([np.log10(lf[j]['volume'])] + (lf[j]['centroid']/10).tolist() + [np.log(v) for v in lf[j]['HuMoments']] + [lf[j]['numberOfComponents']])
        fmRow = np.array([(lf[j]['volume'])**.5] + (lf[j]['centroid']/10).tolist() + [np.log(v) for v in lf[j]['HuMoments']] + [lf[j]['numberOfComponents']])
        # Scrub NaN/inf entries (e.g. log of a zero Hu moment) before clustering.
        inds = np.where(np.isnan(fmRow))
        #fmRow[inds] = -99
        fmRow[inds] = 0.
        inds = np.where(np.isinf(fmRow))
        #fmRow[inds] = -999
        fmRow[inds] = 0.
        print fmRow
        try:
            featureMatrix = np.vstack([featureMatrix, fmRow])
            featureRowToShapeLabel[len(featureMatrix)-1] = (a.dataTime, j)
            print "feature level", len(featureMatrix)-1, ":", a.dataTime, 'shape label', j
        except:
            # first row: featureMatrix is still the scalar sentinel, vstack fails
            featureMatrix = fmRow
            featureRowToShapeLabel[0] = (a.dataTime, j)
            print "feature level 0:", a.dataTime, 'shape label', j
#
# classification
#
from scipy import cluster
print "\n======================================================"
print 'feature matrix size: ', featureMatrix.shape
time.sleep(throttle)
print 'clustering....'
# Whiten (unit variance per feature) then k-means; res = (centroids, labels).
res = cluster.vq.kmeans2(cluster.vq.whiten(featureMatrix), k=k)
#
# display
#
print '\n======================================================='
print 'Results:'
time.sleep(throttle)
# One output folder per run, keyed by start time and the (k, N) parameters.
os.makedirs(outputFolder+timeString+"__k%d__N%d" %(k, N))
for j in range(k):
    print "\n-----------------------------------------------------------------\n"
    print "Cluster:", j
    ind = np.where(res[1]==j)
    for jj in ind[0]:
        # jj is the feature-matrix row; j1 is the region label within the chart.
        dataTime, j1 = featureRowToShapeLabel[jj]
        print 'chart:', dataTime, ' / region index:', j1,
        if block:
            print " ... waiting"
        else:
            print ''
        # Reload the chart only when the row belongs to a different dataTime.
        if a.dataTime != dataTime:
            a = dbz(dataTime=dataTime, name="chart2_"+dataTime).load()
            a1 = a.connectedComponents()
        a1.levelSet(j1).show(block=block)
        a1.levelSet(j1).saveImage(outputFolder + timeString + "__k%d__N%d/cluster%d_%s_region%d.png"% (k, N, j, dataTime, j1))
        if not block:
            time.sleep(throttle)
| {
"repo_name": "yaukwankiu/armor",
"path": "tests/imageToDataTest6.py",
"copies": "1",
"size": "3884",
"license": "cc0-1.0",
"hash": 8755037214743457000,
"line_mean": 32.1965811966,
"line_max": 162,
"alpha_frac": 0.5592173018,
"autogenerated": false,
"ratio": 3.351164797238999,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4410382099038999,
"avg_score": null,
"num_lines": null
} |
# Attempting to get very high accuracy with MNIST convnet
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
# Set random seed for reproducibility
np.random.seed(123456)
tf.set_random_seed(123456)
# Get data
mnist = input_data.read_data_sets("/tmp/data")
# Input geometry: 28x28 single-channel images, fed flattened.
h = 28
w = 28
channels = 1
n_inputs = h * w
# Architecture Conv 64, Max Pool, Conv 32, Average Pool, FC
conv1_filters = 64
conv1_k = 3
conv1_s = 1
conv1_pad = 'SAME'
pool1_filters = 64
pool1_k = 3
pool1_s = 1
pool1_pad = 'SAME'
conv2_filters = 32
conv2_k = 3
conv2_s = 2
conv2_pad = 'SAME'
pool2_filters = 32
pool2_k = 3
pool2_s = 2
pool2_pad = 'SAME'
n_fc1 = 32
n_outputs = 10
fc1_dropout = 0.5
# Construct graph
graph = tf.Graph()
with graph.as_default():
    with tf.device("/cpu:0"):
        # Run-time flag: enables dropout only during training steps.
        is_training = tf.placeholder(tf.bool, shape=(), name='Is_Training')
        with tf.name_scope('inputs'):
            X = tf.placeholder(tf.float32, shape=[None, n_inputs], name='X')
            X_reshaped = tf.reshape(X, shape=[-1, h, w, channels])
            y = tf.placeholder(tf.int32, shape=[None], name='y')
        with tf.name_scope('conv_1'):
            conv_1 = tf.layers.conv2d(X_reshaped, filters=conv1_filters,
                kernel_size=conv1_k, strides=conv1_s, padding=conv1_pad,
                activation=tf.nn.elu, name='conv_1')
        with tf.name_scope('pool_1'):
            # FIX: drive ksize/strides from the pool1_* constants above instead
            # of hard-coded literals (same values [1,3,3,1]/[1,1,1,1], but now
            # editing the hyper-parameters actually changes the graph).
            pool_1 = tf.nn.max_pool(conv_1,
                ksize=[1, pool1_k, pool1_k, 1],
                strides=[1, pool1_s, pool1_s, 1],
                padding=pool1_pad)
        with tf.name_scope('conv_2'):
            conv_2 = tf.layers.conv2d(pool_1, filters=conv2_filters,
                kernel_size=conv2_k, strides=conv2_s, padding=conv2_pad,
                activation=tf.nn.elu, name='conv_2')
        with tf.name_scope('pool_2'):
            # FIX: likewise driven by the pool2_* constants (3x3 window,
            # stride 2 — unchanged values).
            pool_2 = tf.nn.avg_pool(conv_2,
                ksize=[1, pool2_k, pool2_k, 1],
                strides=[1, pool2_s, pool2_s, 1],
                padding=pool2_pad)
        # Have to flatten pool_2 for fully connected layer.
        # 28 / conv2_s (2) / pool2_s (2) = 7, hence the 7 * 7 spatial size.
        pool_2_flat = tf.reshape(pool_2, shape=[-1, 7 * 7 * pool2_filters])
        with tf.name_scope('fully_connected_1'):
            fc_1 = tf.layers.dense(pool_2_flat, n_fc1, activation=tf.nn.elu,
                name='fully_connected_1')
            fc_1_drop = tf.layers.dropout(fc_1, rate=fc1_dropout,
                training=is_training)
        with tf.name_scope('output'):
            logits = tf.layers.dense(fc_1_drop, n_outputs, name='output')
            # y_probs = tf.nn.softmax(logits, name='y_probs')
        with tf.name_scope('train'):
            x_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y)
            loss = tf.reduce_mean(x_entropy)
            optimizer = tf.train.AdamOptimizer()
            training_op = optimizer.minimize(loss)
        with tf.name_scope('eval'):
            correct = tf.nn.in_top_k(logits, y, 1)
            accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
        init = tf.global_variables_initializer()
# Run calcs
n_epochs = 25
batch_size = 16
with tf.Session(graph=graph) as sess:
    #with tf.device("/cpu:0"):
    init.run()
    for epoch in range(n_epochs):
        for k in range(mnist.train.num_examples // batch_size):
            X_batch, y_batch = mnist.train.next_batch(batch_size)
            sess.run(training_op, feed_dict={is_training: True,
                                             X: X_batch, y: y_batch})
        # Per-epoch evaluation: last training batch vs. the full test set.
        acc_train = accuracy.eval(feed_dict={is_training: False,
                                             X: X_batch, y: y_batch})
        acc_test = accuracy.eval(feed_dict={is_training: False,
                                            X: mnist.test.images, y: mnist.test.labels})
print(epoch, "Train acc: ", acc_train, 'Test acc: ', acc_test) | {
"repo_name": "KT12/hands_on_machine_learning",
"path": "MNIST_convnet_problem_9.py",
"copies": "1",
"size": "3774",
"license": "mit",
"hash": 8627815000793627000,
"line_mean": 31.5431034483,
"line_max": 95,
"alpha_frac": 0.5760466349,
"autogenerated": false,
"ratio": 3.1267605633802815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9137776173893595,
"avg_score": 0.01300620487733724,
"num_lines": 116
} |
"""Attempts Migration of a system virtual machine to the host specified."""
from baseCmd import *
from baseResponse import *
class migrateSystemVmCmd (baseCmd):
    """Request object for the asynchronous migrateSystemVm API call."""
    typeInfo = {}

    def __init__(self):
        # migrateSystemVm runs as an asynchronous job on the management server.
        self.isAsync = "true"
        # hostid: destination host to migrate the VM to (required).
        # virtualmachineid: the system VM being migrated (required).
        for param in ('hostid', 'virtualmachineid'):
            setattr(self, param, None)
            self.typeInfo[param] = 'uuid'
        self.required = ["hostid", "virtualmachineid", ]
class migrateSystemVmResponse (baseResponse):
    """Response object for the migrateSystemVm API call."""
    typeInfo = {}

    # (attribute, wire type) for every scalar field of the response, in the
    # order the API documents them.
    _FIELDS = (
        ('id', 'string'),                     # the ID of the system VM
        ('activeviewersessions', 'integer'),  # console proxy viewer sessions
        ('created', 'date'),                  # creation timestamp
        ('dns1', 'string'),
        ('dns2', 'string'),
        ('gateway', 'string'),
        ('hostid', 'string'),
        ('hostname', 'string'),
        ('hypervisor', 'string'),
        ('jobid', 'string'),                  # async job id, when applicable
        ('jobstatus', 'integer'),             # async job status, when applicable
        ('linklocalip', 'string'),
        ('linklocalmacaddress', 'string'),
        ('linklocalnetmask', 'string'),
        ('name', 'string'),
        ('networkdomain', 'string'),
        ('podid', 'string'),
        ('privateip', 'string'),
        ('privatemacaddress', 'string'),
        ('privatenetmask', 'string'),
        ('publicip', 'string'),
        ('publicmacaddress', 'string'),
        ('publicnetmask', 'string'),
        ('state', 'string'),
        ('systemvmtype', 'string'),           # e.g. console proxy / SSVM
        ('templateid', 'string'),
        ('zoneid', 'string'),
        ('zonename', 'string'),
    )

    def __init__(self):
        # Every field starts unset; typeInfo records its API wire type.
        for attr, api_type in self._FIELDS:
            setattr(self, attr, None)
            self.typeInfo[attr] = api_type
| {
"repo_name": "MissionCriticalCloud/marvin",
"path": "marvin/cloudstackAPI/migrateSystemVm.py",
"copies": "1",
"size": "4448",
"license": "apache-2.0",
"hash": -3466269384945375700,
"line_mean": 39.4363636364,
"line_max": 153,
"alpha_frac": 0.5959982014,
"autogenerated": false,
"ratio": 4.212121212121212,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5308119413521212,
"avg_score": null,
"num_lines": null
} |
"""Attempts Migration of a VM to a different host or Root volume of the vm to a different storage pool"""
from baseCmd import *
from baseResponse import *
class migrateVirtualMachineCmd (baseCmd):
    """Request object for the asynchronous migrateVirtualMachine API call.

    Migrates a VM to a different host, or the VM's root volume to a
    different storage pool.
    """
    typeInfo = {}

    def __init__(self):
        # migrateVirtualMachine runs as an asynchronous job.
        self.isAsync = "true"
        # virtualmachineid: the VM to migrate (required).
        # hostid: destination host, required for live host-to-host migration.
        # storageid: destination pool, required for root-volume migration.
        for param in ('virtualmachineid', 'hostid', 'storageid'):
            setattr(self, param, None)
            self.typeInfo[param] = 'uuid'
        self.required = ["virtualmachineid", ]
class migrateVirtualMachineResponse (baseResponse):
    """Response object for the migrateVirtualMachine API call."""
    typeInfo = {}

    # (attribute, wire type) for every scalar field of the response, in the
    # order the API documents them.
    _FIELDS = (
        ('id', 'string'),                 # the ID of the virtual machine
        ('account', 'string'),
        ('cpunumber', 'integer'),
        ('cpuspeed', 'integer'),
        ('cpuused', 'string'),
        ('created', 'date'),
        ('details', 'map'),               # VM details in key/value pairs
        ('diskioread', 'long'),
        ('diskiowrite', 'long'),
        ('diskkbsread', 'long'),
        ('diskkbswrite', 'long'),
        ('diskofferingid', 'string'),
        ('diskofferingname', 'string'),
        ('displayname', 'string'),
        ('displayvm', 'boolean'),
        ('domain', 'string'),
        ('domainid', 'string'),
        ('forvirtualnetwork', 'boolean'),
        ('group', 'string'),
        ('groupid', 'string'),
        ('guestosid', 'string'),
        ('haenable', 'boolean'),
        ('hostid', 'string'),
        ('hostname', 'string'),
        ('hypervisor', 'string'),
        ('instancename', 'string'),       # shown to ROOT admin only
        ('isdynamicallyscalable', 'boolean'),
        ('isodisplaytext', 'string'),
        ('isoid', 'string'),
        ('isoname', 'string'),
        ('keypair', 'string'),            # ssh key-pair
        ('memory', 'integer'),
        ('name', 'string'),
        ('networkkbsread', 'long'),
        ('networkkbswrite', 'long'),
        ('ostypeid', 'long'),
        ('password', 'string'),
        ('passwordenabled', 'boolean'),
        ('project', 'string'),
        ('projectid', 'string'),
        ('publicip', 'string'),
        ('publicipid', 'string'),
        ('rootdeviceid', 'long'),
        ('rootdevicetype', 'string'),
        ('serviceofferingid', 'string'),
        ('serviceofferingname', 'string'),
        ('servicestate', 'string'),
        ('state', 'string'),
        ('templatedisplaytext', 'string'),
        ('templateid', 'string'),         # -1 when created from an ISO
        ('templatename', 'string'),
        ('userid', 'string'),
        ('username', 'string'),
        ('vgpu', 'string'),
        ('zoneid', 'string'),
        ('zonename', 'string'),
    )

    def __init__(self):
        # Every scalar field starts unset; typeInfo records its wire type.
        for attr, api_type in self._FIELDS:
            setattr(self, attr, None)
            self.typeInfo[attr] = api_type
        # Lists of structured sub-objects (see nested classes below).
        self.affinitygroup = []
        self.nic = []
        self.securitygroup = []
        self.tags = []
        # Latest async job acting on this object (no declared wire type in
        # the generated API).
        self.jobid = None
        self.typeInfo['jobid'] = ''
        self.jobstatus = None
        self.typeInfo['jobstatus'] = ''

    class affinitygroup:
        """Affinity group associated with the virtual machine."""
        def __init__(self):
            for attr in ('id', 'account', 'description', 'domain', 'domainid',
                         'name', 'project', 'projectid', 'type',
                         'virtualmachineIds'):
                setattr(self, attr, None)

    class nic:
        """Network interface attached to the virtual machine."""
        def __init__(self):
            for attr in ('id', 'broadcasturi', 'deviceid', 'gateway',
                         'ip6address', 'ip6cidr', 'ip6gateway', 'ipaddress',
                         'isdefault', 'isolationuri', 'macaddress', 'netmask',
                         'networkid', 'networkname', 'secondaryip',
                         'traffictype', 'type', 'virtualmachineid'):
                setattr(self, attr, None)

    class tags:
        """Resource tag (account/customer/key/value and owner fields)."""
        def __init__(self):
            for attr in ('account', 'customer', 'domain', 'domainid', 'key',
                         'project', 'projectid', 'resourceid', 'resourcetype',
                         'value'):
                setattr(self, attr, None)

    class egressrule:
        """Egress security-group rule (rule fields plus flattened tag fields,
        matching the generated API's attribute set)."""
        def __init__(self):
            self.tags = []
            for attr in ('account', 'cidr', 'endport', 'icmpcode', 'icmptype',
                         'protocol', 'ruleid', 'securitygroupname', 'startport',
                         'customer', 'domain', 'domainid', 'key', 'project',
                         'projectid', 'resourceid', 'resourcetype', 'value'):
                setattr(self, attr, None)

    class ingressrule:
        """Ingress security-group rule (rule fields plus flattened tag fields,
        matching the generated API's attribute set)."""
        def __init__(self):
            self.tags = []
            for attr in ('account', 'cidr', 'endport', 'icmpcode', 'icmptype',
                         'protocol', 'ruleid', 'securitygroupname', 'startport',
                         'customer', 'domain', 'domainid', 'key', 'project',
                         'projectid', 'resourceid', 'resourcetype', 'value'):
                setattr(self, attr, None)

    class securitygroup:
        """Security group associated with the virtual machine (group fields
        plus the flattened rule/tag/job fields the generated API sets)."""
        def __init__(self):
            self.egressrule = []
            self.ingressrule = []
            self.tags = []
            for attr in ('id', 'account', 'description', 'domain', 'domainid',
                         'name', 'project', 'projectid', 'virtualmachinecount',
                         'virtualmachineids', 'cidr', 'endport', 'icmpcode',
                         'icmptype', 'protocol', 'ruleid', 'securitygroupname',
                         'startport', 'customer', 'key', 'resourceid',
                         'resourcetype', 'value', 'jobid', 'jobstatus'):
                setattr(self, attr, None)
| {
"repo_name": "MissionCriticalCloud/marvin",
"path": "marvin/cloudstackAPI/migrateVirtualMachine.py",
"copies": "1",
"size": "24552",
"license": "apache-2.0",
"hash": 6575136808959167000,
"line_mean": 38.0333863275,
"line_max": 131,
"alpha_frac": 0.5742098403,
"autogenerated": false,
"ratio": 4.407108239095315,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003090939507511456,
"num_lines": 629
} |
"""Attempts Migration of a VM with its volumes to a different host"""
from baseCmd import *
from baseResponse import *
class migrateVirtualMachineWithVolumeCmd (baseCmd):
typeInfo = {}
def __init__(self):
self.isAsync = "true"
"""Destination Host ID to migrate VM to."""
"""Required"""
self.hostid = None
self.typeInfo['hostid'] = 'uuid'
"""the ID of the virtual machine"""
"""Required"""
self.virtualmachineid = None
self.typeInfo['virtualmachineid'] = 'uuid'
"""Storage to pool mapping. This parameter specifies the mapping between a volume and a pool where you want to migrate that volume. Format of this parameter: migrateto[volume-index].volume=<uuid>&migrateto[volume-index].pool=<uuid>Where, [volume-index] indicates the index to identify the volume that you want to migrate, volume=<uuid> indicates the UUID of the volume that you want to migrate, and pool=<uuid> indicates the UUID of the pool where you want to migrate the volume. Example: migrateto[0].volume=<71f43cd6-69b0-4d3b-9fbc-67f50963d60b>&migrateto[0].pool=<a382f181-3d2b-4413-b92d-b8931befa7e1>&migrateto[1].volume=<88de0173-55c0-4c1c-a269-83d0279eeedf>&migrateto[1].pool=<95d6e97c-6766-4d67-9a30-c449c15011d1>&migrateto[2].volume=<1b331390-59f2-4796-9993-bf11c6e76225>&migrateto[2].pool=<41fdb564-9d3b-447d-88ed-7628f7640cbc>"""
self.migrateto = []
self.typeInfo['migrateto'] = 'map'
self.required = ["hostid", "virtualmachineid", ]
class migrateVirtualMachineWithVolumeResponse (baseResponse):
typeInfo = {}
def __init__(self):
"""the ID of the virtual machine"""
self.id = None
self.typeInfo['id'] = 'string'
"""the account associated with the virtual machine"""
self.account = None
self.typeInfo['account'] = 'string'
"""the number of cpu this virtual machine is running with"""
self.cpunumber = None
self.typeInfo['cpunumber'] = 'integer'
"""the speed of each cpu"""
self.cpuspeed = None
self.typeInfo['cpuspeed'] = 'integer'
"""the amount of the vm's CPU currently used"""
self.cpuused = None
self.typeInfo['cpuused'] = 'string'
"""the date when this virtual machine was created"""
self.created = None
self.typeInfo['created'] = 'date'
"""Vm details in key/value pairs."""
self.details = None
self.typeInfo['details'] = 'map'
"""the read (io) of disk on the vm"""
self.diskioread = None
self.typeInfo['diskioread'] = 'long'
"""the write (io) of disk on the vm"""
self.diskiowrite = None
self.typeInfo['diskiowrite'] = 'long'
"""the read (bytes) of disk on the vm"""
self.diskkbsread = None
self.typeInfo['diskkbsread'] = 'long'
"""the write (bytes) of disk on the vm"""
self.diskkbswrite = None
self.typeInfo['diskkbswrite'] = 'long'
"""the ID of the disk offering of the virtual machine"""
self.diskofferingid = None
self.typeInfo['diskofferingid'] = 'string'
"""the name of the disk offering of the virtual machine"""
self.diskofferingname = None
self.typeInfo['diskofferingname'] = 'string'
"""user generated name. The name of the virtual machine is returned if no displayname exists."""
self.displayname = None
self.typeInfo['displayname'] = 'string'
"""an optional field whether to the display the vm to the end user or not."""
self.displayvm = None
self.typeInfo['displayvm'] = 'boolean'
"""the name of the domain in which the virtual machine exists"""
self.domain = None
self.typeInfo['domain'] = 'string'
"""the ID of the domain in which the virtual machine exists"""
self.domainid = None
self.typeInfo['domainid'] = 'string'
"""the virtual network for the service offering"""
self.forvirtualnetwork = None
self.typeInfo['forvirtualnetwork'] = 'boolean'
"""the group name of the virtual machine"""
self.group = None
self.typeInfo['group'] = 'string'
"""the group ID of the virtual machine"""
self.groupid = None
self.typeInfo['groupid'] = 'string'
"""Os type ID of the virtual machine"""
self.guestosid = None
self.typeInfo['guestosid'] = 'string'
"""true if high-availability is enabled, false otherwise"""
self.haenable = None
self.typeInfo['haenable'] = 'boolean'
"""the ID of the host for the virtual machine"""
self.hostid = None
self.typeInfo['hostid'] = 'string'
"""the name of the host for the virtual machine"""
self.hostname = None
self.typeInfo['hostname'] = 'string'
"""the hypervisor on which the template runs"""
self.hypervisor = None
self.typeInfo['hypervisor'] = 'string'
"""instance name of the user vm; this parameter is returned to the ROOT admin only"""
self.instancename = None
self.typeInfo['instancename'] = 'string'
"""true if vm contains XS tools inorder to support dynamic scaling of VM cpu/memory."""
self.isdynamicallyscalable = None
self.typeInfo['isdynamicallyscalable'] = 'boolean'
"""an alternate display text of the ISO attached to the virtual machine"""
self.isodisplaytext = None
self.typeInfo['isodisplaytext'] = 'string'
"""the ID of the ISO attached to the virtual machine"""
self.isoid = None
self.typeInfo['isoid'] = 'string'
"""the name of the ISO attached to the virtual machine"""
self.isoname = None
self.typeInfo['isoname'] = 'string'
"""ssh key-pair"""
self.keypair = None
self.typeInfo['keypair'] = 'string'
"""the memory allocated for the virtual machine"""
self.memory = None
self.typeInfo['memory'] = 'integer'
"""the name of the virtual machine"""
self.name = None
self.typeInfo['name'] = 'string'
"""the incoming network traffic on the vm"""
self.networkkbsread = None
self.typeInfo['networkkbsread'] = 'long'
"""the outgoing network traffic on the host"""
self.networkkbswrite = None
self.typeInfo['networkkbswrite'] = 'long'
"""OS type id of the vm"""
self.ostypeid = None
self.typeInfo['ostypeid'] = 'long'
"""the password (if exists) of the virtual machine"""
self.password = None
self.typeInfo['password'] = 'string'
"""true if the password rest feature is enabled, false otherwise"""
self.passwordenabled = None
self.typeInfo['passwordenabled'] = 'boolean'
"""the project name of the vm"""
self.project = None
self.typeInfo['project'] = 'string'
"""the project id of the vm"""
self.projectid = None
self.typeInfo['projectid'] = 'string'
"""public IP address id associated with vm via Static nat rule"""
self.publicip = None
self.typeInfo['publicip'] = 'string'
"""public IP address id associated with vm via Static nat rule"""
self.publicipid = None
self.typeInfo['publicipid'] = 'string'
"""device ID of the root volume"""
self.rootdeviceid = None
self.typeInfo['rootdeviceid'] = 'long'
"""device type of the root volume"""
self.rootdevicetype = None
self.typeInfo['rootdevicetype'] = 'string'
"""the ID of the service offering of the virtual machine"""
self.serviceofferingid = None
self.typeInfo['serviceofferingid'] = 'string'
"""the name of the service offering of the virtual machine"""
self.serviceofferingname = None
self.typeInfo['serviceofferingname'] = 'string'
"""State of the Service from LB rule"""
self.servicestate = None
self.typeInfo['servicestate'] = 'string'
"""the state of the virtual machine"""
self.state = None
self.typeInfo['state'] = 'string'
"""an alternate display text of the template for the virtual machine"""
self.templatedisplaytext = None
self.typeInfo['templatedisplaytext'] = 'string'
"""the ID of the template for the virtual machine. A -1 is returned if the virtual machine was created from an ISO file."""
self.templateid = None
self.typeInfo['templateid'] = 'string'
"""the name of the template for the virtual machine"""
self.templatename = None
self.typeInfo['templatename'] = 'string'
"""the user's ID who deployed the virtual machine"""
self.userid = None
self.typeInfo['userid'] = 'string'
"""the user's name who deployed the virtual machine"""
self.username = None
self.typeInfo['username'] = 'string'
"""the vgpu type used by the virtual machine"""
self.vgpu = None
self.typeInfo['vgpu'] = 'string'
"""the ID of the availablility zone for the virtual machine"""
self.zoneid = None
self.typeInfo['zoneid'] = 'string'
"""the name of the availability zone for the virtual machine"""
self.zonename = None
self.typeInfo['zonename'] = 'string'
"""list of affinity groups associated with the virtual machine"""
self.affinitygroup = []
"""the list of nics associated with vm"""
self.nic = []
"""list of security groups associated with the virtual machine"""
self.securitygroup = []
"""the list of resource tags associated with vm"""
self.tags = []
"""the ID of the latest async job acting on this object"""
self.jobid = None
self.typeInfo['jobid'] = ''
"""the current status of the latest async job acting on this object"""
self.jobstatus = None
self.typeInfo['jobstatus'] = ''
class affinitygroup:
def __init__(self):
""""the ID of the affinity group"""
self.id = None
""""the account owning the affinity group"""
self.account = None
""""the description of the affinity group"""
self.description = None
""""the domain name of the affinity group"""
self.domain = None
""""the domain ID of the affinity group"""
self.domainid = None
""""the name of the affinity group"""
self.name = None
""""the project name of the affinity group"""
self.project = None
""""the project ID of the affinity group"""
self.projectid = None
""""the type of the affinity group"""
self.type = None
""""virtual machine IDs associated with this affinity group"""
self.virtualmachineIds = None
class nic:
def __init__(self):
""""the ID of the nic"""
self.id = None
""""the broadcast uri of the nic"""
self.broadcasturi = None
""""device id for the network when plugged into the virtual machine"""
self.deviceid = None
""""the gateway of the nic"""
self.gateway = None
""""the IPv6 address of network"""
self.ip6address = None
""""the cidr of IPv6 network"""
self.ip6cidr = None
""""the gateway of IPv6 network"""
self.ip6gateway = None
""""the ip address of the nic"""
self.ipaddress = None
""""true if nic is default, false otherwise"""
self.isdefault = None
""""the isolation uri of the nic"""
self.isolationuri = None
""""true if nic is default, false otherwise"""
self.macaddress = None
""""the netmask of the nic"""
self.netmask = None
""""the ID of the corresponding network"""
self.networkid = None
""""the name of the corresponding network"""
self.networkname = None
""""the Secondary ipv4 addr of nic"""
self.secondaryip = None
""""the traffic type of the nic"""
self.traffictype = None
""""the type of the nic"""
self.type = None
""""Id of the vm to which the nic belongs"""
self.virtualmachineid = None
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class egressrule:
def __init__(self):
""""account owning the security group rule"""
self.account = None
""""the CIDR notation for the base IP address of the security group rule"""
self.cidr = None
""""the ending IP of the security group rule"""
self.endport = None
""""the code for the ICMP message response"""
self.icmpcode = None
""""the type of the ICMP message response"""
self.icmptype = None
""""the protocol of the security group rule"""
self.protocol = None
""""the id of the security group rule"""
self.ruleid = None
""""security group name"""
self.securitygroupname = None
""""the starting IP of the security group rule"""
self.startport = None
""""the list of resource tags associated with the rule"""
self.tags = []
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class ingressrule:
def __init__(self):
""""account owning the security group rule"""
self.account = None
""""the CIDR notation for the base IP address of the security group rule"""
self.cidr = None
""""the ending IP of the security group rule"""
self.endport = None
""""the code for the ICMP message response"""
self.icmpcode = None
""""the type of the ICMP message response"""
self.icmptype = None
""""the protocol of the security group rule"""
self.protocol = None
""""the id of the security group rule"""
self.ruleid = None
""""security group name"""
self.securitygroupname = None
""""the starting IP of the security group rule"""
self.startport = None
""""the list of resource tags associated with the rule"""
self.tags = []
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class securitygroup:
def __init__(self):
""""the ID of the security group"""
self.id = None
""""the account owning the security group"""
self.account = None
""""the description of the security group"""
self.description = None
""""the domain name of the security group"""
self.domain = None
""""the domain ID of the security group"""
self.domainid = None
""""the name of the security group"""
self.name = None
""""the project name of the group"""
self.project = None
""""the project id of the group"""
self.projectid = None
""""the number of virtualmachines associated with this securitygroup"""
self.virtualmachinecount = None
""""the list of virtualmachine ids associated with this securitygroup"""
self.virtualmachineids = None
""""the list of egress rules associated with the security group"""
self.egressrule = []
""""account owning the security group rule"""
self.account = None
""""the CIDR notation for the base IP address of the security group rule"""
self.cidr = None
""""the ending IP of the security group rule"""
self.endport = None
""""the code for the ICMP message response"""
self.icmpcode = None
""""the type of the ICMP message response"""
self.icmptype = None
""""the protocol of the security group rule"""
self.protocol = None
""""the id of the security group rule"""
self.ruleid = None
""""security group name"""
self.securitygroupname = None
""""the starting IP of the security group rule"""
self.startport = None
""""the list of resource tags associated with the rule"""
self.tags = []
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
""""the list of ingress rules associated with the security group"""
self.ingressrule = []
""""account owning the security group rule"""
self.account = None
""""the CIDR notation for the base IP address of the security group rule"""
self.cidr = None
""""the ending IP of the security group rule"""
self.endport = None
""""the code for the ICMP message response"""
self.icmpcode = None
""""the type of the ICMP message response"""
self.icmptype = None
""""the protocol of the security group rule"""
self.protocol = None
""""the id of the security group rule"""
self.ruleid = None
""""security group name"""
self.securitygroupname = None
""""the starting IP of the security group rule"""
self.startport = None
""""the list of resource tags associated with the rule"""
self.tags = []
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
""""the list of resource tags associated with the rule"""
self.tags = []
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
""""the ID of the latest async job acting on this object"""
self.jobid = None
""""the current status of the latest async job acting on this object"""
self.jobstatus = None
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
| {
"repo_name": "MissionCriticalCloud/marvin",
"path": "marvin/cloudstackAPI/migrateVirtualMachineWithVolume.py",
"copies": "1",
"size": "25251",
"license": "apache-2.0",
"hash": -3703310376731175000,
"line_mean": 39.080952381,
"line_max": 847,
"alpha_frac": 0.5799770306,
"autogenerated": false,
"ratio": 4.321581379428376,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0030433474983348014,
"num_lines": 630
} |
"""Attempts to create a test user,
as the empty JIRA instance isn't provisioned with one.
"""
import time
from os import environ
import requests
from jira import JIRA
CI_JIRA_URL = environ["CI_JIRA_URL"]
def add_user_to_jira():
try:
JIRA(
CI_JIRA_URL,
basic_auth=(environ["CI_JIRA_ADMIN"], environ["CI_JIRA_ADMIN_PASSWORD"]),
).add_user(
username=environ["CI_JIRA_USER"],
email="user@example.com",
fullname=environ["CI_JIRA_USER_FULL_NAME"],
password=environ["CI_JIRA_USER_PASSWORD"],
)
print("user {}".format(environ["CI_JIRA_USER"]))
except Exception as e:
if "username already exists" not in str(e):
raise e
if __name__ == "__main__":
start_time = time.time()
timeout_mins = 15
print(
"waiting for instance of jira to be running, to add a user for CI system:\n timeout = {} mins".format(
timeout_mins
)
)
while True:
try:
requests.get(CI_JIRA_URL + "rest/api/2/permissions")
print("JIRA IS REACHABLE")
add_user_to_jira()
break
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as ex:
print(
"encountered {} while waiting for the JiraServer docker".format(str(ex))
)
time.sleep(20)
if start_time + 60 * timeout_mins < time.time():
raise TimeoutError(
"Jira server wasn't reachable within timeout {}".format(timeout_mins)
)
| {
"repo_name": "pycontribs/jira",
"path": "make_local_jira_user.py",
"copies": "1",
"size": "1600",
"license": "bsd-2-clause",
"hash": 371357962708413000,
"line_mean": 29.1886792453,
"line_max": 110,
"alpha_frac": 0.566875,
"autogenerated": false,
"ratio": 3.8004750593824226,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48673500593824226,
"avg_score": null,
"num_lines": null
} |
# Attempts to display the line and column of violating code.
class ParserException(Exception):
def __init__(self, message='Error Message not found.', item=None):
self.message = message
self.lineno = None
self.col_offset = None
if item and hasattr(item, 'lineno'):
self.set_err_pos(item.lineno, item.col_offset)
if hasattr(item, 'source_code'):
self.source_code = item.source_code.splitlines()
def set_err_pos(self, lineno, col_offset):
if not self.lineno:
self.lineno = lineno
if not self.col_offset:
self.col_offset = col_offset
def __str__(self):
output = self.message
if self.lineno:
output = 'line %d: %s\n%s' % (
self.lineno,
output,
self.source_code[self.lineno - 1]
)
if self.col_offset:
col = '-' * self.col_offset + '^'
output += '\n' + col
return output
class VariableDeclarationException(ParserException):
pass
class StructureException(ParserException):
pass
class ConstancyViolationException(ParserException):
pass
class NonPayableViolationException(ParserException):
pass
class InvalidLiteralException(ParserException):
pass
class InvalidTypeException(ParserException):
pass
class TypeMismatchException(ParserException):
pass
| {
"repo_name": "NedYork/viper",
"path": "viper/exceptions.py",
"copies": "1",
"size": "1461",
"license": "mit",
"hash": -5040325890434958000,
"line_mean": 22.1904761905,
"line_max": 70,
"alpha_frac": 0.6002737851,
"autogenerated": false,
"ratio": 4.234782608695652,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5335056393795652,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.