prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
xt
import os
import re
import json
import shutil
import subprocess
from tempfile import mkdtemp, mktemp
from distutils.spawn import find_executable
import frappe
from frappe.utils.minify import JavascriptMinify
import click
import psutil
from urllib.parse import urlparse
from simple_chalk import green
from semantic_version import Version
timestamps = {}
app_paths = None
sites_path = os.path.abspath(os.getcwd())
def download_file(url, prefix):
from requests import get
filename = urlparse(url).path.split("/")[-1]
local_filename = os.path.join(prefix, filename)
with get(url, stream=True, allow_redirects=True) as r:
r.raise_for_status()
with open(local_filename, "wb") as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
return local_filename
def build_missing_files():
'''Check which files dont exist yet from the assets.json and run build for those files'''
missing_assets = []
current_asset_files = []
for type in ["css", "js"]:
folder = os.path.join(sites_path, "assets", "frappe", "dist", type)
current_asset_files.extend(os.listdir(folder))
development = frappe.local.conf.developer_mode or frappe.local.dev_server
build_mode = "development" if dev | elopment else "production"
assets_json = frappe.read_file("assets/assets.json")
if assets_json:
assets_json = frappe.parse_json(assets_json)
for bundle_file, output_file in assets_json.items():
if not output_file.startswith('/assets/frappe'):
continue
if os.path.basename(output_file) not in current_asset_files:
| missing_assets.append(bundle_file)
if missing_assets:
click.secho("\nBuilding missing assets...\n", fg="yellow")
files_to_build = ["frappe/" + name for name in missing_assets]
bundle(build_mode, files=files_to_build)
else:
# no assets.json, run full build
bundle(build_mode, apps="frappe")
def get_assets_link(frappe_head):
from subprocess import getoutput
from requests import head
tag = getoutput(
r"cd ../apps/frappe && git show-ref --tags -d | grep %s | sed -e 's,.*"
r" refs/tags/,,' -e 's/\^{}//'"
% frappe_head
)
if tag:
# if tag exists, download assets from github release
url = "https://github.com/frappe/frappe/releases/download/{0}/assets.tar.gz".format(tag)
else:
url = "http://assets.frappeframework.com/{0}.tar.gz".format(frappe_head)
if not head(url):
raise ValueError("URL {0} doesn't exist".format(url))
return url
def download_frappe_assets(verbose=True):
"""Downloads and sets up Frappe assets if they exist based on the current
commit HEAD.
Returns True if correctly setup else returns False.
"""
from subprocess import getoutput
assets_setup = False
frappe_head = getoutput("cd ../apps/frappe && git rev-parse HEAD")
if frappe_head:
try:
url = get_assets_link(frappe_head)
click.secho("Retrieving assets...", fg="yellow")
prefix = mkdtemp(prefix="frappe-assets-", suffix=frappe_head)
assets_archive = download_file(url, prefix)
print("\n{0} Downloaded Frappe assets from {1}".format(green('✔'), url))
if assets_archive:
import tarfile
directories_created = set()
click.secho("\nExtracting assets...\n", fg="yellow")
with tarfile.open(assets_archive) as tar:
for file in tar:
if not file.isdir():
dest = "." + file.name.replace("./frappe-bench/sites", "")
asset_directory = os.path.dirname(dest)
show = dest.replace("./assets/", "")
if asset_directory not in directories_created:
if not os.path.exists(asset_directory):
os.makedirs(asset_directory, exist_ok=True)
directories_created.add(asset_directory)
tar.makefile(file, dest)
print("{0} Restored {1}".format(green('✔'), show))
build_missing_files()
return True
else:
raise
except Exception:
# TODO: log traceback in bench.log
click.secho("An Error occurred while downloading assets...", fg="red")
assets_setup = False
finally:
try:
shutil.rmtree(os.path.dirname(assets_archive))
except Exception:
pass
return assets_setup
def symlink(target, link_name, overwrite=False):
"""
Create a symbolic link named link_name pointing to target.
If link_name exists then FileExistsError is raised, unless overwrite=True.
When trying to overwrite a directory, IsADirectoryError is raised.
Source: https://stackoverflow.com/a/55742015/10309266
"""
if not overwrite:
return os.symlink(target, link_name)
# os.replace() may fail if files are on different filesystems
link_dir = os.path.dirname(link_name)
# Create link to target with temporary filename
while True:
temp_link_name = mktemp(dir=link_dir)
# os.* functions mimic as closely as possible system functions
# The POSIX symlink() returns EEXIST if link_name already exists
# https://pubs.opengroup.org/onlinepubs/9699919799/functions/symlink.html
try:
os.symlink(target, temp_link_name)
break
except FileExistsError:
pass
# Replace link_name with temp_link_name
try:
# Pre-empt os.replace on a directory with a nicer message
if os.path.isdir(link_name):
raise IsADirectoryError("Cannot symlink over existing directory: '{}'".format(link_name))
try:
os.replace(temp_link_name, link_name)
except AttributeError:
os.renames(temp_link_name, link_name)
except:
if os.path.islink(temp_link_name):
os.remove(temp_link_name)
raise
def setup():
global app_paths, assets_path
pymodules = []
for app in frappe.get_all_apps(True):
try:
pymodules.append(frappe.get_module(app))
except ImportError:
pass
app_paths = [os.path.dirname(pymodule.__file__) for pymodule in pymodules]
assets_path = os.path.join(frappe.local.sites_path, "assets")
def bundle(mode, apps=None, hard_link=False, make_copy=False, restore=False, verbose=False, skip_frappe=False, files=None):
"""concat / minify js files"""
setup()
make_asset_dirs(hard_link=hard_link)
mode = "production" if mode == "production" else "build"
command = "yarn run {mode}".format(mode=mode)
if apps:
command += " --apps {apps}".format(apps=apps)
if skip_frappe:
command += " --skip_frappe"
if files:
command += " --files {files}".format(files=','.join(files))
command += " --run-build-command"
check_node_executable()
frappe_app_path = frappe.get_app_path("frappe", "..")
frappe.commands.popen(command, cwd=frappe_app_path, env=get_node_env())
def watch(apps=None):
"""watch and rebuild if necessary"""
setup()
command = "yarn run watch"
if apps:
command += " --apps {apps}".format(apps=apps)
check_node_executable()
frappe_app_path = frappe.get_app_path("frappe", "..")
frappe.commands.popen(command, cwd=frappe_app_path, env=get_node_env())
def check_node_executable():
node_version = Version(subprocess.getoutput('node -v')[1:])
warn = '⚠️ '
if node_version.major < 14:
click.echo(f"{warn} Please update your node version to 14")
if not find_executable("yarn"):
click.echo(f"{warn} Please install yarn using below command and try again.\nnpm install -g yarn")
click.echo()
def get_node_env():
node_env = {
"NODE_OPTIONS": f"--max_old_space_size={get_safe_max_old_space_size()}"
}
return node_env
def get_safe_max_old_space_size():
safe_max_old_space_size = 0
try:
total_memory = psutil.virtual_memory().total / (1024 * 1024)
# reference for the safe limit assumption
# https://nodejs.org/api/cli.html#cli_max_old_space_size_size_in_megabytes
# set minimum value 1GB
safe_max_old_space_size = max(1024, int(total_memory * 0.75))
except Exception:
pass
return safe_max_old_space_size
def generate_assets_map():
symlinks = {}
for app_name in frappe.get_all_apps():
app_doc_path = None
pymodule = frappe.get_module(app_name)
app_base_path = os.path.abspath(os.path.dirname(pymodule.__file__))
app_public_path = os.path.join(app_base_path, "public")
app_node_modules_path = os.path.join(app_base_path, "..", "node_modules")
app_docs_path = os.path.join(app_base_path, "docs")
app_www_docs_path = os.path.join(app_base_path, "www", "docs")
app_assets = os.path.abspath(app_public_path)
app_node_modules = os.path.abspath(app_node_modules_path)
# {app}/public > assets/ |
__ | all__ = ["bayes", "cpt", "heuristic", "payoff", "r | ecognition", "rl", "sharing"] |
# -*- coding: utf-8 -*-
# Description: sort audio clips by intensity, frequency, or duration; outputs .csv file for use in sequence.ck via ChucK
# python sort_audio.py -sort syllables
# python sort_audio.py -sort syllables -by frequency
# python sort_audio.py -sort syllables -by duration -fixed 0
import argparse
import csv
import json
import os
from pprint import pprint
import sys
import time
# input
parser = argparse.ArgumentParser()
parser.add_argument('-in', dest="INPUT_FILE", default="data/still_i_rise.json", help="Path to input aligned transcript json file")
parser.add_argument('-out', dest="OUTPUT_FILE", de | fault="data/ck_sequence.csv", help="Path to output csv sequence file")
parser.add_argument('-sort', dest="SORT_FIELD", default="words", help="Field to sort: syllables, words, or lines")
parser.add_argument('-by', dest="SORT_BY", default="intensity", help="Feature to sort | by: intensity, frequency, or duration")
parser.add_argument('-dir', dest="SORT_DIRECTION", type=int, default=1, help="Sort direction: -1 or 1")
parser.add_argument('-overlap', dest="OVERLAP_MS", type=int, default=50, help="Amount of ms to overlap in clip")
parser.add_argument('-fixed', dest="FIXED_MS", type=int, default=200, help="Fixed ms to play each sound clip; set to 0 to disable")
parser.add_argument('-cd', dest="CLIP_DIR", default="clips/", help="Path to clip directory")
parser.add_argument('-fe', dest="FILE_EXT", default=".wav", help="File extension of audio clips")
# init input
args = parser.parse_args()
SORT_FIELD = args.SORT_FIELD
SORT_BY = args.SORT_BY
SORT_DIRECTION = args.SORT_DIRECTION
OVERLAP_MS = args.OVERLAP_MS
FIXED_MS = args.FIXED_MS
CLIP_DIR = args.CLIP_DIR
FILE_EXT = args.FILE_EXT
data = {}
with open(args.INPUT_FILE) as f:
data = json.load(f)
# populate clips
clips = []
if SORT_FIELD in data:
clips = data[SORT_FIELD]
if SORT_FIELD=="syllables":
for word in data["words"]:
clips += word["syllables"]
# add duration
for i, clip in enumerate(clips):
clips[i]["duration"] = clip["end"] - clip["start"]
# sort clips
clips = sorted(clips, key=lambda c: SORT_DIRECTION * c[SORT_BY])
# generate a sequence
sequence = []
ms = 0
for clip in clips:
dur = int(clip["duration"] * 1000)
filename = CLIP_DIR + SORT_FIELD + "/" + clip["name"] + FILE_EXT
if os.path.isfile(filename):
sequence.append({
"elapsed_ms": ms,
"gain": 1.0,
"file": filename
})
else:
print "%s not found" % filename
if FIXED_MS > 0:
ms += FIXED_MS
else:
ms += dur - OVERLAP_MS
ms = max(0, ms)
print "Total time: %s" % time.strftime('%M:%S', time.gmtime(ms/1000))
# Add milliseconds to sequence
elapsed = 0
for i, step in enumerate(sequence):
sequence[i]['milliseconds'] = step['elapsed_ms'] - elapsed
elapsed = step['elapsed_ms']
# Write sequence
with open(args.OUTPUT_FILE, 'wb') as f:
w = csv.writer(f)
for step in sequence:
w.writerow([step['file']])
w.writerow([step['gain']])
w.writerow([step['milliseconds']])
f.seek(-2, os.SEEK_END) # remove newline
f.truncate()
print "Successfully wrote sequence to file: %s" % args.OUTPUT_FILE
|
"""SocialNewspaper URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from ArticleManagement import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^share_article/$', views.share_article, name="share_article"),
url(r'^print_sharing/(?P<article_id>[0-9]+)$', views.print_sharing, name="print_sharing"),
url(r'^insert_article/$', views.insert_article, name="insert_article"),
url(r'^add_interesting/(?P<article_id>[0-9]+)$', views.add_interesting, name="add_ | interesting"),
url(r'^print_articl | es/$', views.print_articles, name="print_articles"),
url(r'^editorial/$', views.editorial, name="editorial"),
url(r'^$', views.editorial, name="home")
]
|
"""Authenticating HTTP Server
This module builds on BaseHTTPServer and implements basic authentication
"""
import base64
import binascii
import BaseHTTPServer
DEFAULT_AUTH_ERROR_MESSAGE = """
<head>
<title>%(code)s - %(message)s</title>
</head>
<body>
<h1>Authorization Required</h1>
this server could not verify that you
are authorized to access the document
requested. Either you supplied the wrong
credentials (e.g., bad password), or your
browser doesn't understand how to supply
the credentials required.
</body>"""
def _quote_html(html):
return html.replace("&", "&").replace("<", "<").replace(">", ">")
class AuthRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""
Simple handler that can check for auth headers
In your subclass you have to define the method get_userinfo(user, password)
which should return 1 or None depending on whether the password was
ok or not. None means tha | t the user is not authorized.
"""
# False means no authentiation
DO_AUTH = 1
def parse_request(self):
if not BaseHTTPServer.BaseHTTPRequestHandler.parse | _request(self):
return False
if self.DO_AUTH:
authorization = self.headers.get('Authorization', '')
if not authorization:
self.send_autherror(401, "Authorization Required")
return False
scheme, credentials = authorization.split()
if scheme != 'Basic':
self.send_error(501)
return False
credentials = base64.decodestring(credentials)
user, password = credentials.split(':', 2)
if not self.get_userinfo(user, password, self.command):
self.send_autherror(401, "Authorization Required")
return False
return True
def send_autherror(self, code, message=None):
"""Send and log an auth error reply.
Arguments are the error code, and a detailed message.
The detailed message defaults to the short entry matching the
response code.
This sends an error response (so it must be called before any
output has been generated), logs the error, and finally sends
a piece of HTML explaining the error to the user.
"""
try:
short, long = self.responses[code]
except KeyError:
short, long = '???', '???'
if message is None:
message = short
explain = long
self.log_error("code %d, message %s", code, message)
# using _quote_html to prevent Cross Site Scripting attacks (see bug
# #1100201)
content = (self.error_auth_message_format % {'code': code, 'message':
_quote_html(message), 'explain': explain})
self.send_response(code, message)
self.send_header('Content-Type', self.error_content_type)
self.send_header('WWW-Authenticate', 'Basic realm="PyWebDAV"')
self.send_header('Connection', 'close')
self.end_headers()
self.wfile.write(content)
error_auth_message_format = DEFAULT_AUTH_ERROR_MESSAGE
def get_userinfo(self, user, password, command):
"""Checks if the given user and the given
password are allowed to access.
"""
# Always reject
return None
|
"""
Tests related specifically to the FacilityDataset model.
"""
from django.db.utils import IntegrityError
from django.test import TestCase
from ..models import Classroom
from ..models import Facility
from ..models import FacilityDataset
from ..models import FacilityUser
from ..models import LearnerGroup
class FacilityDatasetTestCase(TestCase):
def setUp(self):
self.facility = Facility.objects.create()
self.classroom = Classroom.objects.create(parent=self.facility)
self.learner_group = LearnerGroup.objects.create(parent=self.classroom)
self.facility_user = FacilityUser.objects.create(
username="blah", password="#", facility=self.facility
)
def test_datasets_equal(self):
self.assertTrue(self.facility.dataset is not None)
self.assertEqual(self.facility.dataset, self.classroom.dataset)
self.assertEqual(self.classroom.dataset, self.learner_group.dataset)
self.assertEqual(self.learner_group.dataset, self.facility_user.dataset)
def test_cannot_create_role_across_datasets(self):
facility2 = Facility.objects.create()
with self.assertRaises(IntegrityError):
facility2.add_admin(self.facility_user)
def test_cannot_create_membership_across_datasets(self):
facility2 = Facility.objects.create()
facility_user2 = FacilityUser.objects.create(
username="blah", password="#", facility=facility2
)
with self.assertRaises(IntegrityError):
self.learner_group.add_learner(facility_user2) |
def test_cannot_pass_inappropriate_dataset(self):
facility2 = Facility.objects.create()
with self.assertRaises(IntegrityError):
FacilityUser.objects.create(
facility=self.facility, datase | t=facility2.dataset
)
def test_cannot_change_dataset(self):
facility2 = Facility.objects.create()
self.facility_user.dataset = facility2.dataset
with self.assertRaises(IntegrityError):
self.facility_user.save()
def test_cannot_change_facility(self):
facility2 = Facility.objects.create()
self.facility_user.facility = facility2
with self.assertRaises(IntegrityError):
self.facility_user.save()
def test_manually_passing_dataset_for_new_facility(self):
dataset = FacilityDataset.objects.create()
facility = Facility(name="blah", dataset=dataset)
facility.full_clean()
facility.save()
self.assertEqual(dataset, facility.dataset)
def test_dataset_representation(self):
self.assertEqual(
str(self.facility.dataset),
"FacilityDataset for {}".format(self.facility.name),
)
new_dataset = FacilityDataset.objects.create()
self.assertEqual(str(new_dataset), "FacilityDataset (no associated Facility)")
|
a_level] [--output]
Synopsis:
Takes alignment data from short reads mapped to an assembly in BAM format
and the phylogeny of those short reads from the Phylosift
sequence_taxa_summary.txt file to identify and color the phylogeny of each
contig in the assembly.
Copyright:
esom_tracer2.py Color ESOM best matches by phylogenies
Copyright (C) 2016 William Brazelton, Alex | Hyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program i | s distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import print_function
import argparse
import bz2
from collections import defaultdict
import colorsys
import gzip
import pysam
import string
import sys
import zipfile
__author__ = 'Alex Hyer'
__version__ = '1.3.0'
class Contig:
"""Stores phylogenetic and ESOM data from short reads for a contig"""
def __init__(self, contig_id):
self.name = contig_id
self.taxa_dict = defaultdict(float)
self.chunk_numbers = []
self.class_number = None
def add_chunk_numbers(self, chunk_numbers):
"""Assign ESOM chunk numbers to contig
:param chunk_number: NAMES file number of chunk for contig
:type chunk_number: int or list of ints
"""
if type(chunk_numbers) is int:
chunk_numbers = [chunk_numbers]
self.chunk_numbers += chunk_numbers
def assign_class_number(self, class_number):
"""Assign the contig to an ESOM class
:param class_number: the ESOM class to assign the contig to
:type class_number: int
"""
self.class_number = class_number
def add_taxa_data(self, taxa_name, prob_mass):
"""Add Phylosift short read data to contig information
Note: all taxa given are assumed to be at the same taxonomic level
:param taxa_name: The taxa the short read is associated with
:type taxa_name: str
:param prob_mass: The Phylosift probability mass for the read
"type prob_mass: float
"""
self.taxa_dict[taxa_name] += prob_mass
def best_taxa(self):
"""Identify the most probable taxa for the contig and return it
:returns: most probable taxa for the contig
:rtype: str
"""
try:
taxa = max(self.taxa_dict.iteritems(), key=lambda x: x[1])[0]
except ValueError:
taxa = None
return taxa
def possible_taxa(self):
"""Returns all possible taxa for contig
:returns: all possible taxa for contig
:rtype: view
"""
return self.taxa_dict.keys()
def names_dict(names_handle):
"""Returns nested dictionary of NAMES file
:returns: Dictionary as structured below
:rtype: dict
:param names_handle: file handle to NAMES file
:type names_handle: File Object
Dictionary Structure (YAML format)
----------------------------------
contig_name:
contig_chunk: chunk_number
"""
temp_dict = defaultdict(dict)
names_handle.readline()
for line in names_handle:
columns = line.strip().split('\t')
name = '-'.join(columns[2].split('_')[0:2]).strip()
temp_dict[name][columns[1]] = columns[0]
return temp_dict
def rainbow_picker(scale):
"""Generates rainbow RGB values
:returns: [scale] number of RGB tuples
:rtype: list
:param scale: number of RGB values to generate
:type scale: int
"""
hsv_tuples = [(float(i) / float(scale), 1.0, 1.0) for i in range(scale)]
rgb_tuples = map(lambda x: tuple(i * 255 for i in \
colorsys.hsv_to_rgb(*x)), hsv_tuples)
return rgb_tuples
def taxa_dict(taxa_handle):
"""Returns nested dictionary of sequence_taxa_summary.txt
:returns: Dictionary as structured below
:rtype: dict
:param taxa_handle: file handle to sequence_taxa_summary.txt
:type taxa_handle: File Object
Dictionary Structure (YAML format)
----------------------------------
short_read_name:
taxa_level: [taxa_name,probability_mass]
"""
temp_dict = defaultdict(dict)
taxa_handle.readline()
for line in taxa_handle:
columns = line.strip().split('\t')
temp_dict[columns[0].strip().split()[0]][columns[3]] = [columns[4], columns[5]]
return temp_dict
def x_reader(file_name):
"""Detect compression type and return appropriate file handle
:returns: A file handle depending on file type
:rtype: File Handle
:param file_name: Name of file to open
:type file_name: str
Supports GZIP, BZIP2, and ZIP compressed files,
returns a normal file handle if file isn't compressed.
"""
supported_files = {
'gz': gzip.open,
'bz2': bz2.BZ2File,
'zip': zipfile.ZipFile.open
}
last_ext = file_name.split('.')[-1]
if last_ext in supported_files:
return supported_files[last_ext](file_name, 'rU')
else:
return open(file_name, 'rU')
def main(args):
print(' '.join(sys.argv[:]))
# Instantiate each contig and assign chunk numbers
print('> Processing {0}'.format(args.names.name))
names = names_dict(args.names)
args.names.close()
print('> Processed {0} unique contigs from {1}'.format(str(len(names)),
args.names.name))
contigs = defaultdict(dict)
for name in names:
contigs[name] = Contig(name)
chunk_numbers = [int(names[name][chunk]) for chunk in names[name]]
contigs[name].add_chunk_numbers(chunk_numbers)
# Add taxonomy data to Contig based on what short reads map to them
print('> Processing {0}'.format(args.taxonomy.name))
taxa = taxa_dict(args.taxonomy)
args.taxonomy.close()
print('> Processed {0} short reads from {1}'.format(str(len(taxa)),
args.taxonomy.name))
unique_taxa = {'N/A': 1}
unique_taxa_number = 2
print('> Processing {0}'.format(args.bam.filename))
references_match_contigs = 0
reads_mapping_contigs = 0
mapped_taxa_reads = 0
for reference in args.bam.references:
if reference in contigs:
references_match_contigs += 1
for read in args.bam.fetch(reference=reference):
reads_mapping_contigs += 1
read_name = read.query_name
if read_name in taxa and args.taxa_level in taxa[read_name]:
mapped_taxa_reads += 1
taxa_name = taxa[read_name][args.taxa_level][0]
prob_mass = float(taxa[read_name][args.taxa_level][1])
contigs[reference].add_taxa_data(taxa_name, prob_mass)
if taxa_name not in unique_taxa:
unique_taxa[taxa_name] = unique_taxa_number
unique_taxa_number += 1
args.bam.close()
print('> {0} contigs in {1} matched contigs in {2}'.format(
str(references_match_contigs),
args.bam.filename,
args.names.name))
print('> {0} reads from {1} map to contigs in {2}'.format(
str(reads_mapping_contigs),
args.bam.filename,
args.names.name))
print('> {0} reads from {1} map to contigs in {2} and have assigned '
'taxa from {3} at the level {4}'.format(str(mapped_taxa_reads),
args.bam.filename,
args.names.name,
args.taxonomy.name,
|
# -*- coding: UTF-8 -*-
#
# generated by wxGlade 0.6.8 on Thu Apr 2 20:01:32 2015
#
import wx
# begin wxGlade: dependencies
# end wxGlade
# begin wxGlade: extracode
# end wxGlade
class SocketFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: SocketFrame.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.button_aquarium = wx.ToggleButton(self, wx.ID_ANY, "Aquarium")
self.button_kitchen = wx.ToggleButton(self, wx.ID_ANY, u"K\xfcche")
self.button_bedroom = wx.ToggleButton(self, wx.ID_ANY, "Schlafstube")
self.button_back = wx.Button(self, wx.ID_ANY, u"Zur\xfcck")
self.button_livingroom = wx.ToggleButton(self, wx.ID_ANY, "Wohnstube")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnBtnAquarium, self.button_aquarium)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnBtnKitchen, self.button_kitchen)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnBtnB | edroom, self.button_bedroom)
self.Bind(wx.EVT_BUTTON, self.OnBtnBack, self.button_back)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnBtnLivingroom, self.button_livingroom)
# end wxGlade
def __set_properties(self):
# begin wxGlade: SocketFrame.__set_properties
self.SetTitle("frame_1")
self.SetSize((483, 273))
# end wxGlade
def __do_layout(self):
# begin wxGlade: SocketFrame.__do_layout
grid_sizer_2 = wx.GridSizer(2, | 3, 0, 0)
grid_sizer_2.Add(self.button_aquarium, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 5)
grid_sizer_2.Add(self.button_kitchen, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 5)
grid_sizer_2.Add(self.button_bedroom, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 5)
grid_sizer_2.Add(self.button_back, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 5)
grid_sizer_2.Add(self.button_livingroom, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 5)
self.SetSizer(grid_sizer_2)
self.Layout()
# end wxGlade
def OnBtnAquarium(self, event): # wxGlade: SocketFrame.<event_handler>
print "Event handler 'OnBtnAquarium' not implemented!"
event.Skip()
def OnBtnKitchen(self, event): # wxGlade: SocketFrame.<event_handler>
print "Event handler 'OnBtnKitchen' not implemented!"
event.Skip()
def OnBtnBedroom(self, event): # wxGlade: SocketFrame.<event_handler>
print "Event handler 'OnBtnBedroom' not implemented!"
event.Skip()
def OnBtnBack(self, event): # wxGlade: SocketFrame.<event_handler>
print "Event handler 'OnBtnBack' not implemented!"
event.Skip()
def OnBtnLivingroom(self, event): # wxGlade: SocketFrame.<event_handler>
print "Event handler 'OnBtnLivingroom' not implemented!"
event.Skip()
# end of class SocketFrame
|
from django.contrib.auth.models import User
from djblets.testing.decorators import add_fixtures
from djblets.testing.testcases import TestCase
from reviewboard.accounts.models import LocalSiteProfile
from reviewboard.reviews.models import ReviewRequest
class ProfileTests(TestCase):
"""Testing the Profile model."""
| fixtures = ['test_users']
def test_is_profile_visible_with_public(self):
"""Testing User.is_profile_public with public profiles."""
user1 = User.objects.get(username='admin')
user2 = User.objects.get(username='doc')
self.assertTrue(user1.is_profile_visible(user2))
def test_is_profile_visible_with_private(self):
"""Testing User.is_profile_public with private prof | iles."""
user1 = User.objects.get(username='admin')
user2 = User.objects.get(username='doc')
profile = user1.get_profile()
profile.is_private = True
profile.save()
self.assertFalse(user1.is_profile_visible(user2))
self.assertTrue(user1.is_profile_visible(user1))
user2.is_staff = True
self.assertTrue(user1.is_profile_visible(user2))
@add_fixtures(['test_reviewrequests', 'test_scmtools', 'test_site'])
def test_is_star_unstar_updating_count_correctly(self):
"""Testing if star, unstar affect review request counts correctly."""
user1 = User.objects.get(username='admin')
profile1 = user1.get_profile()
review_request = ReviewRequest.objects.public()[0]
site_profile = profile1.site_profiles.get(local_site=None)
profile1.star_review_request(review_request)
site_profile = LocalSiteProfile.objects.get(pk=site_profile.pk)
self.assertTrue(review_request in
profile1.starred_review_requests.all())
self.assertEqual(site_profile.starred_public_request_count, 1)
profile1.unstar_review_request(review_request)
site_profile = LocalSiteProfile.objects.get(pk=site_profile.pk)
self.assertFalse(review_request in
profile1.starred_review_requests.all())
self.assertEqual(site_profile.starred_public_request_count, 0)
|
# -*- coding: utf-8 -*-
| # Generated by Django 1.10.4 on 2017-05-30 22:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pattern', '0014_pattern_editnumber'),
]
operations = [
migrations.AddFiel | d(
model_name='pattern',
name='json',
field=models.TextField(null=True),
),
]
|
#
import io
import json
import sys
from django.core.management import call_command
from leaves.models import (
Harvester,
Panda,
)
from aloe import after, step
from aloe.tools import guess_types
from aloe_django.steps.models import (
test_existence,
tests_existence,
write_models,
writes_models,
)
from nose.tools import assert_equals
max_rego = 0
@writes_models(Harvester)
def write_with_rego(data, field=None):
for hash_ in data:
hash_['rego'] = hash_['make'][:3].upper() + "001"
write_models(Harvester, data, field=field)
@tests_existence(Harvester)
def check_with_rego(queryset, data):
try:
data['rego'] = data['rego'].upper()
except KeyError:
pass
return test_existence(queryset, data)
@step(r'The database dump is as follows')
def database_dump(step):
if sys.version_info >= (3, 0):
output = io.StringIO()
else:
output = io.BytesIO()
call_command('dumpdata', stdout=output, indent=2)
output = output.getvalue()
assert_equals(json.loads(output), json.loads(step.multiline))
@step(r'I have populated the database')
def database_populated(step):
pass
@step(r'I count the harvesters')
def count_harvesters(step):
print("Harvester count: %d" % Harvester.objects.count())
@writes_models(Panda)
def write_pandas(data, field):
# It is not necessary to call hashes_data/guess_types, but it might be
# present in old code using the library. Test that it is a no-op
# in that case.
data = guess_types(data)
for hash_ in data:
i | f 'name' in has | h_:
hash_['name'] += ' Panda'
return write_models(Panda, data, field)
|
"""Unit tests of authorization searches."""
import pytest
from ..utilities.general import is_never_authz, is_no_authz, uses_cataloging, uses_filesystem_only
from dlkit.abstract_osid.osid import errors
from dlkit.primordium.id.primitives import Id
from dlkit.primordium.type.primitives import Type
from dlkit.runtime import PROXY_SESSION, proxy_example
from dlkit.runtime.managers import Runtime
REQUEST = proxy_example.SimpleRequest()
CONDITION = PROXY_SESSION.get_proxy_condition()
CONDITION.set_http_request(REQUEST)
PROXY = PROXY_SESSION.get_proxy(CONDITION)
DEFAULT_TYPE = Type(**{'identifier': 'DEFAULT', 'namespace': 'DEFAULT', 'authority': 'DEFAULT'})
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def authorization_search_class_fixture(request):
# From test_templates/resource.py::ResourceSearch::init_template
request.cls.service_config = request.param
request.cls.svc_mgr = Runtime().get_service_manager(
'AUTHORIZATION',
proxy=PROXY,
implementation=request.cls.service_config)
create_form = request.cls.svc_mgr.get_vault_form_for_create([])
create_form.display_name = 'Test catalog'
create_form.description = 'Test catalog description'
request.cls.catalog = request.cls.svc_mgr.create_vault(create_form)
def class_tear_down():
request.cls.svc_mgr.delete_vault(request.cls.catalog.ident)
request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def authorization_search_test_fixture(request):
    """Per-test fixture: fresh AuthorizationSearch from the class catalog."""
    # From test_templates/resource.py::ResourceSearch::init_template
    request.cls.search = request.cls.catalog.get_authorization_search()
@pytest.mark.usefixtures("authorization_search_class_fixture", "authorization_search_test_fixture")
class TestAuthorizationSearch(object):
    """Tests for AuthorizationSearch"""
    # All tests below are placeholders awaiting implementation.
    @pytest.mark.skip('unimplemented test')
    def test_search_among_authorizations(self):
        """Tests search_among_authorizations"""
        pass
    @pytest.mark.skip('unimplemented test')
    def test_order_authorization_results(self):
        """Tests order_authorization_results"""
        pass
    @pytest.mark.skip('unimplemented test')
    def test_get_authorization_search_record(self):
        """Tests get_authorization_search_record"""
        pass
# NOTE(review): the fixtures named here are not defined in this module --
# presumably supplied by a conftest; confirm they exist.
@pytest.mark.usefixtures("authorization_search_results_class_fixture", "authorization_search_results_test_fixture")
class TestAuthorizationSearchResults(object):
    """Tests for AuthorizationSearchResults"""
    # All tests below are placeholders awaiting implementation.
    @pytest.mark.skip('unimplemented test')
    def test_get_authorizations(self):
        """Tests get_authorizations"""
        pass
    @pytest.mark.skip('unimplemented test')
    def test_get_authorization_query_inspector(self):
        """Tests get_authorization_query_inspector"""
        pass
    @pytest.mark.skip('unimplemented test')
    def test_get_authorization_search_results_record(self):
        """Tests get_authorization_search_results_record"""
        pass
@pytest.fixture(scope="class",
                params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def vault_search_class_fixture(request):
    """Per-class fixture: build an AUTHORIZATION service manager and a test
    vault for each parametrized service configuration."""
    # From test_templates/resource.py::ResourceSearch::init_template
    request.cls.service_config = request.param
    request.cls.svc_mgr = Runtime().get_service_manager(
        'AUTHORIZATION',
        proxy=PROXY,
        implementation=request.cls.service_config)
    create_form = request.cls.svc_mgr.get_vault_form_for_create([])
    create_form.display_name = 'Test catalog'
    create_form.description = 'Test catalog description'
    request.cls.catalog = request.cls.svc_mgr.create_vault(create_form)
    def class_tear_down():
        # Remove the vault created above once the test class finishes.
        request.cls.svc_mgr.delete_vault(request.cls.catalog.ident)
    request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def vault_search_test_fixture(request):
    """Per-test fixture: fresh VaultSearch from the class catalog."""
    # From test_templates/resource.py::ResourceSearch::init_template
    request.cls.search = request.cls.catalog.get_vault_search()
@pytest.mark.usefixtures("vault_search_class_fixture", "vault_search_test_fixture")
class TestVaultSearch(object):
    """Tests for VaultSearch"""
    # All tests below are placeholders awaiting implementation.
    @pytest.mark.skip('unimplemented test')
    def test_search_among_vaults(self):
        """Tests search_among_vaults"""
        pass
    @pytest.mark.skip('unimplemented test')
    def test_order_vault_results(self):
        """Tests order_vault_results"""
        pass
    @pytest.mark.skip('unimplemented test')
    def test_get_vault_search_record(self):
        """Tests get_vault_search_record"""
        pass
# NOTE(review): the fixtures named here are not defined in this module --
# presumably supplied by a conftest; confirm they exist.
@pytest.mark.usefixtures("vault_search_results_class_fixture", "vault_search_results_test_fixture")
class TestVaultSearchResults(object):
    """Tests for VaultSearchResults"""
    # All tests below are placeholders awaiting implementation.
    @pytest.mark.skip('unimplemented test')
    def test_get_vaults(self):
        """Tests get_vaults"""
        pass
    @pytest.mark.skip('unimplemented test')
    def test_get_vault_query_inspector(self):
        """Tests get_vault_query_inspector"""
        pass
    @pytest.mark.skip('unimplemented test')
    def test_get_vault_search_results_record(self):
        """Tests get_vault_search_results_record"""
        pass
|
import plotly.plotly as py
import plotly.graph_objs as go
# Load the disaster database: comma-separated rows where column 0 is the
# country, 1 the year, 2 the disaster type and 5 the death toll.
# BUG FIX: the file handle was previously left open (and its variable later
# shadowed by the trace list); a `with` block now closes it deterministically.
with open('Real_Final_database_02.csv') as csv_file:
    listdata = [line.strip().split(',') for line in csv_file]
# One scatter trace per disaster type, each with its own marker size/colour.
type_z = ['Flood', 'Epidemic', 'Drought', 'Earthquake', 'Storm']
size_fill = [15, 20, 25, 30, 35]
# BUG FIX: 'a36800' lacked the leading '#' and was not a valid hex colour.
fill_colors = ['#00d0f5', '#ff4a2e', '#a36800', '#ad9900', '#8b00db']
trace = []
for i in range(5):
    year_x = []
    death_z = []
    types_y = []
    for row in listdata:
        # Keep only Indonesian records of the current disaster type.
        if row[0] == 'Indonesia' and row[2] == type_z[i]:
            year_x.append(int(row[1]))
            death_z.append(int(row[5]))
            types_y.append(type_z[i])
    trace.append(go.Scatter(
        x=year_x,
        y=death_z,
        name=type_z[i],
        mode='markers',
        marker=dict(
            # Constant colour and size per trace, one entry per data point.
            color=[fill_colors[i]] * len(death_z),
            size=[size_fill[i]] * len(death_z)
        )
    ))
data = trace
layout = go.Layout(
    title='Total Death In Indonesia',
    showlegend=True,
    height=600,
    width=600,
    xaxis=dict(
        # set x-axis' labels direction at 45 degree angle
        tickangle=-45,
    ),
    yaxis=dict(
        title="Total Death",
        titlefont=dict(
            color='#ff2323'
        ),
        tickfont=dict(
            color='#ff2323'
        )
    ),
)
fig = go.Figure(data=data, layout=layout)
# Upload the figure to the plotly service (network side effect).
plot_url = py.plot(fig, filename='Total_Death_in_Indonesia')
import bala | nced
balanced.configure('ak-test-2eKlj1ZDfAcZSARMf3NMhBHywDej0avSY')
debit = balanced.Debit.fetch(' | /debits/WD5EW7vbyXlTsudIGF5AkrEA')
debit.description = 'New description for debit'
debit.meta = {
'facebook.id': '1234567890',
'anykey': 'valuegoeshere',
}
debit.save() |
# -* | - coding: utf-8 -*-
from aero.__version__ import __version_info__
__author__ = 'nickl-'
from .base import CommandProcessor as CommandProcessor
class InstallCommand(CommandProcessor):
    """Coroutine pipeline that runs a package install through an adapter and
    reports the outcome on stdout.

    Python 2 module (uses print statements); part of the aero command
    framework defined in .base.
    """
    from .base import coroutine
    # Most recently seen package/adapter, recorded by seen() and used by
    # res() when formatting result messages.
    package = ''
    adapter = ''
    def wiring(self):
        # Wire the coroutine pipeline: each -> spacing -> call -> res, with
        # progress ticking on the side and raw output going to stdout.
        self.out = self.write()
        self.ticker.routine(self.progress(None))
        return self.each(self.spacing(self.call(self.res())))
    def seen(self, command, adapter, package, result=False):
        # Record which package/adapter is being processed; returns `result`
        # unchanged so it can be chained as a filter.
        self.package = package
        self.adapter = adapter
        return result
    @coroutine
    def res(self):
        # Sink: receives (output, exit_code) pairs and prints the outcome.
        while True:
            res = (yield)
            if res[1] == 0:
                print 'Successfully installed package: {} with {}'.format(self.package, self.adapter)
            else:
                print 'Aborted: Error while installing package: {} {} returned exit code {}'.format(
                    self.package, self.adapter, res[1]
                )
    @coroutine
    def write(self):
        # Sink: writes received text straight to stdout.
        import sys
        out = sys.stdout
        while True:
            text = (yield)
            out.write(text)
    @coroutine
    def spacing(self, target):
        # Pass-through stage: prints a blank line before forwarding payloads.
        while True:
            payload = (yield)
            print u'\n'
            target.send(payload)
    @coroutine
    def progress(self, responder):
        # No-op progress consumer: accepts ticks and discards them.
        while True: (yield)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-12-01 17:4 | 3
from __future__ import unicode_literals
from django.db | import migrations, models
class Migration(migrations.Migration):
    """Allow Bet.isBetSuccessful to be NULL (NullBooleanField, default None,
    verbose name 'BetSuccess')."""
    dependencies = [
        ('polls', '0003_bettoride_success'),
    ]
    operations = [
        migrations.AlterField(
            model_name='bet',
            name='isBetSuccessful',
            field=models.NullBooleanField(default=None, verbose_name='BetSuccess'),
        ),
    ]
|
ature, Maturity, Reference, Section, Specification, Support,
Version)
from .base import TestCase
class TestCache(TestCase):
    def setUp(self):
        # Fresh Cache per test; log in as a user in the 'change-resource' group.
        self.cache = Cache()
        self.login_user(groups=['change-resource'])
    def test_browser_v1_serializer(self):
        """A freshly created Browser serializes to default field values plus
        typed PK references for its history and versions relations."""
        browser = self.create(Browser)
        out = self.cache.browser_v1_serializer(browser)
        expected = {
            'id': browser.id,
            'slug': u'',
            'name': {},
            'note': {},
            'history:PKList': {
                'app': u'webplatformcompat',
                'model': 'historicalbrowser',
                'pks': [browser.history.all()[0].pk],
            },
            'history_current:PK': {
                'app': u'webplatformcompat',
                'model': 'historicalbrowser',
                'pk': browser.history.all()[0].pk,
            },
            'versions:PKList': {
                'app': u'webplatformcompat',
                'model': 'version',
                'pks': [],
            },
        }
        self.assertEqual(out, expected)
def test_browser_v1_serializer_empty(self):
self.assertEqual(None, self.cache.browser_v1_serializer(None))
def test_browser_v1_loader(self):
browser = self.create(Browser)
with self.assertNumQueries(3):
obj = self.cache.browser_v1_loader(browser.pk)
with self.assertNumQueries(0):
serialized = self.cache.browser_v1_serializer(obj)
self.assertTrue(serialized)
def test_browser_v1_loader_not_exist(self):
self.assertFalse(Browser.objects.filter(pk=666).exists())
self.assertIsNone(self.cache.browser_v1_loader(666))
def test_browser_v1_invalidator(self):
browser = self.create(Browser)
self.assertEqual([], self.cache.browser_v1_invalidator(browser))
    def test_changeset_v1_serializer(self):
        """A fresh Changeset serializes with fixed epoch-string timestamps,
        its user as a PK reference, and empty historical-object PK lists."""
        created = datetime(2014, 10, 29, 8, 57, 21, 806744, UTC)
        changeset = self.create(Changeset, user=self.user)
        # Set timestamps via queryset .update() so model save logic cannot
        # override them, then re-fetch the row.
        Changeset.objects.filter(pk=changeset.pk).update(
            created=created, modified=created)
        changeset = Changeset.objects.get(pk=changeset.pk)
        out = self.cache.changeset_v1_serializer(changeset)
        expected = {
            'id': changeset.id,
            'created:DateTime': '1414573041.806744',
            'modified:DateTime': '1414573041.806744',
            'target_resource_type': '',
            'target_resource_id': 0,
            'closed': False,
            'user:PK': {
                'app': u'auth',
                'model': 'user',
                'pk': self.user.pk,
            },
            'historical_browsers:PKList': {
                'app': u'webplatformcompat',
                'model': 'historicalbrowser',
                'pks': []
            },
            'historical_features:PKList': {
                'app': u'webplatformcompat',
                'model': 'historicalfeature',
                'pks': []
            },
            'historical_maturities:PKList': {
                'app': u'webplatformcompat',
                'model': 'historicalmaturity',
                'pks': []
            },
            'historical_references:PKList': {
                'app': u'webplatformcompat',
                'model': 'historicalreference',
                'pks': []
            },
            'historical_sections:PKList': {
                'app': u'webplatformcompat',
                'model': 'historicalsection',
                'pks': []
            },
            'historical_specifications:PKList': {
                'app': u'webplatformcompat',
                'model': 'historicalspecification',
                'pks': []
            },
            'historical_supports:PKList': {
                'app': u'webplatformcompat',
                'model': 'historicalsupport',
                'pks': []
            },
            'historical_versions:PKList': {
                'app': u'webplatformcompat',
                'model': 'historicalversion',
                'pks': []
            },
        }
        self.assertEqual(out, expected)
def test_changeset_v1_serializer_empty(self):
self.assertEqual(None, self.cache.changeset_v1_serializer(None))
def test_changeset_v1_loader(self):
changeset = self.create(Changeset, user=self.user)
with self.assertNumQueries(9):
obj = self.cache.changeset_v1_loader(changeset.pk)
with self.assertNumQueries(0):
serialized = self.cache.changeset_v1_serializer(obj)
self.assertTrue(serialized)
def test_changeset_v1_loader_not_exist(self):
self.assertFalse(Changeset.objects.filter(pk=666).exists())
self.assertIsNone(self.cache.changeset_v1_loader(666))
def test_changeset_v1_invalidator(self):
changeset = self.create(Changeset, user=self.user)
self.assertEqual([], self.cache.changeset_v1_invalidator(changeset))
    def test_feature_v1_serializer(self):
        """A childless Feature serializes to its defaults, empty descendant
        bookkeeping, and typed PK references for its relations."""
        feature = self.create(
            Feature, slug='the-slug', name='{"en": "A Name"}')
        out = self.cache.feature_v1_serializer(feature)
        expected = {
            'id': feature.id,
            'slug': 'the-slug',
            'mdn_uri': {},
            'experimental': False,
            'standardized': True,
            'stable': True,
            'obsolete': False,
            'name': {'en': 'A Name'},
            'descendant_count': 0,
            'references:PKList': {
                'app': 'webplatformcompat',
                'model': 'reference',
                'pks': [],
            },
            'supports:PKList': {
                'app': 'webplatformcompat',
                'model': 'support',
                'pks': [],
            },
            'parent:PK': {
                'app': 'webplatformcompat',
                'model': 'feature',
                'pk': None,
            },
            'children:PKList': {
                'app': 'webplatformcompat',
                'model': 'feature',
                'pks': [],
            },
            'row_children:PKList': {
                'app': 'webplatformcompat',
                'model': 'feature',
                'pks': [],
            },
            'row_children_pks': [],
            'page_children_pks': [],
            'descendant_pks': [],
            'row_descendant_pks': [],
            'history:PKList': {
                'app': 'webplatformcompat',
                'model': 'historicalfeature',
                'pks': [feature.history.all()[0].pk],
            },
            'history_current:PK': {
                'app': 'webplatformcompat',
                'model': 'historicalfeature',
                'pk': feature.history.all()[0].pk,
            },
        }
        self.assertEqual(out, expected)
    def test_feature_v1_serializer_mixed_descendants(self):
        """Descendant bookkeeping separates 'page' children (those created
        with an mdn_uri) from plain 'row' children, per the assertions below."""
        feature = self.create(
            Feature, slug='the-slug', name='{"en": "A Name"}')
        child1 = self.create(Feature, slug='child1', parent=feature)
        child2 = self.create(Feature, slug='child2', parent=feature)
        child21 = self.create(Feature, slug='child2.1', parent=child2)
        page1 = self.create(
            Feature, slug='page1', parent=feature,
            mdn_uri='{"en": "https://example.com/page1"}')
        page2 = self.create(
            Feature, slug='page2', parent=child2,
            mdn_uri='{"en": "https://example.com/page2"}')
        # Re-fetch so computed/denormalized fields reflect the new children.
        feature = Feature.objects.get(id=feature.id)
        out = self.cache.feature_v1_serializer(feature)
        self.assertEqual(out['descendant_count'], 5)
        self.assertEqual(
            out['descendant_pks'],
            [child1.pk, child2.pk, child21.pk, page2.pk, page1.pk])
        self.assertEqual(
            out['row_descendant_pks'], [child1.pk, child2.pk, child21.pk])
        self.assertEqual(out['page_children_pks'], [page1.pk])
        self.assertEqual(out['row_children_pks'], [child1.pk, child2.pk])
@override_settings(PAGINATE_VIEW_FEATURE=2)
def test_feature_v1_serializer_paginated_descendants(self):
feature = self.create(
Feature, slug='the-slug', name='{"e |
running_pipeline['kwargs'].get('username')
backend_name = running_pipeline['backend']
requested_provider = provider.Registry.get_by_backend_name(backend_name)
try:
user = pipeline.get_authenticated_user(username, backend_name)
third_party_auth_successful = True
except User.DoesNotExist:
AUDIT_LOG.warning(
u'Login failed - user with username {username} has no social auth with backend_name {backend_name}'.format(
username=username, backend_name=backend_name))
return HttpResponse(
_("You've successfully logged into your {provider_name} account, but this account isn't linked with an {platform_name} account yet.").format(
platform_name=settings.PLATFORM_NAME, provider_name=requested_provider.NAME
)
+ "<br/><br/>" +
_("Use your {platform_name} username and password to log into {platform_name} below, "
"and then link your {platform_name} account with {provider_name} from your dashboard.").format(
platform_name=settings.PLATFORM_NAME, provider_name=requested_provider.NAME
)
+ "<br/><br/>" +
_("If you don't have an {platform_name} account yet, click <strong>Register Now</strong> at | the top of the page.").format(
pl | atform_name=settings.PLATFORM_NAME
),
content_type="text/plain",
status=403
)
else:
if 'email' not in request.POST or 'password' not in request.POST:
return JsonResponse({
"success": False,
"value": _('There was an error receiving your login information. Please email us.'), # TODO: User error message
}) # TODO: this should be status code 400 # pylint: disable=fixme
email = request.POST['email']
password = request.POST['password']
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.warning(u"Login failed - Unknown user email")
else:
AUDIT_LOG.warning(u"Login failed - Unknown user email: {0}".format(email))
# check if the user has a linked shibboleth account, if so, redirect the user to shib-login
# This behavior is pretty much like what gmail does for shibboleth. Try entering some @stanford.edu
# address into the Gmail login.
if settings.FEATURES.get('AUTH_USE_SHIB') and user:
try:
eamap = ExternalAuthMap.objects.get(user=user)
if eamap.external_domain.startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
return JsonResponse({
"success": False,
"redirect": reverse('shib-login'),
}) # TODO: this should be status code 301 # pylint: disable=fixme
except ExternalAuthMap.DoesNotExist:
# This is actually the common case, logging in user without external linked login
AUDIT_LOG.info("User %s w/o external auth attempting login", user)
# see if account has been locked out due to excessive login failures
user_found_by_email_lookup = user
if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
if LoginFailures.is_user_locked_out(user_found_by_email_lookup):
return JsonResponse({
"success": False,
"value": _('This account has been temporarily locked due to excessive login failures. Try again later.'),
}) # TODO: this should be status code 429 # pylint: disable=fixme
# see if the user must reset his/her password due to any policy settings
if PasswordHistory.should_user_reset_password_now(user_found_by_email_lookup):
return JsonResponse({
"success": False,
"value": _('Your password has expired due to password policy on this account. You must '
'reset your password before you can log in again. Please click the '
'"Forgot Password" link on this page to reset your password before logging in again.'),
}) # TODO: this should be status code 403 # pylint: disable=fixme
# if the user doesn't exist, we want to set the username to an invalid
# username so that authentication is guaranteed to fail and we can take
# advantage of the ratelimited backend
username = user.username if user else ""
if not third_party_auth_successful:
try:
user = authenticate(username=username, password=password, request=request)
# this occurs when there are too many attempts from the same IP address
except RateLimitException:
return JsonResponse({
"success": False,
"value": _('Too many failed login attempts. Try again later.'),
}) # TODO: this should be status code 429 # pylint: disable=fixme
if user is None:
# tick the failed login counters if the user exists in the database
if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
LoginFailures.increment_lockout_counter(user_found_by_email_lookup)
# if we didn't find this username earlier, the account for this email
# doesn't exist, and doesn't have a corresponding password
if username != "":
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
loggable_id = user_found_by_email_lookup.id if user_found_by_email_lookup else "<unknown>"
AUDIT_LOG.warning(u"Login failed - password for user.id: {0} is invalid".format(loggable_id))
else:
AUDIT_LOG.warning(u"Login failed - password for {0} is invalid".format(email))
return JsonResponse({
"success": False,
"value": _('Email or password is incorrect.'),
}) # TODO: this should be status code 400 # pylint: disable=fixme
# successful login, clear failed login attempts counters, if applicable
if LoginFailures.is_feature_enabled():
LoginFailures.clear_lockout_counter(user)
# Track the user's sign in
if settings.FEATURES.get('SEGMENT_IO_LMS') and hasattr(settings, 'SEGMENT_IO_LMS_KEY'):
tracking_context = tracker.get_tracker().resolve_context()
analytics.identify(user.id, {
'email': email,
'username': username,
})
analytics.track(
user.id,
"edx.bi.user.account.authenticated",
{
'category': "conversion",
'label': request.POST.get('course_id'),
'provider': None
},
context={
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
if user is not None and user.is_active:
try:
# We do not log here, because we have a handler registered
# to perform logging on successful logins.
login(request, user)
if request.POST.get('remember') == 'true':
request.session.set_expiry(604800)
log.debug("Setting user session to never expire")
else:
request.session.set_expiry(0)
except Exception as exc: # pylint: disable=broad-except
AUDIT_LOG.critical("Login failed - Could not create session. Is memcached running?")
log.critical("Login failed - Could not create session. Is memcached running?")
log.exception(exc)
raise
redirect_url = try_change_enrollment(request)
if third_party_auth_successful:
redirect_url = pipeline.get_complete_url(backend_name)
response = JsonResponse({
"success": True,
"redirect_url": redirect_url,
})
# Ensure that the external marketing site can
# detect that the user is logged in.
return set_logged_in_cookie(r |
#!/usr/bin/env python
"""Tests for grr.client.client_actions.tempfiles."""
import os
import tempfile
import time
from grr.client.client_actions import tempfiles
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import utils
class GRRTempFileTestDirectory(test_lib.GRRBaseTest):
  """Tests for GRR temp file utils when directory is provided."""
  def setUp(self):
    """Create fake filesystem."""
    super(GRRTempFileTestDirectory, self).setUp()
    self.prefix = config_lib.CONFIG.Get("Client.tempfile_prefix")
    # One directory that exists and one that does not, under the test root.
    self.existsdir = os.path.join(self.temp_dir, "this/exists/")
    os.makedirs(self.existsdir)
    self.not_exists = os.path.join(self.temp_dir, "does/not/exist/")
    self.new_temp_file = os.path.join(self.not_exists, self.prefix)
  def _CheckPermissions(self, filename, expected):
    # Just look at the last 3 octets.
    file_mode = os.stat(filename).st_mode & 0777
    self.assertEqual(file_mode, expected)
  def testCreateGRRTempFile(self):
    # Missing parent directories are created, file and dir both mode 0700.
    fd = tempfiles.CreateGRRTempFile(self.not_exists, suffix=".exe")
    self.assertTrue(fd.name.startswith(self.new_temp_file))
    self.assertTrue(fd.name.endswith(".exe"))
    self.assertTrue(os.path.exists(fd.name))
    self._CheckPermissions(fd.name, 0700)
    self._CheckPermissions(os.path.dirname(fd.name), 0700)
  def testCreateGRRTempFileRelativePath(self):
    # Relative paths are rejected outright.
    self.assertRaises(tempfiles.ErrorBadPath,
                      tempfiles.CreateGRRTempFile, "../../blah")
  def testCreateGRRTempFileWithLifetime(self):
    # A file created with a lifetime should be gone after it expires.
    fd = tempfiles.CreateGRRTempFile(self.not_exists, lifetime=0.1)
    self.assertTrue(os.path.exists(fd.name))
    time.sleep(1)
    self.assertFalse(os.path.exists(fd.name))
  def testDeleteGRRTempFile(self):
    grr_tempfile = os.path.join(self.existsdir, self.prefix)
    open(grr_tempfile, "w").write("something")
    tempfiles.DeleteGRRTempFile(grr_tempfile)
    self.assertFalse(os.path.exists(grr_tempfile))
  def testDeleteGRRTempFileBadPrefix(self):
    # Only files carrying the GRR tempfile prefix may be deleted.
    self.assertRaises(tempfiles.ErrorNotTempFile,
                      tempfiles.DeleteGRRTempFile,
                      os.path.join(self.existsdir, "/blah"))
  def testDeleteGRRTempFileRelativePath(self):
    self.assertRaises(tempfiles.ErrorBadPath,
                      tempfiles.DeleteGRRTempFile, "../../blah")
class GRRTempFileTestFilename(test_lib.GRRBaseTest):
  """Tests for GRR temp file utils when filename is provided."""
  def setUp(self):
    """Create fake filesystem."""
    super(GRRTempFileTestFilename, self).setUp()
    # This is where temp files go if a directory is not provided.
    # For this test it has to be different from the temp directory
    # so we create a new one.
    self.old_client_tempdir = config_lib.CONFIG.Get("Client.tempdir")
    self.client_tempdir = tempfile.mkdtemp(
        dir=config_lib.CONFIG.Get("Client.tempdir"))
    config_lib.CONFIG.Set("Client.tempdir", self.client_tempdir)
  def tearDown(self):
    # Remove the per-test temp dir and restore the original config value.
    os.rmdir(config_lib.CONFIG.Get("Client.tempdir"))
    config_lib.CONFIG.Set("Client.tempdir", self.old_client_tempdir)
  def testCreateAndDelete(self):
    # A named GRR temp file can be created and deleted via the helpers.
    fd = tempfiles.CreateGRRTempFile(filename="process.42.exe", mode="wb")
    fd.close()
    self.assertTrue(os.path.exists(fd.name))
    self.assertTrue(os.path.basename(fd.name) == "process.42.exe")
    tempfiles.DeleteGRRTempFile(fd.name)
    self.assertFalse(os.path.exists(fd.name))
    # A file outside the GRR temp dir must not be deletable via the helper.
    fd = open(os.path.join(self.temp_dir, "notatmpfile"), "w")
    fd.write("something")
    fd.close()
    self.assertTrue(os.path.exists(fd.name))
    self.assertRaises(tempfiles.ErrorNotTempFile,
                      tempfiles.DeleteGRRTempFile,
                      fd.name)
    self.assertTrue(os.path.exists(fd.name))
class DeleteGRRTempFiles(test_lib.EmptyActionTest):
  """Test DeleteGRRTempFiles client action."""
  def setUp(self):
    super(DeleteGRRTempFiles, self).setUp()
    filename = "%s_blah" % config_lib.CONFIG["Client.tempfile_prefix"]
    self.tempfile = utils.JoinPath(self.temp_dir,
                                   "delete_test", filename)
    self.dirname = os.path.dirname(self.tempfile)
    os.makedirs(self.dirname)
    config_lib.CONFIG.Set("Client.tempdir", self.dirname)
    # One ordinary file plus two GRR temp files inside the temp dir.
    self.not_tempfile = os.path.join(self.temp_dir, "notatempfile")
    open(self.not_tempfile, "w").write("something")
    self.temp_fd = tempfiles.CreateGRRTempFile(self.dirname)
    self.temp_fd2 = tempfiles.CreateGRRTempFile(self.dirname)
    self.assertTrue(os.path.exists(self.not_tempfile))
    self.assertTrue(os.path.exists(self.temp_fd.name))
    self.assertTrue(os.path.exists(self.temp_fd2.name))
    self.pathspec = rdfvalue.PathSpec(
        path=self.dirname, pathtype=rdfvalue.PathSpec.PathType.OS)
  def testDeleteGRRTempFilesInDirectory(self):
    # Deleting by directory removes only GRR temp files, not other files.
    result = self.RunAction("DeleteGRRTempFiles",
                            self.pathspec)[0]
    self.assertTrue(os.path.exists(self.not_tempfile))
    self.assertFalse(os.path.exists(self.temp_fd.name))
    self.assertFalse(os.path.exists(self.temp_fd2.name))
    self.assertTrue(self.temp_fd.name in result.data)
    self.assertTrue(self.temp_fd2.name in result.data)
  def testDeleteGRRTempFilesSpecificPath(self):
    # Deleting by file path removes exactly that temp file.
    self.pathspec = rdfvalue.PathSpec(
        path=self.temp_fd.name, pathtype=rdfvalue.PathSpec.PathType.OS)
    result = self.RunAction("DeleteGRRTempFiles",
                            self.pathspec)[0]
    self.assertTrue(os.path.exists(self.not_tempfile))
    self.assertFalse(os.path.exists(self.temp_fd.name))
    self.assertTrue(os.path.exists(self.temp_fd2.name))
    self.assertTrue(self.temp_fd.name in result.data)
    self.assertFalse(self.temp_fd2.name in result.data)
  def testDeleteGRRTempFilesPathDoesNotExist(self):
    self.pathspec = rdfvalue.PathSpec(
        path="/does/not/exist", pathtype=rdfvalue.PathSpec.PathType.OS)
    self.assertRaises(tempfiles.ErrorBadPath,
                      self.RunAction, "DeleteGRRTempFiles", self.pathspec)
def main(argv):
  # Delegate to the shared GRR test runner.
  test_lib.main(argv)
if __name__ == "__main__":
  flags.StartMain(main)
|
import demowlcutils
from demowlcutils import ppxml, WLC_login
from pprint import pprint as pp
from jnpr.wlc import WirelessLanController as WLC
# Demo: connect to a wireless LAN controller and build an RPC maker for a
# vlan named 'Jeremy' (the RPC is constructed here, not executed).
wlc = WLC(host='a', user='b', password='c')
r = wlc.RpcMaker( target='vlan', name='Jeremy')
# you can access the following attributes, refer to the jnpr.wlc.builder
# file for more details
# r.cmd
# r.target
# r.args
|
import django
if django.VERSION >= (1, 5):
from django.conf.urls im | port patterns, url
else:
from djang | o.conf.urls.defaults import patterns, url
def simpleAction(request):
    # Stub view: body is empty (returns None); presumably used only as a
    # URL-resolution target in tests -- confirm against the callers.
    pass
# Old-style Django URLconf (patterns() was removed in Django 1.10).
urlpatterns = patterns(
    '',
    url(r'^simple/action/$', simpleAction, name='simpleAction'),
)
|
#--------------------------------------------------------------------------
# Software: InVesalius - Software de Reconstrucao 3D de Imagens Medicas
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: invesalius@cti.gov.br
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
#--------------------------------------------------------------------------
# Este programa e software livre; voce pode redistribui-lo e/ou
# modifica-lo sob os termos da Licenca Publica Geral GNU, conforme
# publicada pela Free Software Foundation; de acordo com a versao 2
# da Licenca.
#
# Este programa eh distribuido na expectativa de ser util, mas SEM
# QUALQUER GARANTIA; sem mesmo a garantia implicita de
# COMERCIALIZACAO ou de ADEQUACAO A QUALQUER PROPOSITO EM
# PARTICULAR. Consulte a Licenca Publica Geral GNU para obter mais
# detalhes.
#--------------------------------------------------------------------------
import wx
import os
import wx.lib.embeddedimage as emb
import wx.lib.hyperlink as hl
import wx.lib.platebtn as pbtn
from wx.lib.pubsub import pub as Publisher
import invesalius.constants as constants
import invesalius.constants as const
# Widget IDs for the measure/annotation plate buttons created below.
ID_BTN_MEASURE_LINEAR = wx.NewId()
ID_BTN_MEASURE_ANGULAR = wx.NewId()
ID_BTN_ANNOTATION = wx.NewId()
class TaskPanel(wx.Panel):
    """Outer task panel: wraps InnerTaskPanel with a 7px border."""
    def __init__(self, parent):
        wx.Panel.__init__(self, parent)
        inner_panel = InnerTaskPanel(self)
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        sizer.Add(inner_panel, 1, wx.EXPAND | wx.GROW | wx.BOTTOM | wx.RIGHT |
                  wx.LEFT, 7)
        sizer.Fit(self)
        self.SetSizer(sizer)
        self.Update()
        self.SetAutoLayout(1)
class InnerTaskPanel(wx.Panel):
    """Measure/annotation task panel: hyperlink plus bitmap plate buttons
    that trigger linear measure, angular measure and text annotation.

    Python 2 module (uses a print statement in OnTextAnnotation).
    """
    def __init__(self, parent):
        wx.Panel.__init__(self, parent)
        self.SetBackgroundColour(wx.Colour(255,255,255))
        self.SetAutoLayout(1)
        # Counter for projects loaded in current GUI
        self.proj_count = 0
        # Floating items (to be inserted)
        self.float_hyper_list = []
        # Fixed text and hyperlink items
        tooltip = wx.ToolTip(_("Measure distances"))
        txt_measure = wx.StaticText(self, -1, _("Measure"))
        txt_measure.SetToolTip(tooltip)
        tooltip = wx.ToolTip(_("Add text annotations"))
        txt_annotation = hl.HyperLinkCtrl(self, -1,_("Add text annotations"))
        txt_annotation.SetUnderlines(False, False, False)
        txt_annotation.SetColours("BLACK", "BLACK", "BLACK")
        txt_annotation.SetToolTip(tooltip)
        txt_annotation.AutoBrowse(False)
        txt_annotation.UpdateLink()
        txt_annotation.Bind(hl.EVT_HYPERLINK_LEFT, self.OnTextAnnotation)
        # Image(s) for buttons
        BMP_ANNOTATE = wx.Bitmap(os.path.join(const.ICON_DIR, "annotation.png"), wx.BITMAP_TYPE_PNG)
        BMP_ANGLE = wx.Bitmap(os.path.join(const.ICON_DIR, "measure_angle.jpg"), wx.BITMAP_TYPE_JPEG)
        BMP_DISTANCE = wx.Bitmap(os.path.join(const.ICON_DIR, "measure_line.png"), wx.BITMAP_TYPE_PNG)
        BMP_ANNOTATE.SetWidth(25)
        BMP_ANNOTATE.SetHeight(25)
        BMP_ANGLE.SetWidth(25)
        BMP_ANGLE.SetHeight(25)
        BMP_DISTANCE.SetWidth(25)
        BMP_DISTANCE.SetHeight(25)
        # Buttons related to hyperlinks
        button_style = pbtn.PB_STYLE_SQUARE | pbtn.PB_STYLE_DEFAULT
        button_measure_linear = pbtn.PlateButton(self, ID_BTN_MEASURE_LINEAR, "",
                                                 BMP_DISTANCE, style=button_style)
        button_measure_angular = pbtn.PlateButton(self, ID_BTN_MEASURE_ANGULAR, "",
                                                  BMP_ANGLE, style=button_style)
        button_annotation = pbtn.PlateButton(self, ID_BTN_ANNOTATION, "",
                                             BMP_ANNOTATE, style=button_style)
        # When using PlaneButton, it is necessary to bind events from parent win
        self.Bind(wx.EVT_BUTTON, self.OnButton)
        # Tags and grid sizer for fixed items
        flag_link = wx.EXPAND|wx.GROW|wx.LEFT|wx.TOP
        flag_button = wx.EXPAND | wx.GROW
        sizer = wx.GridBagSizer(hgap=0, vgap=0)
        sizer.Add(txt_measure,pos=(0,0),flag=wx.GROW|wx.EXPAND|wx.TOP,border=3)
        sizer.Add(button_measure_linear,pos=(0,1),flag=wx.GROW|wx.EXPAND)
        sizer.Add(button_measure_angular,pos=(0,2),flag=wx.GROW|wx.EXPAND)
        sizer.Add(txt_annotation, pos=(1,0),flag=wx.GROW|wx.EXPAND)
        sizer.Add(button_annotation, pos=(1,2),span=(2,1), flag=wx.GROW|wx.EXPAND)
        sizer.AddGrowableCol(0)
        # Add line sizers into main sizer
        main_sizer = wx.BoxSizer(wx.VERTICAL)
        main_sizer.Add(sizer, 0, wx.GROW|wx.EXPAND)
        main_sizer.Fit(self)
        # Update main sizer and panel layout
        # NOTE(review): this installs the grid sizer, not main_sizer, as the
        # panel sizer (main_sizer is only stored) -- confirm that is intended.
        self.SetSizer(sizer)
        self.Fit()
        self.sizer = main_sizer
    def OnTextAnnotation(self, evt=None):
        # Still unimplemented: should publish a message instead of printing.
        print "TODO: Send Signal - Add text annotation (both 2d and 3d)"
    def OnLinkLinearMeasure(self):
        # Ask the viewer to enable the distance-measure interaction style.
        Publisher.sendMessage('Enable style',
                              constants.STATE_MEASURE_DISTANCE)
    def OnLinkAngularMeasure(self):
        # Ask the viewer to enable the angle-measure interaction style.
        Publisher.sendMessage('Enable style',
                              constants.STATE_MEASURE_ANGLE)
    def OnButton(self, evt):
        # Dispatch plate-button clicks by widget id.
        id = evt.GetId()
        if id == ID_BTN_MEASURE_LINEAR:
            self.OnLinkLinearMeasure()
        elif id == ID_BTN_MEASURE_ANGULAR:
            self.OnLinkAngularMeasure()
        else: # elif id == ID_BTN_ANNOTATION:
            self.OnTextAnnotation()
|
##
##
# File auto-generated against equivalent DynamicSerialize Java class
from .Property import Propert | y
class Header(object):
    """Holds a list of Property objects, optionally seeded from a multimap
    of key -> list-of-values (auto-generated DynamicSerialize class)."""

    def __init__(self, properties=None, multimap=None):
        # Default to a fresh list so instances never share state.
        self.properties = [] if properties is None else properties
        if multimap is not None:
            # Flatten {key: [v1, v2, ...]} into individual Property entries.
            for key, values in multimap.items():
                self.properties.extend(Property(key, v) for v in values)

    def getProperties(self):
        """Return the list of Property objects."""
        return self.properties

    def setProperties(self, properties):
        """Replace the list of Property objects."""
        self.properties = properties
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest as ut
import numpy as np
import scipy.spatial.distance as spd
from .mock_data import Data
from ....algorithms.similarities import cosine_binary, cosine, dice, jaccard
from ....algorithms.similarities import kulsinski, russellrao, sokalsneath
# Test fixture matrix: columns are items (the tests take shape[1] as n_items);
# rows are presumably users/ratings with 0 meaning "no value" -- confirm.
MATRIX = np.array([[1, 0, 3, 5, 0, 2],
                   [0, 1, 2, 0, 4, 1],
                   [3, 4, 0, 0, 1, 1],
                   [5, 0, 1, 2, 3, 0],
                   [2, 0, 4, 2, 0, 0],
                   [0, 7, 0, 1, 2, 5],
                   [4, 2, 5, 3, 5, 4]])
# 0/1 version of MATRIX (as floats) for the binary similarity measures.
BOOL_MATRIX = MATRIX.astype(bool).astype(float)
class TestSimilarities(ut.TestCase):
    """Each similarity function is checked against the matching scipy
    distance via the identity similarity == 1 - distance."""
    def setUp(self):
        self.data = Data(MATRIX)
    def test_cosine(self):
        should_be = spd.squareform(spd.pdist(MATRIX.T, spd.cosine))
        actually_is = (1 - cosine(self.data).toarray())
        self.assertTrue(np.allclose(should_be, actually_is))
    def test_cosine_binary(self):
        # Binary variant is compared against the boolean-ized matrix.
        should_be = spd.squareform(spd.pdist(BOOL_MATRIX.T, spd.cosine))
        actually_is = (1 - cosine_binary(self.data).toarray())
        self.assertTrue(np.allclose(should_be, actually_is))
    def test_dice(self):
        should_be = spd.squareform(spd.pdist(BOOL_MATRIX.T, spd.dice))
        actually_is = (1 - dice(self.data).toarray())
        self.assertTrue(np.allclose(should_be, actually_is))
    def test_jaccard(self):
        should_be = spd.squareform(spd.pdist(BOOL_MATRIX.T, spd.jaccard))
        actually_is = (1 - jaccard(self.data).toarray())
        self.assertTrue(np.allclose(should_be, actually_is))
    def test_kulsinski(self):
        # Reference matrix built pairwise by hand rather than with
        # pdist/squareform as above -- NOTE(review): reason unclear
        # (possibly diagonal handling); confirm before unifying.
        n_items = MATRIX.shape[1]
        should_be = np.zeros((n_items, n_items))
        for i in range(n_items):
            for j in range(n_items):
                should_be[i, j] = spd.kulsinski(BOOL_MATRIX.T[i],
                                                BOOL_MATRIX.T[j])
        actually_is = (1 - kulsinski(self.data).toarray())
        self.assertTrue(np.allclose(should_be, actually_is))
    def test_russellrao(self):
        # Same hand-built pairwise reference as test_kulsinski.
        n_items = MATRIX.shape[1]
        should_be = np.zeros((n_items, n_items))
        for i in range(n_items):
            for j in range(n_items):
                should_be[i, j] = spd.russellrao(BOOL_MATRIX.T[i],
                                                 BOOL_MATRIX.T[j])
        actually_is = (1 - russellrao(self.data).toarray())
        self.assertTrue(np.allclose(should_be, actually_is))
    def test_sokalsneath(self):
        should_be = spd.squareform(spd.pdist(BOOL_MATRIX.T, spd.sokalsneath))
        actually_is = (1 - sokalsneath(self.data).toarray())
        self.assertTrue(np.allclose(should_be, actually_is))
# Allow running this test module directly without a test runner.
if __name__ == '__main__':
    ut.main()
|
import subprocess
import os

# Batch-render every .escad file in this script's directory to .stl using
# extopenscad (aka implicitCAD).

# Path to the extopenscad binary installed via cabal.
cad_bin = os.path.expanduser('~/.cabal/bin/extopenscad')
# Input dir is this script file's dir.
inp_dir = '.'
# Output goes to a directory next to this script file.
out_dir = './output'
# Output file extension.
out_type = '.stl'

# Make any paths absolute.
out_dir = os.path.abspath(out_dir)
if not os.path.isdir(inp_dir):
    inp_dir = os.path.abspath(os.path.join(
        os.path.dirname(__file__), inp_dir
    ))

# The -o option tells extopenscad to output a file to the subsequent path.
o = '-o'

# Create the output dir; exist_ok replaces the old errno==17 check.
os.makedirs(out_dir, exist_ok=True)

# Go through each item in the input dir.
for f in os.listdir(inp_dir):
    # Only process real .escad files (endswith avoids matching names that
    # merely *contain* ".escad", e.g. "foo.escad.bak").
    if f.endswith('.escad'):
        # Strip the .escad suffix from the filename.
        n = f[:-len('.escad')]
        # Join the output dir with the stripped name and the output filetype.
        out = os.path.join(out_dir, n + out_type)
        # Join the input directory and the current dir list item.
        inp = os.path.join(inp_dir, f)
        # Emit what we came up with for a command.
        print(cad_bin, o, out, inp)
        # Run the command; text mode so communicate() returns str, not bytes.
        p = subprocess.Popen([cad_bin, o, out, inp],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             universal_newlines=True)
        # Wait for the command's output.
        s, e = p.communicate()
        # Print the command's output.
        print(s)
        print(e)
|
# Demo script exercising the pycraft_minetest helpers (where, chat, turtle,
# sphere, ...), all brought in by the star import below.
from pycraft_minetest import *
# Report the player's current position in chat.
pos = where()
chat(pos)
# Build a maze from a CSV layout file.
maze("maze1.csv")
# Turtle drawing with an obsidian pen.
t = turtle(obsidian)
t.forward(10)
move(3, 10, 5)
chat(where())
# Assorted shape helpers.
sphere(ice, y=-20)
circle([wool, 5], direction="horizontal")
line(gold, 0, 0, 0, 0, 50, 0)
block(iron, y=3)
blocks(wood, x=5, y=6, z=10)
# Interactive input read from the chat.
size = readnumber("tell the size...")
cube(redstone, size)
text = readstring("say something...")
chat("I said: " + text)
pyramid(sandstone)
polygon(obsidian, 12, 30)
chat("Hello Minecraft!")
# Endless loop: draw with a wool turtle, cycling through the 12 wool colors.
color = 0
uga = turtle([wool, color])
while True:
    for i in range(18):
        uga.forward(5)
        uga.up(20)
    uga.up(30)
    color += 1
    uga.penblock([wool, color % 12])
# GOLD in ICE
# while True:
#     if over(ice):
#         chat("ice")
#         block(gold, y=-1)
#     if near(gold):
#         chat("gold nearby!")
# TURTLE LOOP
# uga = turtle(redstone)
# passi = 2
# while True:
#     uga.forward(passi)
#     uga.left(90)
#     passi = passi + 2
# uga = turtle(redstone)
# bea = turtle(beacon)
# bea.setposition(0, 1, 0)
# # col = turtle(beacon)
#
# while True:
#     uga.forward(1)
#     bea.forward(1)
# uga = turtle(redstone)
# bea = turtle(powered_rail)
# bea.setposition(0, 1, 0)
# # col = turtle(beacon)
#
# while True:
#     uga.forward(2)
#     bea.forward(1)
# ANIMATE CUBE
# x = pos.x
# y = pos.y
# z = pos.z
# while True:
#     cube(ice, 5, x, y, z, absolute=True)
#     move(x-5, y+1, z+2, absolute=True)
#     time.sleep(0.1)
#     cube(air, 5, x, y, z, absolute=True)
#     x += 1
|
# pilas en | gine: un motor para hacer videojuegos
#
# Copyright 2010-2014 - Hugo Ruscitti
# License: LGPLv3 | (see http://www.gnu.org/licenses/lgpl.html)
#
# Website - http://www.pilas-engine.com.ar
from pilasengine.fondos import fondo
class Color(fondo.Fondo):
    """Background that fills the whole visible area with a single color."""

    def __init__(self, pilas, color):
        fondo.Fondo.__init__(self, pilas)
        # Size the backing surface to the full drawable area, then paint it.
        width, height = self.pilas.obtener_area()
        self.imagen = self.pilas.imagenes.crear_superficie(width, height)
        self.imagen.pintar(color)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache Licen | se, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable | law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
import mxnet as mx
from mxnet.ndarray.sparse import adam_update
import numpy as np
import argparse

# Benchmark the (sparse) adam updater: build weight/grad/state arrays per the
# CLI flags, warm up, then time `--repeat` in-place updates.

# Fixed seeds keep runs comparable.
mx.random.seed(0)
np.random.seed(0)

parser = argparse.ArgumentParser(description='Benchmark adam updater')
parser.add_argument('--dim-in', type=int, default=240000, help='weight.shape[0]')
parser.add_argument('--dim-out', type=int, default=512, help='weight.shape[1]')
parser.add_argument('--nnr', type=int, default=5000, help='grad.indices.shape[0]')
parser.add_argument('--repeat', type=int, default=1000, help='num repeat')
parser.add_argument('--dense-grad', action='store_true',
                    help='if set to true, both gradient and weight are dense.')
parser.add_argument('--dense-state', action='store_true',
                    help='if set to true, states are dense, indicating standard update')
parser.add_argument('--cpu', action='store_true')
args = parser.parse_args()

dim_in = args.dim_in
dim_out = args.dim_out
nnr = args.nnr
ctx = mx.cpu() if args.cpu else mx.gpu()

ones = mx.nd.ones((dim_in, dim_out), ctx=ctx)
if args.dense_grad:
    weight = ones.copy()
    grad = ones.copy()
else:
    # Sparse case: sample up to `nnr` unique rows and retain only those.
    weight = ones.tostype('row_sparse')
    indices = np.arange(dim_in)
    np.random.shuffle(indices)
    indices = np.unique(indices[:nnr])
    indices = mx.nd.array(indices, ctx=ctx)
    grad = mx.nd.sparse.retain(weight, indices)

mean = ones.copy() if args.dense_state else ones.tostype('row_sparse')
var = mean.copy()


def run_updates(count):
    """Apply `count` in-place adam updates, blocking after each one."""
    for _ in range(count):
        adam_update(weight, grad, mean, var, out=weight, lr=1, wd=0, beta1=0.9,
                    beta2=0.99, rescale_grad=0.5, epsilon=1e-8)
        weight.wait_to_read()


# warmup
run_updates(10)
# measure speed
start = time.time()
run_updates(args.repeat)
elapsed = time.time() - start
print(elapsed)
|
# coding=utf-8
import numpy as np
class CovalentAutocorrelation:
    """Count lattice sites whose nonzero occupant is unchanged vs. a reference."""

    def __init__(self, lattice):
        self.reset(lattice)

    def reset(self, lattice):
        """Store a copy of *lattice* as the new reference state."""
        self.lattice = lattice.copy()

    def calculate(self, lattice):
        """Return how many occupied sites still match the reference lattice."""
        unchanged = lattice == self.lattice
        occupied = lattice != 0
        return np.sum(unchanged & occupied)
class MeanSquareDisplacement:
    """Accumulate per-proton displacement vectors between lattice snapshots.

    Protons are identified by the positive labels stored in the lattice;
    *atombox* supplies distances that respect periodic boundaries.
    """

    def __init__(self, atom_positions, lattice, atombox):
        proton_count = np.sum(lattice > 0)
        # Allocate first: determine_proton_positions sizes its output from
        # self.snapshot.
        self.snapshot = np.zeros((proton_count, 3))
        self.displacement = np.zeros_like(self.snapshot)
        self.snapshot = self.determine_proton_positions(atom_positions, lattice)
        self.atombox = atombox

    def determine_proton_positions(self, atom_positions, lattice):
        """Return an array whose row i holds the position of proton label i+1."""
        positions = np.zeros_like(self.snapshot)
        labels = lattice[lattice > 0]
        # np.where picks nonzero sites — assumes labels are strictly positive.
        occupied, = np.where(lattice)
        positions[labels - 1] = atom_positions[occupied]
        return positions

    def update_proton_positions(self, atom_positions, lattice):
        """Refresh the stored snapshot without touching the displacement."""
        self.snapshot[:] = self.determine_proton_positions(atom_positions, lattice)

    def update_displacement(self, new_positions, lattice):
        """Update the current position of each proton while considering periodic boundaries.
        This assumes that the trajectory time step is small enough that no proton ever moves
        more than half of the periodic box length within one step."""
        current = self.determine_proton_positions(new_positions, lattice)
        self.displacement += self.atombox.distance(self.snapshot, current)
        self.snapshot = current

    def reset_displacement(self):
        """Zero the accumulated displacement in place."""
        self.displacement[:] = 0

    def msd(self):
        """Mean squared displacement per Cartesian component."""
        squared = self.displacement ** 2
        return np.sum(squared, axis=0) / self.displacement.shape[0]
|
#!/usr/bin/env python
# Little script to make HISTORY.rst more easy to format properly, lots TODO
# pull message down and embed, use arg parse, handle multiple, etc...
import os
import sys
try:
import requests
except ImportError:
requests = None
import urllib.parse
import textwrap
PROJECT_DIRECTORY = os.path.join(os.path.dirname(__file__), "..")
new_path = [PROJECT_DIRECTORY]
new_path.extend( sys.path[1:] ) # remove scripts/ from the path
sys.path = new_path
import pulsar as project
PROJECT_OWNER = project.PROJECT_OWNER
PROJECT_NAME = project.PROJECT_NAME
PROJECT_URL = "https://github.com/%s/%s" % (PROJECT_OWNER, PROJECT_NAME)
PROJECT_API = "https://api.github.com/repos/%s/%s/" % (PROJECT_OWNER, PROJECT_NAME)
AUTHORS_SKIP_CREDIT = ["jmchilton"]
def main(argv):
| history_path = os.path.join(PROJECT_DIRECTORY, "HISTORY.rst")
history = open(history_path, "r", encoding="utf-8").read()
def extend(from_str, line):
from_str += "\n"
return history.replace(from_str, from_str + line + "\n")
ident = argv[1]
message = ""
if len(argv) > 2:
message = argv[2]
elif not (ident.startswith("pr") or ident | .startswith("issue")):
api_url = urllib.parse.urljoin(PROJECT_API, "commits/%s" % ident)
req = requests.get(api_url).json()
commit = req["commit"]
message = commit["message"]
message = get_first_sentence(message)
elif requests is not None and ident.startswith("pr"):
pull_request = ident[len("pr"):]
api_url = urllib.parse.urljoin(PROJECT_API, "pulls/%s" % pull_request)
req = requests.get(api_url).json()
message = req["title"]
login = req["user"]["login"]
if login not in AUTHORS_SKIP_CREDIT:
message = message.rstrip(".")
message += " (thanks to `@%s`_)." % req["user"]["login"]
elif requests is not None and ident.startswith("issue"):
issue = ident[len("issue"):]
api_url = urllib.parse.urljoin(PROJECT_API, "issues/%s" % issue)
req = requests.get(api_url).json()
message = req["title"]
else:
message = ""
to_doc = message + " "
if ident.startswith("pr"):
pull_request = ident[len("pr"):]
text = ".. _Pull Request {0}: {1}/pull/{0}".format(pull_request, PROJECT_URL)
history = extend(".. github_links", text)
to_doc += "`Pull Request {0}`_".format(pull_request)
elif ident.startswith("issue"):
issue = ident[len("issue"):]
text = ".. _Issue {0}: {1}/issues/{0}".format(issue, PROJECT_URL)
history = extend(".. github_links", text)
to_doc += "`Issue {0}`_".format(issue)
else:
short_rev = ident[:7]
text = ".. _{0}: {1}/commit/{0}".format(short_rev, PROJECT_URL)
history = extend(".. github_links", text)
to_doc += "{0}_".format(short_rev)
to_doc = wrap(to_doc)
history = extend(".. to_doc", to_doc)
open(history_path, "w", encoding="utf-8").write(history)
def get_first_sentence(message):
    """Return the first line of *message* (a cheap 'first sentence' proxy)."""
    first_line, _, _ = message.partition("\n")
    return first_line
def wrap(message):
    """Format *message* as an RST bullet item wrapped at 78 columns."""
    wrapper = textwrap.TextWrapper(initial_indent="* ")
    wrapper.subsequent_indent = ' '
    wrapper.width = 78
    wrapped_lines = wrapper.wrap(message)
    return "\n".join(wrapped_lines)
# CLI entry point: argv[1] is the commit/PR/issue ident, argv[2] an
# optional literal message (see main()).
if __name__ == "__main__":
    main(sys.argv)
|
mile:', ':GAKUEngine:', ':pistol:', ':relieved:',
':wink:', ':grimacing:', ':rainbow_solid:', ':blowfish:',
':kissing_smiling_eyes:', ':tropical_drink:', ':face_with_medical_mask:',
':pill:', ':ruby:', ':cactus:', ':smiley_stuck_out_tongue_winking_eye:',
':boar:', ':smile:', ':face_with_tear_of_joy:', ':Cancer:',
':couple_in_love:', ':horse:', ':two_men_with_heart:', ':bowtie:',
':open_mouth:', ':frog_face:', ':Taurus:', ':octopus:', ':ship:',
':shooting_star:', ':face_with_ok_gesture:', ':wolf_face:', ':heart:',
':loudly_crying_face:', ':frowning:', ':scuba_diver:', ':love_hotel:',
':gentleman_octopus:', ':grinning_cat_face_with_smiling_eyes:',
':face_savouring_delicious_food:', ':rainbow:', ':mount_fuji:',
':victory_hand:', ':glowing_star:', ':ksroom:', ':beer_mug:', ':sweat:',
':hushed:', ':Pisces:', ':Capricorn:', ':stuck_out_tongue_winking_eye:',
':tennis_racquet_and_ball:', ':person_frowning:', ':spouting_whale:',
':tangerine:', ':person_bowing_deeply:', ':stuck_out_tongue_closed_eyes:',
':dog_face:', ':circled_ideograph_secret:', ':Libra:', ':jumping_spider:',
':disappointed_face:', ':hamburger:', ':octocat:', ':sleeping:',
':crescent_moon:', ':no_one_under_eighteen_symbol:', ':kissing:',
':unamused:', ':couple_with_heart:', ':fisted_hand_sign:',
':smiling_cat_face_with_heart_shaped_eyes:', ':anguished:', ':groupme:',
':expressionless:', ':phone_book:', ':full_moon:', ':bactrian_camel:',
':snowboarder:', ':microphone:', ':Gemini:', ':fearful_face:',
':pensive_face:', ':jack_o_lantern:', ':Aries:', ':palm_pre3:',
':speech_balloon:', ':koala:', ':poop:', ':quoll:', ':kissing_closed_eyes:',
':thumbs_up_sign:', ':person_with_folded_hands:', ':puke_finger:',
':Giorgio:', ':princess:', ':waxing_gibbous_moon:', ':two_men_in_love:',
':happijar:', ':guitar:', ':sun_with_face:', ':RV:', ':cloud:',
':grinning:', ':genshin:', ':Sagittarius:',
':disappointed_but_relieved_face:', ':paw_prints:', ':rice_ball:',
':anchor:', ':smirk:', ':pegasus_black:', ':lgtm:', ':persevering_face:',
':elephant:', ':face_with_no_good_gesture:', ':snake:', ':wink2:',
':pizza:', ':white_smiling_face:', ':Leo:', ':sunrise_over_mountains:',
':monster:', ':relaxed:', ':grin:', ':laughing:', ':car:', ':cake:',
':Kagetsuki:', ':ninja:', ':siamese_kitten:', ':weary_face:', ':ghost:',
':milky_way:', ':penguin:', ':drunk:', ':crying_cat_face:', ':dancer:',
':snail:', ':person_raising_both_hands_in_celebration:', ':smiley:',
':penguin_chick:', ':video_game:', ':flushed:', ':shit:', ':worried:',
':cyclone:', ':DSLR_click:', ':jumping_spider_red:', ':ocean_dive_view:',
':astonished_face:', ':happy_person_raising_one_hand:', ':bgok:',
':family:', ':smiley_smile:', ':wheelchair:', ':Happy_FMC:',
':smiley_kissing_heart:', ':hatching_chick:', ':hear_no_evil_monkey:',
':Virgo:', ':skull:', ':two_women_holding_hands:', ':assault_rifle:',
':pouting_face:', ':high_hopes:', ':angry_face:'
]
# Shortcodes from the EmojiOne set (Unicode codepoints, uppercase hex).
EMOJIONE = [
    ':2714:', ':2716:', ':274C:', ':274E:', ':2753:', ':2754:', ':2755:',
    ':2757:', ':2764:', ':303D:', ':1F401:', ':1F402:', ':1F403:', ':1F404:',
    ':1F405:', ':1F406:', ':1F407:', ':1F409:', ':1F410:', ':1F411:',
    ':1F412:', ':1F413:', ':1F414:', ':1F415:', ':1F417:', ':1F418:',
    ':1F419:', ':1F420:', ':1F421:', ':1F422:', ':1F423:', ':1F425:',
    ':1F426:', ':1F427:', ':1F428:', ':1F429:', ':1F430:', ':1F431:',
    ':1F433:', ':1F434:', ':1F435:', ':1F436:', ':1F437:', ':1F438:',
    ':1F439:', ':1F493:', ':1F494:', ':1F495:', ':1F496:', ':1F497:',
    ':1F498:', ':1F499:', ':1F590:', ':1F591:', ':1F592:', ':1F593:',
    ':1F594:', ':1F595:', ':1F596:', ':1F598:', ':1F599:', ':1F59E:',
    ':1F59F:', ':1F600:', ':1F601:', ':1F602:', ':1F604:', ':1F605:',
    ':1F606:', ':1F607:', ':1F608:', ':1F609:', ':1F60A:', ':1F60C:',
    ':1F60D:', ':1F60E:', ':1F60F:', ':1F610:', ':1F611:', ':1F612:',
    ':1F614:', ':1F615:', ':1F616:', ':1F617:', ':1F618:', ':1F619:',
    ':1F61A:', ':1F61C:', ':1F61D:', ':1F61E:', ':1F61F:', ':1F620:',
    ':1F621:', ':1F622:', ':1F624:', ':1F625:', ':1F626:', ':1F627:',
    ':1F628:', ':1F629:', ':1F62A:', ':1F62C:', ':1F62D:', ':1F62E:',
    ':1F62F:', ':1F630:', ':1F631:', ':1F632:', ':1F634:', ':1F635:',
    ':1F636:', ':1F637:', ':1F638:', ':1F639:', ':1F63A:', ':1F63C:',
    ':1F63D:', ':1F63E:', ':1F63F:', ':1F640:', ':1F641:', ':1F642:',
    ':1F646:', ':1F647:', ':1F648:', ':1F649:', ':1F64A:', ':1F64B:',
    ':1F64C:', ':2049:', ':261D:', ':263A:', ':2705:', ':270A:', ':270B:',
    ':270C:', ':270F:', ':2716:', ':274C:', ':274E:', ':2753:', ':2754:',
    ':2755:', ':2757:', ':2764:'
]
# Shortcodes from the Twemoji set (Unicode codepoints, lowercase hex).
TWEMOJI = [
    ':1f400:', ':1f401:', ':1f402:', ':1f403:', ':1f404:', ':1f405:',
    ':1f406:', ':1f407:', ':1f408:', ':1f409:', ':1f410:', ':1f411:',
    ':1f412:', ':1f413:', ':1f414:', ':1f415:', ':1f416:', ':1f417:',
    ':1f418:', ':1f419:', ':1f420:', ':1f421:', ':1f422:', ':1f423:',
    ':1f424:', ':1f425:', ':1f426:', ':1f427:', ':1f428:', ':1f429:',
    ':1f430:', ':1f431:', ':1f432:', ':1f433:', ':1f434:', ':1f435:',
    ':1f436:', ':1f437:', ':1f438:', ':1f439:', ':1f440:', ':1f445:',
    ':1f446:', ':1f447:', ':1f448:', ':1f449:', ':1f450:', ':1f600:',
    ':1f601:', ':1f602:', ':1f603:', ':1f604:', ':1f605:', ':1f606:',
    ':1f607:', ':1f608:', ':1f609:', ':1f60a:', ':1f60b:', ':1f60c:',
    ':1f60d:', ':1f60e:', ':1f60f:', ':1f610:', ':1f611:', ':1f612:',
    ':1f613:', ':1f614:', ':1f615:', ':1f616:', ':1f617:', ':1f618:',
    ':1f619:', ':1f61a:', ':1f61b:', ':1f61c:', ':1f61d:', ':1f61e:',
    ':1f61f:', ':1f620:', ':1f621:', ':1f622:', ':1f623:', ':1f624:',
    ':1f625:', ':1f626:', ':1f627:', ':1f628:', ':1f629:', ':1f62a:',
    ':1f62b:', ':1f62c:', ':1f62d:', ':1f62e:', ':1f62f:', ':1f630:',
    ':1f631:', ':1f632:', ':1f633:', ':1f634:', ':1f635:', ':1f636:',
    ':1f637:', ':1f638:', ':1f639:', ':1f63a:', ':1f63b:', ':1f63c:',
    ':1f63d:', ':1f63e:', ':1f63f:', ':1f640:', ':1f645:', ':1f646:',
    ':1f647:', ':1f648:', ':1f649:', ':1f64a:', ':1f64f:', ':1f680:',
    ':1f681:', ':1f682:'
]
# Placeholder -> expansion text used by parse_emoji_groups(); empty here,
# presumably populated elsewhere at runtime — TODO confirm.
EMOJI_GROUPS = {}
def parse_emoji_groups(text):
    """Expand every emoji-group placeholder found in *text*.

    Group bodies come from the module-level EMOJI_GROUPS mapping and are
    lightly converted for HTML display before substitution.
    """
    groups = set(RE_EMOJI_GROUPS.findall(text))
    for group in groups:
        group_text = EMOJI_GROUPS[group]
        # NOTE(review): the replacement below renders like a plain space —
        # presumably a non-breaking space (U+00A0); confirm in the repo.
        group_text = group_text.replace(' ', ' ')
        group_text = group_text.replace('\n', "<br/>")
        text = text.replace(group, group_text)
    return text
def parse_emoji(text, is_escape=True):
    """Replace ``:emoji:`` codes in *text* with ``<img>`` tags.

    A message consisting solely of an emoji is rendered at natural size;
    otherwise emojis are inlined at 20x20 (48x48 for "line" emojis).
    """
    if not text:
        return ''
    if is_escape:
        text = escape(text)
    text = parse_emoji_groups(text)

    only_emoji = bool(RE_EMOJI_ONLY.match(text.strip()))
    if only_emoji:
        emoji_img = '<img src="/static/emoji/%s.png" align="absmiddle"/>'
    else:
        emoji_img = '<img src="/static/emoji/%s.png" height="20" width="20" align="absmiddle"/>'

    def translate_emoji(match):
        code = match.group()
        line_emoji_img = '<img src="/static/emoji/%s.png" height="48" width="48" align="absmiddle"/>'
        if not only_emoji and RE_EMOJI_LINE.match(code):
            return line_emoji_img % code.strip(':')
        return emoji_img % code.strip(':')

    return RE_EMOJI.sub(translate_emoji, text)
def all_emojis():
    """Return the list of available emoji codes.

    Prefers scanning the static emoji directory on disk (first relative to
    the current working directory, then relative to this module); falls
    back to the hard-coded EMOJIS list when no directory or no .png files
    can be found.
    """
    curdir = os.path.abspath(os.path.curdir)
    emoji_dir = os.path.join(curdir, 'hub/static/emoji/')
    if os.path.isdir(emoji_dir):
        files = os.listdir(emoji_dir)
    else:
        realpath = os.path.dirname(os.path.realpath(__file__))
        curdir = os.path.join(realpath, os.path.pardir, 'hub/static/emoji')
        curdir = os.path.abspath(curdir)
        if not os.path.isdir(curdir):
            return EMOJIS
        # Bug fix: previously this listed `emoji_dir`, which was just shown
        # NOT to exist, guaranteeing an OSError on this path.
        files = os.listdir(curdir)
    if files:
        return [':{}:'.format(fn[:-4]) for fn in files if fn.endswith('.png')]
    return EMOJIS
def url_for_emoji(emoji):
    """Map an ``:emoji:`` code to its static PNG path (outer colons stripped)."""
    name = emoji[1:-1]
    return '/static/emoji/' + name + '.png'
def all_line_emojis():
    """All emoji codes that may be rendered at 'line' (48px) size."""
    return EMOJIONE + TWEMOJI
RE_EMOJI = re.compile(r'(' + '|'.join(
[re.escape(x) for x in all_emojis |
#
# FBrowserBase.py -- Base class for file browser plugin for fits viewer
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import os, glob
import stat, time
from ginga.misc import Bunch
from ginga import GingaPlugin
from ginga import AstroImage
from ginga.util import paths
from ginga.util.six.moves import map, zip
class FBrowserBase(GingaPlugin.LocalPlugin):
    """Base class for the file-browser local plugin.

    Toolkit-specific subclasses supply the widgets, the icon pixbufs
    (``folderpb``/``fitspb``/``filepb``) and ``makelisting`` — none of
    those are defined here.
    """

    def __init__(self, fv, fitsimage):
        # superclass defines some variables for us, like logger
        super(FBrowserBase, self).__init__(fv, fitsimage)
        # FITS header keywords displayed when headers are scanned.
        self.keywords = ['OBJECT', 'UT']
        # (column title, Bunch attribute) pairs for the listing table.
        self.columns = [('Name', 'name'),
                        ('Size', 'st_size'),
                        ('Mode', 'st_mode'),
                        ('Last Changed', 'st_mtime')
                        ]
        self.jumpinfo = []
        # Start browsing from a glob over the user's home directory.
        homedir = paths.home
        self.curpath = os.path.join(homedir, '*')
        self.do_scanfits = False
        self.moving_cursor = False

    def close(self):
        """Stop this plugin instance in its channel."""
        chname = self.fv.get_channelName(self.fitsimage)
        self.fv.stop_local_plugin(chname, str(self))
        return True

    def file_icon(self, bnch):
        """Return the icon pixbuf matching the entry's type.

        NOTE(review): the pixbuf attributes are expected to be set by the
        toolkit subclass — they are not defined in this base class.
        """
        if bnch.type == 'dir':
            pb = self.folderpb
        elif bnch.type == 'fits':
            pb = self.fitspb
        else:
            pb = self.filepb
        return pb

    def open_file(self, path):
        """Open *path*: descend into directories, load files via drag-drop."""
        self.logger.debug("path: %s" % (path))
        if path == '..':
            # Go up one level but keep the current glob pattern.
            curdir, curglob = os.path.split(self.curpath)
            path = os.path.join(curdir, path, curglob)
        if os.path.isdir(path):
            path = os.path.join(path, '*')
            self.browse(path)
        elif os.path.exists(path):
            #self.fv.load_file(path)
            # Route through the drag-drop callback so the normal load
            # machinery handles the file.
            uri = "file://%s" % (path)
            self.fitsimage.make_callback('drag-drop', [uri])
        else:
            self.browse(path)

    def get_info(self, path):
        """Build a Bunch describing *path* (type, size, mode, mtime)."""
        dirname, filename = os.path.split(path)
        name, ext = os.path.splitext(filename)
        ftype = 'file'
        if os.path.isdir(path):
            ftype = 'dir'
        elif os.path.islink(path):
            ftype = 'link'
        elif ext.lower() == '.fits':
            ftype = 'fits'
        try:
            filestat = os.stat(path)
            bnch = Bunch.Bunch(path=path, name=filename, type=ftype,
                               st_mode=filestat.st_mode, st_size=filestat.st_size,
                               st_mtime=filestat.st_mtime)
        except OSError as e:
            # TODO: identify some kind of error with this path
            bnch = Bunch.Bunch(path=path, name=filename, type=ftype,
                               st_mode=0, st_size=0,
                               st_mtime=0)
        return bnch

    def browse(self, path):
        """List the directory (or glob) at *path* and refresh the display."""
        self.logger.debug("path: %s" % (path))
        if os.path.isdir(path):
            dirname = path
            globname = None
        else:
            dirname, globname = os.path.split(path)
        dirname = os.path.abspath(dirname)
        # check validity of leading path name
        if not os.path.isdir(dirname):
            self.fv.show_error("Not a valid path: %s" % (dirname))
            return
        if not globname:
            globname = '*'
        path = os.path.join(dirname, globname)
        # Make a directory listing
        self.logger.debug("globbing path: %s" % (path))
        filelist = list(glob.glob(path))
        filelist.sort(key=str.lower)
        # Always offer a way up the tree.
        filelist.insert(0, os.path.join(dirname, '..'))
        self.jumpinfo = list(map(self.get_info, filelist))
        self.curpath = path
        if self.do_scanfits:
            self.scan_fits()
        self.makelisting(path)

    def scan_fits(self):
        """Read the configured header keywords from each FITS file listed."""
        for bnch in self.jumpinfo:
            if not bnch.type == 'fits':
                continue
            if 'kwds' not in bnch:
                try:
                    in_f = AstroImage.pyfits.open(bnch.path, 'readonly')
                    try:
                        kwds = {}
                        for kwd in self.keywords:
                            kwds[kwd] = in_f[0].header.get(kwd, 'N/A')
                        bnch.kwds = kwds
                    finally:
                        in_f.close()
                except Exception as e:
                    # Unreadable/corrupt file: skip it and keep scanning.
                    continue

    def refresh(self):
        """Re-browse the current path."""
        self.browse(self.curpath)

    def scan_headers(self):
        """Re-browse the current path (headers scanned if do_scanfits is set)."""
        self.browse(self.curpath)

    def make_thumbs(self):
        """Ask the channel's Thumbs plugin to thumbnail the current listing."""
        path = self.curpath
        self.logger.info("Generating thumbnails for '%s'..." % (
            path))
        filelist = glob.glob(path)
        filelist.sort(key=str.lower)
        # find out our channel
        chname = self.fv.get_channelName(self.fitsimage)
        # Invoke the method in this channel's Thumbs plugin
        # TODO: don't expose gpmon!
        rsobj = self.fv.gpmon.getPlugin('Thumbs')
        self.fv.nongui_do(rsobj.make_thumbs, chname, filelist)

    def start(self):
        """Plugin lifecycle: begin browsing from the current path."""
        self.win = None
        self.browse(self.curpath)

    def pause(self):
        pass

    def resume(self):
        pass

    def stop(self):
        pass

    def redo(self):
        # Nothing to recompute when the image changes.
        return True
#END
|
the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume.drivers.solidfire import SolidFire
LOG = logging.getLogger(__name__)
class SolidFireVolumeTestCase(test.TestCase):
def setUp(self):
    # No shared fixtures: each test stubs the fakes it needs via self.stubs.
    super(SolidFireVolumeTestCase, self).setUp()
def fake_issue_api_request(obj, method, params):
    """Canned SolidFire API responses keyed on the RPC method name.

    *obj* receives the driver instance when this is stubbed in as
    ``_issue_api_request``; it is intentionally unused.  Unknown methods
    log an error and return None.
    """
    # Bug fix: compare strings with `==`, not `is` — identity of equal
    # string literals is a CPython interning accident, not a guarantee.
    if method == 'GetClusterInfo':
        LOG.info('Called Fake GetClusterInfo...')
        results = {'result': {'clusterInfo':
                              {'name': 'fake-cluster',
                               'mvip': '1.1.1.1',
                               'svip': '1.1.1.1',
                               'uniqueID': 'unqid',
                               'repCount': 2,
                               'attributes': {}}}}
        return results
    elif method == 'AddAccount':
        LOG.info('Called Fake AddAccount...')
        return {'result': {'accountID': 25}, 'id': 1}
    elif method == 'GetAccountByName':
        LOG.info('Called Fake GetAccountByName...')
        results = {'result': {'account':
                              {'accountID': 25,
                               'username': params['username'],
                               'status': 'active',
                               'initiatorSecret': '123456789012',
                               'targetSecret': '123456789012',
                               'attributes': {},
                               'volumes': [6, 7, 20]}},
                   "id": 1}
        return results
    elif method == 'CreateVolume':
        LOG.info('Called Fake CreateVolume...')
        return {'result': {'volumeID': 5}, 'id': 1}
    elif method == 'DeleteVolume':
        LOG.info('Called Fake DeleteVolume...')
        return {'result': {}, 'id': 1}
    elif method == 'ListVolumesForAccount':
        test_name = 'OS-VOLID-a720b3c0-d1f0-11e1-9b23-0800200c9a66'
        LOG.info('Called Fake ListVolumesForAccount...')
        result = {'result': {
            'volumes': [{'volumeID': 5,
                         'name': test_name,
                         'accountID': 25,
                         'sliceCount': 1,
                         'totalSize': 1048576 * 1024,
                         'enable512e': True,
                         'access': "readWrite",
                         'status': "active",
                         'attributes': None,
                         'qos': None,
                         'iqn': test_name}]}}
        return result
    else:
        LOG.error('Crap, unimplemented API call in Fake:%s' % method)
        # Explicit None for unimplemented methods (was an implicit fall-off).
        return None
def fake_issue_api_request_fails(obj, method, params):
    """Canned error reply used to exercise the driver's failure paths."""
    error = {'code': 000,
             'name': 'DummyError',
             'message': 'This is a fake error response'}
    return {'error': error, 'id': 1}
def fake_set_qos_by_volume_type(self, type_id, ctxt):
    """Pretend every volume type maps to one fixed QoS specification."""
    qos_spec = {'minIOPS': 500, 'maxIOPS': 1000, 'burstIOPS': 1000}
    return qos_spec
def fake_volume_get(obj, key, default=None):
    """Fake DB lookup: every volume reports the preset 'fast' QoS metadata."""
    return {'qos': 'fast'}
def test_create_with_qos_type(self):
    """create_volume should apply QoS derived from the volume type."""
    self.stubs.Set(SolidFire, '_issue_api_request',
                   self.fake_issue_api_request)
    self.stubs.Set(SolidFire, '_set_qos_by_volume_type',
                   self.fake_set_qos_by_volume_type)
    volume = {'project_id': 'testprjid',
              'name': 'testvol',
              'size': 1,
              'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
              'volume_type_id': 'fast'}
    SolidFire().create_volume(volume)
def test_create_volume(self):
    """A plain volume create with no volume type should not raise."""
    self.stubs.Set(SolidFire, '_issue_api_request',
                   self.fake_issue_api_request)
    volume = {'project_id': 'testprjid',
              'name': 'testvol',
              'size': 1,
              'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
              'volume_type_id': None}
    SolidFire().create_volume(volume)
def test_create_volume_with_qos(self):
    """create_volume should honour a QoS preset supplied via metadata."""
    preset_qos = {'qos': 'fast'}
    self.stubs.Set(SolidFire, '_issue_api_request',
                   self.fake_issue_api_request)
    volume = {'project_id': 'testprjid',
              'name': 'testvol',
              'size': 1,
              'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
              'metadata': [preset_qos],
              'volume_type_id': None}
    SolidFire().create_volume(volume)
def test_create_volume_fails(self):
    """create_volume must propagate an error when every API call fails."""
    self.stubs.Set(SolidFire, '_issue_api_request',
                   self.fake_issue_api_request_fails)
    testvol = {'project_id': 'testprjid',
               'name': 'testvol',
               'size': 1,
               'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
    sfv = SolidFire()
    # assertRaises replaces the manual try/except/self.fail dance and
    # produces a clearer failure message.
    self.assertRaises(Exception, sfv.create_volume, testvol)
def test_create_sfaccount(self):
    """_create_sfaccount returns an account when the API succeeds."""
    sfv = SolidFire()
    self.stubs.Set(SolidFire, '_issue_api_request',
                   self.fake_issue_api_request)
    self.assertNotEqual(sfv._create_sfaccount('project-id'), None)
def test_create_sfaccount_fails(self):
    """_create_sfaccount yields None when every API call errors out."""
    sfv = SolidFire()
    self.stubs.Set(SolidFire, '_issue_api_request',
                   self.fake_issue_api_request_fails)
    self.assertEqual(sfv._create_sfaccount('project-id'), None)
def test_get_sfaccount_by_name(self):
    """Account lookup by name succeeds against the happy-path fake."""
    sfv = SolidFire()
    self.stubs.Set(SolidFire, '_issue_api_request',
                   self.fake_issue_api_request)
    self.assertNotEqual(sfv._get_sfaccount_by_name('some-name'), None)
def test_get_sfaccount_by_name_fails(self):
    """Account lookup by name returns None when the API errors out."""
    sfv = SolidFire()
    self.stubs.Set(SolidFire, '_issue_api_request',
                   self.fake_issue_api_request_fails)
    self.assertEqual(sfv._get_sfaccount_by_name('some-name'), None)
def test_delete_volume(self):
    """Deleting an existing volume should not raise."""
    self.stubs.Set(SolidFire, '_issue_api_request',
                   self.fake_issue_api_request)
    volume = {'project_id': 'testprjid',
              'name': 'test_volume',
              'size': 1,
              'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
    SolidFire().delete_volume(volume)
def test_delete_volume_fails_no_volume(self):
    """delete_volume must raise when the named volume does not exist."""
    self.stubs.Set(SolidFire, '_issue_api_request',
                   self.fake_issue_api_request)
    testvol = {'project_id': 'testprjid',
               'name': 'no-name',
               'size': 1,
               'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
    sfv = SolidFire()
    # assertRaises replaces the manual try/except/self.fail pattern.
    self.assertRaises(Exception, sfv.delete_volume, testvol)
def test_delete_volume_fails_account_lookup(self):
    """delete_volume surfaces SfAccountNotFound when account lookup fails."""
    self.stubs.Set(SolidFire, '_issue_api_request',
                   self.fake_issue_api_request_fails)
    volume = {'project_id': 'testprjid',
              'name': 'no-name',
              'size': 1,
              'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
    sfv = SolidFire()
    self.assertRaises(exception.SfAccountNotFound,
                      sfv.delete_volume,
                      volume)
def test_get_cluster_info(self):
self.stubs.Set(SolidFire, '_issue_api_request',
self.fake_issue_api_request)
sfv = SolidFire()
sfv._ge |
# Generated by Django 2.2.13 on 2020-10-29 21:34
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration (Django 2.2.13) altering two existing columns:
    #  - Computer.serial becomes a unique CharField(max_length=200)
    #  - Secret.secret_type gains a fixed choice set defaulting to
    #    "recovery_key"

    dependencies = [("server", "0017_merge_20181217_1829")]
    operations = [
        migrations.AlterField(
            model_name="computer",
            name="serial",
            field=models.CharField(
                max_length=200, unique=True, verbose_name="Serial Number"
            ),
        ),
        migrations.AlterField(
            model_name="secret",
            name="secret_type",
            field=models.CharField(
                choices=[
                    ("recovery_key", "Recovery Key"),
                    ("password", "Password"),
                    ("unlock_pin", "Unlock PIN"),
                ],
                default="recovery_key",
                max_length=256,
            ),
        ),
    ]
|
the TP-Link router is up to date.
Return boolean if scanning successful.
"""
_LOGGER.info("Loading wireless clients...")
url = 'http://{}/userRpm/WlanStationRpm.htm'.format(self.host)
referer = 'http://{}'.format(self.host)
page = requests.get(
url, auth=(self.username, self.password),
headers={REFERER: referer}, timeout=4)
result = self.parse_macs.findall(page.text)
if result:
self.last_results = [mac.replace("-", ":") for mac in result]
return True
return False
class Tplink2DeviceScanner(Tplink1DeviceScanner):
    """This class queries a router with newer version of TP-Link firmware."""

    def scan_devices(self):
        """Scan for new devices and return a list with found device IDs."""
        self._update_info()
        # last_results maps MAC -> name, so the keys are the device IDs.
        return self.last_results.keys()

    def get_device_name(self, device):
        """Get firmware doesn't save the name of the wireless device."""
        return self.last_results.get(device)

    def _update_info(self):
        """Ensure the information from the TP-Link router is up to date.
        Return boolean if scanning successful.
        """
        _LOGGER.info("Loading wireless clients...")
        url = 'http://{}/data/map_access_wireless_client_grid.json' \
            .format(self.host)
        referer = 'http://{}'.format(self.host)
        # Router uses Authorization cookie instead of header
        # Let's create the cookie
        username_password = '{}:{}'.format(self.username, self.password)
        b64_encoded_username_password = base64.b64encode(
            username_password.encode('ascii')
        ).decode('ascii')
        cookie = 'Authorization=Basic {}' \
            .format(b64_encoded_username_password)
        response = requests.post(
            url, headers={REFERER: referer, COOKIE: cookie},
            timeout=4)
        try:
            result = response.json().get('data')
        except ValueError:
            _LOGGER.error("Router didn't respond with JSON. "
                          "Check if credentials are correct.")
            return False
        if result:
            # Normalize MACs to colon-separated form and map them to names.
            self.last_results = {
                device['mac_addr'].replace('-', ':'): device['name']
                for device in result
            }
            return True
        return False
class Tplink3DeviceScanner(Tplink1DeviceScanner):
    """This class queries the Archer C9 router with version 150811 or high."""

    def __init__(self, config):
        """Initialize the scanner."""
        # Per-session token and auth cookie, filled in by _get_auth_tokens().
        self.stok = ''
        self.sysauth = ''
        super(Tplink3DeviceScanner, self).__init__(config)

    def scan_devices(self):
        """Scan for new devices and return a list with found device IDs."""
        self._update_info()
        # Log out after every scan; a fresh login happens on the next one.
        self._log_out()
        return self.last_results.keys()

    def get_device_name(self, device):
        """Get the firmware doesn't save the name of the wireless device.
        We are forced to use the MAC address as name here.
        """
        return self.last_results.get(device)

    def _get_auth_tokens(self):
        """Retrieve auth tokens from the router."""
        _LOGGER.info("Retrieving auth tokens...")
        url = 'http://{}/cgi-bin/luci/;stok=/login?form=login' \
            .format(self.host)
        referer = 'http://{}/webpages/login.html'.format(self.host)
        # If possible implement RSA encryption of password here.
        response = requests.post(
            url, params={'operation': 'login', 'username': self.username,
                         'password': self.password},
            headers={REFERER: referer}, timeout=4)
        try:
            # Session token comes from the JSON body...
            self.stok = response.json().get('data').get('stok')
            _LOGGER.info(self.stok)
            # ...while the auth cookie is parsed out of the Set-Cookie header.
            regex_result = re.search(
                'sysauth=(.*);', response.headers['set-cookie'])
            self.sysauth = regex_result.group(1)
            _LOGGER.info(self.sysauth)
            return True
        except (ValueError, KeyError):
            _LOGGER.error("Couldn't fetch auth tokens! Response was: %s",
                          response.text)
            return False

    def _update_info(self):
        """Ensure the information from the TP-Link router is up to date.
        Return boolean if scanning successful.
        """
        # Log in lazily: only when we hold no tokens.
        if (self.stok == '') or (self.sysauth == ''):
            self._get_auth_tokens()
        _LOGGER.info("Loading wireless clients...")
        url = ('http://{}/cgi-bin/luci/;stok={}/admin/wireless?'
               'form=statistics').format(self.host, self.stok)
        referer = 'http://{}/webpages/index.html'.format(self.host)
        response = requests.post(
            url, params={'operation': 'load'}, headers={REFERER: referer},
            cookies={'sysauth': self.sysauth}, timeout=5)
        try:
            json_response = response.json()
            if json_response.get('success'):
                result = response.json().get('data')
            else:
                if json_response.get('errorcode') == 'timeout':
                    # Session expired: drop tokens so the next scan relogs.
                    _LOGGER.info("Token timed out. Relogging on next scan")
                    self.stok = ''
                    self.sysauth = ''
                    return False
                _LOGGER.error(
                    "An unknown error happened while fetching data")
                return False
        except ValueError:
            _LOGGER.error("Router didn't respond with JSON. "
                          "Check if credentials are correct")
            return False
        if result:
            # This firmware exposes no device names; map MAC -> MAC.
            self.last_results = {
                device['mac'].replace('-', ':'): device['mac']
                for device in result
            }
            return True
        return False

    def _log_out(self):
        # Invalidate the router session and clear the local tokens.
        _LOGGER.info("Logging out of router admin interface...")
        url = ('http://{}/cgi-bin/luci/;stok={}/admin/system?'
               'form=logout').format(self.host, self.stok)
        referer = 'http://{}/webpages/index.html'.format(self.host)
        requests.post(
            url, params={'operation': 'write'}, headers={REFERER: referer},
            cookies={'sysauth': self.sysauth})
        self.stok = ''
        self.sysauth = ''
class Tplink4DeviceScanner(Tplink1DeviceScanner):
    """This class queries an Archer C7 router with TP-Link firmware 150427."""

    def __init__(self, config):
        """Initialize the scanner."""
        # Base64-encoded "user:md5(password)" cookie value and the per-login
        # URL token scraped from the login redirect.
        self.credentials = ''
        self.token = ''
        super(Tplink4DeviceScanner, self).__init__(config)

    def scan_devices(self):
        """Scan for new devices and return a list with found device IDs."""
        self._update_info()
        return self.last_results

    def get_device_name(self, device):
        """Get the name of the wireless device."""
        # This firmware exposes no device names.
        return None

    def _get_auth_tokens(self):
        """Retrieve auth tokens from the router."""
        _LOGGER.info("Retrieving auth tokens...")
        url = 'http://{}/userRpm/LoginRpm.htm?Save=Save'.format(self.host)
        # Generate md5 hash of password. The C7 appears to use the first 15
        # characters of the password only, so we truncate to remove additional
        # characters from being hashed.
        password = hashlib.md5(self.password.encode('utf')[:15]).hexdigest()
        credentials = '{}:{}'.format(self.username, password).encode('utf')
        # Encode the credentials to be sent as a cookie.
        self.credentials = base64.b64encode(credentials).decode('utf')
        # Create the authorization cookie.
        cookie = 'Authorization=Basic {}'.format(self.credentials)
        response = requests.get(url, headers={COOKIE: cookie})
        try:
            # The login page redirects to /<token>/userRpm/Index.htm; that
            # path segment is the session token for subsequent requests.
            result = re.search(r'window.parent.location.href = '
                               r'"https?:\/\/.*\/(.*)\/userRpm\/Index.htm";',
                               response.text)
            if not result:
                return False
            self.token = result.group(1)
            return True
        except ValueError:
            _LOGGER.error("Couldn't fetch auth tokens")
|
"""
Tests are performed against csr1000v-universalk9.03.15.00.S.155-2.S-std.
"""
import unittest
from iosxe.iosxe import IOSXE
from iosxe.exceptions import AuthError
# Lab device targeted by the integration tests below; these tests require a
# reachable IOS-XE node exposing the REST API with these credentials.
node = '172.16.92.134'
username = 'cisco'
password = 'cisco'
port = 55443
class TestIOSXE(unittest.TestCase):
    """Integration tests exercising a live IOS-XE REST API node."""

    def setUp(self):
        # Fresh client per test; disable_warnings silences TLS-certificate
        # warnings for the lab device's self-signed cert — presumably; verify.
        self.xe = IOSXE(node=node, username=username, password=password, disable_warnings=True)

    def test_iosxe_is_a_IOSXE(self):
        self.assertIsInstance(self.xe, IOSXE)

    def test_invalid_user_pass_returns_auth_error(self):
        # Bad credentials must surface as AuthError at construction time.
        self.assertRaises(AuthError, IOSXE, node=node, username='stuff', password='things',
                          disable_warnings=True)

    def test_url_base(self):
        self.assertEqual(self.xe.url_base, 'https://{0}:{1}/api/v1'.format(node, port))

    def test_token_uri(self):
        self.assertEqual(self.xe.token_uri, '/auth/token-services')

    def test_save_config_success(self):
        # 204 No Content signals the running config was saved.
        resp = self.xe.save_config()
        self.assertEqual(204, resp.status_code)
|
import glob
import logging
import os
import shutil
from threading import Thread, Lock
class ClientFiles:
    """Processes the clients files to be moved etc"""

    def __init__(self, settings, clients):
        """
        :param settings: config object providing client_path, file_types,
            file_minsize, local_queue and client_connections
        :param clients: provider of ``clients_online`` (list of client hosts)
        """
        self.settings = settings
        self.clients = clients
        # Logging
        self._logger = logging.getLogger(__name__)
        # Dictionary of files to be moved, client is key
        self.client_files = dict()
        self.client_list = []
        # UNC glob template: \\<client>\<share>\*
        self.file_path = ("\\\\{0}\\" +
                          self.settings.client_path +
                          "\\" +
                          "*")
        # Threading lock protecting the shared work queues below
        self.lock = Lock()

    def file_list(self, client):
        """
        Creates a list of files for each client to be moved

        :param client: Client to have file list generated
        :return: None; populates self.client_files[client] when files exist
        """
        client_files = []
        for file_type in self.settings.file_types:
            client_files.extend(glob.glob(self.file_path.format(client) +
                                          file_type))
        # Drop files below the configured minimum size (e.g. empty stubs).
        client_files = [file for file in client_files
                        if os.stat(file).st_size >=
                        self.settings.file_minsize]
        if client_files:
            self._logger.log(logging.INFO, client + ": " +
                             str(len(client_files)) + " file(s)")
            self.client_files[client] = client_files

    def thread_file_list(self):
        """
        Threadsafe queue support function to prevent conflicts.

        FIX: the emptiness check and the pop now happen atomically under the
        lock; the original checked len() outside the critical section, so two
        threads could race between the check and the pop and raise IndexError.
        """
        client_list = self.clients.clients_online
        while True:
            with self.lock:
                if not client_list:
                    break
                client = client_list.pop()
            # Do the (slow) network globbing outside the lock.
            self.file_list(client)

    def build_file_list(self):
        """
        Processes clients to construct class file list
        """
        active_threads = []
        # Spawn instances for multithreading
        for _ in range(self.settings.client_connections):
            instance = Thread(target=self.thread_file_list)
            active_threads.append(instance)
            instance.start()
        # Allow threads to complete before proceeding
        for instance in active_threads:
            instance.join()

    def thread_move_files(self):
        """Thread worker: pop clients and move their files (see
        thread_file_list for the locking rationale)."""
        while True:
            with self.lock:
                if not self.client_list:
                    break
                client = self.client_list.pop()
            self.move_files(self.client_files[client])

    def move_files(self, file_list):
        """Move each file into the local queue, replacing stale copies.

        :param file_list: absolute paths of files to move
        """
        for file in file_list:
            os.chdir(self.settings.local_queue)
            # Check if file exists locally already
            if os.path.exists(os.path.basename(file)):
                os.remove(os.path.basename(file))
                self._logger.log(logging.WARNING, file + " exists. "
                                 "Current file removed")
            try:
                shutil.move(file, self.settings.local_queue)
            except OSError as error:
                self._logger.log(logging.CRITICAL, file + " " + str(error))
            else:
                # FIX: only log success when the move succeeded; the original
                # logged "moved successfully" even after an OSError.
                self._logger.log(logging.INFO, file + " moved successfully")

    def build_move_files(self):
        """
        Move files from client to local store. This is done as a list per client
        so as to not flood a specific sites WAN link and cause issues with
        stores connectivity.
        """
        active_threads = []
        self.client_list = list(self.client_files.keys())
        for _ in range(self.settings.client_connections):
            instance = Thread(target=self.thread_move_files)
            active_threads.append(instance)
            instance.start()
        # Allow threads to complete before proceeding
        for instance in active_threads:
            instance.join()

    def run(self):
        """Build the per-client file lists, then move everything."""
        self.build_file_list()
        self.build_move_files()
|
specifiers declarator
"""
spec = p[1]
if not spec['type']:
spec['type'] = [c_ast.IdentifierType(['int'],
coord=self._coord(p.lineno(1)))]
p[0] = self._build_declarations(
spec=spec,
decls=[dict(decl=p[2])])[0]
    def p_parameter_declaration_2(self, p):
        """ parameter_declaration : declaration_specifiers abstract_declarator_opt
        """
        # NOTE: the docstring above is the live PLY grammar production.
        spec = p[1]
        # C89 implicit-int rule: no type specifier means 'int'.
        if not spec['type']:
            spec['type'] = [c_ast.IdentifierType(['int'],
                coord=self._coord(p.lineno(1)))]
        # Parameters can have the same names as typedefs. The trouble is that
        # the parameter's name gets grouped into declaration_specifiers, making
        # it look like an old-style declaration; compensate.
        #
        if len(spec['type']) > 1 and len(spec['type'][-1].names) == 1 and \
                self._is_type_in_scope(spec['type'][-1].names[0]):
            decl = self._build_declarations(
                spec=spec,
                decls=[dict(decl=p[2], init=None)])[0]
        # This truly is an old-style parameter declaration
        #
        else:
            decl = c_ast.Typename(
                name='',
                quals=spec['qual'],
                type=p[2] or c_ast.TypeDecl(None, None, None),
                coord=self._coord(p.lineno(2)))
            typename = spec['type']
            decl = self._fix_decl_name_type(decl, typename)
        p[0] = decl
    def p_identifier_list(self, p):
        """ identifier_list : identifier
                            | identifier_list COMMA identifier
        """
        # Docstring is the PLY grammar rule; builds/extends a ParamList.
        if len(p) == 2:  # single parameter
            p[0] = c_ast.ParamList([p[1]], p[1].coord)
        else:
            p[1].params.append(p[3])
            p[0] = p[1]
    def p_initializer_1(self, p):
        """ initializer : assignment_expression
        """
        # Plain expression initializer: pass the AST node through.
        p[0] = p[1]
    def p_initializer_2(self, p):
        """ initializer : brace_open initializer_list_opt brace_close
                        | brace_open initializer_list COMMA brace_close
        """
        # Braced initializer; an empty brace pair yields an empty InitList.
        if p[2] is None:
            p[0] = c_ast.InitList([], self._coord(p.lineno(1)))
        else:
            p[0] = p[2]
    def p_initializer_list(self, p):
        """ initializer_list : designation_opt initializer
                             | initializer_list COMMA designation_opt initializer
        """
        # A designation (e.g. '.field =' / '[idx] =') wraps the initializer
        # in a NamedInitializer; otherwise the initializer is used as-is.
        if len(p) == 3:  # single initializer
            init = p[2] if p[1] is None else c_ast.NamedInitializer(p[1], p[2])
            p[0] = c_ast.InitList([init], p[2].coord)
        else:
            init = p[4] if p[3] is None else c_ast.NamedInitializer(p[3], p[4])
            p[1].exprs.append(init)
            p[0] = p[1]
    def p_designation(self, p):
        """ designation : designator_list EQUALS
        """
        # Only the designators matter; the EQUALS token is dropped.
        p[0] = p[1]
# Designators are represented as a list of nodes, in the order in which
# they're written in the code.
#
    def p_designator_list(self, p):
        """ designator_list : designator
                            | designator_list designator
        """
        # Accumulate designators into a plain Python list, in source order.
        p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]]
    def p_designator(self, p):
        """ designator  : LBRACKET constant_expression RBRACKET
                        | PERIOD identifier
        """
        # Either form carries its payload at position 2 (expression or name).
        p[0] = p[2]
    def p_type_name(self, p):
        """ type_name   : specifier_qualifier_list abstract_declarator_opt
        """
        # Wrap the (possibly absent) abstract declarator in a Typename and
        # push the specifier list's type names into it.
        typename = c_ast.Typename(
            name='',
            quals=p[1]['qual'],
            type=p[2] or c_ast.TypeDecl(None, None, None),
            coord=self._coord(p.lineno(2)))
        p[0] = self._fix_decl_name_type(typename, p[1]['type'])
    def p_abstract_declarator_1(self, p):
        """ abstract_declarator     : pointer
        """
        # Bare pointer: attach the pointer modifier to a placeholder TypeDecl.
        dummytype = c_ast.TypeDecl(None, None, None)
        p[0] = self._type_modify_decl(
            decl=dummytype,
            modifier=p[1])
    def p_abstract_declarator_2(self, p):
        """ abstract_declarator     : pointer direct_abstract_declarator
        """
        # Apply the pointer modifier to the inner direct declarator.
        p[0] = self._type_modify_decl(p[2], p[1])
    def p_abstract_declarator_3(self, p):
        """ abstract_declarator     : direct_abstract_declarator
        """
        # No pointer: the direct declarator is the whole abstract declarator.
        p[0] = p[1]
# Creating and using direct_abstract_declarator_opt here
# instead of listing both direct_abstract_declarator and the
# lack of it in the beginning of _1 and _2 caused two
# shift/reduce errors.
#
    def p_direct_abstract_declarator_1(self, p):
        """ direct_abstract_declarator  : LPAREN abstract_declarator RPAREN """
        # Parenthesized declarator: unwrap.
        p[0] = p[2]
    def p_direct_abstract_declarator_2(self, p):
        """ direct_abstract_declarator  : direct_abstract_declarator LBRACKET assignment_expression_opt RBRACKET
        """
        # Array-of applied to an existing declarator (dim may be None).
        arr = c_ast.ArrayDecl(
            type=None,
            dim=p[3],
            dim_quals=[],
            coord=p[1].coord)
        p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
    def p_direct_abstract_declarator_3(self, p):
        """ direct_abstract_declarator  : LBRACKET assignment_expression_opt RBRACKET
        """
        # Standalone array declarator with no inner declarator.
        p[0] = c_ast.ArrayDecl(
            type=c_ast.TypeDecl(None, None, None),
            dim=p[2],
            dim_quals=[],
            coord=self._coord(p.lineno(1)))
    def p_direct_abstract_declarator_4(self, p):
        """ direct_abstract_declarator  : direct_abstract_declarator LBRACKET TIMES RBRACKET
        """
        # VLA-style '[*]' dimension: the '*' token becomes an ID node.
        arr = c_ast.ArrayDecl(
            type=None,
            dim=c_ast.ID(p[3], self._coord(p.lineno(3))),
            dim_quals=[],
            coord=p[1].coord)
        p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
    def p_direct_abstract_declarator_5(self, p):
        """ direct_abstract_declarator  : LBRACKET TIMES RBRACKET
        """
        # Standalone '[*]' array declarator.
        p[0] = c_ast.ArrayDecl(
            type=c_ast.TypeDecl(None, None, None),
            dim=c_ast.ID(p[3], self._coord(p.lineno(3))),
            dim_quals=[],
            coord=self._coord(p.lineno(1)))
    def p_direct_abstract_declarator_6(self, p):
        """ direct_abstract_declarator  : direct_abstract_declarator LPAREN parameter_type_list_opt RPAREN
        """
        # Function-of applied to an existing declarator.
        func = c_ast.FuncDecl(
            args=p[3],
            type=None,
            coord=p[1].coord)
        p[0] = self._type_modify_decl(decl=p[1], modifier=func)
    def p_direct_abstract_declarator_7(self, p):
        """ direct_abstract_declarator  : LPAREN parameter_type_list_opt RPAREN
        """
        # Standalone function declarator with no inner declarator.
        p[0] = c_ast.FuncDecl(
            args=p[2],
            type=c_ast.TypeDecl(None, None, None),
            coord=self._coord(p.lineno(1)))
# declaration is a list, statement isn't. To make it consistent, block_item
# will always be a list
#
    def p_block_item(self, p):
        """ block_item  : declaration
                        | statement
        """
        # Declarations come through as lists, statements as single nodes;
        # normalize both to a list for uniform handling.
        p[0] = p[1] if isinstance(p[1], list) else [p[1]]
# Since we made block_item a list, this just combines lists
#
    def p_block_item_list(self, p):
        """ block_item_list : block_item
                            | block_item_list block_item
        """
        # Empty block items (plain ';') produce [None], so ignore them
        p[0] = p[1] if (len(p) == 2 or p[2] == [None]) else p[1] + p[2]
    def p_compound_statement_1(self, p):
        """ compound_statement : brace_open block_item_list_opt brace_close """
        # A '{...}' block; block_items may be None for an empty body.
        p[0] = c_ast.Compound(
            block_items=p[2],
            coord=self._coord(p.lineno(1)))
    def p_labeled_statement_1(self, p):
        """ labeled_statement : ID COLON statement """
        # Ordinary 'label:' statement.
        p[0] = c_ast.Label(p[1], p[3], self._coord(p.lineno(1)))
    def p_labeled_statement_2(self, p):
        """ labeled_statement : CASE constant_expression COLON statement """
        # 'case expr:' — statement is wrapped in a list on the Case node.
        p[0] = c_ast.Case(p[2], [p[4]], self._coord(p.lineno(1)))
def p_labeled_statement_3(self, p):
""" labele |
'''
Created on Jan 30, 2011
@author: mkiyer
chimerascan: chimeric transcript discovery using RNA-seq
Copyright (C) 2011 Matthew Iyer
This program is free softwar | e: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have r | eceived a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import collections
import logging
from nominate_chimeras import Chimera, parse_discordant_reads
from find_discordant_reads import DiscordantType
def to_fastq(mate, qname, seq, qual):
    """Format one FASTQ record for the given mate (0-based index)."""
    read_number = mate + 1
    fields = (qname, read_number, seq, qname, read_number, qual)
    return "@%s/%d\n%s\n+%s/%d\n%s" % fields
def is_spanning(start, end, juncs):
    """Return True if any junction position lies strictly inside (start, end)."""
    for junc in juncs:
        if start < junc < end:
            return True
    return False
def check_fragment(frag, tx5p, tx3p, nonmapping=True):
    """Decide which mates of a discordant fragment are candidate
    junction-spanning reads and return them as FASTQ strings.

    :param frag: discordant fragment with clust5p/clust3p clusters and
        discordant_type flags
    :param tx5p: maps 5' transcript name -> list of junction positions
    :param tx3p: maps 3' transcript name -> list of junction positions
    :param nonmapping: when True, completely non-mapping fragments are
        always written (increases sensitivity in single-segment mode)
    :returns: two-element list [read1, read2]; FASTQ strings or None
    """
    # mates that have two split segments mapping discordantly
    # are automatically considered for spanning read detection
    write5p = frag.discordant_type.discordant5p
    write3p = frag.discordant_type.discordant3p
    # check the padding boundaries to find mates with positions
    # past the junction
    if (not write5p) and (frag.clust5p.rname in tx5p):
        write5p = is_spanning(frag.clust5p.pad_start,
                              frag.clust5p.pad_end,
                              tx5p[frag.clust5p.rname])
    if (not write3p) and (frag.clust3p.rname in tx3p):
        # BUG FIX: the 3' cluster must be tested against the 3' junction
        # index; the original indexed tx5p here (copy-paste error), so 3'
        # spanning checks silently used the wrong junction coordinates.
        write3p = is_spanning(frag.clust3p.pad_start,
                              frag.clust3p.pad_end,
                              tx3p[frag.clust3p.rname])
    if nonmapping and (frag.discordant_type.code == DiscordantType.NONMAPPING):
        # TODO: automatically completely non-mapping reads that may
        # be double-overlapping spanning reads, but only do this in
        # single-segment mode to increase sensitivity
        write5p = True
        write3p = True
    elif frag.discordant_type.code == DiscordantType.CONCORDANT_SINGLE:
        # one of mates mapped and other is unmapped, so check
        # the mapped mate and see whether it matches a chimera
        # candidate
        # TODO: check junction position to further refine decision
        # by omitting reads that are far from the predicted junction
        if (frag.clust5p.rname == "*") and (frag.clust3p.rname in tx3p):
            write5p = True
        if (frag.clust3p.rname == "*") and (frag.clust5p.rname in tx5p):
            write3p = True
    # write the potential spanning reads
    reads = [None, None]
    if write5p:
        mate = 0 if frag.read1_is_sense else 1
        reads[mate] = to_fastq(mate, frag.qname, frag.clust5p.seq, frag.clust5p.qual)
    if write3p:
        mate = 1 if frag.read1_is_sense else 0
        reads[mate] = to_fastq(mate, frag.qname, frag.clust3p.seq, frag.clust3p.qual)
    return reads
def nominate_spanning_reads(discordant_reads_fh,
                            chimeras_fh,
                            fastq_fh):
    """Scan discordant reads against chimera candidates and write potential
    junction-spanning reads to ``fastq_fh`` in FASTQ format.

    :param discordant_reads_fh: qname-sorted discordant reads file handle
    :param chimeras_fh: chimera candidates file handle
    :param fastq_fh: output FASTQ file handle
    """
    # build index of chimera candidates
    logging.info("Indexing chimera candidates")
    # Junction positions keyed by transcript name for each side of a chimera.
    tx5p = collections.defaultdict(lambda: [])
    tx3p = collections.defaultdict(lambda: [])
    for chimera in Chimera.parse(chimeras_fh):
        tx5p[chimera.mate5p.tx_name].append(chimera.mate5p.end)
        tx3p[chimera.mate3p.tx_name].append(chimera.mate3p.start)
    # parse discordant reads
    logging.info("Nominating spanning reads")
    read1, read2 = None, None
    prev_qname = None
    for frag in parse_discordant_reads(discordant_reads_fh):
        if frag.discordant_type.is_genome:
            continue
        qname = frag.qname
        # Input is qname-sorted, so a qname change means the previous
        # fragment group is complete: flush its reads.
        if prev_qname is not None and (qname != prev_qname):
            if read1 is not None:
                print >>fastq_fh, read1
            if read2 is not None:
                print >>fastq_fh, read2
            read1, read2 = None, None
        # skip if reads already found
        if (read1 is not None) and (read2 is not None):
            continue
        # update read fastq
        r1, r2 = check_fragment(frag, tx5p, tx3p)
        if read1 is None: read1 = r1
        if read2 is None: read2 = r2
        prev_qname = qname
    # Flush the reads of the final fragment group.
    if read1 is not None:
        print >>fastq_fh, read1
    if read2 is not None:
        print >>fastq_fh, read2
def main():
    """CLI entry point: <qname_sorted_discordant_reads> <chimeras> <output.fq>."""
    from optparse import OptionParser
    logging.basicConfig(level=logging.DEBUG,
                        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    parser = OptionParser("usage: %prog [options] <qname_sorted_discordant_reads> <chimeras> <output.fq>")
    options, args = parser.parse_args()
    # Positional arguments: input reads, chimera candidates, output FASTQ.
    discordant_reads_file = args[0]
    chimeras_file = args[1]
    fastq_file = args[2]
    nominate_spanning_reads(open(discordant_reads_file, 'r'),
                            open(chimeras_file, 'r'),
                            open(fastq_file, 'w'))


if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from itertools import groupby
from time import time
from functools import partial
import re
import django
django.setup()
from django.db import transaction
from clldutils.dsv import reader
from clldutils.text import split_text
from clldutils.path import Path
from clldutils import jsonlib
import attr
from dplace_app.models import Source
from loader.util import configure_logging, load_regions
from loader.society import society_locations, load_societies, | load_society_relations
from loader.phylogenies import load_phylogenies
from loader.variables import load_vars
from loader.values import load_data
from loader.sources import load_references
from loader.glottocode import loa | d_languages
# Field-splitting helpers: comma/semicolon separated values, each piece
# whitespace-stripped, with bracket-aware splitting disabled (brackets={}).
comma_split = partial(split_text, separators=',', strip=True, brackets={})
semicolon_split = partial(split_text, separators=';', strip=True, brackets={})
def valid_enum_member(choices, instance, attribute, value):
    """attrs-style validator: raise ValueError unless value is in choices."""
    if value in choices:
        return
    raise ValueError(value)
@attr.s
class Variable(object):
    # One row of a dataset's variables.csv. `category` is normalized to a
    # list of capitalized names; `codes` is filled in from codes.csv by
    # Dataset.variables.
    category = attr.ib(convert=lambda s: [c.capitalize() for c in comma_split(s)])
    id = attr.ib()
    title = attr.ib()
    definition = attr.ib()
    # Only these three measurement types are accepted.
    type = attr.ib(
        validator=partial(valid_enum_member, ['Continuous', 'Categorical', 'Ordinal']))
    units = attr.ib()
    source = attr.ib()
    changes = attr.ib()
    notes = attr.ib()
    codes = attr.ib(default=attr.Factory(list))
@attr.s
class Data(object):
    # One observation row of a dataset's data.csv; `references` is split on
    # semicolons into a list.
    soc_id = attr.ib()
    sub_case = attr.ib()
    year = attr.ib()
    var_id = attr.ib()
    code = attr.ib()
    comment = attr.ib()
    references = attr.ib(convert=semicolon_split)
    source_coded_data = attr.ib()
    admin_comment = attr.ib()
@attr.s
class ObjectWithSource(object):
    # Shared shape of the index.csv rows for datasets and phylogenies.
    id = attr.ib()
    name = attr.ib()
    year = attr.ib()
    author = attr.ib()
    reference = attr.ib()
    base_dir = attr.ib()

    @property
    def dir(self):
        # Directory holding this object's files: <base_dir>/<id>.
        return self.base_dir.joinpath(self.id)

    def as_source(self):
        # Persist this object's bibliographic fields as a Source DB record.
        return Source.objects.create(
            **{k: getattr(self, k) for k in 'year author name reference'.split()})
@attr.s
class RelatedSociety(object):
    # One "Dataset: Name [id]" cross-reference to a society.
    dataset = attr.ib(convert=lambda s: s.strip())
    name = attr.ib(convert=lambda s: s.strip())
    id = attr.ib(convert=lambda s: s.strip())

    @classmethod
    def from_string(cls, s):
        """Parse a 'Dataset: Name [id]' string; raise ValueError if malformed."""
        match = re.match('([A-Za-z]+):\s*([^\[]+)\[([^\]]+)\]$', s)
        if not match:
            raise ValueError(s)
        return cls(*match.groups())
@attr.s
class RelatedSocieties(object):
    # societies_mapping.csv row: a society id plus its semicolon-separated
    # list of related-society references.
    id = attr.ib()
    related = attr.ib(convert=lambda s: [
        RelatedSociety.from_string(ss) for ss in semicolon_split(s)])
@attr.s
class Dataset(ObjectWithSource):
    # A dataset directory; CSV contents are exposed via the properties below.
    type = attr.ib(validator=partial(valid_enum_member, ['cultural', 'environmental']))
    description = attr.ib()
    url = attr.ib()

    def _items(self, what, **kw):
        # Read <dir>/<what>.csv, or return [] when the file is absent.
        fname = self.dir.joinpath('{0}.csv'.format(what))
        return list(reader(fname, **kw)) if fname.exists() else []

    @property
    def data(self):
        return [Data(**d) for d in self._items('data', dicts=True)]

    @property
    def references(self):
        return self._items('references', namedtuples=True)

    @property
    def societies(self):
        return self._items('societies', namedtuples=True)

    @property
    def society_relations(self):
        return [
            RelatedSocieties(**d) for d in self._items('societies_mapping', dicts=True)]

    @property
    def variables(self):
        # Group codes.csv rows by var_id (groupby requires the pre-sort),
        # then attach each group to its Variable.
        codes = {vid: list(c) for vid, c in groupby(
            sorted(self._items('codes', namedtuples=True), key=lambda c: c.var_id),
            lambda c: c.var_id)}
        return [
            Variable(codes=codes.get(v['id'], []), **v)
            for v in self._items('variables', dicts=True)]
@attr.s
class Phylogeny(ObjectWithSource):
    # A phylogeny directory: summary.trees plus a taxa.csv mapping file.
    scaling = attr.ib()
    url = attr.ib()

    @property
    def trees(self):
        return self.dir.joinpath('summary.trees')

    @property
    def taxa(self):
        return list(reader(self.dir.joinpath('taxa.csv'), dicts=True))
class Repos(object):
    """Accessor for a D-PLACE data repository checkout on disk."""

    def __init__(self, dir_):
        self.dir = dir_
        # Each index.csv row describes one dataset/phylogeny subdirectory.
        self.datasets = [
            Dataset(base_dir=self.dir.joinpath('datasets'), **r) for r in
            reader(self.dir.joinpath('datasets', 'index.csv'), dicts=True)]
        self.phylogenies = [
            Phylogeny(base_dir=self.dir.joinpath('phylogenies'), **r) for r in
            reader(self.dir.joinpath('phylogenies', 'index.csv'), dicts=True)]

    def path(self, *comps):
        # Join path components below the repository root.
        return self.dir.joinpath(*comps)

    def read_csv(self, *comps, **kw):
        return list(reader(self.path(*comps), **kw))

    def read_json(self, *comps):
        return jsonlib.load(self.path(*comps))
def load(repos, test=True):
    """Load a D-PLACE data repository into the database.

    :param repos: path to the data repository checkout
    :param test: when False, progress and timing are printed
    """
    configure_logging(test=test)
    repos = Repos(repos)
    # Loaders run in dependency order; each one inside its own transaction.
    for func in [
        load_societies,
        load_society_relations,
        load_regions,
        society_locations,
        load_vars,
        load_languages,
        load_references,
        load_data,
        load_phylogenies,
    ]:
        with transaction.atomic():
            if not test:
                print("%s..." % func.__name__)  # pragma: no cover
            start = time()
            res = func(repos)
            if not test:  # pragma: no cover
                print("{0} loaded in {1:.2f} secs".format(res, time() - start))


if __name__ == '__main__':  # pragma: no cover
    load(Path(sys.argv[1]), test=False)
    sys.exit(0)
|
#
# Copyright 2015 by Justin MacCallum, Alberto Perez, Ken Dill
# All rights reserved
#
import numpy as np
import math
class ProteinBase(object):
    '''
    Base class for other Protein classes.
    Provides functionality for translation/rotation and adding H-bonds.
    '''

    def __init__(self):
        self._translation_vector = np.zeros(3)
        # NOTE: historical "rotatation" typo kept — code outside this file
        # may reference the attribute by this name.
        self._rotatation_matrix = np.eye(3)
        self._disulfide_list = []
        self._general_bond = []
        self._prep_files = []
        self._frcmod_files = []
        self._lib_files = []

    def set_translation(self, translation_vector):
        '''
        Set the translation vector.
        :param translation_vector: ``numpy.array(3)`` in nanometers
        Translation happens after rotation.
        '''
        self._translation_vector = np.array(translation_vector)

    def set_rotation(self, rotation_axis, theta):
        '''
        Set the rotation.
        :param rotation_axis: ``numpy.array(3)``; normalized internally
        :param theta: angle of rotation in degrees

        Rotation is applied before translation (generate_tleap_input emits the
        rotation command first); the original docstring stated the opposite.
        '''
        # BUG FIX: convert degrees to radians. The original multiplied by
        # 180/pi — the inverse conversion — producing wrong rotation angles.
        theta = theta * math.pi / 180.0
        rotation_axis = rotation_axis / np.linalg.norm(rotation_axis)
        # Euler-Rodrigues formula for the axis-angle rotation matrix.
        a = np.cos(theta / 2.)
        b, c, d = -rotation_axis * np.sin(theta / 2.)
        self._rotatation_matrix = np.array(
            [[a*a+b*b-c*c-d*d, 2*(b*c-a*d), 2*(b*d+a*c)],
             [2*(b*c+a*d), a*a+c*c-b*b-d*d, 2*(c*d-a*b)],
             [2*(b*d-a*c), 2*(c*d+a*b), a*a+d*d-b*b-c*c]])

    def add_bond(self, res_index_i, res_index_j, atom_name_i, atom_name_j, bond_type):
        '''
        Add a general bond.
        :param res_index_i: one-based index of residue i
        :param res_index_j: one-based index of residue j
        :param atom_name_i: string name of i
        :param atom_name_j: string name of j
        :param bond_type: string specifying the "S", "D","T"... bond
        .. note::
           indexing starts from one and the residue numbering from the PDB file is ignored.
        '''
        self._general_bond.append((res_index_i, res_index_j, atom_name_i, atom_name_j, bond_type))

    def add_disulfide(self, res_index_i, res_index_j):
        '''
        Add a disulfide bond.
        :param res_index_i: one-based index of residue i
        :param res_index_j: one-based index of residue j
        .. note::
           indexing starts from one and the residue numbering from the PDB file is ignored. When loading
           from a PDB or creating a sequence, residue name must be CYX, not CYS.
        '''
        self._disulfide_list.append((res_index_i, res_index_j))

    def add_prep_file(self, fname):
        '''
        Add a prep file.
        This will be needed when using residues that
        are not defined in the general amber force field
        '''
        self._prep_files.append(fname)

    def add_frcmod_file(self, fname):
        '''
        Add a frcmod file.
        This will be needed when using residues that
        are not defined in the general amber force field
        '''
        self._frcmod_files.append(fname)

    def add_lib_file(self, fname):
        '''
        Add a lib file.
        This will be needed when using residues that
        are not defined in the general amber force field
        '''
        self._lib_files.append(fname)

    def _gen_translation_string(self, mol_id):
        # tleap "translate" command for this molecule's translation vector.
        return '''translate {mol_id} {{ {x} {y} {z} }}'''.format(mol_id=mol_id,
                                                                 x=self._translation_vector[0],
                                                                 y=self._translation_vector[1],
                                                                 z=self._translation_vector[2])

    def _gen_rotation_string(self, mol_id):
        # Rotation is not yet emitted as a tleap command; subclasses append
        # this empty string harmlessly.
        return ''

    def _gen_bond_string(self, mol_id):
        # One tleap "bond" command per general bond, quoting the bond order.
        bond_strings = []
        for i, j, a, b, t in self._general_bond:
            d = 'bond {mol_id}.{i}.{a} {mol_id}.{j}.{b} "{t}"'.format(mol_id=mol_id, i=i, j=j, a=a, b=b, t=t)
            bond_strings.append(d)
        return bond_strings

    def _gen_disulfide_string(self, mol_id):
        # One tleap "bond" command per disulfide, always between SG atoms.
        disulfide_strings = []
        for i, j in self._disulfide_list:
            d = 'bond {mol_id}.{i}.SG {mol_id}.{j}.SG'.format(mol_id=mol_id, i=i, j=j)
            disulfide_strings.append(d)
        return disulfide_strings

    def _gen_read_prep_string(self):
        # tleap commands to load each registered prep file.
        prep_string = []
        for p in self._prep_files:
            prep_string.append('loadAmberPrep {}'.format(p))
        return prep_string

    def _gen_read_frcmod_string(self):
        # tleap commands to load each registered frcmod parameter file.
        frcmod_string = []
        for p in self._frcmod_files:
            frcmod_string.append('loadAmberParams {}'.format(p))
        return frcmod_string

    def _gen_read_lib_string(self):
        # tleap commands to load each registered library file.
        lib_string = []
        for p in self._lib_files:
            lib_string.append('loadoff {}'.format(p))
        return lib_string
class ProteinMoleculeFromSequence(ProteinBase):
    '''
    Create a protein molecule from an Amber/Leap sequence string.

    This class is intentionally thin and delegates all of the heavy lifting
    to AmberTools.

    :param sequence: sequence of the protein to create, in Amber/Leap format.
        Special NRES/CRES variants exist for the N- and C-termini, and
        alternate protonation states are selected via residue names
        (e.g. ASH for neutral ASP).
    '''

    def __init__(self, sequence):
        super(ProteinMoleculeFromSequence, self).__init__()
        self._sequence = sequence

    def prepare_for_tleap(self, mol_id):
        # Nothing needs to be staged on disk for a sequence-based molecule.
        pass

    def generate_tleap_input(self, mol_id):
        # Assemble the tleap script: force field, extra parameter files,
        # the sequence itself, then bonds and placement commands.
        commands = ['source leaprc.gaff']
        for generator in (self._gen_read_frcmod_string,
                          self._gen_read_prep_string,
                          self._gen_read_lib_string):
            commands.extend(generator())
        commands.append('{mol_id} = sequence {{ {seq} }}'.format(mol_id=mol_id, seq=self._sequence))
        commands.extend(self._gen_disulfide_string(mol_id))
        commands.extend(self._gen_bond_string(mol_id))
        commands.append(self._gen_rotation_string(mol_id))
        commands.append(self._gen_translation_string(mol_id))
        return commands
class ProteinMoleculeFromPdbFile(ProteinBase):
    '''
    Create a new protein molecule from a pdb file.

    This class is intentionally thin and relies on AmberTools for the heavy
    lifting.

    :param pdb_path: string path to the pdb file

    .. note::
       no processing happens to this pdb file. It must be understandable by
       tleap, and atoms/residues may need to be added/deleted/renamed. Those
       manipulations should happen to the file before MELD is invoked.
    '''

    def __init__(self, pdb_path):
        super(ProteinMoleculeFromPdbFile, self).__init__()
        # Capture the file contents now so later steps need no original path.
        with open(pdb_path) as pdb_file:
            self._pdb_contents = pdb_file.read()

    def prepare_for_tleap(self, mol_id):
        # Stage the pdb contents into the current working directory under a
        # name derived from the molecule id.
        destination = '{mol_id}.pdb'.format(mol_id=mol_id)
        with open(destination, 'w') as out_file:
            out_file.write(self._pdb_contents)

    def generate_tleap_input(self, mol_id):
        # Assemble the tleap script: force field, extra parameter files,
        # the staged pdb, then bonds and placement commands.
        commands = ['source leaprc.gaff']
        commands.extend(self._gen_read_frcmod_string())
        commands.extend(self._gen_read_prep_string())
        commands.extend(self._gen_read_lib_string())
        commands.append('{mol_id} = loadPdb {mol_id}.pdb'.format(mol_id=mol_id))
        commands.extend(self._gen_bond_string(mol_id))
        commands.extend(self._gen_disulfide_string(mol_id))
        commands.append(self._gen_rotation_string(mol_id))
        commands.append(self._gen_translation_string(mol_id))
        return commands
|
"""
Django settings for gnucash_explorer project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Project root: two directory levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Deliberately unset here; local_settings.py (imported at the bottom of this
# file) is expected to provide the real value.
SECRET_KEY = None
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'gnucash_explorer.urls'
WSGI_APPLICATION = 'gnucash_explorer.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# Machine-specific overrides (including SECRET_KEY) live in local_settings.py.
try:
    from gnucash_explorer.local_settings import *
except ImportError as e:
    print('You should set up your local_settings.py')
|
('id'))
return {'vsa': _vsa_view(context, vsa, True, instances)}
def delete(self, req, id):
"""Delete a VSA."""
context = req.environ['nova.context']
authorize(context)
LOG.audit(_("Delete VSA with id: %s"), id, context=context)
try:
self.vsa_api.delete(context, vsa_id=id)
except exception.NotFound:
raise exc.HTTPNotFound()
    def associate_address(self, req, id, body):
        """ /zadr-vsa/{vsa_id}/associate_address
        auto or manually associate an IP to VSA
        """
        context = req.environ['nova.context']
        authorize(context)
        # 'auto' when no body or no explicit address is supplied.
        if body is None:
            ip = 'auto'
        else:
            ip = body.get('ipAddress', 'auto')
        # NOTE: locals() is captured for log interpolation — local variable
        # names (ip, id) are load-bearing here.
        LOG.audit(_("Associate address %(ip)s to VSA %(id)s"),
                  locals(), context=context)
        try:
            instances = self._get_instances_by_vsa_id(context, id)
            if instances is None or len(instances) == 0:
                raise exc.HTTPNotFound()
            for instance in instances:
                self.network_api.allocate_for_instance(context, instance,
                                                       vpn=False)
            # Placeholder
            return
        except exception.NotFound:
            raise exc.HTTPNotFound()
def disassociate_address(self, req, id, body):
    """POST /zadr-vsa/{vsa_id}/disassociate_address

    Auto or manually disassociate an IP from VSA.
    Currently a placeholder: only parses the request and audit-logs it.
    """
    context = req.environ['nova.context']
    authorize(context)
    # `ip` is parsed but unused beyond locals() in the audit log.
    if body is None:
        ip = 'auto'
    else:
        ip = body.get('ipAddress', 'auto')
    LOG.audit(_("Disassociate address from VSA %(id)s"),
              locals(), context=context)
    # Placeholder
def make_volume(elem):
    """Decorate a volume XML template element with VSA-specific attributes.

    Delegates the common volume attributes to volumes.make_volume, then
    adds the two fields this extension grafts onto the payload.
    """
    volumes.make_volume(elem)
    elem.set('name')
    elem.set('vsaId')
class VsaVolumeDriveController(volumes.VolumeController):
    """The base class for VSA volumes & drives.

    A child resource of the VSA object. Allows operations with
    volumes and drives created to/from particular VSA.

    Subclasses appear to provide ``self.object`` / ``self.objects``
    (singular/plural resource names) and ``self.direction`` (the volume
    metadata key tying a volume to a VSA) — TODO confirm against the
    concrete subclasses, which are not visible here.
    """

    def __init__(self):
        self.volume_api = volume.API()
        self.vsa_api = vsa.API()
        super(VsaVolumeDriveController, self).__init__()

    def _translation(self, context, vol, vsa_id, details):
        """Serialize one volume, grafting on the VSA-specific fields."""
        if details:
            translation = volumes._translate_volume_detail_view
        else:
            translation = volumes._translate_volume_summary_view
        d = translation(context, vol)
        d['vsaId'] = vsa_id
        d['name'] = vol['name']
        return d

    def _check_volume_ownership(self, context, vsa_id, id):
        """Verify that volume `id` belongs to VSA `vsa_id`.

        :raises exception.NotFound: volume does not exist.
        :raises exception.Invalid: volume belongs to another VSA.
        """
        obj = self.object
        try:
            volume_ref = self.volume_api.get(context, id)
        except exception.NotFound:
            # Message interpolates from locals(); `obj`/`id` names matter.
            LOG.error(_("%(obj)s with ID %(id)s not found"), locals())
            raise
        own_vsa_id = self.volume_api.get_volume_metadata_value(volume_ref,
                                                               self.direction)
        if own_vsa_id != vsa_id:
            LOG.error(_("%(obj)s with ID %(id)s belongs to VSA %(own_vsa_id)s"\
                        " and not to VSA %(vsa_id)s."), locals())
            raise exception.Invalid()

    def _items(self, req, vsa_id, details):
        """Return summary or detailed list of volumes for particular VSA."""
        context = req.environ['nova.context']
        authorize(context)
        # Only volumes whose metadata ties them to this VSA (in this
        # controller's direction) are listed.
        vols = self.volume_api.get_all(context,
            search_opts={'metadata': {self.direction: str(vsa_id)}})
        limited_list = common.limited(vols, req)
        res = [self._translation(context, vol, vsa_id, details) \
               for vol in limited_list]
        return {self.objects: res}

    def index(self, req, vsa_id):
        """Return a short list of volumes created from particular VSA."""
        LOG.audit(_("Index. vsa_id=%(vsa_id)s"), locals())
        return self._items(req, vsa_id, details=False)

    def detail(self, req, vsa_id):
        """Return a detailed list of volumes created from particular VSA."""
        LOG.audit(_("Detail. vsa_id=%(vsa_id)s"), locals())
        return self._items(req, vsa_id, details=True)

    def create(self, req, vsa_id, body):
        """Create a new volume from VSA.

        :raises webob.exc.HTTPUnprocessableEntity: empty body.
        :raises webob.exc.HTTPNotFound: VSA volume type missing.
        """
        LOG.audit(_("Create. vsa_id=%(vsa_id)s, body=%(body)s"), locals())
        context = req.environ['nova.context']
        authorize(context)
        if not body:
            raise exc.HTTPUnprocessableEntity()
        vol = body[self.object]
        size = vol['size']
        LOG.audit(_("Create volume of %(size)s GB from VSA ID %(vsa_id)s"),
                  locals(), context=context)
        try:
            # create is supported for volumes only (drives created through VSA)
            volume_type = self.vsa_api.get_vsa_volume_type(context)
        except exception.NotFound:
            raise exc.HTTPNotFound()
        new_volume = self.volume_api.create(context,
                                            size,
                                            vol.get('displayName'),
                                            vol.get('displayDescription'),
                                            None,
                                            volume_type=volume_type,
                                            metadata=dict(from_vsa_id=str(vsa_id)))
        return {self.object: self._translation(context, new_volume,
                                               vsa_id, True)}

    def update(self, req, vsa_id, id, body):
        """Update a volume owned by the VSA; 202 on success."""
        context = req.environ['nova.context']
        authorize(context)
        try:
            self._check_volume_ownership(context, vsa_id, id)
        except exception.NotFound:
            raise exc.HTTPNotFound()
        except exception.Invalid:
            raise exc.HTTPBadRequest()
        vol = body[self.object]
        # Each entry maps one API field name to the internal field name.
        updatable_fields = [{'displayName': 'display_name'},
                            {'displayDescription': 'display_description'},
                            {'status': 'status'},
                            {'providerLocation': 'provider_location'},
                            {'providerAuth': 'provider_auth'}]
        changes = {}
        for field in updatable_fields:
            # NOTE: keys()[0] is a Python 2 idiom (keys() is a view in py3).
            key = field.keys()[0]
            val = field[key]
            if key in vol:
                changes[val] = vol[key]
        obj = self.object
        LOG.audit(_("Update %(obj)s with id: %(id)s, changes: %(changes)s"),
                  locals(), context=context)
        try:
            volume = self.volume_api.get(context, id)
            self.volume_api.update(context, volume, fields=changes)
        except exception.NotFound:
            raise exc.HTTPNotFound()
        return webob.Response(status_int=202)

    def delete(self, req, vsa_id, id):
        """Delete a volume after checking it belongs to the VSA."""
        context = req.environ['nova.context']
        authorize(context)
        LOG.audit(_("Delete. vsa_id=%(vsa_id)s, id=%(id)s"), locals())
        try:
            self._check_volume_ownership(context, vsa_id, id)
        except exception.NotFound:
            raise exc.HTTPNotFound()
        except exception.Invalid:
            raise exc.HTTPBadRequest()
        return super(VsaVolumeDriveController, self).delete(req, id)

    def show(self, req, vsa_id, id):
        """Return data about the given volume, if owned by the VSA."""
        context = req.environ['nova.context']
        authorize(context)
        LOG.audit(_("Show. vsa_id=%(vsa_id)s, id=%(id)s"), locals())
        try:
            self._check_volume_ownership(context, vsa_id, id)
        except exception.NotFound:
            raise exc.HTTPNotFound()
        except exception.Invalid:
            raise exc.HTTPBadRequest()
        return super(VsaVolumeDriveController, self).show(req, id)
class VsaVolumeTemplate(xmlutil.TemplateBuilder):
    """Builds the XML serialization template for a single VSA volume."""

    def construct(self):
        """Return a master template rooted at a 'volume' element."""
        volume_elem = xmlutil.TemplateElement('volume', selector='volume')
        make_volume(volume_elem)
        return xmlutil.MasterTemplate(volume_elem, 1)
class VsaVolumesTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volumes')
elem = xmlutil.SubTemplateE |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A TensorFlow pad function that works like numpy.
Specifically, tf.pad can't more than double the length of a Tensor, while
numpy's can. For example:
x = np.array(range(3))
np.pad(x, [0, 5], mode='symmetric')
-> array([0, 1, 2, 2, 1, 0, 0, 1])
x = tf.constant(range(3))
tf.pad(x, [(0, 5)], mode='symmetric')
-> fails
"""
from typing import Union
import tensorflow as tf
def tf_pad(samples, padding, mode):
    """Pad `samples` on the right of axis 1, numpy-style.

    SYMMETRIC mode is dispatched to `tf_pad_symmetric`, which supports
    padding longer than the input; all other modes go straight to tf.pad.

    Args:
        samples: rank-2 tensor.
        padding: number of columns to append on axis 1.
        mode: padding mode as understood by tf.pad (e.g. 'SYMMETRIC').

    Raises:
        ValueError: if `samples` is not rank 2.
    """
    if samples.shape.ndims != 2:
        raise ValueError(f'tensor must be rank 2: {samples.shape}')
    if mode != 'SYMMETRIC':
        return tf.pad(samples, [(0, 0), (0, padding)], mode=mode)
    return tf_pad_symmetric(samples, padding)
def tf_pad_s | ymmetric(tensor, padding):
"""Symmetric pad a 2D Tensor."""
if tensor.shape.ndims != 2:
raise ValueError(f'tensor must be rank 2: {tensor.shape}')
t_len = | tf.shape(tensor)[1]
return tf.cond(
padding > t_len,
lambda: _repeat_n_times_with_extra(tensor, padding, t_len),
lambda: tf.pad(tensor, [(0, 0), (0, padding)], mode='SYMMETRIC'))
def _repeat_n_times_with_extra(tensor, padding, t_len):
    """Pad symmetric longer than the original tensor.

    Builds floor(padding / t_len) full reflected copies of `tensor` by
    tiling, then lets tf.pad append the remaining (padding mod t_len)
    columns symmetrically.

    Args:
        tensor: rank-2 tensor to pad along axis 1.
        padding: total number of columns to append (> t_len).
        t_len: dynamic length of axis 1, i.e. tf.shape(tensor)[1].

    Returns:
        `tensor` with `padding` symmetric columns appended on axis 1.

    Raises:
        ValueError: if `tensor` is not rank 2.
    """
    # BUGFIX: the original `assert ..., tensor.ndims` referenced a
    # nonexistent attribute (`tensor.ndims`), so the failure path itself
    # raised AttributeError; it was also stripped under `python -O`.
    # Use the same explicit check as the sibling functions.
    if tensor.shape.ndims != 2:
        raise ValueError(f'tensor must be rank 2: {tensor.shape}')
    num_copies = tf.math.floordiv(padding, t_len)
    r = tf.reverse(tensor, axis=[1])
    # Each tile of [reversed, original] contributes two reflected copies.
    f = tf.concat([r, tensor], axis=1)
    copies = tf.tile(f, [1, tf.math.floordiv(num_copies, 2)])
    # Odd number of copies: one extra reversed copy is still needed.
    copies = tf.cond(
        tf.math.mod(num_copies, 2) == 0,
        lambda: copies,
        lambda: tf.concat([copies, r], axis=1),
    )
    pre_pad_tensor = tf.concat([tensor, copies], axis=1)
    extra = tf.math.mod(padding, t_len)
    return tf.pad(pre_pad_tensor, paddings=[(0, 0), (0, extra)], mode='SYMMETRIC')
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parsing API Discovery document."""
import mock_api_types
from gcutil_lib import mock_api_types
class Parser(object):
  """Discovery document parser.

  Parses discovery document types, resources and methods. Result of parsing is a
  dictionary method_id -> method.

  NOTE: this module targets Python 2 (dict.iteritems is used throughout).
  """

  __slots__ = ('_discovery_document', '_parsed_schemas', '_parsed_methods',
               '_base_url', '_common_parameters')

  def __init__(self, doc):
    # doc: the discovery document as a parsed, nested dict structure.
    self._discovery_document = doc
    self._parsed_schemas = {}    # schema name -> ObjectType (possibly forward-declared)
    self._parsed_methods = {}    # method id -> Method
    self._base_url = ''
    self._common_parameters = {}

  def _ParseType(self, discovery_type):
    """Convert one discovery type descriptor into a mock_api_types type."""
    # A '$ref' refers to a named schema; resolve (or forward-declare) it.
    ref = discovery_type.get('$ref')
    if ref:
      return self._GetSchema(ref)
    type_name = discovery_type['type']
    if type_name == 'any':
      return mock_api_types.AnyType()
    elif type_name == 'array':
      return mock_api_types.ArrayType(self._ParseType(discovery_type['items']))
    elif type_name == 'boolean':
      return mock_api_types.BooleanType()
    elif type_name == 'integer':
      return self._ParseIntegerType(discovery_type)
    elif type_name == 'number':
      return self._ParseNumberType(discovery_type)
    elif type_name == 'object':
      return self._ParseObjectType(discovery_type)
    elif type_name == 'string':
      return self._ParseStringType(discovery_type)
    else:
      raise ValueError('Unrecognized type {type}'.format(type=type_name))

  def _ParseIntegerType(self, discovery_type):
    """Parse an integer type; only int32/uint32 formats are accepted."""
    value_format = discovery_type.get('format')
    if value_format in (None, 'int32', 'uint32'):
      return mock_api_types.IntegerType(value_format or 'int32')
    raise ValueError('Invalid integer format {value}'.format(
        value=value_format))

  def _ParseNumberType(self, discovery_type):
    """Parse a number type; only double/float formats are accepted."""
    value_format = discovery_type.get('format')
    if value_format in (None, 'double', 'float'):
      return mock_api_types.NumberType(value_format or 'double')
    raise ValueError('Invalid number format {value}'.format(
        value=value_format))

  def _ParseStringType(self, discovery_type):
    """Parse a string type; format may be None or a recognized variant."""
    value_format = discovery_type.get('format')
    if value_format in (None, 'byte', 'date', 'date-time', 'int64', 'uint64'):
      return mock_api_types.StringType(value_format)
    raise ValueError('Invalid string format {value}'.format(
        value=value_format))

  def _ParseObjectType(self, discovery_type):
    """Parse an inline (anonymous) object type."""
    properties, additional = self._ParseProperties(discovery_type)
    object_type = mock_api_types.ObjectType()
    object_type.Define('', properties, additional)
    return object_type

  def _ParseSchema(self, discovery_schema):
    """Parse a named schema and register it under its 'id'."""
    properties, additional = self._ParseProperties(discovery_schema)
    return self._CreateSchema(
        discovery_schema.get('id'), properties, additional)

  def _ParseProperties(self, discovery_object_type):
    """Parses properties of a discovery document object type.

    Returns a (properties, additional) pair where `additional` is the
    parsed additionalProperties type or None.
    """
    assert discovery_object_type.get('type') == 'object'
    properties = []
    for property_name, property_type in (
        discovery_object_type.get('properties', {}).iteritems()):
      properties.append(mock_api_types.Property(
          property_name, self._ParseType(property_type)))
    additional = None
    additional_properties = discovery_object_type.get('additionalProperties')
    if additional_properties is not None:
      additional = self._ParseType(additional_properties)
    return properties, additional

  def _ParseSchemas(self, discovery_schemas):
    """Parse every schema in the document's 'schemas' map."""
    for _, discovery_schema in discovery_schemas.iteritems():
      self._ParseSchema(discovery_schema)

  def _ParseMethods(self, discovery_methods):
    """Parse every method in a resource's 'methods' map."""
    for method_name, discovery_method in discovery_methods.iteritems():
      self._ParseMethod(method_name, discovery_method)

  def _ParseParameter(self, parameter_name, parameter_type):
    return mock_api_types.Parameter(
        parameter_name, self._ParseType(parameter_type))

  def _ParseParameters(self, discovery_method_parameters):
    """Parse a parameters map into a list sorted by parameter name."""
    parameters = []
    for parameter_name, parameter_type in (
        discovery_method_parameters.iteritems()):
      parameters.append(
          self._ParseParameter(parameter_name, parameter_type))
    parameters.sort(key=lambda parameter: parameter.name)
    return parameters

  def _ParseMethod(self, method_name, discovery_method):
    """Parse one method: parameters plus optional request/response types."""
    parameters = self._ParseParameters(discovery_method.get('parameters', {}))
    # Parse request type
    discovery_method_request = discovery_method.get('request')
    if discovery_method_request is None:
      request_type = None
    else:
      request_type = self._ParseType(discovery_method_request)
    # Parse response type.
    discovery_method_response = discovery_method.get('response')
    if discovery_method_response is None:
      response_type = None
    else:
      response_type = self._ParseType(discovery_method_response)
    return self._CreateMethod(
        discovery_method.get('id'), method_name,
        discovery_method.get('path', ''), parameters,
        request_type, response_type)

  def _ParseResources(self, discovery_resources):
    """Parse all resources and, recursively, their subresources."""
    for _, discovery_resource in discovery_resources.iteritems():
      self._ParseResource(discovery_resource)
    # Return all accumulated methods.
    return self._parsed_methods

  def _ParseResource(self, discovery_resource):
    discovery_methods = discovery_resource.get('methods')
    if discovery_methods:
      self._ParseMethods(discovery_methods)
    discovery_resources = discovery_resource.get('resources')
    if discovery_resources:
      self._ParseResources(discovery_resources)

  def _ParseGlobals(self, discovery_document):
    """Record the document-wide base URL and common parameters."""
    self._base_url = discovery_document.get('baseUrl')
    self._common_parameters = self._ParseParameters(
        discovery_document.get('parameters', {}))

  def Parse(self):
    """Parse the whole document; returns dict method_id -> Method."""
    self._ParseGlobals(self._discovery_document)
    self._ParseSchemas(self._discovery_document.get('schemas'))
    return self._ParseResources(self._discovery_document.get('resources'))

  def _GetSchema(self, name):
    # Forward declaration: a $ref may be seen before its schema is parsed;
    # register an empty ObjectType now, to be defined later.
    schema = self._parsed_schemas.get(name)
    if schema is None:
      self._parsed_schemas[name] = schema = mock_api_types.ObjectType()
    return schema

  def _CreateSchema(self, name, properties, additional):
    schema = self._GetSchema(name)
    schema.Define(name, properties, additional)
    return schema

  def _CreateMethod(self, method_id, name, path, parameters, request, response):
    """Register a Method, merging common and method-level parameters."""
    if method_id in self._parsed_methods:
      raise ValueError('Duplicate method {method}'.format(method=method_id))
    # Method-level parameters override common ones with the same name.
    all_parameters = dict((p.name, p) for p in self._common_parameters)
    all_parameters.update(dict((p.name, p) for p in parameters))
    path = self._base_url + path
    method = mock_api_types.Method(
        method_id, name, path, all_parameters, request, response)
    self._parsed_methods[method_id] = method
    return method
|
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl). |
from . import res_company
# WARNING: Order of imports matters on this module, so don't put res_company
# below the other modules since it will lead to a missing column error when
# the module is initialized for the first time since there are fields with
# default values which refer to this new res.company field.
from . import event
f | rom . import event_mail
from . import event_type
from . import res_config_settings
|
from contextional import GCM

# Fixture script exercising contextional's GroupContextManager output
# formatting. Each `with GCM("A") as A:` block registers a group "A"
# containing a nested group "B" with setups/teardowns/a test, and
# `A.create_tests()` materializes them as test cases. The scenarios vary
# which fixtures carry descriptions and which raise; the runner output
# they should produce is listed in `expected_stream_output` below.

# Scenario 1: undescribed setup/teardown everywhere; everything passes.
with GCM("A") as A:

    @GCM.add_setup
    def setUp():
        pass

    @GCM.add_teardown
    def tearDown():
        pass

    with GCM.add_group("B"):

        @GCM.add_setup
        def setUp():
            pass

        @GCM.add_test("some test")
        def test(case):
            pass

        @GCM.add_teardown
        def tearDown():
            pass

A.create_tests()

# Scenario 2: every fixture carries a description; everything passes.
with GCM("A") as A:

    @GCM.add_setup("setup w/ description")
    def setUp():
        pass

    @GCM.add_teardown("teardown w/ description")
    def tearDown():
        pass

    with GCM.add_group("B"):

        @GCM.add_setup("setup w/ description")
        def setUp():
            pass

        @GCM.add_test("some test")
        def test(case):
            pass

        @GCM.add_teardown("teardown w/ description")
        def tearDown():
            pass

A.create_tests()

# Scenario 3: group A's described setup raises -> setup ERROR, test FAIL.
with GCM("A") as A:

    @GCM.add_setup("setup w/ description")
    def setUp():
        raise Exception
        pass

    @GCM.add_teardown("teardown w/ description")
    def tearDown():
        pass

    with GCM.add_group("B"):

        @GCM.add_setup("setup w/ description")
        def setUp():
            pass

        @GCM.add_test("some test")
        def test(case):
            pass

        @GCM.add_teardown("teardown w/ description")
        def tearDown():
            pass

A.create_tests()

# Scenario 4: like scenario 3 but the raising setup has no description.
with GCM("A") as A:

    @GCM.add_setup
    def setUp():
        raise Exception
        pass

    @GCM.add_teardown("teardown w/ description")
    def tearDown():
        pass

    with GCM.add_group("B"):

        @GCM.add_setup("setup w/ description")
        def setUp():
            pass

        @GCM.add_test("some test")
        def test(case):
            pass

        @GCM.add_teardown("teardown w/ description")
        def tearDown():
            pass

A.create_tests()

# Scenario 5: group B has an undescribed teardown that raises, followed
# by a described teardown that passes.
with GCM("A") as A:

    @GCM.add_setup("setup w/ description")
    def setUp():
        pass

    @GCM.add_teardown("teardown w/ description")
    def tearDown():
        pass

    with GCM.add_group("B"):

        @GCM.add_setup("setup w/ description")
        def setUp():
            pass

        @GCM.add_test("some test")
        def test(case):
            pass

        @GCM.add_teardown
        def tearDown():
            raise Exception()

        @GCM.add_teardown("teardown w/ description")
        def tearDown():
            pass

A.create_tests()

# Scenario 6: like scenario 5 but group A has no teardown at all.
with GCM("A") as A:

    @GCM.add_setup("setup w/ description")
    def setUp():
        pass

    with GCM.add_group("B"):

        @GCM.add_setup("setup w/ description")
        def setUp():
            pass

        @GCM.add_test("some test")
        def test(case):
            pass

        @GCM.add_teardown
        def tearDown():
            raise Exception()

        @GCM.add_teardown("teardown w/ description")
        def tearDown():
            pass

A.create_tests()

# Scenario 7: like scenario 5 but group A's teardown is undescribed.
with GCM("A") as A:

    @GCM.add_setup("setup w/ description")
    def setUp():
        pass

    @GCM.add_teardown
    def tearDown():
        pass

    with GCM.add_group("B"):

        @GCM.add_setup("setup w/ description")
        def setUp():
            pass

        @GCM.add_test("some test")
        def test(case):
            pass

        @GCM.add_teardown
        def tearDown():
            raise Exception()

        @GCM.add_teardown("teardown w/ description")
        def tearDown():
            pass

A.create_tests()

# Scenario 8: raising undescribed teardowns in BOTH group A and group B.
with GCM("A") as A:

    @GCM.add_setup("setup w/ description")
    def setUp():
        pass

    @GCM.add_teardown
    def tearDown():
        raise Exception()
        pass

    @GCM.add_teardown("teardown w/ description")
    def tearDown():
        pass

    with GCM.add_group("B"):

        @GCM.add_setup("setup w/ description")
        def setUp():
            pass

        @GCM.add_test("some test")
        def test(case):
            pass

        @GCM.add_teardown
        def tearDown():
            raise Exception()

        @GCM.add_teardown("teardown w/ description")
        def tearDown():
            pass

A.create_tests()

# Scenario 9: group B runs a described teardown, then a raising
# undescribed teardown, then another described teardown.
with GCM("A") as A:

    @GCM.add_setup("setup w/ description")
    def setUp():
        pass

    @GCM.add_teardown("teardown w/ description")
    def tearDown():
        pass

    with GCM.add_group("B"):

        @GCM.add_setup("setup w/ description")
        def setUp():
            pass

        @GCM.add_test("some test")
        def test(case):
            pass

        @GCM.add_teardown("teardown w/ description")
        def tearDown():
            pass

        @GCM.add_teardown
        def tearDown():
            raise Exception()

        @GCM.add_teardown("teardown w/ description")
        def tearDown():
            pass

A.create_tests()

# Scenario 10: group B's first DESCRIBED teardown raises.
with GCM("A") as A:

    @GCM.add_setup("setup w/ description")
    def setUp():
        pass

    @GCM.add_teardown("teardown w/ description")
    def tearDown():
        pass

    with GCM.add_group("B"):

        @GCM.add_setup("setup w/ description")
        def setUp():
            pass

        @GCM.add_test("some test")
        def test(case):
            pass

        @GCM.add_teardown("teardown w/ description")
        def tearDown():
            raise Exception()

        @GCM.add_teardown("teardown w/ description")
        def tearDown():
            pass

A.create_tests()

# Scenario 11: undescribed passing fixtures only (same as scenario 1).
with GCM("A") as A:

    @GCM.add_setup
    def setUp():
        pass

    @GCM.add_teardown
    def tearDown():
        pass

    with GCM.add_group("B"):

        @GCM.add_setup
        def setUp():
            pass

        @GCM.add_test("some test")
        def test(case):
            pass

        @GCM.add_teardown
        def tearDown():
            pass

A.create_tests()
# Expected formatted runner output for the scenarios above, in order.
# Lines starting with "#" inside the strings are fixture descriptions,
# annotated with ERROR/FAIL where a fixture raised.
expected_stream_output = [
    "A",
    " B",
    " some test ... ok",
    "A",
    " # setup w/ description ",
    " B",
    " # setup w/ description ",
    " some test ... ok",
    " # teardown w/ description ",
    " # teardown w/ description ",
    "A",
    " # setup w/ description ERROR",
    " B",
    " some test ... FAIL",
    " # teardown w/ description ",
    "A",
    " # setup (1/1) ERROR",
    " B",
    " some test ... FAIL",
    " # teardown w/ description ",
    "A",
    " # setup w/ description ",
    " B",
    " # setup w/ description ",
    " some test ... ok",
    " # teardown (1/2) ERROR",
    " # teardown w/ description ",
    "A",
    " # setup w/ description ",
    " B",
    " # setup w/ description ",
    " some test ... ok",
    " # teardown (1/2) ERROR",
    "A",
    " # setup w/ description ",
    " B",
    " # setup w/ description ",
    " some test ... ok",
    " # teardown (1/2) ERROR",
    "A",
    " # setup w/ description ",
    " B",
    " # setup w/ description ",
    " some test ... ok",
    " # teardown (1/2) ERROR",
    " # teardown (1/2) ERROR",
    "A",
    " # setup w/ description ",
    " B",
    " # setup w/ description ",
    " some test ... ok",
    " # teardown w/ description ",
    " # teardown (2/3) ERROR",
    " # teardown w/ description ",
    "A",
    " # setup w/ description ",
    " B",
    " # setup w/ description ",
    " some test ... ok",
    " # teardown w/ description ERROR",
    " # teardown w/ description ",
    "A",
    " B",
    " some test ... ok",
]
|
from django.conf.urls import include, url
from django.contrib import admin
from Poller import views
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^(?P<pk>\d+)/$', views.Deta | ilView.as_view(), name='detail'),
url(r'^(?P<pk>\d+)/results/$', views.ResultsView.as_view(), name='results'),
url(r'^(?P<question_id>\d+) | /vote/$', views.vote, name='vote'),
]
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 fileencoding=utf-8
import json
from django.conf import settings
from django.core.management import BaseCommand
from django.utils import timezone
from django.utils.dateparse import parse_datetime
from django.utils.translation import ugettext_lazy
from onadata.apps.logger.models import Instance
class Command(BaseCommand):
    """Management command that re-syncs instance soft-deletes from mongo."""

    help = ugettext_lazy("Fixes deleted instances by syncing "
                         "deleted items from mongo.")

    def handle(self, *args, **kwargs):
        """Clear SQL-side deletions, then re-apply mongo's _deleted_at."""
        # Reset all sql deletes to None
        # NOTE(review): exclude() with two kwargs only skips rows matching
        # BOTH conditions (deleted_at is NULL AND xform downloadable) —
        # confirm that is the intended set to leave untouched.
        Instance.objects.exclude(
            deleted_at=None, xform__downloadable=True).update(deleted_at=None)
        # Get all mongo deletes: records where _deleted_at exists and is
        # not null.
        query = '{"$and": [{"_deleted_at": {"$exists": true}}, ' \
                '{"_deleted_at": {"$ne": null}}]}'
        query = json.loads(query)
        xform_instances = settings.MONGO_DB.instances
        cursor = xform_instances.find(query)
        for record in cursor:
            # update sql instance with deleted_at datetime from mongo
            try:
                i = Instance.objects.get(
                    uuid=record["_uuid"], xform__downloadable=True)
            except Instance.DoesNotExist:
                # Mongo record has no matching SQL instance; skip it.
                continue
            else:
                deleted_at = parse_datetime(record["_deleted_at"])
                # Normalize to an aware datetime (assume UTC if naive).
                if not timezone.is_aware(deleted_at):
                    deleted_at = timezone.make_aware(
                        deleted_at, timezone.utc)
                i.set_deleted(deleted_at)
|
port ZCA_Dataset
from pylearn2.utils import serial
if __name__ == "__main__":
rng = np.random.RandomState(1234)
# data augmentation
zero_pad = 0
affine_transform_a = 0
affine_transform_b = 0
horizontal_flip = False
# batch
# keep a factor of 10000 if possible
# 10000 = (2*5)^4
batch_size = 100
number_of_batches_on_gpu = 45000/batch_size
BN = True
BN_epsilon=1e-4 # for numerical stability
BN_fast_eval= True
dropout_hidden = 1.
shuffle_examples = True
shuffle_batches = False
# Termination criteria
n_epoch = 300
monitor_step = 2
# LR
LR = .3
LR_fin = .001
LR_decay = (LR_fin/LR)**(1./n_epoch)
M= 0.
# BinaryConnect
BinaryConnect = True
stochastic = True
# Old hyperparameters
binary_training=False
stochastic_training=False
binary_test=False
stochastic_test=False
if BinaryConnect == True:
binary_training=True
if stochastic == True:
stochastic_training=True
else:
binary_test=True
print 'Loading the dataset'
preprocessor = serial.load("${PYLEARN2_DATA_PATH}/cifar10/pylearn2_gcn_whitened/preprocessor.pkl")
train_set = ZCA_Dataset(
preprocessed_dataset=serial.load("${PYLEARN2_DATA_PATH}/cifar10/pylearn2_gcn_whitened/train.pkl"),
preprocessor = preprocessor,
start=0, stop = 45000)
valid_set = ZCA_Dataset(
preprocessed_dataset= serial.load("${PYLEARN2_DATA_PATH}/cifar10/pylearn2_gcn_whitened/train.pkl"),
preprocessor = preprocessor,
start=45000, stop = 50000)
test_set = ZCA_Dataset(
preprocessed_dataset= serial.load("${PYLEARN2_DATA_PATH}/cifar10/pylearn2_gcn_whitened/test.pkl"),
preprocessor = preprocessor)
# bc01 format
# print train_set.X.shape
| train_set.X = train_set.X.reshape(45000,3,32,32)
valid_set.X = valid_set.X.reshape(5000,3,32,32)
test_set.X = test_set.X.reshape(10000,3,32,32)
# flatten targets
train_set.y = np.hstack(train_set.y)
valid_set.y = np.hstack(valid_set.y)
test_set.y = np.hstack(test_set.y)
# Onehot the targets
train_set.y = np.float32(np.eye(10)[train_set.y])
valid_set.y = np.floa | t32(np.eye(10)[valid_set.y])
test_set.y = np.float32(np.eye(10)[test_set.y])
# for hinge loss
train_set.y = 2* train_set.y - 1.
valid_set.y = 2* valid_set.y - 1.
test_set.y = 2* test_set.y - 1.
print 'Creating the model'
class DeepCNN(Network):
    """Deep CNN for CIFAR-10 BinaryConnect experiments (Python 2 code).

    Stack of 7 ReLU conv layers (3 with 2x2 pooling), one fully
    connected ReLU layer and a linear output layer trained with an L2
    SVM (hinge) loss. All layers share the BN / binary / stochastic
    flags defined at script level above.
    """

    def __init__(self, rng):
        Network.__init__(self, n_hidden_layer = 8, BN = BN)

        print "    C3 layer:"
        self.layer.append(ReLU_conv_layer(
            rng,
            filter_shape=(128, 3, 3, 3),
            pool_shape=(1,1),
            pool_stride=(1,1),
            BN = BN,
            BN_epsilon=BN_epsilon,
            binary_training=binary_training,
            stochastic_training=stochastic_training,
            binary_test=binary_test,
            stochastic_test=stochastic_test
        ))

        print "    C3 P2 layers:"
        self.layer.append(ReLU_conv_layer(
            rng,
            filter_shape=(128, 128, 3, 3),
            pool_shape=(2,2),
            pool_stride=(2,2),
            BN = BN,
            BN_epsilon=BN_epsilon,
            binary_training=binary_training,
            stochastic_training=stochastic_training,
            binary_test=binary_test,
            stochastic_test=stochastic_test
        ))

        print "    C2 layer:"
        self.layer.append(ReLU_conv_layer(
            rng,
            filter_shape=(256, 128, 2, 2),
            pool_shape=(1,1),
            pool_stride=(1,1),
            BN = BN,
            BN_epsilon=BN_epsilon,
            binary_training=binary_training,
            stochastic_training=stochastic_training,
            binary_test=binary_test,
            stochastic_test=stochastic_test
        ))

        print "    C2 P2 layers:"
        self.layer.append(ReLU_conv_layer(
            rng,
            filter_shape=(256, 256, 2, 2),
            pool_shape=(2,2),
            pool_stride=(2,2),
            BN = BN,
            BN_epsilon=BN_epsilon,
            binary_training=binary_training,
            stochastic_training=stochastic_training,
            binary_test=binary_test,
            stochastic_test=stochastic_test
        ))

        print "    C2 layer:"
        self.layer.append(ReLU_conv_layer(
            rng,
            filter_shape=(512, 256, 2, 2),
            pool_shape=(1,1),
            pool_stride=(1,1),
            BN = BN,
            BN_epsilon=BN_epsilon,
            binary_training=binary_training,
            stochastic_training=stochastic_training,
            binary_test=binary_test,
            stochastic_test=stochastic_test
        ))

        print "    C2 P2 layers:"
        self.layer.append(ReLU_conv_layer(
            rng,
            filter_shape=(512, 512, 2, 2),
            pool_shape=(2,2),
            pool_stride=(2,2),
            BN = BN,
            BN_epsilon=BN_epsilon,
            binary_training=binary_training,
            stochastic_training=stochastic_training,
            binary_test=binary_test,
            stochastic_test=stochastic_test
        ))

        print "    C2 layer:"
        self.layer.append(ReLU_conv_layer(
            rng,
            filter_shape=(1024, 512, 2, 2),
            pool_shape=(1,1),
            pool_stride=(1,1),
            BN = BN,
            BN_epsilon=BN_epsilon,
            binary_training=binary_training,
            stochastic_training=stochastic_training,
            binary_test=binary_test,
            stochastic_test=stochastic_test
        ))

        print "    FC layer:"
        self.layer.append(ReLU_layer(
            rng = rng,
            n_inputs = 1024,
            n_units = 1024,
            BN = BN,
            BN_epsilon=BN_epsilon,
            dropout=dropout_hidden,
            binary_training=binary_training,
            stochastic_training=stochastic_training,
            binary_test=binary_test,
            stochastic_test=stochastic_test
        ))

        print "    L2 SVM layer:"
        self.layer.append(linear_layer(
            rng = rng,
            n_inputs= 1024,
            n_units = 10,
            BN = BN,
            BN_epsilon=BN_epsilon,
            dropout = dropout_hidden,
            binary_training=binary_training,
            stochastic_training=stochastic_training,
            binary_test=binary_test,
            stochastic_test=stochastic_test
        ))
model = DeepCNN(rng = rng)
print 'Creating the trainer'
trainer = Trainer(rng = rng,
train_set = train_set, valid_set = valid_set, test_set = test_set,
model = model, load_path = None, save_path = None,
zero_pad=zero_pad,
affine_transform_a=affine_transform_a, # |
#
# BTZen - library to asynchronously access Bluetooth devices.
#
# Copyright (C) 2015-2021 by Artur Wroblewski <wrobell@riseup.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied | warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Bluetooth services implemented by BlueZ pr | otocol stack.
"""
from .data import Make, ServiceType, Trigger, TriggerCondition
from .service import ServiceInterface, register_service
from .util import to_uuid
# Register the standard Bluetooth Battery service (UUID 0x180f): read the
# 'Percentage' property (signature 'y') of BlueZ's org.bluez.Battery1
# D-Bus interface, refreshed when the property changes (ON_CHANGE trigger).
register_service(
    Make.STANDARD,
    ServiceType.BATTERY_LEVEL,
    ServiceInterface(
        to_uuid(0x180f),
        'org.bluez.Battery1',
        'Percentage',
        'y'
    ),
    trigger=Trigger(TriggerCondition.ON_CHANGE),
)
# vim: sw=4:et:ai
|
p2_test_server.py']
def cloud_to_prod_env(self):
    """No extra client environment is needed for cloud-to-prod runs."""
    return dict()
def global_env(self):
    """No global environment overrides for this runner."""
    return dict()
def unimplemented_test_cases(self):
    """All standard interop cases are unimplemented for this runner."""
    return _TEST_CASES
def unimplemented_test_cases_server(self):
    """All standard interop cases are unimplemented server-side too."""
    return _TEST_CASES
def __str__(self):
    # Language name used for registry keys and safename.
    return 'http2'
class Http2Client:
    """HTTP/2 interop test client.

    Pretends to be a language so it can be built and run by the interop
    harness, even though it is not one.
    """

    def __init__(self):
        self.client_cwd = None
        self.safename = str(self)

    def __str__(self):
        return 'http2'

    def client_cmd(self, args):
        """Command line for the compiled http2 interop test binary."""
        base = ['tools/http2_interop/http2_interop.test', '-test.v']
        return base + args

    def cloud_to_prod_env(self):
        return dict()

    def global_env(self):
        return dict()

    def unimplemented_test_cases(self):
        # None of the standard interop cases apply to this pseudo-language.
        return _TEST_CASES

    def unimplemented_test_cases_server(self):
        return []
class NodeLanguage:
    """Node.js interop client/server definition."""

    def __init__(self):
        self.client_cwd = None
        self.server_cwd = None
        self.safename = str(self)

    def __str__(self):
        return 'node'

    def _with_nvm(self, script, args):
        # node is launched through nvm so the right version is picked up.
        return ['tools/run_tests/interop/with_nvm.sh',
                'node', script] + args

    def client_cmd(self, args):
        return self._with_nvm('src/node/interop/interop_client.js', args)

    def server_cmd(self, args):
        return self._with_nvm('src/node/interop/interop_server.js', args)

    def cloud_to_prod_env(self):
        return dict()

    def global_env(self):
        return dict()

    def unimplemented_test_cases(self):
        return _SKIP_COMPRESSION

    def unimplemented_test_cases_server(self):
        return _SKIP_COMPRESSION
class PHPLanguage:
    """PHP interop client definition (client only; no server)."""

    def __init__(self):
        self.client_cwd = None
        self.safename = str(self)

    def __str__(self):
        return 'php'

    def client_cmd(self, args):
        base = ['src/php/bin/interop_client.sh']
        return base + args

    def cloud_to_prod_env(self):
        return dict()

    def global_env(self):
        return dict()

    def unimplemented_test_cases(self):
        return _SKIP_COMPRESSION

    def unimplemented_test_cases_server(self):
        return []
class PHP7Language:
    """PHP 7 interop client definition (client only; no server)."""

    def __init__(self):
        self.client_cwd = None
        self.safename = str(self)

    def __str__(self):
        return 'php7'

    def client_cmd(self, args):
        base = ['src/php/bin/interop_client.sh']
        return base + args

    def cloud_to_prod_env(self):
        return dict()

    def global_env(self):
        return dict()

    def unimplemented_test_cases(self):
        return _SKIP_COMPRESSION

    def unimplemented_test_cases_server(self):
        return []
class RubyLanguage:
    """Ruby interop client/server definition."""

    def __init__(self):
        self.client_cwd = None
        self.server_cwd = None
        self.safename = str(self)

    def __str__(self):
        return 'ruby'

    def _with_rvm(self, script, args):
        # ruby is launched through rvm so the right version is picked up.
        return ['tools/run_tests/interop/with_rvm.sh',
                'ruby', script] + args

    def client_cmd(self, args):
        return self._with_rvm('src/ruby/pb/test/client.rb', args)

    def server_cmd(self, args):
        return self._with_rvm('src/ruby/pb/test/server.rb', args)

    def cloud_to_prod_env(self):
        return dict()

    def global_env(self):
        return dict()

    def unimplemented_test_cases(self):
        return _SKIP_SERVER_COMPRESSION

    def unimplemented_test_cases_server(self):
        return _SKIP_COMPRESSION
class PythonLanguage:
    """Python interop client/server definition."""

    def __init__(self):
        self.client_cwd = None
        self.server_cwd = None
        self.safename = str(self)

    def __str__(self):
        return 'python'

    def _run_interop_cmd(self, role, args):
        # Client and server both go through setup.py's run_interop command;
        # interop flags are forwarded as one quoted --args value.
        return [
            'py27/bin/python',
            'src/python/grpcio_tests/setup.py',
            'run_interop',
            role,
            '--args="{}"'.format(' '.join(args))
        ]

    def client_cmd(self, args):
        return self._run_interop_cmd('--client', args)

    def client_cmd_http2interop(self, args):
        base = [
            'py27/bin/python',
            'src/python/grpcio_tests/tests/http2/_negative_http2_client.py',
        ]
        return base + args

    def cloud_to_prod_env(self):
        return dict()

    def server_cmd(self, args):
        return self._run_interop_cmd('--server', args)

    def global_env(self):
        return {'LD_LIBRARY_PATH': '{}/libs/opt'.format(DOCKER_WORKDIR_ROOT),
                'PYTHONPATH': '{}/src/python/gens'.format(DOCKER_WORKDIR_ROOT)}

    def unimplemented_test_cases(self):
        return _SKIP_COMPRESSION

    def unimplemented_test_cases_server(self):
        return _SKIP_COMPRESSION
# Registry of every supported interop language, keyed by the CLI name.
# Each value is a singleton language-definition object constructed once
# at import time.
_LANGUAGES = {
    'c++' : CXXLanguage(),
    'csharp' : CSharpLanguage(),
    'csharpcoreclr' : CSharpCoreCLRLanguage(),
    'go' : GoLanguage(),
    'java' : JavaLanguage(),
    'node' : NodeLanguage(),
    'php' : PHPLanguage(),
    'php7' : PHP7Language(),
    'ruby' : RubyLanguage(),
    'python' : PythonLanguage(),
}
# languages supported as cloud_to_cloud servers
_SERVERS = ['c++', 'node', 'csharp', 'csharpcoreclr', 'java', 'go', 'ruby', 'python']

# Standard interop test cases every client is expected to run.
_TEST_CASES = ['large_unary', 'empty_unary', 'ping_pong',
               'empty_stream', 'client_streaming', 'server_streaming',
               'cancel_after_begin', 'cancel_after_first_response',
               'timeout_on_sleeping_server', 'custom_metadata',
               'status_code_and_message', 'unimplemented_method',
               'client_compressed_unary', 'server_compressed_unary',
               'client_compressed_streaming', 'server_compressed_streaming',
               'unimplemented_service']

# Test cases that exercise authentication flows (see auth_options()).
_AUTH_TEST_CASES = ['compute_engine_creds', 'jwt_token_creds',
                    'oauth2_auth_token', 'per_rpc_creds']

# HTTP/2-level interop cases.
_HTTP2_TEST_CASES = ['tls', 'framing']

# Negative HTTP/2 cases run against a deliberately misbehaving server.
_HTTP2_BADSERVER_TEST_CASES = ['rst_after_header', 'rst_after_data', 'rst_during_data',
                               'goaway', 'ping', 'max_streams']

# Path where the grpc checkout is mounted inside the docker containers.
DOCKER_WORKDIR_ROOT = '/var/local/git/grpc'
def docker_run_cmdline(cmdline, image, docker_args=None, cwd=None, environ=None):
    """Wraps given cmdline array to create 'docker run' cmdline from it.

    Args:
        cmdline: command (list of args) to run inside the container.
        image: docker image name.
        docker_args: extra args passed to `docker run` itself.
        cwd: working directory, relative to DOCKER_WORKDIR_ROOT.
        environ: dict of env vars to export into the container.

    Returns:
        The full `docker run ...` command as a list of args.
    """
    # BUG FIX: the default used to be a shared mutable list ([]); use
    # None as sentinel so call sites can never alias a shared default.
    if docker_args is None:
        docker_args = []
    docker_cmdline = ['docker', 'run', '-i', '--rm=true']
    # turn environ into -e docker args
    if environ:
        for k, v in environ.items():
            docker_cmdline += ['-e', '%s=%s' % (k, v)]
    # set working directory
    workdir = DOCKER_WORKDIR_ROOT
    if cwd:
        workdir = os.path.join(workdir, cwd)
    docker_cmdline += ['-w', workdir]
    docker_cmdline += docker_args + [image] + cmdline
    return docker_cmdline
def bash_cmdline(cmdline):
    """Creates bash -c cmdline from args list."""
    # Use login shell:
    # * makes error messages clearer if executables are missing
    joined = ' '.join(cmdline)
    return ['bash', '-c', joined]
def auth_options(language, test_case):
    """Returns (cmdline, env) tuple with cloud_to_prod_auth test options."""
    language = str(language)
    cmdargs = []
    env = {}
    # TODO(jtattermusch): this file path only works inside docker
    key_filepath = '/root/service_account/stubbyCloudTestingTest-ee3fce360ac5.json'
    oauth_scope_arg = '--oauth_scope=https://www.googleapis.com/auth/xapi.zoo'
    key_file_arg = '--service_account_key_file=%s' % key_filepath
    default_account_arg = '--default_service_account=830293263384-compute@developer.gserviceaccount.com'

    needs_key = test_case in ('jwt_token_creds', 'per_rpc_creds', 'oauth2_auth_token')
    if needs_key:
        env_driven = language in ('csharp', 'csharpcoreclr', 'node', 'php',
                                  'php7', 'python', 'ruby')
        if env_driven:
            # These stacks read the key from the environment instead of a flag.
            env['GOOGLE_APPLICATION_CREDENTIALS'] = key_filepath
        else:
            cmdargs.append(key_file_arg)

    if test_case in ('per_rpc_creds', 'oauth2_auth_token'):
        cmdargs.append(oauth_scope_arg)

    if test_case == 'oauth2_auth_token' and language == 'c++':
        # C++ oauth2 test uses GCE creds and thus needs to know the default account
        cmdargs.append(default_account_arg)

    if test_case == 'compute_engine_creds':
        cmdargs.extend([oauth_scope_arg, default_account_arg])

    return (cmdargs, env)
def _job_kill_handler(job):
    """Kill handler for a timed-out job: stop its docker container, if any."""
    if job._spec.container_name:
        dockerjob.docker_kill(job._spec.container_name)
        # When the job times out and we decide to kill it,
        # we need to wait a while before restarting the job
        # to prevent "container name already in use" error.
        # TODO(jtattermusch): figure out a cleaner way to do this.
        time.sleep(2)
def cloud_to_prod_jobspec(language, test_case, server_host_name,
server_host_detail, docker_image=None, auth=False):
"""Creates jobspec for cloud-to-prod interop test"""
container_name = None
cmdargs = [
'--server_host=%s' % serve |
# coding: utf-8
from __future__ import absolute_import
import flask
import auth
import model
import util
from main import app
# OAuth 1.0a endpoints and credentials for "Sign in with Yahoo".
# NOTE(review): model.Config.get_master_db() is invoked twice at import
# time; presumably cheap/cached -- confirm before refactoring.
yahoo_config = dict(
    access_token_url='https://api.login.yahoo.com/oauth/v2/get_token',
    authorize_url='https://api.login.yahoo.com/oauth/v2/request_auth',
    base_url='https://query.yahooapis.com/',
    consumer_key=model.Config.get_master_db().yahoo_consumer_key,
    consumer_secret=model.Config.get_master_db().yahoo_consumer_secret,
    request_token_url='https://api.login.yahoo.com/oauth/v2/get_request_token',
)

# OAuth client registered under the provider name 'yahoo'.
yahoo = auth.create_oauth_app(yahoo_config, 'yahoo')
@app.route('/api/auth/callback/yahoo/')
def yahoo_authorized():
    """OAuth callback: fetch the Yahoo profile and sign the user in."""
    response = yahoo.authorized_response()
    if response is None:
        # The user rejected the authorization request on Yahoo's page.
        flask.flash('You denied the request to sign in.')
        return flask.redirect(util.get_next_url())
    # Persist the token pair; get_yahoo_oauth_token() reads it back.
    flask.session['oauth_token'] = (
        response['oauth_token'],
        response['oauth_token_secret'],
    )
    fields = 'guid, emails, familyName, givenName, nickname'
    # Query the signed-in user's social profile via YQL.
    me = yahoo.get(
        '/v1/yql',
        data={
            'format': 'json',
            'q': 'select %s from social.profile where guid = me;' % fields,
            'realm': 'yahooapis.com',
        },
    )
    user_db = retrieve_user_from_yahoo(me.data['query']['results']['profile'])
    return auth.signin_user_db(user_db)
@yahoo.tokengetter
def get_yahoo_oauth_token():
    """Return the (token, secret) pair stored during the callback, or None."""
    token_pair = flask.session.get('oauth_token')
    return token_pair
@app.route('/signin/yahoo/')
def signin_yahoo():
    """Entry point that starts the Yahoo OAuth sign-in flow."""
    provider = yahoo
    return auth.signin_oauth(provider)
def retrieve_user_from_yahoo(response):
    """Find or create a user record from a Yahoo 'social.profile' dict.

    Args:
        response: profile dict with 'guid' and optional 'emails',
            'givenName', 'familyName', 'nickname' keys.

    Returns:
        The existing user (matched on auth_id) or a newly created one.
    """
    auth_id = 'yahoo_%s' % response['guid']
    user_db = model.User.get_by('auth_ids', auth_id)
    if user_db:
        return user_db
    names = [response.get('givenName', ''), response.get('familyName', '')]
    emails = response.get('emails', {})
    # Yahoo returns a bare dict when there is exactly one email entry.
    if not isinstance(emails, list):
        emails = [emails]
    emails = [e for e in emails if 'handle' in e]
    # BUG FIX: sort descending so the primary address sorts FIRST; the
    # previous ascending sort put primary last and emails[0] then picked
    # a non-primary address.
    emails.sort(key=lambda e: e.get('primary', False), reverse=True)
    email = emails[0]['handle'] if emails else ''
    return auth.create_user_db(
        auth_id=auth_id,
        name=' '.join(names).strip() or response['nickname'],
        username=response['nickname'],
        email=email,
        verified=bool(email),
    )
|
#!/usr/bin/env python -tt
# encoding: utf-8
#
"""Use a descriptive macro instead of assert(false);"""
error_msg = 'Use NEVER_HERE() from base/macros.h here.'
# Matches `assert(0)` / `assert(false)`, allowing spaces around the
# parentheses and the argument.
# BUG FIX: the pattern previously contained a stray ` | ` which turned it
# into an alternation whose right branch (` *\)`) matched any bare `)`.
regexp = r"""assert *\( *(0|false) *\)"""
forbidden = [
    'assert(0)',
    'assert(false)',
]
allowed = [
    'NEVER_HERE()',
]
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2016 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module contains the :class:`.StringMixIn` type, which implements the
interface for the ``unicode`` type (``str`` on py3k) in a dynamic manner.
"""
from __future__ import unicode_literals
from sys import getdefaultencoding
from .compat import bytes, py26, py3k, str
__all__ = ["StringMixIn"]
def inheritdoc(method):
    """Set __doc__ of *method* to __doc__ of *method* in its parent class.

    Since this is used on :class:`.StringMixIn`, the "parent class" used is
    ``str``. This function can be used as a decorator.
    """
    parent_method = getattr(str, method.__name__)
    method.__doc__ = parent_method.__doc__
    return method
class StringMixIn(object):
    """Implement the interface for ``unicode``/``str`` in a dynamic manner.

    To use this class, inherit from it and override the :meth:`__unicode__`
    method (same on py3k) to return the string representation of the object.
    The various string methods will operate on the value of :meth:`__unicode__`
    instead of the immutable ``self`` like the regular ``str`` type.
    """
    # On py3k, __str__ is the text protocol and __bytes__ encodes with the
    # interpreter's default encoding; on py2, __str__ returns bytes.
    if py3k:
        def __str__(self):
            return self.__unicode__()
        def __bytes__(self):
            return bytes(self.__unicode__(), getdefaultencoding())
    else:
        def __str__(self):
            return bytes(self.__unicode__())

    def __unicode__(self):
        # Subclasses must override this single method.
        raise NotImplementedError()

    def __repr__(self):
        return repr(self.__unicode__())

    # Rich comparisons delegate to the string value, so mixin instances
    # compare against plain strings and each other.
    def __lt__(self, other):
        return self.__unicode__() < other
    def __le__(self, other):
        return self.__unicode__() <= other
    def __eq__(self, other):
        return self.__unicode__() == other
    def __ne__(self, other):
        return self.__unicode__() != other
    def __gt__(self, other):
        return self.__unicode__() > other
    def __ge__(self, other):
        return self.__unicode__() >= other

    # Truthiness mirrors the underlying string (empty -> False).
    if py3k:
        def __bool__(self):
            return bool(self.__unicode__())
    else:
        def __nonzero__(self):
            return bool(self.__unicode__())

    def __len__(self):
        return len(self.__unicode__())

    def __iter__(self):
        for char in self.__unicode__():
            yield char

    def __getitem__(self, key):
        return self.__unicode__()[key]

    def __reversed__(self):
        return reversed(self.__unicode__())

    def __contains__(self, item):
        return str(item) in self.__unicode__()

    def __getattr__(self, attr):
        # Any other str method/attribute is looked up on the string value.
        return getattr(self.__unicode__(), attr)

    if py3k:
        maketrans = str.maketrans  # Static method can't rely on __getattr__

    # Python 2.6's unicode.encode lacks keyword defaults handled here.
    if py26:
        @inheritdoc
        def encode(self, encoding=None, errors=None):
            if encoding is None:
                encoding = getdefaultencoding()
            if errors is not None:
                return self.__unicode__().encode(encoding, errors)
            return self.__unicode__().encode(encoding)

# The decorator is module-internal; drop it from the public namespace.
del inheritdoc
|
#############
# Python types and their operations
##########################################################################
class Callable:
    """Opcode helpers for the org/python/Callable interface."""

    class invoke:
        """Emit INVOKEINTERFACE for Callable.invoke(Object[], Map) -> Object."""

        def process(self, context):
            invoke_op = JavaOpcodes.INVOKEINTERFACE(
                'org/python/Callable',
                'invoke',
                args=['[Lorg/python/Object;', 'Ljava/util/Map;'],
                returns='Lorg/python/Object;'
            )
            context.add_opcodes(invoke_op)
class Iterable:
    """Opcode helpers for the org/python/Iterable interface."""

    class next:
        """Emit INVOKEINTERFACE for Iterable.__next__() -> Object."""

        def process(self, context):
            next_op = JavaOpcodes.INVOKEINTERFACE(
                'org/python/Iterable',
                '__next__',
                args=[],
                returns='Lorg/python/Object;'
            )
            context.add_opcodes(next_op)
class Type:
    """Opcode helpers for obtaining org/python/types/Type instances."""

    class for_class:
        """Push the Type for a Java class reference (LDC of a Class)."""

        def __init__(self, name):
            self.name = name

        def process(self, context):
            ops = [
                JavaOpcodes.LDC_W(JavaClassref(self.name)),
                JavaOpcodes.INVOKESTATIC(
                    'org/python/types/Type',
                    'pythonType',
                    args=['Ljava/lang/Class;'],
                    returns='Lorg/python/types/Type;'
                ),
            ]
            context.add_opcodes(*ops)

    class for_name:
        """Push the Type looked up by class-name string."""

        def __init__(self, name):
            self.name = name

        def process(self, context):
            ops = [
                JavaOpcodes.LDC_W(self.name),
                JavaOpcodes.INVOKESTATIC(
                    'org/python/types/Type',
                    'pythonType',
                    args=['Ljava/lang/String;'],
                    returns='Lorg/python/types/Type;'
                ),
            ]
            context.add_opcodes(*ops)

    class to_python:
        """Convert a Java Object to its org/python/Object wrapper."""

        def process(self, context):
            convert_op = JavaOpcodes.INVOKESTATIC(
                'org/python/types/Type',
                'toPython',
                args=['Ljava/lang/Object;'],
                returns='Lorg/python/Object;'
            )
            context.add_opcodes(convert_op)
class Object:
    # Opcode helpers that operate on any org/python/Object through its
    # interface methods (emitted as INVOKEINTERFACE).

    class get_attribute:
        # Look up an attribute by name. With use_null=True the
        # __getattribute_null variant is called instead of
        # __getattribute__ (per the name, presumably a non-raising
        # variant -- TODO confirm in org/python/Object).
        def __init__(self, attr, use_null=False):
            self.attr = attr
            self.use_null = use_null

        def process(self, context):
            # Push the attribute name as a string constant.
            context.add_opcodes(
                JavaOpcodes.LDC_W(self.attr),
            )
            if self.use_null:
                context.add_opcodes(
                    JavaOpcodes.INVOKEINTERFACE(
                        'org/python/Object',
                        '__getattribute_null',
                        args=['Ljava/lang/String;'],
                        returns='Lorg/python/Object;'
                    ),
                )
            else:
                context.add_opcodes(
                    JavaOpcodes.INVOKEINTERFACE(
                        'org/python/Object',
                        '__setattr__' if False else '__getattribute__',  # NOTE(review): plain __getattribute__ call
                        args=['Ljava/lang/String;'],
                        returns='Lorg/python/Object;'
                    ),
                )

    class set_attr:
        # Set attribute `attr` to the object currently on the stack.
        def __init__(self, attr):
            self.attr = attr

        def process(self, context):
            context.add_opcodes(
                # Push the name, then SWAP so the value ends up as the
                # second argument of __setattr__(String, Object).
                JavaOpcodes.LDC_W(self.attr),
                JavaOpcodes.SWAP(),
                JavaOpcodes.INVOKEINTERFACE(
                    'org/python/Object',
                    '__setattr__',
                    args=['Ljava/lang/String;', 'Lorg/python/Object;'],
                    returns='V'
                ),
            )

    class del_attr:
        # Delete an attribute; if attr is None the name is assumed to be
        # on the stack already.
        def __init__(self, attr=None):
            self.attr = attr

        def process(self, context):
            if self.attr:
                context.add_opcodes(
                    JavaOpcodes.LDC_W(self.attr),
                )
            context.add_opcodes(
                JavaOpcodes.INVOKEINTERFACE(
                    'org/python/Object',
                    '__delattr__',
                    args=['Ljava/lang/String;'],
                    returns='V'
                ),
            )

    class get_item:
        # obj[key] -> Object via __getitem__(Object).
        def process(self, context):
            context.add_opcodes(
                JavaOpcodes.INVOKEINTERFACE(
                    'org/python/Object',
                    '__getitem__',
                    args=['Lorg/python/Object;'],
                    returns='Lorg/python/Object;'
                ),
            )

    class set_item:
        # obj[key] = value via __setitem__(Object, Object).
        def process(self, context):
            context.add_opcodes(
                JavaOpcodes.INVOKEINTERFACE(
                    'org/python/Object',
                    '__setitem__',
                    args=['Lorg/python/Object;', 'Lorg/python/Object;'],
                    returns='V'
                ),
            )

    class del_item:
        # del obj[key] via __delitem__(Object).
        def process(self, context):
            context.add_opcodes(
                JavaOpcodes.INVOKEINTERFACE(
                    'org/python/Object',
                    '__delitem__',
                    args=['Lorg/python/Object;'],
                    returns='V'
                ),
            )

    class iter:
        # NOTE: class name shadows the builtin `iter` inside this scope.
        # Calls __iter__() -> org/python/Iterable.
        def process(self, context):
            context.add_opcodes(
                JavaOpcodes.INVOKEINTERFACE(
                    'org/python/Object',
                    '__iter__',
                    args=[],
                    returns='Lorg/python/Iterable;'
                )
            )

    class as_boolean:
        # Call __bool__(), cast the result to types/Bool, and read its
        # primitive `value` field (Z), leaving a JVM boolean on the stack.
        def process(self, context):
            context.add_opcodes(
                JavaOpcodes.INVOKEINTERFACE(
                    'org/python/Object',
                    '__bool__',
                    args=[],
                    returns='Lorg/python/Object;'
                ),
                JavaOpcodes.CHECKCAST('org/python/types/Bool'),
                JavaOpcodes.GETFIELD('org/python/types/Bool', 'value', 'Z'),
            )
class NONE:
    # Push the Python `None` singleton (static field NoneType.NONE).
    def process(self, context):
        context.add_opcodes(
            JavaOpcodes.GETSTATIC('org/python/types/NoneType', 'NONE', 'Lorg/python/Object;')
        )
class Dict:
    """Construct an empty org/python/types/Dict (NEW + no-arg <init>)."""

    def process(self, context):
        ops = [
            Java.New('org/python/types/Dict'),
            Java.Init('org/python/types/Dict'),
        ]
        context.add_opcodes(*ops)

    class set_item:
        """Emit Dict.__setitem__(Object, Object) -> void."""

        def process(self, context):
            setitem_op = JavaOpcodes.INVOKEVIRTUAL(
                'org/python/types/Dict',
                '__setitem__',
                args=['Lorg/python/Object;', 'Lorg/python/Object;'],
                returns='V'
            )
            context.add_opcodes(setitem_op)
class Set:
    """Construct an empty org/python/types/Set (NEW + no-arg <init>)."""

    def process(self, context):
        ops = [
            Java.New('org/python/types/Set'),
            Java.Init('org/python/types/Set'),
        ]
        context.add_opcodes(*ops)

    class add:
        """Emit Set.add(Object) and POP its Object return value."""

        def process(self, context):
            add_op = JavaOpcodes.INVOKEVIRTUAL(
                'org/python/types/Set',
                'add',
                args=['Lorg/python/Object;'],
                returns='Lorg/python/Object;'
            )
            context.add_opcodes(add_op, JavaOpcodes.POP())
class List:
    """Construct an empty org/python/types/List (NEW + no-arg <init>)."""

    def process(self, context):
        ops = [
            Java.New('org/python/types/List'),
            Java.Init('org/python/types/List'),
        ]
        context.add_opcodes(*ops)

    class append:
        """Emit List.append(Object) and POP its Object return value."""

        def process(self, context):
            append_op = JavaOpcodes.INVOKEVIRTUAL(
                'org/python/types/List',
                'append',
                args=['Lorg/python/Object;'],
                returns='Lorg/python/Object;'
            )
            context.add_opcodes(append_op, JavaOpcodes.POP())
class Str:
def __init__(self, value=None):
self.value = value
def process(self, context):
if self.value:
context.add_opcodes(
Java.New('org/python/types/Str'),
JavaOpcodes.LDC_W(self.value),
Java.Init('org/python/types/Str', 'Ljava/lang/String;')
)
else:
context.add_opcodes(
Java.New('org/python/types/Str'),
J |
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO: equation needs to be fixed up in the docstring. Jamil#19
"""
def __init__(self, dimensions=2):
    # Standard benchmark setup: N-dimensional box [-100, 100]^N.
    Benchmark.__init__(self, dimensions)
    self._bounds = zip([-100.0] * self.N, [100.0] * self.N)
    # Global minimum f(x) = 0 at the origin.
    self.global_optimum = [[0 for _ in range(self.N)]]
    self.fglob = 0.0
def fun(self, x, *args):
    """Evaluate the objective at 2-D point *x*; bumps the eval counter."""
    self.nfev += 1
    quadratic = x[0] ** 2 + 2 * x[1] ** 2
    ripple = 0.3 * cos(3 * pi * x[0] + 4 * pi * x[1])
    return quadratic - ripple + 0.3
class BoxBetts(Benchmark):
    r"""
    BoxBetts objective function.

    A multimodal minimization problem:

    .. math::

        f_{\text{BoxBetts}}(x) = \sum_{i=1}^{10} g_i(x)^2, \qquad
        g_i(x) = e^{-0.1 i x_1} - e^{-0.1 i x_2}
                 - x_3 \left[e^{-0.1 i} - e^{-i}\right]

    Here, :math:`x_1 \in [0.9, 1.2], x_2 \in [9, 11.2], x_3 \in [0.9, 1.2]`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 10, 1]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
       For Global Optimization Problems Int. Journal of Mathematical Modelling
       and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=3):
        Benchmark.__init__(self, dimensions)
        self._bounds = ([0.9, 1.2], [9.0, 11.2], [0.9, 1.2])
        self.global_optimum = [[1.0, 10.0, 1.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        # Vector of i = 1..10 evaluated at once.
        idx = arange(1, 11)
        residual = (exp(-0.1 * idx * x[0]) - exp(-0.1 * idx * x[1])
                    - (exp(-0.1 * idx) - exp(-idx)) * x[2])
        return sum(residual ** 2)
class Branin01(Benchmark):
    r"""
    Branin01 objective function.

    A multimodal minimization problem:

    .. math::

        f_{\text{Branin01}}(x) = \left(x_2 - \frac{5.1}{4 \pi^2} x_1^2
        + \frac{5 x_1}{\pi} - 6\right)^2
        + 10 \left(1 - \frac{1}{8 \pi}\right) \cos(x_1) + 10

    with :math:`x_1 \in [-5, 10], x_2 \in [0, 15]`

    *Global optimum*: :math:`f(x) = 0.39788735772973816` for :math:`x =
    [-\pi, 12.275]` or :math:`x = [\pi, 2.275]` or :math:`x = [3\pi, 2.475]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
       For Global Optimization Problems Int. Journal of Mathematical Modelling
       and Numerical Optimisation, 2013, 4, 150-194.

    TODO: Jamil#22, one of the solutions is different
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-5., 10.), (0., 15.)]
        self.global_optimum = [[-pi, 12.275], [pi, 2.275], [3 * pi, 2.475]]
        self.fglob = 0.39788735772973816

    def fun(self, x, *args):
        self.nfev += 1
        parabola = x[1] - (5.1 / (4 * pi ** 2)) * x[0] ** 2 + 5 * x[0] / pi - 6
        return parabola ** 2 + 10 * (1 - 1 / (8 * pi)) * cos(x[0]) + 10
class Branin02(Benchmark):
    r"""
    Branin02 objective function.

    A multimodal minimization problem:

    .. math::

        f_{\text{Branin02}}(x) = \left(x_2 - \frac{5.1}{4 \pi^2} x_1^2
        + \frac{5 x_1}{\pi} - 6\right)^2
        + 10 \left(1 - \frac{1}{8 \pi}\right) \cos(x_1) \cos(x_2)
        + \log(x_1^2 + x_2^2 + 1) + 10

    with :math:`x_i \in [-5, 15]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 5.559037` for :math:`x = [-3.2, 12.53]`

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-5.0, 15.0), (-5.0, 15.0)]
        self.global_optimum = [[-3.1969884, 12.52625787]]
        self.fglob = 5.5589144038938247

    def fun(self, x, *args):
        self.nfev += 1
        parabola = x[1] - (5.1 / (4 * pi ** 2)) * x[0] ** 2 + 5 * x[0] / pi - 6
        cosine = 10 * (1 - 1 / (8 * pi)) * cos(x[0]) * cos(x[1])
        return parabola ** 2 + cosine + log(x[0] ** 2.0 + x[1] ** 2.0 + 1.0) + 10
class Brent(Benchmark):
    r"""
    Brent objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Brent}}(x) = (x_1 + 10)^2 + (x_2 + 10)^2
        + e^{(-x_1^2 - x_2^2)}

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [-10, -10]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
       For Global Optimization Problems Int. Journal of Mathematical Modelling
       and Numerical Optimisation, 2013, 4, 150-194.

    TODO solution is different to Jamil#24
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = zip([-10.0] * self.N, [10.0] * self.N)
        self.custom_bounds = ([-10, 2], [-10, 2])
        self.global_optimum = [[-10.0, -10.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        shifted_bowl = (x[0] + 10.0) ** 2.0 + (x[1] + 10.0) ** 2.0
        gaussian_bump = exp(-x[0] ** 2.0 - x[1] ** 2.0)
        return shifted_bowl + gaussian_bump
class Brown(Benchmark):
    r"""
    Brown objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Brown}}(x) = \sum_{i=1}^{n-1}\left[
        \left(x_i^2\right)^{x_{i + 1}^2 + 1}
        + \left(x_{i + 1}^2\right)^{x_i^2 + 1}\right]

    with :math:`x_i \in [-1, 4]` for :math:`i=1,...,n`.

    *Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for
    :math:`i=1,...,n`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
       For Global Optimization Problems Int. Journal of Mathematical Modelling
       and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = zip([-1.0] * self.N, [4.0] * self.N)
        self.custom_bounds = ([-1.0, 1.0], [-1.0, 1.0])
        self.global_optimum = [[0 for _ in range(self.N)]]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        # Pairwise terms over consecutive coordinates.
        head, tail = x[:-1], x[1:]
        return sum((head ** 2.0) ** (tail ** 2.0 + 1.0)
                   + (tail ** 2.0) ** (head ** 2.0 + 1.0))
class Bukin02(Benchmark):
    r"""
    Bukin02 objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Bukin02}}(x) = 100 (x_2^2 - 0.01x_1^2 + 1)
        + 0.01(x_1 + 10)^2

    with :math:`x_1 \in [-15, -5], x_2 \in [-3, 3]`

    *Global optimum*: :math:`f(x) = -124.75` for :math:`x = [-15, 0]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
       For Global Optimization Problems Int. Journal of Mathematical Modelling
       and Numerical Optimisation, 2013, 4, 150-194.

    TODO: I think that Gavana and Jamil are wrong on this function. In both
    sources the x[1] term is not squared. As such there will be a minimum at
    the smallest value of x[1].
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-15.0, -5.0), (-3.0, 3.0)]
        self.global_optimum = [[-15.0, 0.0]]
        self.fglob = -124.75

    def fun(self, x, *args):
        self.nfev += 1
        valley = 100 * (x[1] ** 2 - 0.01 * x[0] ** 2 + 1.0)
        penalty = 0.01 * (x[0] + 10.0) ** 2.0
        return valley + penalty
class Bukin04(Benchmark):
r"""
Bukin04 objective function.
The Bukin04 [1]_ global optimization problem is a multimodal minimization
problem defined as follows:
.. math::
f_{\text{Bukin04}}(x) = 100 x_2^{2} + 0.01 \lvert{x_1 + 10}
\rvert
with :math:`x_1 \in [-1 |
#!/usr/bin/python
import os
import signal
import struct
import sys
import time
ALLOWED_INTERFACES = [ "cpu_dma_latency", "network_latency", "ne | twork_throughput" ]
def read_pmqos(name):
filename = "/dev/%s" % name
old = open(filename)
old_value = struct.unpack("i", old.read())[0]
print "PMQOS value for %s is %d"%(name, old_value)
if __name__ == "__main__":
    # The interface name is required as the first CLI argument.
    if len(sys.argv) < 2:
        print "Must specify what to read"
        sys.exit(1)
    read = sys.argv[1]
    # Refuse anything outside the PM QoS whitelist.
    if read not in ALLOWED_INTERFACES:
        print "Cannot read %s" % read
        sys.exit(1)
    read_pmqos(read)
|
tainerTest()
self.element = MockGuiWidget("")
self.setting_presenter = MockSettingPresenter(self.settings['file_extension'], self.element)
self.presenters = MockSettingPresenterContainer()
self.presenters.add(self.setting_presenter)
self.presenters.add(MockSettingPresenter(self.settings['overwrite_mode'],
MockGuiWidget(self.settings['overwrite_mode'].options['skip'])))
self.presenters.add(MockSettingPresenter(self.settings['ignore_invisible'], MockGuiWidget(False)))
def test_assign_setting_values_to_elements(self):
    """Setting values (and enabled state) propagate to the GUI elements."""
    self.settings['file_extension'].value = "png"
    self.settings['ignore_invisible'].value = True
    self.presenters.assign_setting_values_to_elements()
    self.assertEqual(self.presenters[self.settings['file_extension']].value, "png")
    # "png" disables the file_extension element per the container's rules.
    self.assertEqual(self.presenters[self.settings['file_extension']].enabled, False)
    self.assertEqual(self.presenters[self.settings['ignore_invisible']].value, True)
def test_assign_element_values_to_settings_with_streamline(self):
    """With streamlining active, element values are overridden on assignment."""
    self.presenters[self.settings['file_extension']].value = "jpg"
    self.presenters[self.settings['ignore_invisible']].value = True
    self.presenters.assign_element_values_to_settings()
    # NOTE(review): "png" (not "jpg") is expected here -- presumably the
    # streamline logic rewrites the value; confirm against the container.
    self.assertEqual(self.settings['file_extension'].value, "png")
    self.assertEqual(self.settings['file_extension'].ui_enabled, False)
def test_assign_element_values_to_settings_no_streamline(self):
    """Without streamlining, element values are taken over verbatim."""
    # `value_changed_signal` is None, so no event handlers are invoked.
    self.presenters.connect_value_changed_events()
    self.presenters[self.settings['file_extension']].value = "jpg"
    self.presenters[self.settings['ignore_invisible']].value = True
    self.presenters.assign_element_values_to_settings()
    self.assertEqual(self.settings['file_extension'].value, "jpg")
    self.assertEqual(self.settings['file_extension'].ui_enabled, True)
#===============================================================================
class TestShelfSettingStream(unittest.TestCase):
    """Tests for GimpShelfSettingStream against a mocked gimpshelf.shelf."""

    @mock.patch(LIB_NAME + '.settings.gimpshelf.shelf', new=gimpmocks.MockGimpShelf())
    def setUp(self):
        self.prefix = 'prefix'
        self.stream = settings.GimpShelfSettingStream(self.prefix)
        self.settings = SettingContainerTest()

    @mock.patch(LIB_NAME + '.settings.gimpshelf.shelf', new=gimpmocks.MockGimpShelf())
    def test_write(self):
        # Written values appear in the shelf under prefix + setting name.
        self.settings['file_extension'].value = "png"
        self.settings['ignore_invisible'].value = True
        self.stream.write(self.settings)
        self.assertEqual(settings.gimpshelf.shelf[self.prefix + 'file_extension'], "png")
        self.assertEqual(settings.gimpshelf.shelf[self.prefix + 'ignore_invisible'], True)

    @mock.patch(LIB_NAME + '.settings.gimpshelf.shelf', new=gimpmocks.MockGimpShelf())
    def test_read(self):
        # Pre-populated shelf entries are read back into the settings.
        settings.gimpshelf.shelf[self.prefix + 'file_extension'] = "png"
        settings.gimpshelf.shelf[self.prefix + 'ignore_invisible'] = True
        self.stream.read([self.settings['file_extension'], self.settings['ignore_invisible']])
        self.assertEqual(self.settings['file_extension'].value, "png")
        self.assertEqual(self.settings['ignore_invisible'].value, True)

    @mock.patch(LIB_NAME + '.settings.gimpshelf.shelf', new=gimpmocks.MockGimpShelf())
    def test_read_settings_not_found(self):
        # Reading from an empty shelf raises SettingsNotFoundInStreamError.
        with self.assertRaises(settings.SettingsNotFoundInStreamError):
            self.stream.read(self.settings)

    @mock.patch(LIB_NAME + '.settings.gimpshelf.shelf', new=gimpmocks.MockGimpShelf())
    def test_read_invalid_setting_value(self):
        # An out-of-range stored value falls back to the setting's default.
        setting_with_invalid_value = settings.IntSetting('int', -1)
        setting_with_invalid_value.min_value = 0
        self.stream.write([setting_with_invalid_value])
        self.stream.read([setting_with_invalid_value])
        self.assertEqual(setting_with_invalid_value.value, setting_with_invalid_value.default_value)
@mock.patch('__builtin__.open')
class TestJSONFileSettingStream(unittest.TestCase):
    """Tests for JSONFileSettingStream with the builtin `open` mocked out."""

    def setUp(self):
        self.stream = settings.JSONFileSettingStream("/test/file")
        self.settings = SettingContainerTest()

    def test_write(self, mock_file):
        self.settings['file_extension'].value = "jpg"
        self.settings['ignore_invisible'].value = True
        mock_file.return_value.__enter__.return_value = MockStringIO()
        file_ = mock_file.return_value.__enter__.return_value
        self.stream.write(self.settings)
        # BUG FIX: the local used to be named `settings`, shadowing the
        # imported `settings` module; parsing also verifies valid JSON
        # was written.
        written = json.loads(file_.read())
        self.assertIsNotNone(written)
        self.assertEqual(self.settings['file_extension'].value, "jpg")
        self.assertEqual(self.settings['ignore_invisible'].value, True)

    def test_read(self, mock_file):
        # A write followed by a read round-trips the values.
        self.settings['file_extension'].value = "jpg"
        self.settings['ignore_invisible'].value = True
        mock_file.return_value.__enter__.return_value = MockStringIO()
        self.stream.write(self.settings)
        self.stream.read(self.settings)
        self.assertEqual(self.settings['file_extension'].value, "jpg")
        self.assertEqual(self.settings['ignore_invisible'].value, True)

    def test_write_ioerror_oserror(self, mock_file):
        # Both IOError and OSError are wrapped in SettingStreamWriteError.
        mock_file.side_effect = IOError("Whatever other I/O error it could be")
        with self.assertRaises(settings.SettingStreamWriteError):
            self.stream.write(self.settings)
        mock_file.side_effect = OSError("Permission denied or whatever other OS error it could be")
        with self.assertRaises(settings.SettingStreamWriteError):
            self.stream.write(self.settings)

    def test_read_ioerror_oserror(self, mock_file):
        # Both IOError and OSError are wrapped in SettingStreamReadError.
        mock_file.side_effect = IOError("File not found or whatever other I/O error it could be")
        with self.assertRaises(settings.SettingStreamReadError):
            self.stream.read(self.settings)
        mock_file.side_effect = OSError("Permission denied or whatever other OS error it could be")
        with self.assertRaises(settings.SettingStreamReadError):
            self.stream.read(self.settings)

    def test_read_invalid_file_extension(self, mock_file):
        # Malformed file contents surface as SettingStreamInvalidFormatError.
        mock_file.side_effect = ValueError("Invalid file format; must be JSON")
        with self.assertRaises(settings.SettingStreamInvalidFormatError):
            self.stream.read(self.settings)

    def test_read_invalid_setting_value(self, mock_file):
        # An out-of-range stored value falls back to the setting's default.
        mock_file.return_value.__enter__.return_value = MockStringIO()
        setting_with_invalid_value = settings.IntSetting('int', -1)
        setting_with_invalid_value.min_value = 0
        self.stream.write([setting_with_invalid_value])
        self.stream.read([setting_with_invalid_value])
        self.assertEqual(setting_with_invalid_value.value, setting_with_invalid_value.default_value)

    def test_read_settings_not_found(self, mock_file):
        mock_file.return_value.__enter__.return_value = MockStringIO()
        self.stream.write([settings.IntSetting('int', -1)])
        with self.assertRaises(settings.SettingsNotFoundInStreamError):
            self.stream.read(self.settings)
#===============================================================================
@mock.patch('__builtin__.open')
class TestSettingPersistor(unittest.TestCase):
@mock.patch(LIB_NAME + '.settings.gimpshelf.shelf', new=gimpmocks.MockGimpShelf())
def setUp(self):
self.settings = SettingContainerTest()
self.first_stream = settings.GimpShelfSettingStream('')
self.second_stream = settings.JSONFileSettingStream('filename')
self.setting_persistor = settings.SettingPersistor([self.first_stream, self.second_stream],
[self.first_stream, self.second_stream])
@mock.patch(LIB_NAME + '.settings.gimpshelf.shelf', new=gimpmocks.MockGimpShelf())
def test_load_save(self, mock_file):
mock_file.return_value.__enter__.return_value = MockStringIO()
self.settings['file_extension'].value = "png"
self.settings['ignore_invisible'].value = True
status = self.setting_persistor.save(self.settings)
self.assertEqual(status, settings.SettingPersistor.SUCCESS)
self.settings['file_extension'].value = "jpg"
self.settings['ignore_invisible'].value = False
status = self.setting_persistor.load(self.settings)
self.assertEqual(status, settings.SettingPersistor.SUCCE |
_methods.classes.TH1
from iqtools.iqbase import IQBase
from iqtools.tcapdata import TCAPData
from iqtools.tdmsdata import TDMSData
from iqtools.bindata import BINData
from iqtools.iqtdata import IQTData
from iqtools.tiqdata import TIQData
from iqtools.csvdata import CSVData
from iqtools.wavdata import WAVData
from iqtools.xdatdata import XDATData
# ------------ TOOLS ----------------------------
def get_iq_object(filename, header_filename=None):
    """
    Return a suitable reader object according to the file extension.

    Parameters
    ----------
    filename
        Path to the data file; its extension selects the reader class.
    header_filename
        Companion text header file, required only for TCAP (.dat) and
        XDAT (.xdat) data.

    Returns
    -------
    A reader instance, or None for an unknown extension or when a
    required header file is missing.
    """
    _, file_extension = os.path.splitext(filename)
    # Hoist the lowercasing and use an exclusive elif chain instead of
    # re-testing file_extension.lower() on every branch.
    ext = file_extension.lower()
    iq_data = None
    if ext in ('.txt', '.csv'):
        log.info('This is an ASCII file.')
        iq_data = CSVData(filename)
    elif ext == '.bin':
        log.info('This is a raw binary file.')
        iq_data = BINData(filename)
    elif ext == '.wav':
        log.info('This is a wav file.')
        iq_data = WAVData(filename)
    elif ext == '.iqt':
        log.info('This is an iqt file.')
        iq_data = IQTData(filename)
    elif ext == '.iq':
        log.info('This is an iq file.')
        iq_data = IQTData(filename)
    elif ext == '.tiq':
        log.info('This is a tiq file.')
        iq_data = TIQData(filename)
    elif ext == '.tdms':
        log.info('This is a TDMS file.')
        iq_data = TDMSData(filename)
    elif ext == '.dat':
        log.info('This is a TCAP file.')
        if not header_filename:
            # TCAP data is unreadable without its separate text header.
            log.info('TCAP files need a text header file as well. Aborting....')
            return None
        else:
            iq_data = TCAPData(filename, header_filename)
    elif ext == '.xdat':
        log.info('This is a XDAT file.')
        if not header_filename:
            log.info('XDAT files need a text header file as well. Aborting....')
            return None
        else:
            iq_data = XDATData(filename, header_filename)
    return iq_data
def get_eng_notation(value, unit='', decimal_place=2):
    """
    Format a number in engineering notation (powers-of-1000 prefixes).

    Parameters
    ----------
    value : float or int
        The input number.
    unit : str
        If given, an SI prefix letter plus this unit is appended;
        otherwise a bare 'e<exp>' exponent is used.
    decimal_place : int
        How many decimal places to keep (value is truncated, not rounded).

    Returns
    -------
    str
    """
    prefixes = {24: 'Y', 21: 'Z', 18: 'E', 15: 'P',
                12: 'T', 9: 'G', 6: 'M', 3: 'k', 0: '',
                -3: 'm', -6: 'u', -9: 'n', -12: 'p',
                -15: 'f', -18: 'a', -21: 'z', -24: 'y',
                }
    if value == 0:
        return '{}{}'.format(0, unit)
    sign = '-' if value < 0 else ''
    magnitude = abs(value)
    # Largest power of ten (in steps of 3) not exceeding the magnitude.
    exponent = max(k for k in prefixes if magnitude >= 10 ** k)
    if exponent == 0:
        suffix = ''
    else:
        suffix = prefixes[exponent] if unit else 'e{}'.format(exponent)
    scale = 10 ** decimal_place
    mantissa = int(magnitude / 10 ** exponent * scale) / scale
    return '{}{}{}{}'.format(sign, mantissa, suffix, unit)
def get_cplx_spectrogram(x, nframes, lframes):
    """Cut x into (nframes, lframes) frames and FFT each frame (row)."""
    frames = np.reshape(x, (nframes, lframes))
    return np.fft.fft(frames, axis=1)
def get_inv_cplx_spectrogram(zz, nframes, lframes):
    """Invert get_cplx_spectrogram: IFFT each frame, then flatten to 1-D."""
    frames = np.fft.ifft(zz, axis=1)
    return np.reshape(frames, (1, nframes * lframes))[0]
def get_root_th2d(xx, yy, zz, name='', title=''):
    """
    Convert meshgrid-style coordinate arrays plus values to a ROOT TH2D.

    Parameters
    ----------
    xx, yy : 2-D arrays as produced by np.meshgrid (x varies along axis 1,
        y along axis 0); only the edge values are used for the axis ranges.
    zz : 2-D array of bin contents, indexed as zz[j, i] = (y, x).
    name, title : str
        Passed straight to the TH2D constructor.

    Returns
    -------
    ROOT.TH2D with zz filled into the data bins.
    """
    from ROOT import TH2D
    n_x = np.shape(xx)[1]
    n_y = np.shape(yy)[0]
    h = TH2D(name, title, n_x, xx[0, 0], xx[0, -1], n_y, yy[0, 0], yy[-1, 0])
    for j in range(n_y):
        for i in range(n_x):
            # ROOT bin numbering starts at 1 (bin 0 is the underflow bin),
            # so the zero-based array indices must be offset by one;
            # without the offset the first column/row lands in underflow
            # and the last bins stay empty.
            h.SetBinContent(i + 1, j + 1, zz[j, i])
    return h
def make_test_signal(f, fs, length=1, nharm=0, noise=False):
    """Make a sine signal (plus nharm harmonics), optionally with noise."""
    t = np.arange(0, length, 1 / fs)
    # k = 0 contributes zeros; k = 1..nharm+1 are the fundamental and
    # its harmonics (same terms as the original accumulation loop)
    x = sum(np.sin(2 * np.pi * k * f * t) for k in range(nharm + 2))
    if noise:
        x = x + np.random.normal(0, 1, len(t))
    return t, x
def shift_phase(x, phase):
    """
    Add a constant phase offset to a signal in the frequency domain.

    x: complex or analytical signal
    phase: offset in radians
    returns: the phase-shifted complex signal
    """
    spectrum = np.fft.fft(x)
    shifted_angle = np.unwrap(np.angle(spectrum)) + phase
    shifted_spectrum = np.abs(spectrum) * np.exp(1j * shifted_angle)
    return np.fft.ifft(shifted_spectrum)
def write_signal_to_bin(cx, filename, fs=1, center=0, write_header=True):
    """
    Write a complex signal as interleaved 32-bit I/Q floats to
    <filename>.bin.

    cx: data vector to write
    filename: output file name (``.bin`` is appended)
    fs: sampling frequency
    center: center frequency
    write_header: when True the first complex sample is a header holding
    fs in its real part and center in its imaginary part, each as a
    32-bit float; the I/Q data follows.
    """
    # 32-bit little endian floats
    if write_header:
        # prepend the fs/center header sample
        cx = np.insert(cx, 0, complex(fs, center))
    cx.astype(np.complex64).tofile(filename + '.bin')
def write_signal_to_csv(filename, cx, fs=1, center=0):
    """Write I/Q pairs as ``re|im`` lines to <filename>.csv; the first
    line is an ascii header pair holding fs and center."""
    cx = np.insert(cx, 0, complex(fs, center))
    with open(filename + '.csv', 'w') as out:
        out.writelines('{}|{}\n'.format(np.real(sample), np.imag(sample))
                       for sample in cx)
def write_signal_to_wav(filename, cx, fs=1):
    """Save the signal's magnitude, normalised to 1, as an audio wav file."""
    magnitude = abs(cx)
    wavfile.write(filename + '.wav', fs, magnitude / max(magnitude))
def make_analytical(x):
    """Return the analytic signal of real input x and its instantaneous
    phase in degrees."""
    analytic = hilbert(x)
    x_bar = np.vectorize(complex)(np.real(analytic), np.imag(analytic))
    ins_ph = np.angle(x_bar) * 180 / np.pi
    return x_bar, ins_ph
def read_result_csv(filename):
    """
    Read the special format CSV result file from RSA5000 series output.

    The trace follows a 63-line header; Frequency/XStart/XStop are pulled
    out of the header to build a frequency axis relative to the center.
    :param filename:
    :return: (frequency axis, trace data)
    """
    p = np.genfromtxt(filename, skip_header=63)
    with open(filename) as fh:
        for line in fh:
            fields = line.split(',')
            if 'Frequency' in fields and len(fields) == 3:
                center = float(fields[1])
            if 'XStart' in fields and len(fields) == 3:
                start = float(fields[1])
            if 'XStop' in fields and len(fields) == 3:
                stop = float(fields[1])
    f = np.linspace(start - center, stop - center, len(p))
    return f, p
def read_specan_xml(filename):
    """
    Read the resulting saved trace file Specan from the Tektronix RSA5000
    series; these files are produced while saving traces.

    :param filename: path of the XML trace file
    :return: (frequency axis, power trace, (x units, y units))
    """
    with open(filename, 'rb') as f:
        ba = f.read()
    xml_tree_root = et.fromstring(ba)
    for elem in xml_tree_root.iter(tag='Count'):
        count = int(elem.text)
    for elem in xml_tree_root.iter(tag='XStart'):
        start = float(elem.text)
    for elem in xml_tree_root.iter(tag='XStop'):
        stop = float(elem.text)
    for elem in xml_tree_root.iter(tag='XUnits'):
        xunits = elem.text
    for elem in xml_tree_root.iter(tag='YUnits'):
        yunits = elem.text
    # fill the trace; the original had a redundant first pass over the
    # 'y' elements that only assigned an unused variable - removed
    p = np.zeros(count)
    for i, elem in enumerate(xml_tree_root.iter(tag='y')):
        p[i] = float(elem.text)
    f = np.linspace(start, stop, count)
    return f, p, (xunits, yunits)
def read_data_csv(filename):
    """
    Read the special format CSV data file from RSA5100 series output.

    Please note that 50 ohm power termination is already considered
    for these data.
    :param filename:
    :return: 1-D complex array (I/Q column pairs reinterpreted as c16)
    """
    raw = np.genfromtxt(filename, skip_header=10, delimiter=",")
    # raw has one dimension more (n, 2): flatten, then view each
    # adjacent float pair as one complex128 sample
    return np.ravel(raw).view(dtype='c16')
def parse_filename(filename):
    """
    Parse experimental data file names of the following format:
    58Ni26+_374MeVu_250uA_pos_0_0.tiq

    :param filename:
    :return: (description, energy [eV/u], current [A])
    """
    descr, energy_field, current_field = filename.split('_')[:3]
    energy = float(energy_field.replace('MeVu', 'e6'))
    current = float(current_field.replace('uA', 'e-6'))
    return descr, energy, current
|
return self._wrap_output(output=output,
index=self.grouper.result_index,
names=names)
def _wrap_transformed_output(self, output, names=None):
return self._wrap_output(output=output,
index=self.obj.index,
names=names)
    def _wrap_applied_output(self, keys, values, not_indexed_same=False):
        """Wrap per-group ``apply`` results into a Series or DataFrame.

        The output container is chosen from the type of the first applied
        value: dicts are stacked into a Series with a MultiIndex, Series
        or DataFrame values are concatenated, and scalars become a plain
        Series indexed by the group keys.
        """
        if len(keys) == 0:
            # GH #6265: no groups -> empty Series
            return Series([], name=self._selection_name, index=keys)
        # build the result index from the group keys (MultiIndex when
        # grouping by more than one key)
        def _get_index():
            if self.grouper.nkeys > 1:
                index = MultiIndex.from_tuples(keys, names=self.grouper.names)
            else:
                index = Index(keys, name=self.grouper.names[0])
            return index
        if isinstance(values[0], dict):
            # GH #823: dict results are stacked into a long Series
            index = _get_index()
            result = DataFrame(values, index=index).stack()
            result.name = self._selection_name
            return result
        if isinstance(values[0], (Series, dict)):
            return self._concat_objects(keys, values,
                                        not_indexed_same=not_indexed_same)
        elif isinstance(values[0], DataFrame):
            # possible that Series -> DataFrame by applied function
            return self._concat_objects(keys, values,
                                        not_indexed_same=not_indexed_same)
        else:
            # GH #6265: scalar results become a Series over the group keys
            return Series(values, index=_get_index(),
                          name=self._selection_name)
def _aggregate_named(self, func, *args, **kwargs):
result = {}
for name, group in self:
group.name = name
output = func(group, *args, **kwargs)
if isinstance(output, (Series, Index, np.ndarray)):
raise Exception('Must produce aggregated value')
result[name] = self._try_cast(output, group)
return result
    @Substitution(klass='Series', selected='A.')
    @Appender(_transform_template)
    def transform(self, func, *args, **kwargs):
        # resolve to a cython implementation name when one exists
        func = self._is_cython_func(func) or func
        # if string function
        if isinstance(func, compat.string_types):
            if func in base.cython_transforms:
                # cythonized transform
                return getattr(self, func)(*args, **kwargs)
            else:
                # cythonized aggregation and merge: aggregate, then
                # broadcast the result back to the original shape
                return self._transform_fast(
                    lambda: getattr(self, func)(*args, **kwargs), func)
        # reg transform: apply the UDF group by group
        klass = self._selected_obj.__class__
        results = []
        wrapper = lambda x: func(x, *args, **kwargs)
        for name, group in self:
            # bypass any attribute validation when tagging the group
            object.__setattr__(group, 'name', name)
            res = wrapper(group)
            if hasattr(res, 'values'):
                res = res.values
            # re-attach the group's original row positions
            indexer = self._get_index(name)
            s = klass(res, indexer)
            results.append(s)
        from pandas.core.reshape.concat import concat
        result = concat(results).sort_index()
        # we will only try to coerce the result type if
        # we have a numeric dtype, as these are *always* udfs
        # the cython take a different path (and casting)
        dtype = self._selected_obj.dtype
        if is_numeric_dtype(dtype):
            result = maybe_downcast_to_dtype(result, dtype)
        result.name = self._selected_obj.name
        result.index = self._selected_obj.index
        return result
def _transform_fast(self, func, func_nm):
"""
fast version of transform, only applicable to
builtin/cythonizable functions
"""
if isinstance(func, compat.string_types):
func = getattr(self, func)
ids, _, ngroup = self.grouper.group_info
cast = self._transform_should_cast(func_nm)
out = algorithms.take_1d(func().values, ids)
if cast:
out = self._try_cast(out, self.obj)
return Series(out, index=self.obj.index, name=self.obj.name)
def filter(self, func, dropna=True, *args, **kwargs): # noqa
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> df.groupby('A').B.filter(lambda x: x.mean() > 3.)
1 2
3 4
5 6
Name: B, dtype: int64
Returns
-------
filtered : Series
"""
if isinstance(func, compat.string_types):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notna(x, *args, **kwargs):
b = wrapper(x, *args, **kwargs)
return b and notna(b)
try:
indices = [self._get_index(name) for name, group in self
if true_and_notna(group)]
except ValueError:
raise TypeError("the filter must return a boolean result")
except TypeError:
raise TypeError("the filter must return a boolean result")
filtered = self._apply_filter(indices, dropna)
return filtered
    def nunique(self, dropna=True):
        """ Returns number of unique elements in the group """
        ids, _, _ = self.grouper.group_info
        val = self.obj.get_values()
        # sort by (group id, value) so equal values within one group end
        # up adjacent; object dtypes that lexsort cannot compare are
        # factorized to integer codes first (-1 encodes missing)
        try:
            sorter = np.lexsort((val, ids))
        except TypeError:  # catches object dtypes
            msg = ('val.dtype must be object, got {dtype}'
                   .format(dtype=val.dtype))
            assert val.dtype == object, msg
            val, _ = algorithms.factorize(val, sort=False)
            sorter = np.lexsort((val, ids))
            _isna = lambda a: a == -1
        else:
            _isna = isna
        ids, val = ids[sorter], val[sorter]
        # group boundaries are where group ids change
        # unique observations are where sorted values change
        idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
        inc = np.r_[1, val[1:] != val[:-1]]
        # 1st item of each group is a new unique observation
        mask = _isna(val)
        if dropna:
            inc[idx] = 1
            inc[mask] = 0
        else:
            # count NA once per group: only the first NA of a run counts
            inc[mask & np.r_[False, mask[:-1]]] = 0
            inc[idx] = 1
        # per-group unique counts: sum the increments between boundaries
        out = np.add.reduceat(inc, idx).astype('int64', copy=False)
        if len(ids):
            # NaN/NaT group exists if the head of ids is -1,
            # so remove it from res and exclude its index from idx
            if ids[0] == -1:
                res = out[1:]
                idx = idx[np.flatnonzero(idx)]
            else:
                res = out
        else:
            res = out[1:]
        ri = self.grouper.result_index
        # we might have duplications among the bins
        if len(res) != len(ri):
            res, out = np.zeros(len(ri), dtype=out.dtype), res
            res[ids[idx]] = out
        return Series(res,
                      index=ri,
                      name=self._selection_name)
@Appender(Series.describe.__doc__)
def describe(self, **kwargs):
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
return result.unstack()
def value_counts(self, normalize=False, sort=True, ascending=False,
bins=None, dropna=True):
from pandas.core.reshape.tile import cut
from pandas.core.res |
import json, logging
class Farm:
    """
    Main class holding the whole farm.

    Configuration is loaded from a JSON file; every key in the file is
    exposed on the instance as an attribute with a leading underscore.
    """
    def __init__(self, json_file):
        """
        Parameters
        ----------
        json_file : str
            Path of a JSON configuration file; must contain at least
            'latitude' and 'longitude'.

        Raises
        ------
        ValueError
            When a required key is missing from the file.
        """
        from datetime import datetime
        # load the json file (context manager so the handle is closed;
        # the original left the file object dangling)
        with open(json_file, 'r') as fh:
            data = json.load(fh)
        # Make sure required data is in the json file
        for attr in ("latitude", "longitude"):
            if attr not in data:
                err = "Json file does not have all required data. Missing {}\n".format(attr)
                raise ValueError(err)
        # expose every configuration key as a private attribute
        for key, value in data.items():
            setattr(self, "_{}".format(key), value)
        # Get the utc offset for our current time
        self._utc_offset = datetime.now() - datetime.utcnow()
        # get the current sunrise, sunset etc.
        self._get_suntimes()
|
c_unknown = mutagen.File(aac_files["unknown"])
id3_t1 = mutagen.File(id3_files["t1"])
id3_picard = mutagen.File(id3_files["picard"])
id3_unknown = mutagen.File(id3_files["unknown"])
def test_artist_name(self):
vorbis_tag_type = self.vorbis.album_artist
aac_tag_type = self.aac.album_artist
id3_tag_type = self.id3.album_artist
self.check_tag_equality(vorbis_tag_type, aac_tag_type, id3_tag_type)
def test_mbid(self):
vorbis_tag_type = self.vorbis.mbid
aac_tag_type = self.aac.mbid
id3_tag_type = self.id3.mbid
self.check_tag_equality(vorbis_tag_type, aac_tag_type, id3_tag_type)
def test_album(self):
vorbis_tag_type = self.vorbis.album
aac_tag_type = self.aac.album
id3_tag_type = self.id3.album
self.check_tag_equality(vorbis_tag_type, aac_tag_type, id3_tag_type)
def test_release_date(self):
vorbis_tag_type = self.vorbis.release_date
aac_tag_type = self.aac.release_date
id3_tag_type = self.id3.release_date
self.check_tag_equality(vorbis_tag_type, aac_tag_type, id3_tag_type)
def test_title(self):
vorbis_tag_type = self.vorbis.title
aac_tag_type = self.aac.title
id3_tag_type = self.id3.title
self.check_tag_equality(vorbis_tag_type, aac_tag_type, id3_tag_type)
def test_artist(self):
vorbis_tag_type = self.vorbis.artist
aac_tag_type = self.aac.artist
id3_tag_type = self.id3.artist
self.check_tag_equality(vorbis_tag_type, aac_tag_type, id3_tag_type)
def test_disc_num(self):
vorbis_tag_type = self.vorbis.disc_num
aac_tag_type = self.aac.disc_num
id3_tag_type = self.id3.disc_num
self.check_tag_equality(vorbis_tag_type, aac_tag_type, id3_tag_type)
def test_track_num(self):
vorbis_tag_type = self.vorbis.track_num
aac_tag_type = self.aac.track_num
id3_tag_type = self.id3.track_num
self.check_tag_equality(vorbis_tag_type, aac_tag_type, id3_tag_type)
def test_length(self):
vorbis_tag_type = self.vorbis.length
aac_tag_type = self.aac.length
id3_tag_type = self.id3.length
self.check_tag_equality(vorbis_tag_type, aac_tag_type, id3_tag_type)
def check_tag_equality(self, vorbis_tag_type, aac_tag_type, id3_tag_type):
vorbis_tag = vorbis_tag_type(self.vorbis_t1)
aac_tag = aac_tag_type(self.aac_t1)
id3_tag = id3_tag_type(self.id3_t1)
msg = "Testing equality of t1 " + vorbis_tag.name + ": "
self.check_equality(vorbis_tag, aac_tag, id3_tag, msg)
vorbis_tag = vorbis_tag_type(self.vorbis_picard)
aac_tag = aac_tag_type(self.aac_picard)
id3_tag = id3_tag_type(self.id3_picard)
msg = "Testing equality of picard " + vorbis_tag.name + ": "
self.check_equality(vorbis_tag, aac_tag, id3_tag, msg)
vorbis_tag = vorbis_tag_type(self.vorbis_unknown)
aac_tag = aac_tag_type(self.aac_unknown)
id3_tag = id3_tag_type(self.id3_unknown)
msg = "Testing equality of unknown " + vorbis_tag.name + ": "
self.check_equality(vorbis_tag, aac_tag, id3_tag, msg)
###################
# Test writing tags
###################
class TestTagWriteToEmptyFile(TestUtil.TestUtilMixin):
    """Write each supported tag into a tag-less file and read it back."""
    def test_artist_name(self):
        self.write_test(self.format.album_artist, "albumartist")
    def test_mbid(self):
        self.write_test(self.format.mbid, "mbid")
    def test_album(self):
        self.write_test(self.format.album, "album")
    def test_release_date(self):
        self.write_test(self.format.release_date, "date")
    def test_title(self):
        self.write_test(self.format.title, "title")
    def test_artist(self):
        self.write_test(self.format.artist, "artist")
    def test_disc_num(self):
        self.write_test(self.format.disc_num, "discnumber")
    def test_track_num(self):
        self.write_test(self.format.track_num, "tracknumber")
    def write_test(self, tag_builder, tag_name):
        """Assert the tag starts empty, write the reference value, then
        reload the file and assert the value round-tripped."""
        expected = self.tags.get(tag_name)
        tag = tag_builder(self.meta)
        self.check_tag(tag_builder, None)
        tag.value = expected
        tag.save()
        self.meta = mutagen.File(self.file_name)
        self.check_tag(tag_builder, expected)
#############
# Vorbis format tests
#############
class TestReadGenericTagsVorbis_t1(TestReadGenericTags, unittest.TestCase):
    """Read the generic tags from the Vorbis 't1' fixture."""
    @classmethod
    def setUpClass(cls):
        # the implicit class argument of a classmethod is conventionally
        # named 'cls', not 'self' (PEP 8)
        cls.meta = mutagen.File(vorbis_files["t1"])
        cls.tags = ref.t1_tags
        cls.format = format.Vorbis.Format
class TestReadGenericTagsVorbis_picard(TestReadGenericTags, unittest.TestCase):
    """Read the generic tags from the Vorbis 'picard' fixture."""
    @classmethod
    def setUpClass(cls):
        # 'cls' (not 'self') is the conventional classmethod argument name
        cls.meta = mutagen.File(vorbis_files["picard"])
        cls.tags = ref.picard_tags
        cls.format = format.Vorbis.Format
class TestReadGenericTagsVorbis_NOMETA(TestReadGenericTags, unittest.TestCase):
    """Read the generic tags from the Vorbis fixture without metadata."""
    @classmethod
    def setUpClass(cls):
        # 'cls' (not 'self') is the conventional classmethod argument name
        cls.meta = mutagen.File(vorbis_files["unknown"])
        cls.tags = ref.unknown_tags
        cls.format = format.Vorbis.Format
class TestWriteVorbisTags_t1(TestTagWriteToEmptyFile, unittest.TestCase):
    """Write the t1 reference tags into a disposable Vorbis file copy."""
    @classmethod
    def setUpClass(cls):
        # work on a copy so the pristine fixture stays untouched
        shutil.copy(vorbis_files["to_write"], vorbis_files["copy_to"])
        cls.file_name = vorbis_files["copy_to"]
        cls.tags = ref.t1_tags
        cls.format = format.Vorbis.Format
        cls.meta = mutagen.File(cls.file_name)
    @classmethod
    def tearDownClass(cls):
        os.remove(cls.file_name)
class TestWriteVorbisTags_picard(TestTagWriteToEmptyFile, unittest.TestCase):
    """Write the picard reference tags into a disposable Vorbis file copy."""
    @classmethod
    def setUpClass(cls):
        # work on a copy so the pristine fixture stays untouched
        shutil.copy(vorbis_files["to_write"], vorbis_files["copy_to"])
        cls.file_name = vorbis_files["copy_to"]
        cls.tags = ref.picard_tags
        cls.format = format.Vorbis.Format
        cls.meta = mutagen.File(cls.file_name)
    @classmethod
    def tearDownClass(cls):
        os.remove(cls.file_name)
#############
# ID3 format tests
#############
class TestReadGenericTagsID3_t1(TestReadGenericTags, unittest.TestCase):
    """Read the generic tags from the ID3 't1' fixture."""
    def setUp(self):
        self.tags = ref.t1_tags
        self.format = format.ID3.Format
        self.meta = mutagen.File(id3_files["t1"])
class TestReadGenericTagsID3_picard(TestReadGenericTags, unittest.TestCase):
    """Read the generic tags from the ID3 'picard' fixture."""
    def setUp(self):
        self.tags = ref.picard_tags
        self.format = format.ID3.Format
        self.meta = mutagen.File(id3_files["picard"])
class TestReadGenericTagsID3_NOMETA(TestReadGenericTags, unittest.TestCase):
    """Read the generic tags from the ID3 fixture without metadata."""
    def setUp(self):
        self.tags = ref.unknown_tags
        self.format = format.ID3.Format
        self.meta = mutagen.File(id3_files["unknown"])
class TestWriteId3Tags_t1(TestTagWriteToEmptyFile, unittest.TestCase):
    """Write the t1 reference tags into a disposable ID3 file copy."""
    @classmethod
    def setUpClass(cls):
        # work on a copy so the pristine fixture stays untouched
        shutil.copy(id3_files["to_write"], id3_files["copy_to"])
        cls.file_name = id3_files["copy_to"]
        cls.tags = ref.t1_tags
        cls.format = format.ID3.Format
        cls.meta = mutagen.File(cls.file_name)
    @classmethod
    def tearDownClass(cls):
        os.remove(cls.file_name)
class TestWriteId3Tags_picard(TestTagWriteToEmptyFile, unittest.TestCase):
    """Write the picard reference tags into a disposable ID3 file copy."""
    @classmethod
    def setUpClass(cls):
        # work on a copy so the pristine fixture stays untouched
        shutil.copy(id3_files["to_write"], id3_files["copy_to"])
        cls.file_name = id3_files["copy_to"]
        cls.tags = ref.picard_tags
        cls.format = format.ID3.Format
        cls.meta = mutagen.File(cls.file_name)
    @classmethod
    def tearDownClass(cls):
        os.remove(cls.file_name)
#############
# AAC format tests
#############
class TestReadGenericTagsAAC_t1(TestReadGenericTags, unittest.TestCase):
def setUp(self):
self.meta = |
) #uni069C.medi
glyphs.append(0x0118) #uni0672.fina
glyphs.append(0x0074) #beh_dotless_alt.init
glyphs.append(0x0290) #uni0766.init
glyphs.append(0x012A) #uni067B.medi
glyphs.append(0x01DD) #uni06B9.init
glyphs.append(0x0172) #uni069A.fina
glyphs.append(0x02BD) #uni06C5.fina
glyphs.append(0x02A6) #uni076D
glyphs.append(0x029E) #uni076A
glyphs.append(0x02A2) #uni076B
glyphs.append(0x02A4) #uni076C
glyphs.append(0x005D) #uni060C
glyphs.append(0x0234) #uni060B
glyphs.append(0x008A) #uni067E.fina
glyp | hs.append(0x0222) #uni060F
glyphs.append(0x0221) #uni060E
glyphs.append(0x0211) #uni06440673.isol
glyphs.append(0x024E) #uni0754.init
glyphs.append(0x024F) #uni0754.medi
glyphs.append(0x0299) #uni0768.medi
glyphs.append(0x01B8) #uni06B0.fina
glyphs.append(0x02B3) #fourdots.below
glyphs.append(0x01E4) #uni06BC.medi
glyphs.append(0x00B2) #uni0634.fina
glyphs.append(0x012C) #uni067C.fina
glyp | hs.append(0x010B) #uni0651064B
glyphs.append(0x010D) #uni0651064F
glyphs.append(0x010C) #uni0651064E
glyphs.append(0x0150) #uni0687.fina
glyphs.append(0x0050) #uni06BE
glyphs.append(0x01E5) #uni06BD
glyphs.append(0x01B6) #uni06AE.medi
glyphs.append(0x0059) #uni06BA
glyphs.append(0x01E1) #uni06BC
glyphs.append(0x009E) #uni062C.init
glyphs.append(0x0139) #uni0680.init
glyphs.append(0x02DB) #uni076A0627.isol
glyphs.append(0x0114) #uni06540652
glyphs.append(0x01E2) #uni06BC.fina
glyphs.append(0x027B) #uni0761.fina
glyphs.append(0x022B) #uni06EE.fina
glyphs.append(0x01A0) #uni06A7.fina
glyphs.append(0x002B) #uni0647.init
glyphs.append(0x01B1) #uni06AD.init
glyphs.append(0x0090) #uni062A.fina
glyphs.append(0x01C9) #uni06B4.init
glyphs.append(0x01CB) #uni06B5
glyphs.append(0x01C7) #uni06B4
glyphs.append(0x01D3) #uni06B7
glyphs.append(0x01CF) #uni06B6
glyphs.append(0x01BB) #uni06B1
glyphs.append(0x01B7) #uni06B0
glyphs.append(0x01C3) #uni06B3
glyphs.append(0x01BF) #uni06B2
glyphs.append(0x02D2) #uni06D0.init
glyphs.append(0x01DB) #uni06B9
glyphs.append(0x01D7) #uni06B8
glyphs.append(0x0070) #uni062F.fina
glyphs.append(0x00BD) #uni063A.medi
glyphs.append(0x01E8) #uni06BF.fina
glyphs.append(0x01B2) #uni06AD.medi
glyphs.append(0x0082) #uni0622.fina
glyphs.append(0x02BF) #uni06C6.fina
glyphs.append(0x021A) #uni06B70627.fina
glyphs.append(0x023B) #uni065E
glyphs.append(0x0162) #uni0690.fina
glyphs.append(0x01AE) #uni06AC.medi
glyphs.append(0x016E) #uni0697.fina
glyphs.append(0x01EF) #uni06CD.fina
glyphs.append(0x0052) #uni06D2.fina
glyphs.append(0x010F) #uniFC63
glyphs.append(0x007A) #twodots.below
glyphs.append(0x00B9) #uni0638.medi
glyphs.append(0x01DC) #uni06B9.fina
glyphs.append(0x014D) #uni0685.init
glyphs.append(0x0199) #uni06A5.init
glyphs.append(0x00E6) #uni06440623.isol
glyphs.append(0x01AF) #uni06AD
glyphs.append(0x01AA) #uni06AB.medi
glyphs.append(0x008E) #uni062A.init
glyphs.append(0x0178) #uni069B.medi
glyphs.append(0x0144) #uni0683.fina
glyphs.append(0x00C2) #uni0641.init
glyphs.append(0x0243) #uni0751.medi
glyphs.append(0x024B) #uni0753.medi
glyphs.append(0x002C) #uni0647.medi
glyphs.append(0x006C) #uni0655
glyphs.append(0x006E) #uni0656
glyphs.append(0x0228) #uni0657
glyphs.append(0x0067) #uni0650
glyphs.append(0x0068) #uni0651
glyphs.append(0x0069) #uni0652
glyphs.append(0x006A) #uni0653
glyphs.append(0x0271) #uni075E.medi
glyphs.append(0x0229) #uni0658
glyphs.append(0x0236) #uni0659
glyphs.append(0x001B) #uni0639.init
glyphs.append(0x0245) #uni0752.fina
glyphs.append(0x002D) #uni0647.fina
glyphs.append(0x0283) #uni0763.fina
glyphs.append(0x0093) #uni062B.medi
glyphs.append(0x0107) #sukun_01
glyphs.append(0x013E) #uni0681.medi
glyphs.append(0x00C7) #veh.medi
glyphs.append(0x0177) #uni069B.init
glyphs.append(0x0273) #uni075F.fina
glyphs.append(0x007B) #alef_alt.isol
glyphs.append(0x0251) #uni0755.fina
glyphs.append(0x0120) #uni0677.fina
glyphs.append(0x01B0) #uni06AD.fina
glyphs.append(0x00ED) #uni06440671.fina
glyphs.append(0x0102) #kasratan_01
glyphs.append(0x029B) #uni0769.fina
glyphs.append(0x027C) #uni0761.init
glyphs.append(0x02AB) #twodots.small.below
glyphs.append(0x01E6) #uni06BD.fina
glyphs.append(0x0293) #uni0767.fina
glyphs.append(0x01CC) #uni06B5.fina
glyphs.append(0x020D) #uni06AD.morocco
glyphs.append(0x01B4) #uni06AE.fina
glyphs.append(0x006B) #uni0654
glyphs.append(0x00AA) #uni0632.fina
glyphs.append(0x00B0) #uni0634.init
glyphs.append(0x01DF) #uni06BB
glyphs.append(0x022C) #uni06EF
glyphs.append(0x0252) #uni0755.init
glyphs.append(0x01EA) #uni06BF.medi
glyphs.append(0x0294) #uni0767.init
glyphs.append(0x0057) #uni06AF.medi
glyphs.append(0x02D8) #uni06FA.fina
glyphs.append(0x000A) #uni062D.fina
glyphs.append(0x028F) #uni0766.fina
glyphs.append(0x0274) #uni075F.init
glyphs.append(0x00DF) #uni06CC.fina
glyphs.append(0x026F) #uni075E.fina
glyphs.append(0x0214) #uni06440675.fina
glyphs.append(0x0184) #uni069E.medi
glyphs.append(0x001F) #uni0643.medi
glyphs.append(0x0151) #uni0687.init
glyphs.append(0x01D2) #uni06B6.medi
glyphs.append(0x00E7) #uni06440623.fina
glyphs.append(0x01AD) #uni06AC.init
glyphs.append(0x01BA) #uni06B0.medi
glyphs.append(0x029C) #uni0769.init
glyphs.append(0x020A) #uni06FC.medi
glyphs.append(0x02AF) #twodots.vert.small.below
glyphs.append(0x0143) #uni0683
glyphs.append(0x013F) #uni0682
glyphs.append(0x013B) #uni0681
glyphs.append(0x0137) #uni0680
glyphs.append(0x014F) #uni0687
glyphs.append(0x0077) #uni0686
glyphs.append(0x014B) #uni0685
glyphs.append(0x0147) #uni0684
glyphs.append(0x0156) #uni068A.fina
glyphs.append(0x0153) #uni0689
glyphs.append(0x00A7) #uni0688
glyphs.append(0x02DC) #uni076A0627.fina
glyphs.append(0x0262) #uni075A
glyphs.append(0x0266) #uni075C
glyphs.append(0x0264) #uni075B
glyphs.append(0x0116) #uni06550650
glyphs.append(0x026A) #uni075D
glyphs.append(0x0272) #uni075F
glyphs.append(0x01D4) #uni06B7.fina
glyphs.append(0x005E) #uni061B
glyphs.append(0x02F5) #ZWSP
glyphs.append(0x0235) #uni061E
glyphs.append(0x005C) #uni061F
glyphs.append(0x003D) #onedot.above
glyphs.append(0x0079) #onedot.below
glyphs.append(0x00A8) #uni0688.fina
glyphs.append(0x01BE) #uni06B1.medi
glyphs.append(0x00BC) #uni063A.init
glyphs.append(0x014E) #uni0685.medi
glyphs.append(0x014C) #uni0685.fina
glyphs.append(0x010E) #uni06510650
glyphs.append(0x021B) #uni06B80627.isol
glyphs.append(0x00E1) #uni06D3.fina
glyphs.append(0x0152) #uni0687.medi
glyphs.append(0x02B4) #threedots.horz.below
glyphs.append(0x0017) #uni0637.init
glyphs.append(0x023D) #uni0750.fina
glyphs.append(0x01C2) #uni06B2.medi
glyphs.append(0x00C3) #uni0641.medi
glyphs.append(0x013A) #uni0680.medi
glyph |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2018, Eric Jacob <erjac77@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Standard Ansible module metadata block, consumed by ansible-doc tooling.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: f5bigip_ltm_traffic_class
short_description: BIG-IP ltm traffic class module
description:
- Configures a traffic class.
version_added: "2.4"
author:
- "Gabriel Fortin (@GabrielFortin)"
options:
app_service:
description:
- Specifies the application service to which the object belongs.
classification:
description:
- Specifies the actual textual tag to be associated with the flow if the traffic class is matched.
required: true
description:
description:
- Specifies descriptive text that identifies the component.
destination_address:
description:
- Specifies destination IP addresses for the system to use when evaluating traffic flow.
destination_mask:
description:
- Specifies a destination IP address mask for the system to use when evaluating traffic flow.
destination_port:
description:
- Specifies a destination port for the system to use when evaluating traffic flow.
default: 0
name:
description:
- Specifies a unique name for the component.
required: true
partition:
description:
- Specifies the administrative partition in which the component object resides.
default: Common
protocol:
description:
- Specifies a protocol for the system to use when evaluating traffic flow.
default: any
source_address:
description:
- Specifies source IP addresses for the system to use when evaluating traffic flow.
| source_mask:
description:
- Specifies a source IP address mask for the system to use when evaluating traffic flow.
source_port:
description:
- Specifies a source port | for the system to use when evaluating traffic flow.
default: 0
state:
description:
- Specifies the state of the component on the BIG-IP system.
default: present
choices: ['absent', 'present']
requirements:
- BIG-IP >= 12.0
- ansible-common-f5
- f5-sdk
'''
EXAMPLES = '''
- name: Create LTM Traffic Class
f5bigip_ltm_traffic_class:
f5_hostname: 172.16.227.35
f5_username: admin
f5_password: admin
f5_port: 443
name: my_traffic_class
partition: Common
classification: traffic_class
description: My ltm traffic class
destination_port: 21
protocol: tcp
source_port: 21
state: present
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible_common_f5.base import F5_NAMED_OBJ_ARGS
from ansible_common_f5.base import F5_PROVIDER_ARGS
from ansible_common_f5.bigip import F5BigIpNamedObject
class ModuleParams(object):
    """Builds the AnsibleModule argument specification for this module."""
    @property
    def argument_spec(self):
        spec = {
            'app_service': dict(type='str'),
            'classification': dict(type='str'),
            'description': dict(type='str'),
            'destination_address': dict(type='str'),
            'destination_mask': dict(type='str'),
            'destination_port': dict(type='int'),
            # NOTE(review): 'file_name' is not listed in DOCUMENTATION and
            # looks like a copy-paste leftover - confirm against the
            # BIG-IP traffic-class API before removing it
            'file_name': dict(type='str'),
            'protocol': dict(type='str'),
            'source_address': dict(type='str'),
            'source_mask': dict(type='str'),
            'source_port': dict(type='int'),
        }
        # merge in the shared provider/connection and named-object args
        spec.update(F5_PROVIDER_ARGS)
        spec.update(F5_NAMED_OBJ_ARGS)
        return spec
    @property
    def supports_check_mode(self):
        return True
class F5BigIpLtmTrafficClass(F5BigIpNamedObject):
    """LTM traffic-class resource wrapper around the F5 SDK."""
    def _set_crud_methods(self):
        # Map the generic CRUD names used by F5BigIpNamedObject onto the
        # iControl REST traffic-class endpoint methods.
        self._methods = {
            'create': self._api.tm.ltm.traffic_class_s.traffic_class.create,
            'read': self._api.tm.ltm.traffic_class_s.traffic_class.load,
            'update': self._api.tm.ltm.traffic_class_s.traffic_class.update,
            'delete': self._api.tm.ltm.traffic_class_s.traffic_class.delete,
            'exists': self._api.tm.ltm.traffic_class_s.traffic_class.exists
        }
def main():
    """Module entry point: build the AnsibleModule and flush the object."""
    params = ModuleParams()
    module = AnsibleModule(argument_spec=params.argument_spec,
                           supports_check_mode=params.supports_check_mode)
    try:
        obj = F5BigIpLtmTrafficClass(check_mode=module.check_mode,
                                     **module.params)
        module.exit_json(**obj.flush())
    except Exception as exc:
        # surface any failure to Ansible as a module error
        module.fail_json(msg=str(exc))
# Run the module only when executed directly (not on import).
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
from flask_testing import TestCase
import mock
import journalist
from utils import db_helper, env
from db import (Journalist, Submission, Reply, get_one_or_else,
LoginThrottledException)
class TestDatabase(TestCase):
    """Tests for the db module's models and the get_one_or_else helper.

    NOTE(review): the ``test_get_one_or_else_*`` methods take a ``mock``
    parameter injected by ``@mock.patch('flask.abort')``; that parameter
    shadows the ``mock`` module, so the inner ``mock.patch('logger')``
    calls actually resolve to an attribute of the flask.abort MagicMock,
    not to ``mock.patch`` — confirm this shadowing is intentional before
    renaming anything here.
    """
    def create_app(self):
        return journalist.app
    def setUp(self):
        env.setup()
    def tearDown(self):
        env.teardown()
    @mock.patch('flask.abort')
    def test_get_one_or_else_returns_one(self, mock):
        # exactly one matching row -> the helper returns it
        new_journo, _ = db_helper.init_journalist()
        query = Journalist.query.filter(
            Journalist.username == new_journo.username)
        with mock.patch('logger') as mock_logger:
            selected_journo = get_one_or_else(query, mock_logger, mock)
        self.assertEqual(new_journo, selected_journo)
    @mock.patch('flask.abort')
    def test_get_one_or_else_multiple_results(self, mock):
        # more than one row -> the helper logs and aborts with 500
        journo_1, _ = db_helper.init_journalist()
        journo_2, _ = db_helper.init_journalist()
        with mock.patch('logger') as mock_logger:
            get_one_or_else(Journalist.query, mock_logger, mock)
        mock_logger.error.assert_called()  # Not specifying very long log line
        mock.assert_called_with(500)
    @mock.patch('flask.abort')
    def test_get_one_or_else_no_result_found(self, mock):
        # zero rows -> the helper logs and aborts with 404
        query = Journalist.query.filter(Journalist.username == "alice")
        with mock.patch('logger') as mock_logger:
            get_one_or_else(query, mock_logger, mock)
        log_line = ('Found none when one was expected: '
                    'No row was found for one()')
        mock_logger.error.assert_called_with(log_line)
        mock.assert_called_with(404)
    # Check __repr__ do not throw exceptions
    def test_submission_string_representation(self):
        source, _ = db_helper.init_source()
        db_helper.submit(source, 2)
        test_submission = Submission.query.first()
        test_submission.__repr__()
    def test_reply_string_representation(self):
        journalist, _ = db_helper.init_journalist()
        source, _ = db_helper.init_source()
        db_helper.reply(journalist, source, 2)
        test_reply = Reply.query.first()
        test_reply.__repr__()
    def test_journalist_string_representation(self):
        test_journalist, _ = db_helper.init_journalist()
        test_journalist.__repr__()
    def test_source_string_representation(self):
        test_source, _ = db_helper.init_source()
        test_source.__repr__()
    def test_throttle_login(self):
        # exceeding the per-period attempt limit must raise
        journalist, _ = db_helper.init_journalist()
        for _ in range(Journalist._MAX_LOGIN_ATTEMPTS_PER_PERIOD):
            Journalist.throttle_login(journalist)
        with self.assertRaises(LoginThrottledException):
            Journalist.throttle_login(journalist)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Fungiform
~~~~~~~~~
A form handling system that previously was used for Pocoo's Zine
and Plurk's Solace software. Unbundled into a separate library that
| is framework independent.
This is still a preview release. Check the source for more information.
| :copyright: (c) 2010 by Armin Ronacher, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
    from setuptools import setup
except ImportError:
    # fall back to the stdlib installer when setuptools is unavailable
    from distutils.core import setup

# package metadata; long_description reuses the module docstring above
setup(
    name='Fungiform',
    version='0.2',
    url='http://github.com/mitsuhiko/fungiform',
    license='BSD License',
    author='Armin Ronacher',
    author_email='armin.ronacher@active-4.com',
    description='form library',
    long_description=__doc__,
    keywords='form library',
    packages=['fungiform', 'fungiform.tests'],
    platforms='any',
    zip_safe=False,
    test_suite='fungiform.tests.suite',
    include_package_data=True,
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Development Status :: 4 - Beta'
    ],
)
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import colorlog
import logging
import os
from multiprocessing import Lock, Manager
import sqlite3
from .base import Store
class SqliteStore(Store):
    """
    Stores data for one space

    This is a basic permanent key-value store.

    Example::

        store = SqliteStore(db='shellstore.db', id=space.id)
    """

    def on_init(self,
                prefix='sqlite',
                id=None,
                db=None,
                **kwargs):
        """
        Adds processing to initialization

        :param prefix: the main keyword for configuration of this space
        :type prefix: str

        :param id: the unique identifier of the related space (optional)
        :type id: str

        :param db: name of the file that contains Sqlite data (optional)
        :type db: str

        Example::

            store = SqliteStore(context=context, prefix='sqlite')

        Here we create a new store powered by Sqlite, and use
        settings under the key ``sqlite`` in the context of this bot.
        """
        assert prefix
        self.prefix = prefix
        # '*id' is the wildcard context used when no space id is provided
        self.id = id if id else '*id'
        if db:
            self.context.set(self.prefix + '.db', db)

    def check(self):
        """
        Checks configuration, providing 'store.db' as the default file name
        """
        self.context.check(self.prefix + '.db', 'store.db')

    def get_db(self):
        """
        Gets a handle on the database

        :return: an open sqlite3 connection

        Note: a new connection is opened on every call; callers performing
        several operations should obtain one handle and pass it around.
        """
        db = self.context.get(self.prefix + '.db', 'store.db')
        return sqlite3.connect(db)

    def bond(self, id=None):
        """
        Creates or uses a file to store data

        :param id: the unique identifier of the related space
        :type id: str
        """
        if id:
            self.id = id
        handle = self.get_db()
        try:
            handle.execute("CREATE TABLE store "
                           "(id INTEGER PRIMARY KEY, "
                           "context TEXT, "
                           "key TEXT UNIQUE, "
                           "value TEXT)")
        except sqlite3.OperationalError as feedback:
            # the table already exists -- expected on every bond after the first
            logging.debug(feedback)

    def _set(self, key, value, handle=None):
        """
        Sets a permanent value

        :param key: name of the value
        :type key: str

        :param value: actual value
        :type value: any serializable type is accepted

        :param handle: an optional instance of a Sqlite database
        :type handle: a connection

        This functions stores or updates a value in the back-end storage
        system.

        Example::

            store._set('parameter_123', 'George')
        """
        handle = handle if handle else self.get_db()
        cursor = handle.cursor()
        # FIX: close the cursor even when execute() raises
        try:
            cursor.execute("DELETE FROM store WHERE context=? AND key=?",
                           (self.id, key))
            cursor.execute("INSERT INTO store (context,key,value) VALUES (?,?,?)",
                           (self.id, key, value))
            handle.commit()
        finally:
            cursor.close()

    def _get(self, key, handle=None):
        """
        Gets a permanent value

        :param key: name of the value
        :type key: str

        :param handle: an optional instance of a Sqlite database
        :type handle: a connection

        :return: the actual value, or None

        Example::

            value = store._get('parameter_123')
        """
        handle = handle if handle else self.get_db()
        cursor = handle.cursor()
        # FIX: the original never closed this cursor and detected a missing
        # row by catching the TypeError from ``None[0]``; test explicitly.
        try:
            cursor.execute("SELECT value FROM store WHERE context=? AND key=?",
                           (self.id, key))
            row = cursor.fetchone()
            return row[0] if row is not None else None
        finally:
            cursor.close()

    def _clear(self, key=None, handle=None):
        """
        Forgets a value or all values

        :param key: name of the value to forget, or None
        :type key: str

        :param handle: an optional instance of a Sqlite database
        :type handle: a connection

        To clear only one value, provide the name of it.

        For example::

            store._clear('parameter_123')

        To clear all values in the store, just call the function
        without a value.

        For example::

            store._clear()
        """
        handle = handle if handle else self.get_db()
        cursor = handle.cursor()
        # FIX: the two branches duplicated commit/close logic and leaked the
        # cursor on error; share the cleanup in one try/finally.
        try:
            if not key:
                cursor.execute("DELETE FROM store WHERE context=?",
                               (self.id,))
            else:
                cursor.execute("DELETE FROM store WHERE context=? AND key=?",
                               (self.id, key))
            handle.commit()
        finally:
            cursor.close()
|
from xmediusmailrelayserver impor | t * | |
import pytest
import responses
from urlobject import URLObject
from flask import Flask
from flask_dance.contrib.reddit import make_reddit_blueprint, reddit
from flask_dance.consumer import OAuth2ConsumerBlueprint
from flask_dance.consumer.storage import MemoryStorage
@pytest.fixture
def make_app():
    "A callable to create a Flask app with the Reddit provider"

    def _make_app(*args, **kwargs):
        application = Flask(__name__)
        application.secret_key = "whatever"
        application.register_blueprint(make_reddit_blueprint(*args, **kwargs))
        return application

    return _make_app
def test_blueprint_factory():
    """The factory should wire up a fully-configured OAuth2 blueprint."""
    bp = make_reddit_blueprint(
        client_id="foo", client_secret="bar", scope="identity", redirect_to="index"
    )
    assert isinstance(bp, OAuth2ConsumerBlueprint)
    oauth_session = bp.session
    assert oauth_session.scope == "identity"
    assert oauth_session.base_url == "https://oauth.reddit.com/"
    assert oauth_session.client_id == "foo"
    assert bp.client_secret == "bar"
    assert bp.authorization_url == "https://www.reddit.com/api/v1/authorize"
    assert bp.token_url == "https://www.reddit.com/api/v1/access_token"
def test_blueprint_factory_with_permanent_token():
    """With permanent=True the auth URL must request a permanent token."""
    bp = make_reddit_blueprint(
        client_id="foo",
        client_secret="bar",
        scope="identity",
        redirect_to="index",
        permanent=True,
    )
    assert isinstance(bp, OAuth2ConsumerBlueprint)
    oauth_session = bp.session
    assert oauth_session.scope == "identity"
    assert oauth_session.base_url == "https://oauth.reddit.com/"
    assert oauth_session.client_id == "foo"
    assert bp.client_secret == "bar"
    assert bp.authorization_url == "https://www.reddit.com/api/v1/authorize"
    assert bp.token_url == "https://www.reddit.com/api/v1/access_token"
    assert bp.authorization_url_params["duration"] == "permanent"
def test_load_from_config(make_app):
    """Credentials set via app.config must reach the OAuth redirect URL."""
    app = make_app()
    app.config["REDDIT_OAUTH_CLIENT_ID"] = "foo"
    app.config["REDDIT_OAUTH_CLIENT_SECRET"] = "bar"
    response = app.test_client().get("/reddit")
    location = URLObject(response.headers["Location"])
    assert location.query.dict.get("client_id") == "foo"
@responses.activate
def test_context_local(make_app):
    """The `reddit` proxy must resolve to the blueprint of the current app."""
    responses.add(responses.GET, "https://google.com")

    # Two apps, each holding a different stored access token.
    first_app = make_app(
        "foo1",
        "bar1",
        redirect_to="url1",
        storage=MemoryStorage({"access_token": "app1"}),
    )
    second_app = make_app(
        "foo2",
        "bar2",
        redirect_to="url2",
        storage=MemoryStorage({"access_token": "app2"}),
    )

    # Outside any request context the proxy must refuse to work.
    with pytest.raises(RuntimeError):
        reddit.get("https://google.com")

    # Inside a request context, each app's token is used for its own calls.
    with first_app.test_request_context("/"):
        first_app.preprocess_request()
        reddit.get("https://google.com")
        first_request = responses.calls[0].request
        assert first_request.headers["Authorization"] == "Bearer app1"

    with second_app.test_request_context("/"):
        second_app.preprocess_request()
        reddit.get("https://google.com")
        second_request = responses.calls[1].request
        assert second_request.headers["Authorization"] == "Bearer app2"
|
0,
"Colonel***": 540000,
"General": 660000,
"General*": 800000,
"General**": 950000,
"General***": 1140000,
"Field Marshal": 1350000,
"Field Marshal*": 1600000,
"Field Marshal**": 1875000,
"Field Marshal***": 2185000,
"Supreme Marshal": 2550000,
"Supreme Marshal*": 3000000,
"Supreme Marshal**": 3500000,
"Supreme Marshal***": 4150000,
"National Force": 4900000,
"National Force*": 5800000,
"National Force**": 7000000,
"National Force***": 9000000,
"World Class Force": 11500000,
"World Class Force*": 14500000,
"World Class Force**": 18000000,
"World Class Force***": 22000000,
"Legendary Force": 26500000,
"Legendary Force*": 31500000,
"Legendary Force**": 37000000,
"Legendary Force***": 42000000,
"God of War": 50000000,
"God of War*": 100000000 ,
"God of War**": 200000000,
"God of War***": 500000000,
"Titan": 1000000000,
"Titan*": 2000000000,
"Titan**": 4000000000,
"Titan***": 10000000000}
# Lista ordenada de rangos segun importancia
self.rank_to_pos = [
"Recruit",
"Private",
"Private*",
"Private**",
"Private***",
"Corporal",
"Corporal*",
"Corporal**",
"Corporal***",
"Sergeant",
"Sergeant*",
"Sergeant**",
"Sergeant***",
"Lieutenant",
"Lieutenant*",
"Lieutenant**",
"Lieutenant***",
"Captain",
"Captain*",
"Captain**",
"Captain***",
"Major",
"Major*",
"Major**",
"Major***",
"Commander",
"Commander*",
"Commander**",
"Commander***",
"Lt Colonel",
"Lt Colonel*",
"Lt Colonel**",
"Lt Colonel***",
"Colonel",
"Colonel*",
"Colonel**",
"Colonel***",
"General",
"General*",
"General**",
"General***",
"Field Marshal",
"Field Marshal*",
"Field Marshal**",
"Field Marshal***",
"Supreme Marshal",
"Supreme Marshal*",
"Supreme Marshal**",
"Supreme Marshal***",
"National Force",
"National Force*",
"National Force**",
"National Force***",
"World Class Force",
"World Class Force*",
"World Class Force**",
"World Class Force***",
"Legendary Force",
"Legendary Force*",
"Legendary Force**",
"Legendary Force***",
"God of War",
"God of War*",
"God of War**",
"God of War***",
"Titan",
"Titan*",
"Titan**",
"Titan***",]
# Bandera de ejecucion, util en caso de que se decida matar de forma manual los threads para a | ctualizar y guardar los datos
self.run = True
# Se paraleliza la carga de datos en un hilo nuevo, el cual es demonio del invocador en caso de "muerte prematura"
th = threading.Thread(target=self.data_loader)
th.daemon = True
th.start()
# Loader entry point: loads persisted data, then spawns the saver and
# updater threads.  Only called from the constructor.
def data_loader(self):
    self.load_data()
    # daemon=True so these background threads die with the main process
    self.data_saver_th = threading.Thread(target=self.data_saver)
    self.data_saver_th.daemon = True
    self.data_saver_th.start()
    self.data_updater_th = threading.Thread(target=self.data_updater)
    self.data_updater_th.daemon = True
    self.data_updater_th.start()
# Background loop: flushes the in-memory data to disk every 60 seconds
# while self.run stays True.  Only started from data_loader.
def data_saver(self):
    while self.run:
        self.save_data()
        time.sleep(60)
# Background loop: re-scrapes every known nick (30 s apart to avoid
# hammering the site), then sleeps 10 minutes before the next full pass.
# Only started from data_loader.
def data_updater(self):
    while self.run:
        for irc_nick in self.data:
            self.update_data(irc_nick)
            time.sleep(30)
        time.sleep(600)
# ---------------------------------------------------------------------------------- #
# @ PUBLIC METHODS #
# ---------------------------------------------------------------------------------- #
# Refresh the in-memory table from the on-disk CSV file (best effort:
# a missing or malformed file leaves the current data untouched).
def load_data(self):
    try:
        # FIX: use a context manager so the file is closed even if a row
        # fails to parse (the original leaked the handle on error).
        with open('data/er_nick-data.csv', 'rt') as f:
            reader = csv.reader(f)
            for nick_irc, id, nick_er, level, strength, rank_points, citizenship in reader:
                self.data[nick_irc] = {'id': int(id),
                                       'nick': nick_er,
                                       'level': int(level),
                                       'strength': float(strength),
                                       'rank_points': int(rank_points),
                                       'citizenship': citizenship}
    except Exception:
        # FIX: `except Exception` instead of a bare `except:`, which also
        # swallowed SystemExit/KeyboardInterrupt.  Still best-effort.
        pass
# Dump the in-memory table to the on-disk CSV file (best effort: any
# I/O failure is ignored so the saver thread keeps running).
def save_data(self):
    try:
        # FIX: context manager closes the file even if a write fails
        # (the original leaked the handle on error).
        with open('data/er_nick-data.csv', 'wt') as f:
            writer = csv.writer(f)
            for u in self.data:
                record = self.data[u]
                writer.writerow([u, record['id'], record['nick'],
                                 record['level'], record['strength'],
                                 record['rank_points'], record['citizenship']])
    except Exception:
        # FIX: `except Exception` instead of a bare `except:`.
        pass
# Scraper: refresh the cached profile fields for one IRC nick from the
# citizen's public erepublik profile page (best effort).
def update_data(self, irc_nick):
    try:
        id = self.data[irc_nick]['id']
        c = urlopen('http://www.erepublik.com/es/citizen/profile/%d' % id)
        try:
            # FIX: decode the page once instead of once per regex search.
            page = c.read().decode('utf-8')
        finally:
            # FIX: close the connection even when read()/decode() fails.
            c.close()
        entry = self.data[irc_nick]
        entry['nick'] = re.search('<meta name="title" content="(.+?) - Ciudadano del Nuevo Mundo" \/>', page).group(1)
        entry['level'] = int(re.search('<strong class="citizen_level">(.+?)<\/strong>', page, re.DOTALL).group(1))
        entry['strength'] = float(re.search('<span class="military_box_info mb_bottom">(.+?)</span>', page, re.DOTALL).group(1).strip('\r\n\t ').replace(',', ''))
        entry['rank_points'] = int(re.search('<span class="rank_numbers">(.+?) \/', page, re.DOTALL).group(1).replace(',', ''))
        entry['citizenship'] = re.search('<a href="http\:\/\/www.erepublik.com\/es\/country\/society\/([^ \t\n\x0B\f\r]+?)">', page).group(1)
    except Exception:
        # FIX: `except Exception` instead of a bare `except:`; network or
        # parse failures simply leave the cached entry unchanged.
        pass
# Register (or re-register) an IRC nick against a citizen id, then force
# an immediate refresh of the scraped data.
def reg_nick_write(self, nick, id):
    key = nick.lower()
    if key in self.data:
        self.data[key]['id'] = int(id)
    else:
        self.data[key] = {'id': int(id), 'nick': nick, 'level': 1,
                          'strength': 0, 'rank_points': 0, 'citizenship': ''}
    self.update_data(key)
# Return the citizen ID recorded for the given IRC nick.
def get_id(self, nick):
    record = self.data[nick.lower()]
    return record['id']
# Return the level recorded for the given IRC nick.
def get_level(self, nick):
    record = self.data[nick.lower()]
    return record['level']
# Return the strength recorded for the given IRC nick.
def get_strength(self, nick):
    record = self.data[nick.lower()]
    return record['strength']
# Return the rank points recorded for the given IRC nick.
def get_rank_points(self, nick):
    record = self.data[nick.lower()]
    return record['rank_points']
# Metodo para obtener CITIZENSHIP del nick de irc especificado
def get_citizenship(self, nick):
|
"""
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_cod | ed_signal
# Dictionary of 512 atoms observed through 100-dimensional measurements.
n_components, n_features = 512, 100
n_nonzero_coefs = 17

# generate the data
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(
    n_samples=1,
    n_components=n_components,
    n_features=n_features,
    n_nonzero_coefs=n_nonzero_coefs,
    random_state=0,
)
# indices of the truly non-zero coefficients of the ground-truth signal
(idx,) = w.nonzero()

# distort the clean signal
y_noisy = y + 0.05 * np.random.randn(len(y))

# plot the sparse signal
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
# NOTE(review): `use_line_collection` was removed in Matplotlib 3.8 --
# confirm the pinned Matplotlib version still accepts it.
plt.stem(idx, w[idx], use_line_collection=True)

# plot the noise-free reconstruction
# NOTE(review): the `normalize` parameter was removed from the OMP
# estimators in scikit-learn 1.4 -- confirm the pinned version accepts it.
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs, normalize=False)
omp.fit(X, y)
coef = omp.coef_
(idx_r,) = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r], use_line_collection=True)

# plot the noisy reconstruction
omp.fit(X, y_noisy)
coef = omp.coef_
(idx_r,) = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r], use_line_collection=True)

# plot the noisy reconstruction with number of non-zeros set by CV
omp_cv = OrthogonalMatchingPursuitCV(normalize=False)
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
(idx_r,) = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r], use_line_collection=True)

plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle("Sparse signal recovery with Orthogonal Matching Pursuit", fontsize=16)
plt.show()
|
import logging
from django.contrib import messages
from django.contrib.auth import authenticate
from django.core.urlresolvers import reverse
from django.http.response import Http404, HttpResponseRedirect
from django.shortcuts import render, redirect, render_to_response
# Create your views here.
from django.template.context import RequestContext
from rest_framework.authtoken.models import Token
from api.models import App
from ui.forms import LoginForm
log = logging.getLogger(__name__)
def login(request):
    """Authenticate a user and redirect to the requesting app's callback.

    GET renders the login form; POST validates credentials.  When the user
    has already authorized the app identified by ``callback``, we redirect
    straight back with the API token; otherwise we bounce through the
    :func:`auth` consent view.
    """
    # if request.user.is_authenticated():
    #     return redirect('/')
    callback = request.GET.get('callback', '')
    if not callback.endswith("/"):
        callback = callback + "/"
    log.debug("callback %s", callback)
    if request.method == 'POST':
        form = LoginForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username']
            password = form.cleaned_data['password']
            user = authenticate(username=username, password=password)
            if user is not None:
                auth_app = user.crowduser.auth_apps.all()
                try:
                    app = App.objects.get(callback=callback)
                except Exception:
                    raise Http404
                token = Token.objects.get(user=user)
                if app not in auth_app:
                    log.debug("not in app")
                    return redirect(reverse(auth) + "?callback=" + callback + "&token=" + token.key)
                else:
                    log.debug("in app")
                    # NOTE(review): this re-read drops the trailing-slash
                    # normalisation applied above -- confirm intended.
                    callback = request.GET.get('callback', '')
                    # BUG FIX: the original tested `type(callback) == type('')`
                    # which is always true for GET parameters, so this branch
                    # unconditionally raised Http404.  The error case is a
                    # *missing* callback.
                    if not callback:
                        raise Http404
                    token = Token.objects.get(user=user)
                    redirect_to = callback + "?token=" + token.key
                    return HttpResponseRedirect(redirect_to)
            else:
                messages.info(request, 'username and password not valid')
                form.helper.form_action = reverse('login') + '?callback=' + callback
                # BUG FIX: the response was built but never returned, so the
                # view returned None and Django raised an error.
                return render_to_response('ui/login.html', {'form': form},
                                          context_instance=RequestContext(request))
        else:
            form.helper.form_action = reverse('login') + '?callback=' + callback
            # BUG FIX: same missing return as above.
            return render_to_response('ui/login.html', {'form': form},
                                      context_instance=RequestContext(request))
    else:
        form = LoginForm()
        form.helper.form_action = reverse('login') + '?callback=' + callback
        return render_to_response('ui/login.html', {'form': form},
                                  context_instance=RequestContext(request))
def auth(request):
    """Ask the user to authorize an app; on POST, record the authorization
    and redirect back to the app's callback with the token and user id."""
    callback = request.GET.get('callback', '')
    token = request.GET.get('token', '')
    if not callback.endswith("/"):
        callback = callback + "/"
    if request.method == 'POST':
        token = Token.objects.get(key=token)
        app = App.objects.get(callback=callback)
        crowduser = token.user.crowduser
        crowduser.auth_apps.add(app)
        crowduser.save()
        # BUG FIX: `user.pk` is not a string, so concatenating it raised
        # TypeError; convert explicitly.
        redirect_to = callback + "?token=" + token.key + "&id=" + str(crowduser.user.pk)
        return HttpResponseRedirect(redirect_to)
    else:
        app = App.objects.get(callback=callback)
        return render_to_response('ui/app.html', {'app': app, 'callback': callback, 'token': token}, context_instance=RequestContext(request))
|
"""Attachment utils."""
from pathlib import Path
from uuid import uuid4
from blobstash.docstore.error import DocStoreError
from blobstash.filetree import FileTreeClient
_FILETREE_POINTER_FMT = "@filetree/ref:{}"
_FILETREE_ATTACHMENT_FS_PREFIX = "_filetree:docstore"
class Attachment:
    """An attachment represents a file stored in FileTree and tied to the document via a pointer."""

    def __init__(self, pointer, node):
        self.pointer = pointer
        self.node = node

    def __repr__(self):
        return (
            f"blobstash.docstore.attachment.Attachment"
            f"(pointer={self.pointer!r}, node={self.node!r})"
        )
def add_attachment(client, path):
    """Upload *path* (a file or a directory) to FileTree and return a pointer object."""
    local = Path(path)
    if local.is_file():
        # Single file: stream it straight into FileTree.
        with open(local.absolute(), "rb") as fileobj:
            node = FileTreeClient(client=client).fput_node(
                local.name, fileobj, content_type=None
            )
    else:
        # Directory: upload it into a freshly named attachment filesystem.
        fs = FileTreeClient(client=client).fs(
            uuid4().hex, prefix=_FILETREE_ATTACHMENT_FS_PREFIX
        )
        fs.upload(path)
        node = fs.node()
    return Attachment(_FILETREE_POINTER_FMT.format(node.ref), node)
def fadd_attachment(client, name, fileobj, content_type=None):
    """Create a new attachment from *fileobj* with *name* as filename; return a pointer object."""
    tree = FileTreeClient(client=client)
    node = tree.fput_node(name, fileobj, content_type)
    return Attachment(_FILETREE_POINTER_FMT.format(node.ref), node)
def fget_attachment(client, attachment):
    """Return a fileobj (that needs to be closed) with the content of the attachment."""
    node = attachment.node
    if not node.is_dir():
        return FileTreeClient(client=client).fget_node(node)
    # Directories have no single stream; callers must use get_attachment.
    raise DocStoreError(
        "cannot get a fileobj for a directory, please use get_attachment instead"
    )
def get_attachment(client, attachment, path):
    """Download the attachment's content (file or directory) to *path*."""
    node = attachment.node
    if not node.is_file():
        # Directory: restore the whole attachment filesystem.
        FileTreeClient(client=client).fs(
            ref=node.ref, prefix=_FILETREE_ATTACHMENT_FS_PREFIX
        ).download(path)
        return
    FileTreeClient(client=client).get_node(node, path)
|
#! /usr/bin/python
import MDAnalysis
import sys
from pylab import *
# Command line: the trajectory file and a title used for the output image.
my_traj = sys.argv[1]
myTitle = sys.argv[2]
# The same file serves as both topology and trajectory.
u = MDAnalysis.Universe(my_traj,my_traj)
# The hydroxyl hydrogen of residue 18 in segment B.
OH = u.selectAtoms("segid B and resid 18 and name HO")
xArr = []
yArr = []
zArr = []
data = []
# Record the atom's x/y/z coordinate at every trajectory frame.
for ts in u.trajectory:
    xArr.append(OH.coordinates()[0,0])
    yArr.append(OH.coordinates()[0,1])
    zArr.append(OH.coordinates()[0,2])
def normalise(myArray):
    """Return a copy of *myArray* with its arithmetic mean subtracted.

    :param myArray: sequence of numbers
    :return: list of mean-centred values; ``[]`` for an empty input

    FIX: the original raised ZeroDivisionError on an empty sequence; an
    empty input now yields an empty list.
    """
    if not myArray:
        return []
    mean = sum(myArray) / float(len(myArray))
    return [value - mean for value in myArray]
# Mean-centre each coordinate series and box-plot them side by side.
for myArray in (xArr,yArr,zArr):
    newArray = normalise(myArray)
    data.append(newArray)
boxplot(data)
xticks([1,2,3],["x","y","z"])
ylim(-0.4,0.4)
title("%s_HO"%myTitle)
savefig("%s_HO.png"%myTitle)
|
from flask import Flask | , render_template, request, jso | nify, session, redirect, escape, url_for
import bcrypt
class ServerError(Exception):pass
def loginForm(db, form):
    """Validate a username/password pair against the ``users`` table.

    On success the username is stored in the session and ``None`` is
    returned; on failure an error message string is returned.
    """
    error = None
    try:
        username = form['username']
        cur = db.query("SELECT COUNT(1) FROM users WHERE user = %s", [username])
        if not cur.fetchone()[0]:
            raise ServerError('Incorrect username / password')
        password = form['password']
        cur = db.query("SELECT pass FROM users WHERE user = %s;", [username])
        for row in cur.fetchall():
            stored_hash = row[0]
            # bcrypt.hashpw with the stored hash as salt reproduces the hash
            # only when the password matches.
            if bcrypt.hashpw(password.encode('utf-8'), stored_hash) == stored_hash:
                session['username'] = form['username']
                return error
        raise ServerError('Incorrect username / password')
    except ServerError as e:
        error = str(e)
    return error
def registerUser(db, form, ROUNDS):
    """Create a new user with a bcrypt-hashed password.

    Returns ``None`` on success, ``"User exists"`` for duplicates, or an
    error message string when validation fails.
    """
    error = None
    try:
        username = form['username']
        password = form['password']
        email = form['email']
        if not (username and password and email):
            raise ServerError('Fill in all fields')
        hashed = bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt(ROUNDS))
        cur = db.query("SELECT COUNT(*) FROM users WHERE user = %s", [username])
        if cur.fetchone()[0] == 0:
            db.query("INSERT INTO users (`user`, `email`, `pass`) VALUES (%s,%s,%s)",
                     [username, email, hashed])
            return None
        return "User exists"
    except ServerError as e:
        error = str(e)
    return error
def getUsers(db):
    """Return every user as ``[{'name': ..., 'email': ...}, ...]``.

    Returns the sentinel string ``"Failed"`` when the query raises,
    mirroring the original best-effort contract.
    """
    try:
        cur = db.query("SELECT user, email FROM users")
        # FIX: comprehension instead of a manual append loop.
        return [{'name': row[0], 'email': row[1]} for row in cur.fetchall()]
    except Exception:
        # FIX: `except Exception` instead of a bare `except:` that also
        # swallowed SystemExit/KeyboardInterrupt.
        return "Failed"
def deleteUser(db, user):
    """Delete *user* from the ``users`` table.

    Returns ``None`` on success or the sentinel string ``"Failed"`` when
    the query raises (preserving the original contract).
    """
    try:
        db.query("DELETE FROM users WHERE user = %s", [user])
        return None
    except Exception:
        # FIX: `except Exception` instead of a bare `except:`.
        return "Failed"
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import versatileimagefield.fields
import jsonfield.fields
from decimal import Decimal
import saleor.product.models.fields
import django.core.validators
import django_prices.models
import satchless.item
class Migration(migrations.Migration):
    """Initial schema for the product app: category tree, products,
    variants, attributes, images, stock, and fixed discounts."""

    dependencies = [
    ]

    operations = [
        # One selectable value of a ProductAttribute, optionally with a
        # hex colour swatch and an image.
        migrations.CreateModel(
            name='AttributeChoiceValue',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('display', models.CharField(max_length=100, verbose_name='display name')),
                ('color', models.CharField(blank=True, max_length=7, verbose_name='color', validators=[django.core.validators.RegexValidator('^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$')])),
                ('image', versatileimagefield.fields.VersatileImageField(upload_to='attributes', null=True, verbose_name='image', blank=True)),
            ],
        ),
        # Category tree; lft/rght/tree_id/level are MPTT bookkeeping columns.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=128, verbose_name='name')),
                ('slug', models.SlugField(verbose_name='slug')),
                ('description', models.TextField(verbose_name='description', blank=True)),
                ('hidden', models.BooleanField(default=False, verbose_name='hidden')),
                ('lft', models.PositiveIntegerField(editable=False, db_index=True)),
                ('rght', models.PositiveIntegerField(editable=False, db_index=True)),
                ('tree_id', models.PositiveIntegerField(editable=False, db_index=True)),
                ('level', models.PositiveIntegerField(editable=False, db_index=True)),
                ('parent', models.ForeignKey(related_name='children', verbose_name='parent', blank=True, to='product.Category', null=True)),
            ],
            options={
                'verbose_name_plural': 'categories',
            },
        ),
        # Flat discount applied to a set of products.
        migrations.CreateModel(
            name='FixedProductDiscount',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255)),
                ('discount', django_prices.models.PriceField(currency=b'USD', verbose_name='discount value', max_digits=12, decimal_places=2)),
            ],
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=128, verbose_name='name')),
                ('description', models.TextField(verbose_name='description')),
                ('price', django_prices.models.PriceField(currency=b'USD', verbose_name='price', max_digits=12, decimal_places=2)),
                ('weight', saleor.product.models.fields.WeightField(unit=b'lb', verbose_name='weight', max_digits=6, decimal_places=2)),
                ('available_on', models.DateField(null=True, verbose_name='available on', blank=True)),
            ],
            bases=(models.Model, satchless.item.ItemRange),
        ),
        migrations.CreateModel(
            name='ProductAttribute',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.SlugField(unique=True, verbose_name='internal name')),
                ('display', models.CharField(max_length=100, verbose_name='display name')),
            ],
            options={
                'ordering': ['name'],
            },
        ),
        migrations.CreateModel(
            name='ProductImage',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('image', versatileimagefield.fields.VersatileImageField(upload_to='products')),
                ('ppoi', versatileimagefield.fields.PPOIField(default='0.5x0.5', max_length=20, editable=False)),
                ('alt', models.CharField(max_length=128, verbose_name='short description', blank=True)),
                ('order', models.PositiveIntegerField(editable=False)),
                ('product', models.ForeignKey(related_name='images', to='product.Product')),
            ],
            options={
                'ordering': ['order'],
            },
        ),
        # Sellable unit of a Product; per-variant overrides are nullable.
        migrations.CreateModel(
            name='ProductVariant',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('sku', models.CharField(unique=True, max_length=32, verbose_name='SKU')),
                ('name', models.CharField(max_length=100, verbose_name='variant name', blank=True)),
                ('price_override', django_prices.models.PriceField(decimal_places=2, currency=b'USD', max_digits=12, blank=True, null=True, verbose_name='price override')),
                ('weight_override', saleor.product.models.fields.WeightField(decimal_places=2, max_digits=6, blank=True, null=True, verbose_name='weight override', unit=b'lb')),
                ('attributes', jsonfield.fields.JSONField(default={}, verbose_name='attributes')),
                ('product', models.ForeignKey(related_name='variants', to='product.Product')),
            ],
            bases=(models.Model, satchless.item.Item),
        ),
        migrations.CreateModel(
            name='Stock',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('location', models.CharField(max_length=100, verbose_name='location')),
                ('quantity', models.IntegerField(default=Decimal('1'), verbose_name='quantity', validators=[django.core.validators.MinValueValidator(0)])),
                ('cost_price', django_prices.models.PriceField(decimal_places=2, currency=b'USD', max_digits=12, blank=True, null=True, verbose_name='cost price')),
                ('variant', models.ForeignKey(related_name='stock', verbose_name='variant', to='product.ProductVariant')),
            ],
        ),
        # Many-to-many wiring added after the models exist.
        migrations.AddField(
            model_name='product',
            name='attributes',
            field=models.ManyToManyField(related_name='products', null=True, to='product.ProductAttribute', blank=True),
        ),
        migrations.AddField(
            model_name='product',
            name='categories',
            field=models.ManyToManyField(related_name='products', verbose_name='categories', to='product.Category'),
        ),
        migrations.AddField(
            model_name='fixedproductdiscount',
            name='products',
            field=models.ManyToManyField(to='product.Product', blank=True),
        ),
        migrations.AddField(
            model_name='attributechoicevalue',
            name='attribute',
            field=models.ForeignKey(related_name='values', to='product.ProductAttribute'),
        ),
        # Each variant may appear at most once per location.
        migrations.AlterUniqueTogether(
            name='stock',
            unique_together=set([('variant', 'location')]),
        ),
    ]
|
TION_EXCEPTION = '09000'
# PostgreSQL SQLSTATE error codes, grouped by standard SQLSTATE class.
# Class 0A - Feature Not Supported
FEATURE_NOT_SUPPORTED = '0A000'
# Class 0B - Invalid Transaction Initiation
INVALID_TRANSACTION_INITIATION = '0B000'
# Class 0F - Locator Exception
LOCATOR_EXCEPTION = '0F000'
INVALID_LOCATOR_SPECIFICATION = '0F001'
# Class 0L - Invalid Grantor
INVALID_GRANTOR = '0L000'
INVALID_GRANT_OPERATION = '0LP01'
# Class 0P - Invalid Role Specification
INVALID_ROLE_SPECIFICATION = '0P000'
# Class 0Z - Diagnostics Exception
DIAGNOSTICS_EXCEPTION = '0Z000'
STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER = '0Z002'
# Class 20 - Case Not Found
CASE_NOT_FOUND = '20000'
# Class 21 - Cardinality Violation
CARDINALITY_VIOLATION = '21000'
# Class 22 - Data Exception
DATA_EXCEPTION = '22000'
STRING_DATA_RIGHT_TRUNCATION = '22001'
NULL_VALUE_NO_INDICATOR_PARAMETER = '22002'
NUMERIC_VALUE_OUT_OF_RANGE = '22003'
NULL_VALUE_NOT_ALLOWED = '22004'
ERROR_IN_ASSIGNMENT = '22005'
INVALID_DATETIME_FORMAT = '22007'
DATETIME_FIELD_OVERFLOW = '22008'
INVALID_TIME_ZONE_DISPLACEMENT_VALUE = '22009'
ESCAPE_CHARACTER_CONFLICT = '2200B'
INVALID_USE_OF_ESCAPE_CHARACTER = '2200C'
INVALID_ESCAPE_OCTET = '2200D'
ZERO_LENGTH_CHARACTER_STRING = '2200F'
MOST_SPECIFIC_TYPE_MISMATCH = '2200G'
NOT_AN_XML_DOCUMENT = '2200L'
INVALID_XML_DOCUMENT = '2200M'
INVALID_XML_CONTENT = '2200N'
INVALID_XML_COMMENT = '2200S'
INVALID_XML_PROCESSING_INSTRUCTION = '2200T'
INVALID_INDICATOR_PARAMETER_VALUE = '22010'
SUBSTRING_ERROR = '22011'
DIVISION_BY_ZERO = '22012'
INVALID_ARGUMENT_FOR_NTILE_FUNCTION = '22014'
INTERVAL_FIELD_OVERFLOW = '22015'
INVALID_ARGUMENT_FOR_NTH_VALUE_FUNCTION = '22016'
INVALID_CHARACTER_VALUE_FOR_CAST = '22018'
INVALID_ESCAPE_CHARACTER = '22019'
INVALID_REGULAR_EXPRESSION = '2201B'
INVALID_ARGUMENT_FOR_LOGARITHM = '2201E'
INVALID_ARGUMENT_FOR_POWER_FUNCTION = '2201F'
INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION = '2201G'
INVALID_ROW_COUNT_IN_LIMIT_CLAUSE = '2201W'
INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE = '2201X'
INVALID_LIMIT_VALUE = '22020'
CHARACTER_NOT_IN_REPERTOIRE = '22021'
INDICATOR_OVERFLOW = '22022'
INVALID_PARAMETER_VALUE = '22023'
UNTERMINATED_C_STRING = '22024'
INVALID_ESCAPE_SEQUENCE = '22025'
STRING_DATA_LENGTH_MISMATCH = '22026'
TRIM_ERROR = '22027'
ARRAY_SUBSCRIPT_ERROR = '2202E'
INVALID_TABLESAMPLE_REPEAT = '2202G'
INVALID_TABLESAMPLE_ARGUMENT = '2202H'
FLOATING_POINT_EXCEPTION = '22P01'
INVALID_TEXT_REPRESENTATION = '22P02'
INVALID_BINARY_REPRESENTATION = '22P03'
BAD_COPY_FILE_FORMAT = '22P04'
UNTRANSLATABLE_CHARACTER = '22P05'
NONSTANDARD_USE_OF_ESCAPE_CHARACTER = '22P06'
# Class 23 - Integrity Constraint Violation
INTEGRITY_CONSTRAINT_VIOLATION = '23000'
RESTRICT_VIOLATION = '23001'
NOT_NULL_VIOLATION = '23502'
FOREIGN_KEY_VIOLATION = '23503'
UNIQUE_VIOLATION = '23505'
CHECK_VIOLATION = '23514'
EXCLUSION_VIOLATION = '23P01'
# Class 24 - Invalid Cursor State
INVALID_CURSOR_STATE = '24000'
# Class 25 - Invalid Transaction State
INVALID_TRANSACTION_STATE = '25000'
ACTIVE_SQL_TRANSACTION = '25001'
BRANCH_TRANSACTION_ALREADY_ACTIVE = '25002'
INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION = '25003'
INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION = '25004'
NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION = '25005'
READ_ONLY_SQL_TRANSACTION = '25006'
SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED = '25007'
HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL = '25008'
NO_ACTIVE_SQL_TRANSACTION = '25P01'
IN_FAILED_SQL_TRANSACTION = '25P02'
# Class 26 - Invalid SQL Statement Name
INVALID_SQL_STATEMENT_NAME = '26000'
# Class 27 - Triggered Data Change Violation
TRIGGERED_DATA_CHANGE_VIOLATION = '27000'
# Class 28 - Invalid Authorization Specification
INVALID_AUTHORIZATION_SPECIFICATION = '28000'
INVALID_PASSWORD = '28P01'
# Class 2B - Dependent Privilege Descriptors Still Exist
DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST = '2B000'
DEPENDENT_OBJECTS_STILL_EXIST = '2BP01'
# Class 2D - Invalid Transaction Termination
INVALID_TRANSACTION_TERMINATION = '2D000'
# Class 2F - SQL Routine Exception
SQL_ROUTINE_EXCEPTION = '2F000'
MODIFYING_SQL_DATA_NOT_PERMITTED = '2F002'
PROHIBITED_SQL_STATEMENT_ATTEMPTED = '2F003'
READING_SQL_DATA_NOT_PERMITTED = '2F004'
FUNCTION_EXECUTED_NO_RETURN_STATEMENT = '2F005'
# Class 34 - Invalid Cursor Name
INVALID_CURSOR_NAME = '34000'
# Class 38 - External Routine Exception
# NOTE: the three names below repeat class-2F names with class-38 values;
# the later (class 38) assignments win at module level.
EXTERNAL_ROUTINE_EXCEPTION = '38000'
CONTAINING_SQL_NOT_PERMITTED = '38001'
MODIFYING_SQL_DATA_NOT_PERMITTED = '38002'
PROHIBITED_SQL_STATEMENT_ATTEMPTED = '38003'
READING_SQL_DATA_NOT_PERMITTED = '38004'
# Class 39 - External Routine Invocation Exception
EXTERNAL_ROUTINE_INVOCATION_EXCEPTION = '39000'
INVALID_SQLSTATE_RETURNED = '39001'
NULL_VALUE_NOT_ALLOWED = '39004'
TRIGGER_PROTOCOL_VIOLATED = '39P01'
SRF_PROTOCOL_VIOLATED = '39P02'
EVENT_TRIGGER_PROTOCOL_VIOLATED = '39P03'
# Class 3B - Savepoint Exception
SAVEPOINT_EXCEPTION = '3B000'
INVALID_SAVEPOINT_SPECIFICATION = '3B001'
# Class 3D - Invalid Catalog Name
INVALID_CATALOG_NAME = '3D000'
# Class 3F - Invalid Schema Name
INVALID_SCHEMA_NAME = '3F000'
# Class 40 - Transaction Rollback
TRANSACTION_ROLLBACK = '40000'
SERIALIZATION_FAILURE = '40001'
TRANSACTION_INTEGRITY_CONSTRAINT_VIOLATION = '40002'
STATEMENT_COMPLETION_UNKNOWN = '40003'
DEADLOCK_DETECTED = '40P01'
# Class 42 - Syntax Error or Access Rule Violation
SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION = '42000'
INSUFFICIENT_PRIVILEGE = '42501'
SYNTAX_ERROR = '42601'
INVALID_NAME = '42602'
INVALID_COLUMN_DEFINITION = '42611'
NAME_TOO_LONG = '42622'
DUPLICATE_COLUMN = '42701'
AMBIGUOUS_COLUMN = '42702'
UNDEFINED_COLUMN = '42703'
UNDEFINED_OBJECT = '42704'
DUPLICATE_OBJECT = '42710'
DUPLICATE_ALIAS = '42712'
DUPLICATE_FUNCTION = '42723'
AMBIGUOUS_FUNCTION = '42725'
GROUPING_ERROR = '42803'
DATATYPE_MISMATCH = '42804'
WRONG_OBJECT_TYPE = '42809'
INVALID_FOREIGN_KEY = '42830'
CANNOT_COERCE = '42846'
UNDEFINED_FUNCTION = '42883'
RESERVED_NAME = '42939'
UNDEFINED_TABLE = '42P01'
UNDEFINED_PARAMETER = '42P02'
DUPLICATE_CURSOR = '42P03'
DUPLICATE_DATABASE = '42P04'
DUPLICATE_PREPARED_STATEMENT = '42P05'
DUPLICATE_SCHEMA = '42P06'
DUPLICATE_TABLE = '42P07'
AMBIGUOUS_PARAMETER = '42P08'
AMBIGUOUS_ALIAS = '42P09'
INVALID_COLUMN_REFERENCE = '42P10'
INVALID_CURSOR_DEFINITION = '42P11'
INVALID_DATABASE_DEFINITION = '42P12'
INVALID_FUNCTION_DEFINITION = '42P13'
INVALID_PREPARED_STATEMENT_DEFINITION = '42P14'
INVALID_SCHEMA_DEFINITION = '42P15'
INVALID_TABLE_DEFINITION = '42P16'
INVALID_OBJECT_DEFINITION = '42P17'
INDETERMINATE_DATATYPE = '42P18'
INVALID_RECURSION = '42P19'
WINDOWING_ERROR = '42P20'
COLLATION_MISMATCH = '42P21'
INDETERMINATE_COLLATION = '42P22'
# Class 44 - WITH CHECK OPTION Violation
WITH_CHECK_OPTION_VIOLATION = '44000'
# Class 53 - Insufficient Resources
INSUFFICIENT_RESOURCES = '53000'
DISK_FULL = '53100'
OUT_OF_MEMORY = '53200'
TOO_MANY_CONNECTIONS = '53300'
CONFIGURATION_LIMIT_EXCEEDED = '53400'
# Class 54 - Program Limit Exceeded
PROGRAM_LIMIT_EXCEEDED = '54000'
STATEMENT_TOO_COMPLEX = '54001'
TOO_MANY_COLUMNS = '54011'
TOO_MANY_ARGUMENTS = '54023'
# Class 55 - Object Not In Prerequisite State
OBJECT_NOT_IN_PREREQUISITE_STATE = '55000'
OBJECT_IN_USE = '55006'
CANT_CHANGE_RUNTIME_PARAM = '55P02'
LOCK_NOT_AVAILABLE = '55P03'
# Class 57 - Operator Intervention
OPERATOR_INTERVENTION = '57000'
QUERY_CANCELED = '57014'
ADMIN_SHUTDOWN = '57P01'
CRASH_SHUTDOWN = '57P02'
CANNOT_CONNECT_NOW = '57P03'
DATABASE_DROPPED = '57P04'
# Class 58 - System Error (errors external to PostgreSQL itself)
SYSTEM_ERROR = '58000'
IO_ERROR = '58030'
UNDEFINED_FILE = '58P01'
DUPLICATE_FILE = '58P02'
# Class F0 - Configuration File Error
CONFIG_FILE_ERROR = 'F0000'
LOCK_FILE_EXISTS = 'F0001'
# Class HV - Foreign Data Wrapper Error (SQL/MED)
FDW_ERROR = 'HV000'
FDW_OUT_OF_MEMORY = 'HV001'
FDW_DYNAMIC_PARAMETER_VALUE_NEEDED = 'HV002'
FDW_INVALID_DATA_TYPE = 'HV004'
FDW_COLUMN_NAME_NOT_FOUND = 'HV005'
FDW_INVALID_DATA_TYPE_DESCRIPTORS = 'HV006'
FDW_INVALID_COLUMN_NAME = 'HV007'
FDW_INVALID_COLUMN_NUMBER = 'HV008'
FDW_INVALID_USE_OF_NULL_POINTER = 'HV009'
FDW_INVALID_STRING_FORMAT = 'HV00A'
FDW_INVALID_HANDLE = 'HV00B'
FDW_INVALID_OPTION_INDEX = 'HV00C'
FDW_INVALID_OPTION_NAME = 'HV00D'
FDW_O |
"""Autocomplete fields for QuerySetSequence choices."""
from dal_contenttypes.fields import (
ContentTypeModelMultipleFieldMixin,
GenericModelMixin,
)
from django import forms
from django.contrib.contenttypes.models import ContentType
from queryset_sequence import QuerySetSequence
class QuerySetSequenceFieldMixin(object):
    """Shared helpers for form fields whose choices come from a QuerySetSequence."""

    def get_queryset_for_content_type(self, content_type_id):
        """Return the member QuerySet whose model matches the ctype, or None."""
        model = ContentType.objects.get_for_id(content_type_id).model_class()
        return next(
            (qs for qs in self.queryset.query._querysets if qs.model == model),
            None,
        )

    def raise_invalid_choice(self, params=None):
        """
        Raise a ValidationError for invalid_choice.

        The validation error is left unprecise about the exact error for
        security reasons: it prevents an attacker from doing information
        gathering to reverse valid content type and object ids.
        """
        raise forms.ValidationError(
            self.error_messages['invalid_choice'],
            code='invalid_choice',
            params=params,
        )

    def get_content_type_id_object_id(self, value):
        """Split a 'ctypeid-objectid' string into its two parts."""
        return value.split('-', 1)
class QuerySetSequenceModelField(GenericModelMixin,
                                 QuerySetSequenceFieldMixin,
                                 forms.ModelChoiceField):
    """Replacement for ModelChoiceField supporting QuerySetSequence choices."""

    def to_python(self, value):
        """
        Given a string like '3-5', return the model of ctype #3 and pk 5.

        Note that, as for ModelChoiceField, to_python is also in charge of
        security; it is important that results come from self.queryset.
        """
        if not value:
            return value
        ctype_id, obj_id = self.get_content_type_id_object_id(value)
        candidates = self.get_queryset_for_content_type(ctype_id)
        if candidates is None:
            # Unknown content type: reject without leaking details.
            self.raise_invalid_choice()
        try:
            return candidates.get(pk=obj_id)
        except candidates.model.DoesNotExist:
            self.raise_invalid_choice()
class QuerySetSequenceModelMultipleField(ContentTypeModelMultipleFieldMixin,
                                         QuerySetSequenceFieldMixin,
                                         forms.ModelMultipleChoiceField):
    """ModelMultipleChoiceField with support for QuerySetSequence choices."""

    def _deduplicate_values(self, value):
        """Return the distinct submitted values, rejecting unhashable input."""
        try:
            return frozenset(value)
        except TypeError:
            # e.g. a list of lists is not hashable
            raise forms.ValidationError(
                self.error_messages['list'],
                code='list',
            )

    def _get_ctype_objects(self, values):
        """Group object ids by their content type id."""
        grouped = {}
        for item in values:
            ctype_id, obj_id = self.get_content_type_id_object_id(item)
            grouped.setdefault(ctype_id, []).append(obj_id)
        return grouped

    def _get_queryset_for_pks(self, pks):
        """Build one QuerySetSequence filtering each member by its pks."""
        filtered = []
        for ctype_id, obj_ids in pks.items():
            queryset = self.get_queryset_for_content_type(ctype_id)
            if queryset is None:
                self.raise_invalid_choice(
                    params=dict(value='%s-%s' % (ctype_id, obj_ids[0]))
                )
            filtered.append(queryset.filter(pk__in=obj_ids))
        return QuerySetSequence(*filtered)

    def _check_values(self, value):
        """Validate every submitted 'ctype-pk' value against the queryset."""
        distinct = self._deduplicate_values(value)
        grouped = self._get_ctype_objects(distinct)
        queryset = self._get_queryset_for_pks(grouped)
        fetched = [
            '%s-%s' % (ContentType.objects.get_for_model(obj).pk, obj.pk)
            for obj in queryset
        ]
        for item in value:
            if item not in fetched:
                self.raise_invalid_choice(params={'value': item})
        return queryset
|
# -*- coding: utf-8 -*-
"""This module contains custom roles to use in Sphinx.
"""
from docutils import nodes
def setup(app):
    """Install the extension.

    Parameters:
        app: Sphinx application context.
    """
    # Registers the ':arts:' role, rendered by arts_docserver_role below.
    app.add_role('arts', arts_docserver_role)
def arts_docserver_role(name, rawtext, text, lineno, inliner, options=None,
                        content=None):
    """Create a link to ARTS docserver.

    Parameters:
        name (str): The role name used in the document.
        rawtext (str): The entire markup snippet, with role.
        text (str): The text marked with the role.
        lineno (str): The line number where rawtext appears in the input.
        inliner (str): The inliner instance that called us.
        options (dict): Directive options for customization.
        content (list): The directive content for customization.

    Returns:
        list, list: Nodes to insert into the document, System messages.
    """
    # Normalise the mutable defaults without sharing state across calls.
    content = [] if content is None else content
    options = {} if options is None else options
    url = 'http://radiativetransfer.org/docserver-trunk/all/{}'.format(text)
    link = nodes.reference(rawtext, text, refuri=url, **options)
    # No system messages are produced.
    return [link], []
|
--------------------------------------------------------------------
"""
Reference GENI GCF Clearinghouse. Uses SFA Certificate and credential objects.
Run from gcf-ch.py
Will produce signed user credentials from a GID, return a
list of aggregates read from a config file, and create a new Slice Credential.
"""
import datetime
import traceback
import uuid
import os
import dateutil.parser
from SecureXMLRPCServer import SecureXMLRPCServer
from expedient.common.federation.geni.util import cred_util
from expedient.common.federation.geni.util import cert_util
from expedient.common.federation.geni.util import urn_util
from expedient.common.federation.sfa.trust import gid
# Substitute eg "openflow//stanford"
# Be sure this matches init-ca.py:CERT_AUTHORITY
# This is in publicid format
SLICE_AUTHORITY = "geni//gpo//gcf"
# Credential lifetimes in seconds
# Extend slice lifetimes to actually use the resources
# NOTE: SLICE_AUTHORITY, USER_CRED_LIFE and SLICE_CRED_LIFE are defaults
# only -- Clearinghouse.runserver() reassigns all three from its arguments.
USER_CRED_LIFE = 86400
SLICE_CRED_LIFE = 3600
# Make the max life of a slice 30 days (an arbitrary length).
SLICE_MAX_LIFE_SECS = 30 * 24 * 60 * 60
# The list of Aggregates that this Clearinghouse knows about
# should be defined in the gcf_config file in the am_* properties.
# ListResources will refer the client to these aggregates
# Clearinghouse.runserver currently does the register_aggregate_pair
# calls for each row in that file
# but this should be doable dynamically
# Some sample pairs:
# GPOMYPLC = ('urn:publicid:IDN+plc:gpo1+authority+sa',
# 'http://myplc1.gpolab.bbn.com:12348')
# TESTGCFAM = ('urn:publicid:IDN+geni.net:gpo+authority+gcf',
# 'https://127.0.0.1:8001')
# OTHERGPOMYPLC = ('urn:publicid:IDN+plc:gpo+authority+site2',
# 'http://128.89.81.74:12348')
# ELABINELABAM = ('urn:publicid:IDN+elabinelab.geni.emulab.net',
# 'https://myboss.elabinelab.geni.emulab.net:443/protogeni/xmlrpc/am')
class SampleClearinghouseServer(object):
    """A sample clearinghouse with barebones functionality.

    Thin XML-RPC facade: every public method forwards to the delegate
    implementation supplied at construction time.
    """

    def __init__(self, delegate):
        self._delegate = delegate

    def GetVersion(self):
        """Forward GetVersion to the delegate."""
        return self._delegate.GetVersion()

    def CreateSlice(self, urn=None):
        """Forward CreateSlice, mapping ``urn`` to the delegate's ``urn_req``."""
        return self._delegate.CreateSlice(urn_req=urn)

    def RenewSlice(self, urn, expire_str):
        """Forward RenewSlice, logging any error before re-raising."""
        try:
            return self._delegate.RenewSlice(urn, expire_str)
        except:
            # Record the full traceback via the delegate's logger, then let
            # the exception propagate to the RPC layer.
            self._delegate.logger.error(traceback.format_exc())
            raise

    def DeleteSlice(self, urn):
        """Forward DeleteSlice to the delegate."""
        return self._delegate.DeleteSlice(urn)

    def ListAggregates(self):
        """Forward ListAggregates to the delegate."""
        return self._delegate.ListAggregates()

    def CreateUserCredential(self, cert):
        """Forward CreateUserCredential to the delegate."""
        return self._delegate.CreateUserCredential(cert)
class Clearinghouse(object):
    def __init__(self):
        """Initialise an empty clearinghouse: logger, slice cache, AM list."""
        self.logger = cred_util.logging.getLogger('gcf-ch')
        # Map of slice urn -> slice credential (populated by CreateSlice).
        self.slices = {}
        # List of (urn, url) aggregate pairs (populated by load_aggregates).
        self.aggs = []
def load_aggregates(self):
"""Loads aggregates from the clearinghouse section of the config file.
In the config section there are keys for each am, am_1, am_2, ..., am_n
The value for each key is the urn and url of the aggregate separated by a comma
Returns True if aggregates were loaded, False otherwise.
"""
for (key, val) in self.config['clearinghouse'].items():
if not key.startswith('am_'):
continue
(urn,url) = val.split(',')
urn = urn.strip()
url = url.strip()
if not urn:
self.logger.warn('Empty URN for aggregate %s in gcf_config' % key)
continue
if not url:
self.logger.warn('Empty URL for aggregate %s in gcf_config' % key)
continue
if urn in [x for (x, _) in self.aggs]:
self.logger.warn('Duplicate URN %s in gcf_config' % key)
continue
self.logger.info("Registering AM %s at %s", urn, url)
self.aggs.append((urn, url))
    def runserver(self, addr, keyfile=None, certfile=None,
                  ca_certs=None, authority=None,
                  user_len=None, slice_len=None, config=None):
        """Run the clearinghouse server; blocks in serve_forever().

        :param addr: (host, port) tuple to listen on.
        :param keyfile: path to the CH private key file (must exist).
        :param certfile: path to the CH certificate file (must exist).
        :param ca_certs: file or directory of trusted roots; defaults to
            certfile when not supplied.
        :param authority: stored into the module-level SLICE_AUTHORITY.
        :param user_len: seconds, stored into USER_CRED_LIFE (must be int-able).
        :param slice_len: seconds, stored into SLICE_CRED_LIFE (must be int-able).
        :param config: parsed gcf_config contents; read by load_aggregates().
        """
        # ca_certs is a dir of several certificates for peering
        # If not supplied just use the certfile as the only trusted root
        self.keyfile = keyfile
        self.certfile = certfile
        self.config = config
        # Error check the keyfile, certfile all exist
        if keyfile is None or not os.path.isfile(os.path.expanduser(keyfile)):
            raise Exception("Missing CH key file %s" % keyfile)
        if certfile is None or not os.path.isfile(os.path.expanduser(certfile)):
            raise Exception("Missing CH cert file %s" % certfile)
        if ca_certs is None:
            ca_certs = certfile
            self.logger.info("Using only my CH cert as a trusted root cert")
        # NOTE(review): the CredentialVerifier is constructed before the
        # existence check below, so a missing ca_certs path may fail here
        # first -- confirm whether the ordering is intentional.
        self.trusted_root_files = cred_util.CredentialVerifier(ca_certs).root_cert_files
        if not os.path.exists(os.path.expanduser(ca_certs)):
            raise Exception("Missing CA cert(s): %s" % ca_certs)
        # Publish credential parameters via module-level globals.
        global SLICE_AUTHORITY, USER_CRED_LIFE, SLICE_CRED_LIFE
        SLICE_AUTHORITY = authority
        USER_CRED_LIFE = int(user_len)
        SLICE_CRED_LIFE = int(slice_len)
        # Load up the aggregates
        self.load_aggregates()
        # This is the arg to _make_server
        ca_certs_onefname = cred_util.CredentialVerifier.getCAsFileFromDir(ca_certs)
        # This is used below by CreateSlice
        self.ca_cert_fnames = []
        if os.path.isfile(os.path.expanduser(ca_certs)):
            self.ca_cert_fnames = [os.path.expanduser(ca_certs)]
        elif os.path.isdir(os.path.expanduser(ca_certs)):
            self.ca_cert_fnames = [os.path.join(os.path.expanduser(ca_certs), name) for name in os.listdir(os.path.expanduser(ca_certs)) if name != cred_util.CredentialVerifier.CATEDCERTSFNAME]
        # Create the xmlrpc server, load the rootkeys and do the ssl thing.
        self._server = self._make_server(addr, keyfile, certfile,
                                         ca_certs_onefname)
        self._server.register_instance(SampleClearinghouseServer(self))
        self.logger.info('GENI CH Listening on port %d...' % (addr[1]))
        self._server.serve_forever()
def _make_server(self, addr, keyfile=None, certfile=None,
ca_certs=None):
"""Creates the XML RPC server."""
# ca_certs is a file of concatenated certs
return SecureXMLRPCServer(addr, keyfile=keyfile, certfile=certfile,
ca_certs=ca_certs)
def GetVersion(self):
self.logger.info("Called GetVersion")
version = dict()
version['gcf-ch_api'] = 1
return version
# FIXME: Change that URN to be a name and non-optional
# Currently gcf-test.py doesnt supply it, and
# Omni takes a name and constructs a URN to supply
def CreateSlice(self, urn_req = None):
self.logger.info("Called CreateSlice URN REQ %r" % urn_req)
slice_gid = None
if urn_req and self.slices.has_key(urn_req):
# If the Slice has expired, treat this as
# a request to renew
slice_cred = self.slices[urn_req]
if slice_cred.expiration <= datetime.datetime.utcnow():
# Need to renew this slice
self.logger.info("CreateSlice on %r found existing cred that expired at %r - will renew", urn_req, slice_cred.expiration)
slice_gid = slice_cred.get_gid_object()
else:
self.logger.debug("Slice cred is still valid at %r until %r - return it", datetime.datetime.utcnow(), slice_cred.expiration)
return slice_cred.save_to_string()
# First ensure we have a slice_urn
if urn_req:
# FIXME: Validate urn_req has the right form
# to be issued by this CH
if not urn_ut |
class ParserError(Exception):
    """Raised when the word stream cannot be parsed into a Sentence."""
    pass
class Sentence(object):
    """Holds the words of a parsed subject/verb/object triple."""

    def __init__(self, subject, verb, obj):
        # Each argument is a ('type', 'word') tuple; keep only the word part.
        self.subject = subject[1]
        self.verb = verb[1]
        self.object = obj[1]
def peek(word_list):
    """Return the type of the first word without consuming it, or None."""
    if not word_list:
        return None
    return word_list[0][0]
def match(word_list, expecting):
    """Pop the first word; return it if its type matches, else None.

    Note: the first word is consumed even when the type does not match;
    an empty list also yields None.
    """
    if not word_list:
        return None
    word = word_list.pop(0)
    return word if word[0] == expecting else None
def skip(word_list, word_type):
    """Consume leading words of the given type from the front of the list."""
    # Inlined peek/match: drop words while the head is of word_type.
    while word_list and word_list[0][0] == word_type:
        word_list.pop(0)
def parse_verb(word_list):
    """Consume stop words, then require and return the next verb."""
    skip(word_list, 'stop')
    if peek(word_list) != 'verb':
        raise ParserError("Expected a verb next.")
    return match(word_list, 'verb')
def parse_object(word_list):
    """Consume stop words, then return the next noun or direction."""
    skip(word_list, 'stop')
    kind = peek(word_list)
    if kind in ('noun', 'direction'):
        return match(word_list, kind)
    raise ParserError("Expected a noun or direction next.")
def parse_subject(word_list):
    """Parse the sentence subject.

    Consumes leading stop words.  An explicit noun becomes the subject;
    a verb means the subject was implied, so ('noun', 'player') is
    returned without consuming the verb.

    Raises ParserError if the next word is neither a noun nor a verb.
    """
    skip(word_list, 'stop')
    next_word = peek(word_list)
    if next_word == 'noun':
        return match(word_list, 'noun')
    elif next_word == 'verb':
        return ('noun', 'player')
    else:
        # Fixed misleading message: this branch is reached when the next
        # word is neither a noun nor a verb.
        raise ParserError("Expected a noun or verb next.")
def parse_sentence(word_list):
    """Parse subject, then verb, then object, and build a Sentence."""
    # Arguments are evaluated left to right, preserving the original
    # subject -> verb -> object consumption order.
    return Sentence(parse_subject(word_list),
                    parse_verb(word_list),
                    parse_object(word_list))
|
xmako should be
# integrated better with Django's rendering and event system.
response = render_to_response(path, context)
response.mako_context = context
response.mako_template = path
return response
@ddt.ddt
class TestInstructorDashboard(ModuleStoreTestCase, LoginEnrollmentTestCase, XssTestMixin):
"""
Tests for the instructor dashboard (not legacy).
"""
    def setUp(self):
        """
        Set up tests: courses, a paid course mode, and a logged-in instructor.
        """
        super(TestInstructorDashboard, self).setUp()
        # display_name carries a script payload so XssTestMixin checks can
        # verify the dashboard escapes it.
        self.course = CourseFactory.create(
            grading_policy={"GRADE_CUTOFFS": {"A": 0.75, "B": 0.63, "C": 0.57, "D": 0.5}},
            display_name='<script>alert("XSS")</script>'
        )
        self.course_mode = CourseMode(
            course_id=self.course.id,
            mode_slug=CourseMode.DEFAULT_MODE_SLUG,
            mode_display_name=CourseMode.DEFAULT_MODE.name,
            min_price=40
        )
        # Second course whose org/number/run/name are asserted in
        # test_instructor_course_info.
        self.course_info = CourseFactory.create(
            org="ACME",
            number="001",
            run="2017",
            name="How to defeat the Road Runner"
        )
        self.course_mode.save()
        # Create instructor account
        self.instructor = AdminFactory.create()
        self.client.login(username=self.instructor.username, password="test")
        # URL for instructor dash
        self.url = reverse('instructor_dashboard', kwargs={'course_id': text_type(self.course.id)})
    def get_dashboard_enrollment_message(self):
        """
        Returns expected dashboard enrollment message with link to Insights.
        """
        # Must match the rendered dashboard markup exactly -- presumably
        # compared against response content by caller tests (confirm there).
        return u'Enrollment data is now available in <a href="http://example.com/courses/{}" ' \
               'rel="noopener" target="_blank">Example</a>.'.format(text_type(self.course.id))
    def get_dashboard_analytics_message(self):
        """
        Returns expected dashboard demographic message with link to Insights.
        """
        # Must match the rendered dashboard markup exactly -- presumably
        # compared against response content by caller tests (confirm there).
        return u'For analytics about your course, go to <a href="http://example.com/courses/{}" ' \
               'rel="noopener" target="_blank">Example</a>.'.format(text_type(self.course.id))
    def test_instructor_tab(self):
        """
        Verify that the instructor tab appears for staff only.
        """
        def has_instructor_tab(user, course):
            """Returns true if the "Instructor" tab is shown."""
            tabs = get_course_tab_list(user, course)
            return len([tab for tab in tabs if tab.name == 'Instructor']) == 1
        self.assertTrue(has_instructor_tab(self.instructor, self.course))
        staff = StaffFactory(course_key=self.course.id)
        self.assertTrue(has_instructor_tab(staff, self.course))
        student = UserFactory.create()
        self.assertFalse(has_instructor_tab(student, self.course))
        # Course-scoped data researcher sees the tab.
        researcher = UserFactory.create()
        CourseAccessRoleFactory(
            course_id=self.course.id,
            user=researcher,
            role='data_researcher',
            org=self.course.id.org
        )
        self.assertTrue(has_instructor_tab(researcher, self.course))
        # Org-wide data researcher (course_id=None) also sees the tab.
        org_researcher = UserFactory.create()
        CourseAccessRoleFactory(
            course_id=None,
            user=org_researcher,
            role='data_researcher',
            org=self.course.id.org
        )
        self.assertTrue(has_instructor_tab(org_researcher, self.course))
    @ddt.data(
        ('staff', False),
        ('instructor', False),
        ('data_researcher', True),
        ('global_staff', True),
    )
    @ddt.unpack
    def test_data_download(self, access_role, can_access):
        """
        Verify that the Data Download tab only shows up for certain roles.

        ddt supplies (access_role, can_access) pairs: only data researchers
        and global staff should see the section.
        """
        # Exact markup of the Data Download nav entry in the dashboard.
        download_section = '<li class="nav-item"><button type="button" class="btn-link data_download" '\
                           'data-section="data_download">Data Download</button></li>'
        # 'global_staff' is modelled via the is_staff flag rather than a role.
        user = UserFactory.create(is_staff=access_role == 'global_staff')
        CourseAccessRoleFactory(
            course_id=self.course.id,
            user=user,
            role=access_role,
            org=self.course.id.org
        )
        self.client.login(username=user.username, password="test")
        response = self.client.get(self.url)
        if can_access:
            self.assertContains(response, download_section)
        else:
            self.assertNotContains(response, download_section)
    @override_settings(ANALYTICS_DASHBOARD_URL='http://example.com')
    @override_settings(ANALYTICS_DASHBOARD_NAME='Example')
    def test_data_download_only(self):
        """
        Verify that only the data download tab is visible for data researchers.
        """
        user = UserFactory.create()
        CourseAccessRoleFactory(
            course_id=self.course.id,
            user=user,
            role='data_researcher',
            org=self.course.id.org
        )
        self.client.login(username=user.username, password="test")
        response = self.client.get(self.url)
        # Count rendered dashboard section buttons; a data researcher must
        # see exactly one (the response body is bytes, hence the rb pattern).
        matches = re.findall(
            rb'<li class="nav-item"><button type="button" class="btn-link .*" data-section=".*">.*',
            response.content
        )
        assert len(matches) == 1
    @ddt.data(
        ("How to defeat the Road Runner", "2017", "001", "ACME"),
    )
    @ddt.unpack
    def test_instructor_course_info(self, display_name, run, number, org):
        """
        Verify that the dashboard shows the correct course information
        (values match the self.course_info fixture created in setUp).
        """
        url = reverse(
            'instructor_dashboard',
            kwargs={
                'course_id': six.text_type(self.course_info.id)
            }
        )
        response = self.client.get(url)
        # Parse the response with pyquery and read each info field's text.
        content = pq(response.content)
        self.assertEqual(
            display_name,
            content('#field-course-display-name b').contents()[0].strip()
        )
        self.assertEqual(
            run,
            content('#field-course-name b').contents()[0].strip()
        )
        self.assertEqual(
            number,
            content('#field-course-number b').contents()[0].strip()
        )
        self.assertEqual(
            org,
            content('#field-course-organization b').contents()[0].strip()
        )
    @ddt.data(True, False)
    def test_membership_reason_field_visibility(self, enbale_reason_field):
        """
        Verify that reason field is enabled by site configuration flag
        'ENABLE_MANUAL_ENROLLMENT_REASON_FIELD'.
        """
        # NOTE(review): parameter name 'enbale_reason_field' is a typo for
        # 'enable_reason_field'; ddt passes it positionally, so renaming
        # would be safe -- consider fixing.
        configuration_values = {
            "ENABLE_MANUAL_ENROLLMENT_REASON_FIELD": enbale_reason_field
        }
        site = Site.objects.first()
        SiteConfiguration.objects.create(
            site=site,
            site_values=configuration_values,
            enabled=True
        )
        url = reverse(
            'instructor_dashboard',
            kwargs={
                'course_id': six.text_type(self.course_info.id)
            }
        )
        response = self.client.get(url)
        # Exact markup of the optional reason textarea.
        reason_field = '<textarea rows="2" id="reason-field-id" name="reason-field" ' \
                       'placeholder="Reason" spellcheck="false"></textarea>'
        if enbale_reason_field:
            self.assertContains(response, reason_field)
        else:
            self.assertNotContains(response, reason_field)
def test_membership_site_configuration_role(self):
"""
Verify that the role choices set via site configuration are loaded in the membership tab
of the instructor dashboard
"""
configuration_values = {
"MANUAL_ENROLLMENT_ROLE_CHOICES": [
"role1",
"role2",
]
}
site = Site.objects.first()
SiteConfiguration.objects.create(
site=site,
site_values=configuration_values,
enabled=True
)
url = reverse(
'instructor_dashboard',
kwargs={
'course_id': six.text_type(self.course_info.id)
}
)
response = self.client.get(url)
self.assertContains(response, '<option value="role1">role1</option>')
self.assertContains(res |
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The Zuul module adds triggers that configure jobs for use with Zuul_.
To change the Zuul notification URL, set a global default::
- defaults:
name: global
zuul-url: http://127.0.0.1:8001/jenkins_endpoint
The above URL is the default.
.. _Zuul: http://ci.openstack.org/zuul/
"""
# Marker trigger: never executed as code.  Zuul.handle_data (below) removes
# the 'zuul' entry from a job's triggers and injects ZUUL_PARAMETERS.
def zuul():
    """yaml: zuul
    Configure this job to be triggered by Zuul.
    Example::
        triggers:
          - zuul
    """
# Marker trigger: never executed as code.  Zuul.handle_data (below) removes
# the 'zuul-post' entry from a job's triggers and injects ZUUL_POST_PARAMETERS.
def zuul_post():
    """yaml: zuul-post
    Configure this post-merge job to be triggered by Zuul.
    Example::
        triggers:
          - zuul-post
    """
import jenkins_jobs.modules.base
# Parameters injected into jobs triggered from change (pre-merge) pipelines.
# GERRIT_* names are the legacy aliases kept alongside the ZUUL_* names.
ZUUL_PARAMETERS = [
    {'string': {'name': 'ZUUL_UUID',
                'description': 'Zuul provided key to link builds with Gerrit events'}},
    {'string': {'name': 'UUID',
                'description': 'Zuul provided key to link builds with Gerrit'
                               ' events (deprecated use ZUUL_UUID instead)'}},
    {'string': {'name': 'ZUUL_PIPELINE',
                'description': 'Zuul pipeline triggering this job'}},
    {'string': {'name': 'GERRIT_PROJECT',
                'description': 'Zuul provided project name'}},
    {'string': {'name': 'ZUUL_PROJECT',
                'description': 'Branch name of triggering project'}},
    {'string': {'name': 'GERRIT_BRANCH',
                'description': 'Zuul provided branch name'}},
    {'string': {'name': 'ZUUL_BRANCH',
                'description': 'Branch name of triggering change'}},
    {'string': {'name': 'GERRIT_CHANGES',
                'description': 'Zuul provided list of dependent changes to merge'}},
    {'string': {'name': 'ZUUL_CHANGES',
                'description': 'List of dependent changes to merge'}},
    {'string': {'name': 'ZUUL_REF',
                'description': 'Reference for the merged commit(s) to use'}},
    {'string': {'name': 'ZUUL_COMMIT',
                'description': 'The commit SHA1 at the head of ZUUL_REF'}},
    {'string': {'name': 'ZUUL_CHANGE_IDS',
                'description': 'List of included changes'}},
    {'string': {'name': 'ZUUL_CHANGE',
                'description': 'ID of triggering change'}},
    {'string': {'name': 'ZUUL_PATCHSET',
                'description': 'Patchset of triggering change'}},
]
# Parameters injected into post-merge (ref-updated) jobs.
ZUUL_POST_PARAMETERS = [
    {'string': {'name': 'ZUUL_UUID',
                'description': 'Zuul provided key to link builds with Gerrit events'}},
    {'string': {'name': 'UUID',
                'description': 'Zuul provided key to link builds with Gerrit'
                               ' events (deprecated use ZUUL_UUID instead)'}},
    {'string': {'name': 'ZUUL_PIPELINE',
                'description': 'Zuul pipeline triggering this job'}},
    {'string': {'name': 'GERRIT_PROJECT',
                'description': 'Zuul provided project name'}},
    {'string': {'name': 'ZUUL_PROJECT',
                'description': 'Branch name of triggering project'}},
    {'string': {'name': 'GERRIT_REFNAME',
                'description': 'Zuul provided ref name'}},
    {'string': {'name': 'ZUUL_REF',
                'description': 'Name of updated reference triggering this job'}},
    {'string': {'name': 'ZUUL_REFNAME',
                'description': 'Name of updated reference triggering this job'}},
    {'string': {'name': 'GERRIT_OLDREV',
                'description': 'Zuul provided old reference for ref-updated'}},
    {'string': {'name': 'ZUUL_OLDREV',
                'description': 'Old SHA at this reference'}},
    {'string': {'name': 'GERRIT_NEWREV',
                'description': 'Zuul provided new reference for ref-updated'}},
    {'string': {'name': 'ZUUL_NEWREV',
                'description': 'New SHA at this reference'}},
    {'string': {'name': 'ZUUL_SHORT_NEWREV',
                'description': 'Shortened new SHA at this reference'}},
]
# Default Zuul notification endpoint; overridable via the global default
# 'zuul-url' (see the module docstring).
DEFAULT_URL = 'http://127.0.0.1:8001/jenkins_endpoint'
class Zuul(jenkins_jobs.modules.base.Base):
    """JJB module that expands the pseudo-triggers 'zuul' and 'zuul-post'.

    For every job (or job-template) listing one of these triggers, the
    trigger entry is removed and replaced by the corresponding Zuul
    parameter definitions plus an HTTP notification endpoint.
    """
    sequence = 0

    def handle_data(self, parser):
        """Expand zuul/zuul-post triggers in the parsed job data.

        :param parser: JJB parser whose ``data`` dict is edited in place.
        :returns: True if any job definition was changed.
        """
        changed = False
        # Fix: wrap the dict views in list() so they can be concatenated;
        # dict.values() stopped returning a list in Python 3.
        jobs = (list(parser.data.get('job', {}).values()) +
                list(parser.data.get('job-template', {}).values()))
        for job in jobs:
            triggers = job.get('triggers')
            if not triggers:
                continue
            if ('zuul' not in job.get('triggers', []) and
                    'zuul-post' not in job.get('triggers', [])):
                continue
            if 'parameters' not in job:
                job['parameters'] = []
            if 'notifications' not in job:
                job['notifications'] = []
            # This isn't a good pattern, and somewhat violates the
            # spirit of the global defaults, but Zuul is working on
            # a better design that should obviate the need for most
            # of this module, so this gets it done with minimal
            # intrusion to the rest of JJB.
            if parser.data.get('defaults', {}).get('global'):
                url = parser.data['defaults']['global'].get(
                    'zuul-url', DEFAULT_URL)
                notifications = [{'http': {'url': url}}]
                job['notifications'].extend(notifications)
            if 'zuul' in job.get('triggers', []):
                job['parameters'].extend(ZUUL_PARAMETERS)
                job['triggers'].remove('zuul')
            if 'zuul-post' in job.get('triggers', []):
                job['parameters'].extend(ZUUL_POST_PARAMETERS)
                job['triggers'].remove('zuul-post')
            # Reaching this point means a zuul trigger was present and
            # rewritten above.
            changed = True
        return changed
|
from django.conf.urls.defaults import *
urlpatterns = patte | rns('mmda.tags.views',
url(r'^(?P<tag_id>.+?)/$', 'show | _tag', name='show-tag')
)
|
ns import \
ContinuousHazardPopulationMetadata
from safe.impact_functions.bases.continuous_rh_continuous_re import \
ContinuousRHContinuousRE
from safe.impact_functions.impact_function_manager import ImpactFunctionManager
from safe.impact_functions.core import (
evacuated_population_needs,
population_rounding,
has_no_data)
from safe.storage.raster import Raster
from safe.utilities.i18n import tr
from safe.common.utilities import format_int
from safe.common.tables import Table, TableRow
from safe.common.utilities import create_classes, create_label, humanize_class
from safe.common.exceptions import (
FunctionParametersError, ZeroImpactException)
from safe.gui.tools.minimum_needs.needs_profile import add_needs_parameters, \
filter_needs_parameters
# Module authorship / revision metadata.
__author__ = 'lucernae'
__date__ = '24/03/15'
__revision__ = '$Format:%H$'
__copyright__ = ('Copyright 2014, Australia Indonesia Facility for '
                 'Disaster Reduction')
class ContinuousHazardPopulationFunction(ContinuousRHContinuousRE):
# noinspection PyUnresolvedReferences
"""Plugin for impact of population as derived by continuous hazard."""
_metadata = ContinuousHazardPopulationMetadata()
    def __init__(self):
        """Initialise the function and attach minimum-needs parameters."""
        super(ContinuousHazardPopulationFunction, self).__init__()
        self.impact_function_manager = ImpactFunctionManager()
        # AG: Use the proper minimum needs, update the parameters
        self.parameters = add_needs_parameters(self.parameters)
    def _tabulate(
            self,
            high,
            low,
            medium,
            question,
            total_impact):
        """Build the table body rows for the PDF map impact report.

        :param high: People count in the high hazard area.
        :param low: People count in the low hazard area.
        :param medium: People count in the medium hazard area.
        :param question: Impact question string used as the first row.
        :param total_impact: Total people impacted.
        :returns: List of the question string followed by TableRow headers.

        NOTE(review): parameter order is high, low, medium (not high,
        medium, low) -- keep call sites in sync.
        """
        # Generate impact report for the pdf map
        table_body = [
            question,
            TableRow([tr('People impacted '),
                      '%s' % format_int(total_impact)],
                     header=True),
            TableRow([tr('People in high hazard area '),
                      '%s' % format_int(high)],
                     header=True),
            TableRow([tr('People in medium hazard area '),
                      '%s' % format_int(medium)],
                     header=True),
            TableRow([tr('People in low hazard area'),
                      '%s' % format_int(low)],
                     header=True)]
        return table_body
    def _tabulate_notes(
            self,
            minimum_needs,
            table_body,
            total,
            total_impact,
            no_data_warning):
        """Append the notes section and minimum-needs tables to *table_body*.

        *table_body* is mutated in place and also returned.
        @return: (table_body, total_needs)
        """
        # Extend impact report for on-screen display
        table_body.extend([
            TableRow(tr('Notes'), header=True),
            tr('Map shows population count in high, medium, and low hazard '
               'area.'),
            tr('Total population: %s') % format_int(total),
            TableRow(tr(
                'Table below shows the minimum needs for all '
                'affected people'))])
        if no_data_warning:
            table_body.extend([
                tr('The layers contained `no data`. This missing data was '
                   'carried through to the impact layer.'),
                tr('`No data` values in the impact layer were treated as 0 '
                   'when counting the affected or total population.')
            ])
        total_needs = evacuated_population_needs(
            total_impact, minimum_needs)
        # One header row per delivery frequency, then one row per resource.
        for frequency, needs in total_needs.items():
            table_body.append(TableRow(
                [
                    tr('Needs should be provided %s' % frequency),
                    tr('Total')
                ],
                header=True))
            for resource in needs:
                table_body.append(TableRow([
                    tr(resource['table name']),
                    format_int(resource['amount'])]))
        return table_body, total_needs
def run(self):
"""Plugin for impact of population as derived by continuous hazard.
Hazard is reclassified into 3 classes based on the extrema provided
as impact function parameters.
Counts number of people exposed to each category of the hazard
:returns:
Map of population exposed to high category
Table with number of people in each category
"""
self.validate()
self.prepare()
thresholds = [
p.value for p in self.parameters['Categorical thresholds'].value]
# Thresholds must contain 3 thresholds
if len(thresholds) != 3:
raise FunctionParametersError(
'The thresholds must consist of 3 values.')
# Thresholds must monotonically increasing
monotonically_increasing_flag = all(
x < y for x, y in zip(thresholds, thresholds[1:]))
if not monotonically_increasing_flag:
raise FunctionParametersError(
'Each threshold should be larger than the previous.')
# The 3 categories
low_t = thresholds[0]
medium_t = thresholds[1]
high_t = thresholds[2]
# Extract data as numeric arrays
hazard_data = self.hazard.layer.get_data(nan=True) # Category
no_data_warning = False
if has_no_data(hazard_data):
no_data_warning = True
# Calculate impact as population exposed to each category
exposure_data = self.exposure.layer.get_data(nan=True, scaling=True)
if has_no_data(exposure_data):
no_data_warning = True
# Make 3 data for each zone. Get the value of the exposure if the
# exposure is in the hazard zone, else just assign 0
low_exposure = numpy.where(hazard_data < low_t, exposure_data, 0)
medium_exposure = numpy.where(
(hazard_data >= low_t) & (hazard_data < medium_t),
exposure_data, 0)
high_exposure = numpy.where(
(hazard_data >= medium_t) & (hazard_data <= high_t),
exposure_data, 0)
impacted_exposure = low_exposure + medium_exposure + high_exposure
# Count totals
total = int(numpy.nansum(exposure_data))
low_total = int(numpy.nansum(low_exposure))
medium_total = int(numpy.nansum(medium_exposure))
high_total = int(numpy.nansum(high_exposure))
total_impact = high_total + medium_total + low_total
# Check for zero impact
if total_impact == 0:
table_body = [
self.question,
TableRow(
[tr('People impacted'),
'%s' % format_int(total_impact)], header=True)]
message = Table(table_body).toNewlineFreeString()
raise ZeroImpactException(message)
# Don't show digits less than a 1000
total = population_rounding(total)
total_impact = population_rounding(total_impact)
low_total = population_rounding(low_total)
medium_total = population_rounding(medium_total)
high_total = population_rounding(high_total)
minimum_needs = [
parameter.serialize() for parameter in
filter_needs_parameters(self.parameters['minimum needs'])
]
table_body = self._tabulate(
high_total, low_total, medium_total, self.question, total_impact)
impact_table = Table(table_body).toNewlineFreeString()
table_body, total_needs = self._tabulate_notes(
minimum_needs, table_body, total, total_impact, no_data_warning)
impact_summary = Table(table_body).toNewlineFreeString()
map_title = tr('People in each hazard areas (low, medium, high)')
# Style for impact layer
colours = [
'#FFFFFF', '#38A800', '#79C900', '#CEED00',
'#FFCC00', '#FF6600', '#FF0000', '#7A0000']
classes = create_classes(impacted_exposure.flat[:], len(colours))
interval_classes = humanize_class(classes)
style_classes = []
for i in xrange(len(colours)):
style_class = dict()
if i == 1:
label = create_label(
interval_classes[i],
tr('Low Population [%i people/cell]' % classes[i]))
elif |
c.save()
dev = tvm.device(target, 0)
vm_exec = tvm.runtime.vm.Executable.load_exec(code, lib)
return VirtualMachine(vm_exec, dev), dev
def get_output(rt_mod, names, inputs):
    """Feed *inputs* into *rt_mod* under *names*, execute, and return output 0
    converted to a numpy array."""
    set_input = rt_mod.set_input
    for name, tensor in zip(names, inputs):
        set_input(name, tensor)
    rt_mod.run()
    first_output = rt_mod.get_output(0)
    return first_output.asnumpy()
def get_output_vm(vm, names, inputs):
    """Invoke the VM's "main" function with keyword inputs; return result as numpy."""
    kwargs = {name: tensor for name, tensor in zip(names, inputs)}
    result = vm.invoke("main", **kwargs)
    return result.numpy()
def get_dense_with_shape(data_shape, weight_shape, out_dtype="float16"):
    """Build a relay dense op over fresh fp16 'data'/'weight' variables."""
    lhs = relay.var("data", shape=data_shape, dtype="float16")
    rhs = relay.var("weight", shape=weight_shape, dtype="float16")
    return relay.nn.dense(lhs, rhs, out_dtype=out_dtype)
def get_dense(M, N, K, out_dtype="float16"):
    """Dense with (M, K) data against (N, K) weight."""
    data_shape = (M, K)
    weight_shape = (N, K)
    return get_dense_with_shape(data_shape, weight_shape, out_dtype)
def get_dense_bias(M, N, K, out_dtype="float16"):
    """Dense followed by bias_add with a fresh (N,) 'bias' variable."""
    bias_var = relay.var("bias", shape=(N,), dtype=out_dtype)
    dense_op = get_dense(M, N, K, out_dtype=out_dtype)
    return relay.nn.bias_add(dense_op, bias_var)
def get_dense_bias_relu(M, N, K, out_dtype="float16"):
    """Dense + bias + ReLU."""
    biased = get_dense_bias(M, N, K, out_dtype=out_dtype)
    return relay.nn.relu(biased)
def get_dense_bias_gelu(M, N, K, out_dtype="float16"):
    """Dense + bias followed by an erf-based GELU: x * (0.5 * erf(x/sqrt(2)) + 0.5)."""
    biased = get_dense_bias(M, N, K, out_dtype)
    scaled = biased * relay.const((1.0 / math.sqrt(2.0)), dtype=out_dtype)
    # presumably erf lacks an fp16 kernel here — hence the fp32 round-trip.
    if out_dtype == "float16":
        erf = relay.cast(relay.op.erf(relay.cast(scaled, "float32")), "float16")
    else:
        erf = relay.op.erf(scaled)
    gate = erf * relay.const(0.5, dtype=out_dtype) + relay.const(0.5, dtype=out_dtype)
    return gate * biased
def get_batch_matmul_with_shape(x_shape, y_shape, out_dtype="float16"):
    """Batched matmul over fresh fp16 'x'/'y' variables."""
    lhs = relay.var("x", shape=x_shape, dtype="float16")
    rhs = relay.var("y", shape=y_shape, dtype="float16")
    return relay.nn.batch_matmul(lhs, rhs, out_dtype=out_dtype)
def get_batch_matmul(batch, M, N, K, out_dtype="float16"):
    """Batched matmul with (batch, M, K) x (batch, N, K) shapes.

    Fix: forward the caller's ``out_dtype`` — previously "float16" was
    hard-coded, so the parameter was silently ignored.
    """
    return get_batch_matmul_with_shape((batch, M, K), (batch, N, K), out_dtype=out_dtype)
def get_conv2d_nchw(d_shape, w_shape, padding, out_dtype="float16"):
    """NCHW conv2d over fresh fp16 'data'/'weight' variables.

    Assumes an OIHW-style weight shape: out-channels first, kernel dims
    trailing — TODO confirm against callers.
    """
    data_var = relay.var("data", shape=d_shape, dtype="float16")
    weight_var = relay.var("weight", shape=w_shape, dtype="float16")
    return relay.nn.conv2d(
        data=data_var,
        weight=weight_var,
        kernel_size=w_shape[2:],
        channels=w_shape[0],
        padding=padding,
        out_dtype=out_dtype,
    )
def get_conv2d_nchw_bias(d_shape, w_shape, padding, out_dtype="float16"):
    """Conv2d followed by bias_add over a fresh (out_channels,) 'bias' variable."""
    bias_var = relay.var("bias", shape=(w_shape[0],), dtype=out_dtype)
    conv = get_conv2d_nchw(d_shape, w_shape, padding, out_dtype=out_dtype)
    return relay.nn.bias_add(conv, bias_var)
def silu(x):
    """SiLU / swish activation: x * sigmoid(x)."""
    gate = relay.sigmoid(x)
    return x * gate
def hardswish(x, out_dtype="float16"):
    """Hard-swish activation: x * clip(x + 3, 0, 6) / 6."""
    shifted = x + relay.const(3, dtype=out_dtype)
    relu6 = relay.clip(shifted, a_min=0, a_max=6)
    return x * (relu6 / relay.const(6, dtype=out_dtype))
def get_conv2d_nchw_bias_relu(d_shape, w_shape, padding, out_dtype="float16"):
    """Conv2d + bias + ReLU."""
    biased = get_conv2d_nchw_bias(d_shape, w_shape, padding, out_dtype=out_dtype)
    return relay.nn.relu(biased)
def get_conv2d_nchw_bias_sigmoid(d_shape, w_shape, padding, out_dtype="float16"):
    """Conv2d + bias + sigmoid."""
    biased = get_conv2d_nchw_bias(d_shape, w_shape, padding, out_dtype=out_dtype)
    return relay.sigmoid(biased)
def get_conv2d_nchw_bias_silu(d_shape, w_shape, padding, out_dtype="float16"):
    """Conv2d + bias + SiLU."""
    return silu(get_conv2d_nchw_bias(d_shape, w_shape, padding, out_dtype=out_dtype))
def get_conv2d_nchw_bias_hardswish(d_shape, w_shape, padding, out_dtype="float16"):
    """Conv2d + bias + hard-swish."""
    return hardswish(get_conv2d_nchw_bias(d_shape, w_shape, padding, out_dtype=out_dtype), out_dtype)
def get_conv2d_nchw_bias_residual(d_shape, w_shape, padding, out_dtype="float16"):
    """Conv2d + bias, returning both the biased op and the data variable
    so the caller can form a residual connection."""
    data_var = relay.var("data", shape=d_shape, dtype="float16")
    weight_var = relay.var("weight", shape=w_shape, dtype="float16")
    bias_var = relay.var("bias", shape=(w_shape[0],), dtype=out_dtype)
    conv = relay.nn.conv2d(
        data=data_var,
        weight=weight_var,
        kernel_size=w_shape[2:],
        channels=w_shape[0],
        padding=padding,
        out_dtype=out_dtype,
    )
    biased = relay.nn.bias_add(conv, bias_var)
    return biased, data_var
def profile_and_build(mod, params, sm, tmp_dir="./tmp", lib_path="compile.so", use_fast_math=False):
    """Partition *mod* for CUTLASS, tune kernels for arch *sm*, and build a
    CUDA graph executor.

    @return: (graph_module, device, num_cutlass_partition)
    """
    mod = partition_for_cutlass(mod)
    # profile_all=False: take the first matching kernel instead of profiling all.
    mod, num_cutlass_partition = tune_cutlass_kernels(
        mod, sm, profile_all=False, use_multiprocessing=False, tmp_dir=tmp_dir
    )
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(mod, target="cuda", params=params)
    # Compile the generated CUTLASS kernels into the shared library.
    lib = build_cutlass_kernels(lib, sm, tmp_dir, lib_path, use_fast_math=use_fast_math)
    dev = tvm.device("cuda", 0)
    rt_mod = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
    return rt_mod, dev, num_cutlass_partition
def profile_and_build_vm(
    mod,
    params,
    sm,
    tmp_dir="./tmp",
    lib_path="compile.so",
    vmcode_path="vmcode.ro",
    use_fast_math=False,
):
    """VM-executor variant of profile_and_build for dynamic-shape modules.

    Uses the tuner's default options (unlike profile_and_build, which
    disables exhaustive profiling).
    @return: (VirtualMachine, device, num_cutlass_partition)
    """
    mod = partition_for_cutlass(mod)
    mod, num_cutlass_partition = tune_cutlass_kernels(mod, sm, tmp_dir=tmp_dir)
    with tvm.transform.PassContext(opt_level=3):
        vm_exec = relay.vm.compile(mod, target="cuda", params=params)
    # Compile the CUTLASS kernels and serialize alongside the VM bytecode.
    vm_exec = build_cutlass_kernels_vm(
        vm_exec, sm, tmp_dir, lib_path, vmcode_path, use_fast_math=use_fast_math
    )
    dev = tvm.device("cuda", 0)
    return VirtualMachine(vm_exec, dev), dev, num_cutlass_partition
def verify_dense(
    func, M, N, K, ref_target="cuda", sm=80, atol=1e-5, rtol=1e-5, run_benchmark=False
):
    """Compile *func* with CUTLASS and check its output against a TVM-native
    reference build.

    Silently skips when CUTLASS is unavailable.  Modules with symbolic
    shapes go through the VM executor; static shapes use the graph executor.
    """
    if not has_cutlass():
        return
    mod = tvm.IRModule.from_expr(func)
    typ = relay.transform.InferType()(mod)["main"].body.checked_type
    out_dtype = typ.dtype
    # Any symbolic (Any) dimension forces the VM path.
    use_vm = any(isinstance(s, tvm.tir.Any) for s in typ.shape)
    np_data = np.random.uniform(-1, 1, (M, K)).astype("float16")
    np_weight = np.random.uniform(-1, 1, (N, K)).astype("float16")
    np_bias = np.random.uniform(-1, 1, (N,)).astype(out_dtype)
    params = {"weight": np_weight, "bias": np_bias}
    if use_vm:
        if ref_target == "cuda" and out_dtype == "float16":
            # NOTE(review): the early `return` below makes this branch a
            # no-op check — nothing is asserted for fp16 dynamic dense.
            # Uncomment "return" below to see the accuracy difference of static vs dynamic TVM native fp16 dense
            # The static one can use a tensorcore schedule, but the dynamic one cannot
            rt_mod, dev = get_ref_vm(tvm.IRModule.from_expr(get_dense(M, N, K)), params)
            num_partition = 1
            logging.warning(
                "The reference fp16 dense with dynamic shape using fp16 accumulation has accuracy issues."
            )
            return
        else:
            rt_mod, dev, num_partition = profile_and_build_vm(mod, params, sm)
            rt_mod_ref, dev = get_ref_vm(mod, params, target=ref_target)
            x = tvm.nd.array(np_data, device=dev)
            out = get_output_vm(rt_mod, ["data"], [x])
            ref_out = get_output_vm(rt_mod_ref, ["data"], [x])
    else:
        rt_mod_ref, dev = get_ref_rt_mod(mod, params, target=ref_target)
        rt_mod, dev, num_partition = profile_and_build(mod, params, sm)
        x = tvm.nd.array(np_data, device=dev)
        out = get_output(rt_mod, ["data"], [x])
        ref_out = get_output(rt_mod_ref, ["data"], [x])
    # At least one subgraph must actually have been offloaded to CUTLASS.
    assert num_partition > 0
    np.testing.assert_allclose(out, ref_out, atol=atol, rtol=rtol)
    if run_benchmark:
        print("CUTLASS:", rt_mod.benchmark(dev, number=1, repeat=600))
        print("TVM with target %s:" % ref_target, rt_mod_ref.benchmark(dev, number=1, repeat=600))
def verify_batch_matmul(
func, batch, M, N, K, ref_target="cuda", sm=80, atol=1e-5, rtol=1e-5, run_benchmark=False
):
if not has_cutlass():
return
mod = tvm.IRModule.from_expr(func)
typ = relay.transform.InferType()(mod)["main"].body.checked_type
use_vm = any(isinstance(s, tvm.tir.Any) for s in typ.shape)
x_np = np.random.uniform(-1, 1, (batch, M, K)).astype("float16")
y_np = np.random.uniform(-1, 1, (batch, N, K)).astype("float16")
if use_vm:
rt_mod, dev, num_partition = profile_and_build_vm(mod, {}, sm)
rt_mod_ref, dev = |
""" Fantasm: A taskqueue-based Finite State Machine for App Engine Python
Docs and examples: http://code.google.com/p/fantasm/
Copyright 2010 VendAsta Technologies Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from fantasm import constants
from google.appengine.api.taskqueue.taskqueue import Queue
class NoOpQueue( Queue ):
    """ A Queue instance that does not Queue """

    def add(self, task, transactional=False):
        """ see taskqueue.Queue.add — intentionally discards the task """
        pass
def knuthHash(number):
    """Multiplicative (Knuth) hash of an integer, reduced to 32 bits."""
    KNUTH_MULTIPLIER = 2654435761
    return (number * KNUTH_MULTIPLIER) & 0xFFFFFFFF
def boolConverter(boolStr):
    """ Map the common truthy strings '1', 'True', 'true' to True;
    anything else converts to False """
    return boolStr in {'1', 'True', 'true'}
def outputAction(action):
    """ Return the bare class name of an action, or None for a falsy action.

    @param action: an FSMAction instance (or None)
    """
    if not action:
        return None
    className = str(action.__class__.__name__)
    return className.split('.')[-1]
def outputTransitionConfig(transitionConfig):
    """ Render one GraphViz edge for a transition.

    @param transitionConfig: a config._TransitionConfig instance
    @return: a string like '"A" -> "B" [label="event/ Action"];'
    """
    label = transitionConfig.event
    if transitionConfig.action:
        label = label + '/ ' + outputAction(transitionConfig.action)
    return '"{0}" -> "{1}" [label="{2}"];'.format(
        transitionConfig.fromState.name,
        transitionConfig.toState.name,
        label)
def outputStateConfig(stateConfig, colorMap=None):
    """ Outputs a GraphViz directed graph node

    @param stateConfig: a config._StateConfig instance
    @param colorMap: optional dict mapping state name -> fill color
    @return: a string
    """
    colorMap = colorMap or {}
    actions = []
    if stateConfig.entry:
        actions.append('entry/ %(entry)s' % {'entry': outputAction(stateConfig.entry)})
    if stateConfig.action:
        actions.append('do/ %(do)s' % {'do': outputAction(stateConfig.action)})
    if stateConfig.exit:
        actions.append('exit/ %(exit)s' % {'exit': outputAction(stateConfig.exit)})
    # '\l' left-justifies each action on its own line in a GraphViz record label.
    label = '%(stateName)s|%(actions)s' % {'stateName': stateConfig.name, 'actions': '\\l'.join(actions)}
    if stateConfig.continuation:
        label += '|continuation = True'
    if stateConfig.fanInPeriod != constants.NO_FAN_IN:
        label += '|fan in period = %(fanin)ds' % {'fanin': stateConfig.fanInPeriod}
    # Mrecord: record-shaped node with rounded corners.
    shape = 'Mrecord'
    if colorMap.get(stateConfig.name):
        return '"%(stateName)s" [style=filled,fillcolor="%(fillcolor)s",shape=%(shape)s,label="{%(label)s}"];' % \
            {'stateName': stateConfig.name,
             'fillcolor': colorMap.get(stateConfig.name, 'white'),
             'shape': shape,
             'label': label}
    else:
        return '"%(stateName)s" [shape=%(shape)s,label="{%(label)s}"];' % \
            {'stateName': stateConfig.name,
             'shape': shape,
             'label': label}
def outputMachineConfig(machineConfig, colorMap=None, skipStateNames=None):
    """ Outputs a GraphViz directed graph of the state machine

    @param machineConfig: a config._MachineConfig instance
    @param colorMap: optional dict mapping state name -> fill color
    @param skipStateNames: optional iterable of state names to omit
    @return: a string
    """
    skipStateNames = skipStateNames or ()
    lines = []
    lines.append('digraph G {')
    lines.append('label="%(machineName)s"' % {'machineName': machineConfig.name})
    lines.append('labelloc="t"')
    # Synthetic entry/exit nodes so initial/final states get visible edges.
    lines.append('"__start__" [label="start",shape=circle,style=filled,fillcolor=black,fontcolor=white,fontsize=9];')
    lines.append('"__end__" [label="end",shape=doublecircle,style=filled,fillcolor=black,fontcolor=white,fontsize=9];')
    for stateConfig in machineConfig.states.values():
        if stateConfig.name in skipStateNames:
            continue
        lines.append(outputStateConfig(stateConfig, colorMap=colorMap))
        if stateConfig.initial:
            lines.append('"__start__" -> "%(stateName)s"' % {'stateName': stateConfig.name})
        if stateConfig.final:
            lines.append('"%(stateName)s" -> "__end__"' % {'stateName': stateConfig.name})
    # Transitions touching a skipped state are skipped as well.
    for transitionConfig in machineConfig.transitions.values():
        if transitionConfig.fromState.name in skipStateNames or \
                transitionConfig.toState.name in skipStateNames:
            continue
        lines.append(outputTransitionConfig(transitionConfig))
    lines.append('}')
    return '\n'.join(lines)
# proxy module
from __future__ import absolute_import
from envisage.plugins.remote_editor.i_remote_shell i | mpo | rt *
|
import os
def get_files(extension):
    """Yield every path under the current directory whose name ends
    with *extension*."""
    for root, _dirs, names in os.walk("."):
        matching = (name for name in names if name.endswith(extension))
        for name in matching:
            yield os.path.join(root, name)
def get_roles():
    """Return the role directory names under ./roles, sorted alphabetically."""
    roles = os.listdir("./roles")
    roles.sort()
    return roles
def pytest_generate_tests(metafunc):
    """Parametrize the known file/role fixtures for every collected test."""
    providers = (
        ("jinja2_file", lambda: get_files(".j2")),
        ("json_file", lambda: get_files(".json")),
        ("ini_file", lambda: get_files(".fact")),
        ("role", get_roles),
    )
    for fixture, provider in providers:
        if fixture in metafunc.fixturenames:
            metafunc.parametrize(fixture, provider())
|
from flask_user import DBAdapter
class DataStoreAdapter(DBAdapter):
    """ A wrapper used by Flask-User to interact with
    the database, in this case the DataStore """

    def __init__(self, db, objMOdel):
        # NOTE(review): `objMOdel` looks like a typo for `objModel`; kept
        # as-is because renaming would break keyword-argument callers.
        super().__init__(db, objMOdel)

    def get_object(self, ObjectClass, pk):
        """ Retrieve a single Entity specified by a pk or id. """
        return ObjectClass.get(pk)

    def find_all_objects(self, ObjectClass, **kwargs):
        """ Retrieve all Entities matching all the filters in kwargs. """
        # TODO:
        # The filters should be case sensitive
        for field, value in kwargs.items():
            ObjectClass.add_query_filter(field, "=", value)
        return ObjectClass.fetch()

    def find_first_object(self, ObjectClass, **kwargs):
        """ Retrieve the first Entity matching the filters in
        kwargs or None. """
        # TODO:
        # The filters should be case sensitive
        for field, value in kwargs.items():
            ObjectClass.add_query_filter(field, "=", value)
        # NOTE(review): fetch(limit=1) may return a list rather than a single
        # entity or None — confirm against the DataStore model API.
        entity = ObjectClass.fetch(limit=1)
        return entity

    def ifind_first_object(self, ObjectClass, **kwargs):
        """ Retrieve the first Entity matching the filters in
        kwargs or None. """
        # TODO:
        # The filters should be case insensitive
        # NOTE(review): currently byte-identical to find_first_object; the
        # case-insensitive comparison is not implemented yet.
        for field, value in kwargs.items():
            ObjectClass.add_query_filter(field, "=", value)
        entity = ObjectClass.fetch(limit=1)
        return entity

    def add_object(self, ObjectClass, **kwargs):
        """ Create an Entity with the fields specified in kwargs. """
        entity = ObjectClass(**kwargs)
        entity.put()
        return entity

    def update_object(self, entity, **kwargs):
        """ Update an Entity with the fields specified in kwargs. """
        entity.update(**kwargs)
        return entity

    def delete_object(self, entity):
        """ Delete an Entity. """
        return entity.delete(entity.id)

    def commit(self):
        """ Should commit a session connection to the DataStore. """
        pass
|
from __future__ import absolute_import
import os
import sys
from collections import defaultdict
from .gemini_constants import *
from . import GeminiQuery
class Site(object):
    """Container for a single variant site: chromosome, end position and
    (initially unset) genotype class."""

    def __init__(self, row):
        """Capture chromosome and integer end position from a row mapping."""
        chrom = row['chrom']
        end_position = int(row['end'])
        self.chrom = chrom
        self.end = end_position
        self.gt_type = None
def _prune_run(run):
"""
Prune the current run of genotypes.
Remove genotypes from the left of the first
non-homozygous genotype, since, owing to the
same logic behind run length encoding, those
genotypes cannot be part of a longer run than
we have seen before.
For example:
breaks = * * *
run = H H H h H H H U H H H h H H H H H H
prune_1 = H H H U H H H h H H H H H H
prune_2 = H H H h H H H H H H
prune_3 = H H H H H H
"""
try:
first_het_idx = run.index('H')
except:
first_het_idx = None
try:
first_unk_idx = run.index('U')
except:
first_unk_idx = None
if first_het_idx is not None and first_unk_idx is not None:
idx_of_first_disruption = min(run.index('H'), run.index('U'))
elif first_het_idx is not None:
idx_of_first_disruption = first_het_idx
elif first_unk_idx is not None:
idx_of_first_disruption = first_unk_idx
else:
# no interuptions, return an empty list
return 0, 0, len(run), []
hets_removed = run[0:idx_of_first_disruption+1].count('H')
unks_removed = run[0:idx_of_first_disruption+1].count('U')
homs_removed = idx_of_first_disruption - (hets_removed + unks_removed) + 1
return hets_removed, unks_removed, homs_removed, run[idx_of_first_disruption+1:]
def sweep_genotypes_for_rohs(args, chrom, samples):
    """
    Sweep through the genotypes for each sample in search of ROHs.

    Note: If the genotype was homozygous, the end position
    of the variant is stored. Otherwise 'H' for het
    and 'U' for unknown.

    Prints one BED-like line per qualifying run; returns nothing.
    """
    hom_count = 0
    het_count = 0
    unk_count = 0
    curr_run = []
    for sample in samples:
        sites = iter(samples[sample])
        for site in sites:
            # retain the last homozygote from previous
            # run. See _prune_run docs for details.
            if len(curr_run):
                hets_removed, unks_removed, homs_removed, curr_run = \
                    _prune_run(curr_run)
                # reset the counters to match the pruned run
                hom_count -= homs_removed
                het_count -= hets_removed
                unk_count -= unks_removed
            # sweep through the active sites until we encounter
            # too many HETS or UNKNOWN genotypes.
            while het_count <= args.max_hets and unk_count <= args.max_unknowns:
                # non-'H'/'U' entries are homozygous site end positions (ints)
                if site != 'H' and site != 'U':
                    hom_count += 1
                    curr_run.append(site)
                elif site == 'H':
                    curr_run.append(site)
                    het_count += 1
                elif site == 'U':
                    curr_run.append(site)
                    unk_count += 1
                try:
                    site = next(sites)
                except StopIteration:
                    break
            # skip the current run unless it contains enough sites.
            if hom_count >= args.min_snps:
                # run boundaries come from the numeric (homozygous) entries
                run_start = min(c for c in curr_run if c not in ['H', 'U'])
                run_end = max(c for c in curr_run if c not in ['H', 'U'])
                run_length = run_end - run_start
                # report the run if it is long enough.
                if run_length >= args.min_size:
                    density_per_kb = float(len(curr_run) * 1000) / float(run_length)
                    print("\t".join(str(s) for s in [chrom,
                                                     run_start, run_end, sample,
                                                     hom_count, round(density_per_kb, 4),
                                                     run_length]))
            else:
                # too few SNPs: discard the run and start fresh
                curr_run = []
                hom_count = 0
                het_count = 0
                unk_count = 0
def get_homozygosity_runs(args):
    """Query SNPs from the gemini database and report runs of homozygosity
    (ROHs) per sample and chromosome.

    Fixes: the bare `except:` around the sample lookup now catches only
    KeyError, and the looked-up index is reused instead of a second
    dictionary lookup.
    """
    gq = GeminiQuery.GeminiQuery(args.db)
    # get a mapping of sample ids to sample indices
    idx2smp = gq.index2sample
    smp2idx = gq.sample2index
    sm_index = []
    # prepare a lookup of just the samples
    # for which the user wishes to search for ROHs
    if args.samples is not None:
        sample_filter = args.samples.strip().split(",")
        for sample in sample_filter:
            try:
                idx = smp2idx[sample]
            except KeyError:
                raise ValueError("Sample %s could not be found.\n" % (sample))
            sm_index.append(idx)
    else:
        for sample in smp2idx:
            sm_index.append(smp2idx[sample])
    ###########################################################################
    # Phase 1. Retrieve the variants for each chrom/sample
    ###########################################################################
    query = "SELECT chrom, start, end, gt_types, gt_depths " \
            "FROM variants " \
            "WHERE type = 'snp' " \
            "AND filter is NULL " \
            "AND depth >= " + str(args.min_total_depth) + \
            " ORDER BY chrom, end"
    sys.stderr.write("LOG: Querying and ordering variants by chromosomal position.\n")
    gq.run(query, needs_genotypes=True)
    print("\t".join(['chrom',
                     'start', 'end', 'sample',
                     'num_of_snps', 'density_per_kb',
                     'run_length_in_bp']))
    variants_seen = 0
    samples = defaultdict(list)
    prev_chrom = None
    curr_chrom = None
    for row in gq:
        variants_seen += 1
        if variants_seen % 10000 == 0:
            sys.stderr.write("LOG: Loaded %d variants. Current variant on %s, position %d.\n"
                             % (variants_seen, row['chrom'], row['end']))
        gt_types = row['gt_types']
        gt_depths = row['gt_depths']
        curr_chrom = row['chrom']
        # the chromosome has changed. search for ROHs in the previous chrom
        if curr_chrom != prev_chrom and prev_chrom is not None:
            sweep_genotypes_for_rohs(args, prev_chrom, samples)
            samples = defaultdict(list)
        # associate the genotype for the variant with each sample
        for idx in sm_index:
            sample = idx2smp[idx]
            gt_type = gt_types[idx]
            depth = gt_depths[idx]
            # the genotype must have had sufficient depth to be considered
            if depth < args.min_genotype_depth:
                continue
            # homozygotes store the end position; others store a marker
            if (gt_type == HOM_ALT or gt_type == HOM_REF):
                samples[sample].append(row['end'])
            elif gt_type == HET:
                samples[sample].append('H')
            elif gt_type == UNKNOWN:
                samples[sample].append('U')
        prev_chrom = curr_chrom
    # search for ROHs in the final chromosome
    sweep_genotypes_for_rohs(args, curr_chrom, samples)
def run(parser, args):
    """Entry point: run the ROH caller when the database file exists."""
    if not os.path.exists(args.db):
        return
    # run the roh caller
    get_homozygosity_runs(args)
|
from google.appengine.ext import ndb


class Users(ndb.Model):
    """Datastore model for an application user, keyed by email address."""
    # NOTE(review): password is a plain StringProperty — confirm that the
    # value written here is hashed by the caller.
    password = ndb.StringProperty(required=True)
    full_name = ndb.StringProperty(required=True)
    phone_no = ndb.StringProperty(required=False)
    # set once, automatically, when the entity is first stored
    timestamp = ndb.DateTimeProperty(auto_now_add=True)

    @classmethod
    def query_user(cls, ancestor_key):
        """Query users under *ancestor_key*, ordered by full name."""
        return cls.query(ancestor=ancestor_key).order(cls.full_name)

    @classmethod
    def user_key(cls, user_email):
        """Build the ancestor key ('MyAppName_User', email) for a user."""
        return ndb.Key('MyAppName_User', user_email)
# 053. Maximum Subarray
# The simple O(n) solution.
import unittest
class Solution(object):
    """053. Maximum Subarray — single O(n) pass."""

    def maxSubArray(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        best = nums[0]
        running = nums[0]
        for value in nums[1:]:
            # A lone value beats any negative best seen so far.
            if best < value and best < 0:
                best = running = value
                continue
            extended = running + value
            if best < extended:
                best = running = extended
                continue
            # Keep a non-negative running prefix; otherwise start over.
            running = extended if extended >= 0 else 0
        return best
class SolutionUnitTest(unittest.TestCase):
    """Unit tests for Solution.maxSubArray."""

    def setUp(self):
        # Fixed: was `setup`, which unittest never calls — the hook is `setUp`.
        pass

    def tearDown(self):
        pass

    def testMaxSubArray(self):
        s = Solution()
        self.assertEqual(s.maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4]), 6)
        self.assertEqual(s.maxSubArray([-2, 1]), 1)
        self.assertEqual(s.maxSubArray([-1]), -1)
if __name__ == '__main__':
unittest.main()
|
# Copyright ( | c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
from telemetry import timeline_model
def Import(data):
    """Parse a JSON trace string and return a (currently empty) TimelineModel."""
    trace = json.loads(data)  # pylint: disable=W0612
    # TODO(nduca): Actually import things.
    return timeline_model.TimelineModel()
|
import logging
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from corehq.apps.adm.dispatcher import ADMSectionDispatcher
from corehq.apps.adm.models import REPORT_SECTION_OPTIONS, ADMReport
| from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn, DTSortType
from corehq.apps.reports.generic import GenericReportView, GenericTabularReport
from corehq.apps.reports.standard import DatespanMixin, ProjectReportParametersMixin
from dimagi.utils.decorators.memoized import memoized
from django.util | s.translation import ugettext as _, ugettext_noop
class ADMSectionView(GenericReportView):
    """Base view for one section of the Active Data Management reports.

    Subclasses set `adm_slug` (must be a key of REPORT_SECTION_OPTIONS)
    and implement `subreport_data`.
    """
    section_name = ugettext_noop("Active Data Management")
    base_template = "reports/base_template.html"
    dispatcher = ADMSectionDispatcher
    hide_filters = True
    emailable = True

    # adm-specific stuff
    adm_slug = None

    def __init__(self, request, base_context=None, domain=None, **kwargs):
        """Validate adm_slug, record the subreport slug, and decorate the name."""
        self.adm_sections = dict(REPORT_SECTION_OPTIONS)
        if self.adm_slug not in self.adm_sections:
            raise ValueError("The adm_slug provided, %s, is not in the list of valid ADM report section slugs: %s." %
                             (self.adm_slug, ", ".join([key for key, val in self.adm_sections.items()]))
                             )
        self.subreport_slug = kwargs.get("subreport_slug")
        super(ADMSectionView, self).__init__(request, base_context, domain=domain, **kwargs)
        self.context['report'].update(sub_slug=self.subreport_slug)
        # Display "<subreport name> <section name>" when a subreport is active.
        if self.subreport_data:
            self.name = mark_safe("""%s <small>%s</small>""" %
                                  (self.subreport_data.get('value', {}).get('name'),
                                   self.adm_sections.get(self.adm_slug, _("ADM Report"))))

    @property
    def subreport_data(self):
        # Subclasses must return the raw subreport document (dict).
        raise NotImplementedError

    @property
    def default_report_url(self):
        return reverse('default_adm_report', args=[self.request.project])

    @classmethod
    def get_url(cls, domain=None, render_as=None, **kwargs):
        """Append the subreport slug (if any) to the standard report URL."""
        subreport = kwargs.get('subreport')
        url = super(ADMSectionView, cls).get_url(domain=domain, render_as=render_as, **kwargs)
        return "%s%s" % (url, "%s/" % subreport if subreport else "")
class DefaultReportADMSectionView(GenericTabularReport, ADMSectionView, ProjectReportParametersMixin, DatespanMixin):
    """Tabular ADM section report driven by a configurable ADMReport document.

    Fix: `adm_columns` previously constructed a ValueError without raising
    it when `report_column_config` returned a non-dict; it now raises.
    """
    section_name = ugettext_noop("Active Data Management")
    base_template = "reports/base_template.html"
    dispatcher = ADMSectionDispatcher
    fix_left_col = True
    fields = ['corehq.apps.reports.filters.users.UserTypeFilter',
              'corehq.apps.reports.filters.select.GroupFilter',
              'corehq.apps.reports.filters.dates.DatespanFilter']
    hide_filters = False

    # adm-specific stuff
    adm_slug = None

    @property
    @memoized
    def subreport_data(self):
        """Raw document of the default subreport, or {} when none is configured."""
        default_subreport = ADMReport.get_default(self.subreport_slug, domain=self.domain,
                                                  section=self.adm_slug, wrap=False)
        if default_subreport is None:
            return dict()
        return default_subreport

    @property
    @memoized
    def adm_report(self):
        """Wrapped ADMReport for this subreport, or None when it cannot be loaded."""
        if self.subreport_data:
            try:
                adm_report = ADMReport.get_correct_wrap(self.subreport_data.get('key')[-1])
                adm_report.set_domain_specific_values(self.domain)
                return adm_report
            except Exception as e:
                logging.error("Could not fetch ADM Report: %s" % e)
        return None

    @property
    @memoized
    def adm_columns(self):
        """Columns of the ADM report, primed with this report's context values."""
        if self.adm_report:
            column_config = self.report_column_config
            if not isinstance(column_config, dict):
                # Fixed: the exception was previously constructed but never raised.
                raise ValueError('report_column_config should return a dict')
            for col in self.adm_report.columns:
                col.set_report_values(**column_config)
            return self.adm_report.columns
        return []

    @property
    def headers(self):
        """DataTables header: FLW Name plus one column per ADM column."""
        if self.subreport_slug is None:
            raise ValueError("Cannot render this report. A subreport_slug is required.")
        header = DataTablesHeader(DataTablesColumn(_("FLW Name")))
        for col in self.adm_report.columns:
            sort_type = DTSortType.NUMERIC if hasattr(col, 'returns_numerical') and col.returns_numerical else None
            help_text = _(col.description) if col.description else None
            header.add_column(DataTablesColumn(_(col.name), sort_type=sort_type, help_text=help_text))
        header.custom_sort = self.adm_report.default_sort_params
        return header

    @property
    def rows(self):
        """One row per user; also populates Total/Average statistics rows."""
        rows = []
        for user in self.users:
            row = [self.table_cell(user.raw_username,
                                   user.username_in_report)]
            for col in self.adm_columns:
                val = col.raw_value(**user._asdict())
                row.append(self.table_cell(col.clean_value(val),
                                           col.html_value(val)))
            rows.append(row)
        self.statistics_rows = [["Total"], ["Average"]]
        for ind, col in enumerate(self.adm_columns):
            # offset 1 skips the FLW Name cell
            column_data = [row[1 + ind] for row in rows]
            self.statistics_rows[0].append(col.calculate_totals(column_data))
            self.statistics_rows[1].append(col.calculate_averages(column_data))
        return rows

    @property
    def report_column_config(self):
        """
        Should return a dict of values important for rendering the ADMColumns in this report.
        """
        return dict(
            domain=self.domain,
            datespan=self.datespan
        )

    @classmethod
    def override_navigation_list(cls, context):
        """Build the sidebar entries for all configured subreports of this section."""
        current_slug = context.get('report', {}).get('sub_slug')
        domain = context.get('domain')
        subreport_context = []
        subreports = ADMReport.get_default_subreports(domain, cls.adm_slug)
        if not subreports:
            subreport_context.append({
                'url': '#',
                'warning_label': 'No ADM Reports Configured',
            })
            return subreport_context
        for report in subreports:
            key = report.get("key", [])
            entry = report.get("value", {})
            report_slug = key[-2]
            if cls.show_subreport_in_navigation(report_slug):
                subreport_context.append({
                    'is_active': current_slug == report_slug,
                    'url': cls.get_url(domain=domain, subreport=report_slug),
                    'description': entry.get('description', ''),
                    'title': entry.get('name', 'Untitled Report'),
                })
        return subreport_context

    @classmethod
    def show_subreport_in_navigation(cls, subreport_slug):
        # Hook for subclasses to hide specific subreports.
        return True
|
#!/usr/bin/env python
# (c) 2015-2018, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
"""
Test the Model constructors.
"""
import itertools
import pytest
import numpy as np
import tbmodels
def test_on_site_too_long(get_model):
    """
    Check that error is raised when the on_site list is too long.
    """
    # get_model is a conftest fixture; three on-site energies do not fit
    # the fixture's two-orbital model.
    with pytest.raises(ValueError):
        get_model(0.1, 0.2, on_site=[1, 2, 3])
def test_no_size_given(get_model, models_equal):
    """
    Check that the Model can be created without an explicit size.
    """
    without_size = get_model(0.1, 0.2, size=None)
    reference = get_model(0.1, 0.2)
    models_equal(without_size, reference)
def test_size_from_hop():
    """
    Check that the size can be obtained from hopping matrices.
    """
    hop_matrices = {(0, 0, 0): np.zeros((4, 4))}
    model = tbmodels.Model(hop=hop_matrices)
    assert model.size == 4
def test_size_unknown(get_model):
    """Without size, on_site, or pos there is no way to infer the size."""
    with pytest.raises(ValueError):
        get_model(0.1, 0.2, on_site=None, pos=None, size=None)
def test_add_on_site(get_model, models_equal):
    """on_site given in the constructor equals add_on_site after construction."""
    energies = (1, -2)
    via_ctor = get_model(0.1, 0.2, on_site=energies)
    via_method = get_model(0.1, 0.2, size=2, on_site=None)
    via_method.add_on_site(energies)
    models_equal(via_ctor, via_method)
def test_invalid_add_on_site(get_model):
    """add_on_site with more entries than sites must raise ValueError."""
    too_long = (1, 2, 3)
    model = get_model(0.1, 0.2)
    with pytest.raises(ValueError):
        model.add_on_site(too_long)
def test_explicit_dim(get_model, models_equal):
    """Setting dim=3 explicitly yields the same model as inferring it."""
    explicit = get_model(0.1, 0.2, dim=3)
    inferred = get_model(0.1, 0.2)
    models_equal(explicit, inferred)
def test_no_dim(get_model):
    """Without positions, the dimension cannot be inferred -> ValueError."""
    with pytest.raises(ValueError):
        get_model(0.1, 0.2, pos=None)
def test_dim_from_uc():
    """The dimension is taken from the unit cell when not given explicitly."""
    unit_cell = ((1, 0), (0, 1))
    model = tbmodels.Model(uc=unit_cell, size=5)
    assert model.dim == 2
def test_pos_outside_uc(get_model, models_equal):
    """Positions outside the unit cell are wrapped back into it."""
    shifted = get_model(0.1, 0.2, pos=((0.0, 0.0, 0.0), (-0.5, -0.5, 0.0)))
    reference = get_model(0.1, 0.2)
    models_equal(shifted, reference)
@pytest.mark.parametrize("sparse", [True, False])
def test_from_hop_list(get_model, models_equal, sparse):
    """The 'from_hop_list' constructor reproduces the fixture model."""
    t1, t2 = 0.1, 0.2
    # Nearest-neighbour hoppings between the two sublattices.
    hop_list = [
        [t1 * phase, 0, 1, R]
        for phase, R in zip(
            [1, -1j, 1j, -1], itertools.product([0, -1], [0, -1], [0])
        )
    ]
    # Next-nearest-neighbour hoppings, opposite sign on the two orbitals.
    for R in ((r[0], r[1], 0) for r in itertools.permutations([0, 1])):
        hop_list.extend([[t2, 0, 0, R], [-t2, 1, 1, R]])
    constructed = tbmodels.Model.from_hop_list(
        hop_list=hop_list,
        contains_cc=False,
        on_site=(1, -1),
        occ=1,
        pos=((0.0,) * 3, (0.5, 0.5, 0.0)),
        sparse=sparse,
    )
    models_equal(constructed, get_model(t1, t2, sparse=sparse))
@pytest.mark.parametrize("sparse", [True, False])
def test_from_hop_list_with_cc(get_model, models_close, sparse):
    """'from_hop_list' with contains_cc=True: both hopping directions listed."""
    t1, t2 = 0.1, 0.2
    phases = [1, -1j, 1j, -1]
    nn_vectors = list(itertools.product([0, -1], [0, -1], [0]))
    hop_list = []
    # Nearest-neighbour hoppings ...
    for phase, R in zip(phases, nn_vectors):
        hop_list.append([t1 * phase, 0, 1, R])
    # ... and their explicit complex conjugates at -R.
    for phase, R in zip(phases, nn_vectors):
        minus_R = tuple(-x for x in R)
        hop_list.append([np.conjugate(t1 * phase), 1, 0, minus_R])
    # Next-nearest-neighbour hoppings, given in both directions.
    for R in ((r[0], r[1], 0) for r in itertools.permutations([0, 1])):
        minus_R = tuple(-x for x in R)
        hop_list += [
            [t2, 0, 0, R],
            [t2, 0, 0, minus_R],
            [-t2, 1, 1, R],
            [-t2, 1, 1, minus_R],
        ]
    constructed = tbmodels.Model.from_hop_list(
        hop_list=hop_list,
        contains_cc=True,
        on_site=(1, -1),
        occ=1,
        pos=((0.0,) * 3, (0.5, 0.5, 0.0)),
        sparse=sparse,
    )
    models_close(constructed, get_model(t1, t2, sparse=sparse))
@pytest.mark.parametrize("sparse", [True, False])
def test_pos_outside_uc_with_hoppings(
    get_model, models_equal, sparse
):  # pylint: disable=invalid-name
    """'from_hop_list' with positions outside the UC: hoppings are remapped."""
    t1, t2 = 0.1, 0.2
    # Lattice vectors shifted relative to the in-UC convention, matching
    # the shifted second position below.
    nn_vectors = [(1, 1, 0), (1, 0, 0), (0, 1, 0), (0, 0, 0)]
    hop_list = [
        [t1 * phase, 0, 1, R]
        for phase, R in zip([1, -1j, 1j, -1], nn_vectors)
    ]
    for R in ((r[0], r[1], 0) for r in itertools.permutations([0, 1])):
        hop_list.extend([[t2, 0, 0, R], [-t2, 1, 1, R]])
    constructed = tbmodels.Model.from_hop_list(
        hop_list=hop_list,
        contains_cc=False,
        on_site=(1, -1),
        occ=1,
        pos=((0.0,) * 3, (-0.5, -0.5, 0.0)),
        sparse=sparse,
    )
    models_equal(constructed, get_model(t1, t2, sparse=sparse))
def test_invalid_hopping_matrix():
    """size=2 combined with a 4x4 hopping matrix is inconsistent -> ValueError."""
    mismatched_hop = {(0, 0, 0): np.eye(4)}
    with pytest.raises(ValueError):
        tbmodels.Model(size=2, hop=mismatched_hop)
def test_non_hermitian_1():
    """A hopping at +R without its conjugate at -R is rejected as non-hermitian."""
    non_hermitian_hop = {(0, 0, 0): np.eye(2), (1, 0, 0): np.eye(2)}
    with pytest.raises(ValueError):
        tbmodels.Model(size=2, hop=non_hermitian_hop)
def test_non_hermitian_2():
    """Mismatched hoppings at +R and -R are rejected as non-hermitian."""
    hop = {
        (0, 0, 0): np.eye(2),
        (1, 0, 0): np.eye(2),
        (-1, 0, 0): 2 * np.eye(2),  # not the conjugate transpose of +R
    }
    with pytest.raises(ValueError):
        tbmodels.Model(size=2, hop=hop)
def test_wrong_key_length():
    """Lattice-vector keys of inconsistent length must raise ValueError."""
    hop = {
        (0, 0, 0): np.eye(2),
        (1, 0, 0): np.eye(2),
        (-1, 0, 0, 0): np.eye(2),  # four components instead of three
    }
    with pytest.raises(ValueError):
        tbmodels.Model(size=2, hop=hop, contains_cc=False)
def test_wrong_pos_length():
    """Three positions for a size-2 model must raise ValueError."""
    hop = {(0, 0, 0): np.eye(2), (1, 0, 0): np.eye(2), (-1, 0, 0): np.eye(2)}
    three_positions = ((0.0,) * 3, (0.5,) * 3, (0.2,) * 3)
    with pytest.raises(ValueError):
        tbmodels.Model(
            size=2,
            hop=hop,
            contains_cc=False,
            pos=three_positions,
        )
def test_wrong_pos_dim():
    """Positions with inconsistent dimensions must raise ValueError."""
    hop = {(0, 0, 0): np.eye(2), (1, 0, 0): np.eye(2), (-1, 0, 0): np.eye(2)}
    mixed_dim_positions = ((0.0,) * 3, (0.5,) * 4)  # 3D and 4D mixed
    with pytest.raises(ValueError):
        tbmodels.Model(
            size=2,
            hop=hop,
            contains_cc=False,
            pos=mixed_dim_positions,
        )
def test_wrong_uc_shape():
    """A non-square unit-cell matrix must raise ValueError."""
    hop = {(0, 0, 0): np.eye(2), (1, 0, 0): np.eye(2), (-1, 0, 0): np.eye(2)}
    non_square_uc = np.array([[1, 2], [3, 4], [5, 6]])  # 3x2, not square
    with pytest.raises(ValueError):
        tbmodels.Model(
            size=2,
            hop=hop,
            contains_cc=False,
            pos=((0.0,) * 3, (0.5,) * 3),
            uc=non_square_uc,
        )
def test_hop_list_no_size():
    """'from_hop_list' without any way to determine the size must raise."""
    single_hop = (1.2, 0, 1, (1, 2, 3))
    with pytest.raises(ValueError):
        tbmodels.Model.from_hop_list(hop_list=single_hop)
|
from django.contrib.admindocs.middlewar | e import XViewMiddleware
from django.http import HttpResponse
|
from django.utils.decorators import decorator_from_middleware
from django.views.generic import View
# Function-decorator form of XViewMiddleware, for decorating plain views.
xview_dec = decorator_from_middleware(XViewMiddleware)
def xview(request):
    """Minimal function-based view: returns an empty 200 response."""
    response = HttpResponse()
    return response
class XViewClass(View):
    """Minimal class-based view counterpart of the plain ``xview`` function."""

    def get(self, request):
        """Return an empty 200 response for GET requests."""
        return HttpResponse()
|
# -*- coding: utf-8 -*-
#pylint: skip-file
# Sphinx configuration for the 'rspace' documentation.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('..'))
# Sphinx extensions: autodoc pulls docstrings from the code; napoleon parses
# Google/NumPy style docstrings.
# NOTE(review): modern Sphinx bundles napoleon as 'sphinx.ext.napoleon';
# 'sphinxcontrib.napoleon' requires the separate package — verify which is installed.
extensions = [
'sphinx.ext.autodoc',
'sphinxcontrib.napoleon',
]
# NOTE(review): autodoc_default_flags is deprecated since Sphinx 1.8 in favor
# of autodoc_default_options — confirm the Sphinx version in use.
autodoc_default_flags = ["members", "show-inheritance"]
# Document members in the order they appear in the source file.
autodoc_member_order = "bysource"
templates_path = ['_templates']
source_suffix = '.rst'
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'rspace'
copyright = u'GPL'
version = '0.1'
release = '0.1'
language = 'en'
# Date format uses a Spanish 'de' separator, e.g. "01 de enero , 2016".
today_fmt = '%d de %B , %Y'
exclude_patterns = ['_build']
show_authors = True
pygments_style = 'sphinx'
# HTML output options.
html_title = 'rspace Docs'
html_short_title = 'rspace Docs'
html_last_updated_fmt = '%d de %B , %Y'
html_domain_indices = True
html_show_sourcelink = False
html_show_sphinx = False
html_show_copyright = True
htmlhelp_basename = 'rspacedoc'
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'rspace', u'cptab Docs',
u'Juan Diego Godoy Robles', 'rspace', 'PyConES 2016 - Almería',
'Miscellaneous'),
]
|
class StretchException(Exception):
    """Common base class for all exceptions raised explicitly by stretch.

    Exceptions which are subclasses of this type will be handled nicely by
    stretch and will not cause the program to exit. Any exceptions raised
    which are not a subclass of this type will exit(1) and print a traceback
    to stdout.
    """

    # Severity label used when reporting the exception; subclasses may override.
    level = "error"

    def __init__(self, message, **kwargs):
        """Store *message* and any extra keyword context on the exception."""
        Exception.__init__(self, message)
        self.message = message
        # Arbitrary extra context for structured error reporting.
        self.kwargs = kwargs

    def format_message(self):
        """Return the human-readable message; subclasses may add detail."""
        return self.message

    def __unicode__(self):
        # Python 2 compatibility hook; ignored under Python 3.
        return self.message

    def __str__(self):
        # BUG FIX: the original returned ``self.message.encode('utf-8')``,
        # which is ``bytes``. Under Python 3, ``__str__`` must return ``str``;
        # returning bytes raises "TypeError: __str__ returned non-string".
        return self.message
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.