| code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 3-1.05M) |
|---|---|---|---|---|---|
#!/usr/bin/env python
#
# Copyright (c) 2017 Stratosphere Laboratory.
#
# This file is part of ManaTI Project
# (see <https://stratosphereips.org>). It was created by 'Raul B. Netto <raulbeni@gmail.com>'
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. See the file 'docs/LICENSE' or see <http://www.gnu.org/licenses/>
# for copying permission.
#
import Levenshtein
import datetime
from tld import get_tld
import pprint as pp
import pythonwhois
from pythonwhois.shared import WhoisException
from contextlib import contextmanager
from collections import Iterable
from passivetotal.common.utilities import is_ip
import re
from passivetotal.libs.whois import *
import dateutil.parser
import config.settings as settings
from peewee import *
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import sys
import argparse
import os
import json
import time
import warnings
warnings.filterwarnings("ignore")
# Python 2 hack: make UTF-8 the default codec for implicit str/unicode coercion
reload(sys)
sys.setdefaultencoding("utf-8")
from manati.analysis_sessions.models import WhoisConsult
KEY_DOMAIN_NAME = 'domain_name'
KEY_REGISTRAR = 'registrar'
KEY_NAME = 'name'
KEY_ORG = 'org'
KEY_ZIPCODE = 'zipcode'
KEY_CREATION_DATE = 'creation_date'
KEY_EXPIRATION_DATE = 'expiration_date'
KEY_EMAILS = 'emails'
KEY_NAME_SERVERS = 'name_servers'
RELATION_THRESHOLD = 75  # distance threshold chosen from the ROC curve in the thesis
weights = [0, 1, 1, 1, 1, 1, 1, 1]  # per-feature weights; the domain-name distance is zeroed out
def __levenshtein__(str1, str2):
str1 = str1.encode('utf-8')
str2 = str2.encode('utf-8')
return Levenshtein.distance(str1.lower(),str2.lower())
def __dist_domain__name__(domain_name_a, domain_name_b):
return __levenshtein__(str(domain_name_a).lower(), str(domain_name_b).lower())
def __dist_registrar__(registrar_a, registrar_b):
registrar_a = registrar_a if registrar_a is not None else ''
registrar_b = registrar_b if registrar_b is not None else ''
registrar_a = registrar_a.encode('utf-8') if not isinstance(registrar_a, list) else registrar_a[0].encode('utf-8')
registrar_b = registrar_b.encode('utf-8') if not isinstance(registrar_b, list) else registrar_b[0].encode('utf-8')
return __levenshtein__(str(registrar_a).lower(), str(registrar_b).lower())
def __dist_name__(name_a, name_b):
return __levenshtein__(str(name_a).lower(), str(name_b).lower())
def __dist_org_by_min_dist__(orgs_a=[], orgs_b=[]):
orgs_seed = orgs_a.split(',') if not isinstance(orgs_a, list) else orgs_a
orgs_file = orgs_b.split(',') if not isinstance(orgs_b, list) else orgs_b
if not orgs_seed and not orgs_file:
return float(0)
elif not orgs_seed:
orgs_seed = ['']
elif not orgs_file:
orgs_file = ['']
dist_org = __levenshtein__(str(orgs_seed[0]), str(orgs_file[0]))
for org_s in orgs_seed:
org_s = org_s.encode('utf-8')
for org_f in orgs_file:
org_f = org_f.encode('utf-8')
dist_org = min(dist_org, __levenshtein__(str(org_s), str(org_f)))
return float(dist_org)
def __dist_zipcode_by_min_dist__(zipcodes_a=[], zipcodes_b=[]):
zipcodes_seed = zipcodes_a.split(',') if not isinstance(zipcodes_a, list) else zipcodes_a
zipcodes_file = zipcodes_b.split(',') if not isinstance(zipcodes_b, list) else zipcodes_b
if not zipcodes_seed and not zipcodes_file:
return float(0)
elif not zipcodes_seed:
zipcodes_seed = ['']
elif not zipcodes_file:
zipcodes_file = ['']
dist_zipcode = __levenshtein__(str(zipcodes_seed[0]), str(zipcodes_file[0]))
for zipcode_s in zipcodes_seed:
for zipcode_f in zipcodes_file:
dist_zipcode = min(dist_zipcode, __levenshtein__(str(zipcode_s), str(zipcode_f)))
return float(dist_zipcode)
def get_date_aux(date):
try:
return datetime.datetime.strptime(date, '%d-%m-%Y') \
if not isinstance(date, datetime.datetime) else date
except Exception as ex:
return dateutil.parser.parse(date)
# TTL distance by proportion: the closer the result is to zero, the closer the two lifetimes are
def get_diff_ttl(creation_date_a, creation_date_b,expiration_date_a, expiration_date_b):
if not creation_date_a and not creation_date_b and not expiration_date_a and not expiration_date_b:
return float(0)
elif not creation_date_a and not creation_date_b and expiration_date_a and expiration_date_b:
if expiration_date_a == expiration_date_b:
return float(0)
else:
return float(1)
elif creation_date_a and creation_date_b and not expiration_date_a and not expiration_date_b:
if creation_date_a == creation_date_b:
return float(0)
else:
return float(1)
elif not creation_date_a or not creation_date_b or not expiration_date_a or not expiration_date_b:
return float(1)
else:
cd_a = get_date_aux(creation_date_a)
ed_a = get_date_aux(expiration_date_a)
cd_b = get_date_aux(creation_date_b)
ed_b = get_date_aux(expiration_date_b)
ttl_days_b = float(abs(cd_b - ed_b).days) # time to live
ttl_days_a = float(abs(cd_a - ed_a).days)
if ttl_days_b == ttl_days_a:
return float(0)
else:
return float(1) - ((ttl_days_b / ttl_days_a) if ttl_days_b <= ttl_days_a else (ttl_days_a / ttl_days_b))
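# A worked example of the proportional TTL distance above (dates are
# illustrative, not from the original data):
#
#     >>> get_diff_ttl('01-01-2016', '01-01-2016', '01-01-2017', '01-01-2018')
#     0.4993...  # 1 - (366 / 731): the shorter lifetime over the longer one
#
# Identical lifetimes yield 0.0, and a date missing on only one side yields 1.0.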
# Distance between email lists, taking the minimum pairwise Levenshtein distance
def get_diff_emails_by_min_dist(emails_a=[], emails_b=[]):
emails_seed = emails_a.split(',') if not isinstance(emails_a, list) else emails_a
emails_file = emails_b.split(',') if not isinstance(emails_b, list) else emails_b
if not emails_seed and not emails_file:
return float(0)
elif not emails_seed:
emails_seed = ['']
elif not emails_file:
emails_file = ['']
dist_email = __levenshtein__(str(emails_seed[0]), str(emails_file[0]))
for email_s in emails_seed:
for email_f in emails_file:
dist_email = min(dist_email, __levenshtein__(str(email_s), str(email_f)))
return float(dist_email)
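# Illustration of the pairwise-minimum technique (hypothetical addresses):
# every email in one list is compared against every email in the other, and
# the smallest Levenshtein distance wins, so one shared contact is enough to
# make two WHOIS records look close.
#
#     >>> get_diff_emails_by_min_dist(['admin@foo.com', 'abuse@foo.com'],
#     ...                             ['admin@foo.com'])
#     0.0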
# Distance between name-server lists, taking the minimum pairwise Levenshtein distance
def get_diff_name_servers_by_min_dist(name_servers_a=[], name_servers_b=[]):
if name_servers_a is None:
name_servers_a = []
if name_servers_b is None:
name_servers_b = []
name_servers_seed = name_servers_a.split(',') if not isinstance(name_servers_a, list) else name_servers_a
name_servers_file = name_servers_b.split(',') if not isinstance(name_servers_b, list) else name_servers_b
if not name_servers_seed and not name_servers_file:
return float(0)
elif not name_servers_seed:
name_servers_seed = ['']
elif not name_servers_file:
name_servers_file = ['']
dist_name_server = __levenshtein__(str(name_servers_seed[0]), str(name_servers_file[0]))
for name_server_s in name_servers_seed:
for name_server_f in name_servers_file:
dist_name_server = min(dist_name_server, __levenshtein__(str(name_server_s), str(name_server_f)))
return float(dist_name_server)
def features_domains_attr(domain_name_a, registrar_a, name_a, orgs_a, zipcodes_a, creation_date_a,
expiration_date_a, emails_str_a, name_servers_str_a,
domain_name_b, registrar_b, name_b, orgs_b, zipcodes_b, creation_date_b,
expiration_date_b, emails_str_b, name_servers_str_b, ):
dist_domain_name = __dist_domain__name__(domain_name_a, domain_name_b)
dist_registrar = __dist_registrar__(registrar_a, registrar_b)
dist_name = __dist_name__(name_a, name_b)
dist_org = round(__dist_org_by_min_dist__(orgs_a, orgs_b),2)
dist_zipcode = round(__dist_zipcode_by_min_dist__(zipcodes_a, zipcodes_b),2)
diff_ttl = round(get_diff_ttl(creation_date_a, creation_date_b,expiration_date_a, expiration_date_b),2)
diff_emails = round(get_diff_emails_by_min_dist(emails_str_a, emails_str_b),2)
diff_name_servers = round(get_diff_name_servers_by_min_dist(name_servers_str_a,name_servers_str_b),2)
dict_result = dict(dist_domain_name=dist_domain_name,
dist_registrar=dist_registrar,
dist_name=dist_name,
dist_org=dist_org,
dist_zipcode=dist_zipcode,
dist_duration= diff_ttl,
diff_emails=diff_emails,
diff_name_servers=diff_name_servers)
return dict_result, [dist_domain_name, dist_registrar, dist_name, dist_org, dist_zipcode,
diff_ttl, diff_emails, diff_name_servers]
def features_domains(whois_info_a={}, whois_info_b={}):
domain_name_a = whois_info_a.get(KEY_DOMAIN_NAME,'')
registrar_a = whois_info_a.get(KEY_REGISTRAR,'')
name_a = whois_info_a.get(KEY_NAME,'')
orgs_a = whois_info_a.get(KEY_ORG,[]) # []
zipcode_a = whois_info_a.get(KEY_ZIPCODE,[]) # []
creation_date_a = whois_info_a.get(KEY_CREATION_DATE,None)
expiration_date_a = whois_info_a.get(KEY_EXPIRATION_DATE,None)
emails_a = whois_info_a.get(KEY_EMAILS, []) # []
name_servers_a = whois_info_a.get(KEY_NAME_SERVERS, []) # []
domain_name_b = whois_info_b.get(KEY_DOMAIN_NAME, '')
registrar_b = whois_info_b.get(KEY_REGISTRAR, '')
name_b = whois_info_b.get(KEY_NAME, '')
orgs_b = whois_info_b.get(KEY_ORG, []) # []
zipcode_b = whois_info_b.get(KEY_ZIPCODE, []) # []
creation_date_b = whois_info_b.get(KEY_CREATION_DATE, None)
expiration_date_b = whois_info_b.get(KEY_EXPIRATION_DATE, None)
emails_b = whois_info_b.get(KEY_EMAILS, []) # []
name_servers_b = whois_info_b.get(KEY_NAME_SERVERS, []) # []
return features_domains_attr(domain_name_a, registrar_a, name_a, orgs_a, zipcode_a, creation_date_a,
expiration_date_a, emails_a,name_servers_a,
domain_name_b, registrar_b, name_b, orgs_b, zipcode_b, creation_date_b,
expiration_date_b, emails_b, name_servers_b)
def distance_domains(whois_info_a, whois_info_b):
feature_distance,feature_values = features_domains(whois_info_a, whois_info_b)
multiply = list(np.multiply(feature_values, weights))
sum_features = sum(multiply)
return abs(sum_features), feature_distance
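# Hedged usage sketch (the field values are illustrative, not from the
# project): two parsed WHOIS dicts are reduced to eight feature distances,
# multiplied elementwise by `weights` and summed; totals at or below
# RELATION_THRESHOLD are treated as "related" by
# distance_related_by_whois_obj below.
#
#     >>> a = {KEY_DOMAIN_NAME: 'foo.com', KEY_REGISTRAR: 'NameCheap',
#     ...      KEY_EMAILS: ['admin@foo.com']}
#     >>> b = {KEY_DOMAIN_NAME: 'foo.net', KEY_REGISTRAR: 'NameCheap',
#     ...      KEY_EMAILS: ['admin@foo.com']}
#     >>> distance, per_feature = distance_domains(a, b)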
def get_input_and_target_from(dmfs):
inputs = []
target = []
for dmf in dmfs:
inputs.append([1] + dmf.get_features().values())  # leading 1 is the regression bias term
target.append(dmf.related)
return inputs, target
def get_whois_distance(features_whois_a,features_whois_b):
return distance_domains(features_whois_a, features_whois_b)
# linear regression alg
def distance_related_by_whois_obj(external_module,domain_a, domain_b):
global weights
result = WhoisConsult.get_features_info_by_set_url(external_module, [domain_a,domain_b])
domains = result.keys()
try:
whois_info_a = result[domains[0]]
whois_info_b = result[domains[1]]
except IndexError:
# only one WHOIS record came back (e.g. both URLs resolved to the same
# domain), so compare the single record with itself
whois_info_a = result[domains[0]]
whois_info_b = result[domains[0]]
distance, feature_distance = get_whois_distance(whois_info_a,whois_info_b)
return distance <= RELATION_THRESHOLD,distance,feature_distance
def get_whois_information_features_of(external_module, domains):
WhoisConsult.get_features_info_by_set_url(external_module, domains)
# for domain in domains:
# WhoisConsult.get_features_info(external_module,domain)
| stratosphereips/Manati | manati/share_modules/whois_distance.py | Python | agpl-3.0 | 12,100 |
#!/usr/bin/python2.7
# Copyright 2012 Google Inc. All Rights Reserved.
#
"""Common generator index utilities."""
__author__ = 'akesling@google.com (Alex Kesling)'
from googleapis.codegen import cpp_generator
from googleapis.codegen import csharp_generator
from googleapis.codegen import dart_generator
from googleapis.codegen import gwt_generator
from googleapis.codegen import java_generator
from googleapis.codegen import objc_generator
from googleapis.codegen import php_generator
from googleapis.codegen import python_generator
from googleapis.codegen import sample_generator
# Multiple generators per language are possible, as is the case with
# Java below. Template trees can specify a specific generator in their
# features.json file (with the "generator" attribute); this will refer
# to a key in these dictionaries. If a template tree does not
# include this specification, the language name is used as a key.
_GENERATORS_BY_LANGUAGE = {
'cpp': cpp_generator.CppGenerator,
'csharp': csharp_generator.CSharpGenerator,
'dart': dart_generator.DartGenerator,
'gwt': gwt_generator.GwtGenerator,
'java': java_generator.Java14Generator,
'objc': objc_generator.ObjCGenerator,
'php': php_generator.PHPGenerator,
'python': python_generator.PythonGenerator,
'sample': sample_generator.SampleGenerator,
}
_ALL_GENERATORS = {
'java1_12': java_generator.Java12Generator,
'java1_13': java_generator.Java12Generator,
'java1_14': java_generator.Java14Generator,
'java1_15': java_generator.Java14Generator,
}
_ALL_GENERATORS.update(_GENERATORS_BY_LANGUAGE)
def GetGeneratorByLanguage(language_or_generator):
"""Return the appropriate generator for this language.
Args:
language_or_generator: (str) the language for which to return a generator,
or the name of a specific generator.
Raises:
ValueError: If provided language isn't supported.
Returns:
The appropriate code generator object (which may be None).
"""
try:
return _ALL_GENERATORS[language_or_generator]
except KeyError:
raise ValueError('Unsupported language: %s' % language_or_generator)
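# Hedged usage sketch (not part of the original module): plain language names
# and specific generator keys both resolve through _ALL_GENERATORS.
#
#     >>> GetGeneratorByLanguage('python')    # -> python_generator.PythonGenerator
#     >>> GetGeneratorByLanguage('java1_12')  # -> java_generator.Java12Generator
#     >>> GetGeneratorByLanguage('cobol')     # raises ValueError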
def SupportedLanguages():
"""Return the list of languages we support.
Returns:
list(str)
"""
return sorted(_GENERATORS_BY_LANGUAGE)
| ivannaranjo/google-api-dotnet-client | ClientGenerator/src/googleapis/codegen/generator_lookup.py | Python | apache-2.0 | 2,301 |
"""
Badge Awarding backend for Badgr-Server.
"""
import logging
import mimetypes
import requests
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.validators import URLValidator
from django.core.exceptions import ValidationError
from lazy import lazy
from requests.exceptions import HTTPError  # raised by Response.raise_for_status()
from badges.backends.base import BadgeBackend
from badges.models import BadgeAssertion
from eventtracking import tracker
MAX_SLUG_LENGTH = 255
LOGGER = logging.getLogger(__name__)
class BadgrBackend(BadgeBackend):
"""
Backend for Badgr-Server by Concentric Sky. http://info.badgr.io/
"""
badges = []
def __init__(self):
super(BadgrBackend, self).__init__()
if not settings.BADGR_API_TOKEN:
raise ImproperlyConfigured("BADGR_API_TOKEN not set.")
@lazy
def _base_url(self):
"""
Base URL for all API requests.
"""
return "{}/v1/issuer/issuers/{}".format(settings.BADGR_BASE_URL, settings.BADGR_ISSUER_SLUG)
@lazy
def _badge_create_url(self):
"""
URL for generating a new Badge specification
"""
return "{}/badges".format(self._base_url)
def _badge_url(self, slug):
"""
Get the URL for a course's badge in a given mode.
"""
return "{}/{}".format(self._badge_create_url, slug)
def _assertion_url(self, slug):
"""
URL for generating a new assertion.
"""
return "{}/assertions".format(self._badge_url(slug))
def _log_if_raised(self, response, data):
"""
Log server response if there was an error.
"""
try:
response.raise_for_status()
except HTTPError:
LOGGER.error(
u"Encountered an error when contacting the Badgr-Server. Request sent to %r with headers %r.\n"
u"and data values %r\n"
u"Response status was %s.\n%s",
response.request.url, response.request.headers,
data,
response.status_code, response.content
)
raise
def _create_badge(self, badge_class):
"""
Create the badge class on Badgr.
"""
image = badge_class.image
# We don't want to bother validating the file any further than making sure we can detect its MIME type,
# for HTTP. The Badgr-Server should tell us if there's anything in particular wrong with it.
content_type, __ = mimetypes.guess_type(image.name)
if not content_type:
raise ValueError(
u"Could not determine content-type of image! Make sure it is a properly named .png file. "
u"Filename was: {}".format(image.name)
)
files = {'image': (image.name, image, content_type)}
try: # TODO: eventually we should pass both
URLValidator(badge_class.criteria)
criteria_type = 'criteria_url'
except ValidationError:
criteria_type = 'criteria_text'
data = {
'name': badge_class.display_name,
criteria_type: badge_class.criteria,
'description': badge_class.description,
}
result = requests.post(
self._badge_create_url, headers=self._get_headers(), data=data, files=files,
timeout=settings.BADGR_TIMEOUT
)
self._log_if_raised(result, data)
def _send_assertion_created_event(self, user, assertion):
"""
Send an analytics event to record the creation of a badge assertion.
"""
tracker.emit(
'edx.badge.assertion.created', {
'user_id': user.id,
'badge_slug': assertion.badge_class.slug,
'badge_name': assertion.badge_class.display_name,
'issuing_component': assertion.badge_class.issuing_component,
'course_id': unicode(assertion.badge_class.course_id),
'enrollment_mode': assertion.badge_class.mode,
'assertion_id': assertion.id,
'assertion_image_url': assertion.image_url,
'assertion_json_url': assertion.assertion_url,
'issuer': assertion.data.get('issuer'),
}
)
def _create_assertion(self, badge_class, user, evidence_url):
"""
Register an assertion with the Badgr server for a particular user for a specific class.
"""
data = {
'email': user.email,
'evidence': evidence_url,
}
response = requests.post(
self._assertion_url(badge_class.slug), headers=self._get_headers(), data=data,
timeout=settings.BADGR_TIMEOUT
)
self._log_if_raised(response, data)
assertion, __ = BadgeAssertion.objects.get_or_create(user=user, badge_class=badge_class)
assertion.data = response.json()
assertion.backend = 'BadgrBackend'
assertion.image_url = assertion.data['image']
assertion.assertion_url = assertion.data['json']['id']
assertion.save()
self._send_assertion_created_event(user, assertion)
return assertion
@staticmethod
def _get_headers():
"""
Headers to send along with the request-- used for authentication.
"""
return {'Authorization': 'Token {}'.format(settings.BADGR_API_TOKEN)}
def _ensure_badge_created(self, badge_class):
"""
Verify a badge has been created for this badge class, and create it if not.
"""
slug = badge_class.slug
if slug in BadgrBackend.badges:
return
response = requests.get(self._badge_url(slug), headers=self._get_headers(), timeout=settings.BADGR_TIMEOUT)
if response.status_code != 200:
self._create_badge(badge_class)
BadgrBackend.badges.append(slug)
def award(self, badge_class, user, evidence_url=None):
"""
Make sure the badge class has been created on the backend, and then award the badge class to the user.
"""
self._ensure_badge_created(badge_class)
return self._create_assertion(badge_class, user, evidence_url)
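# Hedged usage sketch (the badge_class/user objects come from the surrounding
# edx-platform code; the evidence URL is illustrative): awarding first makes
# sure the badge class exists on the Badgr server, then posts an assertion.
#
#     backend = BadgrBackend()
#     assertion = backend.award(badge_class, user,
#                               evidence_url='https://example.com/evidence')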
| gymnasium/edx-platform | lms/djangoapps/badges/backends/badgr.py | Python | agpl-3.0 | 6,288 |
import os
import sys
import time
from owanimo.app.error import ERROR as e
from owanimo.script import allegory_special
from owanimo.util import define
from owanimo.util.log import LOG as L
class Allegory(allegory_special.Allegory):
def __init__(self, runner, profile, player):
allegory_special.Allegory.__init__(self, runner, profile, player)
time.sleep(2)
def before(self):
L.info("*** Start Allegory : %s *** " % __file__)
self.start()
def test(self):
# Step 1 : Login
result = self.check(True, self.login(), e.LOGIN)
self.flush(self.step())
if not result: return
# Step 2 : Select Special Quest
result = self.check(True,
self.quest_special_material_highest(), e.SELECT_SP_QUEST)
self.flush(self.step())
if not result: return
# Step 3 : Select Support
result = self.check(True, self.support(), e.SUPPORT)
self.flush(self.step())
if not result: return
# Step 4 : Normal Quest
result = self.check(True, self.quest_puyo_all(), e.QUEST)
self.flush(self.step())
if not result: return
# Step 5 : Normal Quest Result
result = self.check(True, self.quest_result(), e.QUEST_RESULT)
self.flush(self.step())
if not result: return
def after(self):
L.info("*** End Allegory : %s *** " % __file__)
self.stop()
| setsulla/owanimo | script/quest_special_material_highest.py | Python | mit | 1,446 |
"""
Create cropped test set image
$ ipython -i --pdb scripts/create_test_head_crop_image.py -- --size 256 --data 256_20151023 --model localize_pts_dec17 --overwrite
"""
import argparse
import os
import sys
from time import strftime
import pandas as pd
import numpy as np
from skimage.io import imread
from tqdm import tqdm
from skimage.transform import resize
from utils import add_padding_to_bbox
import importlib
def get_cropped_test_img(fname, bbox_pred, pad=None, as_grey=False, return_bbox=False):
img = imread(fname, as_grey=as_grey)
h = img.shape[0]
w = img.shape[1]
bbox_pred = bbox_pred * [w, h, w, h]
bbox_pred = np.round(bbox_pred).astype(int)
l = min(max(bbox_pred[0], 0), w)
t = min(max(bbox_pred[1], 0), h)
r = min(max(l + bbox_pred[2], 0), w)
b = min(max(t + bbox_pred[3], 0), h)
if pad is not None:
l, t, r, b = add_padding_to_bbox(
l, t, (r - l), (b - t), pad / 100.0,
img.shape[1], img.shape[0],
format='ltrb'
)
cropped_img = img[t:b, l:r]
if return_bbox:
return cropped_img, bbox_pred
else:
return cropped_img
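# Worked example of the bbox arithmetic above (numbers are illustrative): for
# a 400x200 image (w=400, h=200) and a normalized prediction
# [0.25, 0.25, 0.5, 0.5], scaling by [w, h, w, h] gives [100, 50, 200, 100],
# so l=100, t=50, r=100+200=300, b=50+100=150 and the crop is
# img[50:150, 100:300].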
def load_data(fname, data_grey=False):
n = 6925
size = int(fname.split('_')[0])
if data_grey:
X_fname = 'cache/X_test_grey_%s.npy' % fname
else:
X_fname = 'cache/X_test_%s.npy' % fname
num_channels = 1 if data_grey else 3
X_shape = (n, num_channels, size, size)
print 'Load test data from %s' % X_fname
X = np.memmap(X_fname, dtype=np.float32, mode='r', shape=X_shape)
return X
def get_current_date():
return strftime('%Y%m%d')
def load_model(fname):
model = importlib.import_module('model_definitions.%s' % fname)
return model
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--size', required=True, type=int, help='Size of the image')
parser.add_argument('--data', required=True, help='Input uncropped image')
parser.add_argument('--data_grey', action='store_true', help='Is the data grey?')
parser.add_argument('--model', required=True, help='Localization model')
parser.add_argument('--model_batch_size', default=16, help='Batch size')
parser.add_argument('--overwrite', action='store_true', help='Overwrite existing cache')
parser.add_argument('--as_grey', action='store_true', help='Save image as grayscale')
parser.add_argument('--pad', default=0, type=int, help='Padding of the bounding box')
args = parser.parse_args()
print 'Loading model: %s' % args.model
model = load_model(args.model)
localization_net = model.net
model_fname = model.model_fname[2:] # Hack to remove the "./"
localization_net.load_params_from(model_fname)
localization_net.batch_iterator_train.batch_size = args.model_batch_size
localization_net.batch_iterator_test.batch_size = args.model_batch_size
print
print 'Loading data: %s' % args.data
df = pd.read_csv('data/sample_submission.csv')
X_test = load_data(args.data, args.data_grey)
print X_test.shape
print
print 'Preparing output'
size_fname = '%s_pad%s' % (args.size, args.pad) if args.pad > 0 else str(args.size)
if args.as_grey:
X_fname = 'cache/X_test_cropped_grey_%s_%s_%s.npy' % (args.model, size_fname, get_current_date())
X_shape = (len(df), 1, args.size, args.size)
else:
X_fname = 'cache/X_test_cropped_%s_%s_%s.npy' % (args.model, size_fname, get_current_date())
X_shape = (len(df), 3, args.size, args.size)
if os.path.exists(X_fname) and not args.overwrite:
print '%s exists. Use --overwrite' % X_fname
sys.exit(1)
print 'Will write X_test_cropped to %s with shape of %s' % (X_fname, X_shape)
print
X_fp = np.memmap(X_fname, dtype=np.float32, mode='w+', shape=X_shape)
print 'Predicting bounding boxes'
test_bboxes_pred = localization_net.predict(X_test)
print
assert len(test_bboxes_pred) == len(X_fp)
for (i, row), bbox_pred in tqdm(zip(df.iterrows(), test_bboxes_pred), total=len(df)):
fname = os.path.join('data/imgs/', row['Image'])
try:
cropped_img = get_cropped_test_img(fname, bbox_pred, pad=args.pad, as_grey=args.as_grey)
cropped_img = resize(cropped_img, (args.size, args.size))
cropped_img = cropped_img.astype(np.float32)
if args.as_grey:
cropped_img = cropped_img.reshape(1, args.size, args.size)
else:
cropped_img = cropped_img.transpose(2, 0, 1)
assert cropped_img.dtype == np.float32
X_fp[i] = cropped_img
X_fp.flush()
except Exception, e:
print '%s has failed' % i
print e
print bbox_pred
| felixlaumon/kaggle-right-whale | scripts/create_test_cropped_image.py | Python | mit | 4,813 |
import os
# toolchains options
ARCH='arm'
CPU='am335x-vmm'
CROSS_TOOL='gcc'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'C:\Program Files (x86)\CodeSourcery\Sourcery_CodeBench_Lite_for_ARM_EABI\bin'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = 'C:/Keil'
elif CROSS_TOOL == 'iar':
print '================ERROR============================'
print 'Not support IAR yet!'
print '================================================='
exit(0)
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
CXX = PREFIX + 'g++'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -Wall -march=armv7-a -mtune=cortex-a8'+\
' -ftree-vectorize -ffast-math -mfpu=vfpv3-d16 -mfloat-abi=softfp'
#DEVICE = ' '
CFLAGS = DEVICE
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -D__ASSEMBLY__'
LINK_SCRIPT = 'bb_vmm.lds'
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread-beaglebone.map,-cref,-u,system_vectors'+ ' -T %s' % LINK_SCRIPT
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -Wall'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2 -Wall'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' +\
SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
CXX = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --device DARMP'
CFLAGS = DEVICE + ' --apcs=interwork'
AFLAGS = DEVICE
LFLAGS = DEVICE + ' --info sizes --info totals --info unused --info veneers --list rtthread-beaglebone.map --scatter beaglebone_ram.sct'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/RV31/INC'
LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/RV31/LIB'
EXEC_PATH += '/arm/bin40/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = ' --cpu DARMP'
CFLAGS = ''
AFLAGS = ''
LFLAGS = ' --config beaglebone_ram.icf'
EXEC_PATH += '/arm/bin/'
RT_USING_MINILIBC = False
POST_ACTION = ''
| wzyy2/Embedded-Multi-OS-TOY | app/rtconfig.py | Python | gpl-2.0 | 2,690 |
# Copyright 2019 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mycroft_bus_client import MessageBusClient as _MessageBusClient
from mycroft_bus_client.client import MessageWaiter
from mycroft.messagebus.load_config import load_message_bus_config
from mycroft.util.process_utils import create_echo_function
class MessageBusClient(_MessageBusClient):
def __init__(self, host=None, port=None, route=None, ssl=None):
config_overrides = dict(host=host, port=port, route=route, ssl=ssl)
config = load_message_bus_config(**config_overrides)
super().__init__(config.host, config.port, config.route, config.ssl)
def echo():
message_bus_client = MessageBusClient()
def repeat_utterance(message):
message.msg_type = 'speak'
message_bus_client.emit(message)
message_bus_client.on('message', create_echo_function(None))
message_bus_client.on('recognizer_loop:utterance', repeat_utterance)
message_bus_client.run_forever()
if __name__ == "__main__":
echo()
| forslund/mycroft-core | mycroft/messagebus/client/client.py | Python | apache-2.0 | 1,543 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('admin_complex_filter', '0002_auto_20151030_1603'),
]
operations = [
migrations.AddField(
model_name='item',
name='available',
field=models.BooleanField(default=False),
),
]
| vladiibine/django-examples | admin-gallery/widget_customization/admin_complex_filter/migrations/0003_item_available.py | Python | mit | 417 |
from django.contrib import auth
from django.contrib.auth import authenticate, login
from django.core.files.temp import NamedTemporaryFile
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.db import transaction
from django.db.utils import IntegrityError
import json
from django_facebook import exceptions as facebook_exceptions, \
settings as facebook_settings, signals
from django_facebook.api import get_facebook_graph
from django_facebook.utils import get_registration_backend, get_form_class, \
get_profile_model, to_bool, get_user_model, get_instance_for,\
get_user_attribute, try_get_profile, get_model_for_attribute,\
get_instance_for_attribute, update_user_attributes
from random import randint
import logging
import sys
import urllib.request, urllib.parse, urllib.error
logger = logging.getLogger(__name__)
class CONNECT_ACTIONS:
class LOGIN:
pass
class CONNECT(LOGIN):
pass
class REGISTER:
pass
def connect_user(request, access_token=None, facebook_graph=None, connect_facebook=False):
'''
Given a request either
- (if authenticated) connect the user
- login
- register
'''
user = None
graph = facebook_graph or get_facebook_graph(request, access_token)
converter = get_instance_for('user_conversion', graph)
assert converter.is_authenticated()
facebook_data = converter.facebook_profile_data()
force_registration = request.REQUEST.get('force_registration') or\
request.REQUEST.get('force_registration_hard')
logger.debug('force registration is set to %s', force_registration)
if connect_facebook and request.user.is_authenticated() and not force_registration:
# we should only allow connect if users indicate they really want to connect
# only when the request.CONNECT_FACEBOOK = 1
# if this isn't present we just do a login
action = CONNECT_ACTIONS.CONNECT
# connect overwrites the profile's existing data with the fresh Facebook data
user = _connect_user(request, converter, overwrite=True)
else:
email = facebook_data.get('email', False)
email_verified = facebook_data.get('verified', False)
kwargs = {}
if email and email_verified:
kwargs = {'facebook_email': email}
auth_user = authenticate(facebook_id=facebook_data['id'], **kwargs)
if auth_user and not force_registration:
action = CONNECT_ACTIONS.LOGIN
# Has the user registered without Facebook, using the verified FB
# email address?
# It is after all quite common to use email addresses for usernames
update = getattr(auth_user, 'fb_update_required', False)
profile = try_get_profile(auth_user)
current_facebook_id = get_user_attribute(
auth_user, profile, 'facebook_id')
if not current_facebook_id:
update = True
# login the user
user = _login_user(request, converter, auth_user, update=update)
else:
action = CONNECT_ACTIONS.REGISTER
# when force registration is active we should remove the old
# profile
try:
user = _register_user(request, converter,
remove_old_connections=force_registration)
except facebook_exceptions.AlreadyRegistered as e:
# in Multithreaded environments it's possible someone beats us to
# the punch, in that case just login
logger.info(
'parallel register encountered, slower thread is doing a login')
auth_user = authenticate(
facebook_id=facebook_data['id'], **kwargs)
if not auth_user:
# We don't have a valid user so raise
raise e
action = CONNECT_ACTIONS.LOGIN
user = _login_user(request, converter, auth_user, update=False)
_update_likes_and_friends(request, user, converter)
_update_access_token(user, graph)
logger.info('connect finished with action %s', action)
return action, user
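# Hedged usage sketch (the view code is illustrative, not part of this
# module): a Facebook login view would typically branch on the returned
# action.
#
#     action, user = connect_user(request, access_token=token)
#     if action is CONNECT_ACTIONS.REGISTER:
#         pass  # e.g. redirect to a "complete your profile" page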
def _login_user(request, facebook, authenticated_user, update=False):
login(request, authenticated_user)
if update:
_connect_user(request, facebook)
return authenticated_user
def _connect_user(request, facebook, overwrite=True):
'''
Update the fields on the user model and connects it to the facebook account
'''
if not request.user.is_authenticated():
raise ValueError(
'Connect user can only be used on authenticated users')
if not facebook.is_authenticated():
raise ValueError(
'Facebook needs to be authenticated for connect flows')
data = facebook.facebook_profile_data()
facebook_id = data['id']
# see if we already have profiles connected to this Facebook account
old_connections = _get_old_connections(facebook_id, request.user.id)[:20]
if old_connections and not request.REQUEST.get('confirm_connect'):
raise facebook_exceptions.AlreadyConnectedError(list(old_connections))
user = _update_user(request.user, facebook, overwrite=overwrite)
return user
def _update_likes_and_friends(request, user, facebook):
# store likes and friends if configured
sid = transaction.savepoint()
try:
if facebook_settings.FACEBOOK_STORE_LIKES:
facebook.get_and_store_likes(user)
if facebook_settings.FACEBOOK_STORE_FRIENDS:
facebook.get_and_store_friends(user)
transaction.savepoint_commit(sid)
except IntegrityError as e:
logger.warn('Integrity error encountered during registration, '
'probably a double submission %s' % e,
exc_info=sys.exc_info(), extra={
'request': request,
'data': {
'body': str(e),
}
})
transaction.savepoint_rollback(sid)
def _update_access_token(user, graph):
'''
Conditionally updates the access token in the database
'''
profile = try_get_profile(user)
model_or_profile = get_instance_for_attribute(
user, profile, 'access_token')
# store the access token for later usage if the profile model supports it
if model_or_profile:
# update if not equal to the current token
new_token = graph.access_token != model_or_profile.access_token
token_message = 'a new' if new_token else 'the same'
logger.info(
'found %s token %s', token_message, graph.access_token[:10])
if new_token:
logger.info('access token changed, updating now')
model_or_profile.update_access_token(graph.access_token)
model_or_profile.save()
# see if we can extend the access token
# this runs in a task, after extending the token we fire an event
model_or_profile.extend_access_token()
def _register_user(request, facebook, profile_callback=None,
remove_old_connections=False):
'''
Creates a new user and authenticates
The registration form handles the registration and validation
Other data on the user profile is updates afterwards
if remove_old_connections = True we will disconnect old
profiles from their facebook flow
'''
if not facebook.is_authenticated():
raise ValueError(
'Facebook needs to be authenticated for connect flows')
# get the backend on new registration systems, or none
# if we are on an older version
backend = get_registration_backend()
logger.info('running backend %s for registration', backend)
# gets the form class specified in FACEBOOK_REGISTRATION_FORM
form_class = get_form_class(backend, request)
facebook_data = facebook.facebook_registration_data()
data = request.POST.copy()
for k, v in list(facebook_data.items()):
if not data.get(k):
data[k] = v
if remove_old_connections:
_remove_old_connections(facebook_data['facebook_id'])
if request.REQUEST.get('force_registration_hard'):
data['email'] = data['email'].replace(
'@', '+test%s@' % randint(0, 1000000000))
form = form_class(data=data, files=request.FILES,
initial={'ip': request.META['REMOTE_ADDR']})
if not form.is_valid():
# show errors in sentry
form_errors = form.errors
error = facebook_exceptions.IncompleteProfileError(
'Facebook signup incomplete')
error.form = form
raise error
try:
# for new registration systems use the backends methods of saving
new_user = None
if backend:
new_user = backend.register(request,
form=form, **form.cleaned_data)
# fall back to the form approach
if new_user is None:
raise ValueError(
'new_user is None, note that backward compatibility for the older versions of django registration has been dropped.')
except IntegrityError as e:
# this happens when users click multiple times, the first request registers
# the second one raises an error
raise facebook_exceptions.AlreadyRegistered(e)
# update some extra data not yet done by the form
new_user = _update_user(new_user, facebook)
signals.facebook_user_registered.send(sender=get_user_model(),
user=new_user, facebook_data=facebook_data, request=request, converter=facebook)
# IS this the correct way for django 1.3? seems to require the backend
# attribute for some reason
new_user.backend = 'django_facebook.auth_backends.FacebookBackend'
auth.login(request, new_user)
return new_user
def _get_old_connections(facebook_id, current_user_id=None):
'''
Gets other accounts connected to this facebook id, which are not
attached to the current user
'''
user_or_profile_model = get_model_for_attribute('facebook_id')
other_facebook_accounts = user_or_profile_model.objects.filter(
facebook_id=facebook_id)
kwargs = {}
if current_user_id:
# if statement since we need to support both
user_model = get_user_model()
if user_or_profile_model == user_model:
kwargs['id'] = current_user_id
else:
kwargs['user'] = current_user_id
other_facebook_accounts = other_facebook_accounts.exclude(**kwargs)
return other_facebook_accounts
def _remove_old_connections(facebook_id, current_user_id=None):
'''
Removes the facebook id for profiles with the specified facebook id
which aren't attached to the current user
'''
other_facebook_accounts = _get_old_connections(
facebook_id, current_user_id)
other_facebook_accounts.update(facebook_id=None)
def _update_user(user, facebook, overwrite=True):
'''
Updates the user and his/her profile with the data from facebook
'''
# if you want to add fields to your user model instead of the
# profile, that's fine
# partial support (everything except raw_data and facebook_id is included)
facebook_data = facebook.facebook_registration_data(username=False)
facebook_fields = ['facebook_name', 'facebook_profile_url', 'gender',
'date_of_birth', 'about_me', 'website_url', 'first_name', 'last_name']
profile = try_get_profile(user)
# which attributes to update
attributes_dict = {}
# send the signal that we're updating
signals.facebook_pre_update.send(sender=get_user_model(), user=user,
profile=profile, facebook_data=facebook_data)
# set the facebook id and make sure we are the only user with this id
current_facebook_id = get_user_attribute(user, profile, 'facebook_id')
facebook_id_changed = facebook_data['facebook_id'] != current_facebook_id
overwrite_allowed = overwrite or not current_facebook_id
# update the facebook id and access token
facebook_id_overwritten = False
if facebook_id_changed and overwrite_allowed:
# when not overwriting we only update if there is no
# profile.facebook_id
logger.info('profile facebook id changed from %s to %s',
repr(facebook_data['facebook_id']),
repr(current_facebook_id))
attributes_dict['facebook_id'] = facebook_data['facebook_id']
facebook_id_overwritten = True
if facebook_id_overwritten:
_remove_old_connections(facebook_data['facebook_id'], user.id)
# update all fields on both user and profile
for f in facebook_fields:
facebook_value = facebook_data.get(f, False)
current_value = get_user_attribute(user, profile, f, None)
if facebook_value and not current_value:
attributes_dict[f] = facebook_value
# write the raw data in case we missed something
serialized_fb_data = json.dumps(facebook.facebook_profile_data())
current_raw_data = get_user_attribute(user, profile, 'raw_data')
if current_raw_data != serialized_fb_data:
attributes_dict['raw_data'] = serialized_fb_data
image_url = facebook_data['image']
# update the image if we are allowed and have to
if facebook_settings.FACEBOOK_STORE_LOCAL_IMAGE:
image_field = get_user_attribute(user, profile, 'image', True)
if not image_field:
image_name, image_file = _update_image(
facebook_data['facebook_id'], image_url)
image_field.save(image_name, image_file)
# save both models if they changed
update_user_attributes(user, profile, attributes_dict)
if getattr(user, '_fb_is_dirty', False):
user.save()
if getattr(profile, '_fb_is_dirty', False):
profile.save()
signals.facebook_post_update.send(sender=get_user_model(),
user=user, profile=profile, facebook_data=facebook_data)
return user
def _update_image(facebook_id, image_url):
'''
Updates the user profile's image to the given image url
Unfortunately this is quite a pain to get right with Django
Suggestions to improve this are welcome
'''
image_name = 'fb_image_%s.jpg' % facebook_id
image_temp = NamedTemporaryFile()
image_response = urllib.request.urlopen(image_url)
image_content = image_response.read()
image_temp.write(image_content)
http_message = image_response.info()
image_size = len(image_content)
try:
content_type = http_message.type
except AttributeError:
content_type = http_message.get_content_type()
image_file = InMemoryUploadedFile(
file=image_temp, name=image_name, field_name='image',
content_type=content_type, size=image_size, charset=None
)
image_file.seek(0)
image_temp.flush()
return image_name, image_file
def update_connection(request, graph):
'''
A special purpose view for updating the connection with an existing user
- updates the access token (already done in get_graph)
- sets the facebook_id if nothing is specified
- stores friends and likes if possible
'''
converter = get_instance_for('user_conversion', graph)
user = _connect_user(request, converter, overwrite=False)
_update_likes_and_friends(request, user, converter)
_update_access_token(user, graph)
return user
| abendleiter/Django-facebook | django_facebook/connect.py | Python | bsd-3-clause | 15,841 |
"""Tests for :mod:`robottelo.vm`."""
import six
import unittest2
from robottelo import ssh
from robottelo.vm import VirtualMachine, VirtualMachineError
if six.PY2:
from mock import call, patch
else:
from unittest.mock import call, patch
class VirtualMachineTestCase(unittest2.TestCase):
"""Tests for :class:`robottelo.vm.VirtualMachine`."""
provisioning_server = 'provisioning.example.com'
def setUp(self):
super(VirtualMachineTestCase, self).setUp()
self.settings_patcher = patch('robottelo.vm.settings', spec=True)
self.settings = self.settings_patcher.start()
self.settings.clients.provisioning_server = None
def tearDown(self):
super(VirtualMachineTestCase, self).tearDown()
self.settings_patcher.stop()
def configure_provisoning_server(self):
"""Helper for configuring the provisioning server on robottelo config.
"""
self.settings.clients.provisioning_server = self.provisioning_server
@patch('time.sleep')
@patch('robottelo.ssh.command', side_effect=[
ssh.SSHCommandResult(),
ssh.SSHCommandResult(stdout=['(192.168.0.1)']),
ssh.SSHCommandResult()
])
def test_dont_create_if_already_created(
self, ssh_command, sleep):
"""Check if the creation steps are run more than once"""
self.configure_provisoning_server()
vm = VirtualMachine()
with patch.multiple(
vm,
image_dir='/opt/robottelo/images',
provisioning_server='provisioning.example.com'
):
vm.create()
vm.create()
self.assertEqual(vm.ip_addr, '192.168.0.1')
self.assertEqual(ssh_command.call_count, 3)
def test_invalid_distro(self):
"""Check if an exception is raised if an invalid distro is passed"""
with self.assertRaises(VirtualMachineError):
vm = VirtualMachine(distro='invalid_distro') # noqa
def test_provisioning_server_not_configured(self):
"""Check if an exception is raised if missing provisioning_server"""
with self.assertRaises(VirtualMachineError):
vm = VirtualMachine() # noqa
@patch('robottelo.ssh.command')
def test_run(self, ssh_command):
"""Check if run calls ssh.command"""
self.configure_provisoning_server()
vm = VirtualMachine()
def create_mock():
"""A mock for create method to set instance vars to run work"""
vm._created = True
vm.ip_addr = '192.168.0.1'
with patch.object(vm, 'create', side_effect=create_mock):
vm.create()
vm.run('ls')
ssh_command.assert_called_once_with(
'ls', hostname='192.168.0.1', timeout=None)
def test_run_raises_exception(self):
"""Check if run raises an exception if the vm is not created"""
self.configure_provisoning_server()
vm = VirtualMachine()
with self.assertRaises(VirtualMachineError):
vm.run('ls')
@patch('robottelo.ssh.command')
def test_destroy(self, ssh_command):
"""Check if destroy runs the required ssh commands"""
self.configure_provisoning_server()
image_dir = '/opt/robottelo/images'
vm = VirtualMachine()
with patch.multiple(
vm,
image_dir=image_dir,
_created=True
):
vm.destroy()
self.assertEqual(ssh_command.call_count, 3)
ssh_command_args_list = [
call('virsh destroy {0}'.format(vm.hostname),
hostname=self.provisioning_server),
call('virsh undefine {0}'.format(vm.hostname),
hostname=self.provisioning_server),
call('rm {0}/{1}.img'.format(image_dir, vm.hostname),
hostname=self.provisioning_server),
]
self.assertListEqual(ssh_command.call_args_list, ssh_command_args_list)
| kbidarkar/robottelo | tests/robottelo/test_vm.py | Python | gpl-3.0 | 3,966 |
# Author: Travis Oliphant
# 1999 -- 2002
import types
import warnings
import sigtools
from scipy import special, linalg
from scipy.fftpack import fft, ifft, ifftshift, fft2, ifft2, fftn, \
ifftn, fftfreq
from numpy import polyadd, polymul, polydiv, polysub, \
roots, poly, polyval, polyder, cast, asarray, isscalar, atleast_1d, \
ones, sin, linspace, real, extract, real_if_close, zeros, array, arange, \
where, sqrt, rank, newaxis, argmax, product, cos, pi, exp, \
ravel, size, less_equal, sum, r_, iscomplexobj, take, \
argsort, allclose, expand_dims, unique, prod, sort, reshape, \
transpose, dot, any, mean, cosh, arccosh, \
arccos, concatenate, flipud, ndarray
import numpy as np
from scipy.misc import factorial
_modedict = {'valid':0, 'same':1, 'full':2}
_boundarydict = {'fill':0, 'pad':0, 'wrap':2, 'circular':2, 'symm':1,
'symmetric':1, 'reflect':4}
_SWAP_INPUTS_DEPRECATION_MSG = """\
Current default behavior of convolve and correlate functions is deprecated.
Convolve and correlate currently swap their arguments if the second argument
has dimensions larger than the first one, and the mode is relative to the input
with the largest dimension. The new behavior is to never swap the inputs, which
is what most people expect, and is how correlation is usually defined.
You can control the behavior with the old_behavior flag - the flag will
disappear in scipy 0.9.0, and the functions will then implement the new
behavior only."""
def _valfrommode(mode):
try:
val = _modedict[mode]
except KeyError:
if mode not in [0,1,2]:
raise ValueError, "Acceptable mode flags are 'valid' (0)," \
"'same' (1), or 'full' (2)."
val = mode
return val
def _bvalfromboundary(boundary):
try:
val = _boundarydict[boundary] << 2
except KeyError:
if boundary not in [0, 1, 2]:
raise ValueError, "Acceptable boundary flags are 'fill', 'wrap'" \
" (or 'circular'), and 'symm' (or 'symmetric')."
val = boundary << 2
return val
def correlate(in1, in2, mode='full', old_behavior=True):
"""Cross-correlate two N-dimensional arrays.
Cross-correlate in1 and in2 with the output size determined by the mode
argument.
Arguments
---------
in1: array
first input.
in2: array
second input. Should have the same number of dimensions as in1.
mode: str {'valid', 'same', 'full'}
a string indicating the size of the output:
- 'valid': the output consists only of those elements that do not
rely on the zero-padding.
- 'same': the output is the same size as the largest input centered
with respect to the 'full' output.
- 'full': the output is the full discrete linear cross-correlation
of the inputs. (Default)
old_behavior: bool
If True (default), the old behavior of correlate is implemented:
- if in1.size < in2.size, in1 and in2 are swapped (correlate(in1,
in2) == correlate(in2, in1))
- For complex inputs, the conjugate is not taken for in2
If False, the new, conventional definition of correlate is implemented.
Returns
-------
out: array
an N-dimensional array containing a subset of the discrete linear
cross-correlation of in1 with in2.
Note
----
The correlation z of two arrays x and y of rank d is defined as
z[...,k,...] = sum[..., i_l, ...]
x[..., i_l,...] * conj(y[..., i_l + k,...])
"""
val = _valfrommode(mode)
if old_behavior:
warnings.warn(DeprecationWarning(_SWAP_INPUTS_DEPRECATION_MSG))
if np.iscomplexobj(in2):
in2 = in2.conjugate()
if in1.size < in2.size:
swp = in2
in2 = in1
in1 = swp
if mode == 'valid':
ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]
for i in range(len(ps)):
if ps[i] <= 0:
raise ValueError("Dimension of in1 (%d) < in2 (%d) " \
"not compatible with valid mode" % \
(in1.shape[i], in2.shape[i]))
out = np.empty(ps, in1.dtype)
z = sigtools._correlateND(in1, in2, out, val)
else:
ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]
# zero pad input
in1zpadded = np.zeros(ps, in1.dtype)
sc = [slice(0, i) for i in in1.shape]
in1zpadded[sc] = in1.copy()
if mode == 'full':
out = np.empty(ps, in1.dtype)
z = sigtools._correlateND(in1zpadded, in2, out, val)
elif mode == 'same':
out = np.empty(in1.shape, in1.dtype)
z = sigtools._correlateND(in1zpadded, in2, out, val)
else:
raise ValueError("Uknown mode %s" % mode)
return z
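# Worked rank-1 example of the definition above (old_behavior=False, real
# inputs): full cross-correlation of x with y equals convolving x with y
# reversed, up to index conventions. With x = [1, 2, 3] and y = [0, 1, 0.5],
# y[::-1] = [0.5, 1, 0] and the full result is [0.5, 2.0, 3.5, 3.0, 0.0].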
def _centered(arr, newsize):
# Return the center newsize portion of the array.
newsize = asarray(newsize)
currsize = array(arr.shape)
startind = (currsize - newsize) / 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def fftconvolve(in1, in2, mode="full"):
"""Convolve two N-dimensional arrays using FFT. See convolve.
"""
s1 = array(in1.shape)
s2 = array(in2.shape)
complex_result = (np.issubdtype(in1.dtype, np.complex) or
np.issubdtype(in2.dtype, np.complex))
size = s1+s2-1
# Always use 2**n-sized FFT
fsize = 2**np.ceil(np.log2(size))
IN1 = fftn(in1, fsize)
IN1 *= fftn(in2, fsize)
fslice = tuple([slice(0, int(sz)) for sz in size])
ret = ifftn(IN1)[fslice].copy()
del IN1
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
if product(s1,axis=0) > product(s2,axis=0):
osize = s1
else:
osize = s2
return _centered(ret,osize)
elif mode == "valid":
return _centered(ret,abs(s2-s1)+1)
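# Sketch of why the FFT route works (illustrative 1-D input): by the
# convolution theorem, transforming with fftn over a size of at least
# s1 + s2 - 1 turns circular convolution into full linear convolution, so
# fftconvolve(array([1., 2., 3.]), array([0., 1., 0.5])) matches the direct
# result [0., 1., 2.5, 4., 1.5] up to floating-point error.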
def convolve(in1, in2, mode='full', old_behavior=True):
"""Convolve two N-dimensional arrays.
Convolve in1 and in2 with output size determined by mode.
Arguments
---------
in1: array
first input.
in2: array
second input. Should have the same number of dimensions as in1.
mode: str {'valid', 'same', 'full'}
a string indicating the size of the output:
- 'valid': the output consists only of those elements that do not
rely on the zero-padding.
- 'same': the output is the same size as the largest input centered
with respect to the 'full' output.
- 'full': the output is the full discrete linear convolution
of the inputs. (Default)
Returns
-------
out: array
an N-dimensional array containing a subset of the discrete linear
convolution of in1 with in2.
"""
volume = asarray(in1)
kernel = asarray(in2)
if rank(volume) == rank(kernel) == 0:
return volume*kernel
elif not volume.ndim == kernel.ndim:
raise ValueError("in1 and in2 should have the same rank")
slice_obj = [slice(None,None,-1)]*len(kernel.shape)
if old_behavior:
warnings.warn(DeprecationWarning(_SWAP_INPUTS_DEPRECATION_MSG))
if (product(kernel.shape,axis=0) > product(volume.shape,axis=0)):
temp = kernel
kernel = volume
volume = temp
del temp
return correlate(volume, kernel[slice_obj], mode, old_behavior=True)
else:
if mode == 'valid':
for d1, d2 in zip(volume.shape, kernel.shape):
if not d1 >= d2:
raise ValueError(
"in1 should have at least as many items as in2 in " \
"every dimension for valid mode.")
if np.iscomplexobj(kernel):
return correlate(volume, kernel[slice_obj].conj(), mode, old_behavior=False)
else:
return correlate(volume, kernel[slice_obj], mode, old_behavior=False)
def order_filter(a, domain, rank):
"""Perform an order filter on an N-dimensional array.
Description:
Perform an order filter on the array in. The domain argument acts as a
mask centered over each pixel. The non-zero elements of domain are
used to select elements surrounding each input pixel which are placed
in a list. The list is sorted, and the output for that pixel is the
element corresponding to rank in the sorted list.
Inputs:
in -- an N-dimensional input array.
domain -- a mask array with the same number of dimensions as in. Each
dimension should have an odd number of elements.
rank -- a non-negative integer which selects the element from the
sorted list (0 corresponds to the smallest element, 1 is the
next smallest element, etc.)
Output: (out,)
out -- the results of the order filter in an array with the same
shape as in.
"""
domain = asarray(domain)
size = domain.shape
for k in range(len(size)):
if (size[k] % 2) != 1:
raise ValueError, "Each dimension of domain argument " \
"should have an odd number of elements."
return sigtools._order_filterND(a, domain, rank)
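# Worked example (illustrative 1-D input): with domain = ones(3) and rank = 1,
# each output element is the middle value of the sorted, zero-padded
# 3-neighbourhood, i.e. a running median of the input.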
def medfilt(volume,kernel_size=None):
"""Perform a median filter on an N-dimensional array.
Description:
Apply a median filter to the input array using a local window-size
given by kernel_size.
Inputs:
in -- An N-dimensional input array.
kernel_size -- A scalar or an N-length list giving the size of the
median filter window in each dimension. Elements of
kernel_size should be odd. If kernel_size is a scalar,
then this scalar is used as the size in each dimension.
Outputs: (out,)
out -- An array the same size as input containing the median filtered
result.
"""
volume = asarray(volume)
if kernel_size is None:
kernel_size = [3] * len(volume.shape)
kernel_size = asarray(kernel_size)
if len(kernel_size.shape) == 0:
kernel_size = [kernel_size.item()] * len(volume.shape)
kernel_size = asarray(kernel_size)
for k in range(len(volume.shape)):
if (kernel_size[k] % 2) != 1:
raise ValueError, "Each element of kernel_size should be odd."
domain = ones(kernel_size)
numels = product(kernel_size,axis=0)
order = int(numels/2)
return sigtools._order_filterND(volume,domain,order)
def wiener(im,mysize=None,noise=None):
"""
Perform a Wiener filter on an N-dimensional array.
Description:
Apply a Wiener filter to the N-dimensional array in.
Inputs:
in -- an N-dimensional array.
mysize -- A scalar or an N-length list giving the size of the
Wiener filter window in each dimension. Elements of
mysize should be odd. If mysize is a scalar,
then this scalar is used as the size in each dimension.
noise -- The noise-power to use. If None, then noise is estimated as
the average of the local variance of the input.
Outputs: (out,)
out -- Wiener filtered result with the same shape as in.
"""
im = asarray(im)
if mysize is None:
mysize = [3] * len(im.shape)
mysize = asarray(mysize)
# Estimate the local mean
lMean = correlate(im,ones(mysize), 'same', old_behavior=False) / product(mysize,axis=0)
# Estimate the local variance
lVar = correlate(im**2,ones(mysize), 'same', old_behavior=False) / product(mysize,axis=0) - lMean**2
# Estimate the noise power if needed.
if noise is None:
noise = mean(ravel(lVar),axis=0)
res = (im - lMean)
res *= (1-noise / lVar)
res += lMean
out = where(lVar < noise, lMean, res)
return out
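# The loop-free filter above is the classic local-statistics Wiener estimate:
# out = lMean + (1 - noise / lVar) * (im - lMean) elementwise, i.e.
# lMean + ((lVar - noise) / lVar) * (im - lMean), falling back to the plain
# local mean wherever the local variance drops below the noise power.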
def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0, old_behavior=True):
"""Convolve two 2-dimensional arrays.
Description:
Convolve in1 and in2 with output size determined by mode and boundary
conditions determined by boundary and fillvalue.
Inputs:
in1 -- a 2-dimensional array.
in2 -- a 2-dimensional array.
mode -- a flag indicating the size of the output
'valid' (0): The output consists only of those elements that
do not rely on the zero-padding.
'same' (1): The output is the same size as the input centered
with respect to the 'full' output.
'full' (2): The output is the full discrete linear convolution
of the inputs. (*Default*)
boundary -- a flag indicating how to handle boundaries
'fill' : pad input arrays with fillvalue. (*Default*)
'wrap' : circular boundary conditions.
'symm' : symmetrical boundary conditions.
fillvalue -- value to fill pad input arrays with (*Default* = 0)
Outputs: (out,)
out -- a 2-dimensional array containing a subset of the discrete linear
convolution of in1 with in2.
"""
if old_behavior:
warnings.warn(DeprecationWarning(_SWAP_INPUTS_DEPRECATION_MSG))
if (product(np.shape(in2),axis=0) > product(np.shape(in1),axis=0)):
temp = in1
in1 = in2
in2 = temp
del temp
else:
if mode == 'valid':
for d1, d2 in zip(np.shape(in1), np.shape(in2)):
if not d1 >= d2:
raise ValueError(
"in1 should have at least as many items as in2 in " \
"every dimension for valid mode.")
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
return sigtools._convolve2d(in1,in2,1,val,bval,fillvalue)
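# Illustrative usage sketch (helper name is ours): smooth a 2-D array with
# a normalized 3x3 box kernel; old_behavior=False opts out of the
# deprecated input-swapping behavior warned about above.
def _convolve2d_example(img):
    kernel = ones((3, 3)) / 9.0
    return convolve2d(img, kernel, mode='same', boundary='symm',
                      old_behavior=False)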
def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0, old_behavior=True):
"""Cross-correlate two 2-dimensional arrays.
Description:
Cross correlate in1 and in2 with output size determined by mode
and boundary conditions determined by boundary and fillvalue.
Inputs:
in1 -- a 2-dimensional array.
in2 -- a 2-dimensional array.
mode -- a flag indicating the size of the output
'valid' (0): The output consists only of those elements that
do not rely on the zero-padding.
'same' (1): The output is the same size as the input centered
with respect to the 'full' output.
'full' (2): The output is the full discrete linear convolution
of the inputs. (*Default*)
boundary -- a flag indicating how to handle boundaries
'fill' : pad input arrays with fillvalue. (*Default*)
'wrap' : circular boundary conditions.
'symm' : symmetrical boundary conditions.
fillvalue -- value to fill pad input arrays with (*Default* = 0)
Outputs: (out,)
out -- a 2-dimensional array containing a subset of the discrete linear
cross-correlation of in1 with in2.
"""
if old_behavior:
warnings.warn(DeprecationWarning(_SWAP_INPUTS_DEPRECATION_MSG))
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
return sigtools._convolve2d(in1, in2, 0,val,bval,fillvalue)
def medfilt2d(input, kernel_size=3):
"""Median filter two 2-dimensional arrays.
Description:
Apply a median filter to the input array using a local window-size
given by kernel_size (must be odd).
Inputs:
      input -- A 2-dimensional input array.
      kernel_size -- A scalar or a length-2 list giving the size of the
median filter window in each dimension. Elements of
kernel_size should be odd. If kernel_size is a scalar,
then this scalar is used as the size in each dimension.
Outputs: (out,)
out -- An array the same size as input containing the median filtered
result.
"""
image = asarray(input)
if kernel_size is None:
kernel_size = [3] * 2
kernel_size = asarray(kernel_size)
if len(kernel_size.shape) == 0:
kernel_size = [kernel_size.item()] * 2
kernel_size = asarray(kernel_size)
for size in kernel_size:
if (size % 2) != 1:
raise ValueError, "Each element of kernel_size should be odd."
return sigtools._medfilt2d(image, kernel_size)
def remez(numtaps, bands, desired, weight=None, Hz=1, type='bandpass',
maxiter=25, grid_density=16):
"""Calculate the minimax optimal filter using Remez exchange algorithm.
Description:
Calculate the filter-coefficients for the finite impulse response
(FIR) filter whose transfer function minimizes the maximum error
between the desired gain and the realized gain in the specified bands
using the remez exchange algorithm.
Inputs:
numtaps -- The desired number of taps in the filter.
      bands -- A monotonic sequence containing the band edges. All elements
must be non-negative and less than 1/2 the sampling frequency
as given by Hz.
      desired -- A sequence half the size of bands containing the desired
                 gain in each of the specified bands.
weight -- A relative weighting to give to each band region.
type --- The type of filter:
'bandpass' : flat response in bands.
'differentiator' : frequency proportional response in bands.
Outputs: (out,)
out -- A rank-1 array containing the coefficients of the optimal
(in a minimax sense) filter.
"""
# Convert type
try:
tnum = {'bandpass':1, 'differentiator':2}[type]
except KeyError:
raise ValueError, "Type must be 'bandpass', or 'differentiator'"
# Convert weight
if weight is None:
weight = [1] * len(desired)
bands = asarray(bands).copy()
return sigtools._remez(numtaps, bands, desired, weight, tnum, Hz,
maxiter, grid_density)
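# Illustrative usage sketch (helper and band edges are ours): a 72-tap
# lowpass FIR with passband [0, 0.1] and stopband [0.2, 0.5], where the
# edges are fractions of the sampling rate (Hz=1).
def _remez_lowpass_example():
    return remez(72, [0.0, 0.1, 0.2, 0.5], [1.0, 0.0])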
def lfilter(b, a, x, axis=-1, zi=None):
"""
Filter data along one-dimension with an IIR or FIR filter.
Filter a data sequence, x, using a digital filter. This works for many
fundamental data types (including Object type). The filter is a direct
form II transposed implementation of the standard difference equation
(see Notes).
Parameters
----------
b : array_like
The numerator coefficient vector in a 1-D sequence.
a : array_like
The denominator coefficient vector in a 1-D sequence. If a[0]
is not 1, then both a and b are normalized by a[0].
x : array_like
An N-dimensional input array.
axis : int
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis (*Default* = -1)
zi : array_like (optional)
Initial conditions for the filter delays. It is a vector
(or array of vectors for an N-dimensional input) of length
max(len(a),len(b))-1. If zi=None or is not given then initial
rest is assumed. SEE signal.lfiltic for more information.
Returns
-------
y : array
The output of the digital filter.
zf : array (optional)
If zi is None, this is not returned, otherwise, zf holds the
final filter delay values.
Notes
-----
The filter function is implemented as a direct II transposed structure.
This means that the filter implements
::
a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[nb]*x[n-nb]
- a[1]*y[n-1] - ... - a[na]*y[n-na]
using the following difference equations::
y[m] = b[0]*x[m] + z[0,m-1]
z[0,m] = b[1]*x[m] + z[1,m-1] - a[1]*y[m]
...
z[n-3,m] = b[n-2]*x[m] + z[n-2,m-1] - a[n-2]*y[m]
z[n-2,m] = b[n-1]*x[m] - a[n-1]*y[m]
where m is the output sample number and n=max(len(a),len(b)) is the
model order.
The rational transfer function describing this filter in the
z-transform domain is::
-1 -nb
b[0] + b[1]z + ... + b[nb] z
Y(z) = ---------------------------------- X(z)
-1 -na
a[0] + a[1]z + ... + a[na] z
"""
if isscalar(a):
a = [a]
if zi is None:
return sigtools._linear_filter(b, a, x, axis)
else:
return sigtools._linear_filter(b, a, x, axis, zi)
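# Illustrative usage sketch (helper name is ours): a 3-point moving
# average is just an FIR filter with b = [1/3, 1/3, 1/3] and a = [1].
def _moving_average_example():
    x = arange(10.0)
    return lfilter(ones(3) / 3.0, [1.0], x)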
def lfiltic(b,a,y,x=None):
"""
Construct initial conditions for lfilter
Given a linear filter (b,a) and initial conditions on the output y
    and the input x, return the initial conditions on the state vector zi
    which is used by lfilter to generate the output given the input.
    If M = len(b) - 1 and N = len(a) - 1, then the initial conditions are
    given in the vectors x and y as::
        x = {x[-1],x[-2],...,x[-M]}
        y = {y[-1],y[-2],...,y[-N]}
    If x is not given, its initial conditions are assumed zero.
If either vector is too short, then zeros are added
to achieve the proper length.
The output vector zi contains::
zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]} where K=max(M,N).
"""
N = size(a)-1
M = size(b)-1
K = max(M,N)
y = asarray(y)
zi = zeros(K,y.dtype.char)
if x is None:
x = zeros(M,y.dtype.char)
else:
x = asarray(x)
L = size(x)
if L < M:
x = r_[x,zeros(M-L)]
L = size(y)
if L < N:
y = r_[y,zeros(N-L)]
for m in range(M):
zi[m] = sum(b[m+1:]*x[:M-m],axis=0)
for m in range(N):
zi[m] -= sum(a[m+1:]*y[:N-m],axis=0)
return zi
def deconvolve(signal, divisor):
"""Deconvolves divisor out of signal.
"""
num = atleast_1d(signal)
den = atleast_1d(divisor)
N = len(num)
D = len(den)
if D > N:
        quot = []
        rem = num
else:
input = ones(N-D+1, float)
input[1:] = 0
quot = lfilter(num, den, input)
rem = num - convolve(den, quot, mode='full')
return quot, rem
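# Illustrative round trip (helper name is ours): dividing a convolution by
# one of its factors recovers the other factor with zero remainder.
def _deconvolve_example():
    # [1, 3, 2] is convolve([1, 1], [1, 2]), so the quotient is [1, 2].
    quot, rem = deconvolve(array([1.0, 3.0, 2.0]), array([1.0, 1.0]))
    return quot, rem  # quot -> [ 1.,  2.], rem -> [ 0.,  0.,  0.]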
def boxcar(M,sym=1):
"""The M-point boxcar window.
"""
return ones(M, float)
def triang(M,sym=1):
"""The M-point triangular window.
"""
if M < 1:
return array([])
if M == 1:
return ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M + 1
n = arange(1,int((M+1)/2)+1)
if M % 2 == 0:
w = (2*n-1.0)/M
w = r_[w, w[::-1]]
else:
w = 2*n/(M+1.0)
w = r_[w, w[-2::-1]]
if not sym and not odd:
w = w[:-1]
return w
def parzen(M,sym=1):
"""The M-point Parzen window.
"""
if M < 1:
return array([])
if M == 1:
return ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M+1
n = arange(-(M-1)/2.0,(M-1)/2.0+0.5,1.0)
na = extract(n < -(M-1)/4.0, n)
nb = extract(abs(n) <= (M-1)/4.0, n)
wa = 2*(1-abs(na)/(M/2.0))**3.0
wb = 1-6*(abs(nb)/(M/2.0))**2.0 + 6*(abs(nb)/(M/2.0))**3.0
w = r_[wa,wb,wa[::-1]]
if not sym and not odd:
w = w[:-1]
return w
def bohman(M,sym=1):
"""The M-point Bohman window.
"""
if M < 1:
return array([])
if M == 1:
return ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M+1
fac = abs(linspace(-1,1,M)[1:-1])
w = (1 - fac)* cos(pi*fac) + 1.0/pi*sin(pi*fac)
w = r_[0,w,0]
if not sym and not odd:
w = w[:-1]
return w
def blackman(M,sym=1):
"""The M-point Blackman window.
"""
if M < 1:
return array([])
if M == 1:
return ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M+1
n = arange(0,M)
w = 0.42-0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
if not sym and not odd:
w = w[:-1]
return w
def nuttall(M,sym=1):
"""A minimum 4-term Blackman-Harris window according to Nuttall.
"""
if M < 1:
return array([])
if M == 1:
return ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M+1
a = [0.3635819, 0.4891775, 0.1365995, 0.0106411]
n = arange(0,M)
fac = n*2*pi/(M-1.0)
w = a[0] - a[1]*cos(fac) + a[2]*cos(2*fac) - a[3]*cos(3*fac)
if not sym and not odd:
w = w[:-1]
return w
def blackmanharris(M,sym=1):
"""The M-point minimum 4-term Blackman-Harris window.
"""
if M < 1:
return array([])
if M == 1:
return ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M+1
    a = [0.35875, 0.48829, 0.14128, 0.01168]
n = arange(0,M)
fac = n*2*pi/(M-1.0)
w = a[0] - a[1]*cos(fac) + a[2]*cos(2*fac) - a[3]*cos(3*fac)
if not sym and not odd:
w = w[:-1]
return w
def flattop(M,sym=1):
"""The M-point Flat top window.
"""
if M < 1:
return array([])
if M == 1:
return ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M+1
a = [0.2156, 0.4160, 0.2781, 0.0836, 0.0069]
n = arange(0,M)
fac = n*2*pi/(M-1.0)
w = a[0] - a[1]*cos(fac) + a[2]*cos(2*fac) - a[3]*cos(3*fac) + \
a[4]*cos(4*fac)
if not sym and not odd:
w = w[:-1]
return w
def bartlett(M,sym=1):
"""The M-point Bartlett window.
"""
if M < 1:
return array([])
if M == 1:
return ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M+1
n = arange(0,M)
w = where(less_equal(n,(M-1)/2.0),2.0*n/(M-1),2.0-2.0*n/(M-1))
if not sym and not odd:
w = w[:-1]
return w
def hanning(M,sym=1):
"""The M-point Hanning window.
"""
if M < 1:
return array([])
if M == 1:
return ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M+1
n = arange(0,M)
w = 0.5-0.5*cos(2.0*pi*n/(M-1))
if not sym and not odd:
w = w[:-1]
return w
hann = hanning
def barthann(M,sym=1):
"""Return the M-point modified Bartlett-Hann window.
"""
if M < 1:
return array([])
if M == 1:
return ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M+1
n = arange(0,M)
fac = abs(n/(M-1.0)-0.5)
w = 0.62 - 0.48*fac + 0.38*cos(2*pi*fac)
if not sym and not odd:
w = w[:-1]
return w
def hamming(M,sym=1):
"""The M-point Hamming window.
"""
if M < 1:
return array([])
if M == 1:
return ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M+1
n = arange(0,M)
w = 0.54-0.46*cos(2.0*pi*n/(M-1))
if not sym and not odd:
w = w[:-1]
return w
def kaiser(M,beta,sym=1):
"""Return a Kaiser window of length M with shape parameter beta.
"""
if M < 1:
return array([])
if M == 1:
return ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M+1
n = arange(0,M)
alpha = (M-1)/2.0
w = special.i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/special.i0(beta)
if not sym and not odd:
w = w[:-1]
return w
def gaussian(M,std,sym=1):
"""Return a Gaussian window of length M with standard-deviation std.
"""
if M < 1:
return array([])
if M == 1:
return ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M + 1
n = arange(0,M)-(M-1.0)/2.0
sig2 = 2*std*std
w = exp(-n**2 / sig2)
if not sym and not odd:
w = w[:-1]
return w
def general_gaussian(M,p,sig,sym=1):
"""Return a window with a generalized Gaussian shape.
exp(-0.5*(x/sig)**(2*p))
half power point is at (2*log(2)))**(1/(2*p))*sig
"""
if M < 1:
return array([])
if M == 1:
return ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M+1
n = arange(0,M)-(M-1.0)/2.0
w = exp(-0.5*(n/sig)**(2*p))
if not sym and not odd:
w = w[:-1]
return w
# contributed by Kumar Appaiah.
def chebwin(M, at, sym=1):
"""Dolph-Chebyshev window.
INPUTS:
M : int
Window size
at : float
Attenuation (in dB)
sym : bool
Generates symmetric window if True.
"""
if M < 1:
return array([])
if M == 1:
return ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M+1
# compute the parameter beta
order = M - 1.0
beta = cosh(1.0/order * arccosh(10**(abs(at)/20.)))
k = r_[0:M]*1.0
x = beta*cos(pi*k/M)
#find the window's DFT coefficients
# Use analytic definition of Chebyshev polynomial instead of expansion
# from scipy.special. Using the expansion in scipy.special leads to errors.
p = zeros(x.shape)
p[x > 1] = cosh(order * arccosh(x[x > 1]))
p[x < -1] = (1 - 2*(order%2)) * cosh(order * arccosh(-x[x < -1]))
p[np.abs(x) <=1 ] = cos(order * arccos(x[np.abs(x) <= 1]))
# Appropriate IDFT and filling up
# depending on even/odd M
if M % 2:
w = real(fft(p))
n = (M + 1) / 2
w = w[:n] / w[0]
w = concatenate((w[n - 1:0:-1], w))
else:
p = p * exp(1.j*pi / M * r_[0:M])
w = real(fft(p))
n = M / 2 + 1
w = w / w[1]
w = concatenate((w[n - 1:0:-1], w[1:n]))
if not sym and not odd:
w = w[:-1]
return w
def slepian(M,width,sym=1):
"""Return the M-point slepian window.
"""
if (M*width > 27.38):
raise ValueError, "Cannot reliably obtain slepian sequences for"\
" M*width > 27.38."
if M < 1:
return array([])
if M == 1:
return ones(1,'d')
odd = M % 2
if not sym and not odd:
M = M+1
twoF = width/2.0
alpha = (M-1)/2.0
m = arange(0,M)-alpha
n = m[:,newaxis]
k = m[newaxis,:]
AF = twoF*special.sinc(twoF*(n-k))
[lam,vec] = linalg.eig(AF)
ind = argmax(abs(lam),axis=-1)
w = abs(vec[:,ind])
w = w / max(w)
if not sym and not odd:
w = w[:-1]
return w
def hilbert(x, N=None):
"""Compute the analytic signal.
The transformation is done along the first axis.
Parameters
----------
x : array-like
Signal data
N : int, optional
Number of Fourier components. Default: ``x.shape[0]``
Returns
-------
xa : ndarray, shape (N,) + x.shape[1:]
Analytic signal of `x`
Notes
-----
The analytic signal `x_a(t)` of `x(t)` is::
x_a = F^{-1}(F(x) 2U) = x + i y
where ``F`` is the Fourier transform, ``U`` the unit step function,
and ``y`` the Hilbert transform of ``x``. [1]
References
----------
.. [1] Wikipedia, "Analytic signal".
http://en.wikipedia.org/wiki/Analytic_signal
"""
x = asarray(x)
if N is None:
N = len(x)
    if N <= 0:
        raise ValueError("N must be positive.")
if iscomplexobj(x):
print "Warning: imaginary part of x ignored."
x = real(x)
Xf = fft(x,N,axis=0)
h = zeros(N)
if N % 2 == 0:
h[0] = h[N/2] = 1
h[1:N/2] = 2
else:
h[0] = 1
h[1:(N+1)/2] = 2
if len(x.shape) > 1:
h = h[:, newaxis]
x = ifft(Xf*h)
return x
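# Illustrative usage sketch (helper name is ours): the magnitude of the
# analytic signal recovers the amplitude envelope of an
# amplitude-modulated carrier.
def _envelope_example():
    t = arange(0.0, 1.0, 0.001)
    modulation = 1.0 + 0.5 * sin(2 * pi * 3.0 * t)
    carrier = sin(2 * pi * 50.0 * t)
    return abs(hilbert(modulation * carrier))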
def hilbert2(x,N=None):
"""Compute the '2-D' analytic signal of `x` of length `N`.
See also
--------
hilbert
"""
    x = asarray(x)
    if N is None:
        N = x.shape
    if isscalar(N):
        if N <= 0:
            raise ValueError("N must be positive.")
        N = (N, N)
if iscomplexobj(x):
print "Warning: imaginary part of x ignored."
x = real(x)
Xf = fft2(x,N,axes=(0,1))
h1 = zeros(N[0],'d')
h2 = zeros(N[1],'d')
    for h, N1 in zip((h1, h2), N):
        if N1 % 2 == 0:
            h[0] = h[N1/2] = 1
            h[1:N1/2] = 2
        else:
            h[0] = 1
            h[1:(N1+1)/2] = 2
h = h1[:,newaxis] * h2[newaxis,:]
k = len(x.shape)
while k > 2:
h = h[:, newaxis]
k -= 1
x = ifft2(Xf*h,axes=(0,1))
return x
def cmplx_sort(p):
"sort roots based on magnitude."
p = asarray(p)
if iscomplexobj(p):
indx = argsort(abs(p))
else:
indx = argsort(p)
return take(p,indx,0), indx
def unique_roots(p,tol=1e-3,rtype='min'):
"""Determine the unique roots and their multiplicities in two lists
Inputs:
p -- The list of roots
tol --- The tolerance for two roots to be considered equal.
rtype --- How to determine the returned root from the close
ones: 'max': pick the maximum
'min': pick the minimum
'avg': average roots
Outputs: (pout, mult)
pout -- The list of sorted roots
mult -- The multiplicity of each root
"""
    if rtype in ['max','maximum']:
        comproot = np.maximum
    elif rtype in ['min','minimum']:
        comproot = np.minimum
    elif rtype in ['avg','mean']:
        comproot = np.mean
    else:
        raise ValueError("rtype must be 'max', 'min', or 'avg'")
p = asarray(p)*1.0
tol = abs(tol)
p, indx = cmplx_sort(p)
pout = []
mult = []
indx = -1
curp = p[0] + 5*tol
sameroots = []
for k in range(len(p)):
tr = p[k]
if abs(tr-curp) < tol:
sameroots.append(tr)
curp = comproot(sameroots)
pout[indx] = curp
mult[indx] += 1
else:
pout.append(tr)
curp = tr
sameroots = [tr]
indx += 1
mult.append(1)
return array(pout), array(mult)
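# Illustrative usage sketch (helper name is ours): roots closer together
# than tol are merged, and their multiplicity is reported in mult.
def _unique_roots_example():
    pout, mult = unique_roots([1.0, 1.0001, 2.0], tol=1e-2, rtype='avg')
    return pout, mult  # pout -> [ 1.00005,  2.], mult -> [2, 1]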
def invres(r,p,k,tol=1e-3,rtype='avg'):
"""Compute b(s) and a(s) from partial fraction expansion: r,p,k
If M = len(b) and N = len(a)
b(s) b[0] x**(M-1) + b[1] x**(M-2) + ... + b[M-1]
H(s) = ------ = ----------------------------------------------
a(s) a[0] x**(N-1) + a[1] x**(N-2) + ... + a[N-1]
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
See Also
--------
residue, poly, polyval, unique_roots
"""
extra = k
p, indx = cmplx_sort(p)
r = take(r,indx,0)
pout, mult = unique_roots(p,tol=tol,rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]]*mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra,a)
else:
b = [0]
indx = 0
for k in range(len(pout)):
temp = []
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]]*mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]]*(mult[k]-m-1))
b = polyadd(b,r[indx]*poly(t2))
indx += 1
b = real_if_close(b)
while allclose(b[0], 0, rtol=1e-14) and (b.shape[-1] > 1):
b = b[1:]
return b, a
def residue(b,a,tol=1e-3,rtype='avg'):
"""Compute partial-fraction expansion of b(s) / a(s).
If M = len(b) and N = len(a)
b(s) b[0] s**(M-1) + b[1] s**(M-2) + ... + b[M-1]
H(s) = ------ = ----------------------------------------------
a(s) a[0] s**(N-1) + a[1] s**(N-2) + ... + a[N-1]
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
Returns
-------
r : ndarray
Residues
p : ndarray
Poles
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invres, poly, polyval, unique_roots
"""
b,a = map(asarray,(b,a))
rscale = a[0]
k,b = polydiv(b,a)
p = roots(a)
r = p*0.0
pout, mult = unique_roots(p,tol=tol,rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]]*mult[n])
p = asarray(p)
# Compute the residue from the general formula
indx = 0
for n in range(len(pout)):
bn = b.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]]*mult[l])
an = atleast_1d(poly(pn))
# bn(s) / an(s) is (s-po[n])**Nn * b(s) / a(s) where Nn is
# multiplicity of pole at po[n]
sig = mult[n]
for m in range(sig,0,-1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn,1),an)
term2 = polymul(bn,polyder(an,1))
bn = polysub(term1,term2)
an = polymul(an,an)
r[indx+m-1] = polyval(bn,pout[n]) / polyval(an,pout[n]) \
/ factorial(sig-m)
indx += sig
return r/rscale, p, k
def residuez(b,a,tol=1e-3,rtype='avg'):
"""Compute partial-fraction expansion of b(z) / a(z).
If M = len(b) and N = len(a)
b(z) b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
H(z) = ------ = ----------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
See also: invresz, poly, polyval, unique_roots
"""
b,a = map(asarray,(b,a))
gain = a[0]
brev, arev = b[::-1],a[::-1]
krev,brev = polydiv(brev,arev)
if krev == []:
k = []
else:
k = krev[::-1]
b = brev[::-1]
p = roots(a)
r = p*0.0
pout, mult = unique_roots(p,tol=tol,rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]]*mult[n])
p = asarray(p)
# Compute the residue from the general formula (for discrete-time)
# the polynomial is in z**(-1) and the multiplication is by terms
# like this (1-p[i] z**(-1))**mult[i]. After differentiation,
# we must divide by (-p[i])**(m-k) as well as (m-k)!
indx = 0
for n in range(len(pout)):
bn = brev.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]]*mult[l])
an = atleast_1d(poly(pn))[::-1]
# bn(z) / an(z) is (1-po[n] z**(-1))**Nn * b(z) / a(z) where Nn is
# multiplicity of pole at po[n] and b(z) and a(z) are polynomials.
sig = mult[n]
for m in range(sig,0,-1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn,1),an)
term2 = polymul(bn,polyder(an,1))
bn = polysub(term1,term2)
an = polymul(an,an)
r[indx+m-1] = polyval(bn,1.0/pout[n]) / polyval(an,1.0/pout[n]) \
/ factorial(sig-m) / (-pout[n])**(sig-m)
indx += sig
return r/gain, p, k
def invresz(r,p,k,tol=1e-3,rtype='avg'):
"""Compute b(z) and a(z) from partial fraction expansion: r,p,k
If M = len(b) and N = len(a)
b(z) b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
H(z) = ------ = ----------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
See also: residuez, poly, polyval, unique_roots
"""
extra = asarray(k)
p, indx = cmplx_sort(p)
r = take(r,indx,0)
pout, mult = unique_roots(p,tol=tol,rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]]*mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra,a)
else:
b = [0]
indx = 0
brev = asarray(b)[::-1]
for k in range(len(pout)):
temp = []
# Construct polynomial which does not include any of this root
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]]*mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]]*(mult[k]-m-1))
brev = polyadd(brev,(r[indx]*poly(t2))[::-1])
indx += 1
b = real_if_close(brev[::-1])
return b, a
def get_window(window,Nx,fftbins=1):
"""Return a window of length Nx and type window.
If fftbins is 1, create a "periodic" window ready to use with ifftshift
and be multiplied by the result of an fft (SEE ALSO fftfreq).
Window types: boxcar, triang, blackman, hamming, hanning, bartlett,
parzen, bohman, blackmanharris, nuttall, barthann,
kaiser (needs beta), gaussian (needs std),
general_gaussian (needs power, width),
slepian (needs width)
If the window requires no parameters, then it can be a string.
If the window requires parameters, the window argument should be a tuple
with the first argument the string name of the window, and the next
arguments the needed parameters.
If window is a floating point number, it is interpreted as the beta
parameter of the kaiser window.
"""
sym = not fftbins
try:
beta = float(window)
except (TypeError, ValueError):
args = ()
if isinstance(window, types.TupleType):
winstr = window[0]
if len(window) > 1:
args = window[1:]
elif isinstance(window, types.StringType):
if window in ['kaiser', 'ksr', 'gaussian', 'gauss', 'gss',
'general gaussian', 'general_gaussian',
'general gauss', 'general_gauss', 'ggs',
'slepian', 'optimal', 'slep', 'dss']:
raise ValueError, "That window needs a parameter -- pass a tuple"
else:
winstr = window
if winstr in ['blackman', 'black', 'blk']:
winfunc = blackman
elif winstr in ['triangle', 'triang', 'tri']:
winfunc = triang
elif winstr in ['hamming', 'hamm', 'ham']:
winfunc = hamming
elif winstr in ['bartlett', 'bart', 'brt']:
winfunc = bartlett
elif winstr in ['hanning', 'hann', 'han']:
winfunc = hanning
elif winstr in ['blackmanharris', 'blackharr','bkh']:
winfunc = blackmanharris
elif winstr in ['parzen', 'parz', 'par']:
winfunc = parzen
elif winstr in ['bohman', 'bman', 'bmn']:
winfunc = bohman
elif winstr in ['nuttall', 'nutl', 'nut']:
winfunc = nuttall
elif winstr in ['barthann', 'brthan', 'bth']:
winfunc = barthann
elif winstr in ['flattop', 'flat', 'flt']:
winfunc = flattop
elif winstr in ['kaiser', 'ksr']:
winfunc = kaiser
elif winstr in ['gaussian', 'gauss', 'gss']:
winfunc = gaussian
elif winstr in ['general gaussian', 'general_gaussian',
'general gauss', 'general_gauss', 'ggs']:
winfunc = general_gaussian
elif winstr in ['boxcar', 'box', 'ones']:
winfunc = boxcar
elif winstr in ['slepian', 'slep', 'optimal', 'dss']:
winfunc = slepian
else:
raise ValueError, "Unknown window type."
params = (Nx,)+args + (sym,)
else:
winfunc = kaiser
params = (Nx,beta,sym)
return winfunc(*params)
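# Illustrative usage sketch (helper name is ours): parameter-free windows
# are requested by name; parameterized windows use a (name, param) tuple,
# here a Kaiser window with beta = 8.6.
def _get_window_example():
    return get_window('hamming', 64), get_window(('kaiser', 8.6), 64)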
def resample(x,num,t=None,axis=0,window=None):
"""Resample to num samples using Fourier method along the given axis.
The resampled signal starts at the same value of x but is sampled
with a spacing of len(x) / num * (spacing of x). Because a
Fourier method is used, the signal is assumed periodic.
Window controls a Fourier-domain window that tapers the Fourier
spectrum before zero-padding to alleviate ringing in the resampled
values for sampled signals you didn't intend to be interpreted as
band-limited.
If window is a function, then it is called with a vector of inputs
indicating the frequency bins (i.e. fftfreq(x.shape[axis]) )
If window is an array of the same length as x.shape[axis] it is
assumed to be the window to be applied directly in the Fourier
domain (with dc and low-frequency first).
If window is a string then use the named window. If window is a
float, then it represents a value of beta for a kaiser window. If
window is a tuple, then the first component is a string
representing the window, and the next arguments are parameters for
that window.
Possible windows are:
'flattop' -- 'flat', 'flt'
'boxcar' -- 'ones', 'box'
    'triang' -- 'triangle', 'tri'
'parzen' -- 'parz', 'par'
'bohman' -- 'bman', 'bmn'
'blackmanharris' -- 'blackharr', 'bkh'
'nuttall', -- 'nutl', 'nut'
'barthann' -- 'brthan', 'bth'
'blackman' -- 'black', 'blk'
'hamming' -- 'hamm', 'ham'
'bartlett' -- 'bart', 'brt'
'hanning' -- 'hann', 'han'
('kaiser', beta) -- 'ksr'
('gaussian', std) -- 'gauss', 'gss'
('general gauss', power, width) -- 'general', 'ggs'
('slepian', width) -- 'slep', 'optimal', 'dss'
The first sample of the returned vector is the same as the first
sample of the input vector, the spacing between samples is changed
from dx to
dx * len(x) / num
If t is not None, then it represents the old sample positions, and the new
sample positions will be returned as well as the new samples.
"""
x = asarray(x)
X = fft(x,axis=axis)
Nx = x.shape[axis]
if window is not None:
if callable(window):
W = window(fftfreq(Nx))
elif isinstance(window, ndarray) and window.shape == (Nx,):
W = window
else:
W = ifftshift(get_window(window,Nx))
newshape = ones(len(x.shape))
newshape[axis] = len(W)
W.shape = newshape
X = X*W
sl = [slice(None)]*len(x.shape)
newshape = list(x.shape)
newshape[axis] = num
N = int(np.minimum(num,Nx))
Y = zeros(newshape,'D')
sl[axis] = slice(0,(N+1)/2)
Y[sl] = X[sl]
sl[axis] = slice(-(N-1)/2,None)
Y[sl] = X[sl]
y = ifft(Y,axis=axis)*(float(num)/float(Nx))
if x.dtype.char not in ['F','D']:
y = y.real
if t is None:
return y
else:
new_t = arange(0,num)*(t[1]-t[0])* Nx / float(num) + t[0]
return y, new_t
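# Illustrative usage sketch (helper name is ours): Fourier-domain
# resampling of one period of a sine from 100 samples down to 25.
def _resample_example():
    t = arange(0.0, 1.0, 0.01)
    return resample(sin(2 * pi * 5.0 * t), 25)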
def detrend(data, axis=-1, type='linear', bp=0):
"""Remove linear trend along axis from data.
If type is 'constant' then remove mean only.
If bp is given, then it is a sequence of points at which to
break a piecewise-linear fit to the data.
"""
if type not in ['linear','l','constant','c']:
raise ValueError, "Trend type must be linear or constant"
data = asarray(data)
dtype = data.dtype.char
if dtype not in 'dfDF':
dtype = 'd'
if type in ['constant','c']:
ret = data - expand_dims(mean(data,axis),axis)
return ret
else:
dshape = data.shape
N = dshape[axis]
bp = sort(unique(r_[0,bp,N]))
if any(bp > N):
raise ValueError, "Breakpoints must be less than length " \
"of data along given axis."
Nreg = len(bp) - 1
# Restructure data so that axis is along first dimension and
# all other dimensions are collapsed into second dimension
rnk = len(dshape)
if axis < 0: axis = axis + rnk
newdims = r_[axis,0:axis,axis+1:rnk]
newdata = reshape(transpose(data, tuple(newdims)),
(N, prod(dshape, axis=0)/N))
newdata = newdata.copy() # make sure we have a copy
if newdata.dtype.char not in 'dfDF':
newdata = newdata.astype(dtype)
# Find leastsq fit and remove it for each piece
for m in range(Nreg):
Npts = bp[m+1] - bp[m]
A = ones((Npts,2),dtype)
A[:,0] = cast[dtype](arange(1,Npts+1)*1.0/Npts)
sl = slice(bp[m],bp[m+1])
coef,resids,rank,s = linalg.lstsq(A,newdata[sl])
newdata[sl] = newdata[sl] - dot(A,coef)
# Put data back in original shape.
tdshape = take(dshape,newdims,0)
ret = reshape(newdata,tuple(tdshape))
vals = range(1,rnk)
olddims = vals[:axis] + [0] + vals[axis:]
ret = transpose(ret,tuple(olddims))
return ret
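# Illustrative usage sketch (helper name is ours): subtracting the
# least-squares line leaves (approximately) only the oscillation.
def _detrend_example():
    t = arange(100.0)
    x = 0.5 * t + sin(2 * pi * t / 10.0)
    return detrend(x, type='linear')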
def lfilter_zi(b,a):
#compute the zi state from the filter parameters. see [Gust96].
#Based on:
# [Gust96] Fredrik Gustafsson, Determining the initial states in
# forward-backward filtering, IEEE Transactions on
# Signal Processing, pp. 988--992, April 1996,
# Volume 44, Issue 4
n=max(len(a),len(b))
zin = (np.eye(n-1) - np.hstack((-a[1:n,newaxis],
np.vstack((np.eye(n-2),zeros(n-2))))))
zid= b[1:n] - a[1:n]*b[0]
zi_matrix=linalg.inv(zin)*(np.matrix(zid).transpose())
zi_return=[]
#convert the result into a regular array (not a matrix)
for i in range(len(zi_matrix)):
zi_return.append(float(zi_matrix[i][0]))
return array(zi_return)
def filtfilt(b,a,x):
b, a, x = map(asarray, [b, a, x])
# FIXME: For now only accepting 1d arrays
ntaps=max(len(a),len(b))
edge=ntaps*3
    if x.ndim != 1:
        raise ValueError("filtfilt only accepts 1-d arrays.")
#x must be bigger than edge
    if x.size < edge:
        raise ValueError("Input vector needs to be bigger than "
                         "3 * max(len(a), len(b)).")
if len(a) < ntaps:
a=r_[a,zeros(len(b)-len(a))]
if len(b) < ntaps:
b=r_[b,zeros(len(a)-len(b))]
zi = lfilter_zi(b,a)
#Grow the signal to have edges for stabilizing
#the filter with inverted replicas of the signal
s=r_[2*x[0]-x[edge:1:-1],x,2*x[-1]-x[-1:-edge:-1]]
    #a single forward pass would need only one of the two extensions;
    #filtfilt needs both because it filters forward and then backward
(y,zf)=lfilter(b,a,s,-1,zi*s[0])
(y,zf)=lfilter(b,a,flipud(y),-1,zi*y[-1])
return flipud(y[edge-1:-edge+1])
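# Illustrative usage sketch (helper name is ours): because the data are
# filtered forward and then backward, filtfilt's output has no phase lag
# relative to the input, unlike a single lfilter pass.
def _filtfilt_example():
    x = sin(2 * pi * 0.05 * arange(100.0))
    return filtfilt(ones(5) / 5.0, array([1.0]), x)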
from scipy.signal.filter_design import cheby1, firwin
def decimate(x, q, n=None, ftype='iir', axis=-1):
"""downsample the signal x by an integer factor q, using an order n filter
By default an order 8 Chebyshev type I filter is used or a 30 point FIR
filter with hamming window if ftype is 'fir'.
Parameters
----------
x : N-d array
the signal to be downsampled
q : int
the downsampling factor
n : int or None
the order of the filter (1 less than the length for 'fir')
ftype : {'iir' or 'fir'}
the type of the lowpass filter
axis : int
the axis along which to decimate
Returns
-------
y : N-d array
the down-sampled signal
See also: resample
"""
if not isinstance(q, int):
raise TypeError, "q must be an integer"
if n is None:
if ftype == 'fir':
n = 30
else:
n = 8
if ftype == 'fir':
b = firwin(n+1, 1./q, window='hamming')
a = 1.
else:
b, a = cheby1(n, 0.05, 0.8/q)
y = lfilter(b, a, x, axis=axis)
sl = [None]*y.ndim
sl[axis] = slice(None, None, q)
return y[sl]
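# Illustrative usage sketch (helper name is ours): reduce the sampling
# rate by a factor of 4 behind the default order-8 Chebyshev
# anti-aliasing filter.
def _decimate_example():
    x = sin(2 * pi * 0.01 * arange(200.0))
    return decimate(x, 4)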
|
stefanv/scipy3
|
scipy/signal/signaltools.py
|
Python
|
bsd-3-clause
| 53,313
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-31 18:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('videos', '0034_auto_20160730_1623'),
]
operations = [
migrations.AddField(
model_name='scene',
name='description',
field=models.TextField(blank=True, default=''),
),
]
|
curtwagner1984/YAPO
|
videos/migrations/0035_scene_description.py
|
Python
|
gpl-3.0
| 460
|
# -*- encoding: utf-8 -*-
from pysis.services.base import Service
class Blastcells(Service):
"""Blastcells Service
Consumes Blastcells API: <{url}/blastcells>
"""
def __init__(self, client):
"""Creates Blastcells object with a client"""
super(Blastcells, self).__init__(client)
def get(self, id=None):
"""Gets Blastcells from the API
Args:
id (int): id of the blastcell.
if None, returns all Blastcells
Returns:
Blastcells resources
"""
if id is None:
request = self.request_builder('blastcells.get')
else:
assert isinstance(id, int)
request = self.request_builder('blastcells.get', id=id)
return self._get(request)
|
sustainableis/python-sis
|
pysis/services/blastcells/__init__.py
|
Python
|
isc
| 860
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^api/', include('emoticonvis.apps.api.urls')),
url(r'^docs/', include('docs.urls')),
url(r'^', include('emoticonvis.apps.base.urls')),
url(r'^topics/', include('emoticonvis.apps.enhance.urls')),
)
from django.conf import settings
if 'debug_toolbar' in settings.INSTALLED_APPS and not settings.DEBUG_TOOLBAR_PATCH_SETTINGS:
import debug_toolbar
urlpatterns += patterns('',
url(r'^__debug__/', include(debug_toolbar.urls)),
)
|
nanchenchen/emoticon-analysis
|
emoticonvis/urls.py
|
Python
|
mit
| 767
|
from django.core.management.base import BaseCommand, CommandError
from layerinfo.models import Theme,Issue,Layer, PointLayer, Points
import json
from os import listdir
from os.path import isfile, join
import csv
import sys
from optparse import make_option
from geopy import geocoders
class Command(BaseCommand):
    help = 'Load themes, issues, layers and points from CSV files in the SQL dir'
option_list = BaseCommand.option_list + (make_option('--path',
action='store', dest='basepath', default=None,
help='The Path to the sql dir'),)
def handle(self, *args, **options):
#delete everything
delitems = [Theme,Issue,Layer, PointLayer, Points]
for delitem in delitems:
delobjs = delitem.objects.all()
for delobj in delobjs:
print "deleting", delobj
delobj.delete()
BASE_DIR = options.get("basepath", None)
if not BASE_DIR:
print "Please provide the path to your SQL dir\nSuch as --path C:\\opengeo\\webapps\\DiplomacyExplorer2\\sql\\"
sys.exit(1)
pointscsvfile = "Points.csv"
layerscsvfile = "Layers.csv"
themescsvfile = "Themes.csv"
issuescsvfile = "Issues.csv"
pointsobj = []
        #load points; we are going to get the point layers from the Points.csv file
#this will be the following format
#pointlayername;header;topic;map;country;title;story;lat;lon;locationname
with open(BASE_DIR + pointscsvfile, 'rb') as f:
headers = []
tempreader = csv.reader(f, delimiter=',')
for row in tempreader:
if len(headers) == 0:
headers = row
else:
pointsobj.append(dict(zip(headers, row)))
g = geocoders.GoogleV3()
#create pointlayer
for pointobj in pointsobj:
temppointlayer, created = PointLayer.objects.get_or_create(layername=pointobj['pointlayername'])
print temppointlayer
#build the point
temppoint, created = Points.objects.get_or_create(Title=pointobj['title'],pointlayer=temppointlayer)
if (pointobj['lon'] != "" and pointobj['lat'] != ""):
try:
temppoint.geometry = [float(pointobj['lon']),float(pointobj['lat'])]
except Exception, e:
print e
elif (pointobj['locationname'] != ""):
place, (lat,lon) = g.geocode(pointobj['locationname'])
print "geocoded", pointobj['locationname'], "to", lat,lon
print temppoint
temppoint.geometry = [lon, lat]
else:
print "Could not find the location for ", pointobj['pointlayername']
temppoint.delete()
if temppoint:
temppoint.Header = pointobj['header']
temppoint.Topic = pointobj['topic']
temppoint.Map = pointobj['map']
temppoint.Country = pointobj['country']
temppoint.Story = pointobj['story']
temppoint.save()
layersobj = []
themesobj = []
issuesobj = []
        #need to do this in order: themes, issues and layers
with open(BASE_DIR + layerscsvfile, 'rb') as f:
headers = []
tempreader = csv.reader(f, delimiter=';')
for row in tempreader:
if len(headers) == 0:
headers = row
else:
layersobj.append(dict(zip(headers, row)))
with open(BASE_DIR + themescsvfile, 'rb') as f:
headers = []
tempreader = csv.reader(f, delimiter=',')
for row in tempreader:
if len(headers) == 0:
headers = row
else:
themesobj.append(dict(zip(headers, row)))
with open(BASE_DIR + issuescsvfile, 'rb') as f:
headers = []
tempreader = csv.reader(f, delimiter=';')
for row in tempreader:
if len(headers) == 0:
headers = row
else:
issuesobj.append(dict(zip(headers, row)))
        #get the themes
counter = 1
for themerow in themesobj:
print "working on theme ", themerow['ThemeID']
#Name,Description,KeyID,ThemeID,ThemeDrop
currenttheme, created = Theme.objects.get_or_create(keyid=themerow['ThemeID'])
print currenttheme, created
currenttheme.title = themerow['Name']
currenttheme.description = themerow['Description']
currenttheme.keyid = themerow['ThemeID']
currenttheme.order = counter
counter +=1
currenttheme.save()
for issuerow in issuesobj:
#Name,Description,KeyID,ThemeID,ID
try:
themeobj = Theme.objects.get(keyid__exact=issuerow['ThemeID'])
except:
print "could not find themeobj for ", issuerow['KeyID']
else:
currentissue, created = Issue.objects.get_or_create(keyid=issuerow['KeyID'], theme=themeobj)
print currentissue, created
currentissue.categoryName = issuerow['Name']
currentissue.categoryDescription = issuerow['Description']
currentissue.keyid = issuerow['KeyID']
currentissue.save()
for layerrow in layersobj:
#Name,Description,KeyID,Labels,IssueID,jsonStyle,PtsLayer,Attribution
try:
issueobj = Issue.objects.get(keyid__exact=layerrow['IssueID'])
except:
print "could not find issueobj for ", layerrow['IssueID'], "and layer name", layerrow['KeyID']
else:
currentlayer,created = Layer.objects.get_or_create(keyid=layerrow['KeyID'], issue=issueobj)
currentlayer.subject = layerrow['Name']
currentlayer.description = layerrow['Description']
currentlayer.keyid = layerrow['KeyID']
currentlayer.labels = layerrow['Labels']
currentlayer.jsonStyle = layerrow['jsonStyle']
try:
temppointlayer = PointLayer.objects.get(layername__exact=layerrow['KeyID'])
currentlayer.ptsLayer = temppointlayer
except:
pass
currentlayer.attribution = layerrow['Attribution']
currentlayer.isTimeSupported = True if str(layerrow['isTimeSupported']) == "TRUE" else False
if layerrow['isTimeSupported'] == "TRUE":
currentlayer.timeSeriesInfo = layerrow['timeSeriesInfo']
else:
currentlayer.timeSeriesInfo = {}
currentlayer.save()
#now let's test
print "*********we now have the following"
themes = Theme.objects.all()
print "we have ", len(themes), "Themes"
for theme in themes:
issues = theme.issue_set.all()
print "\t", theme.title, "has ", len(issues), "issues"
for issue in issues:
layers = issue.layer_set.all()
print "\t\t", issue.categoryName, "has", len(layers), "Layers"
for layer in layers:
print "\t\t\tIt has", layer.subject
pointlayer = layer.ptsLayer
if pointlayer:
print "\t\t\t\tIt has ", pointlayer.layername
points = pointlayer.points_set.all()
print "\t\t\t\t", pointlayer.layername, "has", len(points)
|
USStateDept/DiplomacyExplorer2
|
dipex/layerinfo/management/commands/loadpadata.py
|
Python
|
mit
| 7,839
|
# -*- coding: utf-8 -*-
"""Discover and lookup command plugins.
This comes from OpenStack cliff.
"""
import inspect
import logging
import pkg_resources
LOG = logging.getLogger(__name__)
class EntryPointWrapper(object):
"""Wrap up a command class already imported to make it look like a plugin.
"""
def __init__(self, name, command_class):
self.name = name
self.command_class = command_class
def load(self, require=False):
return self.command_class
class CommandManager(object):
"""Discovers commands and handles lookup based on argv data.
:param namespace: String containing the setuptools entrypoint namespace
for the plugins to be loaded. For example,
``'cliff.formatter.list'``.
:param convert_underscores: Whether cliff should convert underscores to
spaces in entry_point commands.
"""
def __init__(self, namespace, convert_underscores=True):
self.commands = {}
self.namespace = namespace
self.convert_underscores = convert_underscores
self._load_commands()
def _load_commands(self):
        # NOTE(jamielennox): kept for compatibility.
self.load_commands(self.namespace)
def load_commands(self, namespace):
"""Load all the commands from an entrypoint"""
for ep in pkg_resources.iter_entry_points(namespace):
LOG.debug('found command %r', ep.name)
cmd_name = (ep.name.replace('_', ' ')
if self.convert_underscores
else ep.name)
self.commands[cmd_name] = ep
return
def __iter__(self):
return iter(self.commands.items())
def add_command(self, name, command_class):
self.commands[name] = EntryPointWrapper(name, command_class)
def find_command(self, argv):
"""Given an argument list, find a command and
return the processor and any remaining arguments.
"""
search_args = argv[:]
name = ''
while search_args:
if search_args[0].startswith('-'):
name = '%s %s' % (name, search_args[0])
raise ValueError('Invalid command %r' % name)
next_val = search_args.pop(0)
name = '%s %s' % (name, next_val) if name else next_val
if name in self.commands:
cmd_ep = self.commands[name]
if hasattr(cmd_ep, 'resolve'):
cmd_factory = cmd_ep.resolve()
else:
# NOTE(dhellmann): Some fake classes don't take
# require as an argument. Yay?
arg_spec = inspect.getargspec(cmd_ep.load)
if 'require' in arg_spec[0]:
cmd_factory = cmd_ep.load(require=False)
else:
cmd_factory = cmd_ep.load()
return (cmd_factory, name, search_args)
else:
raise ValueError('Unknown command %r' % next(iter(argv), ''))
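# Hedged usage sketch -- the entrypoint namespace and command class below
# are illustrative placeholders, not part of gearbox itself.
if __name__ == '__main__':
    class _HelloCommand(object):
        """Toy command class used only for this demonstration."""
    manager = CommandManager('example.commands')
    manager.add_command('hello world', _HelloCommand)
    factory, name, remaining = manager.find_command(
        ['hello', 'world', '--verbose'])
    assert factory is _HelloCommand
    assert remaining == ['--verbose']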
|
TurboGears/gearbox
|
gearbox/commandmanager.py
|
Python
|
mit
| 3,089
|
# -*- encoding: utf-8 -*-
from abjad import *
def test_rhythmmakertools_TaleaRhythmMaker_tie_split_notes_01():
talea = rhythmmakertools.Talea(
counts=(5,),
denominator=16,
)
maker = rhythmmakertools.TaleaRhythmMaker(
talea=talea,
)
divisions = [(2, 8), (2, 8), (2, 8), (2, 8)]
music = maker(divisions)
music = sequencetools.flatten_sequence(music)
measures = scoretools.make_spacer_skip_measures(divisions)
staff = Staff(measures)
measures = mutate(staff).replace_measure_contents(music)
assert systemtools.TestManager.compare(
staff,
r'''
\new Staff {
{
\time 2/8
c'4 ~
}
{
c'16 [
c'8. ~ ]
}
{
c'8 [
c'8 ~ ]
}
{
c'8. [
c'16 ]
}
}
'''
)
assert inspect_(staff).is_well_formed()
def test_rhythmmakertools_TaleaRhythmMaker_tie_split_notes_02():
talea = rhythmmakertools.Talea(
counts=(5,),
denominator=16,
)
maker = rhythmmakertools.TaleaRhythmMaker(
talea=talea,
)
divisions = [(3, 16), (5, 8), (4, 8), (7, 16)]
music = maker(divisions)
music = sequencetools.flatten_sequence(music)
measures = scoretools.make_spacer_skip_measures(divisions)
staff = Staff(measures)
measures = mutate(staff).replace_measure_contents(music)
assert systemtools.TestManager.compare(
staff,
r'''
\new Staff {
{
\time 3/16
c'8. ~
}
{
\time 5/8
c'8
c'4 ~
c'16 [
c'8. ~ ]
}
{
\time 4/8
c'8
c'4 ~
c'16 [
c'16 ~ ]
}
{
\time 7/16
c'4
c'8.
}
}
'''
)
assert inspect_(staff).is_well_formed()
|
mscuthbert/abjad
|
abjad/tools/rhythmmakertools/test/test_rhythmmakertools_TaleaRhythmMaker_tie_split_notes.py
|
Python
|
gpl-3.0
| 2,189
|
"""Test the Coolmaster config flow."""
from homeassistant import config_entries
from homeassistant.components.coolmaster.const import AVAILABLE_MODES, DOMAIN
from tests.async_mock import patch
def _flow_data():
options = {"host": "1.1.1.1"}
for mode in AVAILABLE_MODES:
options[mode] = True
return options
async def test_form(hass):
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] is None
with patch(
"homeassistant.components.coolmaster.config_flow.CoolMasterNet.status",
return_value={"test_id": "test_unit"},
), patch(
"homeassistant.components.coolmaster.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.coolmaster.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], _flow_data()
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "1.1.1.1"
assert result2["data"] == {
"host": "1.1.1.1",
"port": 10102,
"supported_modes": AVAILABLE_MODES,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_timeout(hass):
"""Test we handle a connection timeout."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.coolmaster.config_flow.CoolMasterNet.status",
side_effect=TimeoutError(),
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], _flow_data()
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_connection_refused(hass):
"""Test we handle a connection error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.coolmaster.config_flow.CoolMasterNet.status",
side_effect=ConnectionRefusedError(),
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], _flow_data()
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_no_units(hass):
"""Test we handle no units found."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.coolmaster.config_flow.CoolMasterNet.status",
return_value={},
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], _flow_data()
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "no_units"}
|
tboyce021/home-assistant
|
tests/components/coolmaster/test_config_flow.py
|
Python
|
apache-2.0
| 3,138
|
# flake8: noqa
"""Settings to be used for running tests."""
from settings import *
INSTALLED_APPS.append('integration_tests')
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
}
EMAIL_SUBJECT_PREFIX = '[test] '
EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
SOUTH_TESTS_MIGRATE = False
|
bitmazk/webfaction-django-boilerplate
|
website/webapps/django/project/settings/test_settings.py
|
Python
|
mit
| 371
|
"""
WSGI config for mothra project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
import site
import djcelery
djcelery.setup_loader()
project_path = '/var/www/textflows'
if project_path not in sys.path:
sys.path.append(project_path)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mothra.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
xflows/textflows
|
mothra/wsgi.py
|
Python
|
mit
| 1,305
|
##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from odoo import fields, models, api, _
from odoo.exceptions import UserError
import logging
_logger = logging.getLogger(__name__)
class AfipwsConnection(models.Model):
_name = "afipws.connection"
_description = "AFIP WS Connection"
_rec_name = "afip_ws"
_order = "expirationtime desc"
company_id = fields.Many2one(
'res.company',
'Company',
required=True,
index=True,
auto_join=True,
)
uniqueid = fields.Char(
'Unique ID',
readonly=True,
)
token = fields.Text(
'Token',
readonly=True,
)
sign = fields.Text(
'Sign',
readonly=True,
)
generationtime = fields.Datetime(
'Generation Time',
readonly=True
)
expirationtime = fields.Datetime(
'Expiration Time',
readonly=True
)
afip_login_url = fields.Char(
'AFIP Login URL',
compute='get_urls',
)
afip_ws_url = fields.Char(
'AFIP WS URL',
compute='get_urls',
)
type = fields.Selection(
[('production', 'Production'), ('homologation', 'Homologation')],
'Type',
required=True,
)
afip_ws = fields.Selection([
('ws_sr_padron_a4', 'Servicio de Consulta de Padrón Alcance 4'),
('ws_sr_padron_a5', 'Servicio de Consulta de Padrón Alcance 5'),
('ws_sr_padron_a10', 'Servicio de Consulta de Padrón Alcance 10'),
('ws_sr_padron_a100', 'Servicio de Consulta de Padrón Alcance 100'),
],
'AFIP WS',
required=True,
)
@api.multi
@api.depends('type', 'afip_ws')
def get_urls(self):
for rec in self:
rec.afip_login_url = rec.get_afip_login_url(rec.type)
afip_ws_url = rec.get_afip_ws_url(rec.afip_ws, rec.type)
if rec.afip_ws and not afip_ws_url:
raise UserError(_('Webservice %s not supported') % rec.afip_ws)
rec.afip_ws_url = afip_ws_url
@api.model
def get_afip_login_url(self, environment_type):
if environment_type == 'production':
afip_login_url = (
'https://wsaa.afip.gov.ar/ws/services/LoginCms')
else:
afip_login_url = (
'https://wsaahomo.afip.gov.ar/ws/services/LoginCms')
return afip_login_url
@api.model
def get_afip_ws_url(self, afip_ws, environment_type):
"""
Function to be inherited on each module that add a new webservice
"""
_logger.info('Getting URL for afip ws %s on %s' % (
afip_ws, environment_type))
afip_ws_url = False
if afip_ws == 'ws_sr_padron_a4':
if environment_type == 'production':
afip_ws_url = (
"https://aws.afip.gov.ar/sr-padron/webservices/"
"personaServiceA4?wsdl")
else:
afip_ws_url = (
"https://awshomo.afip.gov.ar/sr-padron/webservices/"
"personaServiceA4?wsdl")
elif afip_ws == 'ws_sr_padron_a5':
if environment_type == 'production':
afip_ws_url = (
"https://aws.afip.gov.ar/sr-padron/webservices/"
"personaServiceA5?wsdl")
else:
afip_ws_url = (
"https://awshomo.afip.gov.ar/sr-padron/webservices/"
"personaServiceA5?wsdl")
return afip_ws_url
@api.multi
def check_afip_ws(self, afip_ws):
        # TODO maybe rename this method once we see whether it returns something else
self.ensure_one()
if self.afip_ws != afip_ws:
raise UserError(_(
'This method is for %s connections and you call it from an'
' %s connection') % (
afip_ws, self.afip_ws))
@api.multi
def connect(self):
"""
Method to be called
"""
self.ensure_one()
_logger.info(
'Getting connection to ws %s from libraries on '
'connection id %s' % (self.afip_ws, self.id))
        ws = self._get_ws(self.afip_ws)
        if not ws:
            raise UserError(_(
                'AFIP Webservice %s not implemented yet') % self.afip_ws)
        # patch for the error AFIP raises when querying this option on
        # homologation
        # https://groups.google.com/d/msg/pyafipws/Xr08e4ZuMmQ/6iDzXwdJAwAJ
        # TODO improve this: it probably will not work in test, but at this
        # point we do not have the env_type data
        if self.afip_ws in ['ws_sr_padron_a4', 'ws_sr_padron_a5']:
            ws.HOMO = False
# TODO implementar cache y proxy
# create the proxy and get the configuration system parameters:
# cfg = self.pool.get('ir.config_parameter').sudo()
# cache = cfg.get_param(cr, uid, 'pyafipws.cache', context=context)
# proxy = cfg.get_param(cr, uid, 'pyafipws.proxy', context=context)
wsdl = self.afip_ws_url
# connect to the webservice and call to the test method
ws.Conectar("", wsdl or "", "")
cuit = self.company_id.cuit_required()
ws.Cuit = cuit
ws.Token = self.token
ws.Sign = self.sign
        # TODO needed until this PR is accepted
ws.Obs = ''
ws.Errores = []
_logger.info(
            'Connection established with url "%s", cuit "%s"' % (
wsdl, ws.Cuit))
return ws
@api.model
def _get_ws(self, afip_ws):
"""
Method to be inherited
"""
_logger.info('Getting ws %s from libraries ' % afip_ws)
        # for now only ws_sr_padron_a4 and ws_sr_padron_a5 are implemented
ws = False
if afip_ws == 'ws_sr_padron_a4':
from pyafipws.ws_sr_padron import WSSrPadronA4
ws = WSSrPadronA4()
elif afip_ws == 'ws_sr_padron_a5':
from pyafipws.ws_sr_padron import WSSrPadronA5
ws = WSSrPadronA5()
return ws
|
bmya/odoo-argentina
|
l10n_ar_afipws/models/afipws_connection.py
|
Python
|
agpl-3.0
| 6,250
|
# Generated by Django 2.0.4 on 2018-04-24 22:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('calaccess_processed_filings', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Form496Filing',
fields=[
('date_filed', models.DateField(db_index=True, help_text='Date this report was filed, according to the filer (from CVR_CAMPAIGN_DISCLOSURE.RPT_DATE)', verbose_name='date filed')),
('filer_id', models.IntegerField(db_index=True, help_text='Numeric filer identification number (from FILER_XREF.FILER_ID)', verbose_name='filer id')),
('filer_lastname', models.CharField(help_text='Last name of filer (from CVR_CAMPAIGN_DISCLOSURE.FILER_NAML)', max_length=200, verbose_name='filer last name')),
('filer_firstname', models.CharField(blank=True, help_text='First name of the filer (from CVR_CAMPAIGN_DISCLOSURE.FILER_NAMF)', max_length=45, verbose_name='filer first name')),
('election_date', models.DateField(db_index=True, help_text='Date of the election in which the filer is participating (from CVR_CAMPAIGN_DISCLOSURE.ELECT_DATE)', null=True, verbose_name='election date')),
('filing_id', models.IntegerField(help_text='Unique identification number for the Schedule 496 filing (from CVR_CAMPAIGN_DISCLOSURE_CD.FILING_ID)', primary_key=True, serialize=False, verbose_name='filing id')),
('amendment_count', models.IntegerField(help_text='Number of amendments to the Schedule 496 filing (from maximum value of CVR_CAMPAIGN_DISCLOSURE_CD.AMEND_ID)', verbose_name='Count amendments')),
],
options={
'verbose_name': 'Form 496 (Late Independent Expenditure) filing',
},
),
migrations.CreateModel(
name='Form496FilingVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_filed', models.DateField(db_index=True, help_text='Date this report was filed, according to the filer (from CVR_CAMPAIGN_DISCLOSURE.RPT_DATE)', verbose_name='date filed')),
('filer_id', models.IntegerField(db_index=True, help_text='Numeric filer identification number (from FILER_XREF.FILER_ID)', verbose_name='filer id')),
('filer_lastname', models.CharField(help_text='Last name of filer (from CVR_CAMPAIGN_DISCLOSURE.FILER_NAML)', max_length=200, verbose_name='filer last name')),
('filer_firstname', models.CharField(blank=True, help_text='First name of the filer (from CVR_CAMPAIGN_DISCLOSURE.FILER_NAMF)', max_length=45, verbose_name='filer first name')),
('election_date', models.DateField(db_index=True, help_text='Date of the election in which the filer is participating (from CVR_CAMPAIGN_DISCLOSURE.ELECT_DATE)', null=True, verbose_name='election date')),
('amend_id', models.IntegerField(help_text='Identifies the version of the Schedule 496 filing, with 0 representing the initial filing (from CVR_CAMPAIGN_DISCLOSURE_CD.AMEND_ID)')),
('filing', models.ForeignKey(db_constraint=False, help_text='Unique identification number for the Schedule 496 filing (from S496_CD.FILING_ID)', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='versions', to='calaccess_processed_filings.Form496Filing')),
],
options={
'verbose_name': 'Form 496 (Late Independent Expenditure) filing version',
},
),
migrations.AlterIndexTogether(
name='form496filing',
index_together={('filing_id', 'amendment_count')},
),
migrations.AlterUniqueTogether(
name='form496filingversion',
unique_together={('filing', 'amend_id')},
),
migrations.AlterIndexTogether(
name='form496filingversion',
index_together={('filing', 'amend_id')},
),
]
|
california-civic-data-coalition/django-calaccess-processed-data
|
calaccess_processed_filings/migrations/0002_auto_20180424_2226.py
|
Python
|
mit
| 4,143
|
"""Tcp client for synchronous uhd message tcp port"""
import threading
import Queue
import time
import socket
import struct
import traceback
import numpy as np
class _TcpSyncClient(threading.Thread):
"""Thead for message polling"""
queue = Queue.Queue()
q_quit = Queue.Queue()
ip_address = None
port = None
def __init__(self, ip_address, port, packet_size, packet_type):
super(_TcpSyncClient, self).__init__()
self.ip_address = ip_address
self.port = port
self.packet_size = packet_size
self.packet_type = packet_type
def __exit__(self):
self.stop()
def run(self):
"""connect and poll messages to queue"""
#Establish connection
sock = None
print("Connecting to synchronous uhd message tcp port " + str(self.port))
while self.q_quit.empty():
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.ip_address, self.port))
break
except socket.error:
print("connecting to synchronous uhd message tcp port " + str(self.port))
#traceback.print_exc()
sock.close()
time.sleep(0.5)
print("Connected to synchronous uhd message tcp port " + str(self.port))
#Read messages
sock.settimeout(None)
s = ""
while self.q_quit.empty():
try:
#concatenate to one package
while self.q_quit.empty():
s += sock.recv(self.packet_size)
                    if len(s) >= self.packet_size:
                        break
                res_tuple = struct.unpack(self.packet_type, s[:self.packet_size])
s = s[self.packet_size:]
self.queue.put(res_tuple)
            except socket.timeout:
                self.stop()
                traceback.print_exc()
sock.close()
def stop(self):
"""stop thread"""
print("stop tcp_sync uhd message tcp thread")
self.q_quit.put("end")
class UhdSyncMsg(object):
"""Creates a thread to connect to the synchronous uhd messages tcp port"""
def __init__(self, ip_address = "127.0.0.1", port = 47009, packet_size = 3, packet_type = "fff"):
self.tcpa = _TcpSyncClient(ip_address, port, packet_size, packet_type)
self.tcpa.start()
def __exit__(self):
self.tcpa.stop()
def stop(self):
"""stop tcp thread"""
self.tcpa.stop()
def get_msgs(self, num):
"""get received messages as string of integer"""
out = []
while len(out) < num:
out.append(self.tcpa.queue.get())
return out
def get_msgs_fft(self, num):
"""
get received messages as string of integer
apply fftshift to message
"""
out = []
while len(out) < num:
out.append(self.tcpa.queue.get())
return [np.fft.fftshift(np.array(o)) for o in out]
def get_res(self):
"""get received messages as string of integer"""
out = []
while not self.tcpa.queue.empty():
out.append(self.tcpa.queue.get())
return out
    def has_msg(self):
        """Checks if one or more messages were received and empties the message queue"""
        return len(self.get_res()) > 0
|
Opendigitalradio/ODR-StaticPrecorrection
|
src/tcp_sync.py
|
Python
|
mit
| 3,403
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import, division, print_function
import cookielib
def toPyCookie(QtCookie):
"""
qt 쿠키를 python 쿠키로 바꿉니다.
.. 참조:
http://pyqt.sourceforge.net/Docs/PyQt4/qnetworkcookie.html
https://github.com/jeanphix/Ghost.py/blob/dev/ghost/ghost.py
:param QtCookie: qt 쿠키.
:type QtCookie: :py:class:`PyQt4.QtNetwork.QNetworkCookie`
:return: python 쿠키.
:rtype: :py:class:`cookielib.Cookie`
"""
port=None
port_specified=False
secure=QtCookie.isSecure()
name=str(QtCookie.name())
value=str(QtCookie.value())
v = str(QtCookie.path())
path_specified = bool( v != "" )
path = v if path_specified else None
v = str(QtCookie.domain())
domain_specified = bool( v != "" )
domain = v
domain_initial_dot = v.startswith('.') if domain_specified else None
v = long(QtCookie.expirationDate().toTime_t())
    # Long type boundary on 32-bit platforms; avoid ValueError
expires = 2147483647 if v > 2147483647 else v
rest = {}
discard = False
return cookielib.Cookie(0, name, value, port, port_specified, domain, domain_specified, domain_initial_dot, path, path_specified, secure, expires, discard, None, None, rest)
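# Hedged usage sketch (added for illustration; requires PyQt4, and the
# cookie values below are placeholders):
if __name__ == '__main__':
    from PyQt4.QtNetwork import QNetworkCookie
    qt_cookie = QNetworkCookie('session', 'abc123')
    qt_cookie.setDomain('.example.com')
    qt_cookie.setPath('/')
    py_cookie = toPyCookie(qt_cookie)
    print(py_cookie.name, py_cookie.domain)  # session .example.com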
|
Thestars3/pyufp
|
ufp/pyqt4/QNetworkCookie.py
|
Python
|
gpl-3.0
| 1,239
|
# This file is part of Checkbox.
#
# Copyright 2013 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <zygmunt.krynicki@canonical.com>
# Daniel Manrique <roadmr@ubuntu.com>
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
"""
plainbox.impl.commands.test_run
===============================
Test definitions for plainbox.impl.run module
"""
import os
import shutil
import tempfile
from collections import OrderedDict
from inspect import cleandoc
from unittest import TestCase
from plainbox.impl.box import main
from plainbox.impl.exporter.json import JSONSessionStateExporter
from plainbox.impl.exporter.rfc822 import RFC822SessionStateExporter
from plainbox.impl.exporter.text import TextSessionStateExporter
from plainbox.impl.exporter.xml import XMLSessionStateExporter
from plainbox.testing_utils.io import TestIO
from plainbox.vendor.mock import patch
class TestRun(TestCase):
def setUp(self):
# session data are kept in XDG_CACHE_HOME/plainbox/.session
# To avoid resuming a real session, we have to select a temporary
# location instead
self._sandbox = tempfile.mkdtemp()
self._env = os.environ
os.environ['XDG_CACHE_HOME'] = self._sandbox
self._exporters = OrderedDict([
('json', JSONSessionStateExporter),
('rfc822', RFC822SessionStateExporter),
('text', TextSessionStateExporter),
('xml', XMLSessionStateExporter),
])
def test_help(self):
with TestIO(combined=True) as io:
with self.assertRaises(SystemExit) as call:
main(['run', '--help'])
self.assertEqual(call.exception.args, (0,))
self.maxDiff = None
expected = """
usage: plainbox run [-h] [--not-interactive] [-n] [-f FORMAT] [-p OPTIONS]
[-o FILE] [-t TRANSPORT] [--transport-where WHERE]
[--transport-options OPTIONS] [-i PATTERN] [-x PATTERN]
[-w WHITELIST]
optional arguments:
-h, --help show this help message and exit
user interface options:
--not-interactive Skip tests that require interactivity
-n, --dry-run Don't actually run any jobs
output options:
-f FORMAT, --output-format FORMAT
Save test results in the specified FORMAT (pass ? for
a list of choices)
-p OPTIONS, --output-options OPTIONS
Comma-separated list of options for the export
mechanism (pass ? for a list of choices)
-o FILE, --output-file FILE
Save test results to the specified FILE (or to stdout
if FILE is -)
-t TRANSPORT, --transport TRANSPORT
use TRANSPORT to send results somewhere (pass ? for a
list of choices)
--transport-where WHERE
Where to send data using the selected transport. This
is passed as-is and is transport-dependent.
--transport-options OPTIONS
Comma-separated list of key-value options (k=v) to be
passed to the transport.
job definition options:
-i PATTERN, --include-pattern PATTERN
Run jobs matching the given regular expression.
Matches from the start to the end of the line.
-x PATTERN, --exclude-pattern PATTERN
Do not run jobs matching the given regular expression.
Matches from the start to the end of the line.
-w WHITELIST, --whitelist WHITELIST
Load whitelist containing run patterns
"""
self.assertEqual(io.combined, cleandoc(expected) + "\n")
def test_run_without_args(self):
with TestIO(combined=True) as io:
with self.assertRaises(SystemExit) as call:
with patch('plainbox.impl.commands.run.authenticate_warmup') as mock_warmup:
mock_warmup.return_value = 0
main(['run'])
self.assertEqual(call.exception.args, (0,))
expected1 = """
===============================[ Analyzing Jobs ]===============================
Estimated duration cannot be determined for automated jobs.
Estimated duration cannot be determined for manual jobs.
==============================[ Running All Jobs ]==============================
==================================[ Results ]===================================
"""
expected2 = """
===============================[ Authentication ]===============================
===============================[ Analyzing Jobs ]===============================
Estimated duration cannot be determined for automated jobs.
Estimated duration cannot be determined for manual jobs.
==============================[ Running All Jobs ]==============================
==================================[ Results ]===================================
"""
self.assertIn(io.combined, [
cleandoc(expected1) + "\n",
cleandoc(expected2) + "\n"])
def test_output_format_list(self):
with TestIO(combined=True) as io:
with self.assertRaises(SystemExit) as call:
with patch('plainbox.impl.commands.run.get_all_exporters') as mock_get_all_exporters:
mock_get_all_exporters.return_value = self._exporters
main(['run', '--output-format=?'])
self.assertEqual(call.exception.args, (0,))
expected = """
Available output formats: json, rfc822, text, xml
"""
self.assertEqual(io.combined, cleandoc(expected) + "\n")
def test_output_option_list(self):
with TestIO(combined=True) as io:
with self.assertRaises(SystemExit) as call:
with patch('plainbox.impl.commands.run.get_all_exporters') as mock_get_all_exporters:
mock_get_all_exporters.return_value = self._exporters
main(['run', '--output-option=?'])
self.assertEqual(call.exception.args, (0,))
expected = """
Each format may support a different set of options
json: with-io-log, squash-io-log, flatten-io-log, with-run-list, with-job-list, with-resource-map, with-job-defs, with-attachments, with-comments, with-job-via, with-job-hash, machine-json
rfc822: with-io-log, squash-io-log, flatten-io-log, with-run-list, with-job-list, with-resource-map, with-job-defs, with-attachments, with-comments, with-job-via, with-job-hash
text: with-io-log, squash-io-log, flatten-io-log, with-run-list, with-job-list, with-resource-map, with-job-defs, with-attachments, with-comments, with-job-via, with-job-hash
xml:
"""
self.assertEqual(io.combined, cleandoc(expected) + "\n")
def tearDown(self):
shutil.rmtree(self._sandbox)
os.environ = self._env
|
zyga/debian.plainbox
|
plainbox/impl/commands/test_run.py
|
Python
|
gpl-3.0
| 7,912
|
'''
Task Coach - Your friendly task manager
Copyright (C) 2004-2013 Task Coach developers <developers@taskcoach.org>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from taskcoachlib.domain import base
from taskcoachlib import patterns
class CategoryList(base.Collection):
@patterns.eventSource
def extend(self, categories, event=None):
super(CategoryList, self).extend(categories, event=event)
for category in self._compositesAndAllChildren(categories):
for categorizable in category.categorizables():
categorizable.addCategory(category, event=event, modify=False)
@patterns.eventSource
def removeItems(self, categories, event=None):
super(CategoryList, self).removeItems(categories, event=event)
for category in self._compositesAndAllChildren(categories):
for categorizable in category.categorizables():
categorizable.removeCategory(category, event=event)
def findCategoryByName(self, name):
for category in self:
recursive = ' -> ' in name
if category.subject(recursive=recursive) == name:
return category
return None
def filteredCategories(self):
return [category for category in self if category.isFiltered()]
@patterns.eventSource
def resetAllFilteredCategories(self, event=None):
for category in self:
category.setFiltered(False, event=event)
|
TaskEvolution/Task-Coach-Evolution
|
taskcoach/taskcoachlib/domain/category/categorycontainer.py
|
Python
|
gpl-3.0
| 2,050
|
#!/usr/bin/env python
# rh2mr.py
import numpy as num
from satvap import satvap
from satmix import satmix
from e2mr import e2mr
def rh2mr(p,t,rh,Tconvert=None):
"""(w1,w2) = rh2mr(p,t,rh,Tconvert)
determine H2O mixing ratio (w, g/kg) given
reference pressure (mbar), temperature (t,K), and
relative humidity (rh,%)
Two mixing ratios are returned: w1 is with RH defined as the ratio
of water vapor partial pressure to saturation vapor pressure and
w2 is with RH defined as the ratio of water vapor mixing ratio to
saturation mixing ratio.
    If provided, Tconvert is used as the temperature threshold below which
    saturation vapor pressure is computed over ice instead of over water.
DCT 3/5/00
"""
# saturation pressure
if (Tconvert is None):
esat = satvap(t) # Goff Gratch formulation, over water
wsat = satmix(p,t) # Goff Gratch formulation, over water
else:
esat = satvap(t,Tconvert) # Goff Gratch formulation, over water/ice
wsat = satmix(p,t,Tconvert) # Goff Gratch formulation, over water/ice
# H2O partial pressure
e = rh/100.0*esat
# H2O mixing ratio
w1 = e2mr(p,e)
# using WMO definition of relative humidity
w2 = rh/100.0*wsat
return(w1,w2)
if __name__ == '__main__':
from mr2rh import mr2rh
print(rh2mr.__doc__)
p = num.array(
( 1012.0, 991.3, 969.1, 945.5, 920.4, 893.8, 865.7, 836.1, 805.1, 772.8,
739.5, 705.2, 670.3, 635.0, 599.7, 564.5, 529.8, 495.7, 462.6, 430.7,
400.0, 370.8, 343.0, 316.7, 292.0, 266.8, 247.2, 227.0, 208.2, 190.8,
174.7, 159.9, 146.2, 133.6, 121.9, 111.3, 101.5, 92.6, 84.4, 76.9,
70.0 ))
t = num.array(
( 24.54, 23.16, 21.67, 20.23, 18.86, 17.49, 16.10, 14.69, 13.22, 11.52,
9.53, 7.24, 4.80, 2.34, 0.04, -2.29, -4.84, -7.64,-10.66,-13.95,
-17.54,-21.45,-25.58,-29.90,-34.33,-38.94,-43.78,-48.80,-53.94,-58.79,
-63.27,-67.32,-70.74,-73.62,-75.74,-77.07,-77.43,-76.63,-75.06,-73.14,
-71.43 ))
t = t + 273.15
r = num.array(
( 17.78, 16.92, 15.93, 14.87, 13.78, 12.70, 11.84, 10.96, 10.15, 9.31,
8.46, 7.73, 7.05, 6.32, 5.62, 4.91, 4.10, 3.30, 2.67, 2.15,
1.66, 1.26, 0.95, 0.68, 0.45, 0.28, 0.17, 0.10, 0.06, 0.04,
0.02, 0.02, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01,
0.02 ))
(rh1,rh2) = mr2rh(p,t,r,253.15)
(w1,w2) = rh2mr(p,t,rh1,253.15)
print(w1)
(w1,w2) = rh2mr(p,t,rh2,253.15)
print(w2)
|
graziano-giuliani/pythoncode
|
pyuwphysret/common/pyfiles/atmos/rh2mr.py
|
Python
|
mit
| 2,422
|
# Initialisation
from time import sleep
from NaoCommunication import *
nao=NaoControle(Nao())
# 1 - What does this code do?
# ...
for nombre in range(0,11,1):
    nao.dire(str(nombre))
# 2 - What does this function do?
# ...
sleep(3)
# 3 - How would you make the robot count the seconds up to 10?
# 4 - Fix the problem in this piece of code:
for nombre in range(0,11,1):
    nao.dire(str(nombre))
# 5 - Explain why it did not work.
# ...
# 6 - Fix the problem in this piece of code:
for nombre in range(0,11,1):
    nao.dire(str(Nombre))
# 7 - Explain why it did not work.
# ...
|
AdrienVR/NaoSimulator
|
TPINFO/Partie1/exercice2.py
|
Python
|
lgpl-3.0
| 619
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "gooosieblog.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
gooosie/django-blog
|
manage.py
|
Python
|
apache-2.0
| 809
|
from theano import tensor as TT
from theano.ifelse import ifelse
from ObjectiveFunction import ObjectiveFunction
from infos.InfoElement import PrintableInfoElement
from infos.InfoList import InfoList
from learningRule.LearningStepRule import LearningStepRule
from theanoUtils import is_inf_or_nan
__author__ = 'giulio'
class GradientClipping(LearningStepRule):
def __init__(self, lr_value=0.01, clip_thr=1., clip_style: str = 'l1'):
self.__lr_value = lr_value
self.__clip_thr = clip_thr
self.__clip_style = clip_style.lower()
def compute_lr(self, net, obj_fnc: ObjectiveFunction, direction):
if self.__clip_style == "l2":
norm = direction.norm(2)
elif self.__clip_style == "l1":
norm = direction.norm(1) # * direction.shape[0] * direction.shape[1]
else:
raise AttributeError(
"not supported clip_style '{}', available styles are 'l1' and 'l2'".format(self.__clip_style))
lr = self.__lr_value
computed_learning_rate = ifelse(TT.or_(norm < self.__clip_thr, is_inf_or_nan(norm)),
TT.cast(lr, dtype='float32'), TT.cast((self.__clip_thr / norm) * lr, dtype="float32"))
return computed_learning_rate, LearningStepRule.Infos(computed_learning_rate)
@property
def updates(self):
return []
@property
def infos(self):
step_info = PrintableInfoElement('constant_step', ':02.2e', self.__lr_value)
thr_info = PrintableInfoElement('clipping_thr', ':02.2e', self.__clip_thr)
clip_style = PrintableInfoElement('clip_style', '', self.__clip_style)
info = InfoList(step_info, thr_info, clip_style)
return info
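# Worked example (added for clarity; the values are illustrative): in
# compute_lr with lr_value=0.01 and clip_thr=1.0, a direction whose norm is
# 4.0 exceeds the threshold, so the step becomes (1.0 / 4.0) * 0.01 = 0.0025
# and the resulting update has norm clip_thr * lr_value = 0.01; a norm of
# 0.5 (or an inf/nan norm) leaves the step at the plain 0.01.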
|
GiulioGx/RNNs
|
sources/learningRule/GradientClipping.py
|
Python
|
lgpl-3.0
| 1,739
|
"""
This module creates a sysadmin dashboard for managing and viewing
courses.
"""
import csv
import json
import logging
import os
import subprocess
import time
import StringIO
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.db import IntegrityError
from django.http import HttpResponse, Http404
from django.utils.decorators import method_decorator
from django.utils.html import escape
from django.utils import timezone
from django.utils.translation import ugettext as _
from django.views.decorators.cache import cache_control
from django.views.generic.base import TemplateView
from django.views.decorators.http import condition
from django.views.decorators.csrf import ensure_csrf_cookie
from edxmako.shortcuts import render_to_response
import mongoengine
from path import path
from courseware.courses import get_course_by_id
import dashboard.git_import as git_import
from dashboard.git_import import GitImportError
from student.roles import CourseStaffRole, CourseInstructorRole
from dashboard.models import CourseImportLog
from external_auth.models import ExternalAuthMap
from external_auth.views import generate_password
from student.models import CourseEnrollment, UserProfile, Registration
import track.views
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.xml import XMLModuleStore
from opaque_keys.edx.locations import SlashSeparatedCourseKey
log = logging.getLogger(__name__)
class SysadminDashboardView(TemplateView):
"""Base class for sysadmin dashboard views with common methods"""
template_name = 'sysadmin_dashboard.html'
def __init__(self, **kwargs):
"""
Initialize base sysadmin dashboard class with modulestore,
modulestore_type and return msg
"""
self.def_ms = modulestore()
self.is_using_mongo = True
if isinstance(self.def_ms, XMLModuleStore):
self.is_using_mongo = False
self.msg = u''
self.datatable = []
super(SysadminDashboardView, self).__init__(**kwargs)
@method_decorator(ensure_csrf_cookie)
@method_decorator(login_required)
@method_decorator(cache_control(no_cache=True, no_store=True,
must_revalidate=True))
@method_decorator(condition(etag_func=None))
def dispatch(self, *args, **kwargs):
return super(SysadminDashboardView, self).dispatch(*args, **kwargs)
def get_courses(self):
""" Get an iterable list of courses."""
return self.def_ms.get_courses()
def return_csv(self, filename, header, data):
"""
Convenient function for handling the http response of a csv.
data should be iterable and is used to stream object over http
"""
csv_file = StringIO.StringIO()
writer = csv.writer(csv_file, dialect='excel', quotechar='"',
quoting=csv.QUOTE_ALL)
writer.writerow(header)
# Setup streaming of the data
def read_and_flush():
"""Read and clear buffer for optimization"""
csv_file.seek(0)
csv_data = csv_file.read()
csv_file.seek(0)
csv_file.truncate()
return csv_data
def csv_data():
"""Generator for handling potentially large CSVs"""
for row in data:
writer.writerow(row)
csv_data = read_and_flush()
yield csv_data
response = HttpResponse(csv_data(), mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename={0}'.format(
filename)
return response
class Users(SysadminDashboardView):
"""
The status view provides Web based user management, a listing of
courses loaded, and user statistics
"""
def fix_external_auth_map_passwords(self):
"""
This corrects any passwords that have drifted from eamap to
internal django auth. Needs to be removed when fixed in external_auth
"""
msg = ''
for eamap in ExternalAuthMap.objects.all():
euser = eamap.user
epass = eamap.internal_password
if euser is None:
continue
try:
testuser = authenticate(username=euser.username, password=epass)
except (TypeError, PermissionDenied, AttributeError), err:
# Translators: This message means that the user could not be authenticated (that is, we could
# not log them in for some reason - maybe they don't have permission, or their password was wrong)
msg += _('Failed in authenticating {username}, error {error}\n').format(
username=euser,
error=err
)
continue
if testuser is None:
# Translators: This message means that the user could not be authenticated (that is, we could
# not log them in for some reason - maybe they don't have permission, or their password was wrong)
msg += _('Failed in authenticating {username}\n').format(username=euser)
# Translators: this means that the password has been corrected (sometimes the database needs to be resynchronized)
# Translate this as meaning "the password was fixed" or "the password was corrected".
msg += _('fixed password')
euser.set_password(epass)
euser.save()
continue
if not msg:
# Translators: this means everything happened successfully, yay!
msg = _('All ok!')
return msg
def create_user(self, uname, name, password=None):
""" Creates a user (both SSL and regular)"""
if not uname:
return _('Must provide username')
if not name:
return _('Must provide full name')
email_domain = getattr(settings, 'SSL_AUTH_EMAIL_DOMAIN', 'MIT.EDU')
msg = u''
if settings.FEATURES['AUTH_USE_CERTIFICATES']:
if '@' not in uname:
email = '{0}@{1}'.format(uname, email_domain)
else:
email = uname
if not email.endswith('@{0}'.format(email_domain)):
# Translators: Domain is an email domain, such as "@gmail.com"
msg += _('Email address must end in {domain}').format(domain="@{0}".format(email_domain))
return msg
mit_domain = 'ssl:MIT'
if ExternalAuthMap.objects.filter(external_id=email,
external_domain=mit_domain):
msg += _('Failed - email {email_addr} already exists as {external_id}').format(
email_addr=email,
external_id="external_id"
)
return msg
new_password = generate_password()
else:
if not password:
return _('Password must be supplied if not using certificates')
email = uname
if '@' not in email:
msg += _('email address required (not username)')
return msg
new_password = password
user = User(username=uname, email=email, is_active=True)
user.set_password(new_password)
try:
user.save()
except IntegrityError:
msg += _('Oops, failed to create user {user}, {error}').format(
user=user,
error="IntegrityError"
)
return msg
reg = Registration()
reg.register(user)
profile = UserProfile(user=user)
profile.name = name
profile.save()
if settings.FEATURES['AUTH_USE_CERTIFICATES']:
credential_string = getattr(settings, 'SSL_AUTH_DN_FORMAT_STRING',
'/C=US/ST=Massachusetts/O=Massachusetts Institute of Technology/OU=Client CA v1/CN={0}/emailAddress={1}')
credentials = credential_string.format(name, email)
eamap = ExternalAuthMap(
external_id=email,
external_email=email,
external_domain=mit_domain,
external_name=name,
internal_password=new_password,
external_credentials=json.dumps(credentials),
)
eamap.user = user
eamap.dtsignup = timezone.now()
eamap.save()
msg += _('User {user} created successfully!').format(user=user)
return msg
def delete_user(self, uname):
"""Deletes a user from django auth"""
if not uname:
return _('Must provide username')
if '@' in uname:
try:
user = User.objects.get(email=uname)
except User.DoesNotExist, err:
msg = _('Cannot find user with email address {email_addr}').format(email_addr=uname)
return msg
else:
try:
user = User.objects.get(username=uname)
except User.DoesNotExist, err:
msg = _('Cannot find user with username {username} - {error}').format(
username=uname,
error=str(err)
)
return msg
user.delete()
return _('Deleted user {username}').format(username=uname)
def make_common_context(self):
"""Returns the datatable used for this view"""
self.datatable = {}
self.datatable = dict(header=[_('Statistic'), _('Value')],
title=_('Site statistics'))
self.datatable['data'] = [[_('Total number of users'),
User.objects.all().count()]]
self.msg += u'<h2>{0}</h2>'.format(
_('Courses loaded in the modulestore')
)
self.msg += u'<ol>'
for course in self.get_courses():
self.msg += u'<li>{0} ({1})</li>'.format(
escape(course.id.to_deprecated_string()), course.location.to_deprecated_string())
self.msg += u'</ol>'
def get(self, request):
if not request.user.is_staff:
raise Http404
self.make_common_context()
context = {
'datatable': self.datatable,
'msg': self.msg,
'djangopid': os.getpid(),
'modeflag': {'users': 'active-section'},
'edx_platform_version': getattr(settings, 'EDX_PLATFORM_VERSION_STRING', ''),
}
return render_to_response(self.template_name, context)
def post(self, request):
"""Handle various actions available on page"""
if not request.user.is_staff:
raise Http404
self.make_common_context()
action = request.POST.get('action', '')
track.views.server_track(request, action, {}, page='user_sysdashboard')
if action == 'download_users':
header = [_('username'), _('email'), ]
data = ([u.username, u.email] for u in
(User.objects.all().iterator()))
return self.return_csv('users_{0}.csv'.format(
request.META['SERVER_NAME']), header, data)
elif action == 'repair_eamap':
self.msg = u'<h4>{0}</h4><pre>{1}</pre>{2}'.format(
_('Repair Results'),
self.fix_external_auth_map_passwords(),
self.msg)
self.datatable = {}
elif action == 'create_user':
uname = request.POST.get('student_uname', '').strip()
name = request.POST.get('student_fullname', '').strip()
password = request.POST.get('student_password', '').strip()
self.msg = u'<h4>{0}</h4><p>{1}</p><hr />{2}'.format(
_('Create User Results'),
self.create_user(uname, name, password), self.msg)
elif action == 'del_user':
uname = request.POST.get('student_uname', '').strip()
self.msg = u'<h4>{0}</h4><p>{1}</p><hr />{2}'.format(
_('Delete User Results'), self.delete_user(uname), self.msg)
context = {
'datatable': self.datatable,
'msg': self.msg,
'djangopid': os.getpid(),
'modeflag': {'users': 'active-section'},
'edx_platform_version': getattr(settings, 'EDX_PLATFORM_VERSION_STRING', ''),
}
return render_to_response(self.template_name, context)
class Courses(SysadminDashboardView):
"""
This manages adding/updating courses from git, deleting courses, and
provides course listing information.
"""
def git_info_for_course(self, cdir):
"""This pulls out some git info like the last commit"""
cmd = ''
gdir = settings.DATA_DIR / cdir
info = ['', '', '']
# Try the data dir, then try to find it in the git import dir
if not gdir.exists():
gdir = path(git_import.GIT_REPO_DIR) / cdir
if not gdir.exists():
return info
cmd = ['git', 'log', '-1',
'--format=format:{ "commit": "%H", "author": "%an %ae", "date": "%ad"}', ]
try:
output_json = json.loads(subprocess.check_output(cmd, cwd=gdir))
info = [output_json['commit'],
output_json['date'],
output_json['author'], ]
except (ValueError, subprocess.CalledProcessError):
pass
return info
def get_course_from_git(self, gitloc, branch):
"""This downloads and runs the checks for importing a course in git"""
if not (gitloc.endswith('.git') or gitloc.startswith('http:') or
gitloc.startswith('https:') or gitloc.startswith('git:')):
return _("The git repo location should end with '.git', "
"and be a valid url")
if self.is_using_mongo:
return self.import_mongo_course(gitloc, branch)
return self.import_xml_course(gitloc, branch)
def import_mongo_course(self, gitloc, branch):
"""
Imports course using management command and captures logging output
at debug level for display in template
"""
msg = u''
log.debug('Adding course using git repo {0}'.format(gitloc))
# Grab logging output for debugging imports
output = StringIO.StringIO()
import_log_handler = logging.StreamHandler(output)
import_log_handler.setLevel(logging.DEBUG)
logger_names = ['xmodule.modulestore.xml_importer',
'dashboard.git_import',
'xmodule.modulestore.xml',
'xmodule.seq_module', ]
loggers = []
for logger_name in logger_names:
logger = logging.getLogger(logger_name)
logger.setLevel(logging.DEBUG)
logger.addHandler(import_log_handler)
loggers.append(logger)
error_msg = ''
try:
git_import.add_repo(gitloc, None, branch)
except GitImportError as ex:
error_msg = str(ex)
ret = output.getvalue()
# Remove handler hijacks
for logger in loggers:
logger.setLevel(logging.NOTSET)
logger.removeHandler(import_log_handler)
if error_msg:
msg_header = error_msg
color = 'red'
else:
msg_header = _('Added Course')
color = 'blue'
msg = u"<h4 style='color:{0}'>{1}</h4>".format(color, msg_header)
msg += u"<pre>{0}</pre>".format(escape(ret))
return msg
def import_xml_course(self, gitloc, branch):
"""Imports a git course into the XMLModuleStore"""
msg = u''
if not getattr(settings, 'GIT_IMPORT_WITH_XMLMODULESTORE', False):
# Translators: "GIT_IMPORT_WITH_XMLMODULESTORE" is a variable name.
# "XMLModuleStore" and "MongoDB" are database systems. You should not
# translate these names.
return _('Refusing to import. GIT_IMPORT_WITH_XMLMODULESTORE is '
'not turned on, and it is generally not safe to import '
                 'into an XMLModuleStore in a multithreaded setup. We '
'recommend you enable the MongoDB based module store '
'instead, unless this is a development environment.')
cdir = (gitloc.rsplit('/', 1)[1])[:-4]
gdir = settings.DATA_DIR / cdir
if os.path.exists(gdir):
msg += _("The course {0} already exists in the data directory! "
"(reloading anyway)").format(cdir)
cmd = ['git', 'pull', ]
cwd = gdir
else:
cmd = ['git', 'clone', gitloc, ]
cwd = settings.DATA_DIR
cwd = os.path.abspath(cwd)
try:
cmd_output = escape(
subprocess.check_output(cmd, stderr=subprocess.STDOUT, cwd=cwd)
)
except subprocess.CalledProcessError as ex:
log.exception('Git pull or clone output was: %r', ex.output)
# Translators: unable to download the course content from
# the source git repository. Clone occurs if this is brand
# new, and pull is when it is being updated from the
# source.
return _('Unable to clone or pull repository. Please check '
'your url. Output was: {0!r}').format(ex.output)
msg += u'<pre>{0}</pre>'.format(cmd_output)
if not os.path.exists(gdir):
msg += _('Failed to clone repository to {directory_name}').format(directory_name=gdir)
return msg
# Change branch if specified
if branch:
try:
git_import.switch_branch(branch, gdir)
except GitImportError as ex:
return str(ex)
# Translators: This is a git repository branch, which is a
# specific version of a courses content
msg += u'<p>{0}</p>'.format(
_('Successfully switched to branch: '
'{branch_name}').format(branch_name=branch))
self.def_ms.try_load_course(os.path.abspath(gdir))
errlog = self.def_ms.errored_courses.get(cdir, '')
if errlog:
msg += u'<hr width="50%"><pre>{0}</pre>'.format(escape(errlog))
else:
course = self.def_ms.courses[os.path.abspath(gdir)]
msg += _('Loaded course {course_name}<br/>Errors:').format(
course_name="{} {}".format(cdir, course.display_name)
)
errors = self.def_ms.get_course_errors(course.id)
if not errors:
msg += u'None'
else:
msg += u'<ul>'
for (summary, err) in errors:
msg += u'<li><pre>{0}: {1}</pre></li>'.format(escape(summary),
escape(err))
msg += u'</ul>'
return msg
def make_datatable(self):
"""Creates course information datatable"""
data = []
for course in self.get_courses():
gdir = course.id.course
data.append([course.display_name, course.id.to_deprecated_string()]
+ self.git_info_for_course(gdir))
return dict(header=[_('Course Name'),
_('Directory/ID'),
# Translators: "Git Commit" is a computer command; see http://gitref.org/basic/#commit
_('Git Commit'),
_('Last Change'),
_('Last Editor')],
title=_('Information about all courses'),
data=data)
def get(self, request):
"""Displays forms and course information"""
if not request.user.is_staff:
raise Http404
context = {
'datatable': self.make_datatable(),
'msg': self.msg,
'djangopid': os.getpid(),
'modeflag': {'courses': 'active-section'},
'edx_platform_version': getattr(settings, 'EDX_PLATFORM_VERSION_STRING', ''),
}
return render_to_response(self.template_name, context)
def post(self, request):
"""Handle all actions from courses view"""
if not request.user.is_staff:
raise Http404
action = request.POST.get('action', '')
track.views.server_track(request, action, {},
page='courses_sysdashboard')
courses = {course.id: course for course in self.get_courses()}
if action == 'add_course':
gitloc = request.POST.get('repo_location', '').strip().replace(' ', '').replace(';', '')
branch = request.POST.get('repo_branch', '').strip().replace(' ', '').replace(';', '')
self.msg += self.get_course_from_git(gitloc, branch)
elif action == 'del_course':
course_id = request.POST.get('course_id', '').strip()
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course_found = False
if course_key in courses:
course_found = True
course = courses[course_key]
else:
try:
course = get_course_by_id(course_key)
course_found = True
except Exception, err: # pylint: disable=broad-except
self.msg += _(
'Error - cannot get course with ID {0}<br/><pre>{1}</pre>'
).format(
course_key,
escape(str(err))
)
is_xml_course = (modulestore().get_modulestore_type(course_key) == ModuleStoreEnum.Type.xml)
if course_found and is_xml_course:
cdir = course.data_dir
self.def_ms.courses.pop(cdir)
# now move the directory (don't actually delete it)
new_dir = "{course_dir}_deleted_{timestamp}".format(
course_dir=cdir,
timestamp=int(time.time())
)
os.rename(settings.DATA_DIR / cdir, settings.DATA_DIR / new_dir)
self.msg += (u"<font color='red'>Deleted "
u"{0} = {1} ({2})</font>".format(
cdir, course.id, course.display_name))
elif course_found and not is_xml_course:
# delete course that is stored with mongodb backend
self.def_ms.delete_course(course.id, request.user.id)
# don't delete user permission groups, though
self.msg += \
u"<font color='red'>{0} {1} = {2} ({3})</font>".format(
_('Deleted'), course.location.to_deprecated_string(), course.id.to_deprecated_string(), course.display_name)
context = {
'datatable': self.make_datatable(),
'msg': self.msg,
'djangopid': os.getpid(),
'modeflag': {'courses': 'active-section'},
'edx_platform_version': getattr(settings, 'EDX_PLATFORM_VERSION_STRING', ''),
}
return render_to_response(self.template_name, context)
class Staffing(SysadminDashboardView):
"""
The status view provides a view of staffing and enrollment in
courses that include an option to download the data as a csv.
"""
def get(self, request):
"""Displays course Enrollment and staffing course statistics"""
if not request.user.is_staff:
raise Http404
data = []
for course in self.get_courses(): # pylint: disable=unused-variable
datum = [course.display_name, course.id]
datum += [CourseEnrollment.objects.filter(
course_id=course.id).count()]
datum += [CourseStaffRole(course.id).users_with_role().count()]
datum += [','.join([x.username for x in CourseInstructorRole(
course.id).users_with_role()])]
data.append(datum)
datatable = dict(header=[_('Course Name'), _('course_id'),
_('# enrolled'), _('# staff'),
_('instructors')],
title=_('Enrollment information for all courses'),
data=data)
context = {
'datatable': datatable,
'msg': self.msg,
'djangopid': os.getpid(),
'modeflag': {'staffing': 'active-section'},
'edx_platform_version': getattr(settings, 'EDX_PLATFORM_VERSION_STRING', ''),
}
return render_to_response(self.template_name, context)
def post(self, request):
"""Handle all actions from staffing and enrollment view"""
action = request.POST.get('action', '')
track.views.server_track(request, action, {},
page='staffing_sysdashboard')
if action == 'get_staff_csv':
data = []
roles = [CourseInstructorRole, CourseStaffRole, ]
for course in self.get_courses(): # pylint: disable=unused-variable
for role in roles:
for user in role(course.id).users_with_role():
datum = [course.id, role, user.username, user.email,
user.profile.name]
data.append(datum)
header = [_('course_id'),
_('role'), _('username'),
_('email'), _('full_name'), ]
return self.return_csv('staff_{0}.csv'.format(
request.META['SERVER_NAME']), header, data)
return self.get(request)
class GitLogs(TemplateView):
"""
This provides a view into the import of courses from git repositories.
It is convenient for allowing course teams to see what may be wrong with
their xml
"""
template_name = 'sysadmin_dashboard_gitlogs.html'
@method_decorator(login_required)
def get(self, request, *args, **kwargs):
"""Shows logs of imports that happened as a result of a git import"""
course_id = kwargs.get('course_id')
if course_id:
course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id)
page_size = 10
# Set mongodb defaults even if it isn't defined in settings
mongo_db = {
'host': 'localhost',
'user': '',
'password': '',
'db': 'xlog',
}
# Allow overrides
if hasattr(settings, 'MONGODB_LOG'):
for config_item in ['host', 'user', 'password', 'db', ]:
mongo_db[config_item] = settings.MONGODB_LOG.get(
config_item, mongo_db[config_item])
mongouri = 'mongodb://{user}:{password}@{host}/{db}'.format(**mongo_db)
        error_msg = ''
        mdb = None
try:
if mongo_db['user'] and mongo_db['password']:
mdb = mongoengine.connect(mongo_db['db'], host=mongouri)
else:
mdb = mongoengine.connect(mongo_db['db'], host=mongo_db['host'])
except mongoengine.connection.ConnectionError:
log.exception('Unable to connect to mongodb to save log, '
'please check MONGODB_LOG settings.')
if course_id is None:
# Require staff if not going to specific course
if not request.user.is_staff:
raise Http404
cilset = CourseImportLog.objects.order_by('-created')
else:
try:
course = get_course_by_id(course_id)
except Exception: # pylint: disable=broad-except
log.info('Cannot find course {0}'.format(course_id))
raise Http404
# Allow only course team, instructors, and staff
if not (request.user.is_staff or
CourseInstructorRole(course.id).has_user(request.user) or
CourseStaffRole(course.id).has_user(request.user)):
raise Http404
log.debug('course_id={0}'.format(course_id))
cilset = CourseImportLog.objects.filter(
course_id=course_id
).order_by('-created')
log.debug('cilset length={0}'.format(len(cilset)))
# Paginate the query set
paginator = Paginator(cilset, page_size)
try:
logs = paginator.page(request.GET.get('page'))
except PageNotAnInteger:
logs = paginator.page(1)
except EmptyPage:
# If the page is too high or low
given_page = int(request.GET.get('page'))
page = min(max(1, given_page), paginator.num_pages)
logs = paginator.page(page)
        if mdb is not None:
            mdb.disconnect()
context = {
'logs': logs,
'course_id': course_id.to_deprecated_string() if course_id else None,
'error_msg': error_msg,
'page_size': page_size
}
return render_to_response(self.template_name, context)
|
shubhdev/openedx
|
lms/djangoapps/dashboard/sysadmin.py
|
Python
|
agpl-3.0
| 29,650
|
import os
from typing import Iterator
from .base_parser import BaseParser, PipelineSpec
class BasicPipelineParser(BaseParser):
SPEC_FILENAME = 'pipeline-spec.yaml'
@classmethod
def check_filename(cls, filename):
return filename == cls.SPEC_FILENAME
@classmethod
def to_pipeline(cls, spec, fullpath, root_dir='.') -> Iterator[PipelineSpec]:
dirpath = os.path.dirname(fullpath)
for pipeline_id, pipeline_details in spec.items():
pipeline_id = os.path.join(dirpath, pipeline_id)
pipeline_id = cls.replace_root_dir(pipeline_id, root_dir)
yield PipelineSpec(path=dirpath,
pipeline_id=pipeline_id,
pipeline_details=pipeline_details)
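# Illustrative sketch (added; the filename and keys are hypothetical): given
# a spec file './downloads/pipeline-spec.yaml' whose parsed contents are
#     {'clean': {...}, 'publish': {...}}
# to_pipeline() yields one PipelineSpec per key, with path './downloads' and
# pipeline_id './downloads/clean' resp. './downloads/publish' (before the
# root_dir rewriting performed by replace_root_dir).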
|
frictionlessdata/datapackage-pipelines
|
datapackage_pipelines/specs/parsers/basic_pipeline.py
|
Python
|
mit
| 776
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2011-2018, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
"""
Utility functions
^^^^^^^^^^^^^^^^^
Module util provides a set of useful helpers which are currently not
attributable to the other modules
.. autosummary::
:nosignatures:
:toctree: generated/
from_to
maximum_intensity_projection
filter_window_polar
filter_window_cartesian
find_bbox_indices
get_raster_origin
calculate_polynomial
"""
import datetime as dt
from datetime import tzinfo, timedelta
import os
from importlib import import_module
import numpy as np
from scipy.ndimage import filters
from osgeo import gdal, ogr
from scipy.signal import medfilt
class OptionalModuleStub(object):
"""Stub class for optional imports.
Objects of this class are instantiated when optional modules are not
present on the user's machine.
This allows global imports of optional modules with the code only breaking
when actual attributes from this module are called.
"""
def __init__(self, name):
self.name = name
def __getattr__(self, name):
link = 'https://wradlib.github.io/wradlib-docs/latest/' \
'gettingstarted.html#optional-dependencies'
raise AttributeError('Module "{0}" is not installed.\n\n'
'You tried to access function/module/attribute '
'"{1}"\nfrom module "{0}".\nThis module is '
'optional right now in wradlib.\nYou need to '
'separately install this dependency.\n'
'Please refer to {2}\nfor further instructions.'.
format(self.name, name, link))
def import_optional(module):
"""Allowing for lazy loading of optional wradlib modules or dependencies.
This function removes the need to satisfy all dependencies of wradlib
before being able to work with it.
Parameters
----------
module : string
name of the module
Returns
-------
mod : object
if module is present, returns the module object, on ImportError
returns an instance of `OptionalModuleStub` which will raise an
AttributeError as soon as any attribute is accessed.
Examples
--------
Trying to import a module that exists makes the module available as normal.
You can even use an alias. You cannot use the '*' notation, or import only
select functions, but you can simulate most of the standard import syntax
behavior
>>> m = import_optional('math')
>>> m.log10(100)
2.0
    Trying to import a module that does not exist does not produce
    any errors. Only when some attribute is accessed, the code triggers an error
>>> m = import_optional('nonexistentmodule') # noqa
>>> m.log10(100) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: Module "nonexistentmodule" is not installed.
<BLANKLINE>
You tried to access function/module/attribute "log10"
from module "nonexistentmodule".
This module is optional right now in wradlib.
You need to separately install this dependency.
Please refer to https://wradlib.github.io/wradlib-docs/\
latest/gettingstarted.html#optional-dependencies
for further instructions.
"""
try:
mod = import_module(module)
except ImportError:
mod = OptionalModuleStub(module)
return mod
def _shape_to_size(shape):
"""
Compute the size which corresponds to a shape
"""
out = 1
for item in shape:
out *= item
return out
def from_to(tstart, tend, tdelta):
"""Return a list of timesteps from <tstart> to <tend> of length <tdelta>
Parameters
----------
tstart : datetime isostring (%Y%m%d %H:%M:%S), e.g. 2000-01-01 15:34:12
or datetime object
tend : datetime isostring (%Y%m%d %H:%M:%S), e.g. 2000-01-01 15:34:12
or datetime object
tdelta : integer representing time interval in SECONDS
Returns
-------
output : list of datetime.datetime objects
"""
if not type(tstart) == dt.datetime:
tstart = dt.datetime.strptime(tstart, "%Y-%m-%d %H:%M:%S")
if not type(tend) == dt.datetime:
tend = dt.datetime.strptime(tend, "%Y-%m-%d %H:%M:%S")
tdelta = dt.timedelta(seconds=tdelta)
tsteps = [tstart, ]
tmptime = tstart
while True:
tmptime = tmptime + tdelta
if tmptime > tend:
break
else:
tsteps.append(tmptime)
return tsteps
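# Example (added for illustration):
#     from_to("2000-01-01 00:00:00", "2000-01-01 01:00:00", 1800)
# returns three datetimes (00:00, 00:30 and 01:00); the end point is kept
# because it falls exactly on the last step.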
def _idvalid(data, isinvalid=None, minval=None, maxval=None):
"""Identifies valid entries in an array and returns the corresponding
indices
Invalid values are NaN and Inf. Other invalid values can be passed using
the isinvalid keyword argument.
Parameters
----------
data : :class:`numpy:numpy.ndarray` of floats
isinvalid : list of what is considered an invalid value
"""
if isinvalid is None:
isinvalid = [-99., 99, -9999., -9999]
ix = np.ma.masked_invalid(data).mask
for el in isinvalid:
ix = np.logical_or(ix, np.ma.masked_where(data == el, data).mask)
if minval is not None:
ix = np.logical_or(ix, np.ma.masked_less(data, minval).mask)
if maxval is not None:
ix = np.logical_or(ix, np.ma.masked_greater(data, maxval).mask)
return np.where(np.logical_not(ix))[0]
def meshgrid_n(*arrs):
"""N-dimensional meshgrid
Just pass sequences of coordinates arrays
"""
arrs = tuple(arrs)
lens = list(map(len, arrs))
dim = len(arrs)
sz = 1
for s in lens:
sz *= s
ans = []
for i, arr in enumerate(arrs):
slc = [1] * dim
slc[i] = lens[i]
arr2 = np.asarray(arr).reshape(slc)
for j, sz in enumerate(lens):
if j != i:
arr2 = arr2.repeat(sz, axis=j)
ans.append(arr2)
# return tuple(ans[::-1])
return tuple(ans)
def gridaspoints(*arrs):
"""Creates an N-dimensional grid form arrs and returns grid points sequence
of point coordinate pairs
"""
# there is a small gotcha here.
# with the convention following the 2013-08-30 sprint in Potsdam it was
# agreed upon that arrays should have shapes (...,z,y,x) similar to the
# convention that polar data should be (...,time,scan,azimuth,range)
#
# Still coordinate tuples are given in the order (x,y,z) [and hopefully not
# more dimensions]. Therefore np.meshgrid must be fed the axis coordinates
# in shape order (z,y,x) and the result needs to be reversed in order
# for everything to work out.
grid = tuple([dim.ravel()
for dim in reversed(np.meshgrid(*arrs, indexing='ij'))])
return np.vstack(grid).transpose()
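# Worked example (added for illustration): following the (..., y, x) axis
# convention discussed above,
#     gridaspoints(np.array([10, 11]), np.array([0, 1]))
# returns the (x, y) point sequence
#     [[0, 10], [1, 10], [0, 11], [1, 11]]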
def issequence(x):
"""Test whether x is a sequence of numbers
Parameters
----------
x : sequence to test
"""
out = True
try:
# can we get a length on the object
len(x)
except TypeError:
return False
# is the object not a string?
out = np.all(np.isreal(x))
return out
def trapezoid(data, x1, x2, x3, x4):
"""
Applied the trapezoidal function described in :cite:`Vulpiani`
to determine the degree of membership in the non-meteorological
target class.
Parameters
----------
data : :class:`numpy:numpy.ndarray`
Array containing the data
x1 : float
x-value of the first vertex of the trapezoid
x2 : float
x-value of the second vertex of the trapezoid
x3 : float
x-value of the third vertex of the trapezoid
x4 : float
x-value of the fourth vertex of the trapezoid
Returns
-------
d : :class:`numpy:numpy.ndarray`
Array of values describing degree of membership in
nonmeteorological target class.
"""
d = np.ones(np.shape(data))
d[np.logical_or(data <= x1, data >= x4)] = 0
d[np.logical_and(data >= x2, data <= x3)] = 1
d[np.logical_and(data > x1, data < x2)] = \
(data[np.logical_and(data > x1, data < x2)] - x1) / float((x2 - x1))
d[np.logical_and(data > x3, data < x4)] = \
(x4 - data[np.logical_and(data > x3, data < x4)]) / float((x4 - x3))
d[np.isnan(data)] = np.nan
return d
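# Worked example (added for illustration): with vertices x1..x4 = 0, 1, 2, 3
# membership is 0 outside [0, 3] and 1 on [1, 2]; on the ramps it is linear,
# e.g. data = 0.5 gives (0.5 - 0) / (1 - 0) = 0.5 and data = 2.5 gives
# (3 - 2.5) / (3 - 2) = 0.5.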
def maximum_intensity_projection(data, r=None, az=None, angle=None,
elev=None, autoext=True):
"""Computes the maximum intensity projection along an arbitrary cut \
through the ppi from polar data.
Parameters
----------
data : :class:`numpy:numpy.ndarray`
Array containing polar data (azimuth, range)
r : :class:`numpy:numpy.ndarray`
Array containing range data
az : array
Array containing azimuth data
angle : float
angle of slice, Defaults to 0. Should be between 0 and 180.
0. means horizontal slice, 90. means vertical slice
elev : float
elevation angle of scan, Defaults to 0.
autoext : True | False
This routine uses numpy.digitize to bin the data.
As this function needs bounds, we create one set of coordinates more
than would usually be provided by `r` and `az`.
Returns
-------
xs : :class:`numpy:numpy.ndarray`
meshgrid x array
ys : :class:`numpy:numpy.ndarray`
meshgrid y array
mip : :class:`numpy:numpy.ndarray`
Array containing the maximum intensity projection (range, range*2)
"""
from wradlib.georef import bin_altitude as bin_altitude
# this may seem odd at first, but d1 and d2 are also used in several
# plotting functions and thus it may be easier to compare the functions
d1 = r
d2 = az
# providing 'reasonable defaults', based on the data's shape
if d1 is None:
d1 = np.arange(data.shape[1], dtype=np.float)
if d2 is None:
d2 = np.arange(data.shape[0], dtype=np.float)
if angle is None:
angle = 0.0
if elev is None:
elev = 0.0
if autoext:
# the ranges need to go 'one bin further', assuming some regularity
# we extend by the distance between the preceding bins.
x = np.append(d1, d1[-1] + (d1[-1] - d1[-2]))
# the angular dimension is supposed to be cyclic, so we just add the
# first element
y = np.append(d2, d2[0])
else:
        # disabling autoext is only useful if the user already supplied
        # correctly extended coordinate arrays.
x = d1
y = d2
# roll data array to specified azimuth, assuming equidistant azimuth angles
ind = (d2 >= angle).nonzero()[0][0]
data = np.roll(data, ind, axis=0)
# build cartesian range array, add delta to last element to compensate for
# open bound (np.digitize)
dc = np.linspace(-np.max(d1), np.max(d1) + 0.0001, num=d1.shape[0] * 2 + 1)
# get height values from polar data and build cartesian height array
# add delta to last element to compensate for open bound (np.digitize)
hp = np.zeros((y.shape[0], x.shape[0]))
hc = bin_altitude(x, elev, 0, re=6370040.)
hp[:] = hc
hc[-1] += 0.0001
# create meshgrid for polar data
xx, yy = np.meshgrid(x, y)
# create meshgrid for cartesian slices
xs, ys = np.meshgrid(dc, hc)
# xs, ys = np.meshgrid(dc,x)
# convert polar coordinates to cartesian
xxx = xx * np.cos(np.radians(90. - yy))
# yyy = xx * np.sin(np.radians(90.-yy))
# digitize coordinates according to cartesian range array
range_dig1 = np.digitize(xxx.ravel(), dc)
range_dig1.shape = xxx.shape
# digitize heights according polar height array
height_dig1 = np.digitize(hp.ravel(), hc)
# reshape accordingly
height_dig1.shape = hp.shape
    # drop the trailing bound that was added for np.digitize so the index
    # arrays match the shape of the data
range_dig1 = range_dig1[0:-1, 0:-1]
height_dig1 = height_dig1[0:-1, 0:-1]
# create height and range masks
height_mask = [(height_dig1 == i).ravel().nonzero()[0]
for i in range(1, len(hc))]
range_mask = [(range_dig1 == i).ravel().nonzero()[0]
for i in range(1, len(dc))]
# create mip output array, set outval to inf
mip = np.zeros((d1.shape[0], 2 * d1.shape[0]))
mip[:] = np.inf
# fill mip array,
# in some cases there are no values found in the specified range and height
# then we fill in nans and interpolate afterwards
for i in range(0, len(range_mask)):
mask1 = range_mask[i]
found = False
for j in range(0, len(height_mask)):
mask2 = np.intersect1d(mask1, height_mask[j])
# this is to catch the ValueError from the max() routine when
# calculating on empty array
try:
mip[j, i] = data.ravel()[mask2].max()
if not found:
found = True
except ValueError:
if found:
mip[j, i] = np.nan
# interpolate nans inside image, do not touch outvals
good = ~np.isnan(mip)
xp = good.ravel().nonzero()[0]
fp = mip[~np.isnan(mip)]
x = np.isnan(mip).ravel().nonzero()[0]
mip[np.isnan(mip)] = np.interp(x, xp, fp)
# reset outval to nan
mip[mip == np.inf] = np.nan
return xs, ys, mip
def filter_window_polar(img, wsize, fun, rscale, random=False):
"""Apply a filter of an approximated square window of half size `fsize` \
on a given polar image `img`.
Parameters
----------
img : :class:`numpy:numpy.ndarray`
2d array of values to which the filter is to be applied
wsize : float
Half size of the window centred on the pixel [m]
fun : string
name of the 1d filter from :mod:`scipy:scipy.ndimage`
rscale : float
range [m] scale of the polar grid
random: bool
True to use random azimuthal size to avoid long-term biases.
Returns
-------
output : :class:`numpy:numpy.ndarray`
Array with the same shape as `img`, containing the filter's results.
"""
ascale = 2 * np.pi / img.shape[0]
data_filtered = np.empty(img.shape, dtype=img.dtype)
fun = getattr(filters, "%s_filter1d" % fun)
nbins = img.shape[-1]
ranges = np.arange(nbins) * rscale + rscale / 2
asize = ranges * ascale
if random:
na = prob_round(wsize / asize).astype(int)
else:
na = np.fix(wsize / asize + 0.5).astype(int)
# Maximum of adjacent azimuths (higher close to the origin) to
# increase performance
na[na > 20] = 20
sr = np.fix(wsize / rscale + 0.5).astype(int)
for sa in np.unique(na):
imax = np.where(na >= sa)[0][-1] + 1
imin = np.where(na <= sa)[0][0]
if sa == 0:
data_filtered[:, imin:imax] = img[:, imin:imax]
imin2 = max(imin - sr, 0)
imax2 = min(imax + sr, nbins)
temp = img[:, imin2:imax2]
temp = fun(temp, size=2 * sa + 1, mode='wrap', axis=0)
temp = fun(temp, size=2 * sr + 1, axis=1)
imin3 = imin - imin2
imax3 = imin3 + imax - imin
data_filtered[:, imin:imax] = temp[:, imin3:imax3]
return data_filtered
def prob_round(x, prec=0):
"""Round the float number `x` to the lower or higher integer randomly
following a binomial distribution
Parameters
----------
    x : float or :class:`numpy:numpy.ndarray` of floats
    prec : int
        number of decimal digits to round to
"""
fixup = np.sign(x) * 10 ** prec
x *= fixup
intx = x.astype(int)
round_func = intx + np.random.binomial(1, x - intx)
return round_func / fixup
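# Example (added for illustration): prob_round(np.asarray([2.3])) yields
# array([2]) with probability 0.7 and array([3]) with probability 0.3, so
# the rounding is unbiased in expectation.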
def filter_window_cartesian(img, wsize, fun, scale, **kwargs):
"""Apply a filter of square window size `fsize` on a given \
cartesian image `img`.
Parameters
----------
img : :class:`numpy:numpy.ndarray`
2d array of values to which the filter is to be applied
wsize : float
Half size of the window centred on the pixel [m]
fun : string
name of the 2d filter from :mod:`scipy:scipy.ndimage`
scale : tuple of 2 floats
x and y scale of the cartesian grid [m]
Returns
-------
output : :class:`numpy:numpy.ndarray`
Array with the same shape as `img`, containing the filter's results.
"""
fun = getattr(filters, "%s_filter" % fun)
size = np.fix(wsize / scale + 0.5).astype(int)
data_filtered = fun(img, size, **kwargs)
return data_filtered
def roll2d_polar(img, shift=1, axis=0):
"""Roll a 2D polar array [azimuth,range] by a given `shift` for \
the given `axis`
Parameters
----------
img : :class:`numpy:numpy.ndarray`
2d data array
shift : int
shift to apply to the array
axis : int
axis which will be shifted
Returns
-------
out: new array with shifted values
"""
if shift == 0:
return img
else:
out = np.empty(img.shape)
n = img.shape[axis]
if axis == 0:
if shift > 0:
out[shift:, :] = img[:-shift, :]
out[:shift, :] = img[n - shift:, :]
else:
out[:shift, :] = img[-shift:, :]
                out[n + shift:, :] = img[:-shift, :]
else:
if shift > 0:
out[:, shift:] = img[:, :-shift]
out[:, :shift] = np.nan
else:
out[:, :shift] = img[:, -shift:]
out[:, n + shift:] = np.nan
return out
class UTC(tzinfo):
"""UTC implementation for tzinfo.
See e.g. http://python.active-venture.com/lib/datetime-tzinfo.html
Replaces pytz.utc
"""
def __repr__(self):
return "<UTC>"
def utcoffset(self, dt):
return timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return timedelta(0)
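# Minimal usage sketch (added for illustration): use the stub wherever
# pytz.utc would be passed, e.g. dt.datetime(2000, 1, 1, tzinfo=UTC()).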
def half_power_radius(r, bwhalf):
"""
Half-power radius.
ported from PyRadarMet
Battan (1973),
Parameters
----------
r : float, :class:`numpy:numpy.ndarray` of floats
Range from radar [m]
bwhalf : float
Half-power beam width [degrees]
Returns
-------
Rhalf : float, :class:`numpy:numpy.ndarray` of floats
Half-power radius [m]
Examples
--------
rhalf = half_power_radius(r,bwhalf)
"""
rhalf = (r * np.deg2rad(bwhalf)) / 2.
return rhalf
def get_raster_origin(coords):
"""Return raster origin
Parameters
----------
coords : :class:`numpy:numpy.ndarray`
3 dimensional array (rows, cols, 2) of xy-coordinates
Returns
-------
out : str
'lower' or 'upper'
"""
return 'lower' if (coords[1, 1] - coords[0, 0])[1] > 0 else 'upper'
def find_bbox_indices(coords, bbox):
"""Find min/max-indices for NxMx2 array coords using bbox-values.
The bounding box is defined by two points (llx,lly and urx,ury)
It finds the first indices before llx,lly and the first indices
after urx,ury. If no index is found 0 and N/M is returned.
Parameters
----------
coords : :class:`numpy:numpy.ndarray`
3 dimensional array (ny, nx, lon/lat) of floats
bbox : 4-element :class:`numpy:numpy.ndarray`, list or tuple of floats
(llx,lly,urx,ury)
Returns
-------
bbind : tuple
4-element tuple of int (llx,lly,urx,ury)
"""
# sort arrays
x_sort = np.argsort(coords[0, :, 0])
y_sort = np.argsort(coords[:, 0, 1])
# find indices in sorted arrays
llx = np.searchsorted(coords[0, :, 0], bbox[0], side='left',
sorter=x_sort)
urx = np.searchsorted(coords[0, :, 0], bbox[2], side='right',
sorter=x_sort)
lly = np.searchsorted(coords[:, 0, 1], bbox[1], side='left',
sorter=y_sort)
ury = np.searchsorted(coords[:, 0, 1], bbox[3], side='right',
sorter=y_sort)
# get indices in original array
if llx < len(x_sort):
llx = x_sort[llx]
if urx < len(x_sort):
urx = x_sort[urx]
if lly < len(y_sort):
lly = y_sort[lly]
if ury < len(y_sort):
ury = y_sort[ury]
# check at boundaries
if llx:
llx -= 1
if get_raster_origin(coords) == 'lower':
if lly:
lly -= 1
else:
if lly < coords.shape[0]:
lly += 1
bbind = (llx, min(lly, ury), urx, max(lly, ury))
return bbind
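# Illustration (not part of the original module): clip a coordinate grid to a
# bounding box with find_bbox_indices. The regular 10 m grid below is made
# up; the returned indices slice out the region enclosing the box.
def _find_bbox_indices_example():
    x, y = np.meshgrid(np.arange(0., 100., 10.), np.arange(0., 100., 10.))
    coords = np.dstack((x, y))  # shape (ny, nx, 2)
    llx, lly, urx, ury = find_bbox_indices(coords, (25., 25., 65., 65.))
    return coords[lly:ury, llx:urx]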
def has_geos():
    """Check whether the installed GDAL/OGR stack was built with GEOS support."""
pnt1 = ogr.CreateGeometryFromWkt('POINT(10 20)')
pnt2 = ogr.CreateGeometryFromWkt('POINT(30 20)')
ogrex = ogr.GetUseExceptions()
gdalex = gdal.GetUseExceptions()
gdal.DontUseExceptions()
ogr.DontUseExceptions()
hasgeos = pnt1.Union(pnt2) is not None
if ogrex:
ogr.UseExceptions()
if gdalex:
gdal.UseExceptions()
return hasgeos
def get_wradlib_data_path():
    """Return the wradlib data path from the WRADLIB_DATA environment variable."""
wrl_data_path = os.environ.get('WRADLIB_DATA', None)
if wrl_data_path is None:
raise EnvironmentError("'WRADLIB_DATA' environment variable not set")
if not os.path.isdir(wrl_data_path):
raise EnvironmentError("'WRADLIB_DATA' path '{0}' "
"does not exist".format(wrl_data_path))
return wrl_data_path
def get_wradlib_data_file(relfile):
    """Return the absolute path of `relfile` inside the wradlib data path."""
data_file = os.path.join(get_wradlib_data_path(), relfile)
if not os.path.exists(data_file):
raise EnvironmentError("WRADLIB_DATA file '{0}' "
"does not exist".format(data_file))
return data_file
def calculate_polynomial(data, w):
"""Calculate Polynomial
The functions calculates the following polynomial:
.. math::
P = \\sum_{n=0}^{N} w(n) \\cdot data^{n}
Parameters
----------
data : :class:`numpy:numpy.ndarray`
Flat array of data values.
w : :class:`numpy:numpy.ndarray`
Array of shape (N) containing weights.
Returns
-------
poly : :class:`numpy:numpy.ndarray`
Flat array of processed data.
"""
poly = np.zeros_like(data)
for i, c in enumerate(w):
poly += c * data**i
return poly
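# Illustration (not part of the original module): calculate_polynomial
# evaluates P = sum_n w[n] * data**n, i.e. weights are ordered from the
# constant term upwards, matching numpy's polyval with reversed coefficients.
def _calculate_polynomial_example():
    data = np.array([0., 1., 2.])
    w = np.array([1., 2., 3.])  # P(x) = 1 + 2x + 3x^2
    assert np.allclose(calculate_polynomial(data, w), np.polyval(w[::-1], data))
    return calculate_polynomial(data, w)  # -> [1., 6., 17.]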
def medfilt_along_axis(x, n, axis=-1):
"""Applies median filter smoothing on one axis of an N-dimensional array.
"""
kernel_size = np.array(x.shape)
kernel_size[:] = 1
kernel_size[axis] = n
return medfilt(x, kernel_size)
def gradient_along_axis(x):
"""Computes gradient along last axis of an N-dimensional array
"""
axis = -1
newshape = np.array(x.shape)
newshape[axis] = 1
diff_begin = (x[..., 1] - x[..., 0]).reshape(newshape)
diff_end = (x[..., -1] - x[..., -2]).reshape(newshape)
diffs = ((x - np.roll(x, 2, axis)) / 2.)
diffs = np.append(diffs[..., 2:], diff_end, axis=axis)
return np.insert(diffs, [0], diff_begin, axis=axis)
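# Illustration (not part of the original module): gradient_along_axis uses
# centred differences in the interior and one-sided differences at the two
# ends, so for x = n**2 it approximates dx/dn = 2n:
#   gradient_along_axis(np.array([0., 1., 4., 9., 16.]))
#   -> array([1., 2., 4., 6., 7.])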
def gradient_from_smoothed(x, n=5):
"""Computes gradient of smoothed data along final axis of an array
"""
return gradient_along_axis(medfilt_along_axis(x, n)).astype("f4")
if __name__ == '__main__':
print('wradlib: Calling module <util> as main...')
|
kmuehlbauer/wradlib
|
wradlib/util.py
|
Python
|
mit
| 23,048
|
import numpy as np
import lasagne
from lasagne.layers import Conv2DLayer as ConvLayer
from lasagne.layers import ElemwiseSumLayer
from lasagne.layers import InputLayer
from lasagne.layers import DenseLayer
from lasagne.layers import GlobalPoolLayer
from lasagne.layers import PadLayer
from lasagne.layers import ExpressionLayer
from lasagne.layers import NonlinearityLayer
from lasagne.nonlinearities import softmax, rectify
from lasagne.layers import batch_norm
from lasagne.layers import Layer
from scipy.spatial.distance import cdist
###################### Load the data #######################
def unpickle(file):
    import cPickle
    # use a context manager so the handle is always closed, and avoid
    # shadowing the dict builtin
    with open(file, 'rb') as fo:
        data = cPickle.load(fo)
    return data
def load_data(samples_pr_cl_val):
xs = []
ys = []
    # the CIFAR-100 training set ships as a single file, so no loop is needed
    d = unpickle('cifar-100-python/train')
    xs.append(d['data'])
    ys.append(d['fine_labels'])
d = unpickle('cifar-100-python/test')
xs.append(d['data'])
ys.append(d['fine_labels'])
x = np.concatenate(xs)/np.float32(255)
y = np.concatenate(ys)
x = np.dstack((x[:, :1024], x[:, 1024:2048], x[:, 2048:]))
x = x.reshape((x.shape[0], 32, 32, 3)).transpose(0,3,1,2)
# subtract per-pixel mean
pixel_mean = np.mean(x[0:50000],axis=0)
x -= pixel_mean
# Create Train/Validation set
eff_samples_cl = 500-samples_pr_cl_val
X_train = np.zeros((eff_samples_cl*100,3,32, 32))
Y_train = np.zeros(eff_samples_cl*100)
X_valid = np.zeros((samples_pr_cl_val*100,3,32, 32))
Y_valid = np.zeros(samples_pr_cl_val*100)
for i in range(100):
index_y=np.where(y[0:50000]==i)[0]
np.random.shuffle(index_y)
X_train[i*eff_samples_cl:(i+1)*eff_samples_cl] = x[index_y[0:eff_samples_cl],:,:,:]
Y_train[i*eff_samples_cl:(i+1)*eff_samples_cl] = y[index_y[0:eff_samples_cl]]
X_valid[i*samples_pr_cl_val:(i+1)*samples_pr_cl_val] = x[index_y[eff_samples_cl:500],:,:,:]
Y_valid[i*samples_pr_cl_val:(i+1)*samples_pr_cl_val] = y[index_y[eff_samples_cl:500]]
X_test = x[50000:,:,:,:]
Y_test = y[50000:]
return dict(
X_train = lasagne.utils.floatX(X_train),
Y_train = Y_train.astype('int32'),
X_valid = lasagne.utils.floatX(X_valid),
Y_valid = Y_valid.astype('int32'),
X_test = lasagne.utils.floatX(X_test),
Y_test = Y_test.astype('int32'),)
###################### Build the neural network model #######################
def build_cnn(input_var=None, n=5):
# This block of code for the architecture and the data augmentation is inspired from Lasagne recipe code : https://github.com/Lasagne/Recipes/blob/master/papers/deep_residual_learning/Deep_Residual_Learning_CIFAR-10.py
# create a residual learning building block with two stacked 3x3 convlayers as in paper
def residual_block(l, increase_dim=False, projection=False,last=False):
input_num_filters = l.output_shape[1]
if increase_dim:
first_stride = (2,2)
out_num_filters = input_num_filters*2
else:
first_stride = (1,1)
out_num_filters = input_num_filters
stack_1 = batch_norm(ConvLayer(l, num_filters=out_num_filters, filter_size=(3,3), stride=first_stride, nonlinearity=rectify, pad='same', W=lasagne.init.HeNormal(gain='relu'), flip_filters=False))
stack_2 = batch_norm(ConvLayer(stack_1, num_filters=out_num_filters, filter_size=(3,3), stride=(1,1), nonlinearity=None, pad='same', W=lasagne.init.HeNormal(gain='relu'), flip_filters=False))
# add shortcut connections
if increase_dim:
if projection:
# projection shortcut, as option B in paper
projection = batch_norm(ConvLayer(l, num_filters=out_num_filters, filter_size=(1,1), stride=(2,2), nonlinearity=None, pad='same', b=None, flip_filters=False))
if last:
block = ElemwiseSumLayer([stack_2, projection])
else:
block = NonlinearityLayer(ElemwiseSumLayer([stack_2, projection]),nonlinearity=rectify)
else:
# identity shortcut, as option A in paper
identity = ExpressionLayer(l, lambda X: X[:, :, ::2, ::2], lambda s: (s[0], s[1], s[2]//2, s[3]//2))
padding = PadLayer(identity, [out_num_filters//4,0,0], batch_ndim=1)
if last:
block = ElemwiseSumLayer([stack_2, padding])
else:
block = NonlinearityLayer(ElemwiseSumLayer([stack_2, padding]),nonlinearity=rectify)
else:
if last:
block = ElemwiseSumLayer([stack_2, l])
else:
block = NonlinearityLayer(ElemwiseSumLayer([stack_2, l]),nonlinearity=rectify)
return block
# Building the network
l_in = InputLayer(shape=(None, 3, 32, 32), input_var=input_var)
# first layer, output is 16 x 32 x 32
l = batch_norm(ConvLayer(l_in, num_filters=16, filter_size=(3,3), stride=(1,1), nonlinearity=rectify, pad='same', W=lasagne.init.HeNormal(gain='relu'), flip_filters=False))
# first stack of residual blocks, output is 16 x 32 x 32
for _ in range(n):
l = residual_block(l)
# second stack of residual blocks, output is 32 x 16 x 16
l = residual_block(l, increase_dim=True)
for _ in range(1,n):
l = residual_block(l)
# third stack of residual blocks, output is 64 x 8 x 8
l = residual_block(l, increase_dim=True)
for _ in range(1,n-1):
l = residual_block(l)
l = residual_block(l,last=True)
# average pooling
l = GlobalPoolLayer(l)
# fully connected layer
network = DenseLayer(
l, num_units=100,
W=lasagne.init.HeNormal(),
nonlinearity=lasagne.nonlinearities.sigmoid)
return network,l
############################## Batch iterator ###############################
def iterate_minibatches(inputs, targets, batchsize, shuffle=False, augment=False):
assert len(inputs) == len(targets)
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
if augment:
# as in paper :
# pad feature arrays with 4 pixels on each side
# and do random cropping of 32x32
padded = np.pad(inputs[excerpt],((0,0),(0,0),(4,4),(4,4)),mode='constant')
random_cropped = np.zeros(inputs[excerpt].shape, dtype=np.float32)
crops = np.random.random_integers(0,high=8,size=(batchsize,2))
for r in range(batchsize):
# Cropping and possible flipping
if (np.random.randint(2) > 0):
random_cropped[r,:,:,:] = padded[r,:,crops[r,0]:(crops[r,0]+32),crops[r,1]:(crops[r,1]+32)]
else:
random_cropped[r,:,:,:] = padded[r,:,crops[r,0]:(crops[r,0]+32),crops[r,1]:(crops[r,1]+32)][:,:,::-1]
inp_exc = random_cropped
else:
inp_exc = inputs[excerpt]
yield inp_exc, targets[excerpt]
def accuracy_measure(X_valid, Y_valid, class_means, val_fn, top1_acc_list, iteration, iteration_total, type_data):
stat_hb1 = []
stat_icarl = []
stat_ncm = []
for batch in iterate_minibatches(X_valid, Y_valid, min(500,len(X_valid)), shuffle=False):
inputs, targets_prep = batch
targets = np.zeros((inputs.shape[0],100),np.float32)
targets[range(len(targets_prep)),targets_prep.astype('int32')] = 1.
err,pred,pred_inter = val_fn(inputs, targets)
pred_inter = (pred_inter.T/np.linalg.norm(pred_inter.T,axis=0)).T
# Compute score for iCaRL
sqd = cdist(class_means[:,:,0].T, pred_inter, 'sqeuclidean')
score_icarl = (-sqd).T
# Compute score for NCM
sqd = cdist(class_means[:,:,1].T, pred_inter, 'sqeuclidean')
score_ncm = (-sqd).T
# Compute the accuracy over the batch
stat_hb1 += ([ll in best for ll, best in zip(targets_prep.astype('int32'), np.argsort(pred, axis=1)[:, -1:])])
stat_icarl += ([ll in best for ll, best in zip(targets_prep.astype('int32'), np.argsort(score_icarl, axis=1)[:, -1:])])
stat_ncm += ([ll in best for ll, best in zip(targets_prep.astype('int32'), np.argsort(score_ncm, axis=1)[:, -1:])])
print("Final results on "+type_data+" classes:")
print(" top 1 accuracy iCaRL :\t\t{:.2f} %".format(np.average(stat_icarl)* 100))
print(" top 1 accuracy Hybrid 1 :\t\t{:.2f} %".format(np.average(stat_hb1)* 100))
print(" top 1 accuracy NCM :\t\t{:.2f} %".format(np.average(stat_ncm)* 100))
top1_acc_list[iteration,0,iteration_total] = np.average(stat_icarl) * 100
top1_acc_list[iteration,1,iteration_total] = np.average(stat_hb1) * 100
top1_acc_list[iteration,2,iteration_total] = np.average(stat_ncm) * 100
return top1_acc_list
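# Illustration (not part of the original file): the scoring above is a
# nearest-class-mean rule -- each sample is assigned the class whose mean
# feature vector is closest in squared Euclidean distance. A standalone
# sketch with made-up shapes (8 samples, 64-d features, 100 classes):
def _nearest_class_mean_example():
    feats = np.random.rand(8, 64)
    feats = (feats.T / np.linalg.norm(feats.T, axis=0)).T  # L2-normalise rows
    class_means = np.random.rand(64, 100)  # one 64-d mean per class
    sqd = cdist(class_means.T, feats, 'sqeuclidean')
    return np.argmax((-sqd).T, axis=1)  # predicted class per sample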
|
srebuffi/iCaRL
|
iCaRL-TheanoLasagne/utils_cifar100.py
|
Python
|
mit
| 9,375
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for creating partitioned variables.
This is a convenient abstraction to partition a large variable across
multiple smaller variables that can be assigned to different devices.
The full variable can be reconstructed by concatenating the smaller variables.
Using partitioned variables instead of a single variable is mostly a
performance choice. It however also has an impact on:
1. Random initialization, as the random number generator is called once per
slice
2. Updates, as they happen in parallel across slices
A key design goal is to allow a different graph to repartition a variable
with the same name but different slicings, including possibly no partitions.
TODO(touts): If an initializer provides a seed, the seed must be changed
deterministically for each slice, maybe by adding one to it, otherwise each
slice will use the same values. Maybe this can be done by passing the
slice offsets to the initializer functions.
Typical usage:
```python
# Create a list of partitioned variables with:
vs = create_partitioned_variables(
<shape>, <slicing>, <initializer>, name=<optional-name>)
# Pass the list as inputs to embedding_lookup for sharded, parallel lookup:
y = embedding_lookup(vs, ids, partition_strategy="div")
# Or fetch the variables in parallel to speed up large matmuls:
z = matmul(x, concat(slice_dim, vs))
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"create_partitioned_variables",
"variable_axis_size_partitioner",
"min_max_variable_partitioner",
"fixed_size_partitioner",
]
@tf_export("variable_axis_size_partitioner")
def variable_axis_size_partitioner(
max_shard_bytes, axis=0, bytes_per_string_element=16, max_shards=None):
"""Get a partitioner for VariableScope to keep shards below `max_shard_bytes`.
This partitioner will shard a Variable along one axis, attempting to keep
the maximum shard size below `max_shard_bytes`. In practice, this is not
always possible when sharding along only one axis. When this happens,
this axis is sharded as much as possible (i.e., every dimension becomes
a separate shard).
If the partitioner hits the `max_shards` limit, then each shard may end up
larger than `max_shard_bytes`. By default `max_shards` equals `None` and no
limit on the number of shards is enforced.
One reasonable value for `max_shard_bytes` is `(64 << 20) - 1`, or almost
`64MB`, to keep below the protobuf byte limit.
Args:
max_shard_bytes: The maximum size any given shard is allowed to be.
axis: The axis to partition along. Default: outermost axis.
bytes_per_string_element: If the `Variable` is of type string, this provides
an estimate of how large each scalar in the `Variable` is.
    max_shards: The maximum number of shards (an `int`) to create, taking
      precedence over `max_shard_bytes`.
Returns:
A partition function usable as the `partitioner` argument to
`variable_scope`, `get_variable`, and `get_partitioned_variable_list`.
Raises:
ValueError: If any of the byte counts are non-positive.
"""
if max_shard_bytes < 1 or bytes_per_string_element < 1:
raise ValueError(
"Both max_shard_bytes and bytes_per_string_element must be positive.")
if max_shards and max_shards < 1:
raise ValueError(
"max_shards must be positive.")
def _partitioner(shape, dtype):
"""Partitioner that partitions shards to have max_shard_bytes total size.
Args:
shape: A `TensorShape`.
dtype: A `DType`.
Returns:
A tuple representing how much to slice each axis in shape.
Raises:
ValueError: If shape is not a fully defined `TensorShape` or dtype is not
a `DType`.
"""
if not isinstance(shape, tensor_shape.TensorShape):
raise ValueError("shape is not a TensorShape: %s" % shape)
if not shape.is_fully_defined():
raise ValueError("shape is not fully defined: %s" % shape)
if not isinstance(dtype, dtypes.DType):
raise ValueError("dtype is not a DType: %s" % dtype)
if dtype.base_dtype == dtypes.string:
element_size = bytes_per_string_element
else:
element_size = dtype.size
partitions = [1] * shape.ndims
bytes_per_slice = 1.0 * (
shape.num_elements() / shape[axis].value) * element_size
# How many slices can we fit on one shard of size at most max_shard_bytes?
# At least one slice is required.
slices_per_shard = max(1, math.floor(max_shard_bytes / bytes_per_slice))
# How many shards do we need for axis given that each shard fits
# slices_per_shard slices from a total of shape[axis].value slices?
axis_shards = int(math.ceil(1.0 * shape[axis].value / slices_per_shard))
if max_shards:
axis_shards = min(max_shards, axis_shards)
partitions[axis] = axis_shards
return partitions
return _partitioner
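# Worked example (illustration only): for a float32 variable of shape
# [1024, 1024], one slice along axis 0 occupies 1024 * 4 = 4096 bytes.
# With max_shard_bytes = 1 << 20 (1MB), slices_per_shard =
# floor(1MB / 4096) = 256 and axis_shards = ceil(1024 / 256) = 4, so the
# returned partitioning is [4, 1]. With the default (64 << 20) - 1 limit,
# slices_per_shard >= 1024 and everything fits in one shard: [1, 1].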
@tf_export("min_max_variable_partitioner")
def min_max_variable_partitioner(max_partitions=1, axis=0,
min_slice_size=256 << 10,
bytes_per_string_element=16):
"""Partitioner to allocate minimum size per slice.
Returns a partitioner that partitions the variable of given shape and dtype
such that each partition has a minimum of `min_slice_size` slice of the
variable. The maximum number of such partitions (upper bound) is given by
`max_partitions`.
Args:
max_partitions: Upper bound on the number of partitions. Defaults to 1.
axis: Axis along which to partition the variable. Defaults to 0.
min_slice_size: Minimum size of the variable slice per partition. Defaults
to 256K.
bytes_per_string_element: If the `Variable` is of type string, this provides
an estimate of how large each scalar in the `Variable` is.
Returns:
A partition function usable as the `partitioner` argument to
`variable_scope`, `get_variable`, and `get_partitioned_variable_list`.
"""
def _partitioner(shape, dtype):
"""Partitioner that partitions list for a variable of given shape and type.
Ex: Consider partitioning a variable of type float32 with
shape=[1024, 1024].
If `max_partitions` >= 16, this function would return
[(1024 * 1024 * 4) / (256 * 1024), 1] = [16, 1].
If `max_partitions` < 16, this function would return
[`max_partitions`, 1].
Args:
shape: Shape of the variable.
dtype: Type of the variable.
Returns:
List of partitions for each axis (currently only one axis can be
partitioned).
Raises:
ValueError: If axis to partition along does not exist for the variable.
"""
if axis >= len(shape):
raise ValueError("Can not partition variable along axis %d when shape is "
"only %s" % (axis, shape))
if dtype.base_dtype == dtypes.string:
bytes_per_element = bytes_per_string_element
else:
bytes_per_element = dtype.size
total_size_bytes = shape.num_elements() * bytes_per_element
partitions = total_size_bytes / min_slice_size
partitions_list = [1] * len(shape)
# We can not partition the variable beyond what its shape or
# `max_partitions` allows.
partitions_list[axis] = max(1, min(shape[axis].value,
max_partitions,
int(math.ceil(partitions))))
return partitions_list
return _partitioner
@tf_export("fixed_size_partitioner")
def fixed_size_partitioner(num_shards, axis=0):
"""Partitioner to specify a fixed number of shards along given axis.
Args:
num_shards: `int`, number of shards to partition variable.
axis: `int`, axis to partition on.
Returns:
A partition function usable as the `partitioner` argument to
`variable_scope`, `get_variable`, and `get_partitioned_variable_list`.
"""
def _partitioner(shape, **unused_args):
partitions_list = [1] * len(shape)
partitions_list[axis] = min(num_shards, shape[axis].value)
return partitions_list
return _partitioner
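# Hedged usage sketch (not in the original file): attach a partitioner to a
# variable scope so get_variable creates a sharded variable (TF 1.x API):
#
#   with variable_scope.variable_scope(
#           "embedding", partitioner=fixed_size_partitioner(3)):
#       v = variable_scope.get_variable("weights", shape=[9, 16])
#   # v is split into 3 shards of shape [3, 16] along axis 0.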
@tf_export("create_partitioned_variables")
def create_partitioned_variables(
shape, slicing, initializer, dtype=dtypes.float32,
trainable=True, collections=None, name=None, reuse=None):
"""Create a list of partitioned variables according to the given `slicing`.
Currently only one dimension of the full variable can be sliced, and the
full variable can be reconstructed by the concatenation of the returned
list along that dimension.
Args:
shape: List of integers. The shape of the full variable.
slicing: List of integers. How to partition the variable.
Must be of the same length as `shape`. Each value
indicate how many slices to create in the corresponding
dimension. Presently only one of the values can be more than 1;
that is, the variable can only be sliced along one dimension.
      For convenience, the requested number of partitions does not have to
divide the corresponding dimension evenly. If it does not, the
shapes of the partitions are incremented by 1 starting from partition
0 until all slack is absorbed. The adjustment rules may change in the
future, but as you can save/restore these variables with different
slicing specifications this should not be a problem.
initializer: A `Tensor` of shape `shape` or a variable initializer
function. If a function, it will be called once for each slice,
passing the shape and data type of the slice as parameters. The
function must return a tensor with the same shape as the slice.
dtype: Type of the variables. Ignored if `initializer` is a `Tensor`.
trainable: If True also add all the variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES`.
collections: List of graph collections keys to add the variables to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
name: Optional name for the full variable. Defaults to
`"PartitionedVariable"` and gets uniquified automatically.
    reuse: Boolean or `None`; if `True` and name is set, it reuses
      previously created variables. If `False` it creates new variables.
      If `None`, it inherits the parent scope's reuse setting.
Returns:
A list of Variables corresponding to the slicing.
Raises:
ValueError: If any of the arguments is malformed.
"""
logging.warn(
"create_partitioned_variables is deprecated. Use "
"tf.get_variable with a partitioner set, or "
"tf.get_partitioned_variable_list, instead.")
if len(shape) != len(slicing):
raise ValueError("The 'shape' and 'slicing' of a partitioned Variable "
"must have the length: shape: %s, slicing: %s" %
(shape, slicing))
if len(shape) < 1:
raise ValueError("A partitioned Variable must have rank at least 1: "
"shape: %s" % shape)
# Legacy: we are provided the slicing directly, so just pass it to
# the partitioner.
partitioner = lambda **unused_kwargs: slicing
with variable_scope.variable_scope(
name, "PartitionedVariable", reuse=reuse):
# pylint: disable=protected-access
partitioned_var = variable_scope._get_partitioned_variable(
name=None,
shape=shape,
dtype=dtype,
initializer=initializer,
trainable=trainable,
partitioner=partitioner,
collections=collections)
return list(partitioned_var)
# pylint: enable=protected-access
|
nburn42/tensorflow
|
tensorflow/python/ops/partitioned_variables.py
|
Python
|
apache-2.0
| 12,541
|
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import inspect
import shutil
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(__location__, '../src'))
# -- Run sphinx-apidoc ------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
# Additionally it helps us to avoid running apidoc manually
from sphinx import apidoc
output_dir = os.path.join(__location__, "api")
module_dir = os.path.join(__location__, "../src/nlpia")
try:
shutil.rmtree(output_dir)
except FileNotFoundError:
pass
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
apidoc.main(cmd_line.split(" "))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.mathjax',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'nlpia'
copyright = u'2018, Hobson Lane'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
from nlpia import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'nlpia-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'nlpia Documentation',
u'Hobson Lane', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('http://www.sphinx-doc.org/en/stable', None),
'python': ('https://docs.python.org/' + python_version, None),
'matplotlib': ('https://matplotlib.org', None),
'numpy': ('https://docs.scipy.org/doc/numpy', None),
'sklearn': ('http://scikit-learn.org/stable', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
}
|
totalgood/nlpia
|
docs/conf.py
|
Python
|
mit
| 8,742
|
#! /usr/bin/env python
#
# CRC32 WJ103
#
import zlib
def crc32(filename):
'''calculate CRC-32 checksum of file'''
    # binary mode matters: text mode would mangle bytes on some platforms
    # (open() raises IOError on failure, so no falsy check is needed)
    f = open(filename, 'rb')
    crc = 0
    while 1:
        buf = f.read(16384)
        if not buf:
            break
        crc = zlib.crc32(buf, crc)
    f.close()
    # mask to unsigned: on Python 2, zlib.crc32() can return a negative int
    str_crc = '%x' % (crc & 0xffffffff)
# print 'TD: CRC32 : %s' % str_crc
return str_crc
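# Illustration only (not in the original script): the chunked CRC computed by
# crc32() matches a one-shot zlib.crc32 over the same bytes.
def _crc32_selftest():
    import os
    import tempfile
    data = 'The quick brown fox jumps over the lazy dog' * 100
    fd, path = tempfile.mkstemp()
    try:
        os.write(fd, data)
        os.close(fd)
        assert crc32(path) == '%x' % (zlib.crc32(data) & 0xffffffff)
    finally:
        os.remove(path)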
if __name__ == '__main__':
import sys
for file in sys.argv[1:]:
print '%s %s' % (crc32(file), file)
# EOB
|
walterdejong/synctool
|
contrib/attic/crc32.py
|
Python
|
gpl-2.0
| 463
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.7.4, generator: @autorest/python@5.12.4)
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import TYPE_CHECKING
from azure.core import PipelineClient
from msrest import Deserializer, Serializer
from . import models
from ._configuration import ContainerRegistryConfiguration
from .operations import AuthenticationOperations, ContainerRegistryBlobOperations, ContainerRegistryOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.rest import HttpRequest, HttpResponse
class ContainerRegistry(object):
"""Metadata API definition for the Azure Container Registry runtime.
:ivar container_registry: ContainerRegistryOperations operations
:vartype container_registry: container_registry.operations.ContainerRegistryOperations
:ivar container_registry_blob: ContainerRegistryBlobOperations operations
:vartype container_registry_blob: container_registry.operations.ContainerRegistryBlobOperations
:ivar authentication: AuthenticationOperations operations
:vartype authentication: container_registry.operations.AuthenticationOperations
:param url: Registry login URL.
:type url: str
:keyword api_version: Api Version. The default value is "2021-07-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
url, # type: str
**kwargs # type: Any
):
# type: (...) -> None
_base_url = '{url}'
self._config = ContainerRegistryConfiguration(url=url, **kwargs)
self._client = PipelineClient(base_url=_base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.container_registry = ContainerRegistryOperations(self._client, self._config, self._serialize, self._deserialize)
self.container_registry_blob = ContainerRegistryBlobOperations(self._client, self._config, self._serialize, self._deserialize)
self.authentication = AuthenticationOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request, # type: HttpRequest
**kwargs # type: Any
):
# type: (...) -> HttpResponse
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
path_format_arguments = {
"url": self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments)
return self._client.send_request(request_copy, **kwargs)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> ContainerRegistry
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
|
Azure/azure-sdk-for-python
|
sdk/containerregistry/azure-containerregistry/azure/containerregistry/_generated/_container_registry.py
|
Python
|
mit
| 4,280
|
import shutil
import json
from rest_framework import routers, serializers, viewsets, parsers, filters
from rest_framework.views import APIView
from rest_framework.exceptions import APIException
from rest_framework.response import Response
from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import SimpleUploadedFile, InMemoryUploadedFile
from django.core.validators import URLValidator
from base.models import Project, SeedsList
from apps.crawl_space.models import Crawl, CrawlModel
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionError, NotFoundError
class DataWakeIndexUnavailable(APIException):
status_code = 404
default_detail = "The server failed to find the DataWake index in elasticsearch."
class SlugModelSerializer(serializers.ModelSerializer):
slug = serializers.SlugField(required=False, read_only=True)
class ProjectSerializer(SlugModelSerializer):
url = serializers.CharField(read_only=True)
class Meta:
model = Project
class CrawlSerializer(SlugModelSerializer):
# Expose these fields, but only as read only.
id = serializers.ReadOnlyField()
seeds_list = serializers.FileField(read_only=True, use_url=False)
status = serializers.CharField(read_only=True)
config = serializers.CharField(read_only=True)
index_name = serializers.CharField(read_only=True)
url = serializers.CharField(read_only=True)
pages_crawled = serializers.IntegerField(read_only=True)
harvest_rate = serializers.FloatField(read_only=True)
location = serializers.CharField(read_only=True)
def validate_crawler(self, value):
if value == "ache" and not self.initial_data.get("crawl_model"):
raise serializers.ValidationError("Ache crawls require a Crawl Model.")
return value
class Meta:
model = Crawl
class CrawlModelSerializer(SlugModelSerializer):
model = serializers.FileField(use_url=False)
features = serializers.FileField(use_url=False)
url = serializers.CharField(read_only=True)
def validate_model(self, value):
if value.name != "pageclassifier.model":
raise serializers.ValidationError("File must be named pageclassifier.model")
return value
def validate_features(self, value):
if value.name != "pageclassifier.features":
raise serializers.ValidationError("File must be named pageclassifier.features")
return value
class Meta:
model = CrawlModel
class SeedsListSerializer(SlugModelSerializer):
url = serializers.CharField(read_only=True)
file_string = serializers.CharField(read_only=True)
def validate_seeds(self, value):
try:
seeds = json.loads(value)
except ValueError:
raise serializers.ValidationError("Seeds must be a JSON encoded string.")
if type(seeds) != list:
raise serializers.ValidationError("Seeds must be an array of URLs.")
validator = URLValidator()
errors = []
for index, x in enumerate(seeds):
try:
validator(x)
except ValidationError:
# Add index to make it easier for CodeMirror to select the right
# line.
errors.append({index: x})
if errors:
errors.insert(0, "The seeds list contains invalid urls.")
errors.append({"list": "\n".join(seeds)})
raise serializers.ValidationError(errors)
return value
class Meta:
model = SeedsList
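# Illustration (not in the original module): validate_seeds above expects the
# "seeds" field to be a JSON-encoded array of URLs, e.g.
#   '["http://example.com/", "http://example.org/"]'
# Any other payload raises a ValidationError.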
"""
Viewset Classes.
Filtering is provided by django-filter.
Backend settings are in common_settings.py under REST_FRAMEWORK. Setting is:
'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',)
This backend is supplied to every viewset by default. Alter query fields by adding
or removing items from filter_fields
"""
class ProjectViewSet(viewsets.ModelViewSet):
queryset = Project.objects.all()
serializer_class = ProjectSerializer
filter_fields = ('id', 'slug', 'name',)
class CrawlViewSet(viewsets.ModelViewSet):
queryset = Crawl.objects.all()
serializer_class = CrawlSerializer
filter_fields = ('id', 'slug', 'name', 'description', 'status', 'project',
'crawl_model', 'crawler', 'seeds_object')
class CrawlModelViewSet(viewsets.ModelViewSet):
queryset = CrawlModel.objects.all()
serializer_class = CrawlModelSerializer
filter_fields = ('id', 'slug', 'name', 'project',)
def destroy(self, request, pk=None):
model = CrawlModel.objects.get(pk=pk)
crawls = Crawl.objects.all().filter(crawl_model=pk)
if crawls:
message = "The Crawl Model is being used by the following Crawls and cannot be deleted: "
raise serializers.ValidationError({
"message": message,
"errors": [x.name for x in crawls],
})
else:
shutil.rmtree(model.get_model_path())
return super(CrawlModelViewSet, self).destroy(request)
class SeedsListViewSet(viewsets.ModelViewSet):
queryset = SeedsList.objects.all()
serializer_class = SeedsListSerializer
filter_fields = ('id', 'name', 'seeds', 'slug',)
def create(self, request):
# If a seeds file or a textseeds exists, then use those. Otherwise, look
# for a string in request.data["seeds"]
seeds_list = request.FILES.get("seeds", False)
textseeds = request.data.get("textseeds", False)
if seeds_list:
request.data["seeds"] = json.dumps(map(str.strip, seeds_list.readlines()))
elif textseeds:
if type(textseeds) == unicode:
request.data["seeds"] = json.dumps(map(unicode.strip, textseeds.split("\n")))
# Get rid of carriage return character.
elif type(textseeds) == str:
request.data["seeds"] = json.dumps(map(str.strip, textseeds.split("\n")))
return super(SeedsListViewSet, self).create(request)
def destroy(self, request, pk=None):
seeds = SeedsList.objects.get(pk=pk)
crawls = Crawl.objects.all().filter(seeds_object=pk)
if crawls:
message = "The Seeds List is being used by the following Crawls and cannot be deleted: "
raise serializers.ValidationError({
"message": message,
"errors": [x.name for x in crawls],
})
else:
return super(SeedsListViewSet, self).destroy(request)
class DataWakeView(APIView):
index = "datawake"
es = Elasticsearch()
def create_trails(self, trail_ids):
trails = []
for x in trail_ids:
url_search = self.es.search(index=self.index, q="trail_id:%d" % x,
fields="url", size=1000)["hits"]["hits"]
new_trail = {"trail_id": x, "urls": [], "domain_name":url_search[0]["_type"]}
for y in url_search:
new_trail["urls"].append(y["fields"]["url"][0])
new_trail.update({"urls_string": "\n".join(new_trail["urls"])})
trails.append(new_trail)
return trails
def get(self, request, format=None):
# TODO: catch all exception. At the very least, deal with 404 not found and
# connection refused exceptions.
# Temporarily remove exceptions for debugging.
try:
trail_ids = [x["key"] for x in self.es.search(index=self.index, body={
"aggs" : {
"trail_id" : {
"terms" : { "field" : "trail_id" }
}
}
})["aggregations"]["trail_id"]["buckets"]]
response = self.create_trails(trail_ids)
except ConnectionError as e:
raise OSError("Failed to connect to local elasticsearch instance.")
except NotFoundError:
raise DataWakeIndexUnavailable
return Response(response)
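    # Illustration (not in the original module): shape of the aggregation
    # response consumed above, assuming a populated "datawake" index:
    #   {"aggregations": {"trail_id": {"buckets": [{"key": 1, ...}, ...]}}}
    # create_trails() then issues one search per bucket "key" (trail_id).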
router = routers.DefaultRouter()
router.register(r"projects", ProjectViewSet)
router.register(r"crawls", CrawlViewSet)
router.register(r"crawl_models", CrawlModelViewSet)
router.register(r"seeds_list", SeedsListViewSet)
|
memex-explorer/memex-explorer
|
source/memex/rest.py
|
Python
|
bsd-2-clause
| 8,218
|
"""
Just to test database functions,
outside of Flask.
We want to open our MongoDB database,
insert some memos, and read them back
"""
import sys   # needed below for sys.exit()

import arrow
# Mongo database
from pymongo import MongoClient
import CONFIG
try:
dbclient = MongoClient(CONFIG.MONGO_URL)
db = dbclient.meetme
collection = db.dated
except:
print("Failure opening database. Is Mongo running? Correct password?")
sys.exit(1)
#
# Insertions: I commented these out after the first
# run successfully inserted them
#
record = { "type": "dated_memo",
"date": arrow.utcnow().naive,
"text": "This is a sample memo"
}
collection.insert(record)
record = { "type": "dated_memo",
"date": arrow.utcnow().replace(days=+1).naive,
"text": "Sample one day later"
}
collection.insert(record)
#
# Read database --- May be useful to see what is in there,
# even after you have a working 'insert' operation in the flask app,
# but they aren't very readable. If you have more than a couple records,
# you'll want a loop for printing them in a nicer format.
#
records = [ ]
for record in collection.find( { "type": "dated_memo" } ):
records.append(
{ "type": record['type'],
"date": arrow.get(record['date']).to('local').isoformat(),
"text": record['text']
})
print(records)
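#
# A nicer-format printing loop, as suggested above (illustration only; the
# field names match the records built in this script):
#
for record in records:
    print("{}: {}".format(record['date'], record['text']))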
|
ian-garrett/meetMe
|
db_trial.py
|
Python
|
artistic-2.0
| 1,359
|
# -*- coding: utf-8 -*-
"""
This is part of WebScout software
Docs EN: http://hack4sec.pro/wiki/index.php/WebScout_en
Docs RU: http://hack4sec.pro/wiki/index.php/WebScout
License: MIT
Copyright (c) Anton Kuzmin <http://anton-kuzmin.ru> (ru) <http://anton-kuzmin.pro> (en)
Class for common HTTP work
"""
import re
import os
import requests
from classes.Registry import Registry
from classes.kernel.WSException import WSException
class HttpMaxSizeException(BaseException):
""" Exception class for max-size error """
pass
class Http(object):
""" Class for common HTTP work """
verify = False
allow_redirects = False
headers = None
config = None
sess = None
noscan_content_types = []
scan_content_types = []
# Common for all class copies dict with errors
errors = {'maxsize': [], 'noscan_content_types': [], 'scan_content_types': []}
current_proxy = None
current_proxy_count = 0
every_request_new_session = False
def __init__(self, verify=False, allow_redirects=False, headers=None):
self.verify = verify
self.allow_redirects = allow_redirects
self.headers = {} if headers is None else headers
self.session = requests.Session()
def load_headers_from_file(self, _file):
if not os.path.exists(_file):
raise WSException("File '{0}' not exists".format(_file))
header_regex = re.compile('([a-zA-Z0-9\-]*): (.*)')
fh = open(_file, 'r')
for line in fh:
try:
if len(line.strip()):
parsed_header = header_regex.findall(line)[0]
self.headers[parsed_header[0]] = parsed_header[1]
except BaseException:
raise WSException("Wrong header line '{0}'".format(line.strip()))
fh.close()
def set_allowed_types(self, types):
""" Set allowed contnent types """
self.scan_content_types = types
def set_denied_types(self, types):
""" Set denied contnent types """
self.noscan_content_types = types
def change_proxy(self):
self.current_proxy = Registry().get('proxies').get_proxy()
def get_current_proxy(self):
""" Check current proxy, get next if need (max requests per proxy made) """
if self.current_proxy_count >= int(Registry().get('config')['main']['requests_per_proxy']):
self.current_proxy = None
self.current_proxy_count = 0
if not self.current_proxy:
#self.current_proxy = Registry().get('proxies').get_proxy()
self.change_proxy()
self.current_proxy_count += 1
return {
"http": "http://" + self.current_proxy,
"https": "http://" + self.current_proxy,
} if self.current_proxy else None
def get(self, url, verify=None, allow_redirects=None, headers=None):
""" HTTP GET request """
if self.every_request_new_session:
self.session = requests.Session()
verify = self.verify if verify is None else verify
allow_redirects = self.allow_redirects if allow_redirects is None else allow_redirects
headers = self.headers if headers is None else headers
if 'User-Agent' not in headers.keys():
headers['User-Agent'] = Registry().get('ua')
resp = self.session.get(
url,
verify=verify,
allow_redirects=allow_redirects,
headers=headers,
stream=True,
proxies=self.get_current_proxy()
)
if 'content-length' in resp.headers and \
int(resp.headers['content-length']) > int(Registry().get('config')['main']['max_size']):
self.errors['maxsize'].append(
"URL {0} has size {1} bytes, but limit in config - {2} bytes".
format(
url,
resp.headers['content-length'],
Registry().get('config')['main']['max_size']
)
)
resp = None
if resp and 'content-type' in resp.headers and (len(self.scan_content_types) or len(self.noscan_content_types)):
if len(self.noscan_content_types):
for _type in self.noscan_content_types:
if resp.headers['content-type'].lower().count(_type.lower()):
self.errors['noscan_content_types'].append(
"URL {0} have denied content type - {1}".format(url, _type)
)
resp = None
break
if resp and len(self.scan_content_types):
allowed = False
for _type in self.scan_content_types:
if resp.headers['content-type'].lower().count(_type.lower()):
allowed = True
break
if not allowed:
self.errors['scan_content_types'].append(
"URL {0} have not allowed content type - {1}".format(url, resp.headers['content-type'])
)
resp = None
return resp
def post(self, url, data=None, verify=None, allow_redirects=None, headers=None):
""" HTTP POST request """
if self.every_request_new_session:
self.session = requests.Session()
verify = self.verify if verify is None else verify
allow_redirects = self.allow_redirects if allow_redirects is None else allow_redirects
headers = self.headers if headers is None else headers
if 'User-Agent' not in headers.keys():
headers['User-Agent'] = Registry().get('ua')
resp = self.session.post(
url,
data=data,
verify=verify,
allow_redirects=allow_redirects,
headers=headers,
stream=True,
proxies=self.get_current_proxy()
)
if 'content-length' in resp.headers and \
int(resp.headers['content-length']) > int(Registry().get('config')['main']['max_size']):
self.errors['maxsize'].append(
"URL {0} has size {1} bytes, but limit in config - {2} bytes".
format(
url,
resp.headers['content-length'],
Registry().get('config')['main']['max_size']
)
)
resp = None
return resp
def head(self, url, verify=None, allow_redirects=None, headers=None):
""" HTTP HEAD request """
if self.every_request_new_session:
self.session = requests.Session()
verify = self.verify if verify is None else verify
allow_redirects = self.allow_redirects if allow_redirects is None else allow_redirects
headers = self.headers if headers is None else headers
if 'User-Agent' not in headers.keys():
headers['User-Agent'] = Registry().get('ua')
resp = self.session.head(
url,
verify=verify,
allow_redirects=allow_redirects,
headers=headers,
proxies=self.get_current_proxy()
)
if 'content-length' in resp.headers and \
int(resp.headers['content-length']) > int(Registry().get('config')['main']['max_size']):
self.errors['maxsize'].append(
"URL {0} has size {1} bytes, but limit in config - {2} bytes".\
format(
url,
resp.headers['content-length'],
Registry().get('config')['main']['max_size']
)
)
resp = None
return resp
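# Hedged usage sketch (not part of the original class): fetch a page with a
# custom header. The URL is a placeholder, and Registry() must already hold
# the 'ua', 'proxies' and 'config' entries this class reads, as set up by the
# surrounding application:
#
#   http = Http(verify=False, allow_redirects=True)
#   resp = http.get("http://example.com/", headers={"Accept": "text/html"})
#   if resp is not None:
#       print(resp.status_code)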
|
hack4sec/ws-cli
|
classes/Http.py
|
Python
|
mit
| 7,764
|
import six
if six.PY2:
from blogstrap import create_app # noqa
else:
from blogstrap.blogstrap import create_app # noqa
|
joehakimrahme/blogstrap
|
blogstrap/__init__.py
|
Python
|
apache-2.0
| 130
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import pygame  # used throughout this screen but missing from the original imports
from pylms.server import Server
from pylms.player import Player
from displayscreen import PiInfoScreen
# Class must have this name
class myScreen(PiInfoScreen):
refreshtime = 2
displaytime = 5
supportedsizes = [(320, 240)]
pluginname = "LMSInfo"
plugininfo = "Shows status of music playing on Logitech Media Server"
def setPluginVariables(self):
# Log on to LMS server
self.lmsserverIP = self.pluginConfig["LMSServerInfo"]["serverip"]
self.lmsserverTelnetPort = int(self.pluginConfig["LMSServerInfo"]["telnetport"])
self.lmsserverWebPort = int(self.pluginConfig["LMSServerInfo"]["webport"])
self.lmsServer = self.lmsLogon(self.lmsserverIP, self.lmsserverTelnetPort)
# Get player
self.squeezePlayers = self.getSqueezePlayers(self.lmsServer)
self.playerIndex = 0
self.squeezePlayer = self.getPlayer(self.squeezePlayers)
# Set a few variables for this screen
self.currenttrack = None
self.nexttrack = None
self.playlist = None
self.currentart = None
self.currenttrackname = None
self.currentartist = None
self.currentalbum = None
self.nexttracks = None
def lmsLogon(self, host, port):
try:
sc = Server(hostname=host, port=port)
sc.connect()
except:
sc = None
return sc
def getSqueezePlayers(self, server):
try:
sq= server.get_players()
except:
sq = None
return sq
def getPlayer(self, players):
if players:
try:
player = players[self.playerIndex]
except:
self.playerIndex = 0
player = players[self.playerIndex]
else:
player = None
return player
def nextPlayer(self):
self.squeezePlayers = self.getSqueezePlayers(self.lmsServer)
self.playerIndex = (self.playerIndex + 1) % len(self.squeezePlayers)
self.squeezePlayer = self.squeezePlayers[self.playerIndex]
# Only refresh all data if track has changed
def currentTrackChanged(self, playlist, pos):
track = pos
try:
if playlist[track]['id'] == self.currenttrack:
return False
else:
return True
except:
return True
# Has the next track changed
def nextTrackChanged(self, playlist, pos):
try:
if (playlist[pos + 1]['id'] == self.nexttracks[0]['id']) or (playlist[pos + 2]['id'] == self.nexttracks[1]['id']):
return False
else:
return True
except:
return True
# Get current track information
def getCurrentTrackInfo(self, playlist, pos):
self.currenttrack = int(playlist[pos]['id'])
self.currentart = pygame.transform.scale(self.LoadImageFromUrl("http://%s:%d/music/current/cover.jpg" % (self.lmsserverIP, self.lmsserverWebPort), True),(150,150))
self.currenttrackname = self.squeezePlayer.get_track_title_unicode()
self.currentartist = self.squeezePlayer.get_track_artist_unicode()
self.currentalbum = self.squeezePlayer.get_track_album_unicode()
def getNextTrackInfo(self, playlist, pos):
ntracks = []
for i in range(2):
try:
trackdetail = {}
trackdetail['id'] = int(playlist[pos+i+1]['id'])
trackdetail['trackname'] = str(playlist[pos+i+1]['title'])
trackdetail['artist'] = str(playlist[pos+i+1]['artist'])
ntracks.append(trackdetail)
except:
continue
return ntracks
def Button1Click(self):
# Toggles play/pause on player
self.squeezePlayer.toggle()
def Button2Click(self):
# Previous track
self.squeezePlayer.prev()
def Button3Click(self):
# Next track
self.squeezePlayer.next()
def Button4Click(self):
# Change player
self.nextPlayer()
# Main function - returns screen to main script
def showScreen(self):
self.surface.fill([0,0,0])
myfont = pygame.font.SysFont(None, 18)
mybigfont = pygame.font.SysFont(None, 20)
mysmallfont = pygame.font.SysFont(None, 12)
if self.lmsServer == None:
try:
self.lmsServer = self.lmsLogon(self.lmsserverIP, self.lmsserverTelnetPort)
self.squeezePlayers = self.getSqueezePlayers(self.lmsServer)
self.squeezePlayer = self.getPlayer(self.squeezePlayers)
finally:
errortext = pygame.font.SysFont("freesans",10).render("Logitech Media Server not found on %s." % (self.lmsserverIP),1,(255,255,255))
errorrect = errortext.get_rect()
errorrect.centerx = self.surface.get_rect().centerx
errorrect.centery = self.surface.get_rect().centery
self.surface.blit(errortext,errorrect)
elif self.squeezePlayer == None:
try:
                self.lmsServer = self.lmsLogon(self.lmsserverIP, self.lmsserverTelnetPort)
                # getSqueezePlayer() does not exist: fetch the player list and
                # pick one with getPlayer()
                self.squeezePlayers = self.getSqueezePlayers(self.lmsServer)
                self.squeezePlayer = self.getPlayer(self.squeezePlayers)
finally:
errortext = pygame.font.SysFont("freesans",10).render("No Squeezeplayers connected to server on %s." % (self.lmsserverIP),1,(255,255,255))
errorrect = errortext.get_rect()
errorrect.centerx = self.surface.get_rect().centerx
errorrect.centery = self.surface.get_rect().centery
self.surface.blit(errortext,errorrect)
else:
if len(self.lmsServer.get_players()) == 0:
self.squeezePlayer = None
else:
try:
self.playlist = self.squeezePlayer.playlist_get_info()
self.playlistposition = int(self.squeezePlayer.playlist_get_position())
except:
self.squeezePlayer = None
else:
if self.currentTrackChanged(self.playlist, self.playlistposition):
self.getCurrentTrackInfo(self.playlist,self.playlistposition)
if self.nextTrackChanged(self.playlist, self.playlistposition):
updatenext = True
self.nexttracks=self.getNextTrackInfo(self.playlist, self.playlistposition)
else:
updatenext = False
# Player name
playername = myfont.render(self.squeezePlayer.get_name(), 1, (255,255,255))
self.surface.blit(playername, (310 - playername.get_rect()[2], 10))
# Now playing...
nowtext = myfont.render("Now playing...", 1, (255,255,255))
self.surface.blit(nowtext, (170, 60))
# get artwork
self.surface.blit(self.currentart, (10,40))
# get artist name
artisttext = mybigfont.render(self.currentartist, 1, [255,255,255])
self.surface.blit(artisttext, (170,85))
# get track name
tracktext = myfont.render(self.currenttrackname, 1, [255,255,255])
self.surface.blit(tracktext, (170,110))
# get track album
albumtext = myfont.render(self.currentalbum, 1, [255,255,255])
self.surface.blit(albumtext, (170,135))
# Show progress bar
elapse = self.squeezePlayer.get_time_elapsed()
duration = self.squeezePlayer.get_track_duration()
try:
trackposition = elapse / duration
except:
trackposition = 0
self.surface.blit(self.showProgress(trackposition,(130,10),(255,255,255),(0,0,144),(0,0,0)),(170,180))
elapsem, elapses = divmod(int(elapse),60)
elapseh, elapsem = divmod(elapsem, 60)
elapsestring = "%02d:%02d" % (elapsem, elapses)
                    # hours belong in front of mm:ss, not appended after it
                    if elapseh > 0: elapsestring = "%d:" % (elapseh) + elapsestring
durationm, durations = divmod(int(duration),60)
durationh, durationm = divmod(durationm, 60)
durationstring = "%02d:%02d" % (durationm, durations)
                    if durationh > 0: durationstring = "%d:" % (durationh) + durationstring
progressstring = "%s / %s" % (elapsestring, durationstring)
progresstext = myfont.render(progressstring, 1, (255,255,255))
self.surface.blit(progresstext, (170, 160))
# Next track info
# if len(self.nexttracks) > 0:
# if updatenext:
# self.nexttrackart = pygame.transform.scale(self.LoadImageFromUrl("http://%s:%d/music/%d/cover.jpg" % (self.lmsserverIP, self.lmsserverWebPort, self.nexttracks[0]['id'])),(75,75))
# nexttracklabel = mysmallfont.render("Next track: %s - %s" % (self.nexttracks[0]['artist'], self.nexttracks[0]['trackname']), 1, (255,255,255))
# self.surface.blit(self.nexttrackart, (20, 300))
# self.surface.blit(nexttracklabel, (105, 300))
# if len(self.nexttracks) > 1:
# if updatenext:
# self.xnexttrackart = pygame.transform.scale(self.LoadImageFromUrl("http://%s:%d/music/%d/cover.jpg" % (self.lmsserverIP, self.lmsserverWebPort, self.nexttracks[1]['id'])),(75,75))
# xnexttracklabel = mysmallfont.render("Next track: %s - %s" % (self.nexttracks[1]['artist'], self.nexttracks[1]['trackname']), 1, (255,255,255))
# self.surface.blit(self.xnexttrackart, (20, 385))
# self.surface.blit(xnexttracklabel, (105, 385))
# Scale our surface to the required screensize before sending back
scaled = pygame.transform.scale(self.surface,self.screensize)
self.screen.blit(scaled,(0,0))
return self.screen
|
elParaguayo/RPI-Info-Screen
|
plugins/lms/screen.py
|
Python
|
gpl-3.0
| 10,972
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""OAI Repository Regression Test Suite."""
__revision__ = "$Id$"
import unittest
import time
from invenio.config import CFG_SITE_URL, CFG_OAI_SLEEP
from invenio.testutils import make_test_suite, run_test_suite, \
test_web_page_content, merge_error_messages
class OAIRepositoryWebPagesAvailabilityTest(unittest.TestCase):
"""Check OAI Repository web pages whether they are up or not."""
    def test_oai_server_pages_availability(self):
"""oairepository - availability of OAI server pages"""
baseurl = CFG_SITE_URL + '/oai2d'
_exports = [#fast commands first:
'?verb=Identify',
'?verb=ListMetadataFormats',
# sleepy commands now:
'?verb=ListSets',
'?verb=ListRecords',
'?verb=GetRecord']
error_messages = []
for url in [baseurl + page for page in _exports]:
if url.endswith('Identify') or \
url.endswith('ListMetadataFormats'):
pass
else:
# some sleep required for verbs other than Identify
# and ListMetadataFormats, since oai2d refuses too
# frequent access:
time.sleep(CFG_OAI_SLEEP)
error_messages.extend(test_web_page_content(url,
expected_text=
'</OAI-PMH>'))
if error_messages:
self.fail(merge_error_messages(error_messages))
return
TEST_SUITE = make_test_suite(OAIRepositoryWebPagesAvailabilityTest)
if __name__ == "__main__":
run_test_suite(TEST_SUITE, warn_user=True)
|
valkyriesavage/invenio
|
modules/bibharvest/lib/oai_repository_regression_tests.py
|
Python
|
gpl-2.0
| 2,579
|
# -*- coding: utf-8 -*-
## Add path to library (just for examples; you do not need this)
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
import numpy as np
from PyQt4 import QtGui, QtCore
import pyqtgraph as pg
app = QtGui.QApplication([])
mw = QtGui.QMainWindow()
p = pg.PlotWidget()
mw.setCentralWidget(p)
c = p.plot(x=np.sin(np.linspace(0, 2*np.pi, 100)), y=np.cos(np.linspace(0, 2*np.pi, 100)))
a = pg.CurveArrow(c)
p.addItem(a)
mw.show()
anim = a.makeAnimation(loop=-1)
anim.start()
## Start Qt event loop unless running in interactive mode.
if sys.flags.interactive != 1:
app.exec_()
|
robertsj/poropy
|
pyqtgraph/examples/test_Arrow.py
|
Python
|
mit
| 641
|
#!/usr/bin/env python2.7
# Standard Library
import argparse
import collections
import os
import re
import xml.etree.cElementTree
# Third Party
import imdb
import imdb.helpers
import mediawiki
import sqlalchemy
import sqlalchemy.ext.declarative
import sqlalchemy.ext.hybrid
import sqlalchemy.orm
CANON = {'"Star Trek" (1966)': 'TOS',
'"Star Trek: The Next Generation" (1987)': 'TNG',
'"Star Trek: Voyager" (1995)': 'VOY',
'"Star Trek: Enterprise" (2001)': 'ENT',
'"Star Trek" (1973)': 'TAS',
'"Star Trek: Deep Space Nine" (1993)': 'DS9',
'Star Trek: The Motion Picture (1979)': '',
'Star Trek II: The Wrath of Khan (1982)': '',
'Star Trek III: The Search for Spock (1984)': '',
'Star Trek IV: The Voyage Home (1986)': '',
'Star Trek V: The Final Frontier (1989)': '',
'Star Trek VI: The Undiscovered Country (1991)': '',
'Star Trek: Generations (1994)': '',
'Star Trek: First Contact (1996)': '',
'Star Trek: Insurrection (1998)': '',
'Star Trek: Nemesis (2002)': '',
'Star Trek (2009)': '',
'Star Trek Into Darkness (2013)': ''}
BASE = sqlalchemy.ext.declarative.declarative_base()
association_table = sqlalchemy.Table('character_appearance', BASE.metadata,
sqlalchemy.Column('character_id', sqlalchemy.Integer,
sqlalchemy.ForeignKey('character.id')),
sqlalchemy.Column('appearance_id', sqlalchemy.Integer,
sqlalchemy.ForeignKey('appearance.id'))
)
class Character(BASE):
__tablename__ = 'character'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
role_id = sqlalchemy.Column(sqlalchemy.String, unique=True)
name = sqlalchemy.Column(sqlalchemy.String)
article = sqlalchemy.orm.relationship('Article', uselist=False,
backref='character')
appearances = sqlalchemy.orm.relationship('Appearance',
secondary=association_table,
backref='characters')
def __repr__(self):
return self.name
class Appearance(BASE):
__tablename__ = 'appearance'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
title = sqlalchemy.Column(sqlalchemy.String)
kind = sqlalchemy.Column(sqlalchemy.String)
article = sqlalchemy.orm.relationship('Article', uselist=False,
backref='appearance')
def __repr__(self):
return self.title
class Article(BASE):
__tablename__ = 'article'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
title = sqlalchemy.Column(sqlalchemy.String)
text = sqlalchemy.Column(sqlalchemy.String)
character_id = sqlalchemy.Column(sqlalchemy.Integer,
sqlalchemy.ForeignKey('character.id'))
appearance_id = sqlalchemy.Column(sqlalchemy.Integer,
sqlalchemy.ForeignKey('appearance.id'))
def __repr__(self):
return self.title
@sqlalchemy.ext.hybrid.hybrid_property
def html(self):
return mediawiki.wiki2html(self.text or '', False)
class SixDegrees(object):
def __init__(self):
database_uri = os.getenv('DATABASE_URI', 'sqlite:///star_trek.sqlite')
engine = sqlalchemy.create_engine(database_uri)
BASE.metadata.create_all(engine)
self.session = sqlalchemy.orm.sessionmaker(bind=engine)()
self._role_names = None
def load_imdb(self):
access = imdb.IMDb()
self._role_names = collections.defaultdict(list)
for series in access.search_movie('Star Trek', results=30):
title = series['long imdb title']
kind = series['kind']
if title in CANON:
if kind == 'tv series':
title = CANON[title]
access.update(series, 'episodes')
for episode in imdb.helpers.sortedEpisodes(series):
access.update(episode)
self._parse_episode(episode, title, kind)
elif kind == 'movie':
appearance = self._get_appearance(title, kind)
movie = access.get_movie(series.movieID)
for actor in movie['cast']:
self._add_actor(actor, appearance)
self.session.add(appearance)
self.session.commit()
for role_id in self._role_names:
counter = collections.Counter(self._role_names[role_id])
query = self.session.query(Character)
character = query.filter_by(role_id=role_id).first()
character.name = counter.most_common(1)[0][0]
self.session.commit()
def _parse_episode(self, episode, series_title, series_kind):
episode_title = u'{} ({})'.format(episode['title'], series_title)
appearance = self._get_appearance(episode_title, series_kind)
for actor in episode['cast']:
self._add_actor(actor, appearance)
self.session.add(appearance)
self.session.commit()
def _add_actor(self, actor, appearance):
if isinstance(actor.currentRole, imdb.utils.RolesList):
for role in actor.currentRole:
character = self._add_character(role)
if character:
appearance.characters.append(character)
else:
character = self._add_character(actor.currentRole)
if character:
appearance.characters.append(character)
def _add_character(self, role):
generic = ('Enterprise Computer', 'Ensign', 'Starfleet Officer')
if role.get('name') and role.getID() and role['name'] not in generic:
self._role_names[role.getID()].append(role['name'])
query = self.session.query(Character)
character = query.filter_by(role_id=role.getID()).first()
if not character:
character = Character(role_id=role.getID(), name=role['name'])
self.session.add(character)
self.session.commit()
return character
def _get_appearance(self, title, kind):
query = self.session.query(Appearance)
appearance = query.filter_by(title=title).first()
if not appearance:
appearance = Appearance(title=title, kind=kind)
self.session.add(appearance)
return appearance
def load_ma(self):
name_pattern = re.compile(r'(?:\{.*\}){0,1}(.*)')
slug_pattern = re.compile(r'[\W_]+')
slugs = self.slugs
xml_dir = os.path.dirname(os.path.realpath(__file__))
xml_path = os.path.join(xml_dir, 'memory_alpha.xml')
events = ('start', 'end')
etree = xml.etree.cElementTree.iterparse(xml_path, events=events)
level = -1
for event, elem in etree:
name = name_pattern.search(elem.tag).groups()[0]
if event == 'start':
level += 1
if level == 2 and event == 'end' and name == 'title':
title = elem.text
if level == 3 and event == 'end' and name == 'text':
text = elem.text
if level == 1 and event == 'end':
if name == 'page':
slug = slug_pattern.sub('', title).lower()
if slug in slugs:
obj = slugs[slug]
article = Article(title=title, text=text)
if isinstance(obj, Appearance):
article.appearance = obj
else:
article.character = obj
self.session.add(article)
self.session.commit()
if event == 'end':
level -= 1
elem.clear()
@property
def slugs(self):
pattern = re.compile(r'[\W_]+')
slugs = {}
for appearance in self.session.query(Appearance).all():
slug = pattern.sub(u'', appearance.title.split('(')[0]).lower()
slug = slug.replace(u'part1', u'parti').replace(u'part2', u'partii')
if appearance.kind == 'movie':
slugs[slug] = appearance
else:
slug += u'episode'
slugs[slug] = appearance
if 'parti' in slug:
slugs[slug.replace(u'parti', u'')] = appearance
for character in self.session.query(Character).all():
slugs[pattern.sub('', character.name).lower()] = character
return slugs
def get_character(self, name):
name_filter = '%{}%'.format(name.replace(' ', '%').replace('.', ''))
query = self.session.query(Character)
return query.filter(Character.name.like(name_filter)).first()
def all_characters(self):
return self.session.query(Character).all()
def find_connection(self, start_name, end_name):
link = self._shortest_link(start_name, end_name)
if not link:
return link
complete_link = []
for count, character in enumerate(link):
if count:
appearance = self._direct_link(link[count - 1], character)
complete_link.append((appearance, character))
return complete_link
def _shortest_link(self, start_name, end_name):
if start_name == end_name:
return []
start_character = self.get_character(start_name)
end_character = self.get_character(end_name)
        if not (start_character and end_character):
            # give up when either character is unknown instead of
            # crashing on a missing record below
            return []
investigated = [end_character]
to_investigate = [[end_character]]
distance = 0
while to_investigate:
character_link = to_investigate[0]
character = character_link[distance]
for appearance in character.appearances:
for co_star in appearance.characters:
if co_star not in investigated:
if co_star == start_character:
character_link.append(co_star)
return character_link
                        else:
investigated.append(co_star)
full_link = character_link[:]
full_link.append(co_star)
to_investigate.append(full_link)
to_investigate.remove(character_link)
if _minimum(to_investigate) == distance + 2:
distance += 1
return []
def _direct_link(self, start_character, end_character):
for appearance in start_character.appearances:
if end_character in appearance.characters:
return appearance
def find_article(self, obj):
pass
def _minimum(L):
    """Return the length of the shortest sublist (infinity if L is empty)."""
    smallest = float('inf')
    for sublist in L:
        smallest = min(smallest, len(sublist))
    return smallest
def play():
while True:
start_name = 'Captain James T. Kirk'
message = 'Please enter character name (or press Enter to exit): '
        end_name = raw_input(message)
        if end_name == '':
            print 'Thank you for playing!'
            break
        character = six.get_character(end_name)
        if character is None:
            print '{} was not found.'.format(end_name)
        elif character.name == start_name:
print u'{} has a Kirk Number of 0.'.format(character)
else:
full_link = six.find_connection(start_name, end_name)
if len(full_link):
message = '{} has a Kirk Number of {}'
print message.format(end_name, len(full_link))
previous = character
for sublink in full_link:
appearance, character = sublink
message = '\t{} was in {} with {}.'
print message.format(previous, appearance, character)
previous = character
else:
print '{} has a Kirk Number of Infinity.'.format(end_name)
print '\n',
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Six Degrees of Captain Kirk')
parser.add_argument('--load_data', action='store_true', help='Query IMDB')
args = parser.parse_args()
six = SixDegrees()
if args.load_data:
six.load_imdb()
six.load_ma()
else:
play()
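# Typical invocation (a sketch; star_trek.sqlite is the DATABASE_URI
# default set in SixDegrees.__init__ above):
#
#   $ ./oracle.py --load_data   # one-off: build the database from IMDb
#   $ ./oracle.py               # then play the Kirk Number game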
|
brolewis/oracle_of_kirk
|
oracle.py
|
Python
|
bsd-3-clause
| 12,551
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Functions to convert numbers to words, that is, to a number's name in
# Spanish, following the rules explained in the Wikipedia article
# https://es.wikipedia.org/wiki/Anexo:Nombres_de_los_n%C3%BAmeros_en_espa%C3%B1ol.
import sys
import math
import re
# an array naming the units,
# tens and hundreds
nomtrio = [["uno ", "dos ", "tres ", "cuatro ", "cinco ", "seis ",
"siete ", "ocho ", "nueve "],
["dieci", "veinti", "treinta y ", "cuarenta y ",
"cincuenta y ", "sesenta y ", "setenta y ",
"ochenta y ","noventa y "],
["ciento ", "doscientos ", "trescientos ",
"cuatrocientos ", "quinientos ", "seiscientos ",
"setecientos ", "ochocientos ", "novecientos "]]
# these are exception numbers that can be
# combined within three digits.
exc = {10: "diez ", 11: "once ", 12: "doce ", 13: "trece ",
14: "catorce ", 15: "quince ", 16: "dieciséis ",
20: "veinte ", 22: "veintidós ", 23: "veintitrés ",
26: "veintiséis ", 30: "treinta ", 40: "cuarenta ",
50: "cincuenta ", 60: "sesenta ", 70: "setenta ",
80: "ochenta ", 90: "noventa ", 100: "cien "}
# These are the names for the groupings;
# each name denotes a level (millions, billions, ...).
llones = ["mi", "bi", "tri", "cuatri", "quinti", "sexti", "septi",
          "octi", "noni"]
# A function to convert a triplet of
# units, tens and hundreds to words.
# It is not meant to be used directly;
# it exists to be called by numALetras.
# lvalor: three-digit natural number to convert.
def terna(lvalor):
    """Convert a three-digit natural number (units, tens, hundreds) into words."""
    loc = 0
    vale = 0
    retorno = ""
    if (type(lvalor) != int) or (lvalor > 999):
        raise ValueError("terna takes a natural number of at"
                         + " most 3 digits.")
    if (lvalor in exc.keys()):
        # is the whole value an exception?
        return exc[lvalor]
    elif (int(lvalor % 100) in exc.keys()):
        # do the tens and units form an exception?
        retorno = exc[int(lvalor % 100)]
        lvalor = lvalor // 100
        loc = 2
    elif (int(lvalor % 10) in exc.keys()):
        # do the units form an exception?
        retorno = exc[int(lvalor % 10)]
        lvalor = lvalor // 10
        loc = 1
    # once the exceptions are handled, continue with the
    # standard names
while (lvalor > 0):
vale = int(lvalor % 10)
lvalor = lvalor // 10
if (vale > 0):
retorno = nomtrio[loc][vale - 1] + retorno
loc += 1
return retorno
# Converts numbers to words.
# numeroLetra: the natural number to convert.
def numALetras(numeroLetra):
    """Convert a number to words, that is, to its name in Spanish."""
pos = 0
vala = 0; valb = 0
temp = ""
retorno = ""
if (((type(numeroLetra) == str) and (numeroLetra.isdigit()))
or (type(numeroLetra) == float)):
lvalor = int(numeroLetra)
elif (type(numeroLetra) == int):
lvalor = numeroLetra
else:
        raise TypeError("Incorrect data type: an integer, a float, "
                        + "or a string containing only numeric "
                        + "characters is required.")
    # is it one of the special cases?
    if (lvalor == 0):
        return "cero"
    # if not, use the algorithm
while (lvalor > 0):
        # take two groups of 3 digits;
        # vala takes the units, tens and hundreds
        vala = int(lvalor % 1000)
        lvalor = lvalor // 1000
        # valb takes the three digits of the thousands
valb = int(lvalor % 1000)
lvalor = lvalor // 1000
if ((pos > 0) and (vala == 1) and (valb == 0)
and (lvalor == 0)):
            # if this iteration is above 0 (the -llones levels) and
            # vala is 1 and valb is 0 ( 000 001 XXX ... )
            # and lvalor is already 0 (no digits left)
retorno = llones[pos - 1] + "llón " + retorno
elif ((pos > 0) and ((vala + valb) > 0)):
            # otherwise, if we are in the -llones levels and vala or
            # valb has a value, it is more than one "llón"
retorno = llones[pos - 1] + "llones " + retorno
if ((pos > 0) and (int(vala % 10) == 1)
and not (int(vala % 100) == 11)):
            # if we are in the -llones levels and vala ends in 1 (and
            # is not an 11), trim "uno" to "un": "un millón XXX"
retorno = terna(vala)[:-2] + " " + retorno
elif (vala > 0):
            # otherwise, if vala has a value, look up the name of the
            # triplet
retorno = terna(vala) + retorno
if (valb == 1):
retorno = "mil " + retorno
elif ((int(valb % 10) == 1) and ((valb % 100) != 11)):
            # if the thousands digits have a 1 in the units and it is
            # not an eleven, trim the "uno" so that it reads "un"
temp = terna(valb)
retorno = temp[:-2] + " mil " + retorno
elif (valb > 1):
            # anything else in the thousands digits takes "mil"
retorno = terna(valb) + "mil " + retorno
pos += 1
return retorno[:-1]
# Converts a number expressing an amount of money with cents
# into words.
# cantidad: the amount of money to convert.
# nmoneda: the name of the currency.
# ncentavo: the name of the cents.
def monedaALetras(cantidad, nmoneda, ncentavo):
    """Convert an amount of money to words, that is, its name in Spanish."""
entera = 0
decimal = 0.0
expdecimal = re.compile("^(\\d*\\.?\\d+|\\d+\\.?\\d*)$")
retorno = ""
if (((type(cantidad) == str) and expdecimal.match(cantidad))
or (type(cantidad) == float) or (type(cantidad) == int)):
cantidad = float(cantidad)
else:
        raise TypeError("Incorrect data type: an integer, a float, "
                        + "or a string representing a decimal "
                        + "number is required.")
decimal = cantidad % 1
entera = cantidad // 1
retorno = "{} {}".format(numALetras(entera), nmoneda)
if decimal != 0.0:
decimal = int(decimal * 100)
retorno += "con {} {}".format(numALetras(decimal), ncentavo)
retorno = retorno.replace("uno", "un")
return retorno
# Converts a quantity with a decimal part into words.
# cantidad: string representing the quantity to convert.
def decimalALetras(cantidad):
entera = ""
decimal = ""
expdecimal = re.compile("^(\\d*\\.?\\d+|\\d+\\.?\\d*)$")
retorno = ""
if (((type(cantidad) == str) and expdecimal.match(cantidad))
or (type(cantidad) == float) or (type(cantidad) == int)):
cantidadstr = str(cantidad)
else:
raise TypeError("Tipo de dato incorrecto, se requiere un "
+ "entero, un flotante, o una cadena que "
+ "represente un número decimal.")
    # if it contains a dot, it has a decimal part
lvalor = cantidadstr.find(".")
if lvalor != -1:
        # split the decimal part from the integer part
entera = cantidadstr[:lvalor]
decimal = cantidadstr[lvalor + 1:]
if len(decimal) > 2:
decimal = decimal[:2]
elif len(decimal) == 1:
decimal = decimal + "0"
retorno = numALetras(entera) + " punto " + numALetras(decimal)
else:
retorno = numALetras(cantidadstr)
return retorno
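# A quick sanity check of the converters above (a sketch; the expected
# names in the trailing comments follow the standard Spanish rules and
# are easy to verify by hand):
if __name__ == "__main__":
    print(numALetras(101))                            # ciento uno
    print(numALetras(1000000))                        # un millón
    print(monedaALetras(12.50, "pesos", "centavos"))  # doce pesos con cincuenta centavos
    print(decimalALetras("3.14"))                     # tres punto catorce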
|
programingfrik/NumerosLetras
|
python/numLetras.py
|
Python
|
mit
| 7,652
|
#!/usr/bin/env python3
import csv
from pyhpeimc.auth import *
from pyhpeimc.plat.device import *
IMC_IP="192.168.20.21"
IMC_PROTO="http://"
IMC_PORT="8080"
IMC_USER="imcrs"
IMC_PWD="imcrs"
authIMC = IMCAuth(IMC_PROTO, IMC_IP, IMC_PORT, IMC_USER, IMC_PWD)
def export_vendors(auth,csvFile,delimiter):
vendors = get_system_vendors(auth.creds, auth.url)
write_json_to_csv(vendors,csvFile,delimiter)
def export_categories(auth,csvFile,delimiter):
categories = get_system_category(auth.creds, auth.url)
write_json_to_csv(categories,csvFile,delimiter)
def export_models(auth,csvFile,delimiter):
models = get_system_device_models(auth.creds, auth.url)
write_json_to_csv(models,csvFile,delimiter)
def export_series(auth,csvFile,delimiter):
series = get_system_series(auth.creds, auth.url)
write_json_to_csv(series,csvFile,delimiter)
def write_json_to_csv(rows, filename, delimiter):
    # rows: a list of flat dicts sharing the same keys
    with open(filename, "w") as csvfile:
        keys = rows[0].keys()
        csv_file = csv.DictWriter(csvfile, delimiter=delimiter, fieldnames=keys, lineterminator='\n')
        csv_file.writeheader()
        csv_file.writerows(rows)
def get_system_series(auth, url):
"""Takes string no input to issue RESTUL call to HP IMC\n
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: list of dictionaries where each dictionary represents a single device category
:rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.device import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> series = get_system_series(auth.creds, auth.url)
>>> type(series) is list
True
"""
get_system_series_url = '/imcrs/plat/res/series?managedOnly=false&start=0&size=10000&orderBy=id&desc=false&total=false'
f_url = url + get_system_series_url
# creates the URL using the payload variable as the contents
r = requests.get(f_url, auth=auth, headers=HEADERS)
# r.status_code
try:
if r.status_code == 200:
system_series = (json.loads(r.text))
return system_series['deviceSeries']
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + " get_dev_series: An Error has occured"
export_vendors(authIMC,"vendors.csv",";")
export_categories(authIMC,"categories.csv",";")
export_models(authIMC,"models.csv",";")
export_series(authIMC,"series.csv",";")
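# Running this module as a script writes four semicolon-delimited CSV
# exports (vendors.csv, categories.csv, models.csv, series.csv) to the
# current working directory.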
|
HPENetworking/HPEIMCUtils
|
PythonUtilities/Export_Conf/exportDeviceDefinition.py
|
Python
|
apache-2.0
| 2,519
|
"""Support for Acrobat Forms in ReportLab documents
This module is somewhat experimental at this time.
Includes basic support for
textfields,
select fields (drop down lists), and
check buttons.
The public interface consists of functions at the moment.
At some later date these operations may be made into canvas
methods. (comments?)
The ...Absolute(...) functions position the fields with respect
to the absolute canvas coordinate space -- that is, they do not
respect any coordinate transforms in effect for the canvas.
The ...Relative(...) functions position ONLY THE LOWER LEFT
CORNER of the field using the coordinate transform in effect for
the canvas. THIS WILL ONLY WORK CORRECTLY FOR TRANSLATED COORDINATES
-- THE SHAPE, SIZE, FONTSIZE, AND ORIENTATION OF THE FIELD WILL NOT BE AFFECTED
BY SCALING, ROTATION, SKEWING OR OTHER NON-TRANSLATION COORDINATE
TRANSFORMS.
Please note that all field names (titles) in a given document must be unique.
Textfields and select fields only support the "base 14" canvas fonts
at this time.
See individual function docstrings below for more information.
The function test1(...) generates a simple test file.
THIS CONTRIBUTION WAS COMMISSIONED BY REPORTLAB USERS
WHO WISH TO REMAIN ANONYMOUS.
"""
### NOTE: MAKE THE STRING FORMATS DYNAMIC IN PATTERNS TO SUPPORT ENCRYPTION XXXX
import string
from reportlab.pdfbase.pdfdoc import PDFString, PDFStream, PDFDictionary, PDFName, PDFObject
from reportlab.lib.colors import obj_R_G_B
#==========================public interfaces
def textFieldAbsolute(canvas, title, x, y, width, height, value="", maxlen=1000000, multiline=0):
"""Place a text field on the current page
with name title at ABSOLUTE position (x,y) with
dimensions (width, height), using value as the default value and
maxlen as the maximum permissible length. If multiline is set make
it a multiline field.
"""
theform = getForm(canvas)
return theform.textField(canvas, title, x, y, x+width, y+height, value, maxlen, multiline)
def textFieldRelative(canvas, title, xR, yR, width, height, value="", maxlen=1000000, multiline=0):
"same as textFieldAbsolute except the x and y are relative to the canvas coordinate transform"
(xA, yA) = canvas.absolutePosition(xR,yR)
return textFieldAbsolute(canvas, title, xA, yA, width, height, value, maxlen, multiline)
def buttonFieldAbsolute(canvas, title, value, x, y, width=16.7704, height=14.907):
"""Place a check button field on the current page
with name title and default value value (one of "Yes" or "Off")
at ABSOLUTE position (x,y).
"""
theform = getForm(canvas)
return theform.buttonField(canvas, title, value, x, y, width=width, height=height)
def buttonFieldRelative(canvas, title, value, xR, yR, width=16.7704, height=14.907):
"same as buttonFieldAbsolute except the x and y are relative to the canvas coordinate transform"
(xA, yA) = canvas.absolutePosition(xR,yR)
return buttonFieldAbsolute(canvas, title, value, xA, yA, width=width, height=height)
def selectFieldAbsolute(canvas, title, value, options, x, y, width, height):
"""Place a select field (drop down list) on the current page
with name title and
with options listed in the sequence options
default value value (must be one of options)
at ABSOLUTE position (x,y) with dimensions (width, height)."""
theform = getForm(canvas)
theform.selectField(canvas, title, value, options, x, y, x+width, y+height)
def selectFieldRelative(canvas, title, value, options, xR, yR, width, height):
"same as textFieldAbsolute except the x and y are relative to the canvas coordinate transform"
(xA, yA) = canvas.absolutePosition(xR,yR)
return selectFieldAbsolute(canvas, title, value, options, xA, yA, width, height)
#==========================end of public interfaces
from reportlab.pdfbase.pdfpattern import PDFPattern, PDFPatternIf
def getForm(canvas):
"get form from canvas, create the form if needed"
try:
return canvas.AcroForm
except AttributeError:
theform = canvas.AcroForm = AcroForm()
# install the form in the document
d = canvas._doc
cat = d._catalog
cat.AcroForm = theform
return theform
class AcroForm(PDFObject):
def __init__(self):
self.fields = []
def textField(self, canvas, title, xmin, ymin, xmax, ymax, value="", maxlen=1000000, multiline=0):
# determine the page ref
doc = canvas._doc
page = doc.thisPageRef()
# determine text info
R, G, B = obj_R_G_B(canvas._fillColorObj)
#print "rgb", (R,G,B)
        font = canvas._fontname
        fontsize = canvas._fontsize
field = TextField(title, value, xmin, ymin, xmax, ymax, page, maxlen,
font, fontsize, R, G, B, multiline)
self.fields.append(field)
canvas._addAnnotation(field)
def selectField(self, canvas, title, value, options, xmin, ymin, xmax, ymax):
# determine the page ref
doc = canvas._doc
page = doc.thisPageRef()
# determine text info
R, G, B = obj_R_G_B(canvas._fillColorObj)
#print "rgb", (R,G,B)
        font = canvas._fontname
        fontsize = canvas._fontsize
field = SelectField(title, value, options, xmin, ymin, xmax, ymax, page,
font=font, fontsize=fontsize, R=R, G=G, B=B)
self.fields.append(field)
canvas._addAnnotation(field)
def buttonField(self, canvas, title, value, xmin, ymin, width=16.7704, height=14.907):
# determine the page ref
doc = canvas._doc
page = doc.thisPageRef()
field = ButtonField(title, value, xmin, ymin, page, width=width, height=height)
self.fields.append(field)
canvas._addAnnotation(field)
def format(self, document):
from reportlab.pdfbase.pdfdoc import PDFArray
proxy = PDFPattern(FormPattern,
Resources=getattr(self,'resources',None) or FormResources(),
NeedAppearances=getattr(self,'needAppearances','false'),
fields=PDFArray(self.fields), SigFlags=getattr(self,'sigFlags',0))
return proxy.format(document)
FormPattern = [
'<<\r\n',
'/NeedAppearances ',['NeedAppearances'],'\r\n'
'/DA ', PDFString('/Helv 0 Tf 0 g '), '\r\n',
'/DR ',["Resources"],'\r\n',
'/Fields ', ["fields"],'\r\n',
PDFPatternIf('SigFlags',['\r\n/SigFlags ',['SigFlags']]),
'>>'
]
def FormFontsDictionary():
from reportlab.pdfbase.pdfdoc import PDFDictionary
fontsdictionary = PDFDictionary()
fontsdictionary.__RefOnly__ = 1
for fullname, shortname in FORMFONTNAMES.items():
fontsdictionary[shortname] = FormFont(fullname, shortname)
fontsdictionary["ZaDb"] = PDFPattern(ZaDbPattern)
return fontsdictionary
def FormResources():
return PDFPattern(FormResourcesDictionaryPattern,
Encoding=PDFPattern(EncodingPattern,PDFDocEncoding=PDFPattern(PDFDocEncodingPattern)),
Font=FormFontsDictionary())
ZaDbPattern = [
' <<'
' /BaseFont'
' /ZapfDingbats'
' /Name'
' /ZaDb'
' /Subtype'
' /Type1'
' /Type'
' /Font'
'>>']
FormResourcesDictionaryPattern = [
'<<',
' /Encoding ',
["Encoding"], '\r\n',
' /Font ',
["Font"], '\r\n',
'>>'
]
FORMFONTNAMES = {
"Helvetica": "Helv",
"Helvetica-Bold": "HeBo",
'Courier': "Cour",
'Courier-Bold': "CoBo",
'Courier-Oblique': "CoOb",
'Courier-BoldOblique': "CoBO",
'Helvetica-Oblique': "HeOb",
'Helvetica-BoldOblique': "HeBO",
'Times-Roman': "Time",
'Times-Bold': "TiBo",
'Times-Italic': "TiIt",
'Times-BoldItalic': "TiBI",
}
EncodingPattern = [
'<<',
' /PDFDocEncoding ',
["PDFDocEncoding"], '\r\n',
'>>',
]
PDFDocEncodingPattern = [
'<<'
' /Differences'
' ['
' 24'
' /breve'
' /caron'
' /circumflex'
' /dotaccent'
' /hungarumlaut'
' /ogonek'
' /ring'
' /tilde'
' 39'
' /quotesingle'
' 96'
' /grave'
' 128'
' /bullet'
' /dagger'
' /daggerdbl'
' /ellipsis'
' /emdash'
' /endash'
' /florin'
' /fraction'
' /guilsinglleft'
' /guilsinglright'
' /minus'
' /perthousand'
' /quotedblbase'
' /quotedblleft'
' /quotedblright'
' /quoteleft'
' /quoteright'
' /quotesinglbase'
' /trademark'
' /fi'
' /fl'
' /Lslash'
' /OE'
' /Scaron'
' /Ydieresis'
' /Zcaron'
' /dotlessi'
' /lslash'
' /oe'
' /scaron'
' /zcaron'
' 160'
' /Euro'
' 164'
' /currency'
' 166'
' /brokenbar'
' 168'
' /dieresis'
' /copyright'
' /ordfeminine'
' 172'
' /logicalnot'
' /.notdef'
' /registered'
' /macron'
' /degree'
' /plusminus'
' /twosuperior'
' /threesuperior'
' /acute'
' /mu'
' 183'
' /periodcentered'
' /cedilla'
' /onesuperior'
' /ordmasculine'
' 188'
' /onequarter'
' /onehalf'
' /threequarters'
' 192'
' /Agrave'
' /Aacute'
' /Acircumflex'
' /Atilde'
' /Adieresis'
' /Aring'
' /AE'
' /Ccedilla'
' /Egrave'
' /Eacute'
' /Ecircumflex'
' /Edieresis'
' /Igrave'
' /Iacute'
' /Icircumflex'
' /Idieresis'
' /Eth'
' /Ntilde'
' /Ograve'
' /Oacute'
' /Ocircumflex'
' /Otilde'
' /Odieresis'
' /multiply'
' /Oslash'
' /Ugrave'
' /Uacute'
' /Ucircumflex'
' /Udieresis'
' /Yacute'
' /Thorn'
' /germandbls'
' /agrave'
' /aacute'
' /acircumflex'
' /atilde'
' /adieresis'
' /aring'
' /ae'
' /ccedilla'
' /egrave'
' /eacute'
' /ecircumflex'
' /edieresis'
' /igrave'
' /iacute'
' /icircumflex'
' /idieresis'
' /eth'
' /ntilde'
' /ograve'
' /oacute'
' /ocircumflex'
' /otilde'
' /odieresis'
' /divide'
' /oslash'
' /ugrave'
' /uacute'
' /ucircumflex'
' /udieresis'
' /yacute'
' /thorn'
' /ydieresis'
' ]'
' /Type'
' /Encoding'
'>>']
def FormFont(BaseFont, Name):
from reportlab.pdfbase.pdfdoc import PDFName
return PDFPattern(FormFontPattern, BaseFont=PDFName(BaseFont), Name=PDFName(Name), Encoding=PDFPattern(PDFDocEncodingPattern))
FormFontPattern = [
'<<',
' /BaseFont ',
["BaseFont"], '\r\n',
' /Encoding ',
["Encoding"], '\r\n',
' /Name ',
["Name"], '\r\n',
' /Subtype '
' /Type1 '
' /Type '
' /Font '
'>>' ]
def resetPdfForm():
pass
from reportlab.rl_config import register_reset
register_reset(resetPdfForm)
resetPdfForm()
def TextField(title, value, xmin, ymin, xmax, ymax, page,
maxlen=1000000, font="Helvetica-Bold", fontsize=9, R=0, G=0, B=0.627, multiline=0):
from reportlab.pdfbase.pdfdoc import PDFString, PDFName
Flags = 0
if multiline:
Flags = Flags | (1<<12) # bit 13 is at position 12 :)
fontname = FORMFONTNAMES[font]
return PDFPattern(TextFieldPattern,
value=PDFString(value), maxlen=maxlen, page=page,
title=PDFString(title),
xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax,
fontname=PDFName(fontname), fontsize=fontsize, R=R, G=G, B=B, Flags=Flags)
TextFieldPattern = [
'<<'
' /DA'
' (', ["fontname"],' ',["fontsize"],' Tf ',["R"],' ',["G"],' ',["B"],' rg)'
' /DV ',
["value"], '\r\n',
' /F 4 /FT /Tx'
'/MK << /BC [ 0 0 0 ] >>'
' /MaxLen ',
["maxlen"], '\r\n',
' /P ',
["page"], '\r\n',
' /Rect '
' [', ["xmin"], " ", ["ymin"], " ", ["xmax"], " ", ["ymax"], ' ]'
'/Subtype /Widget'
' /T ',
["title"], '\r\n',
' /Type'
' /Annot'
' /V ',
["value"], '\r\n',
' /Ff ',
["Flags"],'\r\n',
'>>']
def SelectField(title, value, options, xmin, ymin, xmax, ymax, page,
font="Helvetica-Bold", fontsize=9, R=0, G=0, B=0.627):
#print "ARGS", (title, value, options, xmin, ymin, xmax, ymax, page, font, fontsize, R, G, B)
from reportlab.pdfbase.pdfdoc import PDFString, PDFName, PDFArray
if value not in options:
raise ValueError("value %s must be one of options %s" % (repr(value), repr(options)))
fontname = FORMFONTNAMES[font]
optionstrings = list(map(PDFString, options))
optionarray = PDFArray(optionstrings)
return PDFPattern(SelectFieldPattern,
Options=optionarray,
Selected=PDFString(value), Page=page,
Name=PDFString(title),
xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax,
fontname=PDFName(fontname), fontsize=fontsize, R=R, G=G, B=B)
SelectFieldPattern = [
'<< % a select list\r\n'
' /DA ',
' (', ["fontname"],' ',["fontsize"],' Tf ',["R"],' ',["G"],' ',["B"],' rg)\r\n',
#' (/Helv 12 Tf 0 g)\r\n',
' /DV ',
["Selected"],'\r\n',
' /F ',
' 4\r\n',
' /FT ',
' /Ch\r\n',
' /MK ',
' <<',
' /BC',
' [',
' 0',
' 0',
' 0',
' ]',
' /BG',
' [',
' 1',
' 1',
' 1',
' ]',
' >>\r\n',
' /Opt ',
["Options"],'\r\n',
' /P ',
["Page"],'\r\n',
'/Rect',
' [',["xmin"], " ", ["ymin"], " ", ["xmax"], " ", ["ymax"],
' ] \r\n',
'/Subtype',
' /Widget\r\n',
' /T ',
["Name"],'\r\n',
' /Type ',
' /Annot',
' /V ',
["Selected"],'\r\n',
'>>']
def ButtonField(title, value, xmin, ymin, page, width=16.7704, height=14.907):
if value not in ("Yes", "Off"):
raise ValueError("button value must be 'Yes' or 'Off': "+repr(value))
fontSize = (11.3086/14.907)*height
dx = (3.6017/16.7704)*width
dy = (3.3881/14.907)*height
return PDFPattern(ButtonFieldPattern,
Name=PDFString(title),
        xmin=xmin, ymin=ymin, xmax=xmin+width, ymax=ymin+height,
Hide=PDFPattern(['<< /S /Hide >>']),
APDOff=ButtonStream('0.749 g 0 0 %(width)s %(height)s re f\r\n' % vars(), width=width, height=height),
APDYes=ButtonStream('0.749 g 0 0 %(width)s %(height)s re f q 1 1 %(width)s %(height)s re W n BT /ZaDb %(fontSize)s Tf 0 g 1 0 0 1 %(dx)s %(dy)s Tm (4) Tj ET\r\n' % vars(),
width=width, height=height),
APNYes=ButtonStream('q 1 1 %(width)s %(height)s re W n BT /ZaDb %(fontSize)s Tf 0 g 1 0 0 1 %(dx)s %(dy)s Tm (4) Tj ET Q\r\n' % vars(),
width=width, height=height),
Value=PDFName(value),
Page=page)
ButtonFieldPattern = ['<< ',
'/AA',
' <<',
' /D ',
["Hide"],'\r\n',
#' %(imported.18.0)s',
' >> ',
'/AP ',
' <<',
' /D',
' <<',
' /Off ',
#' %(imported.40.0)s',
["APDOff"], '\r\n',
' /Yes ',
#' %(imported.39.0)s',
["APDYes"], '\r\n',
' >>', '\r\n',
' /N',
' << ',
' /Yes ',
#' %(imported.38.0)s',
["APNYes"], '\r\n',
' >>',
' >>\r\n',
' /AS ',
["Value"], '\r\n',
' /DA ',
PDFString('/ZaDb 0 Tf 0 g'), '\r\n',
'/DV ',
["Value"], '\r\n',
'/F ',
' 4 ',
'/FT ',
' /Btn ',
'/H ',
' /T ',
'/MK ',
' <<',
' /AC (\\376\\377)',
#PDFString('\376\377'),
' /CA ',
PDFString('4'),
' /RC ',
PDFString('\376\377'),
' >> ','\r\n',
'/P ',
["Page"], '\r\n',
'/Rect',
' [',["xmin"], " ", ["ymin"], " ", ["xmax"], " ", ["ymax"],
' ] ','\r\n',
'/Subtype',
' /Widget ',
'/T ',
["Name"], '\r\n',
'/Type',
' /Annot ',
'/V ',
["Value"], '\r\n',
' >>']
def buttonStreamDictionary(width=16.7704, height=14.907):
"everything except the length for the button appearance streams"
result = PDFDictionary()
result["SubType"] = "/Form"
result["BBox"] = "[0 0 %(width)s %(height)s]" % vars()
font = PDFDictionary()
font["ZaDb"] = PDFPattern(ZaDbPattern)
resources = PDFDictionary()
resources["ProcSet"] = "[ /PDF /Text ]"
resources["Font"] = font
result["Resources"] = resources
return result
def ButtonStream(content, width=16.7704, height=14.907):
result = PDFStream(buttonStreamDictionary(width=width,height=height), content)
result.filters = []
return result
|
EduPepperPDTesting/pepper2013-testing
|
lms/djangoapps/reportlab/pdfbase/pdfform.py
|
Python
|
agpl-3.0
| 16,301
|
import hglib
from flask import current_app
# Find the stack on which we want to store the Mercurial client.
# Starting with Flask 0.9, the _app_ctx_stack is the correct one,
# before that we need to use the _request_ctx_stack.
try:
from flask import _app_ctx_stack as stack
except ImportError:
from flask import _request_ctx_stack as stack
class Mercurial(object):
def __init__(self, app=None):
self.app = app
if app is not None:
self.init_app(app)
def init_app(self, app):
app.config.setdefault('MERCURIAL_REPOPATH', '/tmp')
def open_client(self):
return hglib.open(current_app.config['MERCURIAL_REPOPATH'])
@property
def client(self):
ctx = stack.top
if ctx is not None:
if not hasattr(ctx, 'hg_client'):
ctx.hg_client = self.open_client()
return ctx.hg_client
def commits(self, reverse=False):
if reverse:
return self.client.log(':')
else:
return self.client.log()
def commits_for_path(self, path, reverse=False):
if reverse:
return self.client.log(':', files=[path])
else:
return self.client.log(files=[path])
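# Minimal usage sketch (hedged; the repository path is illustrative):
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config['MERCURIAL_REPOPATH'] = '/path/to/repo'
#   hg = Mercurial(app)
#   with app.app_context():
#       for commit in hg.commits():
#           print(commit)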
|
drivet/flask-mercurial
|
flask_mercurial.py
|
Python
|
mit
| 1,242
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from airflow.providers.amazon.aws.operators.s3_list import S3ListOperator
TASK_ID = 'test-s3-list-operator'
BUCKET = 'test-bucket'
DELIMITER = '.csv'
PREFIX = 'TEST'
MOCK_FILES = ["TEST1.csv", "TEST2.csv", "TEST3.csv"]
class TestS3ListOperator(unittest.TestCase):
@mock.patch('airflow.providers.amazon.aws.operators.s3_list.S3Hook')
def test_execute(self, mock_hook):
mock_hook.return_value.list_keys.return_value = MOCK_FILES
operator = S3ListOperator(task_id=TASK_ID, bucket=BUCKET, prefix=PREFIX, delimiter=DELIMITER)
files = operator.execute(None)
mock_hook.return_value.list_keys.assert_called_once_with(
bucket_name=BUCKET, prefix=PREFIX, delimiter=DELIMITER
)
assert sorted(files) == sorted(MOCK_FILES)
|
apache/incubator-airflow
|
tests/providers/amazon/aws/operators/test_s3_list.py
|
Python
|
apache-2.0
| 1,616
|
from django.template.response import TemplateResponse
from pyconcz_2016.team.models import Organizer
def team_list(request):
organizers = Organizer.objects.all().filter(published=True).order_by('?')
return TemplateResponse(
request,
template='team/organizers_list.html',
context={
'organizers': organizers
}
)
|
pyvec/cz.pycon.org-2016
|
pyconcz_2016/team/views.py
|
Python
|
mit
| 413
|
# cgc.py - Curriculum Graph Converter - Dana Toribio
import graphviz
import os
import re
import subprocess
import sys
re_courses = re.compile(r'\w+\s\d+\w*') # regex for courses
header = r'''from graphviz import Digraph
g = Digraph('studyplan', filename='studyplan.gv')
g.attr('graph', fontname='Helvetica')
g.attr('node', fontname='Helvetica')'''
legend = r'''c0 = Digraph('cluster_0')
c0.body.append('label = "LEGEND"')
c0.body.append('color=lightgrey')
c0.node_attr.update(style='filled', color='white')
c0.edge_attr.update(color='white')
c0.node('Semester 6', color='plum')
c0.node('Semester 7', color='crimson')
c0.node('Semester 3', color='peachpuff')
c0.node('Semester 4', color='darkseagreen')
c0.node('Semester 5', color='lightblue')
c0.node('Completed', color='grey')
c0.node('Semester 1', color='pink')
c0.node('Semester 2', color='lightsalmon')
c0.node('Semester 8', color='chocolate')
c0.edge('Semester 6', 'Semester 7')
c0.edge('Semester 7', 'Semester 8')
c0.edge('Semester 3', 'Semester 4')
c0.edge('Semester 4', 'Semester 5')
c0.edge('Completed', 'Semester 1')
c0.edge('Semester 1', 'Semester 2')
c0.body.append('label = "LEGEND"')
'''
req_electives = r'''c1 = Digraph('cluster_1')
c1.body.append('color=aliceblue')
c1.body.append('style=filled')
c1.body.append('labelloc = "b"')
c1.body.append('label = "'''
req_electives_footer = "c1.body.append('label = \""
trk_electives = r'''
c2 = Digraph('cluster_2')
c2.body.append('color=aliceblue')
c2.body.append('style=filled')
c2.body.append('labelloc = "b"')
c2.body.append('label = "'''
trk_electives_footer = "c2.body.append('label = \""
completed_courses = ''
suggestions = ''
core_courses = ''
elec_prereqs = ''
def node(value):
return ".node('" + value + "')\n"
def prereq_edge(node1, node2, crit):
if crit is True:
return ".edge('" + node1 + "', '" + node2 + "', color='red')\n"
else:
return ".edge('" + node1 + "', '" + node2 + "')\n"
def coreq_edge(node1, node2, crit):
if crit is True:
return ".edge('" + node1 + "', '" + node2 + "', '', arrowhead='dot', arrowtail='dot', dir='both', color='red')\n"
else:
return ".edge('" + node1 + "', '" + node2 + "', '', arrowhead='dot', arrowtail='dot', dir='both')\n"
f = open('studyplan.txt', 'r')
nf = open('studyplan.py', 'w')
write_to = ''
legend_index = 0
legend_color = ['pink', 'lightsalmon', 'peachpuff', 'darkseagreen', 'lightblue', 'plum', 'crimson', 'chocolate', 'goldenrod']
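# The loop below parses 'studyplan.txt'. A sketch of the expected input,
# inferred from the parsing rules (the course names here are made up):
#
#   # Core courses
#   CS 101 -> CS 201 *     ('->' = prerequisite, '*' = critical)
#   CS 201 -- MATH 210     ('--' = corequisite)
#
#   # required electives
#   CS 330
#
#   # courses taken
#   CS 101
#
#   # semester 1
#   CS 201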
for line in f:
if ('#' in line.split(' ')) and ('Core' in line):
write_to = 'core'
core_courses = core_courses + '\n' + line
elif ('#' in line.split(' ')) and ('required' in line):
write_to = 'req_electives'
req_electives = req_electives + line[2:-1] + '"\')\n'
elif ('Track' in line):
write_to = 'trk_electives'
trk_electives = trk_electives + line[2:-1] + '"\')\n'
elif ('#' in line.split(' ')) and ('taken' in line):
write_to = 'suggestions'
suggestions = suggestions + '\n' + line + "g.attr('node', style='filled', color='grey')\n"
elif ('#' in line.split(' ')) and ('semester' in line):
write_to = 'suggestions'
suggestions = suggestions + '\n' + line + "g.attr('node', style='filled', color='" + legend_color[legend_index] + "')\n"
legend_index = legend_index + 1
    elif line == '\n':
write_to = ''
course = re_courses.findall(line)
    if write_to == 'core':
if (course) and ('->' in line) and ('*' in line):
core_courses = core_courses + 'g' + prereq_edge(course[0], course[1], True)
elif (course) and ('--' in line) and ('*' in line):
core_courses = core_courses + 'g' + coreq_edge(course[0], course[1], True)
elif (course) and ('->' in line):
core_courses = core_courses + 'g' + prereq_edge(course[0], course[1], False)
elif (course) and ('--' in line):
core_courses = core_courses + 'g' + coreq_edge(course[0], course[1], False)
elif (course):
core_courses = core_courses + 'g' + node(course[0])
    elif write_to == 'req_electives':
if (course) and ('->' in line):
req_electives = req_electives + 'c1' + prereq_edge(course[0], course[1], False)
elif (course) and ('--' in line):
req_electives = req_electives + 'c1' + coreq_edge(course[0], course[1], False)
elif (course):
req_electives = req_electives + 'c1' + node(course[0])
    elif write_to == 'trk_electives':
if (course) and ('->' in line):
trk_electives = trk_electives + 'c2' + prereq_edge(course[0], course[1], False)
elif (course) and ('--' in line):
trk_electives = trk_electives + 'c2' + coreq_edge(course[0], course[1], False)
elif (course):
trk_electives = trk_electives + 'c2' + node(course[0])
    elif (write_to == 'suggestions') and course:
suggestions = suggestions + 'g' + node(course[0])
else:
pass
nf.write(header + '\n')
nf.write(legend + '\n')
nf.write(req_electives + '\n')
nf.write(trk_electives + '\n')
nf.write(suggestions + '\n')
nf.write(core_courses + '\n')
# write track prerequisites
# subgraph calls
nf.write('g.subgraph(c1)' + '\n')
nf.write('g.subgraph(c2)' + '\n')
nf.write('g.subgraph(c0)' + '\n')
nf.write('g.view()')
f.close()
nf.close()  # flush the generated script before launching it
os.startfile('studyplan.py')
|
danaoira/CurriculumGraphVisualizer
|
cgc.py
|
Python
|
mit
| 5,047
|
from setuptools import setup, find_packages
setup(
name="dominoscli",
version="0.0.1",
packages=find_packages(),
author="Rick van Biljouw",
author_email="rick@ondemand.io",
scripts = [
'bin/dominos.py'
],
data_files = [
],
)
|
freecode/dominos-cli
|
setup.py
|
Python
|
mit
| 270
|
# -*- encoding: utf-8 -*-
"""Test class for PuppetModule CLI
:Requirement: Puppetmodule
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: Puppet
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from robottelo.cli.factory import make_org, make_product, make_repository
from robottelo.cli.puppetmodule import PuppetModule
from robottelo.cli.repository import Repository
from robottelo.constants import FAKE_0_PUPPET_REPO, FAKE_1_PUPPET_REPO
from robottelo.decorators import tier1, tier2, upgrade
from robottelo.test import CLITestCase
class PuppetModuleTestCase(CLITestCase):
"""Tests for PuppetModule via Hammer CLI"""
@classmethod
def setUpClass(cls):
super(PuppetModuleTestCase, cls).setUpClass()
cls.org = make_org()
cls.product = make_product({
u'organization-id': cls.org['id']
})
cls.repo = make_repository({
u'organization-id': cls.org['id'],
u'product-id': cls.product['id'],
u'content-type': u'puppet',
u'url': FAKE_0_PUPPET_REPO,
})
Repository.synchronize({'id': cls.repo['id']})
@tier1
def test_positive_list(self):
"""Check if puppet-module list retrieves puppet-modules of
the given org
:id: 77635e70-19e7-424d-9c89-ec5dbe91de75
:expectedresults: Puppet-modules are retrieved for the given org
:bz: 1283173
:CaseImportance: Critical
"""
result = PuppetModule.list({'organization-id': self.org['id']})
# There are 4 puppet modules in the test puppet-module url
self.assertEqual(len(result), 4)
@tier1
def test_positive_info(self):
"""Check if puppet-module info retrieves info for the given
puppet-module id
:id: 8aaa9243-5e20-49d6-95ce-620cc1ba18dc
:expectedresults: The puppet-module info is retrieved
:CaseImportance: Critical
"""
return_value = PuppetModule.list({
'organization-id': self.org['id'],
})
        for entry in return_value:
            result = PuppetModule.info(
                {'id': entry['id']},
                output_format='json'
            )
            self.assertEqual(result['id'], entry['id'])
@tier2
@upgrade
def test_positive_list_multiple_repos(self):
"""Verify that puppet-modules list for specific repo is correct
and does not affected by other repositories.
:id: f36d25b3-2495-4e89-a1cf-e39d52762d95
:expectedresults: Number of modules has no changed after a second repo
was synced.
:CaseImportance: Critical
"""
# Verify that number of synced modules is correct
repo1 = Repository.info({'id': self.repo['id']})
repo_content_count = repo1['content-counts']['puppet-modules']
modules_num = len(
PuppetModule.list({'repository-id': repo1['id']}))
self.assertEqual(repo_content_count, str(modules_num))
# Create and sync second repo
repo2 = make_repository({
u'organization-id': self.org['id'],
u'product-id': self.product['id'],
u'content-type': u'puppet',
u'url': FAKE_1_PUPPET_REPO,
})
Repository.synchronize({'id': repo2['id']})
# Verify that number of modules from the first repo has not changed
self.assertEqual(
modules_num,
len(PuppetModule.list({'repository-id': repo1['id']}))
)
|
omaciel/robottelo
|
tests/foreman/cli/test_puppetmodule.py
|
Python
|
gpl-3.0
| 3,568
|
# Copyright 2013 Radware LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import contextlib
import mock
from oslo.config import cfg
from oslo.serialization import jsonutils
from six.moves import queue as Queue
from neutron.api.v2 import attributes
from neutron import context
from neutron.extensions import loadbalancer
from neutron import manager
from neutron.plugins.common import constants
from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer
from neutron_lbaas.services.loadbalancer.drivers.radware import driver
from neutron_lbaas.services.loadbalancer.drivers.radware \
import exceptions as r_exc
GET_200 = ('/api/workflow/', '/api/service/', '/api/workflowTemplate')
SERVER_DOWN_CODES = (-1, 301, 307)
class QueueMock(Queue.Queue):
def __init__(self, completion_handler):
self.completion_handler = completion_handler
super(QueueMock, self).__init__()
def put_nowait(self, oper):
self.completion_handler(oper)
def _recover_function_mock(action, resource, data, headers, binary=False):
pass
def rest_call_function_mock(action, resource, data, headers, binary=False):
if rest_call_function_mock.RESPOND_WITH_ERROR:
return 400, 'error_status', 'error_description', None
if rest_call_function_mock.RESPOND_WITH_SERVER_DOWN in SERVER_DOWN_CODES:
val = rest_call_function_mock.RESPOND_WITH_SERVER_DOWN
return val, 'error_status', 'error_description', None
if action == 'GET':
return _get_handler(resource)
elif action == 'DELETE':
return _delete_handler(resource)
elif action == 'POST':
return _post_handler(resource, binary)
else:
return 0, None, None, None
def _get_handler(resource):
if resource == GET_200[2]:
if rest_call_function_mock.TEMPLATES_MISSING:
data = jsonutils.loads('[]')
else:
data = jsonutils.loads(
'[{"name":"openstack_l2_l3"},{"name":"openstack_l4"}]'
)
return 200, '', '', data
if resource in GET_200:
return 200, '', '', ''
else:
data = jsonutils.loads('{"complete":"True", "success": "True"}')
return 202, '', '', data
def _delete_handler(resource):
return 404, '', '', {'message': 'Not Found'}
def _post_handler(resource, binary):
if re.search(r'/api/workflow/.+/action/.+', resource):
data = jsonutils.loads('{"uri":"some_uri"}')
return 202, '', '', data
elif re.search(r'/api/service\?name=.+', resource):
data = jsonutils.loads('{"links":{"actions":{"provision":"someuri"}}}')
return 201, '', '', data
elif binary:
return 201, '', '', ''
else:
return 202, '', '', ''
RADWARE_PROVIDER = ('LOADBALANCER:radware:neutron.services.'
'loadbalancer.drivers.radware.driver.'
'LoadBalancerDriver:default')
class TestLoadBalancerPluginBase(
test_db_loadbalancer.LoadBalancerPluginDbTestCase):
def setUp(self):
super(TestLoadBalancerPluginBase, self).setUp(
lbaas_provider=RADWARE_PROVIDER)
loaded_plugins = manager.NeutronManager().get_service_plugins()
self.plugin_instance = loaded_plugins[constants.LOADBALANCER]
class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
def setUp(self):
super(TestLoadBalancerPlugin, self).setUp()
rest_call_function_mock.__dict__.update(
{'RESPOND_WITH_ERROR': False})
rest_call_function_mock.__dict__.update(
{'TEMPLATES_MISSING': False})
rest_call_function_mock.__dict__.update(
{'RESPOND_WITH_SERVER_DOWN': 200})
self.operation_completer_start_mock = mock.Mock(
return_value=None)
self.operation_completer_join_mock = mock.Mock(
return_value=None)
self.driver_rest_call_mock = mock.Mock(
side_effect=rest_call_function_mock)
self.flip_servers_mock = mock.Mock(
return_value=None)
self.recover_mock = mock.Mock(
side_effect=_recover_function_mock)
radware_driver = self.plugin_instance.drivers['radware']
radware_driver.completion_handler.start = (
self.operation_completer_start_mock)
radware_driver.completion_handler.join = (
self.operation_completer_join_mock)
self.orig_call = radware_driver.rest_client.call
self.orig__call = radware_driver.rest_client._call
radware_driver.rest_client.call = self.driver_rest_call_mock
radware_driver.rest_client._call = self.driver_rest_call_mock
radware_driver.rest_client._flip_servers = self.flip_servers_mock
radware_driver.rest_client._recover = self.recover_mock
radware_driver.completion_handler.rest_client.call = (
self.driver_rest_call_mock)
radware_driver.queue = QueueMock(
radware_driver.completion_handler.handle_operation_completion)
self.addCleanup(radware_driver.completion_handler.join)
def test_get_pip(self):
"""Call _get_pip twice and verify that a Port is created once."""
port_dict = {'fixed_ips': [{'subnet_id': '10.10.10.10',
'ip_address': '11.11.11.11'}]}
port_data = {
'tenant_id': 'tenant_id',
'name': 'port_name',
'network_id': 'network_id',
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'admin_state_up': False,
'device_id': '',
'device_owner': 'neutron:' + constants.LOADBALANCER,
'fixed_ips': [{'subnet_id': '10.10.10.10'}]
}
self.plugin_instance._core_plugin.get_ports = mock.Mock(
return_value=[])
self.plugin_instance._core_plugin.create_port = mock.Mock(
return_value=port_dict)
radware_driver = self.plugin_instance.drivers['radware']
radware_driver._get_pip(context.get_admin_context(),
'tenant_id', 'port_name',
'network_id', '10.10.10.10')
self.plugin_instance._core_plugin.get_ports.assert_called_once_with(
mock.ANY, filters={'name': ['port_name']})
self.plugin_instance._core_plugin.create_port.assert_called_once_with(
mock.ANY, {'port': port_data})
self.plugin_instance._core_plugin.create_port.reset_mock()
self.plugin_instance._core_plugin.get_ports.reset_mock()
self.plugin_instance._core_plugin.get_ports.return_value = [port_dict]
radware_driver._get_pip(context.get_admin_context(),
'tenant_id', 'port_name',
'network_id', '10.10.10.10')
self.plugin_instance._core_plugin.get_ports.assert_called_once_with(
mock.ANY, filters={'name': ['port_name']})
self.assertFalse(self.plugin_instance._core_plugin.create_port.called)
def test_rest_client_recover_was_called(self):
"""Call the real REST client and verify _recover is called."""
radware_driver = self.plugin_instance.drivers['radware']
radware_driver.rest_client.call = self.orig_call
radware_driver.rest_client._call = self.orig__call
self.assertRaises(r_exc.RESTRequestFailure,
radware_driver._verify_workflow_templates)
self.recover_mock.assert_called_once_with('GET',
'/api/workflowTemplate',
None, None, False)
def test_rest_client_flip_servers(self):
radware_driver = self.plugin_instance.drivers['radware']
server = radware_driver.rest_client.server
sec_server = radware_driver.rest_client.secondary_server
radware_driver.rest_client._flip_servers()
self.assertEqual(server,
radware_driver.rest_client.secondary_server)
self.assertEqual(sec_server,
radware_driver.rest_client.server)
def test_verify_workflow_templates_server_down(self):
"""Test the rest call failure when backend is down."""
for value in SERVER_DOWN_CODES:
rest_call_function_mock.__dict__.update(
{'RESPOND_WITH_SERVER_DOWN': value})
self.assertRaises(r_exc.RESTRequestFailure,
self.plugin_instance.drivers['radware'].
_verify_workflow_templates)
def test_verify_workflow_templates(self):
"""Test the rest call failure handling by Exception raising."""
rest_call_function_mock.__dict__.update(
{'TEMPLATES_MISSING': True})
self.assertRaises(r_exc.WorkflowMissing,
self.plugin_instance.drivers['radware'].
_verify_workflow_templates)
def test_create_vip_failure(self):
"""Test the rest call failure handling by Exception raising."""
with self.network() as network:
with self.subnet(network=network) as subnet:
with self.pool(do_delete=False,
provider='radware',
subnet_id=subnet['subnet']['id']) as pool:
vip_data = {
'name': 'vip1',
'subnet_id': subnet['subnet']['id'],
'pool_id': pool['pool']['id'],
'description': '',
'protocol_port': 80,
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
rest_call_function_mock.__dict__.update(
{'RESPOND_WITH_ERROR': True})
self.assertRaises(r_exc.RESTRequestFailure,
self.plugin_instance.create_vip,
context.get_admin_context(),
{'vip': vip_data})
def test_create_vip(self):
with self.subnet() as subnet:
with self.pool(provider='radware',
subnet_id=subnet['subnet']['id']) as pool:
vip_data = {
'name': 'vip1',
'subnet_id': subnet['subnet']['id'],
'pool_id': pool['pool']['id'],
'description': '',
'protocol_port': 80,
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
vip = self.plugin_instance.create_vip(
context.get_admin_context(), {'vip': vip_data})
# Test creation REST calls
calls = [
mock.call('GET', u'/api/service/srv_' +
subnet['subnet']['network_id'], None, None),
mock.call('POST', u'/api/service?name=srv_' +
subnet['subnet']['network_id'] + '&tenant=' +
vip['tenant_id'], mock.ANY,
driver.CREATE_SERVICE_HEADER),
mock.call('GET', u'/api/workflow/l2_l3_' +
subnet['subnet']['network_id'], None, None),
mock.call('POST', '/api/workflow/l2_l3_' +
subnet['subnet']['network_id'] +
'/action/setup_l2_l3',
mock.ANY, driver.TEMPLATE_HEADER),
mock.call('POST', 'someuri',
None, driver.PROVISION_HEADER),
mock.call('POST', '/api/workflowTemplate/' +
'openstack_l4' +
'?name=' + pool['pool']['id'],
mock.ANY,
driver.TEMPLATE_HEADER),
mock.call('POST', '/api/workflowTemplate/' +
'openstack_l2_l3' +
'?name=l2_l3_' + subnet['subnet']['network_id'],
mock.ANY,
driver.TEMPLATE_HEADER),
mock.call('POST', '/api/workflow/' + pool['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER),
mock.call('GET', '/api/workflow/' +
pool['pool']['id'], None, None)
]
self.driver_rest_call_mock.assert_has_calls(calls,
any_order=True)
#Test DB
new_vip = self.plugin_instance.get_vip(
context.get_admin_context(),
vip['id']
)
self.assertEqual(new_vip['status'], constants.ACTIVE)
# Delete VIP
self.plugin_instance.delete_vip(
context.get_admin_context(), vip['id'])
# Test deletion REST calls
calls = [
mock.call('DELETE', u'/api/workflow/' + pool['pool']['id'],
None, None)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
def test_create_vip_2_leg(self):
"""Test creation of a VIP where Alteon VIP and PIP are different."""
with self.subnet(cidr='10.0.0.0/24') as subnet:
with self.subnet(cidr='10.0.1.0/24') as pool_sub:
with self.pool(provider='radware',
subnet_id=pool_sub['subnet']['id']) as pool:
vip_data = {
'name': 'vip1',
'subnet_id': subnet['subnet']['id'],
'pool_id': pool['pool']['id'],
'description': '',
'protocol_port': 80,
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
vip = self.plugin_instance.create_vip(
context.get_admin_context(), {'vip': vip_data})
name_suffix = '%s_%s' % (subnet['subnet']['network_id'],
pool_sub['subnet']['network_id'])
# Test creation REST calls
calls = [
mock.call('GET', '/api/workflowTemplate', None, None),
mock.call('GET', '/api/service/srv_' + name_suffix,
None, None),
mock.call('POST', '/api/service?name=srv_' +
name_suffix + '&tenant=' + vip['tenant_id'],
mock.ANY, driver.CREATE_SERVICE_HEADER),
mock.call('POST', 'someuri',
None, driver.PROVISION_HEADER),
mock.call('GET', '/api/workflow/l2_l3_' + name_suffix,
None, None),
mock.call('POST', '/api/workflowTemplate/' +
'openstack_l2_l3' +
'?name=l2_l3_' + name_suffix,
mock.ANY,
driver.TEMPLATE_HEADER),
mock.call('POST', '/api/workflow/l2_l3_' +
name_suffix + '/action/setup_l2_l3',
mock.ANY, driver.TEMPLATE_HEADER),
mock.call('GET', '/api/workflow/' +
pool['pool']['id'], None, None),
mock.call('POST', '/api/workflowTemplate/' +
'openstack_l4' +
'?name=' + pool['pool']['id'],
mock.ANY,
driver.TEMPLATE_HEADER),
mock.call('POST', '/api/workflow/' +
pool['pool']['id'] + '/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER)
]
self.driver_rest_call_mock.assert_has_calls(calls)
#Test DB
new_vip = self.plugin_instance.get_vip(
context.get_admin_context(),
vip['id']
)
self.assertEqual(new_vip['status'], constants.ACTIVE)
# Test that PIP neutron port was created
pip_port_filter = {
'name': ['pip_' + vip['id']],
}
plugin = manager.NeutronManager.get_plugin()
num_ports = plugin.get_ports_count(
context.get_admin_context(), filters=pip_port_filter)
                    self.assertGreater(num_ports, 0)
# Delete VIP
self.plugin_instance.delete_vip(
context.get_admin_context(), vip['id'])
# Test deletion REST calls
calls = [
mock.call('DELETE', u'/api/workflow/' +
pool['pool']['id'], None, None)
]
self.driver_rest_call_mock.assert_has_calls(calls)
def test_update_vip(self):
with self.subnet() as subnet:
with self.pool(provider='radware',
do_delete=False,
subnet_id=subnet['subnet']['id']) as pool:
vip_data = {
'name': 'vip1',
'subnet_id': subnet['subnet']['id'],
'pool_id': pool['pool']['id'],
'description': '',
'protocol_port': 80,
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
vip = self.plugin_instance.create_vip(
context.get_admin_context(), {'vip': vip_data})
vip_data['status'] = constants.PENDING_UPDATE
self.plugin_instance.update_vip(
context.get_admin_context(),
vip['id'], {'vip': vip_data})
# Test REST calls
calls = [
mock.call('POST', '/api/workflow/' + pool['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER),
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
updated_vip = self.plugin_instance.get_vip(
context.get_admin_context(), vip['id'])
self.assertEqual(updated_vip['status'], constants.ACTIVE)
# delete VIP
self.plugin_instance.delete_vip(
context.get_admin_context(), vip['id'])
def test_update_vip_2_leg(self):
"""Test update of a VIP where Alteon VIP and PIP are different."""
with self.subnet(cidr='10.0.0.0/24') as subnet:
with self.subnet(cidr='10.0.1.0/24') as pool_subnet:
with self.pool(provider='radware',
subnet_id=pool_subnet['subnet']['id']) as pool:
vip_data = {
'name': 'vip1',
'subnet_id': subnet['subnet']['id'],
'pool_id': pool['pool']['id'],
'description': '',
'protocol_port': 80,
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
vip = self.plugin_instance.create_vip(
context.get_admin_context(), {'vip': vip_data})
self.plugin_instance.update_vip(
context.get_admin_context(),
vip['id'], {'vip': vip_data})
# Test REST calls
calls = [
mock.call('POST', '/api/workflow/' +
pool['pool']['id'] + '/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER),
]
self.driver_rest_call_mock.assert_has_calls(calls)
updated_vip = self.plugin_instance.get_vip(
context.get_admin_context(), vip['id'])
self.assertEqual(updated_vip['status'], constants.ACTIVE)
# delete VIP
self.plugin_instance.delete_vip(
context.get_admin_context(), vip['id'])
def test_delete_vip_failure(self):
plugin = self.plugin_instance
with self.network() as network:
with self.subnet(network=network) as subnet:
with self.pool(do_delete=False,
provider='radware',
subnet_id=subnet['subnet']['id']) as pool:
with contextlib.nested(
self.member(pool_id=pool['pool']['id'],
do_delete=False),
self.member(pool_id=pool['pool']['id'],
address='192.168.1.101',
do_delete=False),
self.health_monitor(do_delete=False),
self.vip(pool=pool, subnet=subnet, do_delete=False)
) as (mem1, mem2, hm, vip):
plugin.create_pool_health_monitor(
context.get_admin_context(), hm, pool['pool']['id']
)
rest_call_function_mock.__dict__.update(
{'RESPOND_WITH_ERROR': True})
plugin.delete_vip(
context.get_admin_context(), vip['vip']['id'])
u_vip = plugin.get_vip(
context.get_admin_context(), vip['vip']['id'])
u_pool = plugin.get_pool(
context.get_admin_context(), pool['pool']['id'])
u_mem1 = plugin.get_member(
context.get_admin_context(), mem1['member']['id'])
u_mem2 = plugin.get_member(
context.get_admin_context(), mem2['member']['id'])
u_phm = plugin.get_pool_health_monitor(
context.get_admin_context(),
hm['health_monitor']['id'], pool['pool']['id'])
self.assertEqual(u_vip['status'], constants.ERROR)
self.assertEqual(u_pool['status'], constants.ACTIVE)
self.assertEqual(u_mem1['status'], constants.ACTIVE)
self.assertEqual(u_mem2['status'], constants.ACTIVE)
self.assertEqual(u_phm['status'], constants.ACTIVE)
def test_delete_vip(self):
with self.subnet() as subnet:
with self.pool(provider='radware',
do_delete=False,
subnet_id=subnet['subnet']['id']) as pool:
vip_data = {
'name': 'vip1',
'subnet_id': subnet['subnet']['id'],
'pool_id': pool['pool']['id'],
'description': '',
'protocol_port': 80,
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
vip = self.plugin_instance.create_vip(
context.get_admin_context(), {'vip': vip_data})
self.plugin_instance.delete_vip(
context.get_admin_context(), vip['id'])
calls = [
mock.call('DELETE', '/api/workflow/' + pool['pool']['id'],
None, None)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
self.assertRaises(loadbalancer.VipNotFound,
self.plugin_instance.get_vip,
context.get_admin_context(), vip['id'])
def test_delete_vip_2_leg(self):
"""Test deletion of a VIP where Alteon VIP and PIP are different."""
self.driver_rest_call_mock.reset_mock()
with self.subnet(cidr='10.0.0.0/24') as subnet:
with self.subnet(cidr='10.0.1.0/24') as pool_subnet:
with self.pool(provider='radware',
do_delete=False,
subnet_id=pool_subnet['subnet']['id']) as pool:
vip_data = {
'name': 'vip1',
'subnet_id': subnet['subnet']['id'],
'pool_id': pool['pool']['id'],
'description': '',
'protocol_port': 80,
'protocol': 'HTTP',
'connection_limit': -1,
'admin_state_up': True,
'status': constants.PENDING_CREATE,
'tenant_id': self._tenant_id,
'session_persistence': ''
}
vip = self.plugin_instance.create_vip(
context.get_admin_context(), {'vip': vip_data})
self.plugin_instance.delete_vip(
context.get_admin_context(), vip['id'])
calls = [
mock.call('DELETE', '/api/workflow/' +
pool['pool']['id'], None, None)
]
self.driver_rest_call_mock.assert_has_calls(calls)
# Test that PIP neutron port was deleted
pip_port_filter = {
'name': ['pip_' + vip['id']],
}
plugin = manager.NeutronManager.get_plugin()
num_ports = plugin.get_ports_count(
context.get_admin_context(), filters=pip_port_filter)
                    self.assertEqual(num_ports, 0)
self.assertRaises(loadbalancer.VipNotFound,
self.plugin_instance.get_vip,
context.get_admin_context(), vip['id'])
def test_update_pool(self):
with self.subnet():
with self.pool() as pool:
del pool['pool']['provider']
del pool['pool']['status']
self.plugin_instance.update_pool(
context.get_admin_context(),
pool['pool']['id'], pool)
pool_db = self.plugin_instance.get_pool(
context.get_admin_context(), pool['pool']['id'])
self.assertEqual(pool_db['status'], constants.PENDING_UPDATE)
def test_delete_pool_with_vip(self):
with self.subnet() as subnet:
with self.pool(provider='radware',
do_delete=False,
subnet_id=subnet['subnet']['id']) as pool:
with self.vip(pool=pool, subnet=subnet):
self.assertRaises(loadbalancer.PoolInUse,
self.plugin_instance.delete_pool,
context.get_admin_context(),
pool['pool']['id'])
def test_create_member_with_vip(self):
with self.subnet() as subnet:
with self.pool(provider='radware',
subnet_id=subnet['subnet']['id']) as p:
with self.vip(pool=p, subnet=subnet):
with self.member(pool_id=p['pool']['id']):
calls = [
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
),
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
def test_create_member_on_different_subnets(self):
with contextlib.nested(
self.subnet(),
self.subnet(cidr='20.0.0.0/24'),
self.subnet(cidr='30.0.0.0/24')
) as (vip_sub, pool_sub, member_sub):
with self.pool(provider='radware',
subnet_id=pool_sub['subnet']['id']) as pool:
with contextlib.nested(
self.port(subnet=vip_sub,
fixed_ips=[{'ip_address': '10.0.0.2'}]),
self.port(subnet=pool_sub,
fixed_ips=[{'ip_address': '20.0.0.2'}]),
self.port(subnet=member_sub,
fixed_ips=[{'ip_address': '30.0.0.2'}])
):
with contextlib.nested(
self.member(pool_id=pool['pool']['id'],
address='10.0.0.2'),
self.member(pool_id=pool['pool']['id'],
address='20.0.0.2'),
self.member(pool_id=pool['pool']['id'],
address='30.0.0.2')
) as (member_vip, member_pool, member_out):
with self.vip(pool=pool, subnet=vip_sub):
calls = [
mock.call(
'POST', '/api/workflow/' +
pool['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
mock_calls = self.driver_rest_call_mock.mock_calls
params = mock_calls[-2][1][2]['parameters']
member_subnet_array = params['member_subnet_array']
member_mask_array = params['member_mask_array']
member_gw_array = params['member_gw_array']
self.assertEqual(member_subnet_array,
['10.0.0.0',
'255.255.255.255',
'30.0.0.0'])
self.assertEqual(member_mask_array,
['255.255.255.0',
'255.255.255.255',
'255.255.255.0'])
self.assertEqual(
member_gw_array,
[pool_sub['subnet']['gateway_ip'],
'255.255.255.255',
pool_sub['subnet']['gateway_ip']])
def test_create_member_on_different_subnet_no_port(self):
with contextlib.nested(
self.subnet(),
self.subnet(cidr='20.0.0.0/24'),
self.subnet(cidr='30.0.0.0/24')
) as (vip_sub, pool_sub, member_sub):
with self.pool(provider='radware',
subnet_id=pool_sub['subnet']['id']) as pool:
with self.member(pool_id=pool['pool']['id'],
address='30.0.0.2'):
with self.vip(pool=pool, subnet=vip_sub):
calls = [
mock.call(
'POST', '/api/workflow/' +
pool['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
mock_calls = self.driver_rest_call_mock.mock_calls
params = mock_calls[-2][1][2]['parameters']
member_subnet_array = params['member_subnet_array']
member_mask_array = params['member_mask_array']
member_gw_array = params['member_gw_array']
self.assertEqual(member_subnet_array,
['30.0.0.2'])
self.assertEqual(member_mask_array,
['255.255.255.255'])
self.assertEqual(member_gw_array,
[pool_sub['subnet']['gateway_ip']])
def test_create_member_on_different_subnet_multiple_ports(self):
cfg.CONF.set_override("allow_overlapping_ips", 'true')
with self.network() as other_net:
with contextlib.nested(
self.subnet(),
self.subnet(cidr='20.0.0.0/24'),
self.subnet(cidr='30.0.0.0/24'),
self.subnet(network=other_net, cidr='30.0.0.0/24')
) as (vip_sub, pool_sub, member_sub1, member_sub2):
with self.pool(provider='radware',
subnet_id=pool_sub['subnet']['id']) as pool:
with contextlib.nested(
self.port(subnet=member_sub1,
fixed_ips=[{'ip_address': '30.0.0.2'}]),
self.port(subnet=member_sub2,
fixed_ips=[{'ip_address': '30.0.0.2'}])):
with self.member(pool_id=pool['pool']['id'],
address='30.0.0.2'):
with self.vip(pool=pool, subnet=vip_sub):
calls = [
mock.call(
'POST', '/api/workflow/' +
pool['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
calls = self.driver_rest_call_mock.mock_calls
params = calls[-2][1][2]['parameters']
m_sub_array = params['member_subnet_array']
m_mask_array = params['member_mask_array']
m_gw_array = params['member_gw_array']
self.assertEqual(m_sub_array,
['30.0.0.2'])
self.assertEqual(m_mask_array,
['255.255.255.255'])
self.assertEqual(
m_gw_array,
[pool_sub['subnet']['gateway_ip']])
def test_update_member_with_vip(self):
with self.subnet() as subnet:
with self.pool(provider='radware',
subnet_id=subnet['subnet']['id']) as p:
with self.member(pool_id=p['pool']['id']) as member:
with self.vip(pool=p, subnet=subnet):
self.plugin_instance.update_member(
context.get_admin_context(),
member['member']['id'], member
)
calls = [
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
),
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
updated_member = self.plugin_instance.get_member(
context.get_admin_context(),
member['member']['id']
)
updated_member = self.plugin_instance.get_member(
context.get_admin_context(),
member['member']['id']
)
self.assertEqual(updated_member['status'],
constants.ACTIVE)
def test_update_member_without_vip(self):
with self.subnet():
with self.pool(provider='radware') as pool:
with self.member(pool_id=pool['pool']['id']) as member:
member['member']['status'] = constants.PENDING_UPDATE
updated_member = self.plugin_instance.update_member(
context.get_admin_context(),
member['member']['id'], member
)
self.assertEqual(updated_member['status'],
constants.PENDING_UPDATE)
def test_delete_member_with_vip(self):
with self.subnet() as subnet:
with self.pool(provider='radware',
subnet_id=subnet['subnet']['id']) as p:
with self.member(pool_id=p['pool']['id'],
do_delete=False) as m:
with self.vip(pool=p, subnet=subnet):
                        # Reset the mock and wait to be sure the member
                        # changed status from PENDING_CREATE to ACTIVE
self.plugin_instance.delete_member(
context.get_admin_context(),
m['member']['id']
)
name, args, kwargs = (
self.driver_rest_call_mock.mock_calls[-2]
)
deletion_post_graph = str(args[2])
self.assertTrue(re.search(
r'.*\'member_address_array\': \[\].*',
deletion_post_graph
))
calls = [
mock.call(
'POST', '/api/workflow/' + p['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
self.assertRaises(loadbalancer.MemberNotFound,
self.plugin_instance.get_member,
context.get_admin_context(),
m['member']['id'])
def test_delete_member_without_vip(self):
with self.subnet():
with self.pool(provider='radware') as p:
with self.member(pool_id=p['pool']['id'],
do_delete=False) as m:
self.plugin_instance.delete_member(
context.get_admin_context(), m['member']['id']
)
self.assertRaises(loadbalancer.MemberNotFound,
self.plugin_instance.get_member,
context.get_admin_context(),
m['member']['id'])
def test_create_hm_with_vip(self):
with self.subnet() as subnet:
with self.health_monitor() as hm:
with self.pool(provider='radware',
subnet_id=subnet['subnet']['id']) as pool:
with self.vip(pool=pool, subnet=subnet):
self.plugin_instance.create_pool_health_monitor(
context.get_admin_context(),
hm, pool['pool']['id']
)
# Test REST calls
calls = [
mock.call(
'POST', '/api/workflow/' + pool['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
),
mock.call(
'POST', '/api/workflow/' + pool['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
phm = self.plugin_instance.get_pool_health_monitor(
context.get_admin_context(),
hm['health_monitor']['id'], pool['pool']['id']
)
self.assertEqual(phm['status'], constants.ACTIVE)
def test_delete_pool_hm_with_vip(self):
with self.subnet() as subnet:
with self.health_monitor(do_delete=False) as hm:
with self.pool(provider='radware',
subnet_id=subnet['subnet']['id']) as pool:
with self.vip(pool=pool, subnet=subnet):
self.plugin_instance.create_pool_health_monitor(
context.get_admin_context(),
hm, pool['pool']['id']
)
self.plugin_instance.delete_pool_health_monitor(
context.get_admin_context(),
hm['health_monitor']['id'],
pool['pool']['id']
)
name, args, kwargs = (
self.driver_rest_call_mock.mock_calls[-2]
)
deletion_post_graph = str(args[2])
self.assertTrue(re.search(
r'.*\'hm_uuid_array\': \[\].*',
deletion_post_graph
))
calls = [
mock.call(
'POST', '/api/workflow/' + pool['pool']['id'] +
'/action/BaseCreate',
mock.ANY, driver.TEMPLATE_HEADER
)
]
self.driver_rest_call_mock.assert_has_calls(
calls, any_order=True)
self.assertRaises(
loadbalancer.PoolMonitorAssociationNotFound,
self.plugin_instance.get_pool_health_monitor,
context.get_admin_context(),
hm['health_monitor']['id'],
pool['pool']['id']
)
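# A minimal, self-contained sketch of the flag-toggling pattern used by the
# tests above: the shared `rest_call_function_mock` is reconfigured between
# tests by mutating attributes on the function object itself. The names
# below (`_fake_rest_call` and its flag) are hypothetical stand-ins, not
# part of the neutron test suite.
def _fake_rest_call(action, resource, data, headers):
    # A flag set on the function object selects the canned response.
    if getattr(_fake_rest_call, 'RESPOND_WITH_ERROR', False):
        return 400, 'Bad Request', '', None
    return 200, 'OK', '', {'uri': 'someuri'}

_fake_rest_call.__dict__.update({'RESPOND_WITH_ERROR': True})
assert _fake_rest_call('GET', '/api/workflowTemplate', None, None)[0] == 400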
|
citrix-openstack-build/neutron-lbaas
|
neutron_lbaas/tests.skip/unit/services/loadbalancer/drivers/radware/test_plugin_driver.py
|
Python
|
apache-2.0
| 46,493
|
"""
State Space Representation and Kalman Filter
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
from warnings import warn
import numpy as np
from .representation import OptionWrapper, Representation, FrozenRepresentation
from .tools import (
prefix_kalman_filter_map, validate_vector_shape, validate_matrix_shape
)
# Define constants
FILTER_CONVENTIONAL = 0x01 # Durbin and Koopman (2012), Chapter 4
INVERT_UNIVARIATE = 0x01
SOLVE_LU = 0x02
INVERT_LU = 0x04
SOLVE_CHOLESKY = 0x08
INVERT_CHOLESKY = 0x10
STABILITY_FORCE_SYMMETRY = 0x01
MEMORY_STORE_ALL = 0
MEMORY_NO_FORECAST = 0x01
MEMORY_NO_PREDICTED = 0x02
MEMORY_NO_FILTERED = 0x04
MEMORY_NO_LIKELIHOOD = 0x08
MEMORY_CONSERVE = (
MEMORY_NO_FORECAST | MEMORY_NO_PREDICTED | MEMORY_NO_FILTERED |
MEMORY_NO_LIKELIHOOD
)
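# Note: MEMORY_CONSERVE == 0x01 | 0x02 | 0x04 | 0x08 == 0x0f, so setting
# `conserve_memory = MEMORY_CONSERVE` enables all four "no storage" options
# at once.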
class KalmanFilter(Representation):
r"""
State space representation of a time series process, with Kalman filter
Parameters
----------
k_endog : array_like or integer
The observed time-series process :math:`y` if array like or the
number of variables in the process if an integer.
k_states : int
The dimension of the unobserved state process.
k_posdef : int, optional
The dimension of a guaranteed positive definite covariance matrix
        describing the shocks in the transition equation. Must be less than
or equal to `k_states`. Default is `k_states`.
loglikelihood_burn : int, optional
The number of initial periods during which the loglikelihood is not
recorded. Default is 0.
tolerance : float, optional
The tolerance at which the Kalman filter determines convergence to
steady-state. Default is 1e-19.
results_class : class, optional
Default results class to use to save filtering output. Default is
`FilterResults`. If specified, class must extend from `FilterResults`.
**kwargs
Keyword arguments may be used to provide values for the filter,
inversion, and stability methods. See `set_filter_method`,
`set_inversion_method`, and `set_stability_method`.
Keyword arguments may be used to provide default values for state space
matrices. See `Representation` for more details.
Notes
-----
There are several types of options available for controlling the Kalman
filter operation. All options are internally held as bitmasks, but can be
manipulated by setting class attributes, which act like boolean flags. For
more information, see the `set_*` class method documentation. The options
are:
filter_method
The filtering method controls aspects of which
Kalman filtering approach will be used.
inversion_method
The Kalman filter may contain one matrix inversion: that of the
forecast error covariance matrix. The inversion method controls how and
if that inverse is performed.
stability_method
The Kalman filter is a recursive algorithm that may in some cases
suffer issues with numerical stability. The stability method controls
what, if any, measures are taken to promote stability.
conserve_memory
By default, the Kalman filter computes a number of intermediate
matrices at each iteration. The memory conservation options control
which of those matrices are stored.
The `filter_method` and `inversion_method` options intentionally allow
the possibility that multiple methods will be indicated. In the case that
multiple methods are selected, the underlying Kalman filter will attempt to
    select the optimal method given the input data.
For example, it may be that INVERT_UNIVARIATE and SOLVE_CHOLESKY are
indicated (this is in fact the default case). In this case, if the
endogenous vector is 1-dimensional (`k_endog` = 1), then INVERT_UNIVARIATE
is used and inversion reduces to simple division, and if it has a larger
dimension, the Cholesky decomposition along with linear solving (rather
than explicit matrix inversion) is used. If only SOLVE_CHOLESKY had been
set, then the Cholesky decomposition method would *always* be used, even in
the case of 1-dimensional data.
See Also
--------
FilterResults
statsmodels.tsa.statespace.representation.Representation
"""
filter_methods = [
'filter_conventional'
]
filter_conventional = OptionWrapper('filter_method', FILTER_CONVENTIONAL)
"""
(bool) Flag for conventional Kalman filtering.
"""
inversion_methods = [
'invert_univariate', 'solve_lu', 'invert_lu', 'solve_cholesky',
'invert_cholesky'
]
invert_univariate = OptionWrapper('inversion_method', INVERT_UNIVARIATE)
"""
(bool) Flag for univariate inversion method (recommended).
"""
solve_lu = OptionWrapper('inversion_method', SOLVE_LU)
"""
(bool) Flag for LU and linear solver inversion method.
"""
invert_lu = OptionWrapper('inversion_method', INVERT_LU)
"""
(bool) Flag for LU inversion method.
"""
solve_cholesky = OptionWrapper('inversion_method', SOLVE_CHOLESKY)
"""
(bool) Flag for Cholesky and linear solver inversion method (recommended).
"""
invert_cholesky = OptionWrapper('inversion_method', INVERT_CHOLESKY)
"""
(bool) Flag for Cholesky inversion method.
"""
stability_methods = ['stability_force_symmetry']
stability_force_symmetry = (
OptionWrapper('stability_method', STABILITY_FORCE_SYMMETRY)
)
"""
(bool) Flag for enforcing covariance matrix symmetry
"""
memory_options = [
'memory_store_all', 'memory_no_forecast', 'memory_no_predicted',
'memory_no_filtered', 'memory_no_likelihood', 'memory_conserve'
]
memory_store_all = OptionWrapper('conserve_memory', MEMORY_STORE_ALL)
"""
(bool) Flag for storing all intermediate results in memory (default).
"""
memory_no_forecast = OptionWrapper('conserve_memory', MEMORY_NO_FORECAST)
"""
(bool) Flag to prevent storing forecasts.
"""
memory_no_predicted = OptionWrapper('conserve_memory', MEMORY_NO_PREDICTED)
"""
(bool) Flag to prevent storing predicted state and covariance matrices.
"""
memory_no_filtered = OptionWrapper('conserve_memory', MEMORY_NO_FILTERED)
"""
(bool) Flag to prevent storing filtered state and covariance matrices.
"""
memory_no_likelihood = (
OptionWrapper('conserve_memory', MEMORY_NO_LIKELIHOOD)
)
"""
(bool) Flag to prevent storing likelihood values for each observation.
"""
memory_conserve = OptionWrapper('conserve_memory', MEMORY_CONSERVE)
"""
(bool) Flag to conserve the maximum amount of memory.
"""
# Default filter options
filter_method = FILTER_CONVENTIONAL
"""
(int) Filtering method bitmask.
"""
inversion_method = INVERT_UNIVARIATE | SOLVE_CHOLESKY
"""
(int) Inversion method bitmask.
"""
stability_method = STABILITY_FORCE_SYMMETRY
"""
(int) Stability method bitmask.
"""
conserve_memory = MEMORY_STORE_ALL
"""
(int) Memory conservation bitmask.
"""
def __init__(self, k_endog, k_states, k_posdef=None,
loglikelihood_burn=0, tolerance=1e-19, results_class=None,
**kwargs):
super(KalmanFilter, self).__init__(
k_endog, k_states, k_posdef, **kwargs
)
# Setup the underlying Kalman filter storage
self._kalman_filters = {}
# Filter options
self.loglikelihood_burn = loglikelihood_burn
self.results_class = (
results_class if results_class is not None else FilterResults
)
self.set_filter_method(**kwargs)
self.set_inversion_method(**kwargs)
self.set_stability_method(**kwargs)
self.set_conserve_memory(**kwargs)
self.tolerance = tolerance
@property
def _kalman_filter(self):
prefix = self.prefix
if prefix in self._kalman_filters:
return self._kalman_filters[prefix]
return None
def _initialize_filter(self, filter_method=None, inversion_method=None,
stability_method=None, conserve_memory=None,
tolerance=None, loglikelihood_burn=None):
if filter_method is None:
filter_method = self.filter_method
if inversion_method is None:
inversion_method = self.inversion_method
if stability_method is None:
stability_method = self.stability_method
if conserve_memory is None:
conserve_memory = self.conserve_memory
if loglikelihood_burn is None:
loglikelihood_burn = self.loglikelihood_burn
if tolerance is None:
tolerance = self.tolerance
# Make sure we have endog
if self.endog is None:
raise RuntimeError('Must bind a dataset to the model before'
' filtering or smoothing.')
# Initialize the representation matrices
prefix, dtype, create_statespace = self._initialize_representation()
# Determine if we need to (re-)create the filter
# (definitely need to recreate if we recreated the _statespace object)
create_filter = create_statespace or prefix not in self._kalman_filters
if not create_filter:
kalman_filter = self._kalman_filters[prefix]
create_filter = (
                kalman_filter.conserve_memory != conserve_memory or
                kalman_filter.loglikelihood_burn != loglikelihood_burn
)
# If the dtype-specific _kalman_filter does not exist (or if we need
# to re-create it), create it
if create_filter:
if prefix in self._kalman_filters:
# Delete the old filter
del self._kalman_filters[prefix]
# Setup the filter
cls = prefix_kalman_filter_map[prefix]
self._kalman_filters[prefix] = cls(
self._statespaces[prefix], filter_method, inversion_method,
stability_method, conserve_memory, tolerance,
loglikelihood_burn
)
# Otherwise, update the filter parameters
else:
kalman_filter = self._kalman_filters[prefix]
kalman_filter.set_filter_method(filter_method, False)
kalman_filter.inversion_method = inversion_method
kalman_filter.stability_method = stability_method
kalman_filter.tolerance = tolerance
# conserve_memory and loglikelihood_burn changes always lead to
# re-created filters
return prefix, dtype, create_filter, create_statespace
def set_filter_method(self, filter_method=None, **kwargs):
"""
Set the filtering method
The filtering method controls aspects of which Kalman filtering
approach will be used.
Parameters
----------
filter_method : integer, optional
Bitmask value to set the filter method to. See notes for details.
**kwargs
Keyword arguments may be used to influence the filter method by
setting individual boolean flags. See notes for details.
Notes
-----
The filtering method is defined by a collection of boolean flags, and
is internally stored as a bitmask. Only one method is currently
available:
FILTER_CONVENTIONAL = 0x01
Conventional Kalman filter.
If the bitmask is set directly via the `filter_method` argument, then
the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the filter method may also be specified by directly modifying
the class attributes which are defined similarly to the keyword
arguments.
The default filtering method is FILTER_CONVENTIONAL.
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.filter_method
1
>>> mod.filter_conventional
True
>>> mod.set_filter_method(filter_method=1)
>>> mod.filter_method
1
>>> mod.set_filter_method(filter_conventional=True)
>>> mod.filter_method
1
>>> mod.filter_conventional = True
>>> mod.filter_method
1
"""
if filter_method is not None:
self.filter_method = filter_method
for name in KalmanFilter.filter_methods:
if name in kwargs:
setattr(self, name, kwargs[name])
def set_inversion_method(self, inversion_method=None, **kwargs):
"""
Set the inversion method
The Kalman filter may contain one matrix inversion: that of the
forecast error covariance matrix. The inversion method controls how and
if that inverse is performed.
Parameters
----------
inversion_method : integer, optional
Bitmask value to set the inversion method to. See notes for
details.
**kwargs
Keyword arguments may be used to influence the inversion method by
setting individual boolean flags. See notes for details.
Notes
-----
The inversion method is defined by a collection of boolean flags, and
is internally stored as a bitmask. The methods available are:
INVERT_UNIVARIATE = 0x01
If the endogenous time series is univariate, then inversion can be
performed by simple division. If this flag is set and the time
series is univariate, then division will always be used even if
other flags are also set.
SOLVE_LU = 0x02
Use an LU decomposition along with a linear solver (rather than
ever actually inverting the matrix).
INVERT_LU = 0x04
Use an LU decomposition along with typical matrix inversion.
SOLVE_CHOLESKY = 0x08
Use a Cholesky decomposition along with a linear solver.
INVERT_CHOLESKY = 0x10
            Use a Cholesky decomposition along with typical matrix inversion.
If the bitmask is set directly via the `inversion_method` argument,
then the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the inversion method may also be specified by directly
modifying the class attributes which are defined similarly to the
keyword arguments.
The default inversion method is `INVERT_UNIVARIATE | SOLVE_CHOLESKY`
Several things to keep in mind are:
- Cholesky decomposition is about twice as fast as LU decomposition,
but it requires that the matrix be positive definite. While this
should generally be true, it may not be in every case.
- Using a linear solver rather than true matrix inversion is generally
faster and is numerically more stable.
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.inversion_method
1
>>> mod.solve_cholesky
True
>>> mod.invert_univariate
True
>>> mod.invert_lu
False
>>> mod.invert_univariate = False
>>> mod.inversion_method
8
>>> mod.set_inversion_method(solve_cholesky=False,
invert_cholesky=True)
>>> mod.inversion_method
16
"""
if inversion_method is not None:
self.inversion_method = inversion_method
for name in KalmanFilter.inversion_methods:
if name in kwargs:
setattr(self, name, kwargs[name])
def set_stability_method(self, stability_method=None, **kwargs):
"""
Set the numerical stability method
The Kalman filter is a recursive algorithm that may in some cases
suffer issues with numerical stability. The stability method controls
what, if any, measures are taken to promote stability.
Parameters
----------
stability_method : integer, optional
Bitmask value to set the stability method to. See notes for
details.
**kwargs
Keyword arguments may be used to influence the stability method by
setting individual boolean flags. See notes for details.
Notes
-----
The stability method is defined by a collection of boolean flags, and
is internally stored as a bitmask. The methods available are:
STABILITY_FORCE_SYMMETRY = 0x01
If this flag is set, symmetry of the predicted state covariance
matrix is enforced at each iteration of the filter, where each
element is set to the average of the corresponding elements in the
upper and lower triangle.
If the bitmask is set directly via the `stability_method` argument,
then the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the stability method may also be specified by directly
modifying the class attributes which are defined similarly to the
keyword arguments.
The default stability method is `STABILITY_FORCE_SYMMETRY`
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.stability_method
1
>>> mod.stability_force_symmetry
True
>>> mod.stability_force_symmetry = False
>>> mod.stability_method
0
"""
if stability_method is not None:
self.stability_method = stability_method
for name in KalmanFilter.stability_methods:
if name in kwargs:
setattr(self, name, kwargs[name])
def set_conserve_memory(self, conserve_memory=None, **kwargs):
"""
Set the memory conservation method
By default, the Kalman filter computes a number of intermediate
matrices at each iteration. The memory conservation options control
which of those matrices are stored.
Parameters
----------
conserve_memory : integer, optional
Bitmask value to set the memory conservation method to. See notes
for details.
**kwargs
Keyword arguments may be used to influence the memory conservation
method by setting individual boolean flags. See notes for details.
Notes
-----
The memory conservation method is defined by a collection of boolean
flags, and is internally stored as a bitmask. The methods available
are:
MEMORY_STORE_ALL = 0
Store all intermediate matrices. This is the default value.
MEMORY_NO_FORECAST = 0x01
Do not store the forecast, forecast error, or forecast error
covariance matrices. If this option is used, the `predict` method
from the results class is unavailable.
MEMORY_NO_PREDICTED = 0x02
Do not store the predicted state or predicted state covariance
matrices.
MEMORY_NO_FILTERED = 0x04
Do not store the filtered state or filtered state covariance
matrices.
MEMORY_NO_LIKELIHOOD = 0x08
Do not store the vector of loglikelihood values for each
observation. Only the sum of the loglikelihood values is stored.
MEMORY_CONSERVE
Do not store any intermediate matrices.
If the bitmask is set directly via the `conserve_memory` argument,
then the full method must be provided.
If keyword arguments are used to set individual boolean flags, then
the lowercase of the method must be used as an argument name, and the
value is the desired value of the boolean flag (True or False).
Note that the memory conservation method may also be specified by
directly modifying the class attributes which are defined similarly to
the keyword arguments.
The default memory conservation method is `MEMORY_STORE_ALL`, so that
all intermediate matrices are stored.
Examples
--------
>>> mod = sm.tsa.statespace.SARIMAX(range(10))
>>> mod.conserve_memory
0
>>> mod.memory_no_predicted
False
>>> mod.memory_no_predicted = True
>>> mod.conserve_memory
2
>>> mod.set_conserve_memory(memory_no_filtered=True,
memory_no_forecast=True)
>>> mod.conserve_memory
7
"""
if conserve_memory is not None:
self.conserve_memory = conserve_memory
for name in KalmanFilter.memory_options:
if name in kwargs:
setattr(self, name, kwargs[name])
def filter(self, filter_method=None, inversion_method=None,
stability_method=None, conserve_memory=None, tolerance=None,
loglikelihood_burn=None, results=None):
"""
Apply the Kalman filter to the statespace model.
Parameters
----------
filter_method : int, optional
Determines which Kalman filter to use. Default is conventional.
inversion_method : int, optional
Determines which inversion technique to use. Default is by Cholesky
decomposition.
stability_method : int, optional
Determines which numerical stability techniques to use. Default is
to enforce symmetry of the predicted state covariance matrix.
conserve_memory : int, optional
Determines what output from the filter to store. Default is to
store everything.
tolerance : float, optional
The tolerance at which the Kalman filter determines convergence to
steady-state. Default is 1e-19.
loglikelihood_burn : int, optional
The number of initial periods during which the loglikelihood is not
recorded. Default is 0.
results : class, object, or {'loglikelihood'}, optional
If a class which is a subclass of FilterResults, then that class is
instantiated and returned with the result of filtering. Classes
must subclass FilterResults.
If an object, then that object is updated with the new filtering
results.
If the string 'loglikelihood', then only the loglikelihood is
returned as an ndarray.
If None, then the default results object is updated with the
result of filtering.
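        Examples
        --------
        A minimal illustrative sketch (the random-walk matrix values below
        are arbitrary, chosen only for the example):
        >>> mod = KalmanFilter(k_endog=1, k_states=1, design=[[1.]],
        ...                    transition=[[1.]], selection=[[1.]],
        ...                    state_cov=[[1.]])
        >>> mod.bind(np.log(np.arange(10) + 1.))
        >>> mod.initialize_approximate_diffuse()
        >>> res = mod.filter()
        >>> res.filtered_state.shape
        (1, 10)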
"""
# Set the class to be the default results class, if None provided
if results is None:
results = self.results_class
# Initialize the filter
prefix, dtype, create_filter, create_statespace = (
self._initialize_filter(
filter_method, inversion_method, stability_method,
conserve_memory, tolerance, loglikelihood_burn
)
)
kfilter = self._kalman_filters[prefix]
# Instantiate a new results object, if required
if isinstance(results, type):
if not issubclass(results, FilterResults):
raise ValueError
results = results(self)
# Initialize the state
self._initialize_state(prefix=prefix)
# Run the filter
kfilter()
# We may just want the loglikelihood
if results == 'loglikelihood':
results = np.array(
self._kalman_filters[prefix].loglikelihood, copy=True
)
# Otherwise update the results object
else:
# Update the model features; unless we had to recreate the
# statespace, only update the filter options
results.update_representation(
self, only_options=not create_statespace
)
results.update_filter(kfilter)
return results
def loglike(self, loglikelihood_burn=None, **kwargs):
"""
Calculate the loglikelihood associated with the statespace model.
Parameters
----------
loglikelihood_burn : int, optional
The number of initial periods during which the loglikelihood is not
recorded. Default is 0.
**kwargs
Additional keyword arguments to pass to the Kalman filter. See
`KalmanFilter.filter` for more details.
Returns
-------
loglike : float
The joint loglikelihood.
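        Examples
        --------
        A sketch, reusing the illustrative ``mod`` from the `filter` example
        above:
        >>> llf = mod.loglike()
        >>> llf == mod.loglikeobs().sum()
        True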
"""
if self.memory_no_likelihood:
raise RuntimeError('Cannot compute loglikelihood if'
' MEMORY_NO_LIKELIHOOD option is selected.')
if loglikelihood_burn is None:
loglikelihood_burn = self.loglikelihood_burn
kwargs['results'] = 'loglikelihood'
return np.sum(self.filter(**kwargs)[loglikelihood_burn:])
def loglikeobs(self, loglikelihood_burn=None, **kwargs):
"""
Calculate the loglikelihood for each observation associated with the
statespace model.
Parameters
----------
loglikelihood_burn : int, optional
The number of initial periods during which the loglikelihood is not
recorded. Default is 0.
**kwargs
Additional keyword arguments to pass to the Kalman filter. See
`KalmanFilter.filter` for more details.
Notes
-----
If `loglikelihood_burn` is positive, then the entries in the returned
loglikelihood vector are set to be zero for those initial time periods.
Returns
-------
loglike : array of float
Array of loglikelihood values for each observation.
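        Examples
        --------
        A sketch (``mod`` as in the `filter` example above):
        >>> mod.loglikeobs().shape
        (10,)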
"""
if self.memory_no_likelihood:
raise RuntimeError('Cannot compute loglikelihood if'
' MEMORY_NO_LIKELIHOOD option is selected.')
if loglikelihood_burn is None:
loglikelihood_burn = self.loglikelihood_burn
kwargs['results'] = 'loglikelihood'
llf_obs = self.filter(**kwargs)
# Set any burned observations to have zero likelihood
llf_obs[:loglikelihood_burn] = 0
return llf_obs
def simulate(self, nsimulations, measurement_shocks=None,
state_shocks=None, initial_state=None):
"""
Simulate a new time series following the state space model
Parameters
----------
nsimulations : int
The number of observations to simulate. If the model is
time-invariant this can be any number. If the model is
            time-varying, then this number must be less than or equal to the
            number of observations in the sample.
measurement_shocks : array_like, optional
If specified, these are the shocks to the measurement equation,
:math:`\varepsilon_t`. If unspecified, these are automatically
generated using a pseudo-random number generator. If specified,
must be shaped `nsimulations` x `k_endog`, where `k_endog` is the
same as in the state space model.
state_shocks : array_like, optional
If specified, these are the shocks to the state equation,
:math:`\eta_t`. If unspecified, these are automatically
generated using a pseudo-random number generator. If specified,
must be shaped `nsimulations` x `k_posdef` where `k_posdef` is the
same as in the state space model.
initial_state : array_like, optional
If specified, this is the state vector at time zero, which should
be shaped (`k_states` x 1), where `k_states` is the same as in the
state space model. If unspecified, but the model has been
initialized, then that initialization is used. If unspecified and
the model has not been initialized, then a vector of zeros is used.
Note that this is not included in the returned `simulated_states`
array.
Returns
-------
simulated_obs : array
An (nsimulations x k_endog) array of simulated observations.
simulated_states : array
An (nsimulations x k_states) array of simulated states.
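        Examples
        --------
        A sketch (``mod`` as in the `filter` example above; shocks are fixed
        so the output is deterministic):
        >>> obs, states = mod.simulate(
        ...     5, measurement_shocks=np.zeros((5, 1)),
        ...     state_shocks=np.ones((5, 1)), initial_state=np.zeros(1))
        >>> obs.shape, states.shape
        ((5, 1), (5, 1))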
"""
time_invariant = self.time_invariant
# Check for valid number of simulations
if not time_invariant and nsimulations > self.nobs:
raise ValueError('In a time-varying model, cannot create more'
' simulations than there are observations.')
# Check / generate measurement shocks
if measurement_shocks is not None:
measurement_shocks = np.array(measurement_shocks)
if measurement_shocks.ndim == 0:
measurement_shocks = measurement_shocks[np.newaxis, np.newaxis]
elif measurement_shocks.ndim == 1:
measurement_shocks = measurement_shocks[:, np.newaxis]
            if measurement_shocks.shape != (nsimulations, self.k_endog):
raise ValueError('Invalid shape of provided measurement shocks.'
' Required (%d, %d)'
% (nsimulations, self.k_endog))
elif self.shapes['obs_cov'][-1] == 1:
measurement_shocks = np.random.multivariate_normal(
mean=np.zeros(self.k_endog), cov=self['obs_cov'],
size=nsimulations)
# Check / generate state shocks
if state_shocks is not None:
state_shocks = np.array(state_shocks)
if state_shocks.ndim == 0:
state_shocks = state_shocks[np.newaxis, np.newaxis]
elif state_shocks.ndim == 1:
state_shocks = state_shocks[:, np.newaxis]
            if state_shocks.shape != (nsimulations, self.k_posdef):
raise ValueError('Invalid shape of provided state shocks.'
' Required (%d, %d).'
% (nsimulations, self.k_posdef))
elif self.shapes['state_cov'][-1] == 1:
state_shocks = np.random.multivariate_normal(
mean=np.zeros(self.k_posdef), cov=self['state_cov'],
size=nsimulations)
# Get the initial states
if initial_state is not None:
initial_state = np.array(initial_state)
if initial_state.ndim == 0:
initial_state = initial_state[np.newaxis]
            elif (initial_state.ndim > 1 and
                  initial_state.shape != (self.k_states, 1)):
raise ValueError('Invalid shape of provided initial state'
' vector. Required (%d, 1)' % self.k_states)
elif self.initialization == 'known':
initial_state = self._initial_state
elif self.initialization in ['approximate_diffuse', 'stationary']:
initial_state = np.zeros(self.k_states)
else:
initial_state = np.zeros(self.k_states)
return self._simulate(nsimulations, measurement_shocks, state_shocks,
initial_state)
def _simulate(self, nsimulations, measurement_shocks, state_shocks,
initial_state):
time_invariant = self.time_invariant
# Holding variables for the simulations
simulated_obs = np.zeros((nsimulations, self.k_endog),
dtype=self.dtype)
simulated_states = np.zeros((nsimulations+1, self.k_states),
dtype=self.dtype)
simulated_states[0] = initial_state
# Perform iterations to create the new time series
obs_intercept_t = 0
design_t = 0
state_intercept_t = 0
transition_t = 0
selection_t = 0
for t in range(nsimulations):
            # Get the current shocks (this accommodates time-varying matrices)
if measurement_shocks is None:
measurement_shock = np.random.multivariate_normal(
mean=np.zeros(self.k_endog), cov=self['obs_cov', :, :, t])
else:
measurement_shock = measurement_shocks[t]
if state_shocks is None:
state_shock = np.random.multivariate_normal(
mean=np.zeros(self.k_posdef),
cov=self['state_cov', :, :, t])
else:
state_shock = state_shocks[t]
# Get current-iteration matrices
if not time_invariant:
obs_intercept_t = 0 if self.obs_intercept.shape[-1] == 1 else t
design_t = 0 if self.design.shape[-1] == 1 else t
state_intercept_t = (
0 if self.state_intercept.shape[-1] == 1 else t)
transition_t = 0 if self.transition.shape[-1] == 1 else t
selection_t = 0 if self.selection.shape[-1] == 1 else t
obs_intercept = self['obs_intercept', :, obs_intercept_t]
design = self['design', :, :, design_t]
state_intercept = self['state_intercept', :, state_intercept_t]
transition = self['transition', :, :, transition_t]
selection = self['selection', :, :, selection_t]
# Iterate the measurement equation
simulated_obs[t] = (
obs_intercept + np.dot(design, simulated_states[t])
+ measurement_shock)
# Iterate the state equation
simulated_states[t+1] = (
state_intercept + np.dot(transition, simulated_states[t]) +
np.dot(selection, state_shock))
return simulated_obs, simulated_states[:-1]
def impulse_responses(self, steps=10, impulse=0, orthogonalized=False,
cumulative=False, **kwargs):
"""
Impulse response function
Parameters
----------
steps : int, optional
The number of steps for which impulse responses are calculated.
Default is 10. Note that the initial impulse is not counted as a
step, so if `steps=1`, the output will have 2 entries.
impulse : int or array_like
If an integer, the state innovation to pulse; must be between 0
and `k_posdef-1` where `k_posdef` is the same as in the state
space model. Alternatively, a custom impulse vector may be
provided; must be a column vector with shape `(k_posdef, 1)`.
orthogonalized : boolean, optional
Whether or not to perform impulse using orthogonalized innovations.
            Note that this will also affect custom `impulse` vectors. Default
is False.
cumulative : boolean, optional
Whether or not to return cumulative impulse responses. Default is
False.
**kwargs
If the model is time-varying and `steps` is greater than the number
of observations, any of the state space representation matrices
that are time-varying must have updated values provided for the
out-of-sample steps.
For example, if `design` is a time-varying component, `nobs` is 10,
and `steps` is 15, a (`k_endog` x `k_states` x 5) matrix must be
provided with the new design matrix values.
Returns
-------
impulse_responses : array
Responses for each endogenous variable due to the impulse
given by the `impulse` argument. A (steps + 1 x k_endog) array.
Notes
-----
Intercepts in the measurement and state equation are ignored when
calculating impulse responses.
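        Examples
        --------
        A sketch (``mod`` as in the `filter` example above; that model is a
        random walk, so a unit state impulse persists in the observations):
        >>> irf = mod.impulse_responses(steps=2)
        >>> irf.shape
        (3, 1)
        >>> bool((irf == 1.).all())
        True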
"""
# Since the first step is the impulse itself, we actually want steps+1
steps += 1
# Check for what kind of impulse we want
        if isinstance(impulse, int):
            if impulse >= self.k_posdef or impulse < 0:
                raise ValueError('Invalid value for `impulse`. Must be the'
                                 ' index of one of the state innovations.')
# Create the (non-orthogonalized) impulse vector
idx = impulse
impulse = np.zeros(self.k_posdef)
impulse[idx] = 1
else:
impulse = np.array(impulse)
if impulse.ndim > 1:
impulse = np.squeeze(impulse)
            if impulse.shape != (self.k_posdef,):
raise ValueError('Invalid impulse vector. Must be shaped'
' (%d,)' % self.k_posdef)
# Orthogonalize the impulses, if requested, using Cholesky on the
# first state covariance matrix
if orthogonalized:
            state_chol = np.linalg.cholesky(self.state_cov[:, :, 0])
impulse = np.dot(state_chol, impulse)
# If we have a time-invariant system, we can solve for the IRF directly
if self.time_invariant:
# Get the state space matrices
design = self.design[:, :, 0]
transition = self.transition[:, :, 0]
selection = self.selection[:, :, 0]
# Holding arrays
irf = np.zeros((steps, self.k_endog), dtype=self.dtype)
states = np.zeros((steps, self.k_states), dtype=self.dtype)
# First iteration
states[0] = np.dot(selection, impulse)
irf[0] = np.dot(design, states[0])
# Iterations
for t in range(1, steps):
states[t] = np.dot(transition, states[t-1])
irf[t] = np.dot(design, states[t])
# Otherwise, create a new model
else:
# Get the basic model components
representation = {}
for name, shape in self.shapes.items():
if name in ['obs', 'obs_intercept', 'state_intercept']:
continue
representation[name] = getattr(self, name)
# Allow additional specification
warning = ('Model has time-invariant %s matrix, so the %s'
' argument to `irf` has been ignored.')
            exception = ('Impulse response functions for models with'
                         ' time-varying %s matrix require an updated'
                         ' time-varying matrix for any periods beyond those in'
                         ' the original model.')
for name, shape in self.shapes.items():
if name in ['obs', 'obs_intercept', 'state_intercept']:
continue
if representation[name].shape[-1] == 1:
if name in kwargs:
warn(warning % (name, name))
elif name not in kwargs:
raise ValueError(exception % name)
                else:
                    mat = np.asarray(kwargs[name])
                    # Assumed convention: the updated matrices must cover
                    # every simulated period beyond the original sample
                    # (`steps` already includes the initial impulse period).
                    nforecast = steps - self.nobs
                    validate_matrix_shape(name, mat.shape, shape[0],
                                          shape[1], nforecast)
                    if mat.ndim < 3 or mat.shape[2] != nforecast:
                        raise ValueError(exception % name)
representation[name] = np.c_[representation[name], mat]
# Setup the new statespace representation
model_kwargs = {
'filter_method': self.filter_method,
'inversion_method': self.inversion_method,
'stability_method': self.stability_method,
'conserve_memory': self.conserve_memory,
'tolerance': self.tolerance,
'loglikelihood_burn': self.loglikelihood_burn
}
model_kwargs.update(representation)
            # Bind `steps` periods of placeholder zeros so that simulating
            # `steps` periods below is valid for the new (time-varying) model.
            model = KalmanFilter(np.zeros((steps, self.k_endog)),
                                 self.k_states, self.k_posdef, **model_kwargs)
model.initialize_known(self.initial_state, self.initial_state_cov)
model._initialize_filter()
model._initialize_state()
# Get the impulse response function via simulation of the state
# space model, but with other shocks set to zero
measurement_shocks = np.zeros((steps, self.k_endog))
state_shocks = np.zeros((steps, self.k_posdef))
state_shocks[0] = impulse
irf, _ = model.simulate(
steps, measurement_shocks=measurement_shocks,
state_shocks=state_shocks)
# Get the cumulative response if requested
if cumulative:
irf = np.cumsum(irf, axis=0)
return irf
class FilterResults(FrozenRepresentation):
"""
Results from applying the Kalman filter to a state space model.
Parameters
----------
model : Representation
A Statespace representation
Attributes
----------
nobs : int
Number of observations.
k_endog : int
The dimension of the observation series.
k_states : int
The dimension of the unobserved state process.
k_posdef : int
The dimension of a guaranteed positive definite
covariance matrix describing the shocks in the
        transition equation.
dtype : dtype
Datatype of representation matrices
prefix : str
BLAS prefix of representation matrices
shapes : dictionary of name,tuple
A dictionary recording the shapes of each of the
representation matrices as tuples.
endog : array
The observation vector.
design : array
The design matrix, :math:`Z`.
obs_intercept : array
The intercept for the observation equation, :math:`d`.
obs_cov : array
The covariance matrix for the observation equation :math:`H`.
transition : array
The transition matrix, :math:`T`.
state_intercept : array
The intercept for the transition equation, :math:`c`.
selection : array
The selection matrix, :math:`R`.
state_cov : array
The covariance matrix for the state equation :math:`Q`.
missing : array of bool
An array of the same size as `endog`, filled
with boolean values that are True if the
corresponding entry in `endog` is NaN and False
otherwise.
nmissing : array of int
An array of size `nobs`, where the ith entry
is the number (between 0 and `k_endog`) of NaNs in
the ith row of the `endog` array.
time_invariant : bool
Whether or not the representation matrices are time-invariant
initialization : str
Kalman filter initialization method.
initial_state : array_like
        The state vector used to initialize the Kalman filter.
initial_state_cov : array_like
        The state covariance matrix used to initialize the Kalman filter.
filter_method : int
Bitmask representing the Kalman filtering method
inversion_method : int
Bitmask representing the method used to
invert the forecast error covariance matrix.
stability_method : int
Bitmask representing the methods used to promote
numerical stability in the Kalman filter
recursions.
conserve_memory : int
Bitmask representing the selected memory conservation method.
tolerance : float
The tolerance at which the Kalman filter
determines convergence to steady-state.
loglikelihood_burn : int
The number of initial periods during which
the loglikelihood is not recorded.
converged : bool
Whether or not the Kalman filter converged.
period_converged : int
The time period in which the Kalman filter converged.
filtered_state : array
The filtered state vector at each time period.
filtered_state_cov : array
The filtered state covariance matrix at each time period.
predicted_state : array
The predicted state vector at each time period.
predicted_state_cov : array
The predicted state covariance matrix at each time period.
forecasts : array
The one-step-ahead forecasts of observations at each time period.
forecasts_error : array
The forecast errors at each time period.
forecasts_error_cov : array
The forecast error covariance matrices at each time period.
llf_obs : array
The loglikelihood values at each time period.
"""
_filter_attributes = [
'filter_method', 'inversion_method', 'stability_method',
'conserve_memory', 'tolerance', 'loglikelihood_burn', 'converged',
'period_converged', 'filtered_state', 'filtered_state_cov',
'predicted_state', 'predicted_state_cov',
'forecasts', 'forecasts_error', 'forecasts_error_cov',
'llf_obs'
]
_filter_options = (
KalmanFilter.filter_methods + KalmanFilter.stability_methods +
KalmanFilter.inversion_methods + KalmanFilter.memory_options
)
_attributes = FrozenRepresentation._model_attributes + _filter_attributes
def __init__(self, model):
super(FilterResults, self).__init__(model)
# Setup caches for uninitialized objects
self._kalman_gain = None
self._standardized_forecasts_error = None
def update_representation(self, model, only_options=False):
"""
Update the results to match a given model
Parameters
----------
model : Representation
The model object from which to take the updated values.
only_options : boolean, optional
If set to true, only the filter options are updated, and the state
space representation is not updated. Default is False.
Notes
-----
This method is rarely required except for internal usage.
"""
if not only_options:
super(FilterResults, self).update_representation(model)
# Save the options as boolean variables
for name in self._filter_options:
setattr(self, name, getattr(model, name, None))
def update_filter(self, kalman_filter):
"""
Update the filter results
Parameters
----------
kalman_filter : KalmanFilter
The model object from which to take the updated values.
Notes
-----
This method is rarely required except for internal usage.
"""
# State initialization
self.initial_state = np.array(
kalman_filter.model.initial_state, copy=True
)
self.initial_state_cov = np.array(
kalman_filter.model.initial_state_cov, copy=True
)
# Save Kalman filter parameters
self.filter_method = kalman_filter.filter_method
self.inversion_method = kalman_filter.inversion_method
self.stability_method = kalman_filter.stability_method
self.conserve_memory = kalman_filter.conserve_memory
self.tolerance = kalman_filter.tolerance
self.loglikelihood_burn = kalman_filter.loglikelihood_burn
# Save Kalman filter output
self.converged = bool(kalman_filter.converged)
self.period_converged = kalman_filter.period_converged
self.filtered_state = np.array(kalman_filter.filtered_state, copy=True)
self.filtered_state_cov = np.array(
kalman_filter.filtered_state_cov, copy=True
)
self.predicted_state = np.array(
kalman_filter.predicted_state, copy=True
)
self.predicted_state_cov = np.array(
kalman_filter.predicted_state_cov, copy=True
)
        # Note: use forecasts rather than forecast, so as not to interfere
# with the `forecast` methods in subclasses
self.forecasts = np.array(kalman_filter.forecast, copy=True)
self.forecasts_error = np.array(
kalman_filter.forecast_error, copy=True
)
self.forecasts_error_cov = np.array(
kalman_filter.forecast_error_cov, copy=True
)
self.llf_obs = np.array(kalman_filter.loglikelihood, copy=True)
# If there was missing data, save the original values from the Kalman
# filter output, since below will set the values corresponding to
# the missing observations to nans.
self.missing_forecasts = None
self.missing_forecasts_error = None
self.missing_forecasts_error_cov = None
if np.sum(self.nmissing) > 0:
            # Copy the provided arrays (which are shaped as in the Kalman
            # filter dataset) into new variables
# into new variables
self.missing_forecasts = np.copy(self.forecasts)
self.missing_forecasts_error = np.copy(self.forecasts_error)
self.missing_forecasts_error_cov = (
np.copy(self.forecasts_error_cov)
)
# Fill in missing values in the forecast, forecast error, and
# forecast error covariance matrix (this is required due to how the
# Kalman filter implements observations that are either partly or
# completely missing)
# Construct the predictions, forecasts
if not (self.memory_no_forecast or self.memory_no_predicted):
for t in range(self.nobs):
design_t = 0 if self.design.shape[2] == 1 else t
obs_cov_t = 0 if self.obs_cov.shape[2] == 1 else t
obs_intercept_t = 0 if self.obs_intercept.shape[1] == 1 else t
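                # (a trailing dimension of 1 means the matrix is
                # time-invariant, so index 0 is reused for every period t)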
# For completely missing observations, the Kalman filter will
# produce forecasts, but forecast errors and the forecast
# error covariance matrix will be zeros - make them nan to
# improve clarity of results.
if self.nmissing[t] == self.k_endog:
# We can recover forecasts
self.forecasts[:, t] = np.dot(
self.design[:, :, design_t], self.predicted_state[:, t]
) + self.obs_intercept[:, obs_intercept_t]
self.forecasts_error[:, t] = np.nan
self.forecasts_error_cov[:, :, t] = np.dot(
np.dot(self.design[:, :, design_t],
self.predicted_state_cov[:, :, t]),
self.design[:, :, design_t].T
) + self.obs_cov[:, :, obs_cov_t]
# For partially missing observations, the Kalman filter
# will produce all elements (forecasts, forecast errors,
# forecast error covariance matrices) as usual, but their
# dimension will only be equal to the number of non-missing
# elements, and their location in memory will be in the first
# blocks (e.g. for the forecasts_error, the first
# k_endog - nmissing[t] columns will be filled in), regardless
# of which endogenous variables they refer to (i.e. the non-
# missing endogenous variables for that observation).
# Furthermore, the forecast error covariance matrix is only
# valid for those elements. What is done is to set all elements
# to nan for these observations so that they are flagged as
# missing. The variables missing_forecasts, etc. then provide
# the forecasts, etc. provided by the Kalman filter, from which
# the data can be retrieved if desired.
elif self.nmissing[t] > 0:
self.forecasts[:, t] = np.nan
self.forecasts_error[:, t] = np.nan
self.forecasts_error_cov[:, :, t] = np.nan
@property
def kalman_gain(self):
"""
Kalman gain matrices
"""
if self._kalman_gain is None:
# k x n
self._kalman_gain = np.zeros(
(self.k_states, self.k_endog, self.nobs), dtype=self.dtype)
for t in range(self.nobs):
design_t = 0 if self.design.shape[2] == 1 else t
transition_t = 0 if self.transition.shape[2] == 1 else t
self._kalman_gain[:, :, t] = np.dot(
np.dot(
self.transition[:, :, transition_t],
self.predicted_state_cov[:, :, t]
),
np.dot(
np.transpose(self.design[:, :, design_t]),
np.linalg.inv(self.forecasts_error_cov[:, :, t])
)
)
return self._kalman_gain
@property
def standardized_forecasts_error(self):
"""
Standardized forecast errors
"""
if self._standardized_forecasts_error is None:
from scipy import linalg
self._standardized_forecasts_error = np.zeros(
self.forecasts_error.shape, dtype=self.dtype)
for t in range(self.forecasts_error_cov.shape[2]):
if self.nmissing[t] > 0:
self._standardized_forecasts_error[:, t] = np.nan
else:
upper, _ = linalg.cho_factor(
self.forecasts_error_cov[:, :, t]
)
self._standardized_forecasts_error[:, t] = (
linalg.solve_triangular(
upper, self.forecasts_error[:, t]
)
)
return self._standardized_forecasts_error
def predict(self, start=None, end=None, dynamic=None, **kwargs):
"""
In-sample and out-of-sample prediction for state space models generally
Parameters
----------
start : int, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast will be at start.
end : int, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast will be at end.
dynamic : int, optional
Offset relative to `start` at which to begin dynamic prediction.
Prior to this observation, true endogenous values will be used for
prediction; starting with this observation and continuing through
the end of prediction, forecasted endogenous values will be used
instead.
**kwargs
If the prediction range is outside of the sample range, any
of the state space representation matrices that are time-varying
must have updated values provided for the out-of-sample range.
            For example, if `obs_intercept` is a time-varying component and
the prediction range extends 10 periods beyond the end of the
sample, a (`k_endog` x 10) matrix must be provided with the new
intercept values.
Returns
-------
results : PredictionResults
A PredictionResults object.
Notes
-----
All prediction is performed by applying the deterministic part of the
measurement equation using the predicted state variables.
Out-of-sample prediction first applies the Kalman filter to missing
data for the number of periods desired to obtain the predicted states.
"""
# Cannot predict if we do not have appropriate arrays
if self.memory_no_forecast or self.memory_no_predicted:
raise ValueError('Predict is not possible if memory conservation'
' has been used to avoid storing forecasts or'
' predicted values.')
# Get the start and the end of the entire prediction range
if start is None:
start = 0
elif start < 0:
raise ValueError('Cannot predict values previous to the sample.')
if end is None:
end = self.nobs
        # Prediction and forecasting are performed by iterating the Kalman
        # filter through the entire range [0, end].
# Then, everything is returned corresponding to the range [start, end].
# In order to perform the calculations, the range is separately split
# up into the following categories:
# - static: (in-sample) the Kalman filter is run as usual
# - dynamic: (in-sample) the Kalman filter is run, but on missing data
# - forecast: (out-of-sample) the Kalman filter is run, but on missing
# data
# Short-circuit if end is before start
if end <= start:
raise ValueError('End of prediction must be after start.')
# Get the number of forecasts to make after the end of the sample
nforecast = max(0, end - self.nobs)
# Get the number of dynamic prediction periods
# If `dynamic=True`, then assume that we want to begin dynamic
# prediction at the start of the sample prediction.
if dynamic is True:
dynamic = 0
# If `dynamic=False`, then assume we want no dynamic prediction
if dynamic is False:
dynamic = None
ndynamic = 0
if dynamic is not None:
# Replace the relative dynamic offset with an absolute offset
dynamic = start + dynamic
# Validate the `dynamic` parameter
if dynamic < 0:
raise ValueError('Dynamic prediction cannot begin prior to the'
' first observation in the sample.')
elif dynamic > end:
warn('Dynamic prediction specified to begin after the end of'
' prediction, and so has no effect.')
dynamic = None
elif dynamic > self.nobs:
warn('Dynamic prediction specified to begin during'
' out-of-sample forecasting period, and so has no'
' effect.')
dynamic = None
# Get the total size of the desired dynamic forecasting component
# Note: the first `dynamic` periods of prediction are actually
# *not* dynamic, because dynamic prediction begins at observation
# `dynamic`.
if dynamic is not None:
ndynamic = max(0, min(end, self.nobs) - dynamic)
# Get the number of in-sample static predictions
nstatic = min(end, self.nobs) if dynamic is None else dynamic
        # Construct the design and observation intercept and covariance
        # matrices for the prediction range. If not time-varying in the
        # original model, they will be copied over if none are provided in
        # `kwargs`. Otherwise additional matrices must be provided in `kwargs`.
representation = {}
for name, shape in self.shapes.items():
if name == 'obs':
continue
representation[name] = getattr(self, name)
# Update the matrices from kwargs for forecasts
warning = ('Model has time-invariant %s matrix, so the %s'
' argument to `predict` has been ignored.')
exception = ('Forecasting for models with time-varying %s matrix'
' requires an updated time-varying matrix for the'
' period to be forecasted.')
if nforecast > 0:
for name, shape in self.shapes.items():
if name == 'obs':
continue
if representation[name].shape[-1] == 1:
if name in kwargs:
warn(warning % (name, name))
elif name not in kwargs:
raise ValueError(exception % name)
else:
mat = np.asarray(kwargs[name])
if len(shape) == 2:
validate_vector_shape(name, mat.shape,
shape[0], nforecast)
if mat.ndim < 2 or not mat.shape[1] == nforecast:
raise ValueError(exception % name)
representation[name] = np.c_[representation[name], mat]
else:
validate_matrix_shape(name, mat.shape, shape[0],
shape[1], nforecast)
if mat.ndim < 3 or not mat.shape[2] == nforecast:
raise ValueError(exception % name)
representation[name] = np.c_[representation[name], mat]
# Update the matrices from kwargs for dynamic prediction in the case
# that `end` is less than `nobs` and `dynamic` is less than `end`. In
# this case, any time-varying matrices in the default `representation`
# will be too long, causing an error to be thrown below in the
# KalmanFilter(...) construction call, because the endog has length
# nstatic + ndynamic + nforecast, whereas the time-varying matrices
# from `representation` have length nobs.
if ndynamic > 0 and end < self.nobs:
for name, shape in self.shapes.items():
if not name == 'obs' and representation[name].shape[-1] > 1:
representation[name] = representation[name][..., :end]
# Construct the predicted state and covariance matrix for each time
# period depending on whether that time period corresponds to
# one-step-ahead prediction, dynamic prediction, or out-of-sample
# forecasting.
# If we only have simple prediction, then we can use the already saved
# Kalman filter output
if ndynamic == 0 and nforecast == 0:
results = self
else:
# Construct the new endogenous array.
endog = np.empty((self.k_endog, ndynamic + nforecast))
endog.fill(np.nan)
endog = np.asfortranarray(np.c_[self.endog[:, :nstatic], endog])
# Setup the new statespace representation
model_kwargs = {
'filter_method': self.filter_method,
'inversion_method': self.inversion_method,
'stability_method': self.stability_method,
'conserve_memory': self.conserve_memory,
'tolerance': self.tolerance,
'loglikelihood_burn': self.loglikelihood_burn
}
model_kwargs.update(representation)
model = KalmanFilter(
endog, self.k_states, self.k_posdef, **model_kwargs
)
model.initialize_known(
self.initial_state,
self.initial_state_cov
)
model._initialize_filter()
model._initialize_state()
results = self._predict(nstatic, ndynamic, nforecast, model)
return PredictionResults(results, start, end, nstatic, ndynamic,
nforecast)
def _predict(self, nstatic, ndynamic, nforecast, model):
# TODO: this doesn't use self, and can either be a static method or
# moved outside the class altogether.
# Get the underlying filter
kfilter = model._kalman_filter
# Save this (which shares memory with the memoryview on which the
# Kalman filter will be operating) so that we can replace actual data
# with predicted data during dynamic forecasting
endog = model._representations[model.prefix]['obs']
# print(nstatic, ndynamic, nforecast, model.nobs)
for t in range(kfilter.model.nobs):
# Run the Kalman filter for the first `nstatic` periods (for
# which dynamic computation will not be performed)
if t < nstatic:
next(kfilter)
# Perform dynamic prediction
elif t < nstatic + ndynamic:
design_t = 0 if model.design.shape[2] == 1 else t
obs_intercept_t = 0 if model.obs_intercept.shape[1] == 1 else t
# Unconditional value is the intercept (often zeros)
endog[:, t] = model.obs_intercept[:, obs_intercept_t]
# If t > 0, then we can condition the forecast on the state
if t > 0:
# Predict endog[:, t] given `predicted_state` calculated in
# previous iteration (i.e. t-1)
endog[:, t] += np.dot(
model.design[:, :, design_t],
kfilter.predicted_state[:, t]
)
# Advance Kalman filter
next(kfilter)
# Perform any (one-step-ahead) forecasting
else:
next(kfilter)
# Return the predicted state and predicted state covariance matrices
results = FilterResults(model)
results.update_filter(kfilter)
return results
class PredictionResults(FilterResults):
"""
Results of in-sample and out-of-sample prediction for state space models
generally
Parameters
----------
results : FilterResults
Output from filtering, corresponding to the prediction desired
start : int
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast will be at start.
end : int
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast will be at end.
nstatic : int
Number of in-sample static predictions (these are always the first
elements of the prediction output).
ndynamic : int
Number of in-sample dynamic predictions (these always follow the static
predictions directly, and are directly followed by the forecasts).
nforecast : int
        Number of out-of-sample forecasts (these always follow the dynamic
predictions directly).
Attributes
----------
npredictions : int
Number of observations in the predicted series; this is not necessarily
the same as the number of observations in the original model from which
prediction was performed.
start : int
Zero-indexed observation number at which to start prediction,
i.e., the first predict will be at `start`; this is relative to the
original model from which prediction was performed.
end : int
Zero-indexed observation number at which to end prediction,
i.e., the last predict will be at `end`; this is relative to the
original model from which prediction was performed.
nstatic : int
Number of in-sample static predictions.
ndynamic : int
Number of in-sample dynamic predictions.
nforecast : int
        Number of out-of-sample forecasts.
endog : array
The observation vector.
design : array
The design matrix, :math:`Z`.
obs_intercept : array
The intercept for the observation equation, :math:`d`.
obs_cov : array
The covariance matrix for the observation equation :math:`H`.
transition : array
The transition matrix, :math:`T`.
state_intercept : array
The intercept for the transition equation, :math:`c`.
selection : array
The selection matrix, :math:`R`.
state_cov : array
The covariance matrix for the state equation :math:`Q`.
filtered_state : array
The filtered state vector at each time period.
filtered_state_cov : array
The filtered state covariance matrix at each time period.
predicted_state : array
The predicted state vector at each time period.
predicted_state_cov : array
The predicted state covariance matrix at each time period.
forecasts : array
The one-step-ahead forecasts of observations at each time period.
forecasts_error : array
The forecast errors at each time period.
forecasts_error_cov : array
The forecast error covariance matrices at each time period.
Notes
-----
    The provided ranges must be conformable, meaning that
    `end - start == nstatic + ndynamic + nforecast`.
This class is essentially a view to the FilterResults object, but
returning the appropriate ranges for everything.
"""
representation_attributes = [
        'endog', 'design', 'obs_intercept',
'obs_cov', 'transition', 'state_intercept', 'selection',
'state_cov'
]
filter_attributes = [
'filtered_state', 'filtered_state_cov',
'predicted_state', 'predicted_state_cov',
'forecasts', 'forecasts_error', 'forecasts_error_cov'
]
def __init__(self, results, start, end, nstatic, ndynamic, nforecast):
from scipy import stats
# Save the filter results object
self.results = results
# Save prediction ranges
        self.npredictions = end - start
self.start = start
self.end = end
self.nstatic = nstatic
self.ndynamic = ndynamic
self.nforecast = nforecast
def __getattr__(self, attr):
"""
Provide access to the representation and filtered output in the
appropriate range (`start` - `end`).
"""
# Prevent infinite recursive lookups
if attr[0] == '_':
raise AttributeError("'%s' object has no attribute '%s'" %
(self.__class__.__name__, attr))
_attr = '_' + attr
# Cache the attribute
if not hasattr(self, _attr):
if attr == 'endog' or attr in self.filter_attributes:
# Get a copy
value = getattr(self.results, attr).copy()
# Subset to the correct time frame
value = value[..., self.start:self.end]
elif attr in self.representation_attributes:
value = getattr(self.results, attr).copy()
# If a time-invariant matrix, return it. Otherwise, subset to
# the correct period.
if value.shape[-1] == 1:
value = value[..., 0]
else:
value = value[..., self.start:self.end]
else:
raise AttributeError("'%s' object has no attribute '%s'" %
(self.__class__.__name__, attr))
setattr(self, _attr, value)
return getattr(self, _attr)
|
YihaoLu/statsmodels
|
statsmodels/tsa/statespace/kalman_filter.py
|
Python
|
bsd-3-clause
| 71,383
|
"""Dump flowpaths to a shapefile."""
from geopandas import read_postgis
from pyiem.util import get_dbconn
def main():
"""Go Main Go."""
pgconn = get_dbconn("idep")
df = read_postgis(
"""
SELECT f.fpath, f.huc_12, ST_Transform(f.geom, 4326) as geo from
flowpaths f, huc12 h WHERE h.scenario = 0 and f.scenario = 0
and h.huc_12 = f.huc_12 and h.states ~* 'IA'
""",
pgconn,
index_col=None,
geom_col="geo",
)
df.to_file("ia_flowpaths.shp")
if __name__ == "__main__":
main()
|
akrherz/idep
|
scripts/util/dump_flowpaths.py
|
Python
|
mit
| 559
|
# -*- coding:utf-8 -*-
# ========================================================== #
# File name: vgg_19.py
# Author: BIGBALLON
# Date created: 07/22/2017
# Python Version: 3.5.2
# Description: implement vgg19 network to train cifar10
# ========================================================== #
import tensorflow as tf
from data_utility import *
iterations = 200
batch_size = 250
total_epoch = 164
weight_decay = 0.0005 # change it for test
dropout_rate = 0.5
momentum_rate = 0.9
log_save_path = './pretrain_vgg_logs'
model_save_path = './model/'
# ========================================================== #
# ├─ bias_variable()
# ├─ conv2d() With Batch Normalization
# ├─ max_pool()
# └─ global_avg_pool()
# ========================================================== #
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape, dtype=tf.float32 )
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')
def max_pool(input, k_size=1, stride=1, name=None):
return tf.nn.max_pool(input, ksize=[1, k_size, k_size, 1], strides=[1, stride, stride, 1], padding='SAME',name=name)
def batch_norm(input):
return tf.contrib.layers.batch_norm(input, decay=0.9, center=True, scale=True, epsilon=1e-3, is_training=train_flag, updates_collections=None)
# ========================================================== #
# ├─ _random_crop()
# ├─ _random_flip_leftright()
# ├─ data_augmentation()
# ├─ data_preprocessing()
# └─ learning_rate_schedule()
# ========================================================== #
def _random_crop(batch, crop_shape, padding=None):
oshape = np.shape(batch[0])
if padding:
oshape = (oshape[0] + 2*padding, oshape[1] + 2*padding)
new_batch = []
npad = ((padding, padding), (padding, padding), (0, 0))
for i in range(len(batch)):
new_batch.append(batch[i])
if padding:
new_batch[i] = np.lib.pad(batch[i], pad_width=npad,
mode='constant', constant_values=0)
nh = random.randint(0, oshape[0] - crop_shape[0])
nw = random.randint(0, oshape[1] - crop_shape[1])
new_batch[i] = new_batch[i][nh:nh + crop_shape[0],
nw:nw + crop_shape[1]]
return new_batch
def _random_flip_leftright(batch):
for i in range(len(batch)):
if bool(random.getrandbits(1)):
batch[i] = np.fliplr(batch[i])
return batch
def data_preprocessing(x_train,x_test):
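    # Subtract the per-channel mean pixel values (the standard VGG/ImageNet
    # RGB means 123.680, 116.779, 103.939) from both splits.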
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train[:,:,:,0] = (x_train[:,:,:,0]-123.680)
x_train[:,:,:,1] = (x_train[:,:,:,1]-116.779)
x_train[:,:,:,2] = (x_train[:,:,:,2]-103.939)
x_test[:,:,:,0] = (x_test[:,:,:,0]-123.680)
x_test[:,:,:,1] = (x_test[:,:,:,1]-116.779)
x_test[:,:,:,2] = (x_test[:,:,:,2]-103.939)
return x_train, x_test
def learning_rate_schedule(epoch_num):
if epoch_num < 81:
return 0.1
elif epoch_num < 121:
return 0.01
else:
return 0.001
def data_augmentation(batch):
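    # Standard CIFAR-10 augmentation: random horizontal flip, then pad by
    # 4 pixels and take a random 32x32 crop.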
batch = _random_flip_leftright(batch)
batch = _random_crop(batch, [32,32], 4)
return batch
def run_testing(sess,ep):
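    # Evaluate the 10,000 test images in 10 batches of 1,000 and average the
    # loss and accuracy over the batches.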
acc = 0.0
loss = 0.0
pre_index = 0
add = 1000
for it in range(10):
batch_x = test_x[pre_index:pre_index+add]
batch_y = test_y[pre_index:pre_index+add]
pre_index = pre_index + add
loss_, acc_ = sess.run([cross_entropy,accuracy],feed_dict={x:batch_x, y_:batch_y, keep_prob: 1.0, train_flag: False})
loss += loss_ / 10.0
acc += acc_ / 10.0
summary = tf.Summary(value=[tf.Summary.Value(tag="test_loss", simple_value=loss),
tf.Summary.Value(tag="test_accuracy", simple_value=acc)])
return acc, loss, summary
# ========================================================== #
# ├─ main()
# Training and Testing
# Save train/test loss and acc for visualization
# Save Model in ./model
# ========================================================== #
if __name__ == '__main__':
train_x, train_y, test_x, test_y = prepare_data()
train_x, test_x = data_preprocessing(train_x, test_x)
# load pretrained weight from vgg19.npy
params_dict = np.load('vgg19.npy',encoding='latin1').item()
# define placeholder x, y_ , keep_prob, learning_rate
x = tf.placeholder(tf.float32,[None, image_size, image_size, 3])
y_ = tf.placeholder(tf.float32, [None, class_num])
keep_prob = tf.placeholder(tf.float32)
learning_rate = tf.placeholder(tf.float32)
train_flag = tf.placeholder(tf.bool)
# build_network
W_conv1_1 = tf.Variable(params_dict['conv1_1'][0])
b_conv1_1 = tf.Variable(params_dict['conv1_1'][1])
output = tf.nn.relu( batch_norm(conv2d(x,W_conv1_1) + b_conv1_1))
W_conv1_2 = tf.Variable(params_dict['conv1_2'][0])
b_conv1_2 = tf.Variable(params_dict['conv1_2'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv1_2) + b_conv1_2))
output = max_pool(output, 2, 2, "pool1")
W_conv2_1 = tf.Variable(params_dict['conv2_1'][0])
    b_conv2_1 = tf.Variable(params_dict['conv2_1'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv2_1) + b_conv2_1))
W_conv2_2 = tf.Variable(params_dict['conv2_2'][0])
b_conv2_2 = tf.Variable(params_dict['conv2_2'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv2_2) + b_conv2_2))
output = max_pool(output, 2, 2, "pool2")
W_conv3_1 = tf.Variable(params_dict['conv3_1'][0])
b_conv3_1 = tf.Variable(params_dict['conv3_1'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv3_1) + b_conv3_1))
W_conv3_2 = tf.Variable(params_dict['conv3_2'][0])
b_conv3_2 = tf.Variable(params_dict['conv3_2'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv3_2) + b_conv3_2))
W_conv3_3 = tf.Variable(params_dict['conv3_3'][0])
b_conv3_3 = tf.Variable(params_dict['conv3_3'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv3_3) + b_conv3_3))
W_conv3_4 = tf.Variable(params_dict['conv3_4'][0])
b_conv3_4 = tf.Variable(params_dict['conv3_4'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv3_4) + b_conv3_4))
output = max_pool(output, 2, 2, "pool3")
W_conv4_1 = tf.Variable(params_dict['conv4_1'][0])
b_conv4_1 = tf.Variable(params_dict['conv4_1'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv4_1) + b_conv4_1))
W_conv4_2 = tf.Variable(params_dict['conv4_2'][0])
b_conv4_2 = tf.Variable(params_dict['conv4_2'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv4_2) + b_conv4_2))
W_conv4_3 = tf.Variable(params_dict['conv4_3'][0])
b_conv4_3 = tf.Variable(params_dict['conv4_3'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv4_3) + b_conv4_3))
W_conv4_4 = tf.Variable(params_dict['conv4_4'][0])
b_conv4_4 = tf.Variable(params_dict['conv4_4'][1])
    output = tf.nn.relu( batch_norm(conv2d(output,W_conv4_4) + b_conv4_4))
output = max_pool(output, 2, 2)
W_conv5_1 = tf.Variable(params_dict['conv5_1'][0])
b_conv5_1 = tf.Variable(params_dict['conv5_1'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv5_1) + b_conv5_1))
W_conv5_2 = tf.Variable(params_dict['conv5_2'][0])
b_conv5_2 = tf.Variable(params_dict['conv5_2'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv5_2) + b_conv5_2))
W_conv5_3 = tf.Variable(params_dict['conv5_3'][0])
b_conv5_3 = tf.Variable(params_dict['conv5_3'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv5_3) + b_conv5_3))
W_conv5_4 = tf.Variable(params_dict['conv5_4'][0])
b_conv5_4 = tf.Variable(params_dict['conv5_4'][1])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv5_4) + b_conv5_4))
output = tf.reshape(output,[-1,2*2*512])
W_fc1 = tf.get_variable('fc1', shape=[2048,4096], initializer=tf.contrib.keras.initializers.he_normal())
b_fc1 = bias_variable([4096])
output = tf.nn.relu( batch_norm(tf.matmul(output,W_fc1) + b_fc1) )
output = tf.nn.dropout(output,keep_prob)
W_fc2 = tf.Variable(params_dict['fc7'][0])
b_fc2 = tf.Variable(params_dict['fc7'][1])
output = tf.nn.relu( batch_norm(tf.matmul(output,W_fc2) + b_fc2) )
output = tf.nn.dropout(output,keep_prob)
W_fc3 = tf.get_variable('fc3', shape=[4096,10], initializer=tf.contrib.keras.initializers.he_normal())
b_fc3 = bias_variable([10])
output = tf.nn.relu( batch_norm(tf.matmul(output,W_fc3) + b_fc3) )
# loss function: cross_entropy
# train_step: training operation
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=output))
l2 = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
train_step = tf.train.MomentumOptimizer(learning_rate, momentum_rate,use_nesterov=True).minimize(cross_entropy + l2 * weight_decay)
correct_prediction = tf.equal(tf.argmax(output,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
    # initialize a saver to save the model
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
summary_writer = tf.summary.FileWriter(log_save_path,sess.graph)
# epoch = 164
    # make sure [batch_size * iterations = dataset size]
for ep in range(1,total_epoch+1):
lr = learning_rate_schedule(ep)
pre_index = 0
train_acc = 0.0
train_loss = 0.0
start_time = time.time()
print("\nepoch %d/%d:" %(ep,total_epoch))
for it in range(1,iterations+1):
batch_x = train_x[pre_index:pre_index+batch_size]
batch_y = train_y[pre_index:pre_index+batch_size]
batch_x = data_augmentation(batch_x)
_, batch_loss = sess.run([train_step, cross_entropy],feed_dict={x:batch_x, y_:batch_y, keep_prob: dropout_rate, learning_rate: lr, train_flag: True})
batch_acc = accuracy.eval(feed_dict={x:batch_x, y_:batch_y, keep_prob: 1.0, train_flag: True})
train_loss += batch_loss
train_acc += batch_acc
pre_index += batch_size
if it == iterations:
train_loss /= iterations
train_acc /= iterations
loss_, acc_ = sess.run([cross_entropy,accuracy],feed_dict={x:batch_x, y_:batch_y, keep_prob: 1.0, train_flag: True})
train_summary = tf.Summary(value=[tf.Summary.Value(tag="train_loss", simple_value=train_loss),
tf.Summary.Value(tag="train_accuracy", simple_value=train_acc)])
val_acc, val_loss, test_summary = run_testing(sess,ep)
summary_writer.add_summary(train_summary, ep)
summary_writer.add_summary(test_summary, ep)
summary_writer.flush()
print("iteration: %d/%d, cost_time: %ds, train_loss: %.4f, train_acc: %.4f, test_loss: %.4f, test_acc: %.4f" %(it, iterations, int(time.time()-start_time), train_loss, train_acc, val_loss, val_acc))
else:
print("iteration: %d/%d, train_loss: %.4f, train_acc: %.4f" %(it, iterations, train_loss / it, train_acc / it) , end='\r')
save_path = saver.save(sess, model_save_path)
print("Model saved in file: %s" % save_path)
|
BIGBALLON/cifar-10-cnn
|
Tensorflow_version/vgg_19_pretrain.py
|
Python
|
mit
| 12,039
|
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from biocomplexity.datasets.land_cover_dataset import LandCoverDataset
from biocomplexity.models.land_cover_change_model import LandCoverChangeModel
from biocomplexity.equation_specification import EquationSpecification
from opus_core.opus_package import OpusPackage
from biocomplexity.opus_package_info import package
from opus_core.resources import Resources
from opus_core.storage_factory import StorageFactory
from numpy import arange, where
from urbansim.estimation.estimator import Estimator
from opus_core.logger import logger
from time import time
import os
from opus_core.sampling_toolbox import sample_noreplace
from opus_core.variables.variable_name import VariableName
class LCCMEstimatorMultiRun(Estimator):
def __init__(self, **kargs):
# Estimator.__init__(self, settings=None, run_land_price_model_before_estimation=False, **kargs) # <-- old __init__
# Estimator.__init__(self, config=None, save_estimation_results=True) # <-- new __init__ doesn't work, but not needed
parent_dir_path = package().get_package_parent_path()
package_path = OpusPackage().get_path_for_package("biocomplexity")
self.storage = StorageFactory().get_storage('tab_storage',
storage_location=os.path.join(package_path, 'data'))
## 1. directory path of full (4 county spatial extent) dataset
flt_directory = os.path.join(parent_dir_path, "biocomplexity", "data", "LCCM_4County")
        ## 2. select (uncomment) one of the following directory paths of subsetted sample input data/variables
# flt_directory_est = os.path.join(parent_dir_path, "biocomplexity", "data", "LCCM_small_test_set_opus")
flt_directory_est = os.path.join(parent_dir_path, "biocomplexity", "data", "data_for_estimation_all")
# flt_directory_est = os.path.join(parent_dir_path, "biocomplexity", "data", "data_for_estimation_all_orig")
# flt_directory_est = os.path.join(parent_dir_path, "biocomplexity", "data", "data_for_suburban_orig")
# flt_directory_est = os.path.join(parent_dir_path, "biocomplexity", "data", "data_for_urban")
# flt_directory_est = os.path.join(parent_dir_path, "biocomplexity", "data", "data_for_urban_orig")
## note - must rename lct-forusewith91sample.Float32 to lct.lf4 if doing 1991-1995
## note - must rename lct-forusewith95sample.Float32 to lct.lf4 if doing 1995-1999
        ## 3. select (uncomment) one of the following land cover data (input data) date pairs (years)
# years = [1991, 1995]
# years = [1995, 1999]
years = [1999, 2002]
self.lc1 = LandCoverDataset(in_storage = StorageFactory().get_storage("flt_storage",
storage_location = os.path.join(flt_directory_est, str(years[0]))),
resources=Resources({"lowercase":1}))
self.lc2 = LandCoverDataset(in_storage = StorageFactory().get_storage("flt_storage",
storage_location = os.path.join(flt_directory_est, str(years[1]))),
resources=Resources({"lowercase":1}))
self.lc1_all = LandCoverDataset(in_storage = StorageFactory().get_storage("flt_storage",
storage_location = os.path.join(flt_directory, str(years[0]))),
resources=Resources({"lowercase":1}))
self.lc1_all.flush_dataset()
self.lc2_all = LandCoverDataset(in_storage = StorageFactory().get_storage("flt_storage",
storage_location = os.path.join(flt_directory, str(years[1]))),
resources=Resources({"lowercase":1}))
self.lc2_all.flush_dataset()
def estimate(self, spec_py=None, spec_var=None, spec_file=None):
t1 = time()
if spec_py is not None:
reload(spec_py)
spec_var = spec_py.specification
if spec_var is not None:
            self.specification, variables, coefficients, equations, submodels = \
self.load_specification_from_variable(spec_var)
elif spec_file is not None:
self.specification = EquationSpecification(in_storage=self.storage)
self.specification.load(in_table_name=spec_file)
self.specification.set_dataset_name_of_variables("land_cover")
self.model_name = "land_cover_change_model"
choices = range(1,15)
lccm = LandCoverChangeModel(choices, submodel_string="lct")
        ## 4. select (uncomment) one of the following subsetted sampling files (agents_index)
# agents_index = where(self.lc1.get_attribute("sall_91_95_0"))[0]
# agents_index = where(self.lc1.get_attribute("sall_95_99_0"))[0]
agents_index = where(self.lc1.get_attribute("sall_99_02_0"))[0]
# agents_index = where(self.lc1.get_attribute("suburb91_95sample0"))[0]
# agents_index = where(self.lc1.get_attribute("suburb95_99sample0"))[0]
# agents_index = where(self.lc1.get_attribute("up91x95_old_samp0"))[0]
# agents_index = where(self.lc1.get_attribute("urbsamp95_99_0"))[0]
        ## need to include agents_index_all separately for the calibration portion
## when using the dataset at the full extent, agents_index_all is needed as it is
## created from the lc1_all agents_set and matches the size of the input data
        ## 5. select (uncomment) one of the following sampling files (agents_index) at full spatial extent
# agents_index_all = where(self.lc1_all.get_attribute("sall_91_95_0"))[0]
# agents_index_all = where(self.lc1_all.get_attribute("sall_95_99_0"))[0]
agents_index_all = where(self.lc1_all.get_attribute("sall_99_02_0"))[0]
# agents_index_all = where(self.lc1_all.get_attribute("suburb91_95sample0"))[0]
# agents_index_all = where(self.lc1_all.get_attribute("suburb95_99sample0"))[0]
# agents_index_all = where(self.lc1_all.get_attribute("up91x95_old_samp0"))[0]
# agents_index_all = where(self.lc1_all.get_attribute("urbsamp95_99_0"))[0]
coef, results = lccm.estimate(self.specification, self.lc1, self.lc2, agents_index=agents_index, debuglevel=4)
new_coef = lccm.calibrate(self.lc1_all, self.lc2_all, agents_index_all)
specification = lccm.specification
#save estimation results
out_suffix = spec_py.__name__[len(spec_py.__name__) - 11:]
specification.write(out_storage=self.storage, out_table_name='lccm_specification_%sc' % out_suffix)
new_coef.write(out_storage=self.storage, out_table_name='lccm_coefficients_%sc' % out_suffix)
logger.log_status("Estimation done. %s s" % str(time()-t1))
def load_specification_from_variable(self, spec_var):
variables = []
coefficients = []
equations = []
submodels = []
try:
for sub_model, submodel_spec in spec_var.items():
if not isinstance(submodel_spec, dict):
raise ValueError, "Wrong specification format"
if submodel_spec.has_key("equation_ids"):
equation_ids = submodel_spec["equation_ids"] ## this retrieves eq_ids from spec.py - they're stored in equations then passed to the equation specifications
del submodel_spec["equation_ids"]
else:
equation_ids = None
for var, coefs in submodel_spec.items():
if not equation_ids:
                        equation_ids = range(1, len(coefs)+1)
for i in range(len(coefs)):
if coefs[i] != 0:
variables.append(var)
coefficients.append(coefs[i])
equations.append(equation_ids[i])
submodels.append(sub_model)
except:
raise ValueError, "Wrong specification format for submodel variable."
specification = EquationSpecification(variables=variables,
coefficients=coefficients,
equations = equations,
submodels=submodels)
return (specification, variables, coefficients, equations, submodels)
if __name__ == "__main__":
estimator = LCCMEstimatorMultiRun(save_estimation_results=True, debuglevel=4)
    # ## 6. select (uncomment) one of the following model specifications
## used for single run of lccm_estimator
## import estimation_lccm_specification_all91to95 as spec_py
## import estimation_lccm_specification_all95to99 as spec_py
# import estimation_lccm_specification_all99to02v2 as spec_py
## import estimation_lccm_specification_sub91to95 as spec_py
## import estimation_lccm_specification_sub95to99 as spec_py
## import estimation_lccm_specification_ub91to95 as spec_py
## import estimation_lccm_specification_ub95to99 as spec_py
# estimator.estimate(spec_py)
## 2 to iterate over spec_py file permutations (i.e. systematic)
# ::IMPT:: need to read in each spec_py (output from _lccm_multirun_specpy_gen.py),
# import them as spec_py, and iterate through them using write_dict_to_file
    # this is repeated until all spec_py files are processed
rootdir = os.path.join(OpusPackage().get_path_for_package("biocomplexity"), "data", "uncertainty", "model_specs0")
# rootdir = os.path.join(OpusPackage().get_path_for_package("biocomplexity"), "data", "uncertainty")
for subdir, dirs, files in os.walk(rootdir):
for file in files:
file_short = file[:-3]
if file_short != "_lccm_multirun_estimator" and file_short != "lccm_estimator_local_multirun":
module = [file_short]
exec "import %s as spec_py" % module[0]
estimator.estimate(spec_py)
|
christianurich/VIBe2UrbanSim
|
3rdparty/opus/src/biocomplexity/examples/lccm_estimator_multirun.py
|
Python
|
gpl-2.0
| 10,192
|
#!/usr/bin/env python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adds complete campaigns using BatchJobService.
Complete campaigns include campaign budgets, campaigns, ad groups and keywords.
"""
import argparse
import asyncio
import sys
from uuid import uuid4
from google.ads.googleads.client import GoogleAdsClient
from google.ads.googleads.errors import GoogleAdsException
NUMBER_OF_CAMPAIGNS_TO_ADD = 2
NUMBER_OF_AD_GROUPS_TO_ADD = 2
NUMBER_OF_KEYWORDS_TO_ADD = 4
PAGE_SIZE = 1000
_temporary_id = 0
def _get_next_temporary_id():
"""Returns the next temporary ID to use in batch job operations.
Decrements the temporary ID by one before returning it. The first value
returned for the ID is -1.
Returns: an int of the next temporary ID.
"""
global _temporary_id
_temporary_id -= 1
return _temporary_id
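# Illustrative note (not from the original source): successive calls return
# -1, -2, -3, ...; these negative IDs serve as placeholders in resource names
# so that operations created later in the same batch job can reference
# resources created earlier in it (e.g. campaigns referencing a budget).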
def _build_mutate_operation(client, operation_type, operation):
"""Builds a mutate operation with the given operation type and operation.
Args:
client: an initialized GoogleAdsClient instance.
operation_type: a str of the operation type corresponding to a field on
the MutateOperation message class.
operation: an operation instance.
Returns: a MutateOperation instance
"""
mutate_operation = client.get_type("MutateOperation")
# Retrieve the nested operation message instance using getattr then copy the
# contents of the given operation into it using the client.copy_from method.
client.copy_from(getattr(mutate_operation, operation_type), operation)
return mutate_operation
async def main(client, customer_id):
"""Main function that runs the example.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a str of a customer ID.
"""
batch_job_service = client.get_service("BatchJobService")
batch_job_operation = _create_batch_job_operation(client)
resource_name = _create_batch_job(
batch_job_service, customer_id, batch_job_operation
)
operations = _build_all_operations(client, customer_id)
_add_all_batch_job_operations(batch_job_service, operations, resource_name)
operations_response = _run_batch_job(batch_job_service, resource_name)
# Create an asyncio.Event instance to control execution during the
    # asynchronous steps in _poll_batch_job. Note that this is not important
    # for polling asynchronously; it simply helps with execution control so we
    # can run _fetch_and_print_results after the asynchronous operations have
# completed.
_done_event = asyncio.Event()
_poll_batch_job(operations_response, _done_event)
    # Execution will stop here and wait for the asynchronous steps in
# _poll_batch_job to complete before proceeding.
await _done_event.wait()
_fetch_and_print_results(client, batch_job_service, resource_name)
def _create_batch_job_operation(client):
"""Created a BatchJobOperation and sets an empty BatchJob instance to
the "create" property in order to tell the Google Ads API that we're
creating a new BatchJob.
Args:
client: an initialized GoogleAdsClient instance.
Returns: a BatchJobOperation with a BatchJob instance set in the "create"
property.
"""
batch_job_operation = client.get_type("BatchJobOperation")
batch_job = client.get_type("BatchJob")
client.copy_from(batch_job_operation.create, batch_job)
return batch_job_operation
# [START add_complete_campaigns_using_batch_job]
def _create_batch_job(batch_job_service, customer_id, batch_job_operation):
"""Creates a batch job for the specified customer ID.
Args:
batch_job_service: an instance of the BatchJobService message class.
customer_id: a str of a customer ID.
batch_job_operation: a BatchJobOperation instance set to "create"
Returns: a str of a resource name for a batch job.
"""
try:
response = batch_job_service.mutate_batch_job(
customer_id=customer_id, operation=batch_job_operation
)
resource_name = response.result.resource_name
print(f'Created a batch job with resource name "{resource_name}"')
return resource_name
except GoogleAdsException as exception:
_handle_googleads_exception(exception)
# [END add_complete_campaigns_using_batch_job]
# [START add_complete_campaigns_using_batch_job_1]
def _add_all_batch_job_operations(batch_job_service, operations, resource_name):
"""Adds all mutate operations to the batch job.
    As this is the first time for this batch job, we pass None as the sequence
    token. The response will contain the next sequence token that we can use
to upload more operations in the future.
Args:
batch_job_service: an instance of the BatchJobService message class.
operations: a list of a mutate operations.
resource_name: a str of a resource name for a batch job.
"""
try:
response = batch_job_service.add_batch_job_operations(
resource_name=resource_name,
sequence_token=None,
mutate_operations=operations,
)
print(
f"{response.total_operations} mutate operations have been "
"added so far."
)
# You can use this next sequence token for calling
# add_batch_job_operations() next time.
print(
"Next sequence token for adding next operations is "
f"{response.next_sequence_token}"
)
except GoogleAdsException as exception:
_handle_googleads_exception(exception)
# [END add_complete_campaigns_using_batch_job_1]
def _build_all_operations(client, customer_id):
"""Builds all operations for creating a complete campaign.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a str of a customer ID.
Returns: a list of operations of various types.
"""
operations = []
# Creates a new campaign budget operation and adds it to the list of
# mutate operations.
campaign_budget_op = _build_campaign_budget_operation(client, customer_id)
operations.append(
_build_mutate_operation(
client, "campaign_budget_operation", campaign_budget_op
)
)
# Creates new campaign operations and adds them to the list of
# mutate operations.
campaign_operations = _build_campaign_operations(
client, customer_id, campaign_budget_op.create.resource_name
)
operations = operations + [
_build_mutate_operation(client, "campaign_operation", operation)
for operation in campaign_operations
]
# Creates new campaign criterion operations and adds them to the list of
# mutate operations.
campaign_criterion_operations = _build_campaign_criterion_operations(
client, campaign_operations
)
operations = operations + [
_build_mutate_operation(
client, "campaign_criterion_operation", operation
)
for operation in campaign_criterion_operations
]
# Creates new ad group operations and adds them to the list of
# mutate operations.
ad_group_operations = _build_ad_group_operations(
client, customer_id, campaign_operations
)
operations = operations + [
_build_mutate_operation(client, "ad_group_operation", operation)
for operation in ad_group_operations
]
# Creates new ad group criterion operations and add them to the list of
# mutate operations.
ad_group_criterion_operations = _build_ad_group_criterion_operations(
client, ad_group_operations
)
operations = operations + [
_build_mutate_operation(
client, "ad_group_criterion_operation", operation
)
for operation in ad_group_criterion_operations
]
# Creates new ad group ad operations and adds them to the list of
# mutate operations.
ad_group_ad_operations = _build_ad_group_ad_operations(
client, ad_group_operations
)
operations = operations + [
_build_mutate_operation(client, "ad_group_ad_operation", operation)
for operation in ad_group_ad_operations
]
return operations
def _build_campaign_budget_operation(client, customer_id):
"""Builds a new campaign budget operation for the given customer ID.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a str of a customer ID.
Returns: a CampaignBudgetOperation instance.
"""
campaign_budget_service = client.get_service("CampaignBudgetService")
campaign_budget_operation = client.get_type("CampaignBudgetOperation")
campaign_budget = campaign_budget_operation.create
resource_name = campaign_budget_service.campaign_budget_path(
customer_id, _get_next_temporary_id()
)
campaign_budget.resource_name = resource_name
campaign_budget.name = f"Interplanetary Cruise Budget #{uuid4()}"
campaign_budget.delivery_method = (
client.enums.BudgetDeliveryMethodEnum.STANDARD
)
campaign_budget.amount_micros = 5000000
return campaign_budget_operation
def _build_campaign_operations(
client, customer_id, campaign_budget_resource_name
):
"""Builds new campaign operations for the specified customer ID.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a str of a customer ID.
campaign_budget_resource_name: a str resource name for a campaign
budget.
Returns: a list of CampaignOperation instances.
"""
return [
_build_campaign_operation(
client, customer_id, campaign_budget_resource_name
)
for i in range(NUMBER_OF_CAMPAIGNS_TO_ADD)
]
def _build_campaign_operation(
client, customer_id, campaign_budget_resource_name
):
"""Builds new campaign operation for the specified customer ID.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a str of a customer ID.
campaign_budget_resource_name: a str resource name for a campaign
budget.
Returns: a CampaignOperation instance.
"""
campaign_operation = client.get_type("CampaignOperation")
campaign_service = client.get_service("CampaignService")
# Creates a campaign.
campaign = campaign_operation.create
campaign_id = _get_next_temporary_id()
# Creates a resource name using the temporary ID.
campaign.resource_name = campaign_service.campaign_path(
customer_id, campaign_id
)
campaign.name = f"Batch job campaign #{customer_id}.{campaign_id}"
campaign.advertising_channel_type = (
client.enums.AdvertisingChannelTypeEnum.SEARCH
)
# Recommendation: Set the campaign to PAUSED when creating it to prevent
# the ads from immediately serving. Set to ENABLED once you've added
# targeting and the ads are ready to serve.
campaign.status = client.enums.CampaignStatusEnum.PAUSED
# Set the bidding strategy and type by setting manual_cpc equal to an empty
# ManualCpc instance.
client.copy_from(campaign.manual_cpc, client.get_type("ManualCpc"))
campaign.campaign_budget = campaign_budget_resource_name
return campaign_operation
def _build_campaign_criterion_operations(client, campaign_operations):
"""Builds new campaign criterion operations for negative keyword criteria.
Args:
client: an initialized GoogleAdsClient instance.
campaign_operations: a list of CampaignOperation instances.
Returns: a list of CampaignCriterionOperation instances.
"""
return [
_build_campaign_criterion_operation(client, campaign_operation)
for campaign_operation in campaign_operations
]
def _build_campaign_criterion_operation(client, campaign_operation):
"""Builds a new campaign criterion operation for negative keyword criterion.
Args:
client: an initialized GoogleAdsClient instance.
campaign_operation: a CampaignOperation instance.
Returns: a CampaignCriterionOperation instance.
"""
campaign_criterion_operation = client.get_type("CampaignCriterionOperation")
# Creates a campaign criterion.
campaign_criterion = campaign_criterion_operation.create
campaign_criterion.keyword.text = "venus"
campaign_criterion.keyword.match_type = (
client.enums.KeywordMatchTypeEnum.BROAD
)
# Sets the campaign criterion as a negative criterion.
campaign_criterion.negative = True
campaign_criterion.campaign = campaign_operation.create.resource_name
return campaign_criterion_operation
def _build_ad_group_operations(client, customer_id, campaign_operations):
"""Builds new ad group operations for the specified customer ID.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a str of a customer ID.
campaign_operations: a list of CampaignOperation instances.
    Returns: a list of AdGroupOperation instances.
"""
operations = []
for campaign_operation in campaign_operations:
for i in range(NUMBER_OF_AD_GROUPS_TO_ADD):
operations.append(
_build_ad_group_operation(
client, customer_id, campaign_operation
)
)
return operations
def _build_ad_group_operation(client, customer_id, campaign_operation):
"""Builds a new ad group operation for the specified customer ID.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a str of a customer ID.
campaign_operation: a CampaignOperation instance.
    Returns: an AdGroupOperation instance.
"""
ad_group_operation = client.get_type("AdGroupOperation")
ad_group_service = client.get_service("AdGroupService")
# Creates an ad group.
ad_group = ad_group_operation.create
ad_group_id = _get_next_temporary_id()
# Creates a resource name using the temporary ID.
ad_group.resource_name = ad_group_service.ad_group_path(
customer_id, ad_group_id
)
ad_group.name = f"Batch job ad group #{uuid4()}.{ad_group_id}"
ad_group.campaign = campaign_operation.create.resource_name
ad_group.type_ = client.enums.AdGroupTypeEnum.SEARCH_STANDARD
ad_group.cpc_bid_micros = 10000000
return ad_group_operation
def _build_ad_group_criterion_operations(client, ad_group_operations):
"""Builds new ad group criterion operations for creating keywords.
50% of keywords are created with some invalid characters to demonstrate
how BatchJobService returns information about such errors.
Args:
client: an initialized GoogleAdsClient instance.
ad_group_operations: a list of AdGroupOperation instances.
    Returns: a list of AdGroupCriterionOperation instances.
"""
operations = []
for ad_group_operation in ad_group_operations:
for i in range(NUMBER_OF_KEYWORDS_TO_ADD):
operations.append(
_build_ad_group_criterion_operation(
# Create a keyword text by making 50% of keywords invalid
# to demonstrate error handling.
client,
ad_group_operation,
i,
i % 2 == 0,
)
)
return operations
def _build_ad_group_criterion_operation(
client, ad_group_operation, number, is_valid=True
):
"""Builds new ad group criterion operation for creating keywords.
Takes an optional param that dictates whether the keyword text should
intentionally generate an error with invalid characters.
Args:
client: an initialized GoogleAdsClient instance.
ad_group_operation: an AdGroupOperation instance.
number: an int of the number to assign to the name of the criterion.
        is_valid: a bool of whether the keyword text should be valid.
Returns: an AdGroupCriterionOperation instance.
"""
ad_group_criterion_operation = client.get_type("AdGroupCriterionOperation")
# Creates an ad group criterion.
ad_group_criterion = ad_group_criterion_operation.create
ad_group_criterion.keyword.text = f"mars{number}"
# If keyword should be invalid we add exclamation points, which will
# generate errors when sent to the API.
if not is_valid:
ad_group_criterion.keyword.text += "!!!"
ad_group_criterion.keyword.match_type = (
client.enums.KeywordMatchTypeEnum.BROAD
)
ad_group_criterion.ad_group = ad_group_operation.create.resource_name
ad_group_criterion.status = client.enums.AdGroupCriterionStatusEnum.ENABLED
return ad_group_criterion_operation
def _build_ad_group_ad_operations(client, ad_group_operations):
"""Builds new ad group ad operations.
Args:
client: an initialized GoogleAdsClient instance.
ad_group_operations: a list of AdGroupOperation instances.
Returns: a list of AdGroupAdOperation instances.
"""
return [
_build_ad_group_ad_operation(client, ad_group_operation)
for ad_group_operation in ad_group_operations
]
def _build_ad_group_ad_operation(client, ad_group_operation):
"""Builds a new ad group ad operation.
Args:
client: an initialized GoogleAdsClient instance.
ad_group_operation: an AdGroupOperation instance.
Returns: an AdGroupAdOperation instance.
"""
ad_group_ad_operation = client.get_type("AdGroupAdOperation")
# Creates an ad group ad.
ad_group_ad = ad_group_ad_operation.create
# Creates the expanded text ad info.
text_ad = ad_group_ad.ad.expanded_text_ad
text_ad.headline_part1 = f"Cruise to Mars #{uuid4()}"
text_ad.headline_part2 = "Best Space Cruise Line"
text_ad.description = "Buy your tickets now!"
ad_group_ad.ad.final_urls.append("http://www.example.com")
ad_group_ad.ad_group = ad_group_operation.create.resource_name
ad_group_ad.status = client.enums.AdGroupAdStatusEnum.PAUSED
return ad_group_ad_operation
# [START add_complete_campaigns_using_batch_job_2]
def _run_batch_job(batch_job_service, resource_name):
"""Runs the batch job for executing all uploaded mutate operations.
Args:
batch_job_service: an instance of the BatchJobService message class.
resource_name: a str of a resource name for a batch job.
Returns: a google.api_core.operation.Operation instance.
"""
try:
response = batch_job_service.run_batch_job(resource_name=resource_name)
print(
f'Batch job with resource name "{resource_name}" has been '
"executed."
)
return response
except GoogleAdsException as exception:
_handle_googleads_exception(exception)
# [END add_complete_campaigns_using_batch_job_2]
# [START add_complete_campaigns_using_batch_job_3]
def _poll_batch_job(operations_response, event):
"""Polls the server until the batch job execution finishes.
    Registers a done callback that sets the given event once the job finishes.
Args:
operations_response: a google.api_core.operation.Operation instance.
event: an instance of asyncio.Event to invoke once the operations have
completed, alerting the awaiting calling code that it can proceed.
"""
loop = asyncio.get_event_loop()
def _done_callback(future):
# The operations_response object will call callbacks from a daemon
# thread so we must use a threadsafe method of setting the event here
# otherwise it will not trigger the awaiting code.
loop.call_soon_threadsafe(event.set)
# operations_response represents a Long-Running Operation or LRO. The class
# provides an interface for polling the API to check when the operation is
# complete. Below we use the asynchronous interface, but there's also a
# synchronous interface that uses the Operation.result method.
# See: https://googleapis.dev/python/google-api-core/latest/operation.html
operations_response.add_done_callback(_done_callback)
# [END add_complete_campaigns_using_batch_job_3]
# [START add_complete_campaigns_using_batch_job_4]
def _fetch_and_print_results(client, batch_job_service, resource_name):
"""Prints all the results from running the batch job.
Args:
client: an initialized GoogleAdsClient instance.
batch_job_service: an instance of the BatchJobService message class.
resource_name: a str of a resource name for a batch job.
"""
print(
f'Batch job with resource name "{resource_name}" has finished. '
"Now, printing its results..."
)
list_results_request = client.get_type("ListBatchJobResultsRequest")
list_results_request.resource_name = resource_name
list_results_request.page_size = PAGE_SIZE
# Gets all the results from running batch job and prints their information.
batch_job_results = batch_job_service.list_batch_job_results(
request=list_results_request
)
for batch_job_result in batch_job_results:
status = batch_job_result.status.message
status = status if status else "N/A"
result = batch_job_result.mutate_operation_response
result = result or "N/A"
print(
f"Batch job #{batch_job_result.operation_index} "
f'has a status "{status}" and response type "{result}"'
)
# [END add_complete_campaigns_using_batch_job_4]
def _handle_googleads_exception(exception):
"""Prints the details of a GoogleAdsException object.
Args:
exception: an instance of GoogleAdsException.
"""
print(
f'Request with ID "{exception.request_id}" failed with status '
f'"{exception.error.code().name}" and includes the following errors:'
)
for error in exception.failure.errors:
print(f'\tError with message "{error.message}".')
if error.location:
for field_path_element in error.location.field_path_elements:
print(f"\t\tOn field: {field_path_element.field_name}")
sys.exit(1)
if __name__ == "__main__":
# GoogleAdsClient will read the google-ads.yaml configuration file in the
# home directory if none is specified.
googleads_client = GoogleAdsClient.load_from_storage(version="v10")
parser = argparse.ArgumentParser(
description=(
"Adds complete campaigns, including campaign budgets, "
"campaigns, ad groups and keywords for the given "
"customer ID using BatchJobService."
)
)
# The following argument(s) should be provided to run the example.
parser.add_argument(
"-c",
"--customer_id",
type=str,
required=True,
help="The Google Ads customer ID.",
)
args = parser.parse_args()
asyncio.run(main(googleads_client, args.customer_id))
|
googleads/google-ads-python
|
examples/campaign_management/add_complete_campaigns_using_batch_job.py
|
Python
|
apache-2.0
| 23,528
|
from builtins import str
import logging
import subprocess
from airflow.executors.base_executor import BaseExecutor
from airflow.utils import State
class SequentialExecutor(BaseExecutor):
"""
    This executor will only run one task instance at a time and can be used
    for debugging. It is also the only executor that can be used with sqlite,
    since sqlite doesn't support multiple connections.
    Since we want airflow to work out of the box, it defaults to this
    SequentialExecutor alongside sqlite when you first install it.
"""
def __init__(self):
super(SequentialExecutor, self).__init__()
self.commands_to_run = []
def execute_async(self, key, command, queue=None):
self.commands_to_run.append((key, command,))
def sync(self):
for key, command in self.commands_to_run:
logging.info("command" + str(command))
try:
sp = subprocess.Popen(command, shell=True)
sp.wait()
except Exception as e:
self.change_state(key, State.FAILED)
raise e
self.change_state(key, State.SUCCESS)
self.commands_to_run = []
def end(self):
self.heartbeat()
|
wangtuanjie/airflow
|
airflow/executors/sequential_executor.py
|
Python
|
apache-2.0
| 1,230
|
import psutil
def secs2hours(secs):
mm, ss = divmod(secs, 60)
hh, mm = divmod(mm, 60)
return "%d:%02d:%02d" % (hh, mm, ss)
def giveMeBattery():
    battery = psutil.sensors_battery()
    return "charge = %s%%, time left = %s" % (battery.percent, secs2hours(battery.secsleft))
print(giveMeBattery())
|
grotadmorv/i3status
|
batterie.py
|
Python
|
apache-2.0
| 327
|
""" Runs the TwitterKnitter. Prepares images and
sends them to the Arduino one line at a time. """
from knitter24 import Knitter24
from pattern24 import Pattern24
from TwitterSearch import *
from secrets import Secrets
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
import requests
import re
def choose_tweet():
"""Offers a text-based menu of tweet text to knit"""
tweets = get_tweets_genes()
chosen = False
    while not chosen:
print("0: <REFRESH TWEETS>")
for idx, tweet in enumerate(tweets):
print ("%d: %s" % (idx+1, tweet))
print("%d: <ENTER TEXT>" % (len(tweets) + 1))
        print()
selection = input('Choose a tweet: ')
try:
number = int(selection)
if(number == 0):
tweets = get_tweets_genes()
elif(0 < number <= len(tweets)):
text = tweets[number-1]
chosen = True
elif(number == len(tweets)+1):
text = input("Enter text: ")
chosen = True
else:
print("Not a tweet! Try again")
        except ValueError:
            print("Invalid number")
return text
def get_tweets():
"""Fetches up to 10 tweets and returns their text in a list"""
hashtag = "maker"
sources = []
try:
tso = TwitterSearchOrder()
tso.setSearchURL("?q=%23" + hashtag)
tso.setLocale('en')
tso.setCount(10)
tso.setIncludeEntities(False)
twitter_search = TwitterSearch(
consumer_key = Secrets.consumer_key,
consumer_secret = Secrets.consumer_secret,
access_token = Secrets.access_token,
access_token_secret = Secrets.access_token_secret
)
tweets = twitter_search.searchTweets(tso)
for tweet in tweets['content']['statuses']:
sources.append(tweet['text'])
except TwitterSearchException as exception:
print(exception)
return sources
def get_tweets_genes():
"""Fetches up to 10 tweets and returns their text in a list"""
hashtag = "makerprintgenes"
sources = []
try:
tso = TwitterSearchOrder()
tso.setSearchURL("?q=%23" + hashtag)
tso.setLocale('en')
tso.setCount(10)
tso.setIncludeEntities(False)
twitter_search = TwitterSearch(
consumer_key = Secrets.consumer_key,
consumer_secret = Secrets.consumer_secret,
access_token = Secrets.access_token,
access_token_secret = Secrets.access_token_secret
)
tweets = twitter_search.searchTweets(tso)
for tweet in tweets['content']['statuses']:
sources.append(tweet['text'])
except TwitterSearchException as exception:
print(exception)
return sources
def find_gene_id(tweet):
tweet = re.sub("(?i)#makerprintgenes", "", tweet)
# Expected tweet format "Gene-symbol Species #MakerPrintGenes"
# Eg. "BRCA1 human #MakerPrintGenes"
# "BRAFP1 homo sapien #MakerPrintGenes"
# "BRCA2 mouse #MakerPrintGenes"
# "MT-TV human #MakerPrintGenes"
queries = tweet.strip().split(" ", 1)
gene_symbol = queries[0]
species = queries[1]
# Lookup searches for a specific gene using the gene-symbol and species
base_url = "http://rest.ensembl.org/lookup/symbol/"
request_params = "?content-type=application/json"
try:
r = requests.get(base_url+species+"/"+gene_symbol+request_params,
headers={"Content-Type": "application/json"})
r.raise_for_status()
response = r.json()
# Now that Gene-ID found use this to get gene (& protein) sequence(s)
return get_gene_sequences(response["id"], response["biotype"])
    except requests.exceptions.HTTPError as errh:
        print("HTTP error for gene id request:", errh)
    except requests.exceptions.ConnectionError as errc:
        print("Error connecting for gene id request:", errc)
    except requests.exceptions.Timeout as errt:
        print("Timeout error for gene id request:", errt)
    except requests.exceptions.RequestException as err:
        print("Request to find gene ID from Ensembl failed.")
        print(err)
return
def get_gene_sequences(gene_id, gene_type):
# Sequence API endpoint requires an Ensembl ID
base_url = "http://rest.ensembl.org/sequence/id/"
request_params = "?content-type=application/json&type="
transcript_id = ""
nucleotide_seq = ""
# If the gene is protein-coding we want both the coding sequence (CDS) and
# the translated protein sequence
if gene_type == 'protein_coding':
# Specify multiple_sequences=1 as there can be multiple transcripts for
# each gene. A typical strategy is to choose the longest transcript that
# includes most(/all) of the gene's coding sequence
r = requests.get(base_url+gene_id+request_params+"cds&multiple_sequences=1",
headers={"Content-Type": "application/json"})
sequences = r.json()
# If there's more than one sequence we want to choose the longest,
# reverse sorting puts that sequence first
if len(sequences) > 1:
sequences = sorted(sequences, key = lambda i: len(i["seq"]), reverse=True)
# Now that we have the longest (or only) sequence first, we want to take
# note of that specific transcript ID so that we can request the
# matching translated protein sequence for that transcript.
# This avoids a case arising where two transcripts have the same
# length and the wrong protein sequence is chosen to match the
# nucleotide sequence.
transcript_id = sequences[0]["id"]
nucleotide_seq = sequences[0]["seq"]
r = requests.get(base_url+transcript_id+request_params+"protein",
headers={"Content-Type": "application/json"})
protein = r.json()
# Spacing added to amino acid sequence so that it aligns to nucleotide
# coding sequence later
protein_seq = " " + " ".join(protein["seq"]) + " "
# Return both the coding sequence and the protein sequence so they can
# be printed together
return [nucleotide_seq, protein_seq]
else:
# If this isn't a protein-coding gene then just fetch the cDNA sequence
r = requests.get(base_url+gene_id+request_params+"cdna&multiple_sequences=1",
headers={"Content-Type": "application/json"})
sequences = r.json()
if len(sequences) > 1:
sequences = sorted(sequences, key = lambda i: len(i["seq"]), reverse=True)
nucleotide_seq = sequences[0]["seq"]
# Return just the nucleotide sequence
return nucleotide_seq
def create_image_from_text(text):
"""Creates a 24-pixel wide image featuring the given text"""
try:
#text = text.encode('ascii', 'ignore')
#no TTF, they get antialiased!
#font = ImageFont.load("fonts/helvB12.pil", 22) #22, clear
#font = ImageFont.truetype("fonts/FreeMonoBold.ttf", 14) #22, clear
#font = ImageFont.truetype("fonts/RobotoMono-Regular.ttf", 11) #22, clear
font = ImageFont.truetype("fonts/RobotoMono-Bold.ttf", 11) #22, clear
#font = ImageFont.load("courB14.pil") #20 pixels, bold ish
#font = ImageFont.load("charR14.pil") #23, seify
#font = ImageFont.load("timR14.pil") #22, ick
#font = ImageFont.load("term14.pil") #22, ick
border_width = 1
        # Determine if `text` is a string - if it is then it just contains the
        # nucleotide sequence for a gene that is not protein-coding.
        # If `text` is not a string then the gene is protein-coding and `text`
        # should be a list with a corresponding protein sequence.
if isinstance(text, str):
# NON-CODING GENES
# ----------------
width, height = font.getsize(text)
# bonds = text.replace("C", "…").replace("G", "…").replace("A", "‥").replace("T", "‥")
# Generate a complementary DNA sequence to base pair with for printing
#complement_mapping = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
#complement = "".join(complement_mapping.get(base, base) for base in text)
            # Build the complement with str.maketrans / str.translate
            trans_table = str.maketrans("ATGC", "TACG")
            complement = text.translate(trans_table)
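            # e.g. text "GATTACA" -> complement "CTAATGT" (a small
            # illustrative example, not from the original source)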
img = Image.new("1", (border_width*2 + width, 24), 1)
#24 pixels high, length is arbitrary
width, height = img.size
print("width", width, "height", height)
draw = ImageDraw.Draw(img)
draw.fontmode = "1"
draw.line(((0, 1), (width, 1)), fill=0)
draw.line(((0, height - 2), (width, height - 2)), fill=0)
# Draw gene sequence
draw.text((border_width, 1), text, 0, font=font)
#draw.text((border_width, border_width+1), bonds, 0, font=font)
# Draw complementary DNA sequence
draw.text((border_width, border_width+9), complement, 0, font=font)
draw = ImageDraw.Draw(img)
img = img.rotate(-90, expand=1)
#width, height = img.size
#print("width", width, "height", height)
img.save("b_test.png")
return img
else:
# PROTEIN-CODING GENES
# --------------------
# Get size of nucleotide sequence
width, height = font.getsize(text[0])
img = Image.new("1", (border_width*2 + width, 24), 1)
#24 pixels high, length is arbitrary
width, height = img.size
print("width", width, "height", height)
draw = ImageDraw.Draw(img)
draw.fontmode = "1"
draw.line(((0, 1), (width, 1)), fill=0)
draw.line(((0, height - 2), (width, height - 2)), fill=0)
# Draw DNA coding sequence
draw.text((border_width, 0), text[0], 0, font=font)
# Draw protein sequence
draw.text((border_width, border_width+7), text[1], 0, font=font)
draw = ImageDraw.Draw(img)
img = img.rotate(-90, expand=1)
#width, height = img.size
#print("width", width, "height", height)
img.save("b_test.png")
return img
except Exception as exception:
print(exception)
tweet = choose_tweet()
text = find_gene_id(tweet)
#text = input("Enter text: ")
#pattern = Pattern24.from_test_columns()
#pattern = Pattern24.from_test_rows()
#image = Image.open("../img/at_test1.bmp")
image = create_image_from_text(text)
pattern = Pattern24.from_image(image)
knitter = Knitter24("/dev/ttyUSB2", 9600)
knitter.send_pattern(pattern)
|
tangentmonger/twitterknitter
|
python/twitterknitter.py
|
Python
|
gpl-2.0
| 10,925
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
"""Superdesk Users"""
from superdesk.metadata.item import BYLINE, SIGN_OFF
from superdesk.resource import Resource
class UsersResource(Resource):
def __init__(self, endpoint_name, app, service, endpoint_schema=None):
self.readonly = True if app.config.get('LDAP_SERVER', None) else False
self.additional_lookup = {
'url': 'regex("[\w]+")',
'field': 'username'
}
self.schema = {
'username': {
'type': 'string',
'unique': True,
'required': True,
'minlength': 1
},
'password': {
'type': 'string',
'minlength': 5,
'readonly': self.readonly
},
'first_name': {
'type': 'string',
'readonly': self.readonly
},
'last_name': {
'type': 'string',
'readonly': self.readonly
},
'display_name': {
'type': 'string',
'readonly': self.readonly
},
'email': {
'unique': True,
'type': 'email',
'required': True
},
'phone': {
'type': 'phone_number',
'readonly': self.readonly,
'nullable': True
},
'language': {
'type': 'string',
'readonly': self.readonly,
'nullable': True
},
'user_info': {
'type': 'dict'
},
'picture_url': {
'type': 'string',
'nullable': True
},
'avatar': Resource.rel('upload', embeddable=True, nullable=True),
'role': Resource.rel('roles', True),
'privileges': {'type': 'dict'},
'workspace': {
'type': 'dict'
},
'user_type': {
'type': 'string',
'allowed': ['user', 'administrator'],
'default': 'user'
},
'is_active': {
'type': 'boolean',
'default': True
},
'is_enabled': {
'type': 'boolean',
'default': True
},
'needs_activation': {
'type': 'boolean',
'default': True
},
'desk': Resource.rel('desks'), # Default desk of the user, which would be selected when logged-in.
SIGN_OFF: { # Used for putting a sign-off on the content when it's created/updated except kill
'type': 'string',
'required': False,
'regex': '^[a-zA-Z0-9]+$'
},
BYLINE: {
'type': 'string',
'required': False,
'nullable': True
}
}
self.extra_response_fields = [
'display_name',
'username',
'email',
'user_info',
'picture_url',
'avatar',
'is_active',
'is_enabled',
'needs_activation',
'desk'
]
self.etag_ignore_fields = ['session_preferences', '_etag']
self.datasource = {
'projection': {'password': 0},
'default_sort': [('username', 1)],
}
self.privileges = {'POST': 'users', 'DELETE': 'users', 'PATCH': 'users'}
super().__init__(endpoint_name, app=app, service=service, endpoint_schema=endpoint_schema)
|
akintolga/superdesk-core
|
superdesk/users/users.py
|
Python
|
agpl-3.0
| 3,970
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the profile page."""
__author__ = 'sfederwisch@google.com (Stephanie Federwisch)'
from core.controllers import base
from core.domain import config_domain
from core.domain import exp_services
from core.domain import user_services
import feconf
import utils
EDITOR_PREREQUISITES_AGREEMENT = config_domain.ConfigProperty(
'editor_prerequisites_agreement', 'UnicodeString',
'The agreement that editors are asked to accept before making any '
'contributions.',
default_value=feconf.DEFAULT_EDITOR_PREREQUISITES_AGREEMENT
)
class ProfilePage(base.BaseHandler):
"""The profile page."""
PAGE_NAME_FOR_CSRF = 'profile'
@base.require_user
def get(self):
"""Handles GET requests."""
self.values.update({
'nav_mode': feconf.NAV_MODE_PROFILE,
})
self.render_template('profile/profile.html')
class ProfileHandler(base.BaseHandler):
"""Provides data for the profile page."""
@base.require_user
def get(self):
"""Handles GET requests."""
self.render_json(self.values)
class EditorPrerequisitesPage(base.BaseHandler):
"""The page which prompts for username and acceptance of terms."""
PAGE_NAME_FOR_CSRF = 'editor_prerequisites_page'
@base.require_user
def get(self):
"""Handles GET requests."""
self.values.update({
'agreement': EDITOR_PREREQUISITES_AGREEMENT.value,
'nav_mode': feconf.NAV_MODE_PROFILE,
})
self.render_template('profile/editor_prerequisites.html')
class EditorPrerequisitesHandler(base.BaseHandler):
"""Provides data for the editor prerequisites page."""
PAGE_NAME_FOR_CSRF = 'editor_prerequisites_page'
@base.require_user
def get(self):
"""Handles GET requests."""
user_settings = user_services.get_user_settings(self.user_id)
self.render_json({
'has_agreed_to_terms': bool(user_settings.last_agreed_to_terms),
'username': user_settings.username,
})
@base.require_user
def post(self):
"""Handles POST requests."""
username = self.payload.get('username')
agreed_to_terms = self.payload.get('agreed_to_terms')
if not isinstance(agreed_to_terms, bool) or not agreed_to_terms:
raise self.InvalidInputException(
'In order to edit explorations on this site, you will '
'need to accept the license terms.')
else:
user_services.record_agreement_to_terms(self.user_id)
if user_services.get_username(self.user_id):
# A username has already been set for this user.
self.render_json({})
return
try:
user_services.set_username(self.user_id, username)
except utils.ValidationError as e:
raise self.InvalidInputException(e)
self.render_json({})
class UsernameCheckHandler(base.BaseHandler):
"""Checks whether a username has already been taken."""
PAGE_NAME_FOR_CSRF = 'editor_prerequisites_page'
@base.require_user
def post(self):
"""Handles POST requests."""
username = self.payload.get('username')
try:
user_services.UserSettings.require_valid_username(username)
except utils.ValidationError as e:
raise self.InvalidInputException(e)
username_is_taken = user_services.is_username_taken(username)
self.render_json({
'username_is_taken': username_is_taken,
})
|
miyucy/oppia
|
core/controllers/profile.py
|
Python
|
apache-2.0
| 4,152
|
"""
This script helps avoid recreating audio files by caching common audio files locally.
"""
import os
import string
from shutil import rmtree
DEFAULT_LOCAL_CACHE_DIR = os.path.join(os.path.expanduser("~"), "reko.cache")
MAX_FILENAME_LEN = 250
TMP_IMAGE_NAME = "reko_img.png"
class CacheStore():
def __init__(self):
        self._cache_dir = DEFAULT_LOCAL_CACHE_DIR
self._cache_img = None
@property
def cache_dir(self):
if not os.path.exists(self._cache_dir):
os.mkdir(self._cache_dir)
return self._cache_dir
@cache_dir.setter
def cache_dir(self, value):
self._cache_dir = value
def get_filepath(self, filename):
return os.path.join(self.cache_dir, filename)
@property
def cache_img(self):
return self.get_filepath(TMP_IMAGE_NAME)
def get_filename(self, txt, ext=None):
"""
        Determine a filename that may help to identify the given message.
:param txt: The message in text
:param ext: file extension (e.g. mp4, png, etc)
"""
new_filename = txt.lower().translate(str.maketrans("", "", string.punctuation)).replace(" ","")
new_filename = (new_filename[:MAX_FILENAME_LEN] + "..") \
if len(new_filename) > MAX_FILENAME_LEN else new_filename
if ext is not None:
new_filename = f"{new_filename}.{ext}"
return new_filename
def maintain_cache_dir(self, filename):
if len(filename) > MAX_FILENAME_LEN:
# Do not keep this file (filename may not contain entire message)
f = os.path.join(self.cache_dir, filename)
if os.path.exists(f):
os.remove(f)
def delete_cache_dir(self):
if os.path.exists(self._cache_dir):
rmtree(self._cache_dir)
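# A minimal usage sketch (not part of the original module): derive a cache
# filename for a message and build its full path. Note that get_filepath
# creates the cache directory on first access.
if __name__ == "__main__":
    store = CacheStore()
    filename = store.get_filename("Hello, world!", ext="mp3")  # -> "helloworld.mp3"
    print(store.get_filepath(filename))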
|
kyhau/reko
|
reko/cachestore.py
|
Python
|
mit
| 1,826
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_gir_profile_management
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Create a maintenance-mode or normal-mode profile for GIR.
description:
- Manage a maintenance-mode or normal-mode profile with configuration
commands that can be applied during graceful removal
or graceful insertion.
author:
- Gabriele Gerbino (@GGabriele)
notes:
- This module is not idempotent when C(state=present).
- C(state=absent) removes the whole profile.
options:
commands:
description:
- List of commands to be included into the profile.
required: false
default: null
mode:
description:
- Configure the profile as Maintenance or Normal mode.
required: true
choices: ['maintenance', 'normal']
state:
description:
- Specify desired state of the resource.
required: false
default: present
choices: ['present','absent']
include_defaults:
description:
- Specify to retrieve or not the complete running configuration
for module operations.
required: false
default: false
choices: ['true','false']
config:
description:
- Specify the configuration string to be used for module operations.
required: false
default: null
'''
EXAMPLES = '''
# Create a maintenance-mode profile
- nxos_gir_profile_management:
mode: maintenance
commands:
- router eigrp 11
- isolate
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Remove the maintenance-mode profile
- nxos_gir_profile_management:
mode: maintenance
state: absent
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: list of commands passed into module.
returned: verbose mode
type: list
sample: ["router eigrp 11", "isolate"]
existing:
description: list of existing profile commands.
returned: verbose mode
type: list
sample: ["router bgp 65535","isolate","router eigrp 10","isolate",
"diagnostic bootup level complete"]
end_state:
description: list of profile entries after module execution.
returned: verbose mode
type: list
sample: ["router bgp 65535","isolate","router eigrp 10","isolate",
"diagnostic bootup level complete","router eigrp 11", "isolate"]
updates:
description: commands sent to the device
returned: always
type: list
sample: ["configure maintenance profile maintenance-mode",
"router eigrp 11","isolate"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
def get_existing(module):
existing = []
netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))
if module.params['mode'] == 'maintenance':
parents = ['configure maintenance profile maintenance-mode']
else:
parents = ['configure maintenance profile normal-mode']
config = netcfg.get_section(parents)
if config:
existing = config.splitlines()
existing = [cmd.strip() for cmd in existing]
existing.pop(0)
return existing
def state_present(module, existing, commands):
cmds = list()
cmds.extend(commands)
if module.params['mode'] == 'maintenance':
cmds.insert(0, 'configure maintenance profile maintenance-mode')
else:
cmds.insert(0, 'configure maintenance profile normal-mode')
return cmds
def state_absent(module, existing, commands):
if module.params['mode'] == 'maintenance':
cmds = ['no configure maintenance profile maintenance-mode']
else:
cmds = ['no configure maintenance profile normal-mode']
return cmds
def invoke(name, *args, **kwargs):
func = globals().get(name)
if func:
return func(*args, **kwargs)
def main():
argument_spec = dict(
commands=dict(required=False, type='list'),
mode=dict(required=True, choices=['maintenance', 'normal']),
state=dict(choices=['absent', 'present'],
default='present'),
include_defaults=dict(default=False),
config=dict()
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
state = module.params['state']
commands = module.params['commands'] or []
if state == 'absent' and commands:
module.fail_json(msg='when state is absent, no command can be used.')
existing = invoke('get_existing', module)
end_state = existing
changed = False
result = {}
cmds = []
if state == 'present' or (state == 'absent' and existing):
cmds = invoke('state_%s' % state, module, existing, commands)
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
load_config(module, cmds)
changed = True
end_state = invoke('get_existing', module)
result['changed'] = changed
if module._verbosity > 0:
end_state = invoke('get_existing', module)
result['end_state'] = end_state
result['existing'] = existing
result['proposed'] = commands
result['updates'] = cmds
result['warnings'] = warnings
module.exit_json(**result)
if __name__ == '__main__':
main()
|
shanemcd/ansible
|
lib/ansible/modules/network/nxos/nxos_gir_profile_management.py
|
Python
|
gpl-3.0
| 6,691
|
import argparse
import re
import sys
import dockbot
from dockbot.Error import Error
from dockbot.Dockbot import Dockbot
from dockbot.Config import Config
from dockbot.Image import Image
from dockbot.Container import Container
from dockbot.Slave import Slave
from dockbot.Master import Master
from dockbot.RemoteImage import RemoteImage
from dockbot.RemoteSlave import RemoteSlave
from dockbot.util import *
args = {}
RUNNING = ('Running', 'green')
OFFLINE = ('Offline', 'blue')
NOT_FOUND = ('Not Found', 'magenta')
STARTING = ('Starting', 'Yellow')
STOPPING = ('Stopping', 'red')
BUILDING = ('Building', 'cyan')
BUILT = ('Built', 'white')
DELETING = ('Deleting', 'Red')
DIRTY = ('Dirty', 'red')
TRIGGERED = ('Triggered', 'cyan')
REMOTE = ('Remote', 'Magenta')
usage = '%(prog)s [OPTIONS] COMMAND [CONTAINER] [-- ARGS...]'
description = '''
A tool for running a Buildbot master and set of slaves under Docker.
'''
cmd_help = '''
status Print status then exit.
config Print the instance config(s).
shell Run shell in instance.
start Start docker instance(s).
stop Stop docker instance(s).
restart Start then stop docker instance(s).
build Build image(s).
delete Delete an image or container.
trigger Trigger one or all builds on a running slave container.
publish Publish a project's files.
'''
def version_type(s):
if not re.match(r'^\d+\.\d+$', s):
raise argparse.ArgumentTypeError('Must be <major>.<minor>')
return s
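# Illustrative behaviour (examples not from the original source):
#   version_type('1.2')   -> '1.2'
#   version_type('1.2.3') -> raises argparse.ArgumentTypeError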
def run():
parser = argparse.ArgumentParser(
usage = usage, description = description,
formatter_class = argparse.RawTextHelpFormatter)
parser._positionals.title = 'Positional arguments'
parser._optionals.title = 'OPTIONS'
parser.add_argument('cmd', metavar = 'COMMAND', nargs = '?',
default = 'status', help = cmd_help)
parser.add_argument('name', metavar = 'NAME', nargs = '?',
help = 'Docker instance or container to operate on. '
'Either "master", one of the slave or image names or '
'a glob pattern matching one or more slave or images '
'names.')
parser.add_argument('args', metavar = 'ARGS', nargs = '*',
help = 'Extra arguments to pass on to Docker')
parser.add_argument('--slaves', metavar = 'DIR', default = 'slaves',
help = 'Slave directory')
parser.add_argument('--width', metavar = 'NUMBER', default = 80, type = int,
help = 'Status line width')
parser.add_argument('-v', '--verbose', action = 'store_true',
help = 'Verbose output')
parser.add_argument('-f', '--foreground', action = 'store_true',
help = 'Run in foreground')
parser.add_argument('--force', action = 'store_true',
help = 'Run even if container is dirty')
parser.add_argument('-a', '--all', action = 'store_true',
help = 'Perform all prerequisite actions automatically')
parser.add_argument('-p', '--project',
help = 'Specify a specific project to trigger.')
parser.add_argument('--release', default = 'alpha', choices = (
'alpha', 'beta', 'public'), help = 'Release to publish.')
parser.add_argument('--version', type = version_type,
help = 'Version to publish. Latest version if '
'omitted.')
parser.add_argument('--mode',
help = 'Build mode to publish. Otherwise, all modes.')
parser.add_argument('-c', '--continue', action = 'store_true',
dest='_continue',
help = 'Continue running if an operation fails.')
    parser.add_argument('--group', help = 'The system group to use when '
                        'publishing files.')
    parser.add_argument('--fperms', default = 0o644, type = int,
                        help = 'The file permissions to use when publishing '
                        'files.')
    parser.add_argument('--dperms', default = 0o755, type = int,
                        help = 'The directory permissions to use when '
                        'publishing files.')
    parser.add_argument('--key', help = 'Path to the key file used for signing '
                        'published files.')
    parser.add_argument('--password', help = 'Password used to unlock signing '
                        'key. Will be prompted for if not supplied.')
    parser.add_argument('--ts-url',
                        default = 'http://timestamp.comodoca.com/authenticode',
                        help = 'Time stamping URL used for signing published '
                        'files.')
global args
args = parser.parse_args()
try:
dockbot.Dockbot(args)
except dockbot.Error as e:
print('\n%s\n' % e)
sys.exit(1)
|
CauldronDevelopmentLLC/dockbot
|
dockbot/__init__.py
|
Python
|
gpl-3.0
| 5,070
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 Pedro Navarro Perez
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods for operations related to the management of volumes,
and storage repositories
"""
import subprocess
import sys
import time
from nova import block_device
from nova import flags
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt.hyperv import vmutils
# Check needed for unit testing on Unix
if sys.platform == 'win32':
import _winreg
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
class VolumeUtils(object):
    def execute(self, *args, **kwargs):
        # Each caller passes a single, fully-formed command line string, so
        # join the arguments rather than wrapping the tuple in a list
        # (subprocess.Popen cannot execute a list containing a tuple).
        proc = subprocess.Popen(
            ' '.join(args),
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
stdout_value, stderr_value = proc.communicate()
if stdout_value.find('The operation completed successfully') == -1:
raise vmutils.HyperVException(_('An error has occurred when '
'calling the iscsi initiator: %s') % stdout_value)
def get_iscsi_initiator(self, cim_conn):
"""Get iscsi initiator name for this machine"""
computer_system = cim_conn.Win32_ComputerSystem()[0]
hostname = computer_system.name
keypath = \
r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\iSCSI\Discovery"
try:
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, keypath, 0,
_winreg.KEY_ALL_ACCESS)
temp = _winreg.QueryValueEx(key, 'DefaultInitiatorName')
initiator_name = str(temp[0])
_winreg.CloseKey(key)
except Exception:
LOG.info(_("The ISCSI initiator name can't be found. "
"Choosing the default one"))
computer_system = cim_conn.Win32_ComputerSystem()[0]
initiator_name = "iqn.1991-05.com.microsoft:" + \
hostname.lower()
return {
'ip': FLAGS.my_ip,
'initiator': initiator_name,
}
def login_storage_target(self, target_lun, target_iqn, target_portal):
"""Add target portal, list targets and logins to the target"""
separator = target_portal.find(':')
target_address = target_portal[:separator]
target_port = target_portal[separator + 1:]
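        # e.g. target_portal "192.168.1.10:3260" -> target_address
        # "192.168.1.10", target_port "3260" (illustrative values, not from
        # the original source)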
#Adding target portal to iscsi initiator. Sending targets
self.execute('iscsicli.exe ' + 'AddTargetPortal ' +
target_address + ' ' + target_port +
' * * * * * * * * * * * * *')
#Listing targets
self.execute('iscsicli.exe ' + 'LisTargets')
#Sending login
self.execute('iscsicli.exe ' + 'qlogintarget ' + target_iqn)
        #Waiting for the disk to be mounted. Research this
time.sleep(FLAGS.hyperv_wait_between_attach_retry)
def logout_storage_target(self, _conn_wmi, target_iqn):
""" Logs out storage target through its session id """
sessions = _conn_wmi.query(
"SELECT * FROM MSiSCSIInitiator_SessionClass \
WHERE TargetName='" + target_iqn + "'")
for session in sessions:
self.execute_log_out(session.SessionId)
def execute_log_out(self, session_id):
""" Executes log out of the session described by its session ID """
self.execute('iscsicli.exe ' + 'logouttarget ' + session_id)
def volume_in_mapping(self, mount_device, block_device_info):
block_device_list = [block_device.strip_dev(vol['mount_device'])
for vol in
driver.block_device_info_get_mapping(
block_device_info)]
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
block_device_list.append(
block_device.strip_dev(swap['device_name']))
block_device_list += [block_device.strip_dev(
ephemeral['device_name'])
for ephemeral in
driver.block_device_info_get_ephemerals(block_device_info)]
LOG.debug(_("block_device_list %s"), block_device_list)
return block_device.strip_dev(mount_device) in block_device_list
|
tylertian/Openstack
|
openstack F/nova/nova/virt/hyperv/volumeutils.py
|
Python
|
apache-2.0
| 5,097
|
from numbers import Number
from collections import OrderedDict
import inspect
import numpy as np
import theano
import theano.tensor as TT
from . import helpers
from . import filter
from . import origin
class SimpleNode(object):
"""A SimpleNode allows you to put arbitary code as part of an NEF model.
This object has Origins and Terminations which can be used just like
any other Nengo component. Arbitrary code can be run every time step,
making this useful for simulating sensory systems (reading data
from a file or a webcam, for example), motor systems (writing data to
a file or driving a robot, for example), or even parts of the brain
that we don't want a full neural model for (symbolic reasoning or
declarative memory, for example).
    You can have as many origins as you like. The dimensionality
    of each origin is set by the length of the returned vector of floats.
class SquaringFiveValues(nef.SimpleNode):
def init(self):
self.value=0
def origin_output(self):
return [self.value]
There is also a special method called tick() that is called once per
time step.
class HelloNode(nef.SimpleNode):
def tick(self):
            print('Hello world')
The current time can be accessed via `self.t`. This value will be the
time for the beginning of the current time step. The end of the current
time step is `self.t_end`.
"""
def __init__(self, name):
"""
:param string name: the name of the created node
"""
self.t = 0 # current simulation time
self.name = name
self.dimensions = {} # tracks dimensions of inputs
self.input = {}
self.origin = {}
self.array_size = 1
self.init() # initialize internal variables if there are any
# look at all the defined methods, if any start with 'origin_',
# make origins that implement the defined function
for name, method in inspect.getmembers(self, inspect.ismethod):
if name.startswith('origin_'):
# get initial value
initial_value = method()
# add to dictionary of origins
if isinstance(initial_value, TT.TensorVariable):
#import pdb; pdb.set_trace()
self.origin[name[7:]] = origin.Origin(
func=None, initial_value=np.zeros(initial_value.eval().shape))
self.origin[name[7:]].method = method
else:
self.origin[name[7:]] = origin.Origin(
func=method, initial_value=initial_value)
def add_input(self, name, dimensions):
"""Create a Filter and add it to the list of input
"""
self.input[name] = filter.Filter(
name=name, pstc=None, shape=(1, dimensions))
self.dimensions[name] = dimensions
def init(self):
"""Initialize the node.
Override this to initialize any internal variables. This will
also be called whenever the simulation is reset.
"""
pass
def reset(self, **kwargs):
"""Reset the state of all the internal variables."""
self.init(**kwargs)
def set_input_source(self, name, pstc, source):
"""Set the source of input for a filter specified in init().
"""
        if name not in self.input:
            raise KeyError('Invalid SimpleNode input name: %s' % name)
self.input[name].pstc = pstc
self.input[name].source = source
def tick(self):
"""An extra utility function that is called every time step.
Override this to create custom behaviour that isn't necessarily tied
to a particular input or output. Often used to write spike data
to a file or produce some other sort of custom effect.
"""
pass
def theano_tick(self):
"""Run the simple node.
"""
self.tick()
for origin in self.origin.values():
if origin.func is not None:
value = origin.func()
# if value is a scalar output, make it a list
if isinstance(value, Number):
value = [value]
# cast as float32 for consistency / speed,
# but _after_ it's been made a list
origin.decoded_output.set_value(np.float32(value))
def update(self, dt):
"""Update the input and output of all the theano variables.
"""
updates = OrderedDict()
for input in self.input.values():
updates.update(input.update(dt))
for origin in self.origin.values():
if origin.func is None:
updates.update(
{origin.decoded_output: origin.method()})
return updates
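# A minimal usage sketch (an illustration, not part of the original module):
# subclassing SimpleNode as the class docstring describes. `origin_squared`
# creates an origin named "squared"; `tick` runs once per simulation step.
class SquaringTimeNode(SimpleNode):
    def init(self):
        self.value = 0.0

    def origin_squared(self):
        return [self.value ** 2]

    def tick(self):
        self.value = self.t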
|
ctn-waterloo/nengo_theano
|
nengo_theano/simplenode.py
|
Python
|
mit
| 4,900
|
# encoding: utf-8
from __future__ import unicode_literals
import copy
# `from doctest import DocTestCase` causes very weird crashes: DocTestCase is
# detected as a TestCase subclass, so unittest.TestLoader.loadTestsFromModule()
# (called from GreenTestLoader.loadTestsFromModule()) treats it as a test
# definition and actually tries to run it.
import doctest
from io import StringIO
import sys
import os
import unittest
import tempfile
from green.config import default_args
from green.output import Colors, GreenStream
from green.result import (
GreenTestResult,
proto_test,
ProtoTest,
proto_error,
ProtoTestResult,
BaseTestResult,
)
try:
from unittest.mock import MagicMock, patch
except ImportError:
from mock import MagicMock, patch
from coverage import coverage, CoverageException
class MyProtoTest(ProtoTest, object):
"""
For quickly making a ProtoTest
"""
def __init__(self):
super(MyProtoTest, self).__init__()
self.module = "my_module"
self.class_name = "MyClass"
self.method_name = "myMethod"
self.docstr_part = "My docstring"
self.subtest_part = ""
class TestBaseTestResult(unittest.TestCase):
def test_stdoutOutput(self):
"""
recordStdout records output.
"""
btr = BaseTestResult(None, None)
pt = ProtoTest()
o = "some output"
btr.recordStdout(pt, o)
self.assertEqual(btr.stdout_output[pt], o)
def test_stdoutNoOutput(self):
"""
recordStdout ignores empty output sent to it
"""
btr = BaseTestResult(None, None)
pt = ProtoTest()
btr.recordStdout(pt, "")
self.assertEqual(btr.stdout_output, {})
def test_displayStdout(self):
"""
displayStdout displays captured stdout
"""
stream = StringIO()
noise = "blah blah blah"
btr = BaseTestResult(stream, Colors(False))
pt = ProtoTest()
btr.stdout_output[pt] = noise
btr.displayStdout(pt)
self.assertIn(noise, stream.getvalue())
def test_stderrErrput(self):
"""
recordStderr records errput.
"""
btr = BaseTestResult(None, None)
pt = ProtoTest()
o = "some errput"
btr.recordStderr(pt, o)
self.assertEqual(btr.stderr_errput[pt], o)
def test_stderrNoErrput(self):
"""
recordStderr ignores empty errput sent to it
"""
btr = BaseTestResult(None, None)
pt = ProtoTest()
btr.recordStderr(pt, "")
self.assertEqual(btr.stderr_errput, {})
def test_displayStderr(self):
"""
displayStderr displays captured stderr
"""
stream = StringIO()
noise = "blah blah blah"
btr = BaseTestResult(stream, Colors(False))
pt = ProtoTest()
btr.stderr_errput[pt] = noise
btr.displayStderr(pt)
self.assertIn(noise, stream.getvalue())
class TestProtoTestResult(unittest.TestCase):
def test_addSuccess(self):
"""
addSuccess adds a test correctly
"""
ptr = ProtoTestResult()
test = proto_test(MagicMock())
ptr.addSuccess(test)
self.assertEqual(test, ptr.passing[0])
def test_addError(self):
"""
addError adds a test and error correctly
"""
ptr = ProtoTestResult()
test = proto_test(MagicMock())
try:
raise Exception
except:
err = proto_error(sys.exc_info())
ptr.addError(test, err)
self.assertEqual(test, ptr.errors[0][0])
self.assertEqual(err, ptr.errors[0][1])
def test_addFailure(self):
"""
addFailure adds a test and error correctly
"""
ptr = ProtoTestResult()
test = proto_test(MagicMock())
try:
raise Exception
except:
err = proto_error(sys.exc_info())
ptr.addFailure(test, err)
self.assertEqual(test, ptr.failures[0][0])
self.assertEqual(err, ptr.failures[0][1])
def test_addSkip(self):
"""
addSkip adds a test and reason correctly
"""
ptr = ProtoTestResult()
test = proto_test(MagicMock())
reason = "some plausible reason"
ptr.addSkip(test, reason)
self.assertEqual(test, ptr.skipped[0][0])
self.assertEqual(reason, ptr.skipped[0][1])
def test_addExpectedFailure(self):
"""
addExpectedFailure adds a test and error correctly
"""
ptr = ProtoTestResult()
test = proto_test(MagicMock())
try:
raise Exception
except:
err = proto_error(sys.exc_info())
ptr.addExpectedFailure(test, err)
self.assertEqual(test, ptr.expectedFailures[0][0])
self.assertEqual(err, ptr.expectedFailures[0][1])
def test_addUnexpectedSuccess(self):
"""
addUnexpectedSuccess adds a test correctly
"""
ptr = ProtoTestResult()
test = proto_test(MagicMock())
ptr.addUnexpectedSuccess(test)
self.assertEqual(test, ptr.unexpectedSuccesses[0])
@patch("green.result.ProtoTestResult.addError")
@patch("green.result.ProtoTestResult.addFailure")
def test_addSubTest_failure(self, mock_addFailure, mock_addError):
"""
addSubTest calls over to addFailure for failures
"""
ptr = ProtoTestResult()
test = proto_test(MagicMock())
test.failureException = Exception
subtest = MagicMock()
err = [Exception]
ptr.addSubTest(test, subtest, err)
mock_addFailure.assert_called_with(subtest, err)
@patch("green.result.ProtoTestResult.addError")
@patch("green.result.ProtoTestResult.addFailure")
def test_addSubTest_error(self, mock_addFailure, mock_addError):
"""
addSubTest calls over to addError for errors
"""
ptr = ProtoTestResult()
test = proto_test(MagicMock())
test.failureException = KeyError
subtest = MagicMock()
err = [Exception]
ptr.addSubTest(test, subtest, err)
mock_addError.assert_called_with(subtest, err)
class TestProtoError(unittest.TestCase):
def test_str(self):
"""
Running a ProtoError through str() should result in a traceback string
"""
test_str = "noetuaoe"
try:
raise Exception(test_str)
except:
err = sys.exc_info()
pe = proto_error(err)
self.assertIn(test_str, str(pe))
class TestProtoTest(unittest.TestCase):
def test_ProtoTestBlank(self):
"""
ProtoTest can be instantiated empty
"""
pt = ProtoTest()
for i in ["module", "class_name", "docstr_part", "method_name"]:
self.assertEqual("", getattr(pt, i, None))
def test_str(self):
"""
Running a ProtoTest through str() is the same as getting .dotted_name
"""
pt = ProtoTest()
pt.module = "aoeusnth"
self.assertEqual(str(pt), pt.dotted_name)
def test_ProtoTestFromTest(self):
"""
Passing a test into ProtoTest copies out the relevant info.
"""
module = "green.test.test_result"
class_name = "Small"
docstr_part = "stuff"
method_name = "test_method"
class Small(unittest.TestCase):
def test_method(self):
"stuff"
pt = ProtoTest(Small("test_method"))
for i in ["module", "class_name", "docstr_part", "method_name"]:
self.assertEqual(locals()[i], getattr(pt, i, None))
def test_getDescription(self):
"""
getDescription() returns what we expect for all verbose levels
"""
# With a docstring
class Fruit(unittest.TestCase):
def test_stuff(self):
"apple"
pass
t = proto_test(Fruit("test_stuff"))
self.assertEqual(t.getDescription(1), "")
self.assertEqual(t.getDescription(2), "test_stuff")
self.assertEqual(t.getDescription(3), "apple")
self.assertEqual(t.getDescription(4), "test_stuff: apple")
self.assertEqual(t.getDescription(5), "test_stuff: apple")
# Without a docstring
class Vegetable(unittest.TestCase):
def test_vegetable(self):
pass
t = proto_test(Vegetable("test_vegetable"))
self.assertEqual(t.getDescription(1), "")
self.assertEqual(t.getDescription(2), "test_vegetable")
self.assertEqual(t.getDescription(3), "test_vegetable")
self.assertEqual(t.getDescription(4), "test_vegetable")
self.assertEqual(t.getDescription(5), "test_vegetable")
def test_newlineDocstring(self):
"""
Docstrings starting with a newline are properly handled.
"""
class MyTests(unittest.TestCase):
def test_stuff(self):
"""
tricky
"""
pass
test = proto_test(MyTests("test_stuff"))
self.assertIn("tricky", test.getDescription(3))
def test_multilineDocstring(self):
"""
The description includes all of docstring until the first blank line.
"""
class LongDocs(unittest.TestCase):
def test_long(self):
"""First line is
tricky!
garbage
"""
pass
test = proto_test(LongDocs("test_long"))
self.assertIn("tricky", test.getDescription(3))
self.assertNotIn("garbage", test.getDescription(3))
def test_doctest(self):
"""
If we parse a doctest, we get all the fields we need.
"""
test = """
>>> f()
42
"""
def f():
return 42
parser = doctest.DocTestParser()
dt = parser.get_doctest(test, {"f": f}, "doctest.name", "somefile.py", 20)
dt.__module__ = "somefile"
p = proto_test(doctest.DocTestCase(dt))
# no description
self.assertEqual(p.getDescription(0), "")
self.assertEqual(p.getDescription(1), "")
# short description
self.assertEqual(p.getDescription(2), "doctest.name")
# long description
description = p.getDescription(3)
self.assertIn("doctest.name", description)
self.assertIn("somefile.py", description)
self.assertIn("20", description)
# very long == long
description = p.getDescription(4)
self.assertIn("doctest.name", description)
self.assertIn("somefile.py", description)
self.assertIn("20", description)
# dotted name
self.assertEqual(p.dotted_name, "doctest.name")
def test_class_or_module_failure(self):
"""
If we parse an error from a class or module failure, we get the correct result.
"""
p = ProtoTest()
p.is_class_or_module_teardown_error = True
p.name = "the thing"
self.assertEqual(p.getDescription(1), "the thing")
self.assertEqual(p.getDescription(2), "the thing")
self.assertEqual(p.getDescription(3), "the thing")
class TestGreenTestResult(unittest.TestCase):
def setUp(self):
self.args = copy.deepcopy(default_args)
self.stream = StringIO()
def tearDown(self):
del self.stream
del self.args
@patch("green.result.GreenTestResult.printErrors")
def test_stopTestRun(self, mock_printErrors):
"""
We ignore coverage's error about not having anything to cover.
"""
self.args.cov = MagicMock()
self.args.cov.stop = MagicMock(
side_effect=CoverageException("Different Exception")
)
self.args.run_coverage = True
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.startTestRun()
self.assertRaises(CoverageException, gtr.stopTestRun)
self.args.cov.stop = MagicMock(
side_effect=CoverageException("No data to report")
)
def test_tryRecordingStdoutStderr(self):
"""
Recording stdout and stderr works correctly.
"""
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.recordStdout = MagicMock()
gtr.recordStderr = MagicMock()
output = "apple"
test1 = MagicMock()
ptr1 = MagicMock()
ptr1.stdout_output = {test1: output}
ptr1.stderr_errput = {}
errput = "banana"
test2 = MagicMock()
ptr2 = MagicMock()
ptr2.stdout_output = {}
ptr2.stderr_errput = {test2: errput}
gtr.tryRecordingStdoutStderr(test1, ptr1)
gtr.recordStdout.assert_called_with(test1, output)
gtr.tryRecordingStdoutStderr(test2, ptr2)
gtr.recordStderr.assert_called_with(test2, errput)
def test_tryRecordingStdoutStderr_SubTest(self):
"""
Recording stdout and stderr works correctly for failed/errored SubTests.
"""
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.recordStdout = MagicMock()
gtr.recordStderr = MagicMock()
output = "apple"
test1 = MagicMock()
test1.dotted_name = "test 1"
subtest1 = MagicMock()
subtest1.dotted_name = "test 1: the subtest"
subtest1.class_name = "SubTest"
ptr1 = MagicMock()
ptr1.stdout_output = {test1: output}
ptr1.stderr_errput = {}
errput = "banana"
test2 = MagicMock()
test2.dotted_name = "test 2"
subtest2 = MagicMock()
subtest2.dotted_name = "test 2: subtests are annoying"
subtest2.class_name = "SubTest"
ptr2 = MagicMock()
ptr2.stdout_output = {}
ptr2.stderr_errput = {test2: errput}
gtr.tryRecordingStdoutStderr(subtest1, ptr1, err=True)
gtr.recordStdout.assert_called_with(subtest1, output)
gtr.tryRecordingStdoutStderr(subtest2, ptr2, err=True)
gtr.recordStderr.assert_called_with(subtest2, errput)
def test_failfastAddError(self):
"""
addError triggers failfast when it is set
"""
self.args.failfast = True
gtr = GreenTestResult(self.args, GreenStream(self.stream))
self.assertEqual(gtr.failfast, True)
try:
raise Exception
except:
err = sys.exc_info()
self.assertEqual(gtr.shouldStop, False)
gtr.addError(MyProtoTest(), proto_error(err))
self.assertEqual(gtr.shouldStop, True)
def test_failfastAddFailure(self):
"""
addFailure triggers failfast when it is set
"""
self.args.failfast = True
gtr = GreenTestResult(self.args, GreenStream(self.stream))
self.assertEqual(gtr.failfast, True)
try:
raise Exception
except:
err = sys.exc_info()
self.assertEqual(gtr.shouldStop, False)
gtr.addFailure(MyProtoTest(), proto_error(err))
self.assertEqual(gtr.shouldStop, True)
def test_failfastAddUnexpectedSuccess(self):
"""
addUnexpectedSuccess no longer triggers failfast when it is set
"""
self.args.failfast = True
gtr = GreenTestResult(self.args, GreenStream(self.stream))
self.assertEqual(gtr.failfast, True)
self.assertEqual(gtr.shouldStop, False)
gtr.addUnexpectedSuccess(MyProtoTest())
self.assertEqual(gtr.shouldStop, False)
def _outputFromVerboseTest(self):
"""
Start a test with verbose = 2 and get its output.
"""
class FakeCase(unittest.TestCase):
def runTest(self):
pass
self.args.verbose = 2
gtr = GreenTestResult(self.args, GreenStream(self.stream))
tc = FakeCase()
gtr.startTest(tc)
output = self.stream.getvalue()
return output.split("\n")
def test_startTestVerboseTerminal(self):
"""
startTest() contains output we expect in verbose mode on a terminal
"""
self.stream.isatty = lambda: True
output_lines = self._outputFromVerboseTest()
# Output should look like (I'm not putting the termcolor formatting
# here)
# green.test.test_runner
# FakeCase
# test_it
self.assertEqual(len(output_lines), 3)
self.assertNotIn(" ", output_lines[0])
self.assertIn(" ", output_lines[1])
self.assertIn(" ", output_lines[2])
def test_startTestVerbosePipe(self):
"""
startTest() contains output we expect in verbose mode on a pipe
"""
self.stream.isatty = lambda: False
output_lines = self._outputFromVerboseTest()
# Output should look like (I'm not putting the termcolor formatting
# here)
# green.test.test_runner
# FakeCase
# test_it
self.assertEqual(len(output_lines), 3)
self.assertNotIn(" ", output_lines[0])
self.assertIn(" ", output_lines[1])
# No carriage return or extra lines printed
self.assertIn("", output_lines[2])
def test_reportOutcome(self):
"""
_reportOutcome contains output we expect.
"""
self.args.verbose = 1
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr._reportOutcome(None, ".", lambda x: x)
self.assertIn(".", self.stream.getvalue())
@patch("green.result.proto_test")
def test_reportOutcomeCursorUp(self, mock_proto_test):
"""
_reportOutcome moves the cursor up when it needs to.
"""
mockProtoTest = MagicMock()
mockProtoTest.getDescription.return_value = "a description"
mock_proto_test.return_value = mockProtoTest
self.args.verbose = 2
def isatty():
return True
gs = GreenStream(self.stream)
gs.isatty = isatty
gtr = GreenTestResult(self.args, gs)
r = "a fake reason"
t = MagicMock()
t.__str__.return_value = "x" * 1000
gtr._reportOutcome(t, ".", lambda x: x, None, r)
self.assertIn(r, self.stream.getvalue())
self.assertLess(len(self.stream.getvalue()), 2000)
@patch("green.result.proto_test")
def test_reportOutcomeVerbose(self, mock_proto_test):
"""
_reportOutcome contains output we expect in verbose mode.
"""
mockProtoTest = MagicMock()
mockProtoTest.getDescription.return_value = "a description"
mock_proto_test.return_value = mockProtoTest
self.args.verbose = 2
def isatty():
return True
gs = GreenStream(self.stream)
gs.isatty = isatty
gtr = GreenTestResult(self.args, gs)
r = "a fake reason"
t = MagicMock()
t.__str__.return_value = "junk"
gtr._reportOutcome(t, ".", lambda x: x, None, r)
self.assertIn(r, self.stream.getvalue())
def test_printErrorsSkipreport(self):
"""
printErrors() prints the skip report.
"""
self.args.verbose = 1
gtr = GreenTestResult(self.args, GreenStream(self.stream))
pt = MyProtoTest()
reason = "dog ate homework"
gtr.addSkip(pt, reason)
gtr.printErrors()
self.assertIn(reason, self.stream.getvalue())
def test_printErrorsStdout(self):
"""
printErrors() prints out the captured stdout.
"""
self.args.verbose = 1
self.args.termcolor = False
gtr = GreenTestResult(self.args, GreenStream(self.stream))
pt = MyProtoTest()
output = "this is what the test spit out to stdout"
gtr.recordStdout(pt, output)
gtr.addSuccess(pt)
gtr.printErrors()
self.assertIn(output, self.stream.getvalue())
def test_printErrorsStdoutQuietStdoutOnSuccess(self):
"""
printErrors() prints out the captured stdout
except when quiet_stdout is set to True
for successful tests.
"""
self.args.quiet_stdout = True
gtr = GreenTestResult(self.args, GreenStream(self.stream))
pt = MyProtoTest()
output = "this is what the test should not spit out to stdout"
gtr.recordStdout(pt, output)
gtr.addSuccess(pt)
gtr.printErrors()
self.assertNotIn(output, self.stream.getvalue())
def test_printErrorsStdoutQuietStdoutOnError(self):
"""
printErrors() prints out the captured stdout
except when quiet_stdout is set to True
for successful tests, but here we are on a
failing test.
"""
self.args.quiet_stdout = True
try:
raise Exception
except:
err = sys.exc_info()
gtr = GreenTestResult(self.args, GreenStream(self.stream))
pt = MyProtoTest()
output = "this is what the test should spit out to stdout"
gtr.recordStdout(pt, output)
gtr.addError(pt, proto_error(err))
gtr.printErrors()
self.assertIn(output, self.stream.getvalue())
def test_printErrorsStderrQuietStdoutOnSuccess(self):
"""
printErrors() prints out the captured stdout
except when quiet_stdout is set to True
for successful tests.
"""
self.args.quiet_stdout = True
gtr = GreenTestResult(self.args, GreenStream(self.stream))
pt = MyProtoTest()
output = "this is what the test should not spit out to stdout"
gtr.recordStderr(pt, output)
gtr.addSuccess(pt)
gtr.printErrors()
self.assertNotIn(output, self.stream.getvalue())
def test_printErrorsNoTracebacks(self):
"""
printErrors() omits tracebacks for failures and errors when
no_tracebacks is True
"""
self.args.no_tracebacks = True
try:
raise Exception
except:
err = sys.exc_info()
gtr = GreenTestResult(self.args, GreenStream(self.stream))
pt = MyProtoTest()
gtr.addError(pt, proto_error(err))
gtr.printErrors()
self.assertNotIn("Exception", self.stream.getvalue())
def test_printErrorsDots(self):
"""
printErrors() looks correct in verbose=1 (dots) mode.
"""
try:
raise Exception
except:
err = sys.exc_info()
self.args.verbose = 1
self.args.termcolor = False
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.addError(MyProtoTest(), proto_error(err))
gtr.printErrors()
self.assertIn("\n\n", self.stream.getvalue())
self.assertIn("my_module.MyClass.myMethod", self.stream.getvalue())
self.assertIn("test_printErrorsDots", self.stream.getvalue())
self.assertIn("raise Exception", self.stream.getvalue())
self.assertIn("Error", self.stream.getvalue())
def test_printErrorsVerbose2(self):
"""
printErrors() looks correct in verbose=2 mode.
"""
try:
raise Exception
except:
err = sys.exc_info()
self.args.verbose = 2
self.args.termcolor = False
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.addError(MyProtoTest(), proto_error(err))
gtr.printErrors()
self.assertIn("\n\n", self.stream.getvalue())
self.assertIn("my_module.MyClass.myMethod", self.stream.getvalue())
self.assertIn("test_printErrorsVerbose2", self.stream.getvalue())
self.assertIn("raise Exception", self.stream.getvalue())
self.assertIn("Error", self.stream.getvalue())
def test_printErrorsVerbose3(self):
"""
printErrors() looks correct in verbose=3 mode.
"""
try:
raise Exception
except:
err = sys.exc_info()
self.args.verbose = 3
self.args.termcolor = False
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.addError(MyProtoTest(), proto_error(err))
gtr.printErrors()
self.assertIn("\n\n", self.stream.getvalue())
self.assertIn("my_module.MyClass.myMethod", self.stream.getvalue())
self.assertIn("test_printErrorsVerbose3", self.stream.getvalue())
self.assertIn("raise Exception", self.stream.getvalue())
self.assertIn("Error", self.stream.getvalue())
def test_printErrorsVerbose4(self):
"""
printErrors() looks correct in verbose=4 mode.
"""
try:
raise Exception
except:
err = sys.exc_info()
self.args.verbose = 4
self.args.termcolor = False
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.addError(MyProtoTest(), err)
gtr.printErrors()
self.assertIn("\n\n", self.stream.getvalue())
self.assertIn("(most recent call last)", self.stream.getvalue())
self.assertIn("my_module.MyClass.myMethod", self.stream.getvalue())
self.assertIn("test_printErrorsVerbose4", self.stream.getvalue())
self.assertIn("raise Exception", self.stream.getvalue())
self.assertIn("Error", self.stream.getvalue())
def test_addProtoTestResult(self):
"""
addProtoTestResult adds the correct things to the correct places.
"""
ptr = ProtoTestResult()
err_t = proto_test(MagicMock())
try:
raise Exception
except:
err_e = proto_error(sys.exc_info())
ptr.addError(err_t, err_e)
ef_t = proto_test(MagicMock())
try:
raise Exception
except:
ef_e = proto_error(sys.exc_info())
ptr.addExpectedFailure(ef_t, ef_e)
fail_t = proto_test(MagicMock())
try:
raise Exception
except:
fail_e = proto_error(sys.exc_info())
ptr.addFailure(fail_t, fail_e)
pass_t = proto_test(MagicMock())
ptr.addSuccess(pass_t)
skip_t = proto_test(MagicMock())
skip_r = proto_test(MagicMock())
ptr.addSkip(skip_t, skip_r)
us_t = proto_test(MagicMock())
ptr.addUnexpectedSuccess(us_t)
self.args.verbose = 0
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.addProtoTestResult(ptr)
self.assertEqual(gtr.errors, [(err_t, err_e)])
self.assertEqual(gtr.expectedFailures, [(ef_t, ef_e)])
self.assertEqual(gtr.failures, [(fail_t, fail_e)])
self.assertEqual(gtr.passing, [pass_t])
self.assertEqual(gtr.skipped, [(skip_t, skip_r)])
self.assertEqual(gtr.unexpectedSuccesses, [us_t])
def test_stopTestRun_processes_message(self):
"""
        stopTestRun() adds the number of processes used to the summary.
"""
self.args.processes = 4
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.startTestRun()
gtr.stopTestRun()
self.assertIn("using 4 processes\n", self.stream.getvalue())
def test_stopTestRun_singular_process_message(self):
"""
        stopTestRun() adds the correct summary when one process is used.
"""
self.args.processes = 1
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.startTestRun()
gtr.stopTestRun()
self.assertIn("using 1 process\n", self.stream.getvalue())
class TestGreenTestResultAdds(unittest.TestCase):
def setUp(self):
self.stream = StringIO()
self.args = copy.deepcopy(default_args)
self.args.verbose = 0
self.gtr = GreenTestResult(self.args, GreenStream(self.stream))
self.gtr._reportOutcome = MagicMock()
def tearDown(self):
del self.stream
del self.gtr
def test_addSuccess(self):
"""
addSuccess() makes the correct calls to other functions.
"""
test = MagicMock()
test.shortDescription.return_value = "a"
test.__str__.return_value = "b"
test = proto_test(test)
self.gtr.addSuccess(test)
self.gtr._reportOutcome.assert_called_with(test, ".", self.gtr.colors.passing)
def test_addSuccess_with_test_time(self):
"""
addSuccess() sets test time to correct value
"""
test = MagicMock()
test.shortDescription.return_value = "a"
test.__str__.return_value = "b"
test = proto_test(test)
self.gtr.addSuccess(test, "0.42")
self.assertEqual(test.test_time, "0.42")
def test_addError(self):
"""
addError() makes the correct calls to other functions.
"""
try:
raise Exception
except:
err = sys.exc_info()
test = proto_test(MagicMock())
err = proto_error(err)
self.gtr.addError(test, err)
self.gtr._reportOutcome.assert_called_with(
test, "E", self.gtr.colors.error, err
)
def test_addError_with_test_time(self):
"""
addError() sets test time to correct value
"""
try:
raise Exception
except:
err = sys.exc_info()
test = proto_test(MagicMock())
err = proto_error(err)
self.gtr.addError(test, err, "0.42")
self.assertEqual(test.test_time, "0.42")
def test_addFailure(self):
"""
addFailure() makes the correct calls to other functions.
"""
err = None
try:
raise Exception
except:
err = sys.exc_info()
test = proto_test(MagicMock())
err = proto_error(err)
self.gtr.addFailure(test, err)
self.gtr._reportOutcome.assert_called_with(
test, "F", self.gtr.colors.failing, err
)
def test_addFailure_with_test_time(self):
"""
addFailure() makes test time the correct value
"""
err = None
try:
raise Exception
except:
err = sys.exc_info()
test = proto_test(MagicMock())
err = proto_error(err)
self.gtr.addFailure(test, err, "0.42")
self.assertEqual(test.test_time, "0.42")
def test_addFailureTwistedSkip(self):
"""
Twisted's practice of calling addFailure() with their skips is detected
and redirected to addSkip()
"""
err = None
try:
raise Exception
except:
err = sys.exc_info()
test = proto_test(MagicMock())
reason = "Twisted is odd"
err = proto_error(err)
err.traceback_lines = ["UnsupportedTrialFeature: ('skip', '{}')".format(reason)]
self.gtr.addFailure(test, err)
self.gtr._reportOutcome.assert_called_with(
test, "s", self.gtr.colors.skipped, reason=reason
)
def test_addSkip(self):
"""
addSkip() makes the correct calls to other functions.
"""
test = proto_test(MagicMock())
reason = "skip reason"
self.gtr.addSkip(test, reason)
self.gtr._reportOutcome.assert_called_with(
test, "s", self.gtr.colors.skipped, reason=reason
)
def test_addSkip_with_test_time(self):
"""
addSkip() makes test time the correct value
"""
test = proto_test(MagicMock())
reason = "skip reason"
self.gtr.addSkip(test, reason, "0.42")
self.assertEqual(test.test_time, "0.42")
def test_addExpectedFailure(self):
"""
addExpectedFailure() makes the correct calls to other functions.
"""
try:
raise Exception
except:
err = sys.exc_info()
test = proto_test(MagicMock())
err = proto_error(err)
self.gtr.addExpectedFailure(test, err)
self.gtr._reportOutcome.assert_called_with(
test, "x", self.gtr.colors.expectedFailure, err
)
    def test_addExpectedFailure_with_test_time(self):
"""
        addExpectedFailure() makes test time the correct value
"""
try:
raise Exception
except:
err = sys.exc_info()
test = proto_test(MagicMock())
err = proto_error(err)
self.gtr.addExpectedFailure(test, err, "0.42")
self.assertEqual(test.test_time, "0.42")
def test_addUnexpectedSuccess(self):
"""
addUnexpectedSuccess() makes the correct calls to other functions.
"""
test = proto_test(MagicMock())
self.gtr.addUnexpectedSuccess(test)
self.gtr._reportOutcome.assert_called_with(
test, "u", self.gtr.colors.unexpectedSuccess
)
def test_addUnexpectedSuccess_with_test_time(self):
"""
        addUnexpectedSuccess() makes test time the correct value
"""
test = proto_test(MagicMock())
self.gtr.addUnexpectedSuccess(test, "0.42")
self.assertEqual(test.test_time, "0.42")
def test_wasSuccessful(self):
"""
wasSuccessful returns what we expect.
"""
self.args.verbose = 1
gtr = GreenTestResult(self.args, GreenStream(self.stream))
self.assertEqual(gtr.wasSuccessful(), False)
gtr.passing.append("anything")
self.assertEqual(gtr.wasSuccessful(), True)
gtr.all_errors.append("anything")
self.assertEqual(gtr.wasSuccessful(), False)
def test_wasSuccessful_expectedFailures(self):
"""
wasSuccessful returns what we expect when we only have expectedFailures
"""
self.args.verbose = 1
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.expectedFailures.append("anything")
self.assertEqual(gtr.wasSuccessful(), True)
def test_wasSuccessful_passing(self):
"""
wasSuccessful returns what we expect when we only have passing tests
"""
self.args.verbose = 1
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.passing.append("anything")
self.assertEqual(gtr.wasSuccessful(), True)
def test_wasSuccessful_skipped(self):
"""
wasSuccessful returns what we expect when we only have skipped tests
"""
self.args.verbose = 1
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.skipped.append("anything")
self.assertEqual(gtr.wasSuccessful(), True)
def test_wasSuccessful_unexpectedSuccesses(self):
"""
wasSuccessful returns what we expect when we only have unexpectedSuccesses
"""
self.args.verbose = 1
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.unexpectedSuccesses.append("anything")
self.assertEqual(gtr.wasSuccessful(), False)
def test_wasSuccessful_coverageFails(self):
"""
wasSuccessful fails if minimum coverage is not met
"""
self.args.minimum_coverage = 50
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.coverage_percent = 49
self.assertEqual(gtr.wasSuccessful(), False)
def test_wasSuccessful_coverageSucceeds(self):
"""
        wasSuccessful succeeds if minimum coverage is met
"""
self.args.minimum_coverage = 50
gtr = GreenTestResult(self.args, GreenStream(self.stream))
gtr.passing.append("anything")
gtr.coverage_percent = 60
self.assertEqual(gtr.wasSuccessful(), True)
class TestGreenTestRunCoverage(unittest.TestCase):
def setUp(self):
self.args = copy.deepcopy(default_args)
cov_file = tempfile.NamedTemporaryFile(delete=False)
cov_file.close()
self.args.cov = coverage(
data_file=cov_file.name,
omit=self.args.omit_patterns,
include=self.args.include_patterns,
)
self.args.cov.start()
self.stream = StringIO()
def tearDown(self):
del self.stream
del self.args
def _outputFromTest(self, args):
class FakeCase(unittest.TestCase):
def runTest(self):
pass
gtr = GreenTestResult(args, GreenStream(self.stream))
gtr.startTestRun()
gtr.startTest(FakeCase())
gtr.stopTestRun()
output = self.stream.getvalue()
return output.split("\n")
def test_coverage(self):
self.args.run_coverage = True
output = self._outputFromTest(self.args)
self.assertIn("Stmts Miss Cover Missing", "\n".join(output))
def test_quiet_coverage(self):
self.args.run_coverage = True
self.args.quiet_coverage = True
output = self._outputFromTest(self.args)
self.assertNotIn("Stmts Miss Cover Missing", "\n".join(output))
|
CleanCut/green
|
green/test/test_result.py
|
Python
|
mit
| 36,981
|
import subprocess
import time
import os
import logging
import select
import fcntl
import platform
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
from . import xrandr
def get_here():
return os.path.abspath(os.path.dirname(__file__))
class Backend:
def __init__(self, use_root=False, geo=None):
args = [os.path.join(
get_here(),
"plugins/__{}__/viewer".format(platform.machine())
)]
if use_root:
args.append("-r")
if geo:
args += ["--geometry", str(geo)]
logger.info("Calling %s", " ".join(args))
self.proc = subprocess.Popen(
args,
# bufsize=0,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE
)
fd = self.proc.stdout.fileno()
fcntl.fcntl(fd, fcntl.F_SETFL,
fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
)
self.proc_bufin = b""
self.percent = 100
        self.geometry = xrandr.Geometry()
        if geo:
            # geometry is optional; only copy dimensions when one was supplied
            self.geometry.width = geo.width
            self.geometry.height = geo.height
def start_cross_fade(self, start, end):
filepath = end.filepath()
logger.debug("Fade_down=%i, fade_up=%i",
start.fade_down, end.fade_up
)
fade_up = max(start.fade_down, end.fade_up)
if filepath:
geo = end.geometry
if geo is None:
geo = self.geometry
filepath = bytes("{0} {1} {2}\n".format(
geo, fade_up, filepath), "utf-8"
)
else:
filepath = bytes("blank {}\n".format(
fade_up), "utf-8"
)
logger.debug("Next image is '%s'", filepath)
self.proc.stdin.write(filepath)
self.proc.stdin.flush()
return True
def get_cross_fade_percent(self):
# ret = select.select([self.proc.stdout],[],[],0)
# print("Select=", str(ret))
# if len(ret[0]) > 0:
data = self.proc.stdout.read()
if data and len(data) > 0:
data = self.proc_bufin + data
while True:
i = data.find(b">")
if i < 0:
self.proc_bufin = data
break
j = data.find(b"<")
percent = int(data[j+1:i].decode("utf8"))
data = data[i+1:]
self.percent = percent
return self.percent
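# --- Illustrative sketch (added for clarity; not part of the upstream file) ---
# The viewer child process frames its cross-fade progress as b"<NN>" tokens on
# stdout; get_cross_fade_percent() above scans for "<"/">" pairs and keeps any
# trailing partial token in self.proc_bufin. The same framing logic, written as
# a self-contained helper (the name is hypothetical):
def _parse_percent_stream(bufin, data):
    """Return (remaining_buffer, last_percent_or_None) for b'<NN>'-framed input."""
    data = bufin + data
    percent = None
    while True:
        i = data.find(b">")
        if i < 0:
            # no complete token left; keep the partial tail for the next read
            return data, percent
        j = data.find(b"<")
        percent = int(data[j + 1:i].decode("utf8"))
        data = data[i + 1:]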
|
peter1010/projector_sequencer
|
sequencer/plugin_viewer.py
|
Python
|
lgpl-3.0
| 2,457
|
# -*- coding: utf-8 -*-
################################################################
# License, author and contributors information in: #
# __openerp__.py file at the root folder of this module. #
################################################################
import controllers
import models
|
markeTIC/mtic-addons
|
competition_website/__init__.py
|
Python
|
agpl-3.0
| 318
|
# -*- coding: utf-8 -*-
from ..helpers import construct_similarity_matrix_via_profiles
class SimilarityMatrix:
def __init__(self, keywords, matrix):
self.keywords = keywords
self.matrix = matrix
def construct_similarity_matrix(relevance_matrix, relevance_threshold=0.2):
"""
    Constructs a keyword similarity matrix from the given relevance_matrix.
    NOTE: the final similarity matrix may not contain all the keywords (only those that are
    highly relevant to at least one of the texts)
    :param relevance_matrix: a relevance matrix object exposing keywords, matrix and max_relevance_score
    :param relevance_threshold: a value in the range [0, 1)
    :return: instance of a class SimilarityMatrix
"""
# create relevance profiles
relevance_profiles = []
keywords = relevance_matrix.keywords
max_score = relevance_matrix.max_relevance_score
# print 'max score: %s' % max_score
real_threshold = relevance_threshold * max_score
relevant_keywords = []
for (i, keyword) in enumerate(keywords):
keyword_row = relevance_matrix.matrix[i]
relevance_profile = set([i for i, val in enumerate(keyword_row) if val >= real_threshold])
if len(relevance_profile) > 0:
# print 'keyword: %s, relevance profile size: %s' % (keyword, len(relevance_profile))
relevant_keywords.append(keyword)
relevance_profiles.append(relevance_profile)
keyword_similarity_matrix = construct_similarity_matrix_via_profiles(relevant_keywords, relevance_profiles)
return SimilarityMatrix(relevant_keywords, keyword_similarity_matrix)
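# --- Illustrative usage sketch (added for clarity; not part of the upstream
# module). construct_similarity_matrix() only needs an object exposing
# `keywords`, `matrix` (one row of per-text relevance scores per keyword) and
# `max_relevance_score`; a minimal hypothetical stand-in shows the flow:
#
#     class FakeRelevanceMatrix(object):
#         def __init__(self, keywords, matrix):
#             self.keywords = keywords
#             self.matrix = matrix
#             self.max_relevance_score = max(max(row) for row in matrix)
#
#     rm = FakeRelevanceMatrix(['python', 'java'], [[0.9, 0.1], [0.0, 0.8]])
#     sm = construct_similarity_matrix(rm, relevance_threshold=0.2)
#     sm.keywords  # only keywords relevant to at least one text survive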
|
luntos/bianalyzer
|
bianalyzer/relevance/similarity_matrix.py
|
Python
|
mit
| 1,579
|
from collections import defaultdict, Counter
from .utils import _
from .card import Group, \
BUSH_WARBLER, CUCKOO, GEESE, PINE_RED_POEM, PLUM_RED_POEM, \
CHERRY_RED_POEM, PEONY_BLUE_POEM, CHRYSANTHEMUM_BLUE_PEOM, MAPLE_BLUE_POEM, \
WISTERIA_RED, IRIS_RED, BUSH_CLOVER_RED, \
CUP, RAIN
class CardList(object):
def __init__(self, *cards):
self.cards = list(cards)
def __str__(self):
return ", ".join(str(card) for card in self.cards)
def __repr__(self):
return "{__class__.__name__}({_cards_str})".format(
__class__=self.__class__,
_cards_str=", ".join(repr(card) for card in self.cards))
def __getitem__(self, index):
return self.cards[index]
def __iadd__(self, card):
self.cards.append(card)
return self
def __add__(self, other):
if isinstance(other, list):
return self.__class__(self.cards + other)
else:
return self.__class__(self.cards + other.cards)
def __len__(self):
return len(self.cards)
def __eq__(self, other):
if other is None:
return False
elif isinstance(other, self.__class__):
return set(self.cards) == set(other.cards) and \
len(self.cards) == len(other.cards)
return NotImplemented
def __hash__(self):
return hash(tuple(sorted(self.cards)))
def __iter__(self):
return iter(self.cards)
def remove(self, card):
self.cards.remove(card)
def pop(self):
return self.cards.pop()
def clear(self):
while len(self.cards) > 0:
self.cards.pop()
def split_by_month(self):
month_cards = defaultdict(list)
for card in self.cards:
month_cards[card.month].append(card)
return month_cards
def split_by_group(self):
group_cards = defaultdict(list)
for card in self.cards:
if isinstance(card.group, tuple):
for group in card.group:
group_cards[group].append(card)
else:
group_cards[card.group].append(card)
return group_cards
class Hand(CardList):
@property
def score(self):
scores = []
month_cards = self.split_by_month()
month_count = Counter(len(cards) for cards in month_cards.values())
if month_count[3] > 0:
scores.append((_('Three cards of a month'), month_count[3]))
if month_count[4] > 0:
scores.append((_('Four cards of a month'), month_count[4]))
return scores
class TakenCards(CardList):
@property
def score(self):
self.scores = []
self.month_cards = self.split_by_month()
self.group_cards = self.split_by_group()
self.score_junk()
self.score_brights()
self.score_animals()
self.score_ribbons()
return self.scores
def score_brights(self):
bright_cards = self.group_cards[Group.BRIGHT]
has_rain = RAIN in bright_cards
if len(bright_cards) == 5:
self.scores.append((_('Five brights'), 15))
elif len(bright_cards) == 4:
self.scores.append((_('Four brights'), 4))
elif len(bright_cards) == 3:
if has_rain:
self.scores.append((_('Three brights with rain'), 2))
else:
self.scores.append((_('Three brights without rain'), 3))
def score_animals(self):
animal_cards = self.group_cards[Group.ANIMAL]
if len(animal_cards) >= 5:
self.scores.append(
(str(len(animal_cards)) + _(' animals'), len(animal_cards)-4))
if all(bird_card in animal_cards
for bird_card in [BUSH_WARBLER, CUCKOO, GEESE]):
self.scores.append((_('Godori'), 5))
def score_ribbons(self):
ribbon_cards = self.group_cards[Group.RIBBON]
has_red_poem = all(
card in ribbon_cards
for card in [PINE_RED_POEM, PLUM_RED_POEM, CHERRY_RED_POEM])
has_blue_poem = all(
card in ribbon_cards
for card in [PEONY_BLUE_POEM, CHRYSANTHEMUM_BLUE_PEOM, MAPLE_BLUE_POEM])
has_red = all(
card in ribbon_cards
for card in [WISTERIA_RED, IRIS_RED, BUSH_CLOVER_RED])
if len(ribbon_cards) >= 5:
self.scores.append(
(str(len(ribbon_cards)) + _(' ribbons'), len(ribbon_cards)-4))
if has_red_poem:
self.scores.append((_('Three red ribbons with poem'), 3))
if has_blue_poem:
self.scores.append((_('Three blue ribbons with poem'), 3))
if has_red:
self.scores.append((_('Three red ribbons'), 3))
def score_junk(self):
junk_cards = self.group_cards[Group.JUNK]
junk_2_cards = self.group_cards[Group.JUNK_2]
total_junk = len(junk_cards) + 2*len(junk_2_cards)
if CUP in self.group_cards[Group.ANIMAL] and total_junk >= 10:
self.group_cards[Group.ANIMAL].remove(CUP)
if total_junk >= 10:
self.scores.append(
(str(len(junk_cards)+len(junk_2_cards)) + _(' junk cards'),
total_junk-9))
class TableCards(CardList):
def get_paired_cards(self, card):
paired_cards = []
for match_card in self.cards:
if match_card.month == card.month:
paired_cards.append(match_card)
return paired_cards
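# --- Illustrative sketch (added for clarity; not part of the upstream module).
# CardList only relies on cards exposing `month` and `group` attributes, so a
# tiny hypothetical stand-in is enough to demonstrate the split helpers:
#
#     from collections import namedtuple
#     FakeCard = namedtuple('FakeCard', 'month group')
#     cards = CardList(FakeCard(1, 'junk'), FakeCard(1, 'ribbon'), FakeCard(2, 'junk'))
#     cards.split_by_month()  # {1: [FakeCard(1, 'junk'), FakeCard(1, 'ribbon')], 2: [...]}
#     cards.split_by_group()  # {'junk': [FakeCard(1, 'junk'), FakeCard(2, 'junk')], 'ribbon': [...]}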
|
reidlindsay/gostop
|
gostop/core/hand.py
|
Python
|
mit
| 5,521
|
"""
TickerHandler
This implements an efficient Ticker which uses a subscription
model to 'tick' subscribed objects at regular intervals.
The ticker mechanism is used by importing and accessing
the instantiated TICKER_HANDLER instance in this module. This
instance is run by the server; it will save its status across
server reloads and be started automatically on boot.
Example:
```python
from evennia.scripts.tickerhandler import TICKER_HANDLER
# tick myobj every 15 seconds
TICKER_HANDLER.add(myobj, 15)
```
The handler will by default try to call a hook `at_tick()`
on the subscribing object. The hook's name can be changed
if the `hook_key` keyword is given to the `add()` method (only
one such alternate name per interval though). The
handler will transparently set up and add new timers behind
the scenes to tick at given intervals, using a TickerPool.
To remove:
```python
TICKER_HANDLER.remove(myobj, 15)
```
The interval must be given since a single object can be subscribed
to many different tickers at the same time.
The TickerHandler's functionality can be overloaded by modifying the
Ticker class and then changing TickerPool and TickerHandler to use the
custom classes
```python
class MyTicker(Ticker):
# [doing custom stuff]
class MyTickerPool(TickerPool):
ticker_class = MyTicker
class MyTickerHandler(TickerHandler):
ticker_pool_class = MyTickerPool
```
If one wants to duplicate TICKER_HANDLER's auto-saving feature in
a custom handler one can make a custom `AT_STARTSTOP_MODULE` entry to
call the handler's `save()` and `restore()` methods when the server reboots.
"""
from twisted.internet.defer import inlineCallbacks
from django.core.exceptions import ObjectDoesNotExist
from evennia.scripts.scripts import ExtendedLoopingCall
from evennia.server.models import ServerConfig
from evennia.utils.logger import log_trace, log_err
from evennia.utils.dbserialize import dbserialize, dbunserialize, pack_dbobj, unpack_dbobj
_GA = object.__getattribute__
_SA = object.__setattr__
_ERROR_ADD_INTERVAL = \
"""TickerHandler: Tried to add a ticker with invalid interval:
obj={obj}, interval={interval}, args={args}, kwargs={kwargs}
store_key={store_key}
Ticker was not added."""
class Ticker(object):
"""
    Represents a repeating task that calls subscribed
    hooks at a set interval. Overload `_callback` to change the
way it operates.
"""
@inlineCallbacks
def _callback(self):
"""
This will be called repeatedly every `self.interval` seconds.
`self.subscriptions` contain tuples of (obj, args, kwargs) for
each subscribing object.
If overloading, this callback is expected to handle all
subscriptions when it is triggered. It should not return
anything and should not traceback on poorly designed hooks.
The callback should ideally work under @inlineCallbacks so it
can yield appropriately.
The _hook_key, which is passed down through the handler via
kwargs is used here to identify which hook method to call.
"""
for store_key, (obj, args, kwargs) in self.subscriptions.items():
hook_key = yield kwargs.pop("_hook_key", "at_tick")
if not obj or not obj.pk:
# object was deleted between calls
self.remove(store_key)
continue
try:
yield _GA(obj, hook_key)(*args, **kwargs)
except ObjectDoesNotExist:
log_trace()
self.remove(store_key)
except Exception:
log_trace()
finally:
# make sure to re-store
kwargs["_hook_key"] = hook_key
def __init__(self, interval):
"""
Set up the ticker
Args:
interval (int): The stepping interval.
"""
self.interval = interval
self.subscriptions = {}
# set up a twisted asynchronous repeat call
self.task = ExtendedLoopingCall(self._callback)
def validate(self, start_delay=None):
"""
Start/stop the task depending on how many subscribers we have
using it.
Args:
            start_delay (int): Time to wait before starting.
"""
subs = self.subscriptions
if None in subs.values():
# clean out objects that may have been deleted
            subs = dict((store_key, obj) for store_key, obj in subs.items() if obj)
self.subscriptions = subs
if self.task.running:
if not subs:
self.task.stop()
elif subs:
self.task.start(self.interval, now=False, start_delay=start_delay)
def add(self, store_key, obj, *args, **kwargs):
"""
Sign up a subscriber to this ticker.
Args:
store_key (str): Unique storage hash for this ticker subscription.
obj (Object): Object subscribing to this ticker.
args (any, optional): Arguments to call the hook method with.
Kwargs:
_start_delay (int): If set, this will be
used to delay the start of the trigger instead of
`interval`.
            _hook_key (str): This carries the name of the hook method
to call. It is passed on as-is from this method.
"""
start_delay = kwargs.pop("_start_delay", None)
self.subscriptions[store_key] = (obj, args, kwargs)
self.validate(start_delay=start_delay)
def remove(self, store_key):
"""
Unsubscribe object from this ticker
Args:
store_key (str): Unique store key.
"""
self.subscriptions.pop(store_key, False)
self.validate()
def stop(self):
"""
Kill the Task, regardless of subscriptions.
"""
self.subscriptions = {}
self.validate()
class TickerPool(object):
"""
This maintains a pool of
`evennia.scripts.scripts.ExtendedLoopingCall` tasks for calling
subscribed objects at given times.
"""
ticker_class = Ticker
def __init__(self):
"""
Initialize the pool.
"""
self.tickers = {}
def add(self, store_key, obj, interval, *args, **kwargs):
"""
Add new ticker subscriber.
Args:
store_key (str): Unique storage hash.
obj (Object): Object subscribing.
interval (int): How often to call the ticker.
args (any, optional): Arguments to send to the hook method.
Kwargs:
_start_delay (int): If set, this will be
used to delay the start of the trigger instead of
`interval`. It is passed on as-is from this method.
            _hook_key (str): This carries the name of the hook method
to call. It is passed on as-is from this method.
"""
if not interval:
log_err(_ERROR_ADD_INTERVAL.format(store_key=store_key, obj=obj,
interval=interval, args=args, kwargs=kwargs))
return
if interval not in self.tickers:
self.tickers[interval] = self.ticker_class(interval)
self.tickers[interval].add(store_key, obj, *args, **kwargs)
def remove(self, store_key, interval):
"""
Remove subscription from pool.
Args:
store_key (str): Unique storage hash.
interval (int): Ticker interval.
Notes:
A given subscription is uniquely identified both
via its `store_key` and its `interval`.
"""
if interval in self.tickers:
self.tickers[interval].remove(store_key)
def stop(self, interval=None):
"""
Stop all scripts in pool. This is done at server reload since
restoring the pool will automatically re-populate the pool.
Args:
interval (int, optional): Only stop tickers with this
interval.
"""
if interval and interval in self.tickers:
self.tickers[interval].stop()
else:
for ticker in self.tickers.values():
ticker.stop()
class TickerHandler(object):
"""
The Tickerhandler maintains a pool of tasks for subscribing
objects to various tick rates. The pool maintains creation
    instructions and re-applies them at a server restart.
"""
ticker_pool_class = TickerPool
def __init__(self, save_name="ticker_storage"):
"""
Initialize handler
save_name (str, optional): The name of the ServerConfig
instance to store the handler state persistently.
"""
self.ticker_storage = {}
self.save_name = save_name
self.ticker_pool = self.ticker_pool_class()
def _store_key(self, obj, interval, idstring=""):
"""
Tries to create a store_key for the object. Returns a tuple
(isdb, store_key) where isdb is a boolean True if obj was a
database object, False otherwise.
Args:
obj (Object): Subscribing object.
interval (int): Ticker interval
idstring (str, optional): Additional separator between
different subscription types.
"""
if hasattr(obj, "db_key"):
# create a store_key using the database representation
objkey = pack_dbobj(obj)
isdb = True
else:
# non-db object, look for a property "key" on it, otherwise
# use its memory location.
try:
objkey = _GA(obj, "key")
except AttributeError:
objkey = id(obj)
isdb = False
        # return isdb and store_key
return isdb, (objkey, interval, idstring)
def save(self):
"""
Save ticker_storage as a serialized string into a temporary
        ServerConf field. Saving is done on the fly; when called by the
        server as it shuts down, the current timer of each ticker is
        saved so it can resume from that point.
"""
if self.ticker_storage:
start_delays = dict((interval, ticker.task.next_call_time())
for interval, ticker in self.ticker_pool.tickers.items())
# update the timers for the tickers
#for (obj, interval, idstring), (args, kwargs) in self.ticker_storage.items():
for store_key, (args, kwargs) in self.ticker_storage.items():
interval = store_key[1]
# this is a mutable, so it's updated in-place in ticker_storage
kwargs["_start_delay"] = start_delays.get(interval, None)
ServerConfig.objects.conf(key=self.save_name,
value=dbserialize(self.ticker_storage))
else:
# make sure we have nothing lingering in the database
ServerConfig.objects.conf(key=self.save_name, delete=True)
def restore(self):
"""
Restore ticker_storage from database and re-initialize the
handler from storage. This is triggered by the server at
restart.
"""
# load stored command instructions and use them to re-initialize handler
ticker_storage = ServerConfig.objects.conf(key=self.save_name)
if ticker_storage:
self.ticker_storage = dbunserialize(ticker_storage)
#print "restore:", self.ticker_storage
for store_key, (args, kwargs) in self.ticker_storage.items():
obj, interval, idstring = store_key
obj = unpack_dbobj(obj)
_, store_key = self._store_key(obj, interval, idstring)
self.ticker_pool.add(store_key, obj, interval, *args, **kwargs)
def add(self, obj, interval, idstring="", hook_key="at_tick", *args, **kwargs):
"""
Add object to tickerhandler
Args:
obj (Object): The object to subscribe to the ticker.
interval (int): Interval in seconds between calling
`hook_key` below.
idstring (str, optional): Identifier for separating
this ticker-subscription from others with the same
interval. Allows for managing multiple calls with
the same time interval
hook_key (str, optional): The name of the hook method
on `obj` to call every `interval` seconds. Defaults to
                `at_tick(*args, **kwargs)`. All hook methods must
always accept *args, **kwargs.
args, kwargs (optional): These will be passed into the
method given by `hook_key` every time it is called.
Notes:
The combination of `obj`, `interval` and `idstring`
together uniquely defines the ticker subscription. They
must all be supplied in order to unsubscribe from it
later.
"""
isdb, store_key = self._store_key(obj, interval, idstring)
if isdb:
self.ticker_storage[store_key] = (args, kwargs)
self.save()
kwargs["_hook_key"] = hook_key
self.ticker_pool.add(store_key, obj, interval, *args, **kwargs)
def remove(self, obj, interval=None, idstring=""):
"""
Remove object from ticker or only remove it from tickers with
a given interval.
Args:
obj (Object): The object subscribing to the ticker.
interval (int, optional): Interval of ticker to remove. If
`None`, all tickers on this object matching `idstring`
will be removed, regardless of their `interval` setting.
idstring (str, optional): Identifier id of ticker to remove.
"""
if interval:
isdb, store_key = self._store_key(obj, interval, idstring)
if isdb:
self.ticker_storage.pop(store_key, None)
self.save()
self.ticker_pool.remove(store_key, interval)
else:
# remove all objects with any intervals
intervals = self.ticker_pool.tickers.keys()
should_save = False
for interval in intervals:
isdb, store_key = self._store_key(obj, interval, idstring)
if isdb:
self.ticker_storage.pop(store_key, None)
should_save = True
self.ticker_pool.remove(store_key, interval)
if should_save:
self.save()
def clear(self, interval=None):
"""
Stop/remove all tickers from handler.
Args:
interval (int): Only stop tickers with this interval.
Notes:
This is the only supported way to kill tickers related to
non-db objects.
"""
self.ticker_pool.stop(interval)
if interval:
            self.ticker_storage = dict((store_key, value)
                                       for store_key, value in self.ticker_storage.items()
                                       if store_key[1] != interval)
else:
self.ticker_storage = {}
self.save()
def all(self, interval=None):
"""
Get all subscriptions.
Args:
interval (int): Limit match to tickers with this interval.
Returns:
tickers (list): If `interval` was given, this is a list of
tickers using that interval.
tickerpool_layout (dict): If `interval` was *not* given,
this is a dict {interval1: [ticker1, ticker2, ...], ...}
"""
if interval is None:
# return dict of all, ordered by interval
return dict((interval, ticker.subscriptions.values())
for interval, ticker in self.ticker_pool.tickers.items())
else:
# get individual interval
ticker = self.ticker_pool.tickers.get(interval, None)
if ticker:
return ticker.subscriptions.values()
# main tickerhandler
TICKER_HANDLER = TickerHandler()
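# --- Illustrative usage sketch (added for clarity; not part of the upstream
# module). Subscriptions are keyed by (obj, interval, idstring), so the same
# triple must be supplied on removal; `myobj` below is hypothetical:
#
#     from evennia.scripts.tickerhandler import TICKER_HANDLER
#
#     TICKER_HANDLER.add(myobj, 30, idstring="regen", hook_key="at_regen_tick")
#     # ... later, to unsubscribe that specific ticker:
#     TICKER_HANDLER.remove(myobj, 30, idstring="regen")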
|
ypwalter/evennia
|
evennia/scripts/tickerhandler.py
|
Python
|
bsd-3-clause
| 16,222
|
# Copyright (c) 2018 NEC, Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo_config import cfg
from oslo_upgradecheck import common_checks
from oslo_upgradecheck import upgradecheck
from stevedore import driver as stevedore_driver
# Need to import to load config
from octavia.common import config # noqa: F401 pylint: disable=unused-import
from octavia.common import constants
from octavia.common import policy
from octavia.controller.worker.v2 import taskflow_jobboard_driver as tsk_driver
from octavia.i18n import _
CONF = cfg.CONF
class Checks(upgradecheck.UpgradeCommands):
"""Contains upgrade checks
Various upgrade checks should be added as separate methods in this class
and added to _upgrade_checks tuple.
"""
def _check_persistence(self):
try:
pers_driver = tsk_driver.MysqlPersistenceDriver()
with pers_driver.get_persistence() as pers:
if pers.engine.dialect.name == 'sqlite':
return upgradecheck.Result(
upgradecheck.Code.WARNING,
_('Persistence database is using sqlite backend. '
                          'Verification required if persistence_connection URL '
'has been set properly.'))
return pers
except Exception:
return upgradecheck.Result(upgradecheck.Code.FAILURE,
_('Failed to connect to persistence '
'backend for AmphoraV2 provider.'))
def _check_jobboard(self, persistence):
try:
jobboard_driver = stevedore_driver.DriverManager(
namespace='octavia.worker.jobboard_driver',
name=CONF.task_flow.jobboard_backend_driver,
invoke_args=(persistence,),
invoke_on_load=True).driver
with jobboard_driver.job_board(persistence) as jb:
if jb.connected:
return upgradecheck.Result(
upgradecheck.Code.SUCCESS,
_('Persistence database and Jobboard backend for '
'AmphoraV2 provider configured.'))
except Exception:
# Return FAILURE later
pass
return upgradecheck.Result(
upgradecheck.Code.FAILURE,
_('Failed to connect to jobboard backend for AmphoraV2 provider. '
'Check jobboard configuration options in task_flow config '
'section.'))
def _check_amphorav2(self):
default_provider_driver = CONF.api_settings.default_provider_driver
enabled_provider_drivers = CONF.api_settings.enabled_provider_drivers
if (default_provider_driver == constants.AMPHORAV2 or
constants.AMPHORAV2 in enabled_provider_drivers):
persistence = self._check_persistence()
if isinstance(persistence, upgradecheck.Result):
return persistence
return self._check_jobboard(persistence)
return upgradecheck.Result(upgradecheck.Code.SUCCESS,
_('AmphoraV2 provider is not enabled.'))
def _check_yaml_policy(self):
if CONF.oslo_policy.policy_file.lower().endswith('yaml'):
return upgradecheck.Result(upgradecheck.Code.SUCCESS,
_('The [oslo_policy] policy_file '
'setting is configured for YAML '
'policy file format.'))
if CONF.oslo_policy.policy_file.lower().endswith('json'):
return upgradecheck.Result(
upgradecheck.Code.WARNING,
_('The [oslo_policy] policy_file setting is configured for '
'JSON policy file format. JSON format policy files have '
'been deprecated by oslo policy. Please use the oslo policy '
'tool to convert your policy file to YAML format. See this '
'patch for more information: '
'https://review.opendev.org/733650'))
return upgradecheck.Result(upgradecheck.Code.FAILURE,
_('Unable to determine the [oslo_policy] '
'policy_file setting file format. '
'Please make sure your policy file is '
'in YAML format and has the suffix of '
'.yaml for the filename. Oslo policy '
'has deprecated the JSON file format.'))
_upgrade_checks = (
(_('AmphoraV2 Check'), _check_amphorav2),
(_('YAML Policy File'), _check_yaml_policy),
(_('Policy File JSON to YAML Migration'),
(common_checks.check_policy_json, {'conf': CONF})),
)
def main():
policy.Policy()
return upgradecheck.main(
CONF, project='octavia', upgrade_command=Checks())
if __name__ == '__main__':
sys.exit(main())
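# --- Illustrative usage note (added for clarity; not part of the upstream
# file). oslo.upgradecheck wires the Checks class above into the standard
# `<project>-status upgrade check` CLI, so operators would typically run:
#
#     octavia-status upgrade check
#
# and inspect the exit code (0 = success, 1 = warning, 2 = failure).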
|
openstack/octavia
|
octavia/cmd/status.py
|
Python
|
apache-2.0
| 5,631
|
#!/usr/bin/env python3
"""
This file is part of pynadc
https://github.com/rmvanhees/pynadc
Defines class ArchiveGosat2 to add new entries to GOSAT-2 SQLite database
Copyright (c) 2019-2021 SRON - Netherlands Institute for Space Research
All Rights Reserved
License: BSD-3-Clause
"""
import argparse
import sqlite3
from pathlib import Path
from time import gmtime, strftime
import h5py
def cleanup_string(dset):
"""
Returns bytes as string
"""
return dset[:].tobytes().decode('ascii').rstrip('\0')
# --------------------------------------------------
def cre_sqlite_gosat2_db(dbname):
"""
function to define database for GOSAT-2 database and tables
"""
con = sqlite3.connect(dbname)
cur = con.cursor()
cur.execute('PRAGMA foreign_keys = ON')
cur.execute(
"""create table rootPaths (
pathID integer PRIMARY KEY AUTOINCREMENT,
hostName text NOT NULL,
localPath text NOT NULL,
nfsPath text NOT NULL)
""")
cur.execute(
"""create table swir_l1b (
swirID integer PRIMARY KEY AUTOINCREMENT,
name char(50) NOT NULL UNIQUE,
pathID integer NOT NULL,
passNumber smallint NOT NULL,
sceneNumber smallint NOT NULL,
operationMode char(4) NOT NULL,
algorithmVersion char(3) NOT NULL,
paramVersion char(3) NOT NULL,
dateTimeStart datetime NOT NULL default '0000-00-00T00:00:00',
dateTimeEnd datetime NOT NULL default '0000-00-00T00:00:00',
acquisitionDate datetime NOT NULL default '0000-00-00T00:00:00',
creationDate datetime NOT NULL default '0000-00-00T00:00:00',
receiveDate datetime NOT NULL default '0000-00-00T00:00:00',
numSoundings smallint NOT NULL,
fileSize integer NOT NULL,
FOREIGN KEY(pathID) REFERENCES rootPaths(pathID))
""")
cur.execute('create index dateTimeStartIndex1 on swir_l1b(dateTimeStart)')
cur.execute('create index receiveDateIndex1 on swir_l1b(receiveDate)')
cur.execute(
"""create table tir_l1b (
tirID integer PRIMARY KEY AUTOINCREMENT,
name char(50) NOT NULL UNIQUE,
pathID integer NOT NULL,
passNumber smallint NOT NULL,
sceneNumber smallint NOT NULL,
operationMode char(4) NOT NULL,
algorithmVersion char(3) NOT NULL,
paramVersion char(3) NOT NULL,
dateTimeStart datetime NOT NULL default '0000-00-00T00:00:00',
dateTimeEnd datetime NOT NULL default '0000-00-00T00:00:00',
acquisitionDate datetime NOT NULL default '0000-00-00T00:00:00',
creationDate datetime NOT NULL default '0000-00-00T00:00:00',
receiveDate datetime NOT NULL default '0000-00-00T00:00:00',
numSoundings smallint NOT NULL,
fileSize integer NOT NULL,
FOREIGN KEY(pathID) REFERENCES rootPaths(pathID))
""")
cur.execute('create index dateTimeStartIndex2 on tir_l1b(dateTimeStart)')
cur.execute('create index receiveDateIndex2 on tir_l1b(receiveDate)')
cur.execute(
"""create table common_l1b (
commonID integer PRIMARY KEY AUTOINCREMENT,
name char(50) NOT NULL UNIQUE,
pathID integer NOT NULL,
passNumber smallint NOT NULL,
sceneNumber smallint NOT NULL,
operationMode char(4) NOT NULL,
algorithmVersion char(3) NOT NULL,
paramVersion char(3) NOT NULL,
dateTimeStart datetime NOT NULL default '0000-00-00T00:00:00',
dateTimeEnd datetime NOT NULL default '0000-00-00T00:00:00',
acquisitionDate datetime NOT NULL default '0000-00-00T00:00:00',
creationDate datetime NOT NULL default '0000-00-00T00:00:00',
receiveDate datetime NOT NULL default '0000-00-00T00:00:00',
numSoundings smallint NOT NULL,
fileSize integer NOT NULL,
FOREIGN KEY(pathID) REFERENCES rootPaths(pathID))
""")
    cur.execute('create index dateTimeStartIndex3 on common_l1b(dateTimeStart)')
    cur.execute('create index receiveDateIndex3 on common_l1b(receiveDate)')
cur.close()
con.commit()
con.close()
# --------------------------------------------------
def sql_write_basedirs(dbname):
"""
write names of directories where to find the GOSAT-2 products
"""
list_paths = [
{"host": 'shogun',
"path": '/array/slot2B/GOSAT-2/FTS/L1B',
"nfs": '/nfs/GOSAT2/FTS/L1B'}
]
str_sql = ('insert into rootPaths values'
'(NULL, \'%(host)s\',\'%(path)s\',\'%(nfs)s\')')
con = sqlite3.connect(dbname)
cur = con.cursor()
for dict_path in list_paths:
cur.execute(str_sql % dict_path)
cur.close()
con.commit()
con.close()
# --------------------------------------------------
class ArchiveGosat2():
"""
class to archive GOSAT-2 products
"""
def __init__(self, db_name='./sron_gosat2.db'):
"""
initialize the class
"""
self.dbname = db_name
if not Path(db_name).is_file():
cre_sqlite_gosat2_db(db_name)
sql_write_basedirs(db_name)
# -------------------------
@staticmethod
def rd_fts(flname):
"""
read meta data from a GOSAT-2 FTS L1B product
"""
gosatfl = Path(flname)
stat = gosatfl.stat()
dict_gosat = {}
dict_gosat['fileName'] = gosatfl.name
dict_gosat['filePath'] = str(gosatfl.parent)
dict_gosat['acquisitionDate'] = strftime("%FT%T",
gmtime(stat.st_mtime))
dict_gosat['passNumber'] = int(dict_gosat['fileName'][23:26])
dict_gosat['sceneNumber'] = int(dict_gosat['fileName'][26:28])
dict_gosat['operationMode'] = dict_gosat['fileName'][36:40]
dict_gosat['productVersion'] = dict_gosat['fileName'][40:46]
dict_gosat['receiveDate'] = strftime("%FT%T",
gmtime(stat.st_ctime))
dict_gosat['fileSize'] = stat.st_size
with h5py.File(flname, mode='r') as fid:
if '/Metadata' in fid:
grp = fid['/Metadata']
dset = grp['processingDate']
dict_gosat['creationDate'] = cleanup_string(dset)
dset = grp['sensorName']
dict_gosat['sensorName'] = cleanup_string(dset)
dset = grp['algorithmVersion']
dict_gosat['algorithmVersion'] = cleanup_string(dset)
dset = grp['parameterVersion']
dict_gosat['paramVersion'] = cleanup_string(dset)
dset = grp['operationMode']
dict_gosat['operationMode'] = cleanup_string(dset)
if 'startDate' in grp:
dset = grp['startDate']
dict_gosat['dateTimeStart'] = cleanup_string(dset)
dset = grp['endDate']
dict_gosat['dateTimeEnd'] = cleanup_string(dset)
else:
if grp['startDateSWIR'][:] == b'-':
dset = grp['startDateTIR']
elif grp['startDateTIR'][:] == b'-':
dset = grp['startDateSWIR']
elif grp['startDateSWIR'][:] < grp['startDateTIR'][:]:
dset = grp['startDateSWIR']
else:
dset = grp['startDateTIR']
dict_gosat['dateTimeStart'] = cleanup_string(dset)
if grp['endDateSWIR'][:] == b'-':
dset = grp['endDateTIR']
elif grp['endDateTIR'][:] == b'-':
dset = grp['endDateSWIR']
elif grp['endDateSWIR'][:] > grp['endDateTIR'][:]:
dset = grp['endDateSWIR']
else:
dset = grp['endDateTIR']
dict_gosat['dateTimeEnd'] = cleanup_string(dset)
else:
return {}
if '/SoundingGeometry' in grp:
grp = fid['/SoundingGeometry']
dict_gosat['numSoundings'] = len(grp['latitude'])
else:
grp = fid['Telemetry_CAM']
dict_gosat['numSoundings'] = grp['numSoundings'][0]
return dict_gosat
# -------------------------
def check_entry(self, gosatfl, verbose=False) -> bool:
"""
check if entry is already present in database
"""
if gosatfl[6:11] != 'TFTS2':
raise ValueError('expect an FTS L1B product')
if gosatfl[29:32] == '1BS':
table = 'swir_l1b'
query_str = 'select swirID from %s where name=\'%s\''
elif gosatfl[29:32] == '1BT':
table = 'tir_l1b'
query_str = 'select tirID from %s where name=\'%s\''
elif gosatfl[29:32] == '1BC':
table = 'common_l1b'
query_str = 'select commonID from %s where name=\'%s\''
else:
raise ValueError('expect GOSAT-2 band SWIR, TIR or COMMON')
if verbose:
print(query_str)
con = sqlite3.connect(self.dbname)
cur = con.cursor()
cur.execute(query_str % (table, gosatfl))
row = cur.fetchone()
cur.close()
con.close()
if row is None:
return False
return True
# -------------------------
def remove_entry(self, gosatfl, verbose=False) -> None:
"""
remove entry from database
"""
if gosatfl[6:11] != 'TFTS2':
raise ValueError('expect an FTS L1B product')
if gosatfl[29:32] == '1BS':
table = 'swir_l1b'
query_str = 'select swirID from %s where name=\'%s\''
remove_str = 'delete from %s where swirID=%d'
elif gosatfl[29:32] == '1BT':
table = 'tir_l1b'
query_str = 'select tirID from %s where name=\'%s\''
remove_str = 'delete from %s where tirID=%d'
elif gosatfl[29:32] == '1BC':
table = 'common_l1b'
query_str = 'select commonID from %s where name=\'%s\''
remove_str = 'delete from %s where commonID=%d'
else:
raise ValueError('expect GOSAT-2 band SWIR, TIR or COMMON')
if verbose:
print(query_str)
con = sqlite3.connect(self.dbname)
cur = con.cursor()
cur.execute('PRAGMA foreign_keys = ON')
cur.execute(query_str % (table, gosatfl))
row = cur.fetchone()
if row is not None:
cur.execute(remove_str % (table, row[0]))
cur.close()
con.commit()
con.close()
# -------------------------
def add_entry(self, gosatfl_str, debug=False):
"""
add new entry to SQLite database
"""
str_path_sql = ('select pathID from rootPaths where'
' localPath == \'%s\' or nfsPath == \'%s\'')
gosatfl = Path(gosatfl_str)
if gosatfl.name[0:11] != 'GOSAT2TFTS2':
raise ValueError('Invalid sensor name: ', gosatfl.name[6:11])
obs_name = None
if gosatfl.name[29:32] == '1BS':
table = 'swir_l1b'
obs_name = 'SWIR_{}'
elif gosatfl.name[29:32] == '1BT':
table = 'tir_l1b'
obs_name = 'TIR_{}'
elif gosatfl.name[29:32] == '1BC':
table = 'common_l1b'
obs_name = 'COMMON_{}'
else:
        raise ValueError('expect GOSAT-2 band SWIR, TIR or COMMON')
dict_gosat = self.rd_fts(gosatfl_str)
buffer = str(gosatfl.parent)
if dict_gosat['operationMode'][3] == 'D':
obs_name = obs_name.format('DAY')
elif dict_gosat['operationMode'][3] == 'N':
obs_name = obs_name.format('NIGHT')
else:
raise ValueError('expect GOSAT-2 band to contain DAY or NIGHT')
indx = buffer.find(obs_name)
basedir = buffer[0:indx-1]
str_sql = 'insert into {} values'.format(table)
str_sql += '(NULL,\'%(fileName)s\',%(pathID)d'\
',%(passNumber)d,%(sceneNumber)d'\
',\'%(operationMode)s\''\
',\'%(algorithmVersion)s\',\'%(paramVersion)s\''\
',\'%(dateTimeStart)s\',\'%(dateTimeEnd)s\''\
',\'%(acquisitionDate)s\',\'%(creationDate)s\''\
',\'%(receiveDate)s\',%(numSoundings)d'\
',%(fileSize)d)'
con = sqlite3.connect(self.dbname)
cur = con.cursor()
cur.execute('PRAGMA foreign_keys = ON')
# obtain pathID from table base-dirs
cur.execute(str_path_sql % (basedir, basedir))
row = cur.fetchone()
if row is not None:
dict_gosat['pathID'] = row[0]
else:
dict_gosat['pathID'] = 0
if debug:
print(repr(str_sql % dict_gosat))
return
# do actual query
cur.execute(str_sql % dict_gosat)
cur.close()
con.commit()
con.close()
# - main code --------------------------------------------------
def main():
"""
main function
"""
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true', default=False,
help='show what will be done, but do nothing')
parser.add_argument('--remove', action='store_true', default=False,
help='remove SQL data of INPUT_FILE from database')
parser.add_argument('--replace', action='store_true', default=False,
help='replace SQL data of INPUT_FILE in database')
    parser.add_argument('--dbname', dest='dbname', default='sron_gosat2.db',
help='name of GOSAT/SQLite database')
parser.add_argument('input_file', nargs='?', type=str,
help='read from INPUT_FILE')
args = parser.parse_args()
if not h5py.is_hdf5(args.input_file):
        print('Info: %s is not an HDF5/GOSAT-2 product' % args.input_file)
return
gosatdb = ArchiveGosat2(args.dbname)
gosatfl = Path(args.input_file).name
# Check if product is already in database
if not args.debug:
if args.remove or args.replace:
gosatdb.remove_entry(gosatfl)
if args.remove:
print('Info: {} is removed from database'.format(gosatfl))
return
if gosatdb.check_entry(gosatfl):
print('Info: {} is already stored in database'.format(gosatfl))
return
gosatdb.add_entry(args.input_file, debug=args.debug)
if __name__ == '__main__':
main()
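# --- Illustrative usage note (added for clarity; not part of the upstream
# script). Typical invocations against a GOSAT-2 FTS L1B product (the file
# name below is a hypothetical placeholder):
#
#     python add_entry_gosat2.py --dbname sron_gosat2.db GOSAT2TFTS2...h5
#     python add_entry_gosat2.py --replace GOSAT2TFTS2...h5   # re-ingest entry
#     python add_entry_gosat2.py --remove GOSAT2TFTS2...h5    # drop entry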
|
rmvanhees/pynadc
|
scripts/add_entry_gosat2.py
|
Python
|
bsd-3-clause
| 14,977
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from jinja2.exceptions import UndefinedError
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
class LookupModule(LookupBase):
def _lookup_variables(self, terms, variables):
results = []
for x in terms:
try:
intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader, fail_on_undefined=True)
except UndefinedError as e:
raise AnsibleUndefinedVariable("One of the nested variables was undefined. The error was: %s" % e)
results.append(intermediate)
return results
def run(self, terms, variables=None, **kwargs):
terms = self._lookup_variables(terms, variables)
my_list = terms[:]
my_list.reverse()
if len(my_list) == 0:
raise AnsibleError("with_nested requires at least one element in the nested list")
result = my_list.pop()
while len(my_list) > 0:
result2 = self._combine(result, my_list.pop())
result = result2
new_result = []
for x in result:
new_result.append(self._flatten(x))
return new_result
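# --- Illustrative sketch (added for clarity; not part of the upstream plugin).
# run() builds the cartesian product of the nested term lists via repeated
# _combine()/_flatten() calls; semantically it matches itertools.product:
#
#     from itertools import product
#     terms = [[1, 2], ['a', 'b']]
#     [list(combo) for combo in product(*terms)]
#     # -> [[1, 'a'], [1, 'b'], [2, 'a'], [2, 'b']]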
|
kaarolch/ansible
|
lib/ansible/plugins/lookup/nested.py
|
Python
|
gpl-3.0
| 2,100
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.common import fixed_network
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from tempest import test
CONF = config.CONF
class ListServerFiltersTestJSON(base.BaseV2ComputeTest):
@classmethod
def setup_credentials(cls):
cls.set_network_resources(network=True, subnet=True, dhcp=True)
super(ListServerFiltersTestJSON, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(ListServerFiltersTestJSON, cls).setup_clients()
cls.client = cls.servers_client
@classmethod
def resource_setup(cls):
super(ListServerFiltersTestJSON, cls).resource_setup()
# Check to see if the alternate image ref actually exists...
images_client = cls.compute_images_client
images = images_client.list_images()['images']
if cls.image_ref != cls.image_ref_alt and \
any([image for image in images
if image['id'] == cls.image_ref_alt]):
cls.multiple_images = True
else:
cls.image_ref_alt = cls.image_ref
# Do some sanity checks here. If one of the images does
# not exist, fail early since the tests won't work...
try:
cls.compute_images_client.show_image(cls.image_ref)
except lib_exc.NotFound:
raise RuntimeError("Image %s (image_ref) was not found!" %
cls.image_ref)
try:
cls.compute_images_client.show_image(cls.image_ref_alt)
except lib_exc.NotFound:
raise RuntimeError("Image %s (image_ref_alt) was not found!" %
cls.image_ref_alt)
network = cls.get_tenant_network()
if network:
cls.fixed_network_name = network.get('name')
else:
cls.fixed_network_name = None
network_kwargs = fixed_network.set_networks_kwarg(network)
cls.s1_name = data_utils.rand_name(cls.__name__ + '-instance')
cls.s1 = cls.create_test_server(name=cls.s1_name,
wait_until='ACTIVE',
**network_kwargs)
cls.s2_name = data_utils.rand_name(cls.__name__ + '-instance')
cls.s2 = cls.create_test_server(name=cls.s2_name,
image_id=cls.image_ref_alt,
wait_until='ACTIVE')
cls.s3_name = data_utils.rand_name(cls.__name__ + '-instance')
cls.s3 = cls.create_test_server(name=cls.s3_name,
flavor=cls.flavor_ref_alt,
wait_until='ACTIVE')
@test.idempotent_id('05e8a8e7-9659-459a-989d-92c2f501f4ba')
@decorators.skip_unless_attr('multiple_images', 'Only one image found')
def test_list_servers_filter_by_image(self):
# Filter the list of servers by image
params = {'image': self.image_ref}
body = self.client.list_servers(**params)
servers = body['servers']
self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
@test.idempotent_id('573637f5-7325-47bb-9144-3476d0416908')
def test_list_servers_filter_by_flavor(self):
# Filter the list of servers by flavor
params = {'flavor': self.flavor_ref_alt}
body = self.client.list_servers(**params)
servers = body['servers']
self.assertNotIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
@test.idempotent_id('9b067a7b-7fee-4f6a-b29c-be43fe18fc5a')
def test_list_servers_filter_by_server_name(self):
# Filter the list of servers by server name
params = {'name': self.s1_name}
body = self.client.list_servers(**params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
@test.idempotent_id('ca78e20e-fddb-4ce6-b7f7-bcbf8605e66e')
def test_list_servers_filter_by_server_status(self):
# Filter the list of servers by server status
params = {'status': 'active'}
body = self.client.list_servers(**params)
servers = body['servers']
self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
@test.idempotent_id('451dbbb2-f330-4a9f-b0e1-5f5d2cb0f34c')
def test_list_servers_filter_by_shutoff_status(self):
# Filter the list of servers by server shutoff status
params = {'status': 'shutoff'}
self.client.stop_server(self.s1['id'])
waiters.wait_for_server_status(self.client, self.s1['id'],
'SHUTOFF')
body = self.client.list_servers(**params)
self.client.start_server(self.s1['id'])
waiters.wait_for_server_status(self.client, self.s1['id'],
'ACTIVE')
servers = body['servers']
self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s3['id'], map(lambda x: x['id'], servers))
@test.idempotent_id('614cdfc1-d557-4bac-915b-3e67b48eee76')
def test_list_servers_filter_by_limit(self):
# Verify only the expected number of servers are returned
params = {'limit': 1}
servers = self.client.list_servers(**params)
self.assertEqual(1, len([x for x in servers['servers'] if 'id' in x]))
@test.idempotent_id('b1495414-2d93-414c-8019-849afe8d319e')
def test_list_servers_filter_by_zero_limit(self):
# Verify only the expected number of servers are returned
params = {'limit': 0}
servers = self.client.list_servers(**params)
self.assertEqual(0, len(servers['servers']))
@test.idempotent_id('37791bbd-90c0-4de0-831e-5f38cba9c6b3')
def test_list_servers_filter_by_exceed_limit(self):
# Verify only the expected number of servers are returned
params = {'limit': 100000}
servers = self.client.list_servers(**params)
all_servers = self.client.list_servers()
self.assertEqual(len([x for x in all_servers['servers'] if 'id' in x]),
len([x for x in servers['servers'] if 'id' in x]))
@test.idempotent_id('b3304c3b-97df-46d2-8cd3-e2b6659724e7')
@decorators.skip_unless_attr('multiple_images', 'Only one image found')
def test_list_servers_detailed_filter_by_image(self):
# Filter the detailed list of servers by image
params = {'image': self.image_ref}
body = self.client.list_servers(detail=True, **params)
servers = body['servers']
self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
@test.idempotent_id('80c574cc-0925-44ba-8602-299028357dd9')
def test_list_servers_detailed_filter_by_flavor(self):
# Filter the detailed list of servers by flavor
params = {'flavor': self.flavor_ref_alt}
body = self.client.list_servers(detail=True, **params)
servers = body['servers']
self.assertNotIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
@test.idempotent_id('f9eb2b70-735f-416c-b260-9914ac6181e4')
def test_list_servers_detailed_filter_by_server_name(self):
# Filter the detailed list of servers by server name
params = {'name': self.s1_name}
body = self.client.list_servers(detail=True, **params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
@test.idempotent_id('de2612ab-b7dd-4044-b0b1-d2539601911f')
def test_list_servers_detailed_filter_by_server_status(self):
# Filter the detailed list of servers by server status
params = {'status': 'active'}
body = self.client.list_servers(detail=True, **params)
servers = body['servers']
test_ids = [s['id'] for s in (self.s1, self.s2, self.s3)]
self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
self.assertEqual(['ACTIVE'] * 3, [x['status'] for x in servers
if x['id'] in test_ids])
@test.idempotent_id('e9f624ee-92af-4562-8bec-437945a18dcb')
def test_list_servers_filtered_by_name_wildcard(self):
# List all servers that contains '-instance' in name
params = {'name': '-instance'}
body = self.client.list_servers(**params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertIn(self.s3_name, map(lambda x: x['name'], servers))
# Let's take random part of name and try to search it
part_name = self.s1_name[6:-1]
params = {'name': part_name}
body = self.client.list_servers(**params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
@test.idempotent_id('24a89b0c-0d55-4a28-847f-45075f19b27b')
def test_list_servers_filtered_by_name_regex(self):
# list of regex that should match s1, s2 and s3
regexes = ['^.*\-instance\-[0-9]+$', '^.*\-instance\-.*$']
for regex in regexes:
params = {'name': regex}
body = self.client.list_servers(**params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertIn(self.s3_name, map(lambda x: x['name'], servers))
# Let's take random part of name and try to search it
part_name = self.s1_name[-10:]
params = {'name': part_name}
body = self.client.list_servers(**params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
@test.idempotent_id('43a1242e-7b31-48d1-88f2-3f72aa9f2077')
def test_list_servers_filtered_by_ip(self):
# Filter servers by ip
# Here should be listed 1 server
if not self.fixed_network_name:
msg = 'fixed_network_name needs to be configured to run this test'
raise self.skipException(msg)
self.s1 = self.client.show_server(self.s1['id'])['server']
for addr_spec in self.s1['addresses'][self.fixed_network_name]:
ip = addr_spec['addr']
if addr_spec['version'] == 4:
params = {'ip': ip}
break
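        # Note: the for/else construct here runs the else block only when the
        # loop finished without executing break, i.e. no IPv4 address was found.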
else:
msg = "Skipped until bug 1450859 is resolved"
raise self.skipException(msg)
body = self.client.list_servers(**params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
@decorators.skip_because(bug="1540645")
@test.idempotent_id('a905e287-c35e-42f2-b132-d02b09f3654a')
def test_list_servers_filtered_by_ip_regex(self):
# Filter servers by regex ip
# List all servers filtered by part of ip address.
# Here should be listed all servers
if not self.fixed_network_name:
msg = 'fixed_network_name needs to be configured to run this test'
raise self.skipException(msg)
self.s1 = self.client.show_server(self.s1['id'])['server']
addr_spec = self.s1['addresses'][self.fixed_network_name][0]
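        # Keep only an address prefix (drop the last 3 characters) so the
        # ip/ip6 filter matches on part of the address, as the test name suggests.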
ip = addr_spec['addr'][0:-3]
if addr_spec['version'] == 4:
params = {'ip': ip}
else:
params = {'ip6': ip}
# capture all servers in case something goes wrong
all_servers = self.client.list_servers(detail=True)
body = self.client.list_servers(**params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers),
"%s not found in %s, all servers %s" %
(self.s1_name, servers, all_servers))
self.assertIn(self.s2_name, map(lambda x: x['name'], servers),
"%s not found in %s, all servers %s" %
(self.s2_name, servers, all_servers))
self.assertIn(self.s3_name, map(lambda x: x['name'], servers),
"%s not found in %s, all servers %s" %
(self.s3_name, servers, all_servers))
@test.idempotent_id('67aec2d0-35fe-4503-9f92-f13272b867ed')
def test_list_servers_detailed_limit_results(self):
# Verify only the expected number of detailed results are returned
params = {'limit': 1}
servers = self.client.list_servers(detail=True, **params)
self.assertEqual(1, len(servers['servers']))
|
HybridF5/tempest_debug
|
tempest/api/compute/servers/test_list_server_filters.py
|
Python
|
apache-2.0
| 14,988
|
import logging
from collections import deque
import six
import flanker.addresslib.address
from flanker import _email
from flanker.mime.message.headers import parametrized
from flanker.mime.message.utils import to_utf8
_log = logging.getLogger(__name__)
_ADDRESS_HEADERS = ('From', 'To', 'Delivered-To', 'Cc', 'Bcc', 'Reply-To', 'Sender')
def to_mime(key, value):
if not value:
return ''
    if isinstance(value, list):
return '; '.join(encode(key, v) for v in value)
return encode(key, value)
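# Usage sketch (illustrative values): to_mime('To', [u'a@example.com',
# u'b@example.com']) encodes each element and joins the results with '; ';
# a falsy value returns ''.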
def encode(name, value):
try:
if parametrized.is_parametrized(name, value):
value, params = value
return _encode_parametrized(name, value, params)
return _encode_unstructured(name, value)
except Exception:
_log.exception('Failed to encode %s %s' % (name, value))
raise
def _encode_unstructured(name, value):
try:
return _email.encode_header(name, value.encode('ascii'), 'ascii')
except (UnicodeEncodeError, UnicodeDecodeError):
if _is_address_header(name, value):
return _encode_address_header(name, value)
return _email.encode_header(name, to_utf8(value), 'utf-8')
def _encode_address_header(name, value):
out = deque()
for addr in flanker.addresslib.address.parse_list(value):
if addr.requires_non_ascii():
encoded_addr = addr.to_unicode()
if six.PY2:
encoded_addr = encoded_addr.encode('utf-8')
else:
encoded_addr = addr.full_spec()
out.append(encoded_addr)
return '; '.join(out)
def _encode_parametrized(key, value, params):
if params:
params = [_encode_param(key, n, v) for n, v in six.iteritems(params)]
return value + '; ' + ('; '.join(params))
return value
def _encode_param(key, name, value):
try:
if six.PY2:
value = value.encode('ascii')
return _email.format_param(name, value)
except Exception:
value = value.encode('utf-8')
encoded_param = _email.encode_header(key, value, 'utf-8')
return _email.format_param(name, encoded_param)
def _is_address_header(key, val):
return key in _ADDRESS_HEADERS and '@' in val
|
mailgun/flanker
|
flanker/mime/message/headers/encoding.py
|
Python
|
apache-2.0
| 2,245
|
# coding: utf-8
"""Generate HDF5 file with all catchment model input data.
Sources are DBF and some large text files.
"""
import sys
import numpy
import pandas
import tables
from crosswater.read_config import read_config
from crosswater.tools import dbflib
from crosswater.tools.hdf5_helpers import find_ids
from crosswater.tools.time_helper import ProgressDisplay
def read_dbf_cols(dbf_file_name, col_names=None):
"""Returns a dictionary with column names as keys and lists as values.
Returns dict with all columns if `col_names` is false.
dbf_file_name is a string
col_names is a list containing strings
"""
dbf_file = dbflib.DbfReader(dbf_file_name)
dbf_file.read_all()
if not col_names:
return dbf_file.data
res = {key: dbf_file.data[key] for key in col_names}
return res
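# Example (hypothetical file name):
#   read_dbf_cols('catchments.dbf', ['WSO1_ID', 'AREA'])
#   -> {'WSO1_ID': [100, 101, ...], 'AREA': [12.5, 3.4, ...]}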
def read_dbf_col(dbf_file_name, col_name):
"""Retruns all column entries for a given column name.
"""
return read_dbf_cols(dbf_file_name, [col_name])[col_name]
def get_value_by_id(dbf_file_name, col_name, converter=1, ids=None):
"""Returns a dict catchment-id: value
converter for units with default value 1
ids to filter (e.g. only Strahler)
"""
data = read_dbf_cols(dbf_file_name, ['WSO1_ID', col_name])
res = {id_: value * converter for id_, value in
zip(data['WSO1_ID'], data[col_name])}
if ids:
res = {id_: value for id_, value in res.items() if id_ in ids}
return res
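# Example (hypothetical file name): get_value_by_id('landcover.dbf', 'CROP_A',
# converter=1e6) maps each WSO1_ID to its CROP_A value times 1e6, e.g. for a
# km**2 to m**2 unit conversion.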
def get_tot_areas(dbf_file_name, ids=None):
"""Returns a dict with catchment ids as keys and areas as values."""
return get_value_by_id(dbf_file_name, 'AREA', ids=ids)
def get_strahler(dbf_file_name, ids=None):
"""Returns a dict with catchment ids as keys and strahler as values."""
return get_value_by_id(dbf_file_name, 'STRAHLER', ids=ids)
def get_appl_areas(dbf_file_name, crops, ids=None):
"""Returns a dict with catchment ids as keys and total areas as values.
Crop areas are summed up to total area.
"""
res = get_value_by_id(dbf_file_name, crops[0], converter=1e6, ids=ids)
for crop in crops[1:]:
res_crop = get_value_by_id(dbf_file_name, crop, converter=1e6, ids=ids)
        res = {id_: res.get(id_, 0) + res_crop.get(id_, 0) for id_ in set(res) & set(res_crop)}
return res
def get_appl_rates(dbf_file_name, ids=None):
"""Returns a dict with catchments ids as keys and application rate as
values.
"""
return get_value_by_id(dbf_file_name, 'APPL_RATES', ids=ids)
def filter_strahler_lessthan(strahler, tot_areas, appl_areas, appl_rates, strahler_limit=20):
"""Use only catchments where STRAHLER is <= limit.
"""
def apply_filter(old_values):
"""Filter for ids.
"""
return {id_: value for id_, value in old_values.items() if id_ in ids}
ids = {id_ for id_, value in strahler.items() if value <= strahler_limit}
return (apply_filter(strahler), apply_filter(tot_areas),
apply_filter(appl_areas), apply_filter(appl_rates))
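# Example (illustrative): with strahler_limit=3, only catchments whose
# STRAHLER value is at most 3 remain in all four returned dictionaries.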
class Parameters(tables.IsDescription):
# pylint: disable=too-few-public-methods
"""Table layout for parameters."""
name = tables.StringCol(100)
value = tables.Float64Col()
unit = tables.StringCol(20)
def create_hdf_file(file_name, tot_areas, appl_areas, appl_rates):
"""Create HDF5 file and add areas as parameters."""
ids = sorted(tot_areas.keys())
h5_file = tables.open_file(file_name, mode='w',
title='Input data for catchment models.')
for id_ in ids:
# create new group (where, name, title)
group = h5_file.create_group('/', 'catch_{}'.format(id_),
'catchment {}'.format(id_))
# create new table (where, name, description, title)
table = h5_file.create_table(group, 'parameters', Parameters,
'constant parameters')
tot_area = tot_areas[id_]
appl_area = appl_areas[id_]
appl_rate = appl_rates[id_]
# fill parameter table by rows
row = table.row
row['name'] = 'A_tot'
row['value'] = tot_area
row['unit'] = 'm**2'
row.append()
row['name'] = 'A_appl'
row['value'] = appl_area
row['unit'] = 'm**2'
row.append()
row['name'] = 'R_appl'
row['value'] = appl_rate
row['unit'] = 'g/m**2'
row.append()
h5_file.close()
def add_input_tables(h5_file_name, t_file_name, p_file_name, q_file_name, qloc_file_name, timesteps_per_day,
batch_size=None, total=365 * 24):
"""Add input with pandas.
"""
# pylint: disable=too-many-locals
filters = tables.Filters(complevel=5, complib='zlib')
h5_file = tables.open_file(h5_file_name, mode='a')
get_child = h5_file.root._f_get_child # pylint: disable=protected-access
all_ids = ids = find_ids(h5_file)
usecols = None
if batch_size is None:
batch_size = sys.maxsize
if batch_size < len(all_ids):
usecols = True
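    # usecols doubles as a flag: when truthy, each batch re-reads the CSV
    # files restricted to the columns for that batch's ids.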
counter = 0
total_ids = len(all_ids)
prog = ProgressDisplay(total_ids)
# pylint: disable=undefined-loop-variable
while all_ids:
ids = all_ids[-batch_size:]
all_ids = all_ids[:-batch_size]
if usecols:
usecols = ids
temp = pandas.read_csv(t_file_name, sep=';', parse_dates=True,
usecols=usecols)
precip = pandas.read_csv(p_file_name, sep=';', parse_dates=True,
usecols=usecols)
dis = pandas.read_csv(q_file_name, sep=';', parse_dates=True,
usecols=usecols)
locdis = pandas.read_csv(qloc_file_name, sep=';', parse_dates=True,
usecols=usecols)
temp_hourly = temp.reindex(dis.index, method='ffill')
for id_ in ids:
counter += 1
inputs = pandas.concat([temp_hourly[id_], precip[id_], dis[id_], locdis[id_]],
axis=1)
inputs.columns = ['temperature', 'precipitation', 'discharge', 'local_discharge']
inputs['precipitation'] *= int(timesteps_per_day)
input_table = inputs.to_records(index=False)
name = 'catch_{}'.format(id_)
group = get_child(name)
h5_file.create_table(group, 'inputs', input_table,
'time varying inputs', expectedrows=total,
filters=filters)
prog.show_progress(counter, additional=id_)
prog.show_progress(counter, additional=id_, force=True)
int_steps = pandas.DataFrame(dis.index.to_series()).astype(numpy.int64)
int_steps.columns = ['timesteps']
time_steps = int_steps.to_records(index=False)
h5_file.create_table('/', 'time_steps', time_steps,
'time steps for all catchments')
    h5_file.create_table('/', 'steps_per_day',
                         numpy.array([(timesteps_per_day,)], dtype=[('steps_per_day', '<i8')]),
                         'number of time steps per day')
h5_file.close()
def get_first_ids(q_file_name, max_ids):
"""Get first `max_ids` from the dicharge file.
"""
with open(q_file_name) as fobj:
header = next(fobj).strip().split(';')
return {int(entry[1:-1]) for entry in header[:max_ids]}
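# The discharge file header is ';'-separated with quoted ids, so a header
# line like '"100";"101";"102"' with max_ids=2 yields {100, 101}.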
def preprocess(config_file):
"""Do the preprocessing.
"""
config = read_config(config_file)
h5_file_name = config['preprocessing']['hdf_input_path']
t_file_name = config['preprocessing']['temperature_path']
p_file_name = config['preprocessing']['precipitation_path']
q_file_name = config['preprocessing']['discharge_path']
qloc_file_name = config['preprocessing']['local_discharge_path']
max_ids = config['preprocessing']['max_ids']
timesteps_per_day = config['preprocessing']['timesteps_per_day']
ids = None
if max_ids:
ids = get_first_ids(q_file_name, max_ids)
batch_size = config['preprocessing']['batch_size']
strahler = get_strahler(config['preprocessing']['catchment_path'], ids)
tot_areas = get_tot_areas(config['preprocessing']['catchment_path'], ids)
    sel_areas = config['preprocessing']['selected_areas'].split(', ')
    appl_areas = get_appl_areas(config['preprocessing']['landcover_path'], sel_areas, ids)
appl_rates = get_appl_rates(config['preprocessing']['micropollutant_path'], ids)
strahler_limit = config['preprocessing']['strahler_limit']
strahler, tot_areas, appl_areas, appl_rates = filter_strahler_lessthan(
strahler, tot_areas, appl_areas, appl_rates, strahler_limit)
create_hdf_file(h5_file_name, tot_areas, appl_areas, appl_rates)
add_input_tables(h5_file_name, t_file_name, p_file_name, q_file_name,
qloc_file_name, timesteps_per_day, batch_size=batch_size)
if __name__ == '__main__':
from crosswater.tools.time_helper import show_used_time
@show_used_time
def test():
"""Try it out.
"""
config = sys.argv[1]
print('processing with {} ...'.format(config))
preprocess(config)
test()
|
moserand/crosswater
|
crosswater/preprocessing/hdf_input.py
|
Python
|
gpl-3.0
| 9,310
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
import sys
from django.urls import reverse
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import force_unicode
from notebook.connectors.base import Api, QueryError
if sys.version_info[0] > 2:
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
LOG = logging.getLogger(__name__)
try:
from hbase.api import HbaseApi
except ImportError as e:
LOG.warning("HBase app is not enabled: %s" % e)
def query_error_handler(func):
def decorator(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
message = force_unicode(str(e))
raise QueryError(message)
return decorator
class HBaseApi(Api):
@query_error_handler
def autocomplete(self, snippet, database=None, table=None, column=None, nested=None, operation=None):
db = HbaseApi(self.user)
cluster_name = database
response = {}
try:
if database is None:
response['databases'] = [cluster['name'] for cluster in db.getClusters()]
elif table is None:
tables_meta = db.getTableList(cluster_name)
response['tables_meta'] = [_table['name'] for _table in tables_meta if _table['enabled']]
elif column is None:
tables_meta = db.get(cluster_name, table)
response['columns'] = []
else:
raise PopupException('Could not find column `%s`.`%s`.`%s`' % (database, table, column))
except Exception as e:
LOG.warning('Autocomplete data fetching error: %s' % e)
response['code'] = 500
      response['error'] = str(e)  # Exception has no .message attribute on Python 3
return response
|
cloudera/hue
|
desktop/libs/notebook/src/notebook/connectors/hbase.py
|
Python
|
apache-2.0
| 2,484
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
ModelerAlgorithm.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import traceback
import copy
import os.path
import codecs
import time
from PyQt4 import QtCore, QtGui
from qgis.core import *
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.GeoAlgorithmExecutionException import \
GeoAlgorithmExecutionException
from processing.gui.Help2Html import getHtmlFromHelpFile
from processing.modeler.WrongModelException import WrongModelException
from processing.modeler.ModelerUtils import ModelerUtils
from processing.parameters.ParameterFactory import ParameterFactory
from processing.parameters.ParameterRaster import ParameterRaster
from processing.parameters.ParameterDataObject import ParameterDataObject
from processing.parameters.ParameterExtent import ParameterExtent
from processing.parameters.ParameterMultipleInput import ParameterMultipleInput
from processing.parameters.Parameter import Parameter
from processing.parameters.ParameterVector import ParameterVector
from processing.parameters.ParameterTableField import ParameterTableField
from processing.outputs.OutputRaster import OutputRaster
from processing.outputs.OutputHTML import OutputHTML
from processing.outputs.OutputTable import OutputTable
from processing.outputs.OutputVector import OutputVector
from processing.outputs.OutputNumber import OutputNumber
from processing.outputs.OutputString import OutputString
from processing.tools import dataobjects
class ModelerAlgorithm(GeoAlgorithm):
CANVAS_SIZE = 4000
LINE_BREAK_STRING = '%%%'
def getCopy(self):
newone = ModelerAlgorithm()
newone.openModel(self.descriptionFile)
newone.provider = self.provider
newone.deactivated = self.deactivated
return newone
def __init__(self):
GeoAlgorithm.__init__(self)
# The dialog where this model is being edited
self.modelerdialog = None
self.descriptionFile = None
# Geoalgorithms in this model
self.algs = []
# Parameters of Geoalgorithms in self.algs. Each entry is a
# map with (paramname, paramvalue) values for algs[i].
# paramvalues are instances of AlgorithmAndParameter.
self.algParameters = []
# Algorithms that each algorithm depends on. This is just a
# list of dependencies not set by outputs and inputs but
# explicitly entered instead, meaning that an algorithm must
# 'wait' for another to finish. Each entry is a list with
# algorithm indexes
self.dependencies = []
# Outputs of Geoalgorithms in self.algs. Each entry is a map
# with (output, outputvalue) values for algs[i]. outputvalue
# is the name of the output if final. None if is an
# intermediate output
self.algOutputs = []
# Hardcoded parameter values entered by the user when defining
# the model. Keys are value names.
self.paramValues = {}
# Position of items in canvas
self.algPos = []
self.paramPos = []
self.outputPos = [] # same structure as algOutputs
# Deactivated algorithms that should not be executed
self.deactivated = []
def getIcon(self):
return QtGui.QIcon(os.path.dirname(__file__) + '/../images/model.png')
def openModel(self, filename):
self.algPos = []
self.paramPos = []
        self.outputPos = []
self.algs = []
self.algParameters = []
self.algOutputs = []
self.paramValues = {}
self.dependencies = []
self.descriptionFile = filename
lines = codecs.open(filename, 'r', encoding='utf-8')
line = lines.readline().strip('\n').strip('\r')
iAlg = 0
try:
while line != '':
if line.startswith('PARAMETER:'):
paramLine = line[len('PARAMETER:'):]
param = ParameterFactory.getFromString(paramLine)
if param:
self.parameters.append(param)
else:
raise WrongModelException('Error in parameter line: '
+ line)
line = lines.readline().strip('\n')
tokens = line.split(',')
self.paramPos.append(QtCore.QPointF(float(tokens[0]),
float(tokens[1])))
elif line.startswith('VALUE:'):
valueLine = line[len('VALUE:'):]
tokens = valueLine.split('===')
self.paramValues[tokens[0]] = \
tokens[1].replace(ModelerAlgorithm.LINE_BREAK_STRING,
'\n')
elif line.startswith('NAME:'):
self.name = line[len('NAME:'):]
elif line.startswith('GROUP:'):
self.group = line[len('GROUP:'):]
if self.group == '[Test models]':
self.showInModeler = False
self.showInToolbox = False
elif line.startswith('ALGORITHM:'):
algParams = {}
algOutputs = {}
algLine = line[len('ALGORITHM:'):]
alg = ModelerUtils.getAlgorithm(algLine)
if alg is not None:
posline = lines.readline().strip('\n').strip('\r')
tokens = posline.split(',')
self.algPos.append(QtCore.QPointF(float(tokens[0]),
float(tokens[1])))
self.algs.append(alg)
dependenceline = lines.readline().strip('\n'
).strip('\r')
dependencies = []
if dependenceline != str(None):
for index in dependenceline.split(','):
try:
dependencies.append(int(index))
except:
# A quick fix while I figure out
# how to solve problems when
# parsing this
pass
for param in alg.parameters:
line = lines.readline().strip('\n').strip('\r')
if line == str(None):
algParams[param.name] = None
else:
tokens = line.split('|')
algParams[param.name] = \
AlgorithmAndParameter(int(tokens[0]),
tokens[1])
outputPos = {}
for out in alg.outputs:
line = lines.readline().strip('\n').strip('\r')
if str(None) != line:
if '|' in line:
tokens = line.split('|')
name = tokens[0]
tokens = tokens[1].split(',')
outputPos[out.name] = QtCore.QPointF(
float(tokens[0]), float(tokens[1]))
else:
name = line
outputPos[out.name] = None
algOutputs[out.name] = name
# We add the output to the algorithm,
# with a name indicating where it comes
# from that guarantees that the name is
# unique
output = copy.deepcopy(out)
output.description = name
output.name = self.getSafeNameForOutput(iAlg,
output)
self.addOutput(output)
else:
algOutputs[out.name] = None
self.outputPos.append(outputPos)
self.algOutputs.append(algOutputs)
self.algParameters.append(algParams)
self.dependencies.append(dependencies)
iAlg += 1
else:
raise WrongModelException('Error in algorithm name: '
+ algLine)
line = lines.readline().strip('\n').strip('\r')
except Exception, e:
if isinstance(e, WrongModelException):
raise e
else:
raise WrongModelException('Error in model definition line:'
+ line.strip() + ' : ' + traceback.format_exc())
def addParameter(self, param):
self.parameters.append(param)
self.paramPos.append(self.getPositionForParameterItem())
def updateParameter(self, paramIndex, param):
self.parameters[paramIndex] = param
def addAlgorithm(self, alg, parametersMap, valuesMap, outputsMap,
dependencies):
self.algs.append(alg)
self.algParameters.append(parametersMap)
self.algOutputs.append(outputsMap)
self.dependencies.append(dependencies)
for value in valuesMap.keys():
self.paramValues[value] = valuesMap[value]
algPos = self.getPositionForAlgorithmItem()
self.algPos.append(algPos)
pos = {}
i = 0
from processing.modeler.ModelerGraphicItem import ModelerGraphicItem
for out in outputsMap:
pos[out] = algPos + QtCore.QPointF(ModelerGraphicItem.BOX_WIDTH, i
* ModelerGraphicItem.BOX_HEIGHT)
i += 1
self.outputPos.append(pos)
def updateAlgorithm(self, algIndex, parametersMap, valuesMap, outputsMap,
dependencies):
self.algParameters[algIndex] = parametersMap
self.algOutputs[algIndex] = outputsMap
self.dependencies[algIndex] = dependencies
for value in valuesMap.keys():
self.paramValues[value] = valuesMap[value]
self.updateModelerView()
algPos = self.algPos[algIndex]
pos = {}
i = 0
from processing.modeler.ModelerGraphicItem import ModelerGraphicItem
for out in outputsMap:
pos[out] = algPos + QtCore.QPointF(ModelerGraphicItem.BOX_WIDTH, i
* ModelerGraphicItem.BOX_HEIGHT)
i += 1
self.outputPos[algIndex] = pos
def removeAlgorithm(self, index):
"""Returns True if the algorithm could be removed, False if
others depend on it and could not be removed.
"""
if self.hasDependencies(self.algs[index], index):
return False
for out in self.algs[index].outputs:
val = self.algOutputs[index][out.name]
if val:
name = self.getSafeNameForOutput(index, out)
self.removeOutputFromName(name)
        del self.algs[index]
        del self.algParameters[index]
        del self.algOutputs[index]
        del self.algPos[index]
        del self.outputPos[index]
        # keep the parallel per-algorithm lists in sync
        del self.dependencies[index]
i = -1
for paramValues in self.algParameters:
i += 1
newValues = {}
for (name, value) in paramValues.iteritems():
if value:
if value.alg > index:
newValues[name] = AlgorithmAndParameter(value.alg - 1,
value.param, value.algName, value.paramName)
else:
newValues[name] = value
else:
newValues[name] = value
self.algParameters[i] = newValues
self.updateModelerView()
return True
def removeParameter(self, index):
"""Returns True if the parameter could be removed, False if
others depend on it and could not be removed.
"""
if self.hasDependencies(self.parameters[index], index):
return False
del self.parameters[index]
del self.paramPos[index]
self.updateModelerView()
return True
def hasDependencies(self, element, elementIndex):
"""This method returns True if some other element depends on
the passed one.
"""
if isinstance(element, Parameter):
for alg in self.algParameters:
for aap in alg.values():
if aap:
if aap.alg == \
AlgorithmAndParameter.PARENT_MODEL_ALGORITHM:
if aap.param == element.name:
return True
elif aap.param in self.paramValues:
# Check for multiple inputs
aap2 = self.paramValues[aap.param]
if element.name in aap2:
return True
if isinstance(element, ParameterVector):
for param in self.parameters:
if isinstance(param, ParameterTableField):
if param.parent == element.name:
return True
else:
for alg in self.algParameters:
for aap in alg.values():
if aap:
if aap.alg == elementIndex:
return True
return False
def deactivateAlgorithm(self, algIndex, update=False):
if algIndex not in self.deactivated:
dependent = self.getDependentAlgorithms(algIndex)
self.deactivated.extend(dependent)
if update:
self.updateModelerView()
def activateAlgorithm(self, algIndex, update=False):
if algIndex in self.deactivated:
dependsOn = self.getDependsOnAlgorithms(algIndex)
for alg in dependsOn:
if alg in self.deactivated and alg != algIndex:
return False
self.deactivated.remove(algIndex)
dependent = self.getDependentAlgorithms(algIndex)
for alg in dependent:
if alg in self.deactivated:
self.deactivated.remove(alg)
if update:
self.updateModelerView()
return True
def getDependsOnAlgorithms(self, algIndex):
"""This method returns a list with the indexes of algorithms
a given one depends on.
"""
algs = []
algs.extend(self.dependencies[algIndex])
index = -1
for aap in self.algParameters[algIndex].values():
index += 1
if aap is not None:
if aap.alg != AlgorithmAndParameter.PARENT_MODEL_ALGORITHM \
and aap.alg not in algs:
algs.append(aap.alg)
dep = self.getDependsOnAlgorithms(aap.alg)
for alg in dep:
if alg not in algs:
algs.append(alg)
return algs
def getDependentAlgorithms(self, algIndex):
"""This method returns a list with the indexes of algorithms
depending on a given one.
"""
dependent = [algIndex]
index = -1
for alg in self.algParameters:
index += 1
if index in dependent:
continue
for aap in alg.values():
if aap is not None:
if aap.alg == algIndex:
dep = self.getDependentAlgorithms(index)
for alg in dep:
if alg not in dependent:
dependent.append(alg)
break
index = -1
for dep in self.dependencies:
index += 1
if algIndex in dep:
dep = self.getDependentAlgorithms(index)
for alg in dep:
if alg not in dependent:
dependent.append(alg)
return dependent
def getPositionForAlgorithmItem(self):
MARGIN = 20
BOX_WIDTH = 200
BOX_HEIGHT = 80
if len(self.algPos) != 0:
maxX = max([pos.x() for pos in self.algPos])
maxY = max([pos.y() for pos in self.algPos])
newX = min(MARGIN + BOX_WIDTH + maxX, self.CANVAS_SIZE - BOX_WIDTH)
newY = min(MARGIN + BOX_HEIGHT + maxY, self.CANVAS_SIZE
- BOX_HEIGHT)
else:
newX = MARGIN + BOX_WIDTH / 2
newY = MARGIN * 2 + BOX_HEIGHT + BOX_HEIGHT / 2
return QtCore.QPointF(newX, newY)
def getPositionForParameterItem(self):
MARGIN = 20
BOX_WIDTH = 200
BOX_HEIGHT = 80
if len(self.paramPos) != 0:
maxX = max([pos.x() for pos in self.paramPos])
newX = min(MARGIN + BOX_WIDTH + maxX, self.CANVAS_SIZE - BOX_WIDTH)
else:
newX = MARGIN + BOX_WIDTH / 2
return QtCore.QPointF(newX, MARGIN + BOX_HEIGHT / 2)
def serialize(self):
s = 'NAME:' + unicode(self.name) + '\n'
s += 'GROUP:' + unicode(self.group) + '\n'
i = 0
for param in self.parameters:
s += 'PARAMETER:' + param.serialize() + '\n'
pt = self.paramPos[i]
s += str(pt.x()) + ',' + str(pt.y()) + '\n'
i += 1
for key in self.paramValues.keys():
s += 'VALUE:' + key + '===' \
+ str(self.paramValues[key]).replace('\n',
ModelerAlgorithm.LINE_BREAK_STRING) + '\n'
for i in range(len(self.algs)):
alg = self.algs[i]
s += 'ALGORITHM:' + alg.commandLineName() + '\n'
pt = self.algPos[i]
s += str(pt.x()) + ',' + str(pt.y()) + '\n'
if len(self.dependencies[i]) != 0:
s += ','.join([str(index) for index in self.dependencies[i]]) \
+ '\n'
else:
s += str(None) + '\n'
for param in alg.parameters:
value = self.algParameters[i][param.name]
if value:
s += value.serialize() + '\n'
else:
s += str(None) + '\n'
for out in alg.outputs:
value = self.algOutputs[i][out.name]
s += unicode(value)
if value is not None:
pt = self.outputPos[i][out.name]
s += '|' + str(pt.x()) + ',' + str(pt.y())
s += '\n'
return s
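    # Illustrative serialize() output (hypothetical names and positions),
    # mirroring the format that openModel() parses:
    #
    #   NAME:my model
    #   GROUP:my group
    #   PARAMETER:<param.serialize()>
    #   100.0,50.0
    #   VALUE:some_value===5
    #   ALGORITHM:qgis:somealg
    #   300.0,50.0
    #   None
    #   -1|INPUT
    #   result|400.0,50.0
    #
    # 'None' means no explicit dependencies; '-1|INPUT' is a serialized
    # AlgorithmAndParameter reading the model input 'INPUT'; the last line is
    # a named final output followed by its canvas position.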
def setPositions(self, paramPos, algPos, outputPos):
self.paramPos = paramPos
self.algPos = algPos
self.outputPos = outputPos
def prepareAlgorithm(self, alg, iAlg):
for param in alg.parameters:
aap = self.algParameters[iAlg][param.name]
if aap is None:
if isinstance(param, ParameterExtent):
value = self.getMinCoveringExtent()
if not param.setValue(value):
raise GeoAlgorithmExecutionException('Wrong value: '
+ str(value))
else:
param.setValue(None)
continue
if isinstance(param, ParameterMultipleInput):
value = self.getValueFromAlgorithmAndParameter(aap)
tokens = value.split(';')
layerslist = []
for token in tokens:
(i, paramname) = token.split('|')
aap = AlgorithmAndParameter(int(i), paramname)
value = self.getValueFromAlgorithmAndParameter(aap)
layerslist.append(str(value))
value = ';'.join(layerslist)
else:
value = self.getValueFromAlgorithmAndParameter(aap)
# We allow unexistent filepaths, since that allows
# algorithms to skip some conversion routines
if not param.setValue(value) and not isinstance(param,
ParameterDataObject):
raise GeoAlgorithmExecutionException('Wrong value: '
+ str(value))
for out in alg.outputs:
val = self.algOutputs[iAlg][out.name]
if val:
name = self.getSafeNameForOutput(iAlg, out)
out.value = self.getOutputFromName(name).value
else:
out.value = None
def getMinCoveringExtent(self):
first = True
found = False
for param in self.parameters:
if param.value:
if isinstance(param, (ParameterRaster, ParameterVector)):
found = True
if isinstance(param.value, (QgsRasterLayer,
QgsVectorLayer)):
layer = param.value
else:
layer = dataobjects.getObjectFromUri(param.value)
self.addToRegion(layer, first)
first = False
elif isinstance(param, ParameterMultipleInput):
found = True
layers = param.value.split(';')
for layername in layers:
layer = dataobjects.getObjectFromUri(layername)
self.addToRegion(layer, first)
first = False
if found:
return str(self.xmin) + ',' + str(self.xmax) + ',' \
+ str(self.ymin) + ',' + str(self.ymax)
else:
return None
def addToRegion(self, layer, first):
if first:
self.xmin = layer.extent().xMinimum()
self.xmax = layer.extent().xMaximum()
self.ymin = layer.extent().yMinimum()
self.ymax = layer.extent().yMaximum()
else:
self.xmin = min(self.xmin, layer.extent().xMinimum())
self.xmax = max(self.xmax, layer.extent().xMaximum())
self.ymin = min(self.ymin, layer.extent().yMinimum())
self.ymax = max(self.ymax, layer.extent().yMaximum())
def getSafeNameForOutput(self, ialg, out):
return out.name + '_ALG' + str(ialg)
def getValueFromAlgorithmAndParameter(self, aap):
if aap is None:
return None
if float(aap.alg) \
== float(AlgorithmAndParameter.PARENT_MODEL_ALGORITHM):
for key in self.paramValues.keys():
if aap.param == key:
return self.paramValues[key]
for param in self.parameters:
if aap.param == param.name:
return param.value
else:
return self.producedOutputs[int(aap.alg)][aap.param]
def processAlgorithm(self, progress):
self.producedOutputs = {}
executed = []
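        # Scheduling: sweep the algorithm list repeatedly, executing any
        # algorithm whose required predecessors have all finished, until every
        # non-deactivated algorithm has run.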
while len(executed) < len(self.algs) - len(self.deactivated):
iAlg = 0
for alg in self.algs:
if iAlg not in self.deactivated and iAlg not in executed:
canExecute = True
required = self.getDependsOnAlgorithms(iAlg)
for requiredAlg in required:
if requiredAlg != iAlg and requiredAlg not in executed:
canExecute = False
break
if canExecute:
try:
alg = alg.getCopy()
progress.setDebugInfo('Prepare algorithm %i: %s'
% (iAlg, alg.name))
self.prepareAlgorithm(alg, iAlg)
progress.setText('Running ' + alg.name + ' ['
+ str(iAlg + 1) + '/' + str(len(self.algs)
- len(self.deactivated)) + ']')
outputs = {}
progress.setDebugInfo('Parameters: '
+ ', '.join([unicode(p).strip() + '='
+ unicode(p.value) for p in
alg.parameters]))
t0 = time.time()
alg.execute(progress, self)
dt = time.time() - t0
for out in alg.outputs:
outputs[out.name] = out.value
progress.setDebugInfo('Outputs: '
+ ', '.join([unicode(out).strip() + '='
+ unicode(outputs[out.name]) for out in
alg.outputs]))
self.producedOutputs[iAlg] = outputs
executed.append(iAlg)
progress.setDebugInfo(
'OK. Execution took %0.3f ms (%i outputs).'
% (dt, len(outputs)))
except GeoAlgorithmExecutionException, e:
progress.setDebugInfo('Failed')
raise GeoAlgorithmExecutionException(
'Error executing algorithm ' + str(iAlg)
+ '\n' + e.msg)
else:
pass
iAlg += 1
progress.setDebugInfo(
'Model processed ok. Executed %i algorithms total' % iAlg)
def getOutputType(self, i, outname):
for out in self.algs[i].outputs:
if out.name == outname:
if isinstance(out, OutputRaster):
return 'output raster'
elif isinstance(out, OutputVector):
return 'output vector'
elif isinstance(out, OutputTable):
return 'output table'
elif isinstance(out, OutputHTML):
return 'output html'
elif isinstance(out, OutputNumber):
return 'output number'
elif isinstance(out, OutputString):
return 'output string'
def getAsPythonCode(self):
s = []
for param in self.parameters:
s.append(str(param.getAsScriptCode().lower()))
i = 0
for outs in self.algOutputs:
for out in outs.keys():
if outs[out]:
s.append('##' + out.lower() + '_alg' + str(i) + '='
+ self.getOutputType(i, out))
i += 1
i = 0
iMultiple = 0
for alg in self.algs:
multiple = []
runline = 'outputs_' + str(i) + '=Processing.runalg("' \
+ alg.commandLineName() + '"'
for param in alg.parameters:
aap = self.algParameters[i][param.name]
if aap is None:
runline += ', None'
elif isinstance(param, ParameterMultipleInput):
value = self.paramValues[aap.param]
tokens = value.split(';')
layerslist = []
for token in tokens:
(iAlg, paramname) = token.split('|')
if float(iAlg) == float(
AlgorithmAndParameter.PARENT_MODEL_ALGORITHM):
if self.ismodelparam(paramname):
value = paramname.lower()
else:
value = self.paramValues[paramname]
else:
value = 'outputs_' + str(iAlg) + "['" + paramname \
+ "']"
layerslist.append(str(value))
                multiple.append('multiple_' + str(iMultiple) + '=['
                                + ','.join(layerslist) + ']')
                runline += ', ";".join(multiple_' + str(iMultiple) + ') '
                iMultiple += 1
else:
if float(aap.alg) == float(
AlgorithmAndParameter.PARENT_MODEL_ALGORITHM):
if self.ismodelparam(aap.param):
runline += ', ' + aap.param.lower()
else:
runline += ', ' + str(self.paramValues[aap.param])
else:
runline += ', outputs_' + str(aap.alg) + "['" \
+ aap.param + "']"
for out in alg.outputs:
value = self.algOutputs[i][out.name]
if value:
name = out.name.lower() + '_alg' + str(i)
else:
name = str(None)
runline += ', ' + name
i += 1
s += multiple
s.append(str(runline + ')'))
return '\n'.join(s)
def ismodelparam(self, paramname):
for modelparam in self.parameters:
if modelparam.name == paramname:
return True
return False
def getAsCommand(self):
if self.descriptionFile:
return GeoAlgorithm.getAsCommand(self)
else:
return None
def commandLineName(self):
return 'modeler:' + os.path.basename(self.descriptionFile)[:-6].lower()
def setModelerView(self, dialog):
self.modelerdialog = dialog
def updateModelerView(self):
if self.modelerdialog:
self.modelerdialog.repaintModel()
def help(self):
helpfile = self.descriptionFile + '.help'
if os.path.exists(helpfile):
return True, getHtmlFromHelpFile(self, helpfile)
else:
return False, None
class AlgorithmAndParameter:
PARENT_MODEL_ALGORITHM = -1
# alg is the index of the algorithm in the list in
# ModelerAlgorithm.algs.
# -1 if the value is not taken from the output of an algorithm,
# but from an input of the model or a hardcoded value.
# Names are just used for decoration, and are not needed to
# create a hardcoded value.
def __init__(self, alg, param, algName='', paramName=''):
self.alg = alg
self.param = param
self.algName = algName
self.paramName = paramName
def serialize(self):
return str(self.alg) + '|' + str(self.param)
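    # Example (illustrative): AlgorithmAndParameter(-1, 'INPUT').serialize()
    # gives '-1|INPUT', i.e. a value read from the model input named 'INPUT'
    # (-1 being PARENT_MODEL_ALGORITHM) rather than from an algorithm output.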
def name(self):
if self.alg != AlgorithmAndParameter.PARENT_MODEL_ALGORITHM:
return self.paramName + ' from algorithm ' + str(self.alg) + '(' \
+ self.algName + ')'
else:
return self.paramName
def __str__(self):
return str(self.alg) + '|' + str(self.param)
|
mhugent/Quantum-GIS
|
python/plugins/processing/modeler/ModelerAlgorithm.py
|
Python
|
gpl-2.0
| 32,306
|
"""
API client class for LittleSMS.ru service.
Ivan Grishaev, 2011.
ivan@grishaev.me
"""
from hashlib import md5, sha1
import urllib
# Find JSON lib
try:
import json # Python >= 2.6
except ImportError:
try:
import simplejson as json # Python <= 2.5
except ImportError:
try:
from django.utils import simplejson as json # Django
except ImportError:
raise ImportError("JSON lib not found.")
API_URL = "%s://littlesms.ru/api/%s"
def urllib_opener():
    """Plain urllib opener factory."""
    def opener(url):
        return urllib.urlopen(url).read()
    return opener
def curl_opener(proxy=None, port=None, user=None, passw=None):
"""cURL opener fabric."""
import curl
c = curl.Curl()
c.set_option(curl.pycurl.HTTPPROXYTUNNEL, True)
c.set_option(curl.pycurl.SSL_VERIFYPEER, False)
if proxy:
c.set_option(curl.pycurl.PROXY, proxy)
if port:
c.set_option(curl.pycurl.PROXYPORT, port)
if user and passw:
c.set_option(curl.pycurl.PROXYUSERPWD, "%s:%s" % (user, passw))
def opener(url):
return c.get(url)
return opener
def gae_opener():
"""Google APP Engine opener fabric."""
from google.appengine.api import urlfetch
def opener(url):
return urlfetch.fetch(url).content
return opener
class ApiError(Exception):
"""Service exception class."""
def __init__(self, code, message):
self.code = code
self.message = message
def __str__(self):
return u"Error %d: %s" % (self.code, self.message)
class Api(object):
"""Main API class.
Params:
user: user name;
key: secret API key;
    secure: use the https scheme instead of http;
opener: callable object for URL retrieving;
logger: logger for request/response logging.
"""
def __init__(self, user, key, secure=True, opener=None, logger=None):
self.user = user
self.key = key
self.secure = secure
self.logger = logger
self.opener = opener or urllib_opener()
def balance(self):
"""Get current balance."""
path = "user/balance"
return self._request(path)
def send(self, message, recipients, sender=None, test=False):
"""Send message.
See http://littlesms.ru/doc-messages#message-send
Params:
message: sms text, str or unicode;
recipients: phone number or list/tuple of phone numbers;
sender: sender name, 11 symbols max;
test: testing flag.
Sample response:
{
u'count': 1,
u'status': u'success',
u'recipients': [u'7xxxxxxxxxx'],
u'price': 0.5,
u'parts': 1,
u'test': 0,
u'balance': 0.5,
u'messages_id': [u'236234623']
}
"""
path = "message/send"
params = dict(message=message, recipients=recipients)
if sender is not None:
params.update(sender=sender)
if test:
params.update(test="1")
return self._request(path, **params)
def status(self, ids):
"""Get message status by id.
See http://littlesms.ru/doc-messages#message-status
Params:
ids: message id or list/tuple of ids.
"""
path = "message/status"
params = dict(messages_id=ids)
return self._request(path, **params)
def price(self, message, recipients):
"""Get pricing info.
See http://littlesms.ru/doc-messages#message-price
Params:
message: sms text, str or unicode;
recipients: phone number or list/tuple of phone numbers.
"""
path = "message/price"
params = dict(message=message, recipients=recipients)
return self._request(path, **params)
def history(self, **kwargs):
"""Get history info.
See http://littlesms.ru/doc-messages#message-history
Shows full info without filters.
Kwargs params (filters):
history_id: history id;
recipient: phone number;
sender: sender name;
status: message status;
date_from: low date limit;
date_to: hi date limit;
id: message id.
"""
path = "message/history"
params = kwargs.copy()
return self._request(path, **params)
def _sign(self, **params):
"""Calculate signature."""
keys = params.keys()
keys.sort()
values = [params[key] for key in keys]
values.insert(0, self.user)
values.append(self.key)
return md5(sha1("".join(values)).hexdigest()).hexdigest()
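    # Worked example (hypothetical values): for user 'u', key 'k' and params
    # {'b': '2', 'a': '1'}, sorting by key name gives values ['1', '2'], so
    # the signed string is 'u' + '1' + '2' + 'k' and the signature is
    # md5(sha1('u12k').hexdigest()).hexdigest().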
def _request(self, path, **params):
"""Make API request.
Returns parsed JSON or raise ApiError.
"""
arguments = params.copy()
for k, v in arguments.iteritems():
if isinstance(v, unicode):
arguments[k] = v.encode("utf8")
if isinstance(v, (int, long)):
arguments[k] = str(v)
if isinstance(v, (list, tuple)):
arguments[k] = ",".join(map(str, v))
sign = self._sign(**arguments)
query = arguments.copy()
query.update(sign=sign, user=self.user)
qs = urllib.urlencode(query)
scheme = "https" if self.secure else "http"
base_url = API_URL % (scheme, path)
url = base_url + "?" + qs
if self.logger:
self.logger.info(url)
response = self.opener(url)
if self.logger:
self.logger.info(response)
data = json.loads(response)
if data["status"] == u"error":
raise ApiError(data["error"], data["message"])
else:
return data
|
desecho/hangout-discontinued
|
hangout_project/hangout/littlesms.py
|
Python
|
mit
| 5,853
|
## Automatically adapted for numpy.oldnumeric Jun 27, 2008 by -c
#
# Copyright (C) 2000-2008 greg Landrum and Rational Discovery LLC
# All Rights Reserved
#
""" Utilities for data manipulation
**FILE FORMATS:**
- *.qdat files* contain quantized data suitable for
feeding to learning algorithms.
The .qdat file, written by _DecTreeGui_, is structured as follows:
1) Any number of lines which are ignored.
2) A line containing the string 'Variable Table'
any number of variable definitions in the format:
'# Variable_name [quant_bounds]'
where '[quant_bounds]' is a list of the boundaries used for quantizing
that variable. If the variable is inherently integral (i.e. not
quantized), this can be an empty list.
3) A line beginning with '# ----' which signals the end of the variable list
4) Any number of lines containing data points, in the format:
'Name_of_point var1 var2 var3 .... varN'
all variable values should be integers
Throughout, it is assumed that varN is the result
- *.dat files* contain the same information as .qdat files, but the variable
values can be anything (floats, ints, strings). **These files should
still contain quant_bounds!**
- *.qdat.pkl file* contain a pickled (binary) representation of
the data read in. They stores, in order:
1) A python list of the variable names
2) A python list of lists with the quantization bounds
3) A python list of the point names
4) A python list of lists with the data points
"""
from __future__ import print_function
import re, csv
import random
from rdkit import six
from rdkit.six.moves import cPickle
from rdkit.six.moves import xrange, map
from rdkit import RDConfig
from rdkit.utils import fileutils
from rdkit.ML.Data import MLData
from rdkit.Dbase.DbConnection import DbConnect
from rdkit.DataStructs import BitUtils
def permutation(nToDo):
res = list(xrange(nToDo))
random.shuffle(res, random=random.random)
return res
def WriteData(outFile, varNames, qBounds, examples):
""" writes out a .qdat file
**Arguments**
- outFile: a file object
- varNames: a list of variable names
- qBounds: the list of quantization bounds (should be the same length
as _varNames_)
- examples: the data to be written
"""
outFile.write('# Quantized data from DataUtils\n')
outFile.write('# ----------\n')
outFile.write('# Variable Table\n')
for i in xrange(len(varNames)):
outFile.write('# %s %s\n' % (varNames[i], str(qBounds[i])))
outFile.write('# ----------\n')
for example in examples:
outFile.write(' '.join(map(str, example)) + '\n')
def ReadVars(inFile):
""" reads the variables and quantization bounds from a .qdat or .dat file
**Arguments**
- inFile: a file object
**Returns**
a 2-tuple containing:
1) varNames: a list of the variable names
2) qbounds: the list of quantization bounds for each variable
"""
varNames = []
qBounds = []
fileutils.MoveToMatchingLine(inFile, 'Variable Table')
inLine = inFile.readline()
while inLine.find('# ----') == -1:
splitLine = inLine[2:].split('[')
varNames.append(splitLine[0].strip())
qBounds.append(splitLine[1][:-2])
inLine = inFile.readline()
for i in xrange(len(qBounds)):
if qBounds[i] != '':
l = qBounds[i].split(',')
qBounds[i] = []
for item in l:
qBounds[i].append(float(item))
else:
qBounds[i] = []
return varNames, qBounds
def ReadQuantExamples(inFile):
""" reads the examples from a .qdat file
**Arguments**
- inFile: a file object
**Returns**
a 2-tuple containing:
1) the names of the examples
2) a list of lists containing the examples themselves
**Note**
because this is reading a .qdat file, it assumed that all variable values
are integers
"""
expr1 = re.compile(r'^#')
expr2 = re.compile(r'[\ ]*|[\t]*')
examples = []
names = []
inLine = inFile.readline()
while inLine:
if expr1.search(inLine) is None:
resArr = expr2.split(inLine)
if len(resArr) > 1:
examples.append(list(map(lambda x: int(x), resArr[1:])))
names.append(resArr[0])
inLine = inFile.readline()
return names, examples
def ReadGeneralExamples(inFile):
""" reads the examples from a .dat file
**Arguments**
- inFile: a file object
**Returns**
a 2-tuple containing:
1) the names of the examples
2) a list of lists containing the examples themselves
**Note**
- this attempts to convert variable values to ints, then floats.
if those both fail, they are left as strings
"""
expr1 = re.compile(r'^#')
expr2 = re.compile(r'[\ ]*|[\t]*')
examples = []
names = []
inLine = inFile.readline()
while inLine:
if expr1.search(inLine) is None:
resArr = expr2.split(inLine)[:-1]
if len(resArr) > 1:
for i in xrange(1, len(resArr)):
d = resArr[i]
try:
resArr[i] = int(d)
except ValueError:
try:
resArr[i] = float(d)
except ValueError:
pass
examples.append(resArr[1:])
names.append(resArr[0])
inLine = inFile.readline()
return names, examples
def BuildQuantDataSet(fileName):
""" builds a data set from a .qdat file
**Arguments**
- fileName: the name of the .qdat file
**Returns**
an _MLData.MLQuantDataSet_
"""
with open(fileName, 'r') as inFile:
varNames, qBounds = ReadVars(inFile)
ptNames, examples = ReadQuantExamples(inFile)
data = MLData.MLQuantDataSet(examples, qBounds=qBounds, varNames=varNames, ptNames=ptNames)
return data
def BuildDataSet(fileName):
""" builds a data set from a .dat file
**Arguments**
- fileName: the name of the .dat file
**Returns**
an _MLData.MLDataSet_
"""
with open(fileName, 'r') as inFile:
varNames, qBounds = ReadVars(inFile)
ptNames, examples = ReadGeneralExamples(inFile)
data = MLData.MLDataSet(examples, qBounds=qBounds, varNames=varNames, ptNames=ptNames)
return data
def CalcNPossibleUsingMap(data, order, qBounds, nQBounds=None):
""" calculates the number of possible values for each variable in a data set
**Arguments**
- data: a list of examples
- order: the ordering map between the variables in _data_ and _qBounds_
- qBounds: the quantization bounds for the variables
**Returns**
a list with the number of possible values each variable takes on in the data set
**Notes**
- variables present in _qBounds_ will have their _nPossible_ number read
      from _qBounds_
- _nPossible_ for other numeric variables will be calculated
"""
numericTypes = [int, float]
if six.PY2:
numericTypes.append(long)
print('order:', order, len(order))
print('qB:', qBounds)
#print('nQB:',nQBounds, len(nQBounds))
assert (qBounds and len(order)==len(qBounds)) or (nQBounds and len(order)==len(nQBounds)),\
'order/qBounds mismatch'
nVars = len(order)
nPossible = [-1] * nVars
  cols = list(range(nVars))  # list() so .remove() below works on Python 3 too
for i in xrange(nVars):
if nQBounds and nQBounds[i] != 0:
nPossible[i] = -1
cols.remove(i)
elif len(qBounds[i]) > 0:
nPossible[i] = len(qBounds[i])
cols.remove(i)
nPts = len(data)
for i in xrange(nPts):
for col in cols[:]:
d = data[i][order[col]]
if type(d) in numericTypes:
if int(d) == d:
nPossible[col] = max(int(d), nPossible[col])
else:
nPossible[col] = -1
cols.remove(col)
else:
print('bye bye col %d: %s' % (col, repr(d)))
nPossible[col] = -1
cols.remove(col)
return list(map(lambda x: int(x) + 1, nPossible))
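# Note: the final map adds one to every entry, so a column with quant bounds
# [1.0, 2.0] reports 3 possible values, and an integer column whose largest
# value in the data is 4 reports 5.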
def WritePickledData(outName, data):
""" writes either a .qdat.pkl or a .dat.pkl file
**Arguments**
- outName: the name of the file to be used
- data: either an _MLData.MLDataSet_ or an _MLData.MLQuantDataSet_
"""
varNames = data.GetVarNames()
qBounds = data.GetQuantBounds()
ptNames = data.GetPtNames()
examples = data.GetAllData()
with open(outName, 'wb+') as outFile:
cPickle.dump(varNames, outFile)
cPickle.dump(qBounds, outFile)
cPickle.dump(ptNames, outFile)
cPickle.dump(examples, outFile)
def TakeEnsemble(vect, ensembleIds, isDataVect=False):
"""
>>> v = [10,20,30,40,50]
>>> TakeEnsemble(v,(1,2,3))
[20, 30, 40]
>>> v = ['foo',10,20,30,40,50,1]
>>> TakeEnsemble(v,(1,2,3),isDataVect=True)
['foo', 20, 30, 40, 1]
"""
if isDataVect:
ensembleIds = [x + 1 for x in ensembleIds]
vect = [vect[0]] + [vect[x] for x in ensembleIds] + [vect[-1]]
else:
vect = [vect[x] for x in ensembleIds]
return vect
def DBToData(dbName, tableName, user='sysdba', password='masterkey', dupCol=-1, what='*', where='',
join='', pickleCol=-1, pickleClass=None, ensembleIds=None):
""" constructs an _MLData.MLDataSet_ from a database
**Arguments**
- dbName: the name of the database to be opened
- tableName: the table name containing the data in the database
- user: the user name to be used to connect to the database
- password: the password to be used to connect to the database
- dupCol: if nonzero specifies which column should be used to recognize
duplicates.
**Returns**
an _MLData.MLDataSet_
**Notes**
- this uses Dbase.DataUtils functionality
"""
conn = DbConnect(dbName, tableName, user, password)
res = conn.GetData(fields=what, where=where, join=join, removeDups=dupCol, forceList=1)
nPts = len(res)
vals = [None] * nPts
ptNames = [None] * nPts
classWorks = True
for i in range(nPts):
tmp = list(res[i])
ptNames[i] = tmp.pop(0)
if pickleCol >= 0:
if not pickleClass or not classWorks:
tmp[pickleCol] = cPickle.loads(str(tmp[pickleCol]))
else:
try:
tmp[pickleCol] = pickleClass(str(tmp[pickleCol]))
except Exception:
tmp[pickleCol] = cPickle.loads(str(tmp[pickleCol]))
classWorks = False
if ensembleIds:
tmp[pickleCol] = BitUtils.ConstructEnsembleBV(tmp[pickleCol], ensembleIds)
else:
if ensembleIds:
tmp = TakeEnsemble(tmp, ensembleIds, isDataVect=True)
vals[i] = tmp
varNames = conn.GetColumnNames(join=join, what=what)
data = MLData.MLDataSet(vals, varNames=varNames, ptNames=ptNames)
return data
def TextToData(reader, ignoreCols=[], onlyCols=None):
""" constructs an _MLData.MLDataSet_ from a bunch of text
#DOC
**Arguments**
- reader needs to be iterable and return lists of elements
(like a csv.reader)
**Returns**
an _MLData.MLDataSet_
"""
varNames = next(reader)
if not onlyCols:
keepCols = []
for i, name in enumerate(varNames):
if name not in ignoreCols:
keepCols.append(i)
else:
keepCols = [-1] * len(onlyCols)
for i, name in enumerate(varNames):
if name in onlyCols:
keepCols[onlyCols.index(name)] = i
nCols = len(varNames)
varNames = tuple([varNames[x] for x in keepCols])
nVars = len(varNames)
vals = []
ptNames = []
for splitLine in reader:
if len(splitLine):
if len(splitLine) != nCols:
raise ValueError('unequal line lengths')
tmp = [splitLine[x] for x in keepCols]
ptNames.append(tmp[0])
pt = [None] * (nVars - 1)
for j in range(nVars - 1):
try:
val = int(tmp[j + 1])
except ValueError:
try:
val = float(tmp[j + 1])
except ValueError:
val = str(tmp[j + 1])
pt[j] = val
vals.append(pt)
data = MLData.MLDataSet(vals, varNames=varNames, ptNames=ptNames)
return data
def TextFileToData(fName, onlyCols=None):
"""
#DOC
"""
ext = fName.split('.')[-1]
with open(fName, 'r') as inF:
if ext.upper() == 'CSV':
# CSV module distributed with python2.3 and later
splitter = csv.reader(inF)
else:
splitter = csv.reader(inF, delimiter='\t')
res = TextToData(splitter, onlyCols=onlyCols)
return res
def InitRandomNumbers(seed):
""" Seeds the random number generators
**Arguments**
- seed: a 2-tuple containing integers to be used as the random number seeds
**Notes**
this seeds both the RDRandom generator and the one in the standard
Python _random_ module
"""
from rdkit import RDRandom
RDRandom.seed(seed[0])
import random
random.seed(seed[0])
def FilterData(inData, val, frac, col=-1, indicesToUse=None, indicesOnly=0):
"""
#DOC
"""
if frac < 0 or frac > 1:
raise ValueError('filter fraction out of bounds')
try:
inData[0][col]
except IndexError:
raise ValueError('target column index out of range')
# convert the input data to a list and sort them
if indicesToUse:
tmp = [inData[x] for x in indicesToUse]
else:
tmp = list(inData)
nOrig = len(tmp)
sortOrder = list(xrange(nOrig))
#sortOrder.sort(lambda x,y,col=col,tmp=tmp:cmp(tmp[x][col],tmp[y][col]))
# no more cmp in python3, must use a key function
sortOrder.sort(key=lambda x: tmp[x][col])
tmp = [tmp[x] for x in sortOrder]
# find the start of the entries with value val
start = 0
while start < nOrig and tmp[start][col] != val:
start += 1
if start >= nOrig:
raise ValueError('target value (%d) not found in data' % (val))
# find the end of the entries with value val
finish = start + 1
while finish < nOrig and tmp[finish][col] == val:
finish += 1
# how many entries have the target value?
nWithVal = finish - start
# how many don't?
nOthers = len(tmp) - nWithVal
currFrac = float(nWithVal) / nOrig
if currFrac < frac:
#
# We're going to keep most of (all) the points with the target value,
# We need to figure out how many of the other points we'll
# toss out
#
nTgtFinal = nWithVal
nFinal = int(round(nWithVal / frac))
nOthersFinal = nFinal - nTgtFinal
#
# We may need to reduce the number of targets to keep
# because it may make it impossible to hit exactly the
# fraction we're trying for. Take care of that now
#
while float(nTgtFinal) / nFinal > frac:
nTgtFinal -= 1
nFinal -= 1
else:
#
# There are too many points with the target value,
# we'll keep most of (all) the other points and toss a random
# selection of the target value points
#
nOthersFinal = nOthers
nFinal = int(round(nOthers / (1 - frac)))
nTgtFinal = nFinal - nOthersFinal
#
# We may need to reduce the number of others to keep
# because it may make it impossible to hit exactly the
# fraction we're trying for. Take care of that now
#
while float(nTgtFinal) / nFinal < frac:
nOthersFinal -= 1
nFinal -= 1
others = list(xrange(start)) + list(xrange(finish, nOrig))
othersTake = permutation(nOthers)
others = [others[x] for x in othersTake[:nOthersFinal]]
targets = list(xrange(start, finish))
targetsTake = permutation(nWithVal)
targets = [targets[x] for x in targetsTake[:nTgtFinal]]
# these are all the indices we'll be keeping
indicesToKeep = targets + others
nToKeep = len(indicesToKeep)
nRej = nOrig - nToKeep
res = []
rej = []
# now pull the points, but in random order
if not indicesOnly:
for i in permutation(nOrig):
if i in indicesToKeep:
res.append(tmp[i])
else:
rej.append(tmp[i])
else:
# EFF: this is slower than it needs to be
for i in permutation(nOrig):
if not indicesToUse:
idx = sortOrder[i]
else:
idx = indicesToUse[sortOrder[i]]
if i in indicesToKeep:
res.append(idx)
else:
rej.append(idx)
return res, rej
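# Hedged usage sketch (illustration only, not part of the original module):
# starting from a toy set in which label 0 appears in 2 of 6 rows, asking for
# frac=0.5 keeps a subset in which label 0 makes up half of the kept rows.
#
#   pts = [[1, 0], [2, 0], [3, 1], [4, 1], [5, 1], [6, 1]]
#   keep, rej = FilterData(pts, val=0, frac=0.5)
#   # -> 4 rows kept (2 with label 0, 2 with label 1), 2 rejected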
def CountResults(inData, col=-1, bounds=None):
""" #DOC
"""
counts = {}
for p in inData:
if not bounds:
r = p[col]
else:
act = p[col]
bound = 0
placed = 0
while not placed and bound < len(bounds):
if act < bounds[bound]:
r = bound
placed = 1
else:
bound += 1
if not placed:
r = bound
counts[r] = counts.get(r, 0) + 1
return counts
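# Hedged usage sketch (illustration only): with `bounds`, CountResults bins
# the target column; values below bounds[0] land in bin 0, values in
# [bounds[0], bounds[1]) in bin 1, and so on.
#
#   CountResults([[0.1], [0.5], [0.9]], bounds=[0.3, 0.7])
#   # -> {0: 1, 1: 1, 2: 1}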
def RandomizeActivities(dataSet, shuffle=0, runDetails=None):
""" randomizes the activity values of a dataset
**Arguments**
- dataSet: a _ML.Data.MLQuantDataSet_, the activities here will be randomized
- shuffle: an optional toggle. If this is set, the activity values
will be shuffled (so the number in each class remains constant)
- runDetails: an optional CompositeRun object
**Note**
- _examples_ are randomized in place
"""
nPossible = dataSet.GetNPossibleVals()[-1]
nPts = dataSet.GetNPts()
if shuffle:
if runDetails:
runDetails.shuffled = 1
acts = dataSet.GetResults()[:]
random.shuffle(acts, random=random.random)
else:
if runDetails:
runDetails.randomized = 1
    # random.randint is inclusive at both ends, so draw each class label
    # from [0, nPossible - 1], one per data point
    acts = [random.randint(0, nPossible - 1) for _ in range(nPts)]
for i in range(nPts):
tmp = dataSet[i]
tmp[-1] = acts[i]
dataSet[i] = tmp
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest, sys
return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
import sys
failed, tried = _test()
sys.exit(failed)
|
jandom/rdkit
|
rdkit/ML/Data/DataUtils.py
|
Python
|
bsd-3-clause
| 17,580
|
from spider import Spider
from lxml import etree
import html5lib
import url as moz_url
import traceback, urlparse, threading, Queue
from multiprocessing import JoinableQueue, Event, Process
def is_html(response):
if not response:
return False
return 'html' in response.headers.get('Content-Type', 'text/html').lower()
class GhostSpider(Spider):
crawl_requires_gevent = False
queue_class = JoinableQueue
def _scrape_page(self, url, ghost):
uurl = url.utf8()
print "Scraping %s..." % uurl
page, resources = ghost.open(uurl)
ghost.wait_for_page_loaded()
if is_html(page):
parsed = html5lib.parse(str(page.content), treebuilder='lxml', namespaceHTMLElements=False)
links = parsed.xpath("//a[@href]")
for link in links:
new_link = moz_url.parse(urlparse.urljoin(url.utf8(), link.attrib['href']))
new_link._fragment = None
new_link._userinfo = None
self._add_to_queue(new_link.canonical())
# mark this one as processed
with self._scraper.cache_storage._conn as conn:
conn.execute("UPDATE seen SET processed = 1 WHERE key = ?", (uurl,))
def _crawl_worker(self):
from ghost import Ghost
ghost = Ghost()
while True:
item = self._queue.get()
try:
self._scrape_page(item, ghost)
            except Exception:
traceback.print_exc()
finally:
self._queue.task_done()
def crawl(self):
for i in range(self._worker_count):
worker = Process(target=self._crawl_worker)
self._workers.append(worker)
worker.start()
# initialize after spawn for the MP version because weird stuff seems to happen if we make any requests before spawning
self._initialize_crawl()
self._queue.join()
# clean up workers
for worker in self._workers:
worker.terminate()
self._workers = []
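# Hedged usage sketch: the constructor arguments below (start URL, worker
# count) are assumptions about the base Spider class in the `spider` module,
# which is not shown here.
#
#   spider = GhostSpider("http://example.com/", worker_count=4)
#   spider.crawl()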
|
sunlightlabs/nanospider
|
nanospider/ghost_spider.py
|
Python
|
bsd-3-clause
| 2,066
|
import functools
import numpy as np
from scipy.stats import norm as ndist
import regreg.api as rr
from selection.tests.instance import gaussian_instance
from selection.learning.utils import full_model_inference, pivot_plot
from selection.learning.core import split_sampler, gbm_fit_sk
def simulate(n=200, p=100, s=10, signal=(0.5, 1), sigma=2, alpha=0.1,
             B=2000):  # B (assumed default): number of learning samples
# description of statistical problem
X, y, truth = gaussian_instance(n=n,
p=p,
s=s,
equicorrelated=False,
rho=0.5,
sigma=sigma,
signal=signal,
random_signs=True,
scale=False)[:3]
dispersion = sigma**2
S = X.T.dot(y)
covS = dispersion * X.T.dot(X)
splitting_sampler = split_sampler(X * y[:, None], covS)
def meta_algorithm(XTX, XTXi, lam, sampler):
p = XTX.shape[0]
success = np.zeros(p)
loss = rr.quadratic_loss((p,), Q=XTX)
pen = rr.l1norm(p, lagrange=lam)
scale = 0.
noisy_S = sampler(scale=scale)
loss.quadratic = rr.identity_quadratic(0, 0, -noisy_S, 0)
problem = rr.simple_problem(loss, pen)
soln = problem.solve(max_its=100, tol=1.e-10)
success += soln != 0
return set(np.nonzero(success)[0])
XTX = X.T.dot(X)
XTXi = np.linalg.inv(XTX)
resid = y - X.dot(XTXi.dot(X.T.dot(y)))
dispersion = np.linalg.norm(resid)**2 / (n-p)
lam = 4. * np.sqrt(n)
selection_algorithm = functools.partial(meta_algorithm, XTX, XTXi, lam)
# run selection algorithm
return full_model_inference(X,
y,
truth,
selection_algorithm,
splitting_sampler,
success_params=(1, 1),
B=B,
fit_probability=gbm_fit_sk,
fit_args={'n_estimators':1000})
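# Hedged usage sketch (relies on the B keyword assumed in the signature above):
# one call runs selection plus inference and is expected to return a pandas
# DataFrame of pivots, or None when nothing was selected.
#
#   df = simulate(n=200, p=100, B=2000)
#   if df is not None:
#       print(df['pivot'].describe())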
if __name__ == "__main__":
import statsmodels.api as sm
import matplotlib.pyplot as plt
import pandas as pd
U = np.linspace(0, 1, 101)
plt.clf()
for i in range(500):
df = simulate()
csvfile = 'lasso_multi_gbm_sk.csv'
outbase = csvfile[:-4]
if df is not None and i > 0:
try: # concatenate to disk
df = pd.concat([df, pd.read_csv(csvfile)])
except FileNotFoundError:
pass
df.to_csv(csvfile, index=False)
if len(df['pivot']) > 0:
pivot_ax, length_ax = pivot_plot(df, outbase)
|
selective-inference/selective-inference
|
doc/learning_examples/multi_target/lasso_example_multi_gbm_sk.py
|
Python
|
bsd-3-clause
| 2,838
|
#! /usr/bin/env python
# Copyright (c) 2018, Rethink Robotics Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
from intera_core_msgs.msg import InteractionControlCommand
from .interaction_options import InteractionOptions
import intera_interface
from intera_interface import CHECK_VERSION
class InteractionPublisher(object):
"""
ROS publisher for sending command messages to robot
"""
def __init__(self):
"""
Constructor - creates a publisher for interaction_control_command
        Note that the program may need to sleep for 0.5 seconds before the
publisher is established.
"""
self.pub = rospy.Publisher('/robot/limb/right/interaction_control_command',
InteractionControlCommand, queue_size=1,
tcp_nodelay=True)
self.enable = intera_interface.RobotEnable(CHECK_VERSION)
def send_command(self, msg, pub_rate):
"""
@param msg: either an InteractionControlCommand message or
InteractionOptions object to be published
@param pub_rate: the rate in Hz to publish the command
        Note that this function blocks for non-zero pub_rates until the
        node is shut down (e.g. via ctrl+c) or the robot is disabled.
        A pub_rate of zero will publish the message once and return.
"""
repeat = False
if pub_rate > 0:
rate = rospy.Rate(pub_rate)
repeat = True
        elif pub_rate < 0:
            rospy.logerr('Invalid publish rate!')
            return
if isinstance(msg, InteractionOptions):
msg = msg.to_msg()
try:
self.pub.publish(msg)
while repeat and not rospy.is_shutdown() and self.enable.state().enabled:
rate.sleep()
self.pub.publish(msg)
except rospy.ROSInterruptException:
rospy.logerr('Keyboard interrupt detected from the user. %s',
'Exiting the node...')
finally:
if repeat:
self.send_position_mode_cmd()
def send_position_mode_cmd(self):
'''
Send a message to put the robot back into position mode
'''
position_mode = InteractionOptions()
position_mode.set_interaction_control_active(False)
self.pub.publish(position_mode.to_msg())
rospy.loginfo('Sending position command')
rospy.sleep(0.5)
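# Hedged usage sketch (assumes a running ROS master and an initialized node;
# InteractionOptions defaults come from interaction_options.py, not shown
# here):
#
#   rospy.init_node('interaction_example')
#   pub = InteractionPublisher()
#   rospy.sleep(0.5)  # let the publisher connect, per the __init__ note
#   pub.send_command(InteractionOptions(), pub_rate=10)  # blocks until shutdown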
|
RethinkRobotics/intera_sdk
|
intera_interface/src/intera_motion_interface/interaction_publisher.py
|
Python
|
apache-2.0
| 2,984
|
# Copyright 2009-2015 Canonical
# Copyright 2015-2018 Chicharreros (https://launchpad.net/~chicharreros)
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""Pretty API for protocol client."""
from __future__ import with_statement
import logging
import os
import sys
import shutil
import time
import uuid
import zlib
from cStringIO import StringIO
from logging.handlers import RotatingFileHandler
from Queue import Queue
from threading import Lock
from dirspec.basedir import xdg_cache_home
from magicicadaprotocol import request, volumes
from magicicadaprotocol.content_hash import crc32
from magicicadaprotocol.context import get_ssl_context
from magicicadaprotocol.client import (
StorageClientFactory, StorageClient)
from magicicadaprotocol.delta import DIRECTORY as delta_DIR
from magicicadaprotocol.dircontent_pb2 import DIRECTORY, FILE
from twisted.internet import reactor, defer
from twisted.internet.defer import inlineCallbacks, returnValue
from magicicada.u1sync.genericmerge import MergeNode
from magicicada.u1sync.utils import should_sync
CONSUMER_KEY = "ubuntuone"
CONSUMER_SECRET = "hammertime"
u1sync_log_dir = os.path.join(xdg_cache_home, 'u1sync', 'log')
LOGFILENAME = os.path.join(u1sync_log_dir, 'u1sync.log')
if not os.path.exists(u1sync_log_dir):
os.makedirs(u1sync_log_dir)
u1_logger = logging.getLogger("u1sync.timing.log")
handler = RotatingFileHandler(LOGFILENAME)
u1_logger.addHandler(handler)
def share_str(share_uuid):
"""Converts a share UUID to a form the protocol likes."""
return str(share_uuid) if share_uuid is not None else request.ROOT
def log_timing(func):
def wrapper(*arg, **kwargs):
start = time.time()
ent = func(*arg, **kwargs)
stop = time.time()
        u1_logger.debug('for %s %0.5f ms elapsed',
                        func.func_name, (stop - start) * 1000.0)
return ent
return wrapper
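# Hedged usage sketch: log_timing can wrap any callable; the elapsed
# wall-clock time is written to the rotating u1sync.timing.log set up above.
#
#   @log_timing
#   def slow_operation():
#       time.sleep(0.1)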
class ForcedShutdown(Exception):
"""Client shutdown forced."""
class Waiter(object):
"""Wait object for blocking waits."""
def __init__(self):
"""Initializes the wait object."""
self.queue = Queue()
def wake(self, result):
"""Wakes the waiter with a result."""
self.queue.put((result, None))
def wakeAndRaise(self, exc_info):
"""Wakes the waiter, raising the given exception in it."""
self.queue.put((None, exc_info))
def wakeWithResult(self, func, *args, **kw):
"""Wakes the waiter with the result of the given function."""
try:
result = func(*args, **kw)
except Exception:
self.wakeAndRaise(sys.exc_info())
else:
self.wake(result)
def wait(self):
"""Waits for wakeup."""
(result, exc_info) = self.queue.get()
if exc_info:
try:
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None
else:
return result
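# Hedged usage sketch: a Waiter bridges the reactor thread and a blocking
# caller thread, which is essentially how defer_from_thread() below uses it
# (`some_function` is a placeholder).
#
#   waiter = Waiter()
#   reactor.callFromThread(waiter.wakeWithResult, some_function)
#   result = waiter.wait()  # blocks until the reactor thread wakes us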
class SyncStorageClient(StorageClient):
"""Simple client that calls a callback on connection."""
@log_timing
def connectionMade(self):
"""Setup and call callback."""
StorageClient.connectionMade(self)
if self.factory.current_protocol not in (None, self):
self.factory.current_protocol.transport.loseConnection()
self.factory.current_protocol = self
self.factory.observer.connected()
@log_timing
def connectionLost(self, reason=None):
"""Callback for established connection lost."""
StorageClient.connectionLost(self, reason)
if self.factory.current_protocol is self:
self.factory.current_protocol = None
self.factory.observer.disconnected(reason)
class SyncClientFactory(StorageClientFactory):
"""A cmd protocol factory."""
protocol = SyncStorageClient
@log_timing
def __init__(self, observer):
"""Create the factory"""
self.observer = observer
self.current_protocol = None
@log_timing
def clientConnectionFailed(self, connector, reason):
"""We failed at connecting."""
self.current_protocol = None
self.observer.connection_failed(reason)
class UnsupportedOperationError(Exception):
"""The operation is unsupported by the protocol version."""
class ConnectionError(Exception):
"""A connection error."""
class AuthenticationError(Exception):
"""An authentication error."""
class NoSuchShareError(Exception):
"""Error when there is no such share available."""
class CapabilitiesError(Exception):
"""A capabilities set/query related error."""
class Client(object):
"""U1 storage client facade."""
required_caps = frozenset([
"no-content", "account-info", "resumable-uploads",
"fix462230", "volumes", "generations",
])
def __init__(self, realm=None, reactor=reactor):
"""Create the instance.
'realm' is no longer used, but is left as param for API compatibility.
"""
self.reactor = reactor
self.factory = SyncClientFactory(self)
self._status_lock = Lock()
self._status = "disconnected"
self._status_reason = None
self._status_waiting = []
self._active_waiters = set()
self.consumer_key = CONSUMER_KEY
self.consumer_secret = CONSUMER_SECRET
def force_shutdown(self):
"""Forces the client to shut itself down."""
with self._status_lock:
self._status = "forced_shutdown"
            self._status_reason = None
for waiter in self._active_waiters:
waiter.wakeAndRaise((ForcedShutdown("Forced shutdown"),
None, None))
self._active_waiters.clear()
def _get_waiter_locked(self):
"""Gets a wait object for blocking waits. Should be called with the
status lock held.
"""
waiter = Waiter()
if self._status == "forced_shutdown":
raise ForcedShutdown("Forced shutdown")
self._active_waiters.add(waiter)
return waiter
def _get_waiter(self):
"""Get a wait object for blocking waits. Acquires the status lock."""
with self._status_lock:
return self._get_waiter_locked()
def _wait(self, waiter):
"""Waits for the waiter."""
try:
return waiter.wait()
finally:
with self._status_lock:
if waiter in self._active_waiters:
self._active_waiters.remove(waiter)
@log_timing
def _change_status(self, status, reason=None):
"""Changes the client status. Usually called from the reactor
thread.
"""
with self._status_lock:
if self._status == "forced_shutdown":
return
self._status = status
self._status_reason = reason
waiting = self._status_waiting
self._status_waiting = []
for waiter in waiting:
waiter.wake((status, reason))
@log_timing
def _await_status_not(self, *ignore_statuses):
"""Blocks until the client status changes, returning the new status.
Should never be called from the reactor thread.
"""
with self._status_lock:
status = self._status
reason = self._status_reason
while status in ignore_statuses:
waiter = self._get_waiter_locked()
self._status_waiting.append(waiter)
self._status_lock.release()
try:
status, reason = self._wait(waiter)
finally:
self._status_lock.acquire()
if status == "forced_shutdown":
raise ForcedShutdown("Forced shutdown.")
return (status, reason)
def connection_failed(self, reason):
"""Notification that connection failed."""
self._change_status("disconnected", reason)
def connected(self):
"""Notification that connection succeeded."""
self._change_status("connected")
def disconnected(self, reason):
"""Notification that we were disconnected."""
self._change_status("disconnected", reason)
def defer_from_thread(self, function, *args, **kwargs):
"""Do twisted defer magic to get results and show exceptions."""
waiter = self._get_waiter()
@log_timing
def runner():
"""inner."""
try:
d = function(*args, **kwargs)
if isinstance(d, defer.Deferred):
d.addCallbacks(lambda r: waiter.wake((r, None, None)),
lambda f: waiter.wake((None, None, f)))
else:
waiter.wake((d, None, None))
except Exception:
waiter.wake((None, sys.exc_info(), None))
self.reactor.callFromThread(runner)
result, exc_info, failure = self._wait(waiter)
if exc_info:
try:
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None
elif failure:
failure.raiseException()
else:
return result
@log_timing
def connect(self, host, port):
"""Connect to host/port."""
def _connect():
"""Deferred part."""
self.reactor.connectTCP(host, port, self.factory)
self._connect_inner(_connect)
@log_timing
def connect_ssl(self, host, port, no_verify):
"""Connect to host/port using ssl."""
def _connect():
"""deferred part."""
ctx = get_ssl_context(no_verify, host)
self.reactor.connectSSL(host, port, self.factory, ctx)
self._connect_inner(_connect)
@log_timing
def _connect_inner(self, _connect):
"""Helper function for connecting."""
self._change_status("connecting")
self.reactor.callFromThread(_connect)
status, reason = self._await_status_not("connecting")
if status != "connected":
raise ConnectionError(reason.value)
@log_timing
def disconnect(self):
"""Disconnect."""
if self.factory.current_protocol is not None:
self.reactor.callFromThread(
self.factory.current_protocol.transport.loseConnection)
self._await_status_not("connecting", "connected", "authenticated")
@log_timing
def simple_auth(self, username, password):
"""Perform simple authorisation."""
@inlineCallbacks
def _wrapped_authenticate():
"""Wrapped authenticate."""
try:
yield self.factory.current_protocol.simple_authenticate(
username, password)
except Exception:
self.factory.current_protocol.transport.loseConnection()
else:
self._change_status("authenticated")
try:
self.defer_from_thread(_wrapped_authenticate)
except request.StorageProtocolError as e:
raise AuthenticationError(e)
status, reason = self._await_status_not("connected")
if status != "authenticated":
raise AuthenticationError(reason.value)
@log_timing
def set_capabilities(self):
"""Set the capabilities with the server"""
client = self.factory.current_protocol
@log_timing
        def set_caps_callback(req):
            "Caps set succeeded"
            if not req.accepted:
                # defer.fail() needs an exception or Failure, not a string
                return defer.fail(Exception(
                    "The server denied setting %s capabilities" % req.caps))
        @log_timing
        def query_caps_callback(req):
            "Caps query succeeded"
            if req.accepted:
                set_d = client.set_caps(self.required_caps)
                set_d.addCallback(set_caps_callback)
                return set_d
            else:
                # The server doesn't have the requested capabilities.
                # Return a failure for now; in the future we might want
                # to reconnect to another server.
                return defer.fail(Exception(
                    "The server doesn't have the requested"
                    " capabilities: %s" % str(req.caps)))
@log_timing
def _wrapped_set_capabilities():
"""Wrapped set_capabilities """
d = client.query_caps(self.required_caps)
d.addCallback(query_caps_callback)
return d
try:
self.defer_from_thread(_wrapped_set_capabilities)
except request.StorageProtocolError as e:
raise CapabilitiesError(e)
@log_timing
def get_root_info(self, volume_uuid):
"""Returns the UUID of the applicable share root."""
if volume_uuid is None:
_get_root = self.factory.current_protocol.get_root
root = self.defer_from_thread(_get_root)
return (uuid.UUID(root), True)
else:
str_volume_uuid = str(volume_uuid)
volume = self._match_volume(
lambda v: str(v.volume_id) == str_volume_uuid)
            # UDFs are always modifiable; for shares it depends on the
            # access level. Default to read-only for anything else.
            modify = False
            if isinstance(volume, volumes.ShareVolume):
                modify = volume.access_level == "Modify"
            if isinstance(volume, volumes.UDFVolume):
                modify = True
            return (uuid.UUID(str(volume.node_id)), modify)
@log_timing
def resolve_path(self, share_uuid, root_uuid, path):
"""Resolve path relative to the given root node."""
        @inlineCallbacks
        def _resolve_worker():
            """Path resolution worker."""
            # Walk the path components from the root downwards, resolving
            # one directory entry at a time against the current node.
            node_uuid = root_uuid
            for name in path.strip('/').split('/'):
                if name == '':
                    continue
                hashes = yield self._get_node_hashes(share_uuid)
                if hashes.get(node_uuid, None) is None:
                    raise KeyError("Content hash not available")
                entries = yield self._get_dir_entries(share_uuid, node_uuid)
                match_name = name.decode('utf-8')
                match = None
                for entry in entries:
                    if match_name == entry.name:
                        match = entry
                        break
                if match is None:
                    raise KeyError("Path not found")
                node_uuid = uuid.UUID(match.node)
            returnValue(node_uuid)
return self.defer_from_thread(_resolve_worker)
@log_timing
def find_volume(self, volume_spec):
"""Finds a share matching the given UUID. Looks at both share UUIDs
and root node UUIDs."""
def match(s):
return (str(s.volume_id) == volume_spec or
str(s.node_id) == volume_spec)
volume = self._match_volume(match)
return uuid.UUID(str(volume.volume_id))
@log_timing
def _match_volume(self, predicate):
"""Finds a volume matching the given predicate."""
_list_shares = self.factory.current_protocol.list_volumes
r = self.defer_from_thread(_list_shares)
for volume in r.volumes:
if predicate(volume):
return volume
raise NoSuchShareError()
@log_timing
def build_tree(self, share_uuid, root_uuid):
"""Builds and returns a tree representing the metadata for the given
subtree in the given share.
@param share_uuid: the share UUID or None for the user's volume
@param root_uuid: the root UUID of the subtree (must be a directory)
@return: a MergeNode tree
"""
root = MergeNode(node_type=DIRECTORY, uuid=root_uuid)
@log_timing
@inlineCallbacks
def _get_root_content_hash():
"""Obtain the content hash for the root node."""
result = yield self._get_node_hashes(share_uuid)
returnValue(result.get(root_uuid, None))
root.content_hash = self.defer_from_thread(_get_root_content_hash)
if root.content_hash is None:
raise ValueError("No content available for node %s" % root_uuid)
@log_timing
@inlineCallbacks
def _get_children(parent_uuid, parent_content_hash):
"""Obtain a sequence of MergeNodes corresponding to a node's
immediate children.
"""
entries = yield self._get_dir_entries(share_uuid, parent_uuid)
children = {}
for entry in entries:
if should_sync(entry.name):
child = MergeNode(node_type=entry.node_type,
uuid=uuid.UUID(entry.node))
children[entry.name] = child
content_hashes = yield self._get_node_hashes(share_uuid)
for child in children.itervalues():
child.content_hash = content_hashes.get(child.uuid, None)
returnValue(children)
need_children = [root]
while need_children:
node = need_children.pop()
if node.content_hash is not None:
children = self.defer_from_thread(_get_children, node.uuid,
node.content_hash)
node.children = children
for child in children.itervalues():
if child.node_type == DIRECTORY:
need_children.append(child)
return root
@log_timing
@defer.inlineCallbacks
def _get_dir_entries(self, share_uuid, node_uuid):
"""Get raw dir entries for the given directory."""
result = yield self.factory.current_protocol.get_delta(
share_str(share_uuid), from_scratch=True)
node_uuid = share_str(node_uuid)
children = []
for n in result.response:
if n.parent_id == node_uuid:
# adapt here some attrs so we don't need to change ALL the code
n.node_type = DIRECTORY if n.file_type == delta_DIR else FILE
n.node = n.node_id
children.append(n)
defer.returnValue(children)
@log_timing
def download_string(self, share_uuid, node_uuid, content_hash):
"""Reads a file from the server into a string."""
output = StringIO()
self._download_inner(share_uuid=share_uuid, node_uuid=node_uuid,
content_hash=content_hash, output=output)
        return output.getvalue()
@log_timing
def download_file(self, share_uuid, node_uuid, content_hash, filename):
"""Downloads a file from the server."""
partial_filename = "%s.u1partial" % filename
output = open(partial_filename, "w")
@log_timing
def rename_file():
"""Renames the temporary file to the final name."""
output.close()
os.rename(partial_filename, filename)
@log_timing
def delete_file():
"""Deletes the temporary file."""
output.close()
os.remove(partial_filename)
self._download_inner(share_uuid=share_uuid, node_uuid=node_uuid,
content_hash=content_hash, output=output,
on_success=rename_file, on_failure=delete_file)
@log_timing
def _download_inner(self, share_uuid, node_uuid, content_hash, output,
on_success=lambda: None, on_failure=lambda: None):
"""Helper function for content downloads."""
dec = zlib.decompressobj()
@log_timing
def write_data(data):
"""Helper which writes data to the output file."""
uncompressed_data = dec.decompress(data)
output.write(uncompressed_data)
@log_timing
def finish_download(value):
"""Helper which finishes the download."""
uncompressed_data = dec.flush()
output.write(uncompressed_data)
on_success()
return value
@log_timing
def abort_download(value):
"""Helper which aborts the download."""
on_failure()
return value
@log_timing
def _download():
"""Async helper."""
_get_content = self.factory.current_protocol.get_content
d = _get_content(share_str(share_uuid), str(node_uuid),
content_hash, callback=write_data)
d.addCallbacks(finish_download, abort_download)
return d
self.defer_from_thread(_download)
@log_timing
def create_directory(self, share_uuid, parent_uuid, name):
"""Creates a directory on the server."""
r = self.defer_from_thread(self.factory.current_protocol.make_dir,
share_str(share_uuid), str(parent_uuid),
name)
return uuid.UUID(r.new_id)
@log_timing
def create_file(self, share_uuid, parent_uuid, name):
"""Creates a file on the server."""
r = self.defer_from_thread(self.factory.current_protocol.make_file,
share_str(share_uuid), str(parent_uuid),
name)
return uuid.UUID(r.new_id)
@log_timing
def create_symlink(self, share_uuid, parent_uuid, name, target):
"""Creates a symlink on the server."""
raise UnsupportedOperationError("Protocol does not support symlinks")
@log_timing
def upload_string(self, share_uuid, node_uuid, old_content_hash,
content_hash, content):
"""Uploads a string to the server as file content."""
crc = crc32(content, 0)
compressed_content = zlib.compress(content, 9)
compressed = StringIO(compressed_content)
self.defer_from_thread(self.factory.current_protocol.put_content,
share_str(share_uuid), str(node_uuid),
old_content_hash, content_hash,
crc, len(content), len(compressed_content),
compressed)
@log_timing
def upload_file(self, share_uuid, node_uuid, old_content_hash,
content_hash, filename):
"""Uploads a file to the server."""
parent_dir = os.path.split(filename)[0]
unique_filename = os.path.join(parent_dir, "." + str(uuid.uuid4()))
class StagingFile(object):
"""An object which tracks data being compressed for staging."""
def __init__(self, stream):
"""Initialize a compression object."""
self.crc32 = 0
self.enc = zlib.compressobj(9)
self.size = 0
self.compressed_size = 0
self.stream = stream
def write(self, bytes):
"""Compress bytes, keeping track of length and crc32."""
self.size += len(bytes)
self.crc32 = crc32(bytes, self.crc32)
compressed_bytes = self.enc.compress(bytes)
self.compressed_size += len(compressed_bytes)
self.stream.write(compressed_bytes)
def finish(self):
"""Finish staging compressed data."""
compressed_bytes = self.enc.flush()
self.compressed_size += len(compressed_bytes)
self.stream.write(compressed_bytes)
with open(unique_filename, "w+") as compressed:
os.remove(unique_filename)
with open(filename, "r") as original:
staging = StagingFile(compressed)
shutil.copyfileobj(original, staging)
staging.finish()
compressed.seek(0)
self.defer_from_thread(self.factory.current_protocol.put_content,
share_str(share_uuid), str(node_uuid),
old_content_hash, content_hash,
staging.crc32,
staging.size, staging.compressed_size,
compressed)
@log_timing
def move(self, share_uuid, parent_uuid, name, node_uuid):
"""Moves a file on the server."""
self.defer_from_thread(self.factory.current_protocol.move,
share_str(share_uuid), str(node_uuid),
str(parent_uuid), name)
@log_timing
def unlink(self, share_uuid, node_uuid):
"""Unlinks a file on the server."""
self.defer_from_thread(self.factory.current_protocol.unlink,
share_str(share_uuid), str(node_uuid))
@log_timing
@defer.inlineCallbacks
def _get_node_hashes(self, share_uuid):
"""Fetches hashes for the given nodes."""
result = yield self.factory.current_protocol.get_delta(
share_str(share_uuid), from_scratch=True)
hashes = {}
for fid in result.response:
node_uuid = uuid.UUID(fid.node_id)
hashes[node_uuid] = fid.content_hash
defer.returnValue(hashes)
@log_timing
def get_incoming_shares(self):
"""Returns a list of incoming shares as (name, uuid, accepted)
tuples.
"""
_list_shares = self.factory.current_protocol.list_shares
r = self.defer_from_thread(_list_shares)
return [(s.name, s.id, s.other_visible_name,
s.accepted, s.access_level)
for s in r.shares if s.direction == "to_me"]
|
magicicada-bot/magicicada-server
|
magicicada/u1sync/client.py
|
Python
|
agpl-3.0
| 26,330
|
"""
Views and functions for serving static files. These are only to be used
during development, and SHOULD NOT be used in a production setting.
"""
from __future__ import unicode_literals
import mimetypes
import os
import stat
import posixpath
import re
import urllib
from django.http import Http404, HttpResponse, HttpResponseRedirect, HttpResponseNotModified
from django.template import loader, Template, Context, TemplateDoesNotExist
from django.utils.http import http_date, parse_http_date
from django.utils.translation import ugettext as _, ugettext_noop
def serve(request, path, document_root=None, show_indexes=False):
"""
Serve static files below a given point in the directory structure.
To use, put a URL pattern such as::
(r'^(?P<path>.*)$', 'django.views.static.serve', {'document_root' : '/path/to/my/files/'})
in your URLconf. You must provide the ``document_root`` param. You may
also set ``show_indexes`` to ``True`` if you'd like to serve a basic index
of the directory. This index view will use the template hardcoded below,
but if you'd like to override it, you can create a template called
``static/directory_index.html``.
"""
path = posixpath.normpath(urllib.unquote(path))
path = path.lstrip('/')
newpath = ''
for part in path.split('/'):
if not part:
# Strip empty path components.
continue
drive, part = os.path.splitdrive(part)
head, part = os.path.split(part)
if part in (os.curdir, os.pardir):
# Strip '.' and '..' in path.
continue
newpath = os.path.join(newpath, part).replace('\\', '/')
if newpath and path != newpath:
return HttpResponseRedirect(newpath)
fullpath = os.path.join(document_root, newpath)
if os.path.isdir(fullpath):
if show_indexes:
return directory_index(newpath, fullpath)
raise Http404(_("Directory indexes are not allowed here."))
if not os.path.exists(fullpath):
raise Http404(_('"%(path)s" does not exist') % {'path': fullpath})
# Respect the If-Modified-Since header.
statobj = os.stat(fullpath)
mimetype, encoding = mimetypes.guess_type(fullpath)
mimetype = mimetype or 'application/octet-stream'
if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
statobj.st_mtime, statobj.st_size):
return HttpResponseNotModified(content_type=mimetype)
with open(fullpath, 'rb') as f:
response = HttpResponse(f.read(), content_type=mimetype)
response["Last-Modified"] = http_date(statobj.st_mtime)
if stat.S_ISREG(statobj.st_mode):
response["Content-Length"] = statobj.st_size
if encoding:
response["Content-Encoding"] = encoding
return response
DEFAULT_DIRECTORY_INDEX_TEMPLATE = """
{% load i18n %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8" />
<meta http-equiv="Content-Language" content="en-us" />
<meta name="robots" content="NONE,NOARCHIVE" />
<title>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</title>
</head>
<body>
<h1>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</h1>
<ul>
{% ifnotequal directory "/" %}
<li><a href="../">../</a></li>
{% endifnotequal %}
{% for f in file_list %}
<li><a href="{{ f|urlencode }}">{{ f }}</a></li>
{% endfor %}
</ul>
</body>
</html>
"""
template_translatable = ugettext_noop("Index of %(directory)s")
def directory_index(path, fullpath):
try:
t = loader.select_template(['static/directory_index.html',
'static/directory_index'])
except TemplateDoesNotExist:
t = Template(DEFAULT_DIRECTORY_INDEX_TEMPLATE, name='Default directory index template')
files = []
for f in os.listdir(fullpath):
if not f.startswith('.'):
if os.path.isdir(os.path.join(fullpath, f)):
f += '/'
files.append(f)
c = Context({
'directory' : path + '/',
'file_list' : files,
})
return HttpResponse(t.render(c))
def was_modified_since(header=None, mtime=0, size=0):
"""
Was something modified since the user last downloaded it?
header
This is the value of the If-Modified-Since header. If this is None,
I'll just return True.
mtime
This is the modification time of the item we're talking about.
size
This is the size of the item we're talking about.
"""
try:
if header is None:
raise ValueError
matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header,
re.IGNORECASE)
header_mtime = parse_http_date(matches.group(1))
header_len = matches.group(3)
if header_len and int(header_len) != size:
raise ValueError
if mtime > header_mtime:
raise ValueError
except (AttributeError, ValueError, OverflowError):
return True
return False
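# Hedged usage sketch: 1445412480 is the epoch value of the header date below,
# so an up-to-date client copy yields False (serve a 304 instead of the file).
#
#   was_modified_since(None)  # True: no header, always resend
#   was_modified_since('Wed, 21 Oct 2015 07:28:00 GMT; length=10',
#                      mtime=1445412480, size=10)  # False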
|
rebost/django
|
django/views/static.py
|
Python
|
bsd-3-clause
| 5,092
|
#!/usr/bin/env python
import urllib2
from time import sleep
# test that requests sent at the configured rate get banned
print "test that the desired rate gets banned"
req = urllib2.Request("http://127.0.0.1:8080/firstrate")
res = None
status = []
expected_status = [404]*3 + [403]
times = 4
total_interval = 35.0
interval = total_interval/times  # seconds to wait between requests
for i in range(0, times):
    sleep(interval)
try:
res = urllib2.urlopen(req)
status.append(res.getcode())
except urllib2.HTTPError, error:
status.append(error.getcode())
assert status == expected_status, "Expected " + str(expected_status) + " but got status " + str(status)
# print "fetch captcha and cookie"
# req = urllib2.Request("http://127.0.0.1/__captcha")
# res = None
# status = -1
# res = urllib2.urlopen(req)
# status = res.getcode()
# cookie = res.info()['Set-Cookie'][len("deflect="):][:-1*len("; path=/; HttpOnly")]
# assert status == 200, "Captcha should return 200: %d" % status
# assert len(cookie) > 0, "Captcha should return a cookie: %d" % status
# print "validate"
# req = urllib2.Request("http://127.0.0.1/__validate/aaaaa", headers={"Cookie" : "deflect=%s" % cookie})
# res = None
# status = -1
# res = urllib2.urlopen(req)
# status = res.getcode()
# cookie = res.info()['Set-Cookie'][len("deflect="):][:-1*len("; path=/; HttpOnly")]
# assert status == 200, "validate should return 200: %d" % status
# assert len(cookie) > 0, "validate should return a cookie: %d" % status
# print "ensure we are authorized with the cookie we got from the validator"
# req = urllib2.Request("http://127.0.0.1", headers={"Cookie" : "deflect=%s" % cookie})
# res = None
# status = -1
# res = urllib2.urlopen(req)
# status = res.getcode()
# cookie = res.info()['Set-Cookie'][len("deflect="):][:-1*len("; path=/; HttpOnly")]
# assert status >= 200 and status < 400, "After validation, we should not receive an error code" % status
|
equalitie/banjax
|
test/regex_rate_test.py
|
Python
|
agpl-3.0
| 1,920
|
"""
This module describes how to manually train and test an algorithm without using
the evaluate() function.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from surprise import BaselineOnly
from surprise import Dataset
from surprise import accuracy
# Load the movielens-100k dataset and split it into 3 folds for
# cross-validation.
data = Dataset.load_builtin('ml-100k')
data.split(n_folds=3)
algo = BaselineOnly()
for trainset, testset in data.folds():
# train and test algorithm.
algo.train(trainset)
predictions = algo.test(testset)
# Compute and print Root Mean Squared Error
rmse = accuracy.rmse(predictions, verbose=True)
|
charmoniumQ/Surprise
|
examples/iterate_over_folds.py
|
Python
|
bsd-3-clause
| 721
|
"""
========================
Random Number Generation
========================
Use ``default_rng()`` to create a `Generator` and call its methods.
=============== =========================================================
Generator
--------------- ---------------------------------------------------------
Generator Class implementing all of the random number distributions
default_rng Default constructor for ``Generator``
=============== =========================================================
============================================= ===
BitGenerator Streams that work with Generator
--------------------------------------------- ---
MT19937
PCG64
Philox
SFC64
============================================= ===
============================================= ===
Getting entropy to initialize a BitGenerator
--------------------------------------------- ---
SeedSequence
============================================= ===
Legacy
------
For backwards compatibility with previous versions of numpy before 1.17, the
various aliases to the global `RandomState` methods are left alone and do not
use the new `Generator` API.
==================== =========================================================
Utility functions
-------------------- ---------------------------------------------------------
random Uniformly distributed floats over ``[0, 1)``
bytes Uniformly distributed random bytes.
permutation Randomly permute a sequence / generate a random sequence.
shuffle Randomly permute a sequence in place.
choice Random sample from 1-D array.
==================== =========================================================
==================== =========================================================
Compatibility
functions - removed
in the new API
-------------------- ---------------------------------------------------------
rand Uniformly distributed values.
randn Normally distributed values.
ranf Uniformly distributed floating point numbers.
random_integers Uniformly distributed integers in a given range.
(deprecated, use ``integers(..., closed=True)`` instead)
random_sample        Uniformly distributed floats over ``[0, 1)``.
randint Uniformly distributed integers in a given range
seed Seed the legacy random number generator.
==================== =========================================================
==================== =========================================================
Univariate
distributions
-------------------- ---------------------------------------------------------
beta Beta distribution over ``[0, 1]``.
binomial Binomial distribution.
chisquare :math:`\\chi^2` distribution.
exponential Exponential distribution.
f F (Fisher-Snedecor) distribution.
gamma Gamma distribution.
geometric Geometric distribution.
gumbel Gumbel distribution.
hypergeometric Hypergeometric distribution.
laplace Laplace distribution.
logistic Logistic distribution.
lognormal Log-normal distribution.
logseries Logarithmic series distribution.
negative_binomial Negative binomial distribution.
noncentral_chisquare Non-central chi-square distribution.
noncentral_f Non-central F distribution.
normal Normal / Gaussian distribution.
pareto Pareto distribution.
poisson Poisson distribution.
power Power distribution.
rayleigh Rayleigh distribution.
triangular Triangular distribution.
uniform Uniform distribution.
vonmises Von Mises circular distribution.
wald Wald (inverse Gaussian) distribution.
weibull Weibull distribution.
zipf Zipf's distribution over ranked data.
==================== =========================================================
==================== ==========================================================
Multivariate
distributions
-------------------- ----------------------------------------------------------
dirichlet Multivariate generalization of Beta distribution.
multinomial Multivariate generalization of the binomial distribution.
multivariate_normal Multivariate generalization of the normal distribution.
==================== ==========================================================
==================== =========================================================
Standard
distributions
-------------------- ---------------------------------------------------------
standard_cauchy Standard Cauchy-Lorentz distribution.
standard_exponential Standard exponential distribution.
standard_gamma Standard Gamma distribution.
standard_normal Standard normal distribution.
standard_t Standard Student's t-distribution.
==================== =========================================================
==================== =========================================================
Internal functions
-------------------- ---------------------------------------------------------
get_state Get tuple representing internal state of generator.
set_state Set state of generator.
==================== =========================================================
"""
from __future__ import division, absolute_import, print_function
__all__ = [
'beta',
'binomial',
'bytes',
'chisquare',
'choice',
'dirichlet',
'exponential',
'f',
'gamma',
'geometric',
'get_state',
'gumbel',
'hypergeometric',
'laplace',
'logistic',
'lognormal',
'logseries',
'multinomial',
'multivariate_normal',
'negative_binomial',
'noncentral_chisquare',
'noncentral_f',
'normal',
'pareto',
'permutation',
'poisson',
'power',
'rand',
'randint',
'randn',
'random',
'random_integers',
'random_sample',
'ranf',
'rayleigh',
'sample',
'seed',
'set_state',
'shuffle',
'standard_cauchy',
'standard_exponential',
'standard_gamma',
'standard_normal',
'standard_t',
'triangular',
'uniform',
'vonmises',
'wald',
'weibull',
'zipf',
]
# add these for module-freeze analysis (like PyInstaller)
from . import _pickle
from . import _common
from . import _bounded_integers
from ._generator import Generator, default_rng
from ._bit_generator import SeedSequence, BitGenerator
from ._mt19937 import MT19937
from ._pcg64 import PCG64
from ._philox import Philox
from ._sfc64 import SFC64
from .mtrand import *
__all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937',
'Philox', 'PCG64', 'SFC64', 'default_rng', 'BitGenerator']
def __RandomState_ctor():
"""Return a RandomState instance.
This function exists solely to assist (un)pickling.
    Note that the state of the RandomState returned here is irrelevant, as this
    function's entire purpose is to return a newly allocated RandomState whose
    state the pickle machinery can then set. Consequently, the RandomState
    returned by this function is a freshly allocated instance seeded with 0.
See https://github.com/numpy/numpy/issues/4763 for a detailed discussion
"""
return RandomState(seed=0)
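# Hedged usage sketch of the new-style API summarized in the module docstring:
#
#   from numpy.random import default_rng
#   rng = default_rng(12345)       # Generator backed by the PCG64 bit generator
#   rng.random(3)                  # three floats in [0, 1)
#   rng.integers(0, 10, size=5)    # five ints drawn from [0, 10)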
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
|
jorisvandenbossche/numpy
|
numpy/random/__init__.py
|
Python
|
bsd-3-clause
| 7,527
|
from collections import OrderedDict
from datetime import timedelta
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import update_session_auth_hash
from django.core.exceptions import FieldDoesNotExist, ValidationError
from django.db import models
from django.db.models import Q
from django.http import HttpResponseRedirect
from django.utils.http import urlencode
from django.utils.timezone import now
from allauth.compat import base36_to_int, force_str, int_to_base36, six
from ..exceptions import ImmediateHttpResponse
from ..utils import (
get_request_param,
get_user_model,
import_callable,
valid_email_or_none,
)
from . import app_settings, signals
from .adapter import get_adapter
from .app_settings import EmailVerificationMethod
def get_next_redirect_url(request, redirect_field_name="next"):
"""
Returns the next URL to redirect to, if it was explicitly passed
via the request.
"""
redirect_to = get_request_param(request, redirect_field_name)
if not get_adapter(request).is_safe_url(redirect_to):
redirect_to = None
return redirect_to
def get_login_redirect_url(request, url=None, redirect_field_name="next"):
if url and callable(url):
# In order to be able to pass url getters around that depend
# on e.g. the authenticated state.
url = url()
redirect_url = (
url or
get_next_redirect_url(
request,
redirect_field_name=redirect_field_name) or
get_adapter(request).get_login_redirect_url(request))
return redirect_url
_user_display_callable = None
def logout_on_password_change(request, user):
# Since it is the default behavior of Django to invalidate all sessions on
# password change, this function actually has to preserve the session when
# logout isn't desired.
if not app_settings.LOGOUT_ON_PASSWORD_CHANGE:
update_session_auth_hash(request, user)
def default_user_display(user):
if app_settings.USER_MODEL_USERNAME_FIELD:
return getattr(user, app_settings.USER_MODEL_USERNAME_FIELD)
else:
return force_str(user)
def user_display(user):
global _user_display_callable
if not _user_display_callable:
f = getattr(settings, "ACCOUNT_USER_DISPLAY",
default_user_display)
_user_display_callable = import_callable(f)
return _user_display_callable(user)
def user_field(user, field, *args):
"""
Gets or sets (optional) user model fields. No-op if fields do not exist.
"""
if not field:
return
User = get_user_model()
try:
field_meta = User._meta.get_field(field)
max_length = field_meta.max_length
except FieldDoesNotExist:
if not hasattr(user, field):
return
max_length = None
if args:
# Setter
v = args[0]
if v:
v = v[0:max_length]
setattr(user, field, v)
else:
# Getter
return getattr(user, field)
def user_username(user, *args):
if args and not app_settings.PRESERVE_USERNAME_CASING and args[0]:
args = [args[0].lower()]
return user_field(user, app_settings.USER_MODEL_USERNAME_FIELD, *args)
def user_email(user, *args):
return user_field(user, app_settings.USER_MODEL_EMAIL_FIELD, *args)
def perform_login(request, user, email_verification,
redirect_url=None, signal_kwargs=None,
signup=False):
"""
Keyword arguments:
signup -- Indicates whether or not sending the
email is essential (during signup), or if it can be skipped (e.g. in
case email verification is optional and we are only logging in).
"""
    # Local users are normally stopped by form validation checking
    # is_active; adapter methods could still toggle is_active in a
    # `user_signed_up` signal. Furthermore, social users should be
    # stopped anyway.
adapter = get_adapter(request)
if not user.is_active:
return adapter.respond_user_inactive(request, user)
from .models import EmailAddress
has_verified_email = EmailAddress.objects.filter(user=user,
verified=True).exists()
if email_verification == EmailVerificationMethod.NONE:
pass
elif email_verification == EmailVerificationMethod.OPTIONAL:
# In case of OPTIONAL verification: send on signup.
if not has_verified_email and signup:
send_email_confirmation(request, user, signup=signup)
elif email_verification == EmailVerificationMethod.MANDATORY:
if not has_verified_email:
send_email_confirmation(request, user, signup=signup)
return adapter.respond_email_verification_sent(
request, user)
try:
adapter.login(request, user)
response = HttpResponseRedirect(
get_login_redirect_url(request, redirect_url))
if signal_kwargs is None:
signal_kwargs = {}
signals.user_logged_in.send(sender=user.__class__,
request=request,
response=response,
user=user,
**signal_kwargs)
adapter.add_message(
request,
messages.SUCCESS,
'account/messages/logged_in.txt',
{'user': user})
except ImmediateHttpResponse as e:
response = e.response
return response
def complete_signup(request, user, email_verification, success_url,
signal_kwargs=None):
if signal_kwargs is None:
signal_kwargs = {}
signals.user_signed_up.send(sender=user.__class__,
request=request,
user=user,
**signal_kwargs)
return perform_login(request, user,
email_verification=email_verification,
signup=True,
redirect_url=success_url,
signal_kwargs=signal_kwargs)
def cleanup_email_addresses(request, addresses):
"""
Takes a list of EmailAddress instances and cleans it up, making
sure only valid ones remain, without multiple primaries etc.
Order is important: e.g. if multiple primary e-mail addresses
exist, the first one encountered will be kept as primary.
"""
from .models import EmailAddress
adapter = get_adapter(request)
# Let's group by `email`
e2a = OrderedDict() # maps email to EmailAddress
primary_addresses = []
verified_addresses = []
primary_verified_addresses = []
for address in addresses:
# Pick up only valid ones...
email = valid_email_or_none(address.email)
if not email:
continue
# ... and non-conflicting ones...
if (app_settings.UNIQUE_EMAIL and
EmailAddress.objects.filter(email__iexact=email).exists()):
continue
a = e2a.get(email.lower())
if a:
a.primary = a.primary or address.primary
a.verified = a.verified or address.verified
else:
a = address
a.verified = a.verified or adapter.is_email_verified(request,
a.email)
e2a[email.lower()] = a
if a.primary:
primary_addresses.append(a)
if a.verified:
primary_verified_addresses.append(a)
if a.verified:
verified_addresses.append(a)
# Now that we got things sorted out, let's assign a primary
if primary_verified_addresses:
primary_address = primary_verified_addresses[0]
elif verified_addresses:
# Pick any verified as primary
primary_address = verified_addresses[0]
elif primary_addresses:
# Okay, let's pick primary then, even if unverified
primary_address = primary_addresses[0]
elif e2a:
# Pick the first
        primary_address = list(e2a.values())[0]
else:
# Empty
primary_address = None
# There can only be one primary
for a in e2a.values():
a.primary = primary_address.email.lower() == a.email.lower()
return list(e2a.values()), primary_address
def setup_user_email(request, user, addresses):
"""
Creates proper EmailAddress for the user that was just signed
up. Only sets up, doesn't do any other handling such as sending
out email confirmation mails etc.
"""
from .models import EmailAddress
assert not EmailAddress.objects.filter(user=user).exists()
priority_addresses = []
# Is there a stashed e-mail?
adapter = get_adapter(request)
stashed_email = adapter.unstash_verified_email(request)
if stashed_email:
priority_addresses.append(EmailAddress(user=user,
email=stashed_email,
primary=True,
verified=True))
email = user_email(user)
if email:
priority_addresses.append(EmailAddress(user=user,
email=email,
primary=True,
verified=False))
addresses, primary = cleanup_email_addresses(
request,
priority_addresses + addresses)
for a in addresses:
a.user = user
a.save()
EmailAddress.objects.fill_cache_for_user(user, addresses)
if (primary and email and email.lower() != primary.email.lower()):
user_email(user, primary.email)
user.save()
return primary
def send_email_confirmation(request, user, signup=False):
"""
E-mail verification mails are sent:
a) Explicitly: when a user signs up
b) Implicitly: when a user attempts to log in using an unverified
e-mail while EMAIL_VERIFICATION is mandatory.
Especially in case of b), we want to limit the number of mails
sent (consider a user retrying a few times), which is why there is
a cooldown period before sending a new mail. This cooldown period
can be configured in ACCOUNT_EMAIL_CONFIRMATION_COOLDOWN setting.
"""
from .models import EmailAddress, EmailConfirmation
cooldown_period = timedelta(
seconds=app_settings.EMAIL_CONFIRMATION_COOLDOWN
)
email = user_email(user)
if email:
try:
email_address = EmailAddress.objects.get_for_user(user, email)
if not email_address.verified:
if app_settings.EMAIL_CONFIRMATION_HMAC:
send_email = True
else:
send_email = not EmailConfirmation.objects.filter(
sent__gt=now() - cooldown_period,
email_address=email_address).exists()
if send_email:
email_address.send_confirmation(request,
signup=signup)
else:
send_email = False
except EmailAddress.DoesNotExist:
send_email = True
email_address = EmailAddress.objects.add_email(request,
user,
email,
signup=signup,
confirm=True)
assert email_address
# At this point, if we were supposed to send an email we have sent it.
if send_email:
get_adapter(request).add_message(
request,
messages.INFO,
'account/messages/'
'email_confirmation_sent.txt',
{'email': email})
if signup:
get_adapter(request).stash_user(request, user_pk_to_url_str(user))
def sync_user_email_addresses(user):
"""
Keep user.email in sync with user.emailaddress_set.
Under some circumstances the user.email may not have ended up as
an EmailAddress record, e.g. in the case of manually created admin
users.
"""
from .models import EmailAddress
email = user_email(user)
if email and not EmailAddress.objects.filter(user=user,
email__iexact=email).exists():
if app_settings.UNIQUE_EMAIL \
and EmailAddress.objects.filter(email__iexact=email).exists():
# Bail out
return
EmailAddress.objects.create(user=user,
email=email,
primary=False,
verified=False)
def filter_users_by_username(*username):
if app_settings.PRESERVE_USERNAME_CASING:
qlist = [
Q(**{app_settings.USER_MODEL_USERNAME_FIELD + '__iexact': u})
for u in username]
q = qlist[0]
for q2 in qlist[1:]:
q = q | q2
ret = get_user_model().objects.filter(q)
else:
ret = get_user_model().objects.filter(
**{app_settings.USER_MODEL_USERNAME_FIELD + '__in':
[u.lower() for u in username]})
return ret
def filter_users_by_email(email):
"""Return list of users by email address
Typically one, at most just a few in length. First we look through
EmailAddress table, than customisable User model table. Add results
together avoiding SQL joins and deduplicate.
"""
from .models import EmailAddress
User = get_user_model()
mails = EmailAddress.objects.filter(email__iexact=email)
users = [e.user for e in mails.prefetch_related('user')]
if app_settings.USER_MODEL_EMAIL_FIELD:
q_dict = {app_settings.USER_MODEL_EMAIL_FIELD + '__iexact': email}
users += list(User.objects.filter(**q_dict))
return list(set(users))
def passthrough_next_redirect_url(request, url, redirect_field_name):
assert url.find("?") < 0 # TODO: Handle this case properly
next_url = get_next_redirect_url(request, redirect_field_name)
if next_url:
url = url + '?' + urlencode({redirect_field_name: next_url})
return url
def user_pk_to_url_str(user):
"""
This should return a string.
"""
User = get_user_model()
if issubclass(type(User._meta.pk), models.UUIDField):
if isinstance(user.pk, six.string_types):
return user.pk
return user.pk.hex
ret = user.pk
if isinstance(ret, six.integer_types):
ret = int_to_base36(user.pk)
return str(ret)
def url_str_to_user_pk(s):
User = get_user_model()
# TODO: Ugh, isn't there a cleaner way to determine whether or not
# the PK is a str-like field?
if getattr(User._meta.pk, 'remote_field', None):
pk_field = User._meta.pk.remote_field.to._meta.pk
else:
pk_field = User._meta.pk
if issubclass(type(pk_field), models.UUIDField):
return pk_field.to_python(s)
try:
pk_field.to_python('a')
pk = s
except ValidationError:
pk = base36_to_int(s)
return pk
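# Hedged usage sketch: with the default integer primary key the URL string is
# base36, so the two helpers round-trip.
#
#   user_pk_to_url_str(user)   # e.g. pk 42 -> '16'
#   url_str_to_user_pk('16')   # -> 42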
|
AltSchool/django-allauth
|
allauth/account/utils.py
|
Python
|
mit
| 15,330
|
"""Borůvka's algorithm.
Determines the minimum spanning tree (MST) of a graph using Borůvka's algorithm.
Borůvka's algorithm is a greedy algorithm for finding a minimum spanning tree in a
connected graph, or a minimum spanning forest of a graph that is not connected.
The time complexity of this algorithm is O(E log V), where E represents the number
of edges and V represents the number of nodes:
O(number_of_edges * log(number_of_nodes))
The space complexity of this algorithm is O(V + E), since we have to keep a couple
of lists whose sizes are equal to the number of nodes, as well as keep all the
edges of a graph inside of the data structure itself.
Borůvka's algorithm gives us pretty much the same result as other MST Algorithms -
they all find the minimum spanning tree, and the time complexity is approximately
the same.
One advantage that Borůvka's algorithm has compared to the alternatives is that it
doesn't need to presort the edges or maintain a priority queue in order to find the
minimum spanning tree.
Even though that doesn't help its complexity, since it still passes the edges logE
times, it is a bit simpler to code.
Details: https://en.wikipedia.org/wiki/Bor%C5%AFvka%27s_algorithm
"""
from __future__ import annotations
from typing import Any
class Graph:
def __init__(self, num_of_nodes: int) -> None:
"""
Arguments:
num_of_nodes - the number of nodes in the graph
Attributes:
m_num_of_nodes - the number of nodes in the graph.
m_edges - the list of edges.
m_component - the dictionary which stores the index of the component which
a node belongs to.
"""
self.m_num_of_nodes = num_of_nodes
self.m_edges: list[list[int]] = []
self.m_component: dict[int, int] = {}
def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
"""Adds an edge in the format [first, second, edge weight] to graph."""
self.m_edges.append([u_node, v_node, weight])
def find_component(self, u_node: int) -> int:
"""Propagates a new component throughout a given component."""
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node])
def set_component(self, u_node: int) -> None:
"""Finds the component index of a given node"""
if self.m_component[u_node] != u_node:
for k in self.m_component:
self.m_component[k] = self.find_component(k)
def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
"""Union finds the roots of components for two nodes, compares the components
in terms of size, and attaches the smaller one to the larger one to form
single component"""
if component_size[u_node] <= component_size[v_node]:
self.m_component[u_node] = v_node
component_size[v_node] += component_size[u_node]
self.set_component(u_node)
else:
self.m_component[v_node] = self.find_component(u_node)
component_size[u_node] += component_size[v_node]
self.set_component(v_node)
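# Illustrative example (added comment): with component sizes 1 for root a and
# 3 for root b, union(component_size, a, b) takes the first branch (1 <= 3),
# re-roots a under b, grows b's size to 4, and set_component then flattens the
# map so every node in the merged component points directly at b.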
def boruvka(self) -> None:
"""Performs Borůvka's algorithm to find MST."""
# Initialize the additional bookkeeping lists required by the algorithm.
component_size = []
mst_weight = 0
minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
# Each node starts as its own component of size 1.
for node in range(self.m_num_of_nodes):
self.m_component.update({node: node})
component_size.append(1)
num_of_components = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
u, v, w = edge
u_component = self.m_component[u]
v_component = self.m_component[v]
if u_component != v_component:
"""If the current minimum weight edge of component u doesn't
exist (is -1), or if it's greater than the edge we're
observing right now, we will assign the value of the edge
we're observing to it.
If the current minimum weight edge of component v doesn't
exist (is -1), or if it's greater than the edge we're
observing right now, we will assign the value of the edge
we're observing to it"""
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
minimum_weight_edge[component] = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(edge, list):
u, v, w = edge
u_component = self.m_component[u]
v_component = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(component_size, u_component, v_component)
print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
num_of_components -= 1
minimum_weight_edge = [-1] * self.m_num_of_nodes
print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def test_vector() -> None:
"""
>>> g = Graph(8)
>>> for u_v_w in ((0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4),
... (3, 4, 8), (4, 5, 10), (4, 6, 6), (4, 7, 5), (5, 7, 15), (6, 7, 4)):
... g.add_edge(*u_v_w)
>>> g.boruvka()
Added edge [0 - 3]
Added weight: 5
<BLANKLINE>
Added edge [0 - 1]
Added weight: 10
<BLANKLINE>
Added edge [2 - 3]
Added weight: 4
<BLANKLINE>
Added edge [4 - 7]
Added weight: 5
<BLANKLINE>
Added edge [4 - 5]
Added weight: 10
<BLANKLINE>
Added edge [6 - 7]
Added weight: 4
<BLANKLINE>
Added edge [3 - 4]
Added weight: 8
<BLANKLINE>
The total weight of the minimal spanning tree is: 46
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
|
TheAlgorithms/Python
|
graphs/boruvka.py
|
Python
|
mit
| 6,482
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
from dashboard import change_internal_only
from dashboard.common import testing_common
from dashboard.models import anomaly
from dashboard.models import graph_data
class ChangeInternalOnlyTest(testing_common.TestCase):
def testUpdateBots(self):
testing_common.AddTests(
['ChromiumPerf', 'ChromiumGPU'],
['win7', 'mac'],
{'scrolling': {'first_paint': {}}})
for key in graph_data.TestMetadata.query().fetch(keys_only=True):
anomaly.Anomaly(
test=key, start_revision=15001, end_revision=15005,
median_before_anomaly=100, median_after_anomaly=200).put()
internal_master_bots = [
('ChromiumPerf', 'win7'),
('ChromiumGPU', 'mac'),
]
change_internal_only.UpdateBots(internal_master_bots, True)
self.PatchDatastoreHooksRequest()
self.ExecuteDeferredTasks(change_internal_only.QUEUE_NAME)
for bot in graph_data.Bot.query().fetch():
master_name = bot.key.parent().id()
bot_name = bot.key.id()
expected = (master_name, bot_name) in internal_master_bots
self.assertEqual(expected, bot.internal_only)
query = graph_data.TestMetadata.query(
graph_data.TestMetadata.master_name == master_name,
graph_data.TestMetadata.bot_name == bot_name)
for test in query.fetch():
self.assertEqual(expected, test.internal_only)
anomalies, _, _ = anomaly.Anomaly.QueryAsync(
test=test.test_path).get_result()
for alert in anomalies:
self.assertEqual(expected, alert.internal_only)
if __name__ == '__main__':
unittest.main()
|
endlessm/chromium-browser
|
third_party/catapult/dashboard/dashboard/change_internal_only_test.py
|
Python
|
bsd-3-clause
| 1,897
|
from __future__ import absolute_import, print_function
import unittest
from bokeh.application.spellings import ScriptHandler
from bokeh.document import Document
def _with_temp_file(func):
import tempfile
f = tempfile.NamedTemporaryFile()
try:
func(f)
finally:
f.close()
def _with_script_contents(contents, func):
def with_file_object(f):
f.write(contents.encode("UTF-8"))
f.flush()
func(f.name)
_with_temp_file(with_file_object)
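# Illustrative usage (added comment, hypothetical snippet): the helper writes
# `contents` to a temporary file and hands the file's name to `func`, e.g.
#   _with_script_contents("print('hi')", lambda path: print(open(path).read()))
# which is exactly the pattern the tests below use to drive ScriptHandler.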
script_adds_two_roots = """
from bokeh.io import curdoc
from bokeh.plot_object import PlotObject
from bokeh.properties import Int, Instance
class AnotherModelInTestScript(PlotObject):
bar = Int(1)
class SomeModelInTestScript(PlotObject):
foo = Int(2)
child = Instance(PlotObject)
curdoc().add_root(AnotherModelInTestScript())
curdoc().add_root(SomeModelInTestScript())
"""
class TestScriptHandler(unittest.TestCase):
def test_empty_script(self):
doc = Document()
def load(filename):
handler = ScriptHandler(filename=filename)
handler.modify_document(doc)
if handler.failed:
raise RuntimeError(handler.error)
_with_script_contents("# This script does nothing", load)
assert not doc.roots
def test_script_adds_roots(self):
doc = Document()
def load(filename):
handler = ScriptHandler(filename=filename)
handler.modify_document(doc)
if handler.failed:
raise RuntimeError(handler.error)
_with_script_contents(script_adds_two_roots, load)
assert len(doc.roots) == 2
def test_script_bad_syntax(self):
doc = Document()
result = {}
def load(filename):
handler = ScriptHandler(filename=filename)
result['handler'] = handler
handler.modify_document(doc)
_with_script_contents("This is a syntax error", load)
handler = result['handler']
assert handler.error is not None
assert 'Invalid syntax' in handler.error
def test_script_runtime_error(self):
doc = Document()
result = {}
def load(filename):
handler = ScriptHandler(filename=filename)
result['handler'] = handler
handler.modify_document(doc)
_with_script_contents("raise RuntimeError('nope')", load)
handler = result['handler']
assert handler.error is not None
assert 'nope' in handler.error
|
gpfreitas/bokeh
|
bokeh/application/spellings/tests/test_script.py
|
Python
|
bsd-3-clause
| 2,526
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Non-relativistic restricted open-shell Kohn-Sham
'''
import numpy
from pyscf import lib
from pyscf.scf import rohf
from pyscf.dft.uks import energy_elec
from pyscf.dft import rks
from pyscf.dft import uks
@lib.with_doc(uks.get_veff.__doc__)
def get_veff(ks, mol=None, dm=None, dm_last=0, vhf_last=0, hermi=1):
if getattr(dm, 'mo_coeff', None) is not None:
mo_coeff = dm.mo_coeff
mo_occ_a = (dm.mo_occ > 0).astype(numpy.double)
mo_occ_b = (dm.mo_occ == 2).astype(numpy.double)
dm = lib.tag_array(dm, mo_coeff=(mo_coeff,mo_coeff),
mo_occ=(mo_occ_a,mo_occ_b))
return uks.get_veff(ks, mol, dm, dm_last, vhf_last, hermi)
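# Illustrative sketch (added comment): for an ROHF-tagged density with
# mo_occ = [2, 2, 1, 0] the masks above evaluate to
#   mo_occ_a = (mo_occ > 0)  -> [1., 1., 1., 0.]
#   mo_occ_b = (mo_occ == 2) -> [1., 1., 0., 0.]
# so doubly occupied orbitals count in both spin channels and the singly
# occupied orbital only in the alpha channel, as uks.get_veff expects.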
class ROKS(rks.KohnShamDFT, rohf.ROHF):
'''Restricted open-shell Kohn-Sham
See pyscf/dft/rks.py RKS class for the usage of the attributes'''
def __init__(self, mol, xc='LDA,VWN'):
rohf.ROHF.__init__(self, mol)
rks.KohnShamDFT.__init__(self, xc)
def dump_flags(self, verbose=None):
rohf.ROHF.dump_flags(self, verbose)
rks.KohnShamDFT.dump_flags(self, verbose)
return self
get_veff = get_veff
get_vsap = rks.get_vsap
energy_elec = energy_elec
init_guess_by_vsap = rks.init_guess_by_vsap
def nuc_grad_method(self):
from pyscf.grad import roks
return roks.Gradients(self)
if __name__ == '__main__':
from pyscf import gto
from pyscf.dft import xcfun
mol = gto.Mole()
mol.verbose = 7
mol.output = '/dev/null'  # 'out_rks'
mol.atom.extend([['He', (0.,0.,0.)], ])
mol.basis = { 'He': 'cc-pvdz'}
#mol.grids = { 'He': (10, 14),}
mol.build()
m = ROKS(mol).run()
m.xc = 'b88,lyp'
print(m.scf()) # -2.8978518405
m = ROKS(mol)
m._numint.libxc = xcfun
m.xc = 'b88,lyp'
print(m.scf()) # -2.8978518405
|
sunqm/pyscf
|
pyscf/dft/roks.py
|
Python
|
apache-2.0
| 2,523
|