code
stringlengths
1
199k
from __future__ import division from __future__ import print_function from PIL import Image from PIL import ImageFont from PIL import ImageDraw from PIL import ImageEnhance import nwcsaf import numpy as np from satpy import Scene, find_files_and_readers from datetime import datetime, timedelta from copy import deepcopy import netCDF4 import subprocess import sys import inspect import logging LOG = logging.getLogger(__name__) LOG.setLevel(50) import matplotlib.pyplot as plt def get_last_SEVIRI_date(RSS, delay=0, time_slot=None): ''' input: RSS logical variable True or False specifies if you like get (RSS=True) the last rapid scan observation date (every 5 min) (RSS=False) the last full disk observation date (every 15 min) (delay=INT) number of minutes to substract before finding the date (good if data needs a few min before arriving) (time_slot) If not given, take last time otherwise search scanning time of SEVIRI before given time_slot output: date structure with the date of the last SEVIRI observation ''' from time import gmtime LOG.info("*** start get_last_SEVIRI_date ("+inspect.getfile(inspect.currentframe())+")") # if rapid scan service than 5min otherwise 15 if RSS: nmin = 5 else: nmin = 15 if (time_slot is None): # get the current time gmt = gmtime() #print ("GMT time: "+ str(gmt)) # or alternatively # utc = datetime.utcnow() # convert to datetime format t0 = datetime(gmt.tm_year, gmt.tm_mon, gmt.tm_mday, gmt.tm_hour, gmt.tm_min, 0) LOG.debug(" current time = "+str(t0)) else: t0 = time_slot + timedelta(seconds=nmin*60) # we substract one scanning time later, so we can add it here LOG.debug(" reference time = "+str(t0)) # apply delay (if it usually takes 5 min for the data to arrive, use delay 5min) if delay != 0: t0 -= timedelta(minutes=delay) LOG.debug(" applying delay "+str(delay)+" min delay, time = "+ str(t0)) LOG.debug(" round by scanning time "+str(nmin)+" min, RSS = "+str(RSS)) #tm_min2 = gmt.tm_min - (gmt.tm_min % nmin) minute1 = t0.minute - 
(t0.minute % nmin) # define current date rounded by one scan time #date1 = datetime(gmt.tm_year, gmt.tm_mon, gmt.tm_mday, gmt.tm_hour, tm_min2 , 0) t1 = datetime(t0.year, t0.month, t0.day, t0.hour, minute1, 0) LOG.debug(" end time of last scan: "+str(t1)) # substracting one scan time (as the start time of scan is returned) t1 -= timedelta(seconds=nmin*60) LOG.info(" start time of last scan: "+str(t1)) return t1 def rewrite_xy_axis(netCDF_file): print("... re-place values on the x and y axis with lon/lat values in "+netCDF_file) ds = netCDF4.Dataset(netCDF_file, 'r+') lat = ds["latitude"][:,0] ds["y"][:] = lat.data ds["y"].units = 'Degrees North' lon = ds["longitude"][0,:] ds["x"][:] = lon.data ds["x"].units = 'Degrees East' ds.close() if __name__ == '__main__': sat='MSG4' if len(sys.argv) == 1: start_time = get_last_SEVIRI_date(False, delay=6) base_dir_sat = "/data/cinesat/in/eumetcast1/" base_dir_nwc = "/data/cinesat/in/eumetcast1/" #base_dir_nwc = "/data/cinesat/in/safnwc_v2016/" elif len(sys.argv) == 6: year = int(sys.argv[1]) month = int(sys.argv[2]) day = int(sys.argv[3]) hour = int(sys.argv[4]) minute = int(sys.argv[5]) start_time = datetime(year, month, day, hour, minute) base_dir_sat = start_time.strftime("/data/COALITION2/database/meteosat/radiance_HRIT/case-studies/%Y/%m/%d/") #base_dir_sat = start_time.strftime("/data/COALITION2/database/meteosat/radiance_HRIT/%Y/%m/%d/") base_dir_nwc = start_time.strftime("/data/OWARNA/hau/database/meteosat/SAFNWC/%Y/%m/%d/CT/") else: start_time = datetime(2020, 10, 7, 16, 0) base_dir_sat = start_time.strftime("/data/COALITION2/database/meteosat/radiance_HRIT/%Y/%m/%d/") base_dir_nwc = start_time.strftime("/data/COALITION2/database/meteosat/SAFNWC_v2016/%Y/%m/%d/CT/") print("... 
processing time ", start_time) show_interactively=False save_black_white_png=False print("") print("") print("*** Creating LSCL (low stratus confidence level) product") print("") # read MSG (full disk service) L2 ################################# print("... read "+sat+" L1.5 data") print(" search for HRIT files in "+base_dir_sat) files_sat = find_files_and_readers(sensor='seviri', start_time=start_time, end_time=start_time, base_dir=base_dir_sat, reader='seviri_l1b_hrit') files = deepcopy(files_sat['seviri_l1b_hrit']) #print(" found SEVIRI files: ", files_sat) for f in files: if not (sat in f): files_sat['seviri_l1b_hrit'].remove(f) continue if ("HRV" in f) or ("VIS006" in f) or ("VIS008" in f) or ("IR_016" in f) or ("IR_039" in f): files_sat['seviri_l1b_hrit'].remove(f) continue if ("WV_062" in f) or ("WV_073" in f) or ("IR_097" in f) or ("IR_108" in f) or ("IR_134" in f): files_sat['seviri_l1b_hrit'].remove(f) continue global_scene = Scene(reader="seviri_l1b_hrit", filenames=files_sat) global_scene.load(['IR_087','IR_120']) # read NWCSAF files ######################## print("... 
read "+sat+" NWCSAF CTTH") print(" search for NWCSAF files in "+base_dir_nwc) files_nwc = find_files_and_readers(sensor='seviri', start_time=start_time, end_time=start_time, base_dir=base_dir_nwc, reader='nwcsaf-geo') print(" found NWCSAF files: ", files_nwc) files = deepcopy(files_nwc['nwcsaf-geo']) for f in files: # remove files from other satellites if not (sat in f): files_nwc['nwcsaf-geo'].remove(f) continue # remove CTTH files if ("CTTH" in f): files_nwc['nwcsaf-geo'].remove(f) continue global_nwc = Scene(filenames=files_nwc) global_nwc.load(['ct']) # "CT" # loop over areas, resample and create products # create netCDF file for area cosmo1 # create png file for area cosmo1_150 (50% more pixels) ############################################################ #for area in ['SeviriDisk00Cosmo',"cosmo1x150"]: #for area in ['cosmo1', 'cosmo1eqc3km']: for area in ['cosmo1eqc3km']: #for area in ['cosmo1x150', 'cosmo1eqc3km']: # resample MSG L2 ################## print("") print("=======================") print("resample to "+area) local_scene = global_scene.resample(area) # fake a new channel print("fake a new channel") local_scene['lscl'] = deepcopy(local_scene['IR_120']) #local_scene['lscl'].wavelength="" #local_scene['lscl'].standard_name="low_stratus_confidence_level" #local_scene['lscl'].calibration="brightness_temperature_difference" #print(local_scene['IR_120']) #print(dir(local_scene['IR_120'])) #print(local_scene['IR_120'].standard_name) #print(type(local_scene['IR_120'].standard_name)) #local_scene['lscl'].standard_name = "toa_brightness_temperature_difference" #print(local_scene['lscl']) ############################################## # calculate lscl "low stratus confidence level # see MSc Thesis of Anna Ehrler (chapter 3.2.1 to 3.2.2) ############################################## th_liquid_cloud = 1.8 # K # cloud_confidence_range ccr = 1.0 # K local_scene['lscl'].values = (th_liquid_cloud - (local_scene['IR_120']-local_scene['IR_087']) - ccr) / (-2. 
* ccr) #local_scene['lscl'].area_def = local_scene['IR_120'].area_def # print(global_nwc) local_nwc = global_nwc.resample(area) # delete values for high clouds ########################################### # !!! ONLY NWCSAF VERSION 2016 and 2018 !!! # !!! Numbers are different for v2013 # ct:comment = "1: Cloud-free land; 2: Cloud-free sea; 3: Snow over land; 4: Sea ice; 5: Very low clouds; # 6: Low clouds; 7: Mid-level clouds; 8: High opaque clouds; 9: Very high opaque clouds; # 10: Fractional clouds; 11: High semitransparent thin clouds; 12: High semitransparent meanly thick clouds; # 13: High semitransparent thick clouds; 14: High semitransparent above low or medium clouds; 15: High semitransparent above snow/ice" ; for _ct_ in [7,8,9,10,11,12,13,14,15]: print("replace cloud type",_ct_) local_scene['lscl'].values = np.where(local_nwc['ct'].values==_ct_, np.nan, local_scene['lscl'].values) if show_interactively: fig, ax = plt.subplots(figsize=(13, 7)) pos = plt.imshow(local_scene['lscl'].values, vmin=0, vmax=1) fig.colorbar(pos) plt.title(start_time.strftime('low stratus confidence level, %y-%m-%d %H:%MUTC')) plt.show() if save_black_white_png: local_scene.save_dataset('lscl', './lscl_'+area+'.png') print(dir(local_scene.save_dataset)) print('display ./lscl_'+area+'.png &') # save png file for SATLive ############################## if area=="cosmo1x150" or area=="cosmo1": png_file = start_time.strftime('/data/cinesat/out/MSG_lscl-'+area+'_%y%m%d%H%M.png') from trollimage.colormap import spectral, greys, ylorrd, rdgy imgarr = np.array(local_scene['lscl'].data) from trollimage.image import Image as Timage img = Timage(imgarr, mode="L") img.colorize( rdgy.reverse() ) img.save(png_file) # local_scene.save_dataset( 'lscl', png_file ) from pyresample.utils import load_area swiss = load_area("/opt/users/hau/monti-pytroll/etc/areas.def", area) from pycoast import ContourWriterAGG cw = ContourWriterAGG('/opt/users/common/shapes') cw.add_borders_to_file(png_file, swiss, 
outline="green", resolution='i', level=3, width=2) img = Image.open(png_file) draw = ImageDraw.Draw(img) draw.rectangle([(0, 0), (img.size[0]*0.7, 25)], fill=(0,0,0,200)) font = ImageFont.truetype("/usr/openv/java/jre/lib/fonts/LucidaTypewriterBold.ttf", 18) title = start_time.strftime(" "+sat[0:3]+"-"+sat[3]+', %y-%m-%d %H:%MUTC, low stratus confidence level') draw.text( (1, 1), title, "green" , font=font) # (255,255,255) img.save(png_file) print("display " + png_file +" &") if area=="cosmo1x150": scpID="-i ~/.ssh/id_rsa_las" scpOutputDir="las@zueub241:/srn/las/www/satellite/DATA/MSG_"+"lscl"+"-"+area+"_/" scp_command = "/usr/bin/scp "+scpID+" "+png_file+" "+scpOutputDir+" 2>&1 &" print(scp_command) subprocess.call(scp_command, shell=True) elif area=="cosmo1": scpID="-i ~/.ssh/id_rsa_tsa" scpOutputDir="hamann@tsa.cscs.ch:/scratch/hamann/DayNightFog/" print("... scp "+png_file+" to "+scpOutputDir) subprocess.call("/usr/bin/scp "+scpID+" "+png_file+" "+scpOutputDir+" 2>&1 &", shell=True) # save netCDF file for APN ############################## if area=='cosmo1eqc3km': netCDF_file = start_time.strftime('/data/cinesat/out/MSG_lscl-'+area+'_%y%m%d%H%M.nc') print("... save result in: "+ netCDF_file) print("include_lonlats=True") local_scene.save_dataset('lscl', netCDF_file, include_lonlats=True, writer='cf', exclude_attrs=['raw_metadata'], epoch='seconds since 1970-01-01 00:00:00') #, writer='cf' #import netCDF4 as nc #file_input = nc.Dataset(netCDF_file, 'r+') #print(file_input.variables.keys()) #lonlats = local_scene['lscl'].area.get_lonlats() #lons = file_input.createVariable('longitues', 'single', ('y', 'x')) #lats = file_input.createVariable('latitudes', 'single', ('y', 'x')) #lons[:] = lonlats[0][:,:] #lats[:] = lonlats[1][:,:] #local_scene.save_datasets(['lscl'], filename=netCDF_file, include_lonlats=True) #, writer='cf' print("... 
ncview " + netCDF_file +" &") rewrite_xy_axis(netCDF_file) scpID="-i ~/.ssh/id_rsa_tsa" #scpOutputDir="hamann@tsa.cscs.ch:/scratch/hamann/DayNightFog/" scpOutputDir="hamann@tsa.cscs.ch:/scratch/hamann/DayNightFog_Filter-CT-7-15/" print("... scp "+netCDF_file+" to "+scpOutputDir) subprocess.call("/usr/bin/scp "+scpID+" "+netCDF_file+" "+scpOutputDir+" 2>&1 &", shell=True)
class Usecase:
    """Create an IFC entity of a configurable class on the wrapped file.

    ``settings`` overrides the defaults; the only recognised key so far
    is ``ifc_class`` (defaults to None).
    """

    def __init__(self, file, **settings):
        self.file = file
        # Start from the defaults, then overlay caller-supplied settings.
        self.settings = {"ifc_class": None}
        self.settings.update(settings)

    def execute(self):
        # Delegate entity creation to the underlying IFC file handle.
        return self.file.create_entity(self.settings["ifc_class"])
import tornado.testing

from testexample import ExampleApp


class TestExampleApp(tornado.testing.AsyncHTTPTestCase,
                     tornado.testing.LogTrapTestCase):
    """HTTP smoke tests: each route of ExampleApp must answer 200 OK."""

    def get_app(self):
        # The AsyncHTTPTestCase harness serves this app for every test.
        return ExampleApp()

    def test_home(self):
        response = self.fetch('/')
        self.assertEqual(response.code, 200)

    def test_ticker(self):
        response = self.fetch('/ticker')
        self.assertEqual(response.code, 200)
# NOTE(review): 1 is not a prime number; the value is kept to preserve the
# original set's contents, but the variable name is misleading.
low_primes = {1, 3, 5, 7, 11, 13}
low_primes.add(17)  # -> {1, 3, 5, 7, 11, 13, 17}
# update() accepts any number of iterables and merges them all into the set.
low_primes.update({19, 23}, {2, 29})  # -> {1, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29}
# set.pop() removes an *arbitrary* element — sets are unordered, so there is
# no "first" element.  Looping while the set is truthy (non-empty) drains it.
while low_primes:
    print(low_primes.pop() / 3)
""" Python 'utf-8' Codec Written by Marc-Andre Lemburg (mal@lemburg.com). (c) Copyright CNRI, All Rights Reserved. NO WARRANTY. """ import codecs encode = codecs.utf_8_encode def decode(input, errors='strict'): return codecs.utf_8_decode(input, errors, True) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.utf_8_encode(input, self.errors)[0] class IncrementalDecoder(codecs.BufferedIncrementalDecoder): _buffer_decode = codecs.utf_8_decode class StreamWriter(codecs.StreamWriter): encode = codecs.utf_8_encode class StreamReader(codecs.StreamReader): decode = codecs.utf_8_decode def getregentry(): return codecs.CodecInfo(name='utf-8', encode=encode, decode=decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
import os

# Ported from Python 2: file() -> open(), "except E, e" -> "except E as e",
# print statement -> print() function.
try:
    f = open('blah', 'r')
except IOError as e:
    print('could not open file:', e)
else:
    # close the handle when the open actually succeeded (original leaked it)
    f.close()


def safe_float(obj):
    """Convert *obj* to float, returning None when the text is not numeric.

    Raises TypeError (unchanged from the original) for non-string,
    non-numeric inputs such as None.
    """
    try:
        return float(obj)
    except ValueError:
        return None


ccfile = None
log = open('log.txt', 'w+')
try:
    ccfile = open('card.txt', 'r')
    txns = ccfile.readlines()
except IOError:
    # no transactions file this month: record the fact instead of failing
    log.write('no txns this month%s' % os.linesep)
finally:
    log.close()
if ccfile:
    ccfile.close()
""" Copyright 2017-present Airbnb, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from streamalert.shared import ALERT_PROCESSOR_NAME from streamalert_cli.terraform.common import infinitedict from streamalert_cli.terraform.lambda_module import generate_lambda def generate_alert_processor(config): """Generate Terraform for the Alert Processor Args: config (dict): The loaded config from the 'conf/' directory Returns: dict: Alert Processor dict to be marshaled to JSON """ prefix = config['global']['account']['prefix'] result = infinitedict() # Set variables for the IAM permissions module result['module']['alert_processor_iam'] = { 'source': './modules/tf_alert_processor_iam', 'account_id': config['global']['account']['aws_account_id'], 'region': config['global']['account']['region'], 'prefix': prefix, 'role_id': '${module.alert_processor_lambda.role_id}', 'kms_key_arn': '${aws_kms_key.streamalert_secrets.arn}', 'sse_kms_key_arn': '${aws_kms_key.server_side_encryption.arn}', 'output_lambda_functions': [ # Strip qualifiers: only the function name is needed for the IAM permissions func.split(':')[0] for func in list(config['outputs'].get('aws-lambda', {}).values()) ], 'output_s3_buckets': list(config['outputs'].get('aws-s3', {}).values()), 'output_sns_topics': list(config['outputs'].get('aws-sns', {}).values()), 'output_sqs_queues': list(config['outputs'].get('aws-sqs', {}).values()) } # Set variables for the Lambda module result['module']['alert_processor_lambda'] = generate_lambda( 
'{}_streamalert_{}'.format(config['global']['account']['prefix'], ALERT_PROCESSOR_NAME), 'streamalert.alert_processor.main.handler', config['lambda']['alert_processor_config'], config, environment={ 'ALERTS_TABLE': '{}_streamalert_alerts'.format(prefix), 'AWS_ACCOUNT_ID': config['global']['account']['aws_account_id'], 'STREAMALERT_PREFIX': prefix } ) return result
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Add the optional free-text ``displayname`` field to Author."""

    dependencies = [
        ('socialnet', '0029_auto_20161121_0543'),
    ]

    operations = [
        migrations.AddField(
            model_name='author',
            name='displayname',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
    ]
class Coordinates:
    """WhiteSource model for an artifact's coordinates."""

    def __init__(self, group_id, artifact_id, version_id):
        # Attribute names follow the WhiteSource API's camelCase schema.
        self.groupId = group_id
        self.artifactId = artifact_id
        self.versionId = version_id


def create_project_coordinates(distribution):
    """Create a 'Coordinates' instance for the user package."""
    return Coordinates(
        group_id=None,
        artifact_id=distribution.get_name(),
        version_id=distribution.get_version(),
    )
class SessionHelper:
    """Login/logout helpers around the application fixture's webdriver."""

    def __init__(self, app):
        self.app = app

    def login(self, username, password):
        """Sign in to the site with the given credentials."""
        wd = self.app.wd
        self.app.open_home_page()
        wd.find_element_by_name("user").click()
        wd.find_element_by_name("user").clear()
        wd.find_element_by_name("user").send_keys(username)
        wd.find_element_by_name("pass").click()
        wd.find_element_by_name("pass").clear()
        wd.find_element_by_name("pass").send_keys(password)
        wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()

    def logout(self):
        """Sign out of the site."""
        wd = self.app.wd
        wd.find_element_by_link_text("Logout").click()

    def destroy(self):
        """Tear down the fixture after the test run finishes."""
        self.app.wd.quit()

    def ensure_logout(self):
        """Log out, but only if a session is currently active."""
        if self.is_logged_in():
            self.logout()

    def is_logged_in(self):
        """Return True when a user session is active.

        A visible "Logout" link is taken as the marker of an active session.
        """
        wd = self.app.wd
        return len(wd.find_elements_by_link_text("Logout")) > 0

    def is_logged_in_as(self, username):
        """Return True when the active session belongs to *username*.

        The page shows the current user as "(<username>)" in a form header.
        """
        wd = self.app.wd
        return wd.find_element_by_xpath("//div/div[1]/form/b").text == "(" + username + ")"

    def ensure_login(self, username, password):
        """Guarantee a session for *username*, re-logging-in if needed."""
        wd = self.app.wd
        if self.is_logged_in():
            if self.is_logged_in_as(username):
                # already signed in with the expected account — nothing to do
                return
            # signed in as someone else: drop that session first
            self.logout()
        self.login(username, password)
"""This test checks that Nevergrad is functional. It also checks that it is usable with a separate scheduler. """ import ray from ray.tune import run from ray.tune.schedulers import AsyncHyperBandScheduler from ray.tune.suggest.nevergrad import NevergradSearch def easy_objective(config, reporter): import time time.sleep(0.2) for i in range(config["iterations"]): reporter( timesteps_total=i, mean_loss=(config["height"] - 14)**2 - abs(config["width"] - 3)) time.sleep(0.02) if __name__ == "__main__": import argparse from nevergrad.optimization import optimizerlib parser = argparse.ArgumentParser() parser.add_argument( "--smoke-test", action="store_true", help="Finish quickly for testing") args, _ = parser.parse_known_args() ray.init() config = { "num_samples": 10 if args.smoke_test else 50, "config": { "iterations": 100, }, "stop": { "timesteps_total": 100 } } instrumentation = 2 parameter_names = ["height", "width"] # With nevergrad v0.2.0+ the following is also possible: # from nevergrad import instrumentation as inst # instrumentation = inst.Instrumentation( # height=inst.var.Array(1).bounded(0, 200).asfloat(), # width=inst.var.OrderedDiscrete([0, 10, 20, 30, 40, 50])) # parameter_names = None # names are provided by the instrumentation optimizer = optimizerlib.OnePlusOne(instrumentation) algo = NevergradSearch( optimizer, parameter_names, max_concurrent=4, metric="mean_loss", mode="min") scheduler = AsyncHyperBandScheduler(metric="mean_loss", mode="min") run(easy_objective, name="nevergrad", search_alg=algo, scheduler=scheduler, **config)
"""Test cases for the bfloat16 Python type.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import itertools import math from absl.testing import absltest from absl.testing import parameterized import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.lib.core import _pywrap_bfloat16 from tensorflow.python.platform import test bfloat16 = _pywrap_bfloat16.TF_bfloat16_type() def numpy_assert_allclose(a, b, **kwargs): a = a.astype(np.float32) if a.dtype == bfloat16 else a b = b.astype(np.float32) if b.dtype == bfloat16 else b return np.testing.assert_allclose(a, b, **kwargs) epsilon = float.fromhex("1.0p-7") FLOAT_VALUES = [ 0.0, 1.0, -1, 0.5, -0.5, epsilon, 1.0 + epsilon, 1.0 - epsilon, -1.0 - epsilon, -1.0 + epsilon, 3.5, 42.0, 255.0, 256.0, float("inf"), float("-inf"), float("nan") ] class Bfloat16Test(parameterized.TestCase): """Tests the non-numpy Python methods of the bfloat16 type.""" def testRoundTripToFloat(self): for v in FLOAT_VALUES: np.testing.assert_equal(v, float(bfloat16(v))) def testRoundTripNumpyTypes(self): for dtype in [np.float16, np.float32, np.float64]: np.testing.assert_equal(-3.75, dtype(bfloat16(dtype(-3.75)))) np.testing.assert_equal(1.5, float(bfloat16(dtype(1.5)))) np.testing.assert_equal(4.5, dtype(bfloat16(np.array(4.5, dtype)))) np.testing.assert_equal( np.array([2, 5, -1], bfloat16), bfloat16(np.array([2, 5, -1], dtype))) def testRoundTripToInt(self): for v in [-256, -255, -34, -2, -1, 0, 1, 2, 10, 47, 128, 255, 256, 512]: self.assertEqual(v, int(bfloat16(v))) # pylint: disable=g-complex-comprehension @parameterized.named_parameters(({ "testcase_name": "_" + dtype.__name__, "dtype": dtype } for dtype in [bfloat16, np.float16, np.float32, np.float64])) def testRoundTripToNumpy(self, dtype): for v in FLOAT_VALUES: np.testing.assert_equal(v, bfloat16(dtype(v))) np.testing.assert_equal(v, dtype(bfloat16(dtype(v)))) 
np.testing.assert_equal(v, dtype(bfloat16(np.array(v, dtype)))) if dtype != bfloat16: np.testing.assert_equal( np.array(FLOAT_VALUES, dtype), bfloat16(np.array(FLOAT_VALUES, dtype)).astype(dtype)) def testStr(self): self.assertEqual("0", str(bfloat16(0.0))) self.assertEqual("1", str(bfloat16(1.0))) self.assertEqual("-3.5", str(bfloat16(-3.5))) self.assertEqual("0.0078125", str(bfloat16(float.fromhex("1.0p-7")))) self.assertEqual("inf", str(bfloat16(float("inf")))) self.assertEqual("-inf", str(bfloat16(float("-inf")))) self.assertEqual("nan", str(bfloat16(float("nan")))) def testRepr(self): self.assertEqual("0", repr(bfloat16(0))) self.assertEqual("1", repr(bfloat16(1))) self.assertEqual("-3.5", repr(bfloat16(-3.5))) self.assertEqual("0.0078125", repr(bfloat16(float.fromhex("1.0p-7")))) self.assertEqual("inf", repr(bfloat16(float("inf")))) self.assertEqual("-inf", repr(bfloat16(float("-inf")))) self.assertEqual("nan", repr(bfloat16(float("nan")))) def testHash(self): self.assertEqual(0, hash(bfloat16(0.0))) self.assertEqual(0x3f80, hash(bfloat16(1.0))) self.assertEqual(0x7fc0, hash(bfloat16(float("nan")))) # Tests for Python operations def testNegate(self): for v in FLOAT_VALUES: np.testing.assert_equal(-v, float(-bfloat16(v))) def testAdd(self): np.testing.assert_equal(0, float(bfloat16(0) + bfloat16(0))) np.testing.assert_equal(1, float(bfloat16(1) + bfloat16(0))) np.testing.assert_equal(0, float(bfloat16(1) + bfloat16(-1))) np.testing.assert_equal(5.5, float(bfloat16(2) + bfloat16(3.5))) np.testing.assert_equal(1.25, float(bfloat16(3.5) + bfloat16(-2.25))) np.testing.assert_equal( float("inf"), float(bfloat16(float("inf")) + bfloat16(-2.25))) np.testing.assert_equal( float("-inf"), float(bfloat16(float("-inf")) + bfloat16(-2.25))) self.assertTrue(math.isnan(float(bfloat16(3.5) + bfloat16(float("nan"))))) # Test type promotion against Numpy scalar values. 
self.assertEqual(np.float32, type(bfloat16(3.5) + np.float16(2.25))) self.assertEqual(np.float32, type(np.float16(3.5) + bfloat16(2.25))) self.assertEqual(np.float32, type(bfloat16(3.5) + np.float32(2.25))) self.assertEqual(np.float32, type(np.float32(3.5) + bfloat16(2.25))) self.assertEqual(np.float64, type(bfloat16(3.5) + np.float64(2.25))) self.assertEqual(np.float64, type(np.float64(3.5) + bfloat16(2.25))) self.assertEqual(np.float64, type(bfloat16(3.5) + float(2.25))) self.assertEqual(np.float64, type(float(3.5) + bfloat16(2.25))) self.assertEqual(np.float32, type(bfloat16(3.5) + np.array(2.25, np.float32))) self.assertEqual(np.float32, type(np.array(3.5, np.float32) + bfloat16(2.25))) def testSub(self): np.testing.assert_equal(0, float(bfloat16(0) - bfloat16(0))) np.testing.assert_equal(1, float(bfloat16(1) - bfloat16(0))) np.testing.assert_equal(2, float(bfloat16(1) - bfloat16(-1))) np.testing.assert_equal(-1.5, float(bfloat16(2) - bfloat16(3.5))) np.testing.assert_equal(5.75, float(bfloat16(3.5) - bfloat16(-2.25))) np.testing.assert_equal( float("-inf"), float(bfloat16(-2.25) - bfloat16(float("inf")))) np.testing.assert_equal( float("inf"), float(bfloat16(-2.25) - bfloat16(float("-inf")))) self.assertTrue(math.isnan(float(bfloat16(3.5) - bfloat16(float("nan"))))) def testMul(self): np.testing.assert_equal(0, float(bfloat16(0) * bfloat16(0))) np.testing.assert_equal(0, float(bfloat16(1) * bfloat16(0))) np.testing.assert_equal(-1, float(bfloat16(1) * bfloat16(-1))) np.testing.assert_equal(-7.875, float(bfloat16(3.5) * bfloat16(-2.25))) np.testing.assert_equal( float("-inf"), float(bfloat16(float("inf")) * bfloat16(-2.25))) np.testing.assert_equal( float("inf"), float(bfloat16(float("-inf")) * bfloat16(-2.25))) self.assertTrue(math.isnan(float(bfloat16(3.5) * bfloat16(float("nan"))))) def testDiv(self): self.assertTrue(math.isnan(float(bfloat16(0) / bfloat16(0)))) np.testing.assert_equal(float("inf"), float(bfloat16(1) / bfloat16(0))) 
np.testing.assert_equal(-1, float(bfloat16(1) / bfloat16(-1))) np.testing.assert_equal(-1.75, float(bfloat16(3.5) / bfloat16(-2))) np.testing.assert_equal( float("-inf"), float(bfloat16(float("inf")) / bfloat16(-2.25))) np.testing.assert_equal( float("inf"), float(bfloat16(float("-inf")) / bfloat16(-2.25))) self.assertTrue(math.isnan(float(bfloat16(3.5) / bfloat16(float("nan"))))) def testLess(self): for v in FLOAT_VALUES: for w in FLOAT_VALUES: self.assertEqual(v < w, bfloat16(v) < bfloat16(w)) def testLessEqual(self): for v in FLOAT_VALUES: for w in FLOAT_VALUES: self.assertEqual(v <= w, bfloat16(v) <= bfloat16(w)) def testGreater(self): for v in FLOAT_VALUES: for w in FLOAT_VALUES: self.assertEqual(v > w, bfloat16(v) > bfloat16(w)) def testGreaterEqual(self): for v in FLOAT_VALUES: for w in FLOAT_VALUES: self.assertEqual(v >= w, bfloat16(v) >= bfloat16(w)) def testEqual(self): for v in FLOAT_VALUES: for w in FLOAT_VALUES: self.assertEqual(v == w, bfloat16(v) == bfloat16(w)) def testNotEqual(self): for v in FLOAT_VALUES: for w in FLOAT_VALUES: self.assertEqual(v != w, bfloat16(v) != bfloat16(w)) def testNan(self): a = np.isnan(bfloat16(float("nan"))) self.assertTrue(a) numpy_assert_allclose(np.array([1.0, a]), np.array([1.0, a])) a = np.array([bfloat16(1.34375), bfloat16(1.4375), bfloat16(float("nan"))], dtype=bfloat16) b = np.array( [bfloat16(1.3359375), bfloat16(1.4375), bfloat16(float("nan"))], dtype=bfloat16) numpy_assert_allclose( a, b, rtol=0.1, atol=0.1, equal_nan=True, err_msg="", verbose=True) def testSort(self): values_to_sort = np.float32(FLOAT_VALUES) sorted_f32 = np.sort(values_to_sort) sorted_bf16 = np.sort(values_to_sort.astype(bfloat16)) np.testing.assert_equal(sorted_f32, np.float32(sorted_bf16)) BinaryOp = collections.namedtuple("BinaryOp", ["op"]) UNARY_UFUNCS = [ np.negative, np.positive, np.absolute, np.fabs, np.rint, np.sign, np.conjugate, np.exp, np.exp2, np.expm1, np.log, np.log10, np.log1p, np.log2, np.sqrt, np.square, np.cbrt, 
np.reciprocal, np.sin, np.cos, np.tan, np.arcsin, np.arccos, np.arctan, np.sinh, np.cosh, np.tanh, np.arcsinh, np.arccosh, np.arctanh, np.deg2rad, np.rad2deg, np.floor, np.ceil, np.trunc ] BINARY_UFUNCS = [ np.add, np.subtract, np.multiply, np.divide, np.logaddexp, np.logaddexp2, np.floor_divide, np.power, np.remainder, np.fmod, np.heaviside, np.arctan2, np.hypot, np.maximum, np.minimum, np.fmax, np.fmin, np.copysign ] BINARY_PREDICATE_UFUNCS = [ np.equal, np.not_equal, np.less, np.greater, np.less_equal, np.greater_equal, np.logical_and, np.logical_or, np.logical_xor ] class Bfloat16NumPyTest(parameterized.TestCase): """Tests the NumPy integration of the bfloat16 type.""" def testDtype(self): self.assertEqual(bfloat16, np.dtype(bfloat16)) def testDeepCopyDoesNotAlterHash(self): # For context, see https://github.com/google/jax/issues/4651. If the hash # value of the type descriptor is not initialized correctly, a deep copy # can change the type hash. dtype = np.dtype(bfloat16) h = hash(dtype) _ = copy.deepcopy(dtype) self.assertEqual(h, hash(dtype)) def testArray(self): x = np.array([[1, 2, 3]], dtype=bfloat16) self.assertEqual(bfloat16, x.dtype) self.assertEqual("[[1 2 3]]", str(x)) np.testing.assert_equal(x, x) numpy_assert_allclose(x, x) self.assertTrue((x == x).all()) def testComparisons(self): x = np.array([401408, 7, -32], dtype=np.float32) bx = x.astype(bfloat16) y = np.array([82432, 7, 0], dtype=np.float32) by = y.astype(bfloat16) np.testing.assert_equal(x == y, bx == by) np.testing.assert_equal(x != y, bx != by) np.testing.assert_equal(x < y, bx < by) np.testing.assert_equal(x > y, bx > by) np.testing.assert_equal(x <= y, bx <= by) np.testing.assert_equal(x >= y, bx >= by) def testEqual2(self): a = np.array([401408], bfloat16) b = np.array([82432], bfloat16) self.assertFalse(a.__eq__(b)) def testCasts(self): for dtype in [ np.float16, np.float32, np.float64, np.int8, np.int16, np.int32, np.int64, np.complex64, np.complex128, np.uint8, np.uint16, np.uint32, 
np.uint64, np.intc, np.int_, np.longlong, np.uintc, np.ulonglong ]: x = np.array([[1, 2, 3]], dtype=dtype) y = x.astype(bfloat16) z = y.astype(dtype) self.assertTrue(np.all(x == y)) self.assertEqual(bfloat16, y.dtype) self.assertTrue(np.all(x == z)) self.assertEqual(dtype, z.dtype) def testConformNumpyComplex(self): for dtype in [np.complex64, np.complex128]: x = np.array([1.1, 2.2 + 2.2j, 3.3], dtype=dtype) y_np = x.astype(np.float32) y_tf = x.astype(bfloat16) numpy_assert_allclose(y_np, y_tf, atol=2e-2) z_np = y_np.astype(dtype) z_tf = y_tf.astype(dtype) numpy_assert_allclose(z_np, z_tf, atol=2e-2) def testArange(self): np.testing.assert_equal( np.arange(100, dtype=np.float32).astype(bfloat16), np.arange(100, dtype=bfloat16)) np.testing.assert_equal( np.arange(-10.5, 7.8, 0.5, dtype=np.float32).astype(bfloat16), np.arange(-10.5, 7.8, 0.5, dtype=bfloat16)) np.testing.assert_equal( np.arange(-0., -7., -0.25, dtype=np.float32).astype(bfloat16), np.arange(-0., -7., -0.25, dtype=bfloat16)) np.testing.assert_equal( np.arange(-16384., 16384., 64., dtype=np.float32).astype(bfloat16), np.arange(-16384., 16384., 64., dtype=bfloat16)) # pylint: disable=g-complex-comprehension @parameterized.named_parameters(({ "testcase_name": "_" + op.__name__, "op": op } for op in UNARY_UFUNCS)) def testUnaryUfunc(self, op): rng = np.random.RandomState(seed=42) x = rng.randn(3, 7, 10).astype(bfloat16) numpy_assert_allclose( op(x).astype(np.float32), op(x.astype(np.float32)), rtol=1e-2) @parameterized.named_parameters(({ "testcase_name": "_" + op.__name__, "op": op } for op in BINARY_UFUNCS)) def testBinaryUfunc(self, op): rng = np.random.RandomState(seed=42) x = rng.randn(3, 7, 10).astype(bfloat16) y = rng.randn(4, 1, 7, 10).astype(bfloat16) numpy_assert_allclose( op(x, y).astype(np.float32), op(x.astype(np.float32), y.astype(np.float32)), rtol=1e-2) @parameterized.named_parameters(({ "testcase_name": "_" + op.__name__, "op": op } for op in BINARY_PREDICATE_UFUNCS)) def 
testBinaryPredicateUfunc(self, op): rng = np.random.RandomState(seed=42) x = rng.randn(3, 7).astype(bfloat16) y = rng.randn(4, 1, 7).astype(bfloat16) np.testing.assert_equal( op(x, y), op(x.astype(np.float32), y.astype(np.float32))) @parameterized.named_parameters(({ "testcase_name": "_" + op.__name__, "op": op } for op in [np.isfinite, np.isinf, np.isnan, np.signbit, np.logical_not])) def testPredicateUfunc(self, op): rng = np.random.RandomState(seed=42) shape = (3, 7, 10) posinf_flips = rng.rand(*shape) < 0.1 neginf_flips = rng.rand(*shape) < 0.1 nan_flips = rng.rand(*shape) < 0.1 vals = rng.randn(*shape) vals = np.where(posinf_flips, np.inf, vals) vals = np.where(neginf_flips, -np.inf, vals) vals = np.where(nan_flips, np.nan, vals) vals = vals.astype(bfloat16) np.testing.assert_equal(op(vals), op(vals.astype(np.float32))) def testDivmod(self): rng = np.random.RandomState(seed=42) x = rng.randn(3, 7).astype(bfloat16) y = rng.randn(4, 1, 7).astype(bfloat16) o1, o2 = np.divmod(x, y) e1, e2 = np.divmod(x.astype(np.float32), y.astype(np.float32)) numpy_assert_allclose(o1, e1, rtol=1e-2) numpy_assert_allclose(o2, e2, rtol=1e-2) def testModf(self): rng = np.random.RandomState(seed=42) x = rng.randn(3, 7).astype(bfloat16) o1, o2 = np.modf(x) e1, e2 = np.modf(x.astype(np.float32)) numpy_assert_allclose(o1.astype(np.float32), e1, rtol=1e-2) numpy_assert_allclose(o2.astype(np.float32), e2, rtol=1e-2) def testLdexp(self): rng = np.random.RandomState(seed=42) x = rng.randn(3, 7).astype(bfloat16) y = rng.randint(-50, 50, (1, 7)) numpy_assert_allclose( np.ldexp(x, y).astype(np.float32), np.ldexp(x.astype(np.float32), y), rtol=1e-2, atol=1e-6) def testFrexp(self): rng = np.random.RandomState(seed=42) x = rng.randn(3, 7).astype(bfloat16) mant1, exp1 = np.frexp(x) mant2, exp2 = np.frexp(x.astype(np.float32)) np.testing.assert_equal(exp1, exp2) numpy_assert_allclose(mant1, mant2, rtol=1e-2) def testNextAfter(self): one = np.array(1., dtype=bfloat16) two = np.array(2., 
dtype=bfloat16) zero = np.array(0., dtype=bfloat16) nan = np.array(np.nan, dtype=bfloat16) np.testing.assert_equal(np.nextafter(one, two) - one, epsilon) np.testing.assert_equal(np.nextafter(one, zero) - one, -epsilon / 2) np.testing.assert_equal(np.isnan(np.nextafter(nan, one)), True) np.testing.assert_equal(np.isnan(np.nextafter(one, nan)), True) np.testing.assert_equal(np.nextafter(one, one), one) smallest_denormal = float.fromhex("1.0p-133") np.testing.assert_equal(np.nextafter(zero, one), smallest_denormal) np.testing.assert_equal(np.nextafter(zero, -one), -smallest_denormal) for a, b in itertools.permutations([0., -0., nan], 2): np.testing.assert_equal( np.nextafter( np.array(a, dtype=np.float32), np.array(b, dtype=np.float32)), np.nextafter( np.array(a, dtype=bfloat16), np.array(b, dtype=bfloat16))) if __name__ == "__main__": absltest.main()
"""Library of training functions.""" import inspect import json import os import time from absl import logging from ddsp.training import cloud import gin import tensorflow.compat.v2 as tf def get_strategy(tpu='', cluster_config=''): """Create a distribution strategy for running on accelerators. For CPU, single-GPU, or multi-GPU jobs on a single machine, call this function without args to return a MirroredStrategy. For TPU jobs, specify an address to the `tpu` argument. For multi-machine GPU jobs, specify a `cluster_config` argument of the cluster configuration. Args: tpu: Address of the TPU. No TPU if left blank. cluster_config: Should be specified only for multi-worker jobs. Task specific dictionary for cluster config dict in the TF_CONFIG format. https://www.tensorflow.org/guide/distributed_training#setting_up_tf_config_environment_variable If passed as a string, will be parsed to a dictionary. Two components should be specified: cluster and task. Cluster provides information about the training cluster, which is a dict consisting of different types of jobs such as chief and worker. Task is information about the current task. For example: "{"cluster": {"worker": ["host1:port", "host2:port"]}, "task": {"type": "worker", "index": 0}}" Returns: A distribution strategy. MirroredStrategy by default. TPUStrategy if `tpu` arg is specified. MultiWorkerMirroredStrategy if `cluster_config` arg is specified. 
""" if tpu: logging.info('Use TPU at %s', tpu) resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=tpu) tf.config.experimental_connect_to_cluster(resolver) tf.tpu.experimental.initialize_tpu_system(resolver) strategy = tf.distribute.TPUStrategy(resolver) elif cluster_config: if not isinstance(cluster_config, dict): cluster_config = json.loads(cluster_config) cluster_spec = tf.train.ClusterSpec(cluster_config['cluster']) resolver = tf.distribute.cluster_resolver.SimpleClusterResolver( cluster_spec=cluster_spec, task_type=cluster_config['task']['type'], task_id=cluster_config['task']['index'], num_accelerators={'GPU': len(tf.config.list_physical_devices('GPU'))}, rpc_layer='grpc') strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy( cluster_resolver=resolver) else: logging.info('Defaulting to MirroredStrategy') strategy = tf.distribute.MirroredStrategy() return strategy def expand_path(file_path): return os.path.expanduser(os.path.expandvars(file_path)) def get_latest_file(dir_path, prefix='operative_config-', suffix='.gin'): """Returns latest file with pattern '/dir_path/prefix[iteration]suffix'. Args: dir_path: Path to the directory. prefix: Filename prefix, not including directory. suffix: Filename suffix, including extension. Returns: Path to the latest file Raises: FileNotFoundError: If no files match the pattern '/dir_path/prefix[int]suffix'. """ dir_path = expand_path(dir_path) dir_prefix = os.path.join(dir_path, prefix) search_pattern = dir_prefix + '*' + suffix file_paths = tf.io.gfile.glob(search_pattern) if not file_paths: raise FileNotFoundError( f'No files found matching the pattern \'{search_pattern}\'.') try: # Filter to get highest iteration, no negative iterations. 
get_iter = lambda fp: abs(int(fp.split(dir_prefix)[-1].split(suffix)[0])) latest_file = max(file_paths, key=get_iter) return latest_file except ValueError as verror: raise FileNotFoundError( f'Files found with pattern \'{search_pattern}\' do not match ' f'the pattern \'{dir_prefix}[iteration_number]{suffix}\'.\n\n' f'Files found:\n{file_paths}') from verror def get_latest_checkpoint(checkpoint_path): """Helper function to get path to latest checkpoint. Args: checkpoint_path: Path to the directory containing model checkpoints, or to a specific checkpoint (e.g. `/path/to/model.ckpt-iteration`). Returns: Path to latest checkpoint. Raises: FileNotFoundError: If no checkpoint is found. """ checkpoint_path = expand_path(checkpoint_path) is_checkpoint = tf.io.gfile.exists(checkpoint_path + '.index') if is_checkpoint: # Return the path if it points to a checkpoint. return checkpoint_path else: # Search using 'checkpoints' file. # Returns None if no 'checkpoints' file, or directory doesn't exist. ckpt = tf.train.latest_checkpoint(checkpoint_path) if ckpt: return ckpt else: # Last resort, look for '/path/ckpt-[iter].index' files. ckpt_f = get_latest_file(checkpoint_path, prefix='ckpt-', suffix='.index') return ckpt_f.split('.index')[0] def get_latest_operative_config(restore_dir): """Finds the most recently saved operative_config in a directory. Args: restore_dir: Path to directory with gin operative_configs. Will also work if passing a path to a file in that directory such as a checkpoint. Returns: Filepath to most recent operative config. Raises: FileNotFoundError: If no config is found. 
""" try: return get_latest_file( restore_dir, prefix='operative_config-', suffix='.gin') except FileNotFoundError: return get_latest_file( os.path.dirname(restore_dir), prefix='operative_config-', suffix='.gin') def write_gin_config(summary_writer, save_dir, step): """"Writes gin operative_config to save_dir and tensorboard.""" config_str = gin.operative_config_str() # Save the original config string to a file. base_name = 'operative_config-{}'.format(step) fname = os.path.join(save_dir, base_name + '.gin') with tf.io.gfile.GFile(fname, 'w') as f: f.write(config_str) # Formatting hack copied from gin.tf.GinConfigSaverHook. def format_for_tensorboard(line): """Convert a single line to markdown format.""" if not line.startswith('#'): return ' ' + line line = line[2:] if line.startswith('===='): return '' if line.startswith('None'): return ' # None.' if line.endswith(':'): return '#### ' + line return line # Convert config string to markdown. md_lines = [] for line in config_str.splitlines(): md_line = format_for_tensorboard(line) if md_line is not None: md_lines.append(md_line) md_config_str = '\n'.join(md_lines) # Add to tensorboard. with summary_writer.as_default(): text_tensor = tf.convert_to_tensor(md_config_str) tf.summary.text(name='gin/' + base_name, data=text_tensor, step=step) summary_writer.flush() def gin_register_keras_layers(): """Registers all keras layers and Sequential to be referenceable in gin.""" # Register sequential model. gin.external_configurable(tf.keras.Sequential, 'tf.keras.Sequential') # Register all the layers. for k, v in inspect.getmembers(tf.keras.layers): # Duck typing for tf.keras.layers.Layer since keras uses metaclasses. 
if hasattr(v, 'variables'): gin.external_configurable(v, f'tf.keras.layers.{k}') @gin.configurable def train(data_provider, trainer, batch_size=32, num_steps=1000000, steps_per_summary=300, steps_per_save=300, save_dir='/tmp/ddsp', restore_dir='/tmp/ddsp', early_stop_loss_value=None, report_loss_to_hypertune=False): """Main training loop. Args: data_provider: DataProvider object for training data. trainer: Trainer object built with Model to train. batch_size: Total batch size. num_steps: Number of training steps. steps_per_summary: Number of training steps per summary save. steps_per_save: Number of training steps per checkpoint save. save_dir: Directory where checkpoints and summaries will be saved. If empty string, no checkpoints or summaries will be saved. restore_dir: Directory where latest checkpoints for resuming the training are stored. If there are no checkpoints in this directory, training will begin anew. early_stop_loss_value: Early stopping. When the total_loss reaches below this value training stops. If None training will run for num_steps steps. report_loss_to_hypertune: Report loss values to hypertune package for hyperparameter tuning, such as on Google Cloud AI-Platform. """ # Get a distributed dataset iterator. dataset = data_provider.get_batch(batch_size, shuffle=True, repeats=-1) dataset = trainer.distribute_dataset(dataset) dataset_iter = iter(dataset) # Build model, easiest to just run forward pass. trainer.build(next(dataset_iter)) # Load latest checkpoint if one exists in load directory. try: trainer.restore(restore_dir) except FileNotFoundError: logging.info('No existing checkpoint found in %s, skipping ' 'checkpoint loading.', restore_dir) if save_dir: # Set up the summary writer and metrics. summary_dir = os.path.join(save_dir, 'summaries', 'train') summary_writer = tf.summary.create_file_writer(summary_dir) # Save the gin config. 
write_gin_config(summary_writer, save_dir, trainer.step.numpy()) else: # Need to create a dummy writer, even if no save_dir is provided. summary_writer = tf.summary.create_noop_writer() # Train. with summary_writer.as_default(): tick = time.time() for iteration in range(num_steps): step = trainer.step # Step is not iteration if restarting a model. # Take a step. losses = trainer.train_step(dataset_iter) # Create training loss metrics when starting/restarting training. if iteration == 0: loss_names = list(losses.keys()) logging.info('Creating metrics for %s', loss_names) avg_losses = {name: tf.keras.metrics.Mean(name=name, dtype=tf.float32) for name in loss_names} # Update metrics. for k, v in losses.items(): avg_losses[k].update_state(v) # Log the step. log_str = 'step: {}\t'.format(int(step.numpy())) for k, v in losses.items(): log_str += '{}: {:.2f}\t'.format(k, v) logging.info(log_str) # Write Summaries. if step % steps_per_summary == 0 and save_dir: # Speed. steps_per_sec = steps_per_summary / (time.time() - tick) tf.summary.scalar('steps_per_sec', steps_per_sec, step=step) tick = time.time() # Metrics. for k, metric in avg_losses.items(): tf.summary.scalar('losses/{}'.format(k), metric.result(), step=step) metric.reset_states() # Report metrics for hyperparameter tuning if enabled. if report_loss_to_hypertune: cloud.report_metric_to_hypertune(losses['total_loss'], step.numpy()) # Stop the training when the loss reaches given value if (early_stop_loss_value is not None and losses['total_loss'] <= early_stop_loss_value): logging.info('Total loss reached early stopping value of %s', early_stop_loss_value) # Write a final checkpoint. if save_dir: trainer.save(save_dir) summary_writer.flush() break # Save Model. if step % steps_per_save == 0 and save_dir: trainer.save(save_dir) summary_writer.flush() logging.info('Training Finished!')
"""A binary to train CIFAR-10 using a single GPU. Accuracy: cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of data) as judged by cifar10_eval.py. Speed: With batch_size 128. System | Step Time (sec/batch) | Accuracy ------------------------------------------------------------------ 1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours) 1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours) Usage: Please see the tutorial and website for how to download the CIFAR-10 data set, compile the program and train the model. http://tensorflow.org/tutorials/deep_cnn/ """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from datetime import datetime import time import tensorflow as tf import cifar10 FLAGS = tf.app.flags.FLAGS tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train', """Directory where to write event logs """ """and checkpoint.""") tf.app.flags.DEFINE_integer('max_steps', 1000000, """Number of batches to run.""") tf.app.flags.DEFINE_boolean('log_device_placement', False, """Whether to log device placement.""") tf.app.flags.DEFINE_integer('log_frequency', 10, """How often to log results to the console.""") def train(): """Train CIFAR-10 for a number of steps.""" with tf.Graph().as_default(): global_step = tf.train.get_or_create_global_step() # Get images and labels for CIFAR-10. # Force input pipeline to CPU:0 to avoid operations sometimes ending up on # GPU and resulting in a slow down. with tf.device('/cpu:0'): images, labels = cifar10.distorted_inputs() # Build a Graph that computes the logits predictions from the # inference model. logits = cifar10.inference(images) # Calculate loss. loss = cifar10.loss(logits, labels) # Build a Graph that trains the model with one batch of examples and # updates the model parameters. 
train_op = cifar10.train(loss, global_step) class _LoggerHook(tf.train.SessionRunHook): """Logs loss and runtime.""" def begin(self): self._step = -1 self._start_time = time.time() def before_run(self, run_context): self._step += 1 return tf.train.SessionRunArgs(loss) # Asks for loss value. def after_run(self, run_context, run_values): if self._step % FLAGS.log_frequency == 0: current_time = time.time() duration = current_time - self._start_time self._start_time = current_time loss_value = run_values.results examples_per_sec = FLAGS.log_frequency * FLAGS.batch_size / duration sec_per_batch = float(duration / FLAGS.log_frequency) format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f ' 'sec/batch)') print (format_str % (datetime.now(), self._step, loss_value, examples_per_sec, sec_per_batch)) with tf.train.MonitoredTrainingSession( checkpoint_dir=FLAGS.train_dir, hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps), tf.train.NanTensorHook(loss), _LoggerHook()], config=tf.ConfigProto( log_device_placement=FLAGS.log_device_placement)) as mon_sess: while not mon_sess.should_stop(): mon_sess.run(train_op) def main(argv=None): # pylint: disable=unused-argument cifar10.maybe_download_and_extract() if tf.gfile.Exists(FLAGS.train_dir): tf.gfile.DeleteRecursively(FLAGS.train_dir) tf.gfile.MakeDirs(FLAGS.train_dir) train() if __name__ == '__main__': tf.app.run()
"""The parsers and plugins interface classes.""" import abc import os from plaso.lib import errors class BaseFileEntryFilter(object): """File entry filter interface.""" # pylint: disable=redundant-returns-doc @abc.abstractmethod def Match(self, file_entry): """Determines if a file entry matches the filter. Args: file_entry (dfvfs.FileEntry): a file entry. Returns: bool: True if the file entry matches the filter. """ class FileNameFileEntryFilter(BaseFileEntryFilter): """File name file entry filter.""" def __init__(self, filename): """Initializes a file entry filter. Args: filename (str): name of the file. """ super(FileNameFileEntryFilter, self).__init__() self._filename = filename.lower() def Match(self, file_entry): """Determines if a file entry matches the filter. Args: file_entry (dfvfs.FileEntry): a file entry. Returns: bool: True if the file entry matches the filter. """ if not file_entry: return False filename = file_entry.name.lower() return filename == self._filename class BaseParser(object): """The parser interface.""" # The name of the parser. This is the name that is used in the registration # and used for parser/plugin selection, so this needs to be concise and unique # for all plugins/parsers, such as 'Chrome', 'Safari' or 'UserAssist'. NAME = 'base_parser' # Data format supported by the parser plugin. This information is used by # the parser manager to generate parser and plugin information. DATA_FORMAT = '' # List of filters that should match for the parser to be applied. FILTERS = frozenset() # Every derived parser class that implements plugins should define # its own _plugin_classes dict: # _plugin_classes = {} # We deliberately don't define it here to make sure the plugins of # different parser classes don't end up in the same dict. _plugin_classes = None def __init__(self): """Initializes a parser. By default all plugins will be enabled. 
To only enable specific plugins use the EnablePlugins method and pass it a list of strings containing the names of the plugins to enable. The default plugin, named "{self.NAME:s}_default", if it exists, is always enabled and cannot be disabled. """ super(BaseParser, self).__init__() self._default_plugin = None self._plugins = None self.EnablePlugins([]) @classmethod def DeregisterPlugin(cls, plugin_class): """Deregisters a plugin class. The plugin classes are identified based on their lower case name. Args: plugin_class (type): class of the plugin. Raises: KeyError: if plugin class is not set for the corresponding name. """ plugin_name = plugin_class.NAME.lower() if plugin_name not in cls._plugin_classes: raise KeyError( 'Plugin class not set for name: {0:s}.'.format( plugin_class.NAME)) del cls._plugin_classes[plugin_name] def EnablePlugins(self, plugin_includes): """Enables parser plugins. Args: plugin_includes (list[str]): names of the plugins to enable, where None or an empty list represents all plugins. Note the default plugin, if it exists, is always enabled and cannot be disabled. """ self._plugins = [] if not self._plugin_classes: return default_plugin_name = '{0:s}_default'.format(self.NAME) for plugin_name, plugin_class in self._plugin_classes.items(): if plugin_name == default_plugin_name: self._default_plugin = plugin_class() continue if plugin_includes and plugin_name not in plugin_includes: continue plugin_object = plugin_class() self._plugins.append(plugin_object) # TODO: move this to a filter. # pylint: disable=redundant-returns-doc @classmethod def GetFormatSpecification(cls): """Retrieves the format specification. Returns: FormatSpecification: a format specification or None if not available. """ return @classmethod def GetPluginObjectByName(cls, plugin_name): """Retrieves a specific plugin object by its name. Args: plugin_name (str): name of the plugin. Returns: BasePlugin: a plugin object or None if not available. 
""" plugin_class = cls._plugin_classes.get(plugin_name, None) if plugin_class: return plugin_class() return None @classmethod def GetPlugins(cls): """Retrieves the registered plugins. Yields: tuple[str, type]: name and class of the plugin. """ for plugin_name, plugin_class in cls._plugin_classes.items(): yield plugin_name, plugin_class @classmethod def RegisterPlugin(cls, plugin_class): """Registers a plugin class. The plugin classes are identified based on their lower case name. Args: plugin_class (type): class of the plugin. Raises: KeyError: if plugin class is already set for the corresponding name. """ plugin_name = plugin_class.NAME.lower() if plugin_name in cls._plugin_classes: raise KeyError(( 'Plugin class already set for name: {0:s}.').format( plugin_class.NAME)) cls._plugin_classes[plugin_name] = plugin_class @classmethod def RegisterPlugins(cls, plugin_classes): """Registers plugin classes. Args: plugin_classes (list[type]): classes of plugins. Raises: KeyError: if plugin class is already set for the corresponding name. """ for plugin_class in plugin_classes: cls.RegisterPlugin(plugin_class) @classmethod def SupportsPlugins(cls): """Determines if a parser supports plugins. Returns: bool: True if the parser supports plugins. """ return cls._plugin_classes is not None class FileEntryParser(BaseParser): """The file entry parser interface.""" def Parse(self, parser_mediator): """Parsers the file entry and extracts event objects. Args: parser_mediator (ParserMediator): a parser mediator. Raises: UnableToParseFile: when the file cannot be parsed. """ file_entry = parser_mediator.GetFileEntry() if not file_entry: raise errors.UnableToParseFile('Invalid file entry') parser_mediator.AppendToParserChain(self) try: self.ParseFileEntry(parser_mediator, file_entry) finally: parser_mediator.PopFromParserChain() @abc.abstractmethod def ParseFileEntry(self, parser_mediator, file_entry): """Parses a file entry. Args: parser_mediator (ParserMediator): a parser mediator. 
file_entry (dfvfs.FileEntry): a file entry to parse. Raises: UnableToParseFile: when the file cannot be parsed. """ class FileObjectParser(BaseParser): """The file-like object parser interface.""" # The initial file offset. Set this value to None if no initial # file offset seek needs to be performed. _INITIAL_FILE_OFFSET = 0 def Parse(self, parser_mediator, file_object): """Parses a single file-like object. Args: parser_mediator (ParserMediator): a parser mediator. file_object (dvfvs.FileIO): a file-like object to parse. Raises: UnableToParseFile: when the file cannot be parsed. """ if not file_object: raise errors.UnableToParseFile('Invalid file object') if self._INITIAL_FILE_OFFSET is not None: file_object.seek(self._INITIAL_FILE_OFFSET, os.SEEK_SET) parser_mediator.AppendToParserChain(self) try: self.ParseFileObject(parser_mediator, file_object) finally: parser_mediator.PopFromParserChain() @abc.abstractmethod def ParseFileObject(self, parser_mediator, file_object): """Parses a file-like object. Args: parser_mediator (ParserMediator): a parser mediator. file_object (dvfvs.FileIO): a file-like object to parse. Raises: UnableToParseFile: when the file cannot be parsed. """
"""Obtain and cache Google OAuth2 credentials for the ISB client."""
from __future__ import print_function
from argparse import ArgumentParser
import os

from oauth2client.client import OAuth2WebServerFlow
from oauth2client import tools
from oauth2client.file import Storage

VERBOSE = False
CLIENT_ID = '586186890913-atr969tu3lf7u574khjjplb45fgpq1bg.apps.googleusercontent.com'
CLIENT_SECRET = 'XeBxiK7NQ0yvAkAnRIKufkFE'
EMAIL_SCOPE = 'https://www.googleapis.com/auth/userinfo.email'
DEFAULT_STORAGE_FILE = os.path.join(os.path.expanduser("~"), '.isb_credentials')


def maybe_print(msg):
    """Print msg only when verbose mode was enabled via --verbose."""
    if VERBOSE:
        print(msg)


def get_credentials(storage=None, oauth_flow_args=None):
    """Return cached OAuth2 credentials, running the web flow if needed.

    Args:
        storage: oauth2client Storage holding cached credentials; defaults
            to a Storage over DEFAULT_STORAGE_FILE.
        oauth_flow_args: optional list of argv-style flags forwarded to
            tools.run_flow (e.g. ['--noauth_local_webserver']).

    Returns:
        An oauth2client credentials object (valid at return time).
    """
    # BUG FIX: the original signature used a mutable default ([]) and
    # appended to it, so '--noauth_local_webserver' accumulated across calls
    # and also mutated lists passed in by callers. Copy instead.
    oauth_flow_args = list(oauth_flow_args) if oauth_flow_args else []
    noweb = '--noauth_local_webserver'
    # When imported as a library (not run as a script), never try to spin up
    # a local webserver for the auth redirect.
    if __name__ != '__main__' and noweb not in oauth_flow_args:
        oauth_flow_args.append(noweb)
    if storage is None:
        storage = Storage(DEFAULT_STORAGE_FILE)
    credentials = storage.get()
    if not credentials or credentials.invalid:
        maybe_print('credentials missing/invalid, kicking off OAuth flow')
        flow = OAuth2WebServerFlow(CLIENT_ID, CLIENT_SECRET, EMAIL_SCOPE)
        # Force the approval prompt so a refresh_token is always issued.
        flow.auth_uri = flow.auth_uri.rstrip('/') + '?approval_prompt=force'
        credentials = tools.run_flow(
            flow, storage, tools.argparser.parse_args(oauth_flow_args))
    return credentials


def main():
    """Command-line entry point: fetch credentials and optionally report them."""
    global VERBOSE
    args = parse_args()
    oauth_flow_args = [args.noauth_local_webserver] if args.noauth_local_webserver else []
    VERBOSE = args.verbose
    maybe_print('--verbose: printing extra information')
    storage = Storage(args.storage_file)
    credentials = get_credentials(storage, oauth_flow_args)
    maybe_print('credentials stored in ' + args.storage_file)
    maybe_print('access_token: ' + credentials.access_token)
    maybe_print('refresh_token: ' + credentials.refresh_token)


def parse_args():
    """Build the argument parser and parse sys.argv."""
    parser = ArgumentParser()
    parser.add_argument(
        '--storage_file', '-s', default=DEFAULT_STORAGE_FILE,
        help='storage file to use for the credentials (default is {})'.format(
            DEFAULT_STORAGE_FILE))
    parser.add_argument(
        '--verbose', '-v', dest='verbose', action='store_true',
        help='display credentials storage location, access token, and refresh token')
    parser.set_defaults(verbose=False)
    # Stores the literal flag string so it can be forwarded verbatim to the
    # oauth flow's own argument parser.
    parser.add_argument('--noauth_local_webserver', '-u', action='store_const',
                        const='--noauth_local_webserver')
    return parser.parse_args()


if __name__ == '__main__':
    main()
"""Publishes random Complex messages on the 'complex' topic at 2 Hz."""
import rospy
from basics.msg import Complex
from random import random

rospy.init_node('message_publisher')

# FIX: pass queue_size explicitly. Omitting it is deprecated in rospy
# (warns at runtime) and falls back to synchronous publishing; a small
# bounded queue makes publishing asynchronous and drops stale messages.
pub = rospy.Publisher('complex', Complex, queue_size=10)

rate = rospy.Rate(2)  # 2 Hz

while not rospy.is_shutdown():
    # Fill each message with fresh random components in [0, 1).
    msg = Complex()
    msg.real = random()
    msg.imaginary = random()
    pub.publish(msg)
    rate.sleep()
import mock import pytest from urlparse import urlparse from api.base.settings.defaults import API_BASE from framework.auth.core import Auth from osf.models import NodeLog from osf.models.licenses import NodeLicense from osf.utils.sanitize import strip_html from osf.utils import permissions from osf_tests.factories import ( NodeFactory, ProjectFactory, RegistrationFactory, AuthUserFactory, CollectionFactory, CommentFactory, NodeLicenseRecordFactory, PrivateLinkFactory, PreprintFactory, IdentifierFactory, InstitutionFactory, ) from rest_framework import exceptions from tests.base import fake from tests.utils import assert_items_equal, assert_latest_log, assert_latest_log_not from website.views import find_bookmark_collection @pytest.fixture() def user(): return AuthUserFactory() @pytest.mark.django_db class TestNodeDetail: @pytest.fixture() def user_two(self): return AuthUserFactory() @pytest.fixture() def project_public(self, user): return ProjectFactory( title='Project One', is_public=True, creator=user) @pytest.fixture() def project_private(self, user): return ProjectFactory( title='Project Two', is_public=False, creator=user) @pytest.fixture() def component_public(self, user, project_public): return NodeFactory(parent=project_public, creator=user, is_public=True) @pytest.fixture() def url_public(self, project_public): return '/{}nodes/{}/'.format(API_BASE, project_public._id) @pytest.fixture() def url_private(self, project_private): return '/{}nodes/{}/'.format(API_BASE, project_private._id) @pytest.fixture() def url_component_public(self, component_public): return '/{}nodes/{}/'.format(API_BASE, component_public._id) @pytest.fixture() def permissions_read(self): return ['read'] @pytest.fixture() def permissions_write(self): return ['read', 'write'] @pytest.fixture() def permissions_admin(self): return ['read', 'admin', 'write'] def test_return_project_details( self, app, user, user_two, project_public, project_private, url_public, url_private, permissions_read, 
permissions_admin): # test_return_public_project_details_logged_out res = app.get(url_public) assert res.status_code == 200 assert res.content_type == 'application/vnd.api+json' assert res.json['data']['attributes']['title'] == project_public.title assert res.json['data']['attributes']['description'] == project_public.description assert res.json['data']['attributes']['category'] == project_public.category assert_items_equal( res.json['data']['attributes']['current_user_permissions'], permissions_read) # test_return_public_project_details_contributor_logged_in res = app.get(url_public, auth=user.auth) assert res.status_code == 200 assert res.content_type == 'application/vnd.api+json' assert res.json['data']['attributes']['title'] == project_public.title assert res.json['data']['attributes']['description'] == project_public.description assert res.json['data']['attributes']['category'] == project_public.category assert_items_equal( res.json['data']['attributes']['current_user_permissions'], permissions_admin) # test_return_public_project_details_non_contributor_logged_in res = app.get(url_public, auth=user_two.auth) assert res.status_code == 200 assert res.content_type == 'application/vnd.api+json' assert res.json['data']['attributes']['title'] == project_public.title assert res.json['data']['attributes']['description'] == project_public.description assert res.json['data']['attributes']['category'] == project_public.category assert_items_equal( res.json['data']['attributes']['current_user_permissions'], permissions_read) # test_return_private_project_details_logged_in_admin_contributor res = app.get(url_private, auth=user.auth) assert res.status_code == 200 assert res.content_type == 'application/vnd.api+json' assert res.json['data']['attributes']['title'] == project_private.title assert res.json['data']['attributes']['description'] == project_private.description assert res.json['data']['attributes']['category'] == project_private.category assert_items_equal( 
res.json['data']['attributes']['current_user_permissions'], permissions_admin) # test_return_private_project_details_logged_out res = app.get(url_private, expect_errors=True) assert res.status_code == 401 assert 'detail' in res.json['errors'][0] # test_return_private_project_details_logged_in_non_contributor res = app.get(url_private, auth=user_two.auth, expect_errors=True) assert res.status_code == 403 assert 'detail' in res.json['errors'][0] def test_return_private_project_details_logged_in_write_contributor( self, app, user, user_two, project_private, url_private, permissions_write): project_private.add_contributor( contributor=user_two, auth=Auth(user), save=True) res = app.get(url_private, auth=user_two.auth) assert res.status_code == 200 assert res.content_type == 'application/vnd.api+json' assert res.json['data']['attributes']['title'] == project_private.title assert res.json['data']['attributes']['description'] == project_private.description assert res.json['data']['attributes']['category'] == project_private.category assert_items_equal( res.json['data']['attributes']['current_user_permissions'], permissions_write) def test_top_level_project_has_no_parent(self, app, url_public): res = app.get(url_public) assert res.status_code == 200 assert 'parent' not in res.json['data']['relationships'] assert 'id' in res.json['data'] assert res.content_type == 'application/vnd.api+json' def test_child_project_has_parent( self, app, user, project_public, url_public): public_component = NodeFactory( parent=project_public, creator=user, is_public=True) public_component_url = '/{}nodes/{}/'.format( API_BASE, public_component._id) res = app.get(public_component_url) assert res.status_code == 200 url = res.json['data']['relationships']['parent']['links']['related']['href'] assert urlparse(url).path == url_public def test_node_has(self, app, url_public): # test_node_has_children_link res = app.get(url_public) url = 
res.json['data']['relationships']['children']['links']['related']['href'] expected_url = '{}children/'.format(url_public) assert urlparse(url).path == expected_url # test_node_has_contributors_link res = app.get(url_public) url = res.json['data']['relationships']['contributors']['links']['related']['href'] expected_url = '{}contributors/'.format(url_public) assert urlparse(url).path == expected_url # test_node_has_node_links_link res = app.get(url_public) url = res.json['data']['relationships']['node_links']['links']['related']['href'] expected_url = '{}node_links/'.format(url_public) assert urlparse(url).path == expected_url # test_node_has_registrations_link res = app.get(url_public) url = res.json['data']['relationships']['registrations']['links']['related']['href'] expected_url = '{}registrations/'.format(url_public) assert urlparse(url).path == expected_url # test_node_has_files_link res = app.get(url_public) url = res.json['data']['relationships']['files']['links']['related']['href'] expected_url = '{}files/'.format(url_public) assert urlparse(url).path == expected_url def test_node_has_comments_link( self, app, user, project_public, url_public): CommentFactory(node=project_public, user=user) res = app.get(url_public) assert res.status_code == 200 assert 'comments' in res.json['data']['relationships'].keys() url = res.json['data']['relationships']['comments']['links']['related']['href'] res = app.get(url) assert res.status_code == 200 assert res.json['data'][0]['type'] == 'comments' def test_node_comments_link_query_params_formatted( self, app, user, project_public, project_private, url_private): CommentFactory(node=project_public, user=user) project_private_link = PrivateLinkFactory(anonymous=False) project_private_link.nodes.add(project_private) project_private_link.save() res = app.get(url_private, auth=user.auth) url = res.json['data']['relationships']['comments']['links']['related']['href'] assert project_private_link.key not in url res = app.get( 
'{}?view_only={}'.format( url_private, project_private_link.key)) url = res.json['data']['relationships']['comments']['links']['related']['href'] assert project_private_link.key in url def test_node_has_correct_unread_comments_count( self, app, user, project_public, url_public): contributor = AuthUserFactory() project_public.add_contributor( contributor=contributor, auth=Auth(user), save=True) CommentFactory( node=project_public, user=contributor, page='node') res = app.get( '{}?related_counts=True'.format(url_public), auth=user.auth) unread = res.json['data']['relationships']['comments']['links']['related']['meta']['unread'] unread_comments_node = unread['node'] assert unread_comments_node == 1 def test_node_properties(self, app, url_public): res = app.get(url_public) assert res.json['data']['attributes']['public'] is True assert res.json['data']['attributes']['registration'] is False assert res.json['data']['attributes']['collection'] is False assert res.json['data']['attributes']['tags'] == [] def test_requesting_folder_returns_error(self, app, user): folder = CollectionFactory(creator=user) res = app.get( '/{}nodes/{}/'.format(API_BASE, folder._id), auth=user.auth, expect_errors=True ) assert res.status_code == 404 def test_cannot_return_registrations_at_node_detail_endpoint( self, app, user, project_public): registration = RegistrationFactory( project=project_public, creator=user) res = app.get('/{}nodes/{}/'.format( API_BASE, registration._id), auth=user.auth, expect_errors=True) assert res.status_code == 404 def test_cannot_return_folder_at_node_detail_endpoint(self, app, user): folder = CollectionFactory(creator=user) res = app.get( '/{}nodes/{}/'.format(API_BASE, folder._id), auth=user.auth, expect_errors=True) assert res.status_code == 404 def test_node_list_embed_identifier_link(self, app, user, project_public, url_public): url = url_public + '?embed=identifiers' res = app.get(url) assert res.status_code == 200 link = 
res.json['data']['relationships']['identifiers']['links']['related']['href'] assert '{}identifiers/'.format(url_public) in link @pytest.mark.django_db class NodeCRUDTestCase: @pytest.fixture() def institution_one(self): return InstitutionFactory() @pytest.fixture() def institution_two(self): return InstitutionFactory() @pytest.fixture() def user_two(self, institution_one, institution_two): auth_user = AuthUserFactory() auth_user.affiliated_institutions.add(institution_one) auth_user.affiliated_institutions.add(institution_two) return auth_user @pytest.fixture() def title(self): return 'Cool Project' @pytest.fixture() def title_new(self): return 'Super Cool Project' @pytest.fixture() def description(self): return 'A Properly Cool Project' @pytest.fixture() def description_new(self): return 'An even cooler project' @pytest.fixture() def category(self): return 'data' @pytest.fixture() def category_new(self): return 'project' @pytest.fixture() def project_public(self, user, title, description, category): return ProjectFactory( title=title, description=description, category=category, is_public=True, creator=user ) @pytest.fixture() def project_private(self, user, title, description, category): return ProjectFactory( title=title, description=description, category=category, is_public=False, creator=user ) @pytest.fixture() def url_public(self, project_public): return '/{}nodes/{}/'.format(API_BASE, project_public._id) @pytest.fixture() def url_private(self, project_private): return '/{}nodes/{}/'.format(API_BASE, project_private._id) @pytest.fixture() def url_fake(self): return '/{}nodes/{}/'.format(API_BASE, '12345') @pytest.fixture() def make_node_payload(self): def payload(node, attributes, relationships=None): payload_data = { 'data': { 'id': node._id, 'type': 'nodes', 'attributes': attributes, } } if relationships: payload_data['data']['relationships'] = relationships return payload_data return payload @pytest.mark.django_db class TestNodeUpdate(NodeCRUDTestCase): 
    def test_node_institution_update(
            self, app, user_two, project_private, url_private,
            make_node_payload, institution_one, institution_two):
        """An admin contributor can set the node's affiliated institutions."""
        project_private.add_contributor(
            user_two,
            permissions=(permissions.READ, permissions.WRITE, permissions.ADMIN),
            auth=Auth(project_private.creator)
        )
        affiliated_institutions = {
            'affiliated_institutions': {'data': [
                {
                    'type': 'institutions',
                    'id': institution_one._id
                },
                {
                    'type': 'institutions',
                    'id': institution_two._id
                },
            ]
            }
        }
        payload = make_node_payload(
            project_private, {'public': False},
            relationships=affiliated_institutions)
        res = app.patch_json_api(
            url_private, payload, auth=user_two.auth, expect_errors=False)
        assert res.status_code == 200
        institutions = project_private.affiliated_institutions.all()
        assert institution_one in institutions
        assert institution_two in institutions

    def test_node_update_invalid_data(self, app, user, url_public):
        """Non-object request bodies (string, list) are rejected with a 400 ParseError."""
        res = app.put_json_api(
            url_public, 'Incorrect data',
            auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail

        res = app.put_json_api(
            url_public, ['Incorrect data'],
            auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail

    def test_cannot_make_project_public_if_non_contributor(
            self, app, project_private, url_private, make_node_payload):
        """A user with no role on the node cannot flip it to public."""
        with assert_latest_log_not(NodeLog.MADE_PUBLIC, project_private):
            non_contrib = AuthUserFactory()
            res = app.patch_json(
                url_private,
                make_node_payload(project_private, {'public': True}),
                auth=non_contrib.auth, expect_errors=True
            )
            assert res.status_code == 403

    def test_cannot_make_project_public_if_non_admin_contributor(
            self, app, project_private, url_private, make_node_payload):
        """Read/write (non-admin) contributors cannot make the project public."""
        non_admin = AuthUserFactory()
        project_private.add_contributor(
            non_admin,
            permissions=(permissions.READ, permissions.WRITE),
            auth=Auth(project_private.creator)
        )
        project_private.save()
        res = app.patch_json(
            url_private,
            make_node_payload(project_private, {'public': True}),
            auth=non_admin.auth, expect_errors=True
        )
        assert res.status_code == 403

        project_private.reload()
        assert not project_private.is_public

    def test_can_make_project_public_if_admin_contributor(
            self, app, project_private, url_private, make_node_payload):
        """An admin contributor can make the project public, and it is logged."""
        with assert_latest_log(NodeLog.MADE_PUBLIC, project_private):
            admin_user = AuthUserFactory()
            project_private.add_contributor(
                admin_user,
                permissions=(permissions.READ, permissions.WRITE, permissions.ADMIN),
                auth=Auth(project_private.creator))
            project_private.save()
            res = app.patch_json_api(
                url_private,
                make_node_payload(project_private, {'public': True}),
                auth=admin_user.auth  # self.user is creator/admin
            )
            assert res.status_code == 200
            project_private.reload()
            assert project_private.is_public

    def test_update_errors(
            self, app, user, user_two, title_new, description_new,
            category_new, project_public, project_private,
            url_public, url_private):
        """Exercise the malformed-payload and permission failure modes of PUT."""
        # test_update_project_properties_not_nested
        res = app.put_json_api(url_public, {
            'id': project_public._id,
            'type': 'nodes',
            'title': title_new,
            'description': description_new,
            'category': category_new,
            'public': True,
        }, auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Request must include /data.'
        assert res.json['errors'][0]['source']['pointer'] == '/data'

        # test_update_invalid_id
        res = app.put_json_api(url_public, {
            'data': {
                'id': '12345',
                'type': 'nodes',
                'attributes': {
                    'title': title_new,
                    'description': description_new,
                    'category': category_new,
                    'public': True
                }
            }
        }, auth=user.auth, expect_errors=True)
        assert res.status_code == 409

        # test_update_invalid_type
        res = app.put_json_api(url_public, {
            'data': {
                'id': project_public._id,
                'type': 'node',
                'attributes': {
                    'title': title_new,
                    'description': description_new,
                    'category': category_new,
                    'public': True
                }
            }
        }, auth=user.auth, expect_errors=True)
        assert res.status_code == 409

        # test_update_no_id
        res = app.put_json_api(url_public, {
            'data': {
                'type': 'nodes',
                'attributes': {
                    'title': title_new,
                    'description': description_new,
                    'category': category_new,
                    'public': True
                }
            }
        }, auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'This field may not be null.'
        assert res.json['errors'][0]['source']['pointer'] == '/data/id'

        # test_update_no_type
        res = app.put_json_api(url_public, {
            'data': {
                'id': project_public._id,
                'attributes': {
                    'title': title_new,
                    'description': description_new,
                    'category': category_new,
                    'public': True
                }
            }
        }, auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'This field may not be null.'
        assert res.json['errors'][0]['source']['pointer'] == '/data/type'

        # test_update_public_project_logged_out
        res = app.put_json_api(url_public, {
            'data': {
                'id': project_public._id,
                'type': 'nodes',
                'attributes': {
                    'title': title_new,
                    'description': description_new,
                    'category': category_new,
                    'public': True
                }
            }
        }, expect_errors=True)
        assert res.status_code == 401
        assert 'detail' in res.json['errors'][0]

        # test_update_project_invalid_title
        project = {
            'data': {
                'type': 'nodes',
                'id': project_public._id,
                'attributes': {
                    'title': 'A' * 201,
                    'category': 'project',
                }
            }
        }
        res = app.put_json_api(
            url_public, project,
            auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Title cannot exceed 200 characters.'

        # test_update_public_project_logged_in_but_unauthorized
        res = app.put_json_api(url_public, {
            'data': {
                'id': project_private._id,
                'type': 'nodes',
                'attributes': {
                    'title': title_new,
                    'description': description_new,
                    'category': category_new,
                    'public': True
                }
            }
        }, auth=user_two.auth, expect_errors=True)
        assert res.status_code == 403
        assert 'detail' in res.json['errors'][0]

        # test_update_private_project_logged_out
        res = app.put_json_api(url_private, {
            'data': {
                'id': project_private._id,
                'type': 'nodes',
                'attributes': {
                    'title': title_new,
                    'description': description_new,
                    'category': category_new,
                    'public': False
                }
            }
        }, expect_errors=True)
        assert res.status_code == 401
        assert 'detail' in res.json['errors'][0]

        # test_update_private_project_logged_in_non_contributor
        res = app.put_json_api(url_private, {
            'data': {
                'id': project_private._id,
                'type': 'nodes',
                'attributes': {
                    'title': title_new,
                    'description': description_new,
                    'category': category_new,
                    'public': False
                }
            }
        }, auth=user_two.auth, expect_errors=True)
        assert res.status_code == 403
        assert 'detail' in res.json['errors'][0]

    def test_update_public_project_logged_in(
            self, app, user, title_new, description_new,
            category_new, project_public, url_public):
        """A contributor can PUT the full attribute set on a public project."""
        with assert_latest_log(NodeLog.UPDATED_FIELDS, project_public):
            res = app.put_json_api(url_public, {
                'data': {
                    'id': project_public._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': title_new,
                        'description': description_new,
                        'category': category_new,
                        'public': True
                    }
                }
            }, auth=user.auth)
            assert res.status_code == 200
            assert res.content_type == 'application/vnd.api+json'
            assert res.json['data']['attributes']['title'] == title_new
            assert res.json['data']['attributes']['description'] == description_new
            assert res.json['data']['attributes']['category'] == category_new

    def test_cannot_update_a_registration(self, app, user, project_public):
        """Registrations are read-only at the nodes endpoint; a PUT 404s
        and leaves the registration unchanged."""
        registration = RegistrationFactory(
            project=project_public, creator=user)
        original_title = registration.title
        original_description = registration.description
        url = '/{}nodes/{}/'.format(API_BASE, registration._id)
        res = app.put_json_api(url, {
            'data': {
                'id': registration._id,
                'type': 'nodes',
                'attributes': {
                    'title': fake.catch_phrase(),
                    'description': fake.bs(),
                    'category': 'hypothesis',
                    'public': True
                }
            }
        }, auth=user.auth, expect_errors=True)
        registration.reload()
        assert res.status_code == 404
        assert registration.title == original_title
        assert registration.description == original_description

    def test_update_private_project_logged_in_contributor(
            self, app, user, title_new, description_new,
            category_new, project_private, url_private):
        """A contributor can PUT the full attribute set on a private project."""
        with assert_latest_log(NodeLog.UPDATED_FIELDS, project_private):
            res = app.put_json_api(url_private, {
                'data': {
                    'id': project_private._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': title_new,
                        'description': description_new,
                        'category': category_new,
                        'public': False
                    }
                }
            }, auth=user.auth)
            assert res.status_code == 200
            assert res.content_type == 'application/vnd.api+json'
            assert res.json['data']['attributes']['title'] == title_new
            assert res.json['data']['attributes']['description'] == description_new
            assert res.json['data']['attributes']['category'] == category_new

    def test_update_project_sanitizes_html_properly(
            self, app, user, category_new, project_public, url_public):
        """Post request should update resource, and any HTML in fields should
        be stripped"""
        with assert_latest_log(NodeLog.UPDATED_FIELDS, project_public):
            new_title = '<strong>Super</strong> Cool Project'
            new_description = 'An <script>alert("even cooler")</script> project'
            res = app.put_json_api(url_public, {
                'data': {
                    'id': project_public._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': new_title,
                        'description': new_description,
                        'category': category_new,
                        'public': True,
                    }
                }
            }, auth=user.auth)
            assert res.status_code == 200
            assert res.content_type == 'application/vnd.api+json'
            assert res.json['data']['attributes']['title'] == strip_html(
                new_title)
            assert res.json['data']['attributes']['description'] == strip_html(
                new_description)

    def test_partial_update_project_updates_project_correctly_and_sanitizes_html(
            self, app, user, description, category, project_public, url_public):
        """PATCHing only the title updates it (sanitized) and leaves the
        other fields untouched."""
        with assert_latest_log(NodeLog.EDITED_TITLE, project_public):
            new_title = 'An <script>alert("even cooler")</script> project'
            res = app.patch_json_api(url_public, {
                'data': {
                    'id': project_public._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': new_title
                    }
                }
            }, auth=user.auth)
            assert res.status_code == 200
            assert res.content_type == 'application/vnd.api+json'

            res = app.get(url_public)
            assert res.status_code == 200
            assert res.content_type == 'application/vnd.api+json'
            assert res.json['data']['attributes']['title'] == strip_html(
                new_title)
            assert res.json['data']['attributes']['description'] == description
            assert res.json['data']['attributes']['category'] == category

    def test_partial_update_public_project_logged_in(
            self, app, user, title_new, description,
            category, project_public, url_public):
        """A contributor can PATCH a single attribute on a public project."""
        with assert_latest_log(NodeLog.EDITED_TITLE, project_public):
            res = app.patch_json_api(url_public, {
                'data': {
                    'id': project_public._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': title_new,
                    }
                }
            }, auth=user.auth)
            assert res.status_code == 200
            assert res.content_type == 'application/vnd.api+json'
            assert res.json['data']['attributes']['title'] == title_new
            assert res.json['data']['attributes']['description'] == description
            assert res.json['data']['attributes']['category'] == category

    def test_write_to_public_field_non_contrib_forbidden(
            self, app, user_two, project_public, url_public):
        """Non-contributors may not write the `public` field."""
        # Test non-contrib writing to public field
        res = app.patch_json_api(url_public, {
            'data': {
                'attributes': {
                    'public': False},
                'id': project_public._id,
                'type': 'nodes'
            }
        }, auth=user_two.auth, expect_errors=True)
        assert res.status_code == 403
        assert 'detail' in res.json['errors'][0]

    def test_partial_update_errors(
            self, app, user, user_two, title_new,
            project_public, project_private,
            url_public, url_private):
        """Exercise the malformed-payload and permission failure modes of PATCH."""
        # test_partial_update_public_project_logged_out
        res = app.patch_json_api(url_public, {
            'data': {
                'id': project_public._id,
                'type': 'nodes',
                'attributes': {
                    'title': title_new
                }
            }
        }, expect_errors=True)
        assert res.status_code == 401
        assert 'detail' in res.json['errors'][0]

        # test_partial_update_public_project_logged_in_but_unauthorized
        # Public resource, logged in, unauthorized
        res = app.patch_json_api(url_public, {
            'data': {
                'attributes': {
                    'title': title_new},
                'id': project_public._id,
                'type': 'nodes',
            }
        }, auth=user_two.auth, expect_errors=True)
        assert res.status_code == 403
        assert 'detail' in res.json['errors'][0]

        # test_partial_update_private_project_logged_out
        res = app.patch_json_api(url_private, {
            'data': {
                'id': project_private._id,
                'type': 'nodes',
                'attributes': {
                    'title': title_new
                }
            }
        }, expect_errors=True)
        assert res.status_code == 401
        assert 'detail' in res.json['errors'][0]

        # test_partial_update_private_project_logged_in_non_contributor
        res = app.patch_json_api(url_private, {
            'data': {
                'attributes': {
                    'title': title_new},
                'id': project_private._id,
                'type': 'nodes',
            }
        }, auth=user_two.auth, expect_errors=True)
        assert res.status_code == 403
        assert 'detail' in res.json['errors'][0]

        # test_partial_update_invalid_id
        res = app.patch_json_api(url_public, {
            'data': {
                'id': '12345',
                'type': 'nodes',
                'attributes': {
                    'title': title_new,
                }
            }
        }, auth=user.auth, expect_errors=True)
        assert res.status_code == 409

        # test_partial_update_invalid_type
        res = app.patch_json_api(url_public, {
            'data': {
                'id': project_public._id,
                'type': 'node',
                'attributes': {
                    'title': title_new,
                }
            }
        }, auth=user.auth, expect_errors=True)
        assert res.status_code == 409

        # test_partial_update_no_id
        res = app.patch_json_api(url_public, {
            'data': {
                'type': 'nodes',
                'attributes': {
                    'title': title_new,
                }
            }
        }, auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'This field may not be null.'
        assert res.json['errors'][0]['source']['pointer'] == '/data/id'

        # test_partial_update_no_type
        res = app.patch_json_api(url_public, {
            'data': {
                'id': project_public._id,
                'attributes': {
                    'title': title_new,
                }
            }
        }, auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'This field may not be null.'
        assert res.json['errors'][0]['source']['pointer'] == '/data/type'

        # Nothing will be updated here
        # test_partial_update_project_properties_not_nested
        res = app.patch_json_api(url_public, {
            'data': {
                'id': project_public._id,
                'type': 'nodes',
                'title': title_new,
            }
        }, auth=user.auth, expect_errors=True)
        assert res.status_code == 400

    def test_partial_update_private_project_logged_in_contributor(
            self, app, user, title_new, description,
            category, project_private, url_private):
        """A contributor can PATCH a single attribute on a private project."""
        with assert_latest_log(NodeLog.EDITED_TITLE, project_private):
            res = app.patch_json_api(url_private, {
                'data': {
                    'attributes': {
                        'title': title_new},
                    'id': project_private._id,
                    'type': 'nodes',
                }
            }, auth=user.auth)
            assert res.status_code == 200
            assert res.content_type == 'application/vnd.api+json'
            assert res.json['data']['attributes']['title'] == title_new
            assert res.json['data']['attributes']['description'] == description
            assert res.json['data']['attributes']['category'] == category

    def test_multiple_patch_requests_with_same_category_generates_one_log(
            self, app, user, project_private, url_private, make_node_payload):
        """Re-PATCHing the same category value must not add a second log entry."""
        project_private.category = 'project'
        project_private.save()

        new_category = 'data'
        payload = make_node_payload(
            project_private,
            attributes={'category': new_category})

        original_n_logs = project_private.logs.count()
        res = app.patch_json_api(url_private, payload, auth=user.auth)
        assert res.status_code == 200
        project_private.reload()
        assert project_private.category == new_category
        # sanity check: the first change produced exactly one new log
        assert project_private.logs.count() == original_n_logs + 1

        # a second identical PATCH must not produce another log
        app.patch_json_api(url_private, payload, auth=user.auth)
        project_private.reload()
        assert project_private.category == new_category
        assert project_private.logs.count() == original_n_logs + 1

    def test_public_project_with_publicly_editable_wiki_turns_private(
            self, app, user, project_public, url_public, make_node_payload):
        """A public project with a publicly-editable wiki can still be made private."""
        wiki = project_public.get_addon('wiki')
        wiki.set_editing(permissions=True, auth=Auth(user=user), log=True)
        res = app.patch_json_api(
            url_public,
            make_node_payload(project_public, {'public': False}),
            auth=user.auth  # self.user is creator/admin
        )
        assert res.status_code == 200

    @mock.patch('website.identifiers.tasks.update_ezid_metadata_on_change.s')
    def test_set_node_private_updates_ezid(
            self, mock_update_ezid_metadata, app, user,
            project_public, url_public, make_node_payload):
        """Making a DOI-bearing node private marks its identifier 'unavailable'."""
        IdentifierFactory(referent=project_public, category='doi')
        res = app.patch_json_api(
            url_public,
            make_node_payload(
                project_public,
                {'public': False}),
            auth=user.auth)
        assert res.status_code == 200
        project_public.reload()
        assert not project_public.is_public
        mock_update_ezid_metadata.assert_called_with(
            project_public._id, status='unavailable')

    @mock.patch('website.preprints.tasks.update_ezid_metadata_on_change')
    def test_set_node_with_preprint_private_updates_ezid(
            self, mock_update_ezid_metadata, app, user,
            project_public, url_public, make_node_payload):
        """Making a node with a preprint private updates the *preprint's* identifier."""
        target_object = PreprintFactory(project=project_public)
        res = app.patch_json_api(
            url_public,
            make_node_payload(
                project_public,
                {'public': False}),
            auth=user.auth)
        assert res.status_code == 200
        project_public.reload()
        assert not project_public.is_public
        mock_update_ezid_metadata.assert_called_with(
            target_object._id, status='unavailable')


@pytest.mark.django_db
class TestNodeDelete(NodeCRUDTestCase):
    """DELETE behaviour of the node detail endpoint."""

    def test_deletes_node_errors(
            self, app, user, user_two, project_public,
            project_private, url_public, url_private, url_fake):
        """Unauthenticated/unauthorized deletes fail and leave nodes intact."""
        # test_deletes_public_node_logged_out
        res = app.delete(url_public, expect_errors=True)
        assert res.status_code == 401
        assert 'detail' in res.json['errors'][0]

        # test_deletes_public_node_fails_if_unauthorized
        res = app.delete_json_api(
            url_public,
            auth=user_two.auth, expect_errors=True)
        project_public.reload()
        assert res.status_code == 403
        assert project_public.is_deleted is False
        assert 'detail' in res.json['errors'][0]

        # test_deletes_private_node_logged_out
        res = app.delete(url_private, expect_errors=True)
        assert res.status_code == 401
        assert 'detail' in res.json['errors'][0]

        # test_deletes_private_node_logged_in_non_contributor
        res = app.delete(url_private, auth=user_two.auth, expect_errors=True)
        project_private.reload()
        assert res.status_code == 403
        assert project_private.is_deleted is False
        assert 'detail' in res.json['errors'][0]

        # test_deletes_invalid_node
        res = app.delete(url_fake, auth=user.auth, expect_errors=True)
        assert res.status_code == 404
        assert 'detail' in res.json['errors'][0]

    def test_deletes_private_node_logged_in_read_only_contributor(
            self, app, user_two, project_private, url_private):
        """Read-only contributors cannot delete a private node."""
        project_private.add_contributor(
            user_two, permissions=[permissions.READ])
        project_private.save()
        res = app.delete(url_private, auth=user_two.auth, expect_errors=True)
        project_private.reload()
        assert res.status_code == 403
        assert project_private.is_deleted is False
        assert 'detail' in res.json['errors'][0]

    def test_delete_project_with_component_returns_error(self, app, user):
        """A parent with child components cannot be deleted first."""
        project = ProjectFactory(creator=user)
        NodeFactory(parent=project, creator=user)
        # Return a 400 because component must be deleted before deleting the
        # parent
        res = app.delete_json_api(
            '/{}nodes/{}/'.format(API_BASE, project._id),
            auth=user.auth,
            expect_errors=True
        )
        assert res.status_code == 400
        errors = res.json['errors']
        assert len(errors) == 1
        assert (
            errors[0]['detail'] ==
            'Any child components must be deleted prior to deleting this project.')

    def test_delete_bookmark_collection_returns_error(self, app, user):
        """The user's bookmark collection is not deletable via the nodes endpoint."""
        bookmark_collection = find_bookmark_collection(user)
        res = app.delete_json_api(
            '/{}nodes/{}/'.format(API_BASE, bookmark_collection._id),
            auth=user.auth,
            expect_errors=True
        )
        # Bookmark collections are collections, so a 404 is returned
        assert res.status_code == 404

    @mock.patch('website.identifiers.tasks.update_ezid_metadata_on_change.s')
    def test_delete_node_with_preprint_calls_preprint_update_status(
            self, mock_update_ezid_metadata_on_change, app, user,
            project_public, url_public):
        # Deleting a node that has a preprint must trigger the identifier
        # status update task (call args are not pinned here, only that it ran).
        PreprintFactory(project=project_public)
        app.delete_json_api(url_public, auth=user.auth, expect_errors=True)
        project_public.reload()

        assert mock_update_ezid_metadata_on_change.called

    @mock.patch('website.identifiers.tasks.update_ezid_metadata_on_change.s')
    def test_delete_node_with_identifier_calls_preprint_update_status(
            self, mock_update_ezid_metadata_on_change, app, user,
            project_public, url_public):
        """Deleting a DOI-bearing node triggers the identifier status update task."""
        IdentifierFactory(referent=project_public, category='doi')
        app.delete_json_api(url_public, auth=user.auth, expect_errors=True)
        project_public.reload()

        assert mock_update_ezid_metadata_on_change.called

    def test_deletes_public_node_succeeds_as_owner(
            self, app, user, project_public, url_public):
        """The creator/admin can delete a public node (204, PROJECT_DELETED logged)."""
        with assert_latest_log(NodeLog.PROJECT_DELETED, project_public):
            res = app.delete_json_api(
                url_public, auth=user.auth, expect_errors=True)
            project_public.reload()
            assert res.status_code == 204
            assert project_public.is_deleted is True

    def test_requesting_deleted_returns_410(
            self, app, project_public, url_public):
        """GET on a deleted node returns 410 Gone."""
        project_public.is_deleted = True
        project_public.save()
        res = app.get(url_public, expect_errors=True)
        assert res.status_code == 410
        assert 'detail' in res.json['errors'][0]

    def test_deletes_private_node_logged_in_contributor(
            self, app, user, project_private, url_private):
        """The creator can delete a private node (204, PROJECT_DELETED logged)."""
        with assert_latest_log(NodeLog.PROJECT_DELETED, project_private):
            res = app.delete(url_private, auth=user.auth, expect_errors=True)
            project_private.reload()
            assert res.status_code == 204
            assert project_private.is_deleted is True


@pytest.mark.django_db
class TestReturnDeletedNode:
    """All verbs against a deleted node return 410 Gone."""

    @pytest.fixture()
    def project_public_deleted(self, user):
        return ProjectFactory(
            is_deleted=True,
            creator=user,
            title='This public project has been deleted',
            category='project',
            is_public=True
        )

    @pytest.fixture()
    def project_private_deleted(self, user):
        return ProjectFactory(
            is_deleted=True,
            creator=user,
            title='This private project has been deleted',
            category='project',
            is_public=False
        )

    @pytest.fixture()
    def title_new(self):
        return 'This deleted node has been edited'

    @pytest.fixture()
    def url_project_public_deleted(self, project_public_deleted):
        return '/{}nodes/{}/'.format(API_BASE, project_public_deleted._id)

    @pytest.fixture()
    def url_project_private_deleted(self, project_private_deleted):
        return '/{}nodes/{}/'.format(API_BASE, project_private_deleted._id)

    def test_return_deleted_node(
            self, app, user, title_new, project_public_deleted,
            project_private_deleted, url_project_public_deleted,
            url_project_private_deleted):
        """GET/PUT/DELETE on deleted public and private nodes all return 410."""
        # test_return_deleted_public_node
        res = app.get(url_project_public_deleted, expect_errors=True)
        assert res.status_code == 410

        # test_return_deleted_private_node
        res = app.get(
            url_project_private_deleted,
            auth=user.auth, expect_errors=True)
        assert res.status_code == 410

        # test_edit_deleted_public_node
        res = app.put_json_api(
            url_project_public_deleted,
            params={
                'title': title_new,
                'node_id': project_public_deleted._id,
                'category': project_public_deleted.category
            },
            auth=user.auth, expect_errors=True)
        assert res.status_code == 410

        # test_edit_deleted_private_node
        res = app.put_json_api(
            url_project_private_deleted,
            params={
                'title': title_new,
                'node_id': project_private_deleted._id,
                'category': project_private_deleted.category
            },
            auth=user.auth, expect_errors=True)
        assert res.status_code == 410

        # test_delete_deleted_public_node
        res = app.delete(
            url_project_public_deleted,
            auth=user.auth, expect_errors=True)
        assert res.status_code == 410

        # test_delete_deleted_private_node
        res = app.delete(
            url_project_private_deleted,
            auth=user.auth, expect_errors=True)
        assert res.status_code == 410


@pytest.mark.django_db
class TestNodeTags:
    """Tag reads and writes through the node detail endpoint."""

    @pytest.fixture()
    def user_admin(self):
        return AuthUserFactory()

    @pytest.fixture()
    def user_non_contrib(self):
        return AuthUserFactory()

    @pytest.fixture()
    def user_read_contrib(self):
        return AuthUserFactory()

    @pytest.fixture()
    def project_public(self, user, user_admin):
        project_public = ProjectFactory(
            title='Project One', is_public=True, creator=user)
        project_public.add_contributor(
            user_admin,
            permissions=permissions.CREATOR_PERMISSIONS,
            save=True)
        project_public.add_contributor(
            user, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS,
            save=True)
        return project_public

    @pytest.fixture()
    def project_private(self, user, user_admin):
        project_private = ProjectFactory(
            title='Project Two', is_public=False, creator=user)
        project_private.add_contributor(
            user_admin,
            permissions=permissions.CREATOR_PERMISSIONS,
            save=True)
        project_private.add_contributor(
            user, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS,
            save=True)
        return project_private

    @pytest.fixture()
    def url_public(self, project_public):
        return '/{}nodes/{}/'.format(API_BASE, project_public._id)

    @pytest.fixture()
    def url_private(self, project_private):
        return '/{}nodes/{}/'.format(API_BASE, project_private._id)

    @pytest.fixture()
    def payload_public(self, project_public):
        return {
            'data': {
                'id': project_public._id,
                'type': 'nodes',
                'attributes': {
                    'tags': ['new-tag']
                }
            }
        }

    @pytest.fixture()
    def payload_private(self, project_private):
        return {
            'data': {
                'id': project_private._id,
                'type': 'nodes',
                'attributes': {
                    'tags': ['new-tag']
                }
            }
        }

    def test_public_project_starts_with_no_tags(self, app, url_public):
        res = app.get(url_public)
        assert res.status_code == 200
        assert len(res.json['data']['attributes']['tags']) == 0

    def test_node_detail_does_not_expose_system_tags(
            self, app, project_public, url_public):
        """System tags are internal and must not appear in the API response."""
        project_public.add_system_tag('systag', save=True)
        res = app.get(url_public)
        assert res.status_code == 200
        assert len(res.json['data']['attributes']['tags']) == 0

    def test_contributor_can_add_tag_to_public_project(
            self, app, user, project_public, payload_public, url_public):
        with assert_latest_log(NodeLog.TAG_ADDED, project_public):
            res = app.patch_json_api(
                url_public, payload_public,
                auth=user.auth, expect_errors=True)
            assert res.status_code == 200
            # Ensure data is correct from the PATCH response
            assert len(res.json['data']['attributes']['tags']) == 1
            assert res.json['data']['attributes']['tags'][0] == 'new-tag'
            # Ensure data is correct in the database
            project_public.reload()
            assert project_public.tags.count() == 1
            assert project_public.tags.first()._id == 'new-tag'
            # Ensure data is correct when GETting the resource again
            reload_res = app.get(url_public)
            assert len(reload_res.json['data']['attributes']['tags']) == 1
            assert reload_res.json['data']['attributes']['tags'][0] == 'new-tag'

    def test_contributor_can_add_tag_to_private_project(
            self, app, user, project_private, payload_private, url_private):
        with assert_latest_log(NodeLog.TAG_ADDED, project_private):
            res = app.patch_json_api(
                url_private, payload_private, auth=user.auth)
            assert res.status_code == 200
            # Ensure data is correct from the PATCH response
            assert len(res.json['data']['attributes']['tags']) == 1
            assert res.json['data']['attributes']['tags'][0] == 'new-tag'
            # Ensure data is correct in the database
            project_private.reload()
            assert project_private.tags.count() == 1
            assert project_private.tags.first()._id == 'new-tag'
            # Ensure data is correct when GETting the resource again
            reload_res = app.get(url_private, auth=user.auth)
            assert len(reload_res.json['data']['attributes']['tags']) == 1
            assert reload_res.json['data']['attributes']['tags'][0] == 'new-tag'

    def test_partial_update_project_does_not_clear_tags(
            self, app, user_admin, project_private, payload_private, url_private):
        """PATCHing unrelated attributes must not wipe existing tags."""
        res = app.patch_json_api(
            url_private, payload_private, auth=user_admin.auth)
        assert res.status_code == 200
        assert len(res.json['data']['attributes']['tags']) == 1
        new_payload = {
            'data': {
                'id': project_private._id,
                'type': 'nodes',
                'attributes': {
                    'public': True
                }
            }
        }
        res = app.patch_json_api(
            url_private, new_payload, auth=user_admin.auth)
        assert res.status_code == 200
        assert len(res.json['data']['attributes']['tags']) == 1
        new_payload['data']['attributes']['public'] = False
        res = app.patch_json_api(
            url_private, new_payload, auth=user_admin.auth)
        assert res.status_code == 200
        assert len(res.json['data']['attributes']['tags']) == 1

    def test_add_tag_to_project_errors(
            self, app, user_non_contrib, user_read_contrib,
            payload_public, payload_private,
            url_public, url_private):
        """Anonymous, non-contributor and read-only users may not write tags."""
        # test_non_authenticated_user_cannot_add_tag_to_public_project
        res = app.patch_json_api(
            url_public, payload_public,
            expect_errors=True, auth=None)
        assert res.status_code == 401

        # test_non_authenticated_user_cannot_add_tag_to_private_project
        res = app.patch_json_api(
            url_private, payload_private,
            expect_errors=True, auth=None)
        assert res.status_code == 401

        # test_non_contributor_cannot_add_tag_to_public_project
        res = app.patch_json_api(
            url_public, payload_public,
            expect_errors=True, auth=user_non_contrib.auth)
        assert res.status_code == 403

        # test_non_contributor_cannot_add_tag_to_private_project
        res = app.patch_json_api(
            url_private, payload_private,
            expect_errors=True, auth=user_non_contrib.auth)
        assert res.status_code == 403

        # test_read_only_contributor_cannot_add_tag_to_public_project
        res = app.patch_json_api(
            url_public, payload_public,
            expect_errors=True, auth=user_read_contrib.auth)
        assert res.status_code == 403

        # test_read_only_contributor_cannot_add_tag_to_private_project
        res = app.patch_json_api(
            url_private, payload_private,
            expect_errors=True, auth=user_read_contrib.auth)
        assert res.status_code == 403

    def test_tags_add_and_remove_properly(
            self, app, user, project_private, payload_private, url_private):
        """Tags can be replaced and cleared; each change is logged."""
        with assert_latest_log(NodeLog.TAG_ADDED, project_private):
            res = app.patch_json_api(
                url_private, payload_private, auth=user.auth)
            assert res.status_code == 200
            # Ensure adding tag data is correct from the PATCH response
            assert len(res.json['data']['attributes']['tags']) == 1
            assert res.json['data']['attributes']['tags'][0] == 'new-tag'
        with assert_latest_log(NodeLog.TAG_REMOVED, project_private), assert_latest_log(NodeLog.TAG_ADDED, project_private, 1):
            # Ensure removing and adding tag data is correct from the PATCH
            # response
            res = app.patch_json_api(
                url_private,
                {
                    'data': {
                        'id': project_private._id,
                        'type': 'nodes',
                        'attributes': {'tags': ['newer-tag']}
                    }
                }, auth=user.auth)
            assert res.status_code == 200
            assert len(res.json['data']['attributes']['tags']) == 1
            assert res.json['data']['attributes']['tags'][0] == 'newer-tag'
        with assert_latest_log(NodeLog.TAG_REMOVED, project_private):
            # Ensure removing tag data is correct from the PATCH response
            res = app.patch_json_api(
                url_private,
                {
                    'data': {
                        'id': project_private._id,
                        'type': 'nodes',
                        'attributes': {'tags': []}
                    }
                }, auth=user.auth)
            assert res.status_code == 200
            assert len(res.json['data']['attributes']['tags']) == 0

    def test_tags_post_object_instead_of_list(self, user, app):
        """POSTing a dict where a list of tags is expected yields a 400."""
        url = '/{}nodes/'.format(API_BASE)
        payload = {'data': {
            'type': 'nodes',
            'attributes': {
                'title': 'new title',
                'category': 'project',
                'tags': {'foo': 'bar'}
            }
        }}
        res = app.post_json_api(
            url, payload, auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Expected a list of items but got type "dict".'

    def test_tags_patch_object_instead_of_list(
            self, app, user, payload_public, url_public):
        """PATCHing a dict where a list of tags is expected yields a 400."""
        payload_public['data']['attributes']['tags'] = {'foo': 'bar'}
        res = app.patch_json_api(
            url_public, payload_public,
            auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Expected a list of items but got type "dict".'
@pytest.mark.django_db
class TestNodeLicense:
    """Read-side tests: node license attributes and the license
    relationship are exposed correctly for public/private projects."""

    @pytest.fixture()
    def user_admin(self):
        return AuthUserFactory()

    @pytest.fixture()
    def user_two(self):
        return AuthUserFactory()

    @pytest.fixture()
    def user_read_contrib(self):
        return AuthUserFactory()

    @pytest.fixture()
    def license_name(self):
        return 'MIT License'

    @pytest.fixture()
    def node_license(self, license_name):
        return NodeLicense.objects.filter(name=license_name).first()

    @pytest.fixture()
    def year(self):
        return '2105'

    @pytest.fixture()
    def copyright_holders(self):
        return ['Foo', 'Bar']

    @pytest.fixture()
    def project_public(
            self, user, user_admin, node_license, year, copyright_holders):
        project_public = ProjectFactory(
            title='Project One', is_public=True, creator=user)
        project_public.add_contributor(
            user_admin,
            permissions=permissions.CREATOR_PERMISSIONS,
            save=True)
        project_public.add_contributor(
            user,
            permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS,
            save=True)
        project_public.node_license = NodeLicenseRecordFactory(
            node_license=node_license,
            year=year,
            copyright_holders=copyright_holders
        )
        project_public.save()
        return project_public

    @pytest.fixture()
    def project_private(
            self, user, user_admin, node_license, year, copyright_holders):
        project_private = ProjectFactory(
            title='Project Two', is_public=False, creator=user)
        project_private.add_contributor(
            user_admin,
            permissions=permissions.CREATOR_PERMISSIONS,
            save=True)
        project_private.add_contributor(
            user,
            permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS,
            save=True)
        project_private.node_license = NodeLicenseRecordFactory(
            node_license=node_license,
            year=year,
            copyright_holders=copyright_holders
        )
        project_private.save()
        return project_private

    @pytest.fixture()
    def url_public(self, project_public):
        return '/{}nodes/{}/'.format(API_BASE, project_public._id)

    @pytest.fixture()
    def url_private(self, project_private):
        return '/{}nodes/{}/'.format(API_BASE, project_private._id)

    def test_node_has(
            self, app, user, node_license, project_public,
            project_private, url_private, url_public):
        """Public and private nodes expose both the node_license attribute
        and the license relationship link."""
        # test_public_node_has_node_license
        res = app.get(url_public)
        assert project_public.node_license.year == res.json[
            'data']['attributes']['node_license']['year']

        # test_public_node_has_license_relationship
        res = app.get(url_public)
        expected_license_url = '/{}licenses/{}'.format(
            API_BASE, node_license._id)
        actual_license_url = res.json['data']['relationships']['license']['links']['related']['href']
        assert expected_license_url in actual_license_url

        # test_private_node_has_node_license
        res = app.get(url_private, auth=user.auth)
        assert project_private.node_license.year == res.json[
            'data']['attributes']['node_license']['year']

        # test_private_node_has_license_relationship
        res = app.get(url_private, auth=user.auth)
        expected_license_url = '/{}licenses/{}'.format(
            API_BASE, node_license._id)
        actual_license_url = res.json['data']['relationships']['license']['links']['related']['href']
        assert expected_license_url in actual_license_url

    def test_component_return_parent_license_if_no_license(
            self, app, user, node_license, project_public):
        """A component without its own license inherits its parent's."""
        node = NodeFactory(parent=project_public, creator=user)
        node.save()
        node_url = '/{}nodes/{}/'.format(API_BASE, node._id)
        res = app.get(node_url, auth=user.auth)
        assert not node.node_license
        assert project_public.node_license.year == \
            res.json['data']['attributes']['node_license']['year']
        actual_license_url = res.json['data']['relationships']['license']['links']['related']['href']
        expected_license_url = '/{}licenses/{}'.format(
            API_BASE, node_license._id)
        assert expected_license_url in actual_license_url


@pytest.mark.django_db
class TestNodeUpdateLicense:
    """Write-side tests: updating a node's license (relationship and/or
    year / copyright_holders attributes) via PATCH, including permission
    checks, validation errors and node-log side effects."""

    @pytest.fixture()
    def user_admin_contrib(self):
        return AuthUserFactory()

    @pytest.fixture()
    def user_write_contrib(self):
        return AuthUserFactory()

    @pytest.fixture()
    def user_read_contrib(self):
        return AuthUserFactory()

    @pytest.fixture()
    def user_non_contrib(self):
        return AuthUserFactory()

    @pytest.fixture()
    def node(self, user_admin_contrib, user_write_contrib, user_read_contrib):
        node = NodeFactory(creator=user_admin_contrib)
        node.add_contributor(user_write_contrib, auth=Auth(user_admin_contrib))
        node.add_contributor(
            user_read_contrib,
            auth=Auth(user_admin_contrib),
            permissions=['read'])
        node.save()
        return node

    @pytest.fixture()
    def license_cc0(self):
        return NodeLicense.objects.filter(name='CC0 1.0 Universal').first()

    @pytest.fixture()
    def license_mit(self):
        return NodeLicense.objects.filter(name='MIT License').first()

    @pytest.fixture()
    def license_no(self):
        return NodeLicense.objects.get(name='No license')

    @pytest.fixture()
    def url_node(self, node):
        return '/{}nodes/{}/'.format(API_BASE, node._id)

    @pytest.fixture()
    def make_payload(self):
        # Builds a JSON-API payload; the license relationship block is only
        # included when a license_id is given.
        def payload(
                node_id, license_id=None, license_year=None,
                copyright_holders=None):
            attributes = {}

            if license_year and copyright_holders:
                attributes = {
                    'node_license': {
                        'year': license_year,
                        'copyright_holders': copyright_holders
                    }
                }
            elif license_year:
                attributes = {
                    'node_license': {
                        'year': license_year
                    }
                }
            elif copyright_holders:
                attributes = {
                    'node_license': {
                        'copyright_holders': copyright_holders
                    }
                }

            return {
                'data': {
                    'type': 'nodes',
                    'id': node_id,
                    'attributes': attributes,
                    'relationships': {
                        'license': {
                            'data': {
                                'type': 'licenses',
                                'id': license_id
                            }
                        }
                    }
                }
            } if license_id else {
                'data': {
                    'type': 'nodes',
                    'id': node_id,
                    'attributes': attributes
                }
            }
        return payload

    @pytest.fixture()
    def make_request(self, app):
        def request(url, data, auth=None, expect_errors=False):
            return app.patch_json_api(
                url, data, auth=auth, expect_errors=expect_errors)
        return request

    def test_admin_update_license_with_invalid_id(
            self, user_admin_contrib, node, make_payload,
            make_request, url_node):
        data = make_payload(
            node_id=node._id,
            license_id='thisisafakelicenseid'
        )

        assert node.node_license is None

        res = make_request(
            url_node, data,
            auth=user_admin_contrib.auth,
            expect_errors=True)
        assert res.status_code == 404
        assert res.json['errors'][0]['detail'] == 'Unable to find specified license.'

        node.reload()
        assert node.node_license is None

    def test_admin_can_update_license(
            self, user_admin_contrib, node, make_payload,
            make_request, license_cc0, url_node):
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )

        assert node.node_license is None

        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.reload()

        assert node.node_license.node_license == license_cc0
        assert node.node_license.year is None
        assert node.node_license.copyright_holders == []

    def test_admin_can_update_license_record(
            self, user_admin_contrib, node, make_payload,
            make_request, license_no, url_node):
        data = make_payload(
            node_id=node._id,
            license_id=license_no._id,
            license_year='2015',
            copyright_holders=['Mr. Monument', 'Princess OSF']
        )

        assert node.node_license is None

        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.reload()

        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2015'
        assert node.node_license.copyright_holders == [
            'Mr. Monument', 'Princess OSF']

    def test_cannot_update(
            self, user_write_contrib, user_read_contrib, user_non_contrib,
            node, make_payload, make_request, license_cc0, url_node):
        """Write/read contributors, non-contributors and anonymous users
        are all forbidden from changing the license."""
        # def test_rw_contributor_cannot_update_license(self):
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )

        res = make_request(
            url_node, data,
            auth=user_write_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403
        assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail

        # def test_read_contributor_cannot_update_license(self):
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )

        res = make_request(
            url_node, data,
            auth=user_read_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403
        assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail

        # def test_non_contributor_cannot_update_license(self):
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )

        res = make_request(
            url_node, data,
            auth=user_non_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403
        assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail

        # def test_unauthenticated_user_cannot_update_license(self):
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )

        res = make_request(url_node, data, expect_errors=True)
        assert res.status_code == 401
        assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail

    def test_update_node_with_existing_license_year_attribute_only(
            self, user_admin_contrib, node, make_payload,
            make_request, license_no, url_node):
        node.set_node_license(
            {
                'id': license_no.license_id,
                'year': '2014',
                'copyrightHolders': ['Reason', 'Mr. E']
            },
            Auth(user_admin_contrib),
        )
        node.save()

        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2014'
        assert node.node_license.copyright_holders == ['Reason', 'Mr. E']

        data = make_payload(
            node_id=node._id,
            license_year='2015'
        )

        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.node_license.reload()

        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2015'
        assert node.node_license.copyright_holders == ['Reason', 'Mr. E']

    def test_update_node_with_existing_license_copyright_holders_attribute_only(
            self, user_admin_contrib, node, make_payload,
            make_request, license_no, url_node):
        node.set_node_license(
            {
                'id': license_no.license_id,
                'year': '2014',
                'copyrightHolders': ['Reason', 'Mr. E']
            },
            Auth(user_admin_contrib),
        )
        node.save()

        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2014'
        assert node.node_license.copyright_holders == ['Reason', 'Mr. E']

        data = make_payload(
            node_id=node._id,
            copyright_holders=['Mr. Monument', 'Princess OSF']
        )

        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.node_license.reload()

        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2014'
        assert node.node_license.copyright_holders == [
            'Mr. Monument', 'Princess OSF']

    def test_update_node_with_existing_license_relationship_only(
            self, user_admin_contrib, node, make_payload,
            make_request, license_cc0, license_no, url_node):
        node.set_node_license(
            {
                'id': license_no.license_id,
                'year': '2014',
                'copyrightHolders': ['Reason', 'Mr. E']
            },
            Auth(user_admin_contrib),
        )
        node.save()

        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2014'
        assert node.node_license.copyright_holders == ['Reason', 'Mr. E']

        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )

        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.node_license.reload()

        assert node.node_license.node_license == license_cc0
        assert node.node_license.year == '2014'
        assert node.node_license.copyright_holders == ['Reason', 'Mr. E']

    def test_update_node_with_existing_license_relationship_and_attributes(
            self, user_admin_contrib, node, make_payload, make_request,
            license_no, license_cc0, url_node):
        node.set_node_license(
            {
                'id': license_no.license_id,
                'year': '2014',
                'copyrightHolders': ['Reason', 'Mr. E']
            },
            Auth(user_admin_contrib),
            save=True
        )

        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2014'
        assert node.node_license.copyright_holders == ['Reason', 'Mr. E']

        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id,
            license_year='2015',
            copyright_holders=['Mr. Monument', 'Princess OSF']
        )

        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.node_license.reload()

        assert node.node_license.node_license == license_cc0
        assert node.node_license.year == '2015'
        assert node.node_license.copyright_holders == [
            'Mr. Monument', 'Princess OSF']

    def test_update_node_license_without_required_year_in_payload(
            self, user_admin_contrib, node, make_payload,
            make_request, license_no, url_node):
        data = make_payload(
            node_id=node._id,
            license_id=license_no._id,
            copyright_holders=['Rick', 'Morty']
        )

        res = make_request(
            url_node, data,
            auth=user_admin_contrib.auth,
            expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'year must be specified for this license'

    def test_update_node_license_without_required_copyright_holders_in_payload_(
            self, user_admin_contrib, node, make_payload,
            make_request, license_no, url_node):
        data = make_payload(
            node_id=node._id,
            license_id=license_no._id,
            license_year='1994'
        )

        res = make_request(
            url_node, data,
            auth=user_admin_contrib.auth,
            expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'copyrightHolders must be specified for this license'

    def test_update_node_license_adds_log(
            self, user_admin_contrib, node, make_payload,
            make_request, license_cc0, url_node):
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )
        logs_before_update = node.logs.count()

        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.reload()
        logs_after_update = node.logs.count()

        assert logs_before_update != logs_after_update
        assert node.logs.latest().action == 'license_changed'

    def test_update_node_license_without_change_does_not_add_log(
            self, user_admin_contrib, node, make_payload,
            make_request, license_no, url_node):
        node.set_node_license(
            {
                'id': license_no.license_id,
                'year': '2015',
                'copyrightHolders': ['Kim', 'Kanye']
            },
            auth=Auth(user_admin_contrib),
            save=True
        )

        before_num_logs = node.logs.count()
        before_update_log = node.logs.latest()

        # Same license data, holders merely reordered — no change, no log.
        data = make_payload(
            node_id=node._id,
            license_id=license_no._id,
            license_year='2015',
            copyright_holders=['Kanye', 'Kim']
        )
        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        node.reload()

        after_num_logs = node.logs.count()
        after_update_log = node.logs.latest()

        assert res.status_code == 200
        assert before_num_logs == after_num_logs
        assert before_update_log._id == after_update_log._id
example_template = Template({ 'A': RsrcDef({}, []), 'B': RsrcDef({}, []), 'C': RsrcDef({'a': '4alpha'}, ['A', 'B']), 'D': RsrcDef({'c': GetRes('C')}, []), 'E': RsrcDef({'ca': GetAtt('C', 'a')}, []), }) engine.create_stack('foo', example_template) engine.noop(3) engine.rollback_stack('foo') engine.noop(6) engine.call(verify, Template())
""" Helper classes for creating frontend metadata """ class ContactPersonDesc(object): """ Description class for a contact person """ def __init__(self): self.contact_type = None self._email_address = [] self.given_name = None self.sur_name = None def add_email_address(self, address): """ Adds an email address to the person description :type address: str :param address: Address to be added """ self._email_address.append(address) def to_dict(self): """ Returns a dictionary representation of the ContactPersonDesc. The format is the same as a pysaml2 configuration for a contact person. :rtype: dict[str, str] :return: A dictionary representation """ person = {} if self.contact_type: person["contact_type"] = self.contact_type if self._email_address: person["email_address"] = self._email_address if self.given_name: person["given_name"] = self.given_name if self.sur_name: person["sur_name"] = self.sur_name return person class UIInfoDesc(object): """ Description class for UI info """ def __init__(self): self._description = [] self._display_name = [] self._logos = [] def add_description(self, text, lang): """ Binds a description to the given language :type text: str :type lang: str :param text: Description :param lang: description language """ self._description.append({"text": text, "lang": lang}) def add_display_name(self, text, lang): """ Binds a display name to the given language :type text: str :type lang: str :param text: Display name :param lang: Language """ self._display_name.append({"text": text, "lang": lang}) def add_logo(self, text, width, height, lang=None): """ Binds a logo to the given language :type text: str :type width: str :type height: str :type lang: Optional[str] :param text: Path to logo :param width: width of logo :param height: height of logo :param lang: language """ logo_entry ={"text": text, "width": width, "height": height} if lang: logo_entry["lang"] = lang self._logos.append(logo_entry) def to_dict(self): """ Returns a dictionary 
representation of the UIInfoDesc object. The format is the same as a pysaml2 configuration for ui info. :rtype: dict[str, str] :return: A dictionary representation """ ui_info = {} if self._description: ui_info["description"] = self._description if self._display_name: ui_info["display_name"] = self._display_name if self._logos: ui_info["logo"] = self._logos return {"service": {"idp": {"ui_info": ui_info}}} if ui_info else {} class OrganizationDesc(object): """ Description class for an organization """ def __init__(self): self._display_name = [] self._name = [] self._url = [] def add_display_name(self, name, lang): """ Binds a display name to the given language :type name: str :type lang: str :param name: display name :param lang: language """ self._display_name.append((name, lang)) def add_name(self, name, lang): """ Binds a name to the given language :type name: str :type lang: str :param name: Name of the organization :param lang: language """ self._name.append((name, lang)) def add_url(self, url, lang): """ Binds an url to the given language :type url: str :type lang: str :param url: url to bind :param lang: language """ self._url.append((url, lang)) def to_dict(self): """ Returns a dictionary representation of the OrganizationDesc object. The format is the same as a pysaml2 configuration for organization. 
:rtype: dict[str, str] :return: A dictionary representation """ org = {} if self._display_name: org["display_name"] = self._display_name if self._name: org["name"] = self._name if self._url: org["url"] = self._url return {"organization": org} if org else {} class MetadataDescription(object): """ Description class for a backend module """ def __init__(self, entity_id): self.entity_id = entity_id self._organization = None self._contact_person = [] self._ui_info = None def organization(self, organization): """ Set an organization to the description :type organization: satosa.metadata_creation.description.OrganizationDesc :param organization: Organization description """ if not isinstance(organization, OrganizationDesc): raise TypeError("organization must be of type OrganizationDesc") self._organization = organization organization = property(None, organization) def add_contact_person(self, person): """ Adds a contact person to the description :type person: satosa.metadata_creation.description.ContactPersonDesc :param person: The contact person to be added """ if not isinstance(person, ContactPersonDesc): raise TypeError("person must be of type ContactPersonDesc") self._contact_person.append(person) def ui_info(self, ui_info): """ Set an ui info to the description :type ui_info: satosa.metadata_creation.description.UIInfoDesc :param ui_info: The ui info to be set """ if not isinstance(ui_info, UIInfoDesc): raise TypeError("ui_info must be of type UIInfoDesc") self._ui_info = ui_info ui_info = property(None, ui_info) def to_dict(self): """ Returns a dictionary representation of the MetadataDescription object. 
The format is the same as a pysaml2 configuration :rtype: dict[str, Any] :return: A dictionary representation """ description = {} description["entityid"] = self.entity_id if self._organization: description.update(self._organization.to_dict()) if self._contact_person: description['contact_person'] = [] for person in self._contact_person: description['contact_person'].append(person.to_dict()) if self._ui_info: description.update(self._ui_info.to_dict()) return description
from __future__ import absolute_import from django.utils.translation import ugettext as _ from zerver.lib.actions import check_send_message from zerver.lib.response import json_success, json_error from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view from zerver.models import Client, UserProfile from django.http import HttpRequest, HttpResponse from typing import Any, Dict, Text CRASHLYTICS_SUBJECT_TEMPLATE = '{display_id}: {title}' CRASHLYTICS_MESSAGE_TEMPLATE = '[Issue]({url}) impacts at least {impacted_devices_count} device(s).' CRASHLYTICS_SETUP_SUBJECT_TEMPLATE = "Setup" CRASHLYTICS_SETUP_MESSAGE_TEMPLATE = "Webhook has been successfully configured." VERIFICATION_EVENT = 'verification' @api_key_only_webhook_view('Crashlytics') @has_request_variables def api_crashlytics_webhook(request, user_profile, client, payload=REQ(argument_type='body'), stream=REQ(default='crashlytics')): # type: (HttpRequest, UserProfile, Client, Dict[str, Any], Text) -> HttpResponse try: event = payload['event'] if event == VERIFICATION_EVENT: subject = CRASHLYTICS_SETUP_SUBJECT_TEMPLATE body = CRASHLYTICS_SETUP_MESSAGE_TEMPLATE else: issue_body = payload['payload'] subject = CRASHLYTICS_SUBJECT_TEMPLATE.format( display_id=issue_body['display_id'], title=issue_body['title'] ) body = CRASHLYTICS_MESSAGE_TEMPLATE.format( impacted_devices_count=issue_body['impacted_devices_count'], url=issue_body['url'] ) except KeyError as e: return json_error(_("Missing key {} in JSON".format(str(e)))) check_send_message(user_profile, client, 'stream', [stream], subject, body) return json_success()
import contextlib import ctypes from ctypes import wintypes import os import re import struct import time from oslo_log import log as oslo_logging import six from six.moves import winreg from tzlocal import windows_tz from win32com import client import win32net import win32netcon import win32process import win32security import wmi from cloudbaseinit import exception from cloudbaseinit.osutils import base from cloudbaseinit.utils.windows import disk from cloudbaseinit.utils.windows import network from cloudbaseinit.utils.windows import privilege from cloudbaseinit.utils.windows import timezone LOG = oslo_logging.getLogger(__name__) AF_INET6 = 23 UNICAST = 1 MANUAL = 1 PREFERRED_ADDR = 4 advapi32 = ctypes.windll.advapi32 kernel32 = ctypes.windll.kernel32 netapi32 = ctypes.windll.netapi32 userenv = ctypes.windll.userenv iphlpapi = ctypes.windll.iphlpapi Ws2_32 = ctypes.windll.Ws2_32 setupapi = ctypes.windll.setupapi msvcrt = ctypes.cdll.msvcrt ntdll = ctypes.windll.ntdll class Win32_PROFILEINFO(ctypes.Structure): _fields_ = [ ('dwSize', wintypes.DWORD), ('dwFlags', wintypes.DWORD), ('lpUserName', wintypes.LPWSTR), ('lpProfilePath', wintypes.LPWSTR), ('lpDefaultPath', wintypes.LPWSTR), ('lpServerName', wintypes.LPWSTR), ('lpPolicyPath', wintypes.LPWSTR), ('hprofile', wintypes.HANDLE) ] class Win32_LOCALGROUP_MEMBERS_INFO_3(ctypes.Structure): _fields_ = [ ('lgrmi3_domainandname', wintypes.LPWSTR) ] class Win32_MIB_IPFORWARDROW(ctypes.Structure): _fields_ = [ ('dwForwardDest', wintypes.DWORD), ('dwForwardMask', wintypes.DWORD), ('dwForwardPolicy', wintypes.DWORD), ('dwForwardNextHop', wintypes.DWORD), ('dwForwardIfIndex', wintypes.DWORD), ('dwForwardType', wintypes.DWORD), ('dwForwardProto', wintypes.DWORD), ('dwForwardAge', wintypes.DWORD), ('dwForwardNextHopAS', wintypes.DWORD), ('dwForwardMetric1', wintypes.DWORD), ('dwForwardMetric2', wintypes.DWORD), ('dwForwardMetric3', wintypes.DWORD), ('dwForwardMetric4', wintypes.DWORD), ('dwForwardMetric5', wintypes.DWORD) ] 
class Win32_MIB_IPFORWARDTABLE(ctypes.Structure): _fields_ = [ ('dwNumEntries', wintypes.DWORD), ('table', Win32_MIB_IPFORWARDROW * 1) ] class Win32_OSVERSIONINFOEX_W(ctypes.Structure): _fields_ = [ ('dwOSVersionInfoSize', wintypes.DWORD), ('dwMajorVersion', wintypes.DWORD), ('dwMinorVersion', wintypes.DWORD), ('dwBuildNumber', wintypes.DWORD), ('dwPlatformId', wintypes.DWORD), ('szCSDVersion', wintypes.WCHAR * 128), ('wServicePackMajor', wintypes.WORD), ('wServicePackMinor', wintypes.WORD), ('wSuiteMask', wintypes.WORD), ('wProductType', wintypes.BYTE), ('wReserved', wintypes.BYTE) ] class Win32_SP_DEVICE_INTERFACE_DATA(ctypes.Structure): _fields_ = [ ('cbSize', wintypes.DWORD), ('InterfaceClassGuid', disk.GUID), ('Flags', wintypes.DWORD), ('Reserved', ctypes.POINTER(wintypes.ULONG)) ] class Win32_SP_DEVICE_INTERFACE_DETAIL_DATA_W(ctypes.Structure): _fields_ = [ ('cbSize', wintypes.DWORD), ('DevicePath', ctypes.c_byte * 2) ] class Win32_STORAGE_DEVICE_NUMBER(ctypes.Structure): _fields_ = [ ('DeviceType', wintypes.DWORD), ('DeviceNumber', wintypes.DWORD), ('PartitionNumber', wintypes.DWORD) ] msvcrt.malloc.argtypes = [ctypes.c_size_t] msvcrt.malloc.restype = ctypes.c_void_p msvcrt.free.argtypes = [ctypes.c_void_p] msvcrt.free.restype = None ntdll.RtlVerifyVersionInfo.argtypes = [ ctypes.POINTER(Win32_OSVERSIONINFOEX_W), wintypes.DWORD, wintypes.ULARGE_INTEGER] ntdll.RtlVerifyVersionInfo.restype = wintypes.DWORD kernel32.VerSetConditionMask.argtypes = [wintypes.ULARGE_INTEGER, wintypes.DWORD, wintypes.BYTE] kernel32.VerSetConditionMask.restype = wintypes.ULARGE_INTEGER kernel32.SetComputerNameExW.argtypes = [ctypes.c_int, wintypes.LPCWSTR] kernel32.SetComputerNameExW.restype = wintypes.BOOL kernel32.GetLogicalDriveStringsW.argtypes = [wintypes.DWORD, wintypes.LPWSTR] kernel32.GetLogicalDriveStringsW.restype = wintypes.DWORD kernel32.GetDriveTypeW.argtypes = [wintypes.LPCWSTR] kernel32.GetDriveTypeW.restype = wintypes.UINT kernel32.CreateFileW.argtypes = 
[wintypes.LPCWSTR, wintypes.DWORD, wintypes.DWORD, wintypes.LPVOID, wintypes.DWORD, wintypes.DWORD, wintypes.HANDLE] kernel32.CreateFileW.restype = wintypes.HANDLE kernel32.DeviceIoControl.argtypes = [wintypes.HANDLE, wintypes.DWORD, wintypes.LPVOID, wintypes.DWORD, wintypes.LPVOID, wintypes.DWORD, ctypes.POINTER(wintypes.DWORD), wintypes.LPVOID] kernel32.DeviceIoControl.restype = wintypes.BOOL kernel32.GetProcessHeap.argtypes = [] kernel32.GetProcessHeap.restype = wintypes.HANDLE kernel32.HeapAlloc.argtypes = [wintypes.HANDLE, wintypes.DWORD, ctypes.c_size_t] kernel32.HeapAlloc.restype = wintypes.LPVOID kernel32.HeapFree.argtypes = [wintypes.HANDLE, wintypes.DWORD, wintypes.LPVOID] kernel32.HeapFree.restype = wintypes.BOOL iphlpapi.GetIpForwardTable.argtypes = [ ctypes.POINTER(Win32_MIB_IPFORWARDTABLE), ctypes.POINTER(wintypes.ULONG), wintypes.BOOL] iphlpapi.GetIpForwardTable.restype = wintypes.DWORD Ws2_32.inet_ntoa.restype = ctypes.c_char_p setupapi.SetupDiGetClassDevsW.argtypes = [ctypes.POINTER(disk.GUID), wintypes.LPCWSTR, wintypes.HANDLE, wintypes.DWORD] setupapi.SetupDiGetClassDevsW.restype = wintypes.HANDLE setupapi.SetupDiEnumDeviceInterfaces.argtypes = [ wintypes.HANDLE, wintypes.LPVOID, ctypes.POINTER(disk.GUID), wintypes.DWORD, ctypes.POINTER(Win32_SP_DEVICE_INTERFACE_DATA)] setupapi.SetupDiEnumDeviceInterfaces.restype = wintypes.BOOL setupapi.SetupDiGetDeviceInterfaceDetailW.argtypes = [ wintypes.HANDLE, ctypes.POINTER(Win32_SP_DEVICE_INTERFACE_DATA), ctypes.POINTER(Win32_SP_DEVICE_INTERFACE_DETAIL_DATA_W), wintypes.DWORD, ctypes.POINTER(wintypes.DWORD), wintypes.LPVOID] setupapi.SetupDiGetDeviceInterfaceDetailW.restype = wintypes.BOOL setupapi.SetupDiDestroyDeviceInfoList.argtypes = [wintypes.HANDLE] setupapi.SetupDiDestroyDeviceInfoList.restype = wintypes.BOOL VER_MAJORVERSION = 1 VER_MINORVERSION = 2 VER_BUILDNUMBER = 4 VER_GREATER_EQUAL = 3 GUID_DEVINTERFACE_DISK = disk.GUID(0x53f56307, 0xb6bf, 0x11d0, 0x94, 0xf2, 0x00, 0xa0, 0xc9, 0x1e, 0xfb, 
0x8b) class WindowsUtils(base.BaseOSUtils): NERR_GroupNotFound = 2220 NERR_UserNotFound = 2221 ERROR_ACCESS_DENIED = 5 ERROR_INSUFFICIENT_BUFFER = 122 ERROR_NO_DATA = 232 ERROR_NO_SUCH_MEMBER = 1387 ERROR_MEMBER_IN_ALIAS = 1378 ERROR_INVALID_MEMBER = 1388 ERROR_NO_MORE_FILES = 18 STATUS_REVISION_MISMATCH = 0xC0000059 ADS_UF_PASSWORD_EXPIRED = 0x800000 PASSWORD_CHANGED_FLAG = 1 INVALID_HANDLE_VALUE = 0xFFFFFFFF FILE_SHARE_READ = 1 FILE_SHARE_WRITE = 2 OPEN_EXISTING = 3 IOCTL_STORAGE_GET_DEVICE_NUMBER = 0x002D1080 MAX_PATH = 260 DIGCF_PRESENT = 2 DIGCF_DEVICEINTERFACE = 0x10 DRIVE_CDROM = 5 SERVICE_STATUS_STOPPED = "Stopped" SERVICE_STATUS_START_PENDING = "Start Pending" SERVICE_STATUS_STOP_PENDING = "Stop Pending" SERVICE_STATUS_RUNNING = "Running" SERVICE_STATUS_CONTINUE_PENDING = "Continue Pending" SERVICE_STATUS_PAUSE_PENDING = "Pause Pending" SERVICE_STATUS_PAUSED = "Paused" SERVICE_STATUS_UNKNOWN = "Unknown" SERVICE_START_MODE_AUTOMATIC = "Automatic" SERVICE_START_MODE_MANUAL = "Manual" SERVICE_START_MODE_DISABLED = "Disabled" ComputerNamePhysicalDnsHostname = 5 _config_key = 'SOFTWARE\\Cloudbase Solutions\\Cloudbase-Init\\' _service_name = 'cloudbase-init' _FW_IP_PROTOCOL_TCP = 6 _FW_IP_PROTOCOL_UDP = 17 _FW_SCOPE_ALL = 0 _FW_SCOPE_LOCAL_SUBNET = 1 def reboot(self): with privilege.acquire_privilege(win32security.SE_SHUTDOWN_NAME): ret_val = advapi32.InitiateSystemShutdownExW( 0, "Cloudbase-Init reboot", 0, True, True, 0) if not ret_val: raise exception.WindowsCloudbaseInitException( "Reboot failed: %r") def user_exists(self, username): try: self._get_user_info(username, 1) return True except exception.ItemNotFoundException: # User not found return False def create_user(self, username, password, password_expires=False): user_info = { "name": username, "password": password, "priv": win32netcon.USER_PRIV_USER, "flags": win32netcon.UF_NORMAL_ACCOUNT | win32netcon.UF_SCRIPT, } if not password_expires: user_info["flags"] |= win32netcon.UF_DONT_EXPIRE_PASSWD try: 
win32net.NetUserAdd(None, 1, user_info) except win32net.error as ex: raise exception.CloudbaseInitException( "Create user failed: %s" % ex.args[2]) def _get_user_info(self, username, level): try: return win32net.NetUserGetInfo(None, username, level) except win32net.error as ex: if ex.args[0] == self.NERR_UserNotFound: raise exception.ItemNotFoundException( "User not found: %s" % username) else: raise exception.CloudbaseInitException( "Failed to get user info: %s" % ex.args[2]) def set_user_password(self, username, password, password_expires=False): user_info = self._get_user_info(username, 1) user_info["password"] = password if password_expires: user_info["flags"] &= ~win32netcon.UF_DONT_EXPIRE_PASSWD else: user_info["flags"] |= win32netcon.UF_DONT_EXPIRE_PASSWD try: win32net.NetUserSetInfo(None, username, 1, user_info) except win32net.error as ex: raise exception.CloudbaseInitException( "Set user password failed: %s" % ex.args[2]) def change_password_next_logon(self, username): """Force the given user to change the password at next logon.""" user_info = self._get_user_info(username, 4) user_info["flags"] &= ~win32netcon.UF_DONT_EXPIRE_PASSWD user_info["password_expired"] = 1 try: win32net.NetUserSetInfo(None, username, 4, user_info) except win32net.error as ex: raise exception.CloudbaseInitException( "Setting password expiration failed: %s" % ex.args[2]) @staticmethod def _get_cch_referenced_domain_name(domain_name): return wintypes.DWORD( ctypes.sizeof(domain_name) // ctypes.sizeof(wintypes.WCHAR)) def _get_user_sid_and_domain(self, username): sid = ctypes.create_string_buffer(1024) cbSid = wintypes.DWORD(ctypes.sizeof(sid)) domainName = ctypes.create_unicode_buffer(1024) cchReferencedDomainName = self._get_cch_referenced_domain_name( domainName) sidNameUse = wintypes.DWORD() ret_val = advapi32.LookupAccountNameW( 0, six.text_type(username), sid, ctypes.byref(cbSid), domainName, ctypes.byref(cchReferencedDomainName), ctypes.byref(sidNameUse)) if not ret_val: 
raise exception.WindowsCloudbaseInitException( "Cannot get user SID: %r") return sid, domainName.value def add_user_to_local_group(self, username, groupname): lmi = Win32_LOCALGROUP_MEMBERS_INFO_3() lmi.lgrmi3_domainandname = six.text_type(username) ret_val = netapi32.NetLocalGroupAddMembers(0, six.text_type(groupname), 3, ctypes.addressof(lmi), 1) if ret_val == self.NERR_GroupNotFound: raise exception.CloudbaseInitException('Group not found') elif ret_val == self.ERROR_ACCESS_DENIED: raise exception.CloudbaseInitException('Access denied') elif ret_val == self.ERROR_NO_SUCH_MEMBER: raise exception.CloudbaseInitException('Username not found') elif ret_val == self.ERROR_MEMBER_IN_ALIAS: # The user is already a member of the group pass elif ret_val == self.ERROR_INVALID_MEMBER: raise exception.CloudbaseInitException('Invalid user') elif ret_val != 0: raise exception.CloudbaseInitException('Unknown error') def get_user_sid(self, username): try: user_info = self._get_user_info(username, 4) return str(user_info["user_sid"])[6:] except exception.ItemNotFoundException: # User not found pass def create_user_logon_session(self, username, password, domain='.', load_profile=True): token = wintypes.HANDLE() ret_val = advapi32.LogonUserW(six.text_type(username), six.text_type(domain), six.text_type(password), 2, 0, ctypes.byref(token)) if not ret_val: raise exception.WindowsCloudbaseInitException( "User logon failed: %r") if load_profile: pi = Win32_PROFILEINFO() pi.dwSize = ctypes.sizeof(Win32_PROFILEINFO) pi.lpUserName = six.text_type(username) ret_val = userenv.LoadUserProfileW(token, ctypes.byref(pi)) if not ret_val: kernel32.CloseHandle(token) raise exception.WindowsCloudbaseInitException( "Cannot load user profile: %r") return token def close_user_logon_session(self, token): kernel32.CloseHandle(token) def get_user_home(self, username): user_sid = self.get_user_sid(username) if user_sid: with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\' 'Microsoft\\Windows 
NT\\CurrentVersion\\' 'ProfileList\\%s' % user_sid) as key: return winreg.QueryValueEx(key, 'ProfileImagePath')[0] LOG.debug('Home directory not found for user %r', username) return None def sanitize_shell_input(self, value): return value.replace('"', '\\"') def set_host_name(self, new_host_name): ret_val = kernel32.SetComputerNameExW( self.ComputerNamePhysicalDnsHostname, six.text_type(new_host_name)) if not ret_val: raise exception.WindowsCloudbaseInitException( "Cannot set host name: %r") return True def get_network_adapters(self): """Return available adapters as a list of tuples of (name, mac).""" conn = wmi.WMI(moniker='//./root/cimv2') # Get Ethernet adapters only wql = ('SELECT * FROM Win32_NetworkAdapter WHERE ' 'AdapterTypeId = 0 AND MACAddress IS NOT NULL') if self.check_os_version(6, 0): wql += ' AND PhysicalAdapter = True' q = conn.query(wql) return [(r.Name, r.MACAddress) for r in q] def get_dhcp_hosts_in_use(self): dhcp_hosts = [] for net_addr in network.get_adapter_addresses(): if net_addr["dhcp_enabled"] and net_addr["dhcp_server"]: dhcp_hosts.append((net_addr["mac_address"], net_addr["dhcp_server"])) return dhcp_hosts def set_ntp_client_config(self, ntp_hosts): base_dir = self._get_system_dir() w32tm_path = os.path.join(base_dir, "w32tm.exe") # Convert the NTP hosts list to a string, in order to pass # it to w32tm. 
ntp_hosts = ",".join(ntp_hosts) args = [w32tm_path, '/config', '/manualpeerlist:%s' % ntp_hosts, '/syncfromflags:manual', '/update'] (out, err, ret_val) = self.execute_process(args, shell=False) if ret_val: raise exception.CloudbaseInitException( 'w32tm failed to configure NTP.\nOutput: %(out)s\nError:' ' %(err)s' % {'out': out, 'err': err}) def set_network_adapter_mtu(self, mac_address, mtu): if not self.check_os_version(6, 0): raise exception.CloudbaseInitException( 'Setting the MTU is currently not supported on Windows XP ' 'and Windows Server 2003') iface_index_list = [ net_addr["interface_index"] for net_addr in network.get_adapter_addresses() if net_addr["mac_address"] == mac_address] if not iface_index_list: raise exception.CloudbaseInitException( 'Network interface with MAC address "%s" not found' % mac_address) else: iface_index = iface_index_list[0] LOG.debug('Setting MTU for interface "%(mac_address)s" with ' 'value "%(mtu)s"', {'mac_address': mac_address, 'mtu': mtu}) base_dir = self._get_system_dir() netsh_path = os.path.join(base_dir, 'netsh.exe') args = [netsh_path, "interface", "ipv4", "set", "subinterface", str(iface_index), "mtu=%s" % mtu, "store=persistent"] (out, err, ret_val) = self.execute_process(args, shell=False) if ret_val: raise exception.CloudbaseInitException( 'Setting MTU for interface "%(mac_address)s" with ' 'value "%(mtu)s" failed' % {'mac_address': mac_address, 'mtu': mtu}) def set_static_network_config(self, mac_address, address, netmask, broadcast, gateway, dnsnameservers): conn = wmi.WMI(moniker='//./root/cimv2') query = conn.query("SELECT * FROM Win32_NetworkAdapter WHERE " "MACAddress = '{}'".format(mac_address)) if not len(query): raise exception.CloudbaseInitException( "Network adapter not found") adapter_config = query[0].associators( wmi_result_class='Win32_NetworkAdapterConfiguration')[0] LOG.debug("Setting static IP address") (ret_val,) = adapter_config.EnableStatic([address], [netmask]) if ret_val > 1: raise 
exception.CloudbaseInitException( "Cannot set static IP address on network adapter (%d)", ret_val) reboot_required = (ret_val == 1) if gateway: LOG.debug("Setting static gateways") (ret_val,) = adapter_config.SetGateways([gateway], [1]) if ret_val > 1: raise exception.CloudbaseInitException( "Cannot set gateway on network adapter (%d)", ret_val) reboot_required = reboot_required or ret_val == 1 if dnsnameservers: LOG.debug("Setting static DNS servers") (ret_val,) = adapter_config.SetDNSServerSearchOrder(dnsnameservers) if ret_val > 1: raise exception.CloudbaseInitException( "Cannot set DNS on network adapter (%d)", ret_val) reboot_required = reboot_required or ret_val == 1 return reboot_required def set_static_network_config_v6(self, mac_address, address6, netmask6, gateway6): """Set IPv6 info for a network card.""" # Get local properties by MAC identification. adapters = network.get_adapter_addresses() for adapter in adapters: if mac_address == adapter["mac_address"]: ifname = adapter["friendly_name"] ifindex = adapter["interface_index"] break else: raise exception.CloudbaseInitException( "Adapter with MAC {!r} not available".format(mac_address)) # TODO(cpoieana): Extend support for other platforms. # Currently windows8 @ ws2012 or above. if not self.check_os_version(6, 2): LOG.warning("Setting IPv6 info not available " "on this system") return conn = wmi.WMI(moniker='//./root/StandardCimv2') query = conn.query("SELECT * FROM MSFT_NetIPAddress " "WHERE InterfaceAlias = '{}'".format(ifname)) netip = query[0] params = { "InterfaceIndex": ifindex, "InterfaceAlias": ifname, "IPAddress": address6, "AddressFamily": AF_INET6, "PrefixLength": netmask6, # Manual set type. "Type": UNICAST, "PrefixOrigin": MANUAL, "SuffixOrigin": MANUAL, "AddressState": PREFERRED_ADDR, # No expiry. 
"ValidLifetime": None, "PreferredLifetime": None, "SkipAsSource": False, "DefaultGateway": gateway6, "PolicyStore": None, "PassThru": False, } LOG.debug("Setting IPv6 info for %s", ifname) try: netip.Create(**params) except wmi.x_wmi as exc: raise exception.CloudbaseInitException(exc.com_error) def _get_config_key_name(self, section): key_name = self._config_key if section: key_name += section.replace('/', '\\') + '\\' return key_name def set_config_value(self, name, value, section=None): key_name = self._get_config_key_name(section) with winreg.CreateKey(winreg.HKEY_LOCAL_MACHINE, key_name) as key: if type(value) == int: regtype = winreg.REG_DWORD else: regtype = winreg.REG_SZ winreg.SetValueEx(key, name, 0, regtype, value) def get_config_value(self, name, section=None): key_name = self._get_config_key_name(section) try: with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, key_name) as key: (value, regtype) = winreg.QueryValueEx(key, name) return value except WindowsError: return None def wait_for_boot_completion(self): try: with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, "SYSTEM\\Setup\\Status\\SysprepStatus", 0, winreg.KEY_READ) as key: while True: gen_state = winreg.QueryValueEx(key, "GeneralizationState")[0] if gen_state == 7: break time.sleep(1) LOG.info('Waiting for sysprep completion. 
' 'GeneralizationState: %d', gen_state) except WindowsError as ex: if ex.winerror == 2: LOG.debug('Sysprep data not found in the registry, ' 'skipping sysprep completion check.') else: raise ex def _get_service(self, service_name): conn = wmi.WMI(moniker='//./root/cimv2') service_list = conn.Win32_Service(Name=service_name) if len(service_list): return service_list[0] def check_service_exists(self, service_name): return self._get_service(service_name) is not None def get_service_status(self, service_name): service = self._get_service(service_name) return service.State def get_service_start_mode(self, service_name): service = self._get_service(service_name) return service.StartMode def set_service_start_mode(self, service_name, start_mode): # TODO(alexpilotti): Handle the "Delayed Start" case service = self._get_service(service_name) (ret_val,) = service.ChangeStartMode(start_mode) if ret_val != 0: raise exception.CloudbaseInitException( 'Setting service %(service_name)s start mode failed with ' 'return value: %(ret_val)d' % {'service_name': service_name, 'ret_val': ret_val}) def start_service(self, service_name): LOG.debug('Starting service %s', service_name) service = self._get_service(service_name) (ret_val,) = service.StartService() if ret_val != 0: raise exception.CloudbaseInitException( 'Starting service %(service_name)s failed with return value: ' '%(ret_val)d' % {'service_name': service_name, 'ret_val': ret_val}) def stop_service(self, service_name): LOG.debug('Stopping service %s', service_name) service = self._get_service(service_name) (ret_val,) = service.StopService() if ret_val != 0: raise exception.CloudbaseInitException( 'Stopping service %(service_name)s failed with return value:' ' %(ret_val)d' % {'service_name': service_name, 'ret_val': ret_val}) def terminate(self): # Wait for the service to start. 
Polling the service "Started" property # is not enough time.sleep(3) self.stop_service(self._service_name) def get_default_gateway(self): default_routes = [r for r in self._get_ipv4_routing_table() if r[0] == '0.0.0.0'] if default_routes: return default_routes[0][3], default_routes[0][2] else: return None, None @staticmethod def _heap_alloc(heap, size): table_mem = kernel32.HeapAlloc(heap, 0, ctypes.c_size_t(size.value)) if not table_mem: raise exception.CloudbaseInitException( 'Unable to allocate memory for the IP forward table') return table_mem @contextlib.contextmanager def _get_forward_table(self): heap = kernel32.GetProcessHeap() forward_table_size = ctypes.sizeof(Win32_MIB_IPFORWARDTABLE) size = wintypes.ULONG(forward_table_size) table_mem = self._heap_alloc(heap, size) p_forward_table = ctypes.cast( table_mem, ctypes.POINTER(Win32_MIB_IPFORWARDTABLE)) try: err = iphlpapi.GetIpForwardTable(p_forward_table, ctypes.byref(size), 0) if err == self.ERROR_INSUFFICIENT_BUFFER: kernel32.HeapFree(heap, 0, p_forward_table) table_mem = self._heap_alloc(heap, size) p_forward_table = ctypes.cast( table_mem, ctypes.POINTER(Win32_MIB_IPFORWARDTABLE)) err = iphlpapi.GetIpForwardTable(p_forward_table, ctypes.byref(size), 0) if err and err != kernel32.ERROR_NO_DATA: raise exception.CloudbaseInitException( 'Unable to get IP forward table. 
Error: %s' % err) yield p_forward_table finally: kernel32.HeapFree(heap, 0, p_forward_table) def _get_ipv4_routing_table(self): routing_table = [] with self._get_forward_table() as p_forward_table: forward_table = p_forward_table.contents table = ctypes.cast( ctypes.addressof(forward_table.table), ctypes.POINTER(Win32_MIB_IPFORWARDROW * forward_table.dwNumEntries)).contents for row in table: destination = Ws2_32.inet_ntoa( row.dwForwardDest).decode() netmask = Ws2_32.inet_ntoa( row.dwForwardMask).decode() gateway = Ws2_32.inet_ntoa( row.dwForwardNextHop).decode() routing_table.append(( destination, netmask, gateway, row.dwForwardIfIndex, row.dwForwardMetric1)) return routing_table def check_static_route_exists(self, destination): return len([r for r in self._get_ipv4_routing_table() if r[0] == destination]) > 0 def add_static_route(self, destination, mask, next_hop, interface_index, metric): args = ['ROUTE', 'ADD', destination, 'MASK', mask, next_hop] (out, err, ret_val) = self.execute_process(args) # Cannot use the return value to determine the outcome if ret_val or err: raise exception.CloudbaseInitException( 'Unable to add route: %s' % err) def check_os_version(self, major, minor, build=0): vi = Win32_OSVERSIONINFOEX_W() vi.dwOSVersionInfoSize = ctypes.sizeof(Win32_OSVERSIONINFOEX_W) vi.dwMajorVersion = major vi.dwMinorVersion = minor vi.dwBuildNumber = build mask = 0 for type_mask in [VER_MAJORVERSION, VER_MINORVERSION, VER_BUILDNUMBER]: mask = kernel32.VerSetConditionMask(mask, type_mask, VER_GREATER_EQUAL) type_mask = VER_MAJORVERSION | VER_MINORVERSION | VER_BUILDNUMBER ret_val = ntdll.RtlVerifyVersionInfo(ctypes.byref(vi), type_mask, mask) if not ret_val: return True elif ret_val == self.STATUS_REVISION_MISMATCH: return False else: raise exception.CloudbaseInitException( "RtlVerifyVersionInfo failed with error: %s" % ret_val) def get_volume_label(self, drive): max_label_size = 261 label = ctypes.create_unicode_buffer(max_label_size) ret_val = 
kernel32.GetVolumeInformationW(six.text_type(drive), label, max_label_size, 0, 0, 0, 0, 0) if ret_val: return label.value def generate_random_password(self, length): while True: pwd = super(WindowsUtils, self).generate_random_password(length) # Make sure that the Windows complexity requirements are met: # http://technet.microsoft.com/en-us/library/cc786468(v=ws.10).aspx valid = True for r in ["[a-z]", "[A-Z]", "[0-9]"]: if not re.search(r, pwd): valid = False if valid: return pwd def _split_str_buf_list(self, buf, buf_len): i = 0 value = '' values = [] while i < buf_len: c = buf[i] if c != '\x00': value += c else: values.append(value) value = '' i += 1 return values def _get_logical_drives(self): buf_size = self.MAX_PATH buf = ctypes.create_unicode_buffer(buf_size + 1) buf_len = kernel32.GetLogicalDriveStringsW(buf_size, buf) if not buf_len: raise exception.WindowsCloudbaseInitException( "GetLogicalDriveStringsW failed: %r") return self._split_str_buf_list(buf, buf_len) def get_cdrom_drives(self): drives = self._get_logical_drives() return [d for d in drives if kernel32.GetDriveTypeW(d) == self.DRIVE_CDROM] def _is_64bit_arch(self): # interpreter's bits return struct.calcsize("P") == 8 def get_physical_disks(self): physical_disks = [] disk_guid = GUID_DEVINTERFACE_DISK handle_disks = setupapi.SetupDiGetClassDevsW( ctypes.byref(disk_guid), None, None, self.DIGCF_PRESENT | self.DIGCF_DEVICEINTERFACE) if handle_disks == self.INVALID_HANDLE_VALUE: raise exception.CloudbaseInitException( "SetupDiGetClassDevs failed") try: did = Win32_SP_DEVICE_INTERFACE_DATA() did.cbSize = ctypes.sizeof(Win32_SP_DEVICE_INTERFACE_DATA) index = 0 while setupapi.SetupDiEnumDeviceInterfaces( handle_disks, None, ctypes.byref(disk_guid), index, ctypes.byref(did)): index += 1 handle_disk = self.INVALID_HANDLE_VALUE required_size = wintypes.DWORD() if not setupapi.SetupDiGetDeviceInterfaceDetailW( handle_disks, ctypes.byref(did), None, 0, ctypes.byref(required_size), None): if 
(kernel32.GetLastError() != self.ERROR_INSUFFICIENT_BUFFER): raise exception.WindowsCloudbaseInitException( "SetupDiGetDeviceInterfaceDetailW failed: %r") pdidd = ctypes.cast( msvcrt.malloc(ctypes.c_size_t(required_size.value)), ctypes.POINTER(Win32_SP_DEVICE_INTERFACE_DETAIL_DATA_W)) try: pdidd.contents.cbSize = ctypes.sizeof( Win32_SP_DEVICE_INTERFACE_DETAIL_DATA_W) if not self._is_64bit_arch(): # NOTE(cpoieana): For some reason, on x86 platforms # the alignment or content of the struct # is not taken into consideration. pdidd.contents.cbSize = 6 if not setupapi.SetupDiGetDeviceInterfaceDetailW( handle_disks, ctypes.byref(did), pdidd, required_size, None, None): raise exception.WindowsCloudbaseInitException( "SetupDiGetDeviceInterfaceDetailW failed: %r") device_path = ctypes.cast( pdidd.contents.DevicePath, wintypes.LPWSTR).value handle_disk = kernel32.CreateFileW( device_path, 0, self.FILE_SHARE_READ, None, self.OPEN_EXISTING, 0, 0) if handle_disk == self.INVALID_HANDLE_VALUE: raise exception.CloudbaseInitException( 'CreateFileW failed') sdn = Win32_STORAGE_DEVICE_NUMBER() b = wintypes.DWORD() if not kernel32.DeviceIoControl( handle_disk, self.IOCTL_STORAGE_GET_DEVICE_NUMBER, None, 0, ctypes.byref(sdn), ctypes.sizeof(sdn), ctypes.byref(b), None): raise exception.WindowsCloudbaseInitException( 'DeviceIoControl failed: %r') physical_disks.append( r"\\.\PHYSICALDRIVE%d" % sdn.DeviceNumber) finally: msvcrt.free(pdidd) if handle_disk != self.INVALID_HANDLE_VALUE: kernel32.CloseHandle(handle_disk) finally: setupapi.SetupDiDestroyDeviceInfoList(handle_disks) return physical_disks def get_volumes(self): """Retrieve a list with all the volumes found on all disks.""" volumes = [] volume = ctypes.create_unicode_buffer(chr(0) * self.MAX_PATH) handle_volumes = kernel32.FindFirstVolumeW(volume, self.MAX_PATH) if handle_volumes == self.INVALID_HANDLE_VALUE: raise exception.WindowsCloudbaseInitException( "FindFirstVolumeW failed: %r") try: while True: 
volumes.append(volume.value) found = kernel32.FindNextVolumeW(handle_volumes, volume, self.MAX_PATH) if not found: errno = ctypes.GetLastError() if errno == self.ERROR_NO_MORE_FILES: break else: raise exception.WindowsCloudbaseInitException( "FindNextVolumeW failed: %r") finally: kernel32.FindVolumeClose(handle_volumes) return volumes def _get_fw_protocol(self, protocol): if protocol == self.PROTOCOL_TCP: fw_protocol = self._FW_IP_PROTOCOL_TCP elif protocol == self.PROTOCOL_UDP: fw_protocol = self._FW_IP_PROTOCOL_UDP else: raise NotImplementedError("Unsupported protocol") return fw_protocol def firewall_create_rule(self, name, port, protocol, allow=True): if not allow: raise NotImplementedError() fw_port = client.Dispatch("HNetCfg.FWOpenPort") fw_port.Name = name fw_port.Protocol = self._get_fw_protocol(protocol) fw_port.Port = port fw_port.Scope = self._FW_SCOPE_ALL fw_port.Enabled = True fw_mgr = client.Dispatch("HNetCfg.FwMgr") fw_profile = fw_mgr.LocalPolicy.CurrentProfile fw_profile = fw_profile.GloballyOpenPorts.Add(fw_port) def firewall_remove_rule(self, name, port, protocol, allow=True): if not allow: raise NotImplementedError() fw_mgr = client.Dispatch("HNetCfg.FwMgr") fw_profile = fw_mgr.LocalPolicy.CurrentProfile fw_protocol = self._get_fw_protocol(protocol) fw_profile = fw_profile.GloballyOpenPorts.Remove(port, fw_protocol) def is_wow64(self): return win32process.IsWow64Process() def get_system32_dir(self): return os.path.expandvars('%windir%\\system32') def get_syswow64_dir(self): return os.path.expandvars('%windir%\\syswow64') def get_sysnative_dir(self): return os.path.expandvars('%windir%\\sysnative') def check_sysnative_dir_exists(self): sysnative_dir_exists = os.path.isdir(self.get_sysnative_dir()) if not sysnative_dir_exists and self.is_wow64(): LOG.warning('Unable to validate sysnative folder presence. 
' 'If Target OS is Server 2003 x64, please ensure ' 'you have KB942589 installed') return sysnative_dir_exists def _get_system_dir(self, sysnative=True): """Return Windows system directory with compatibility support. Depending on the interpreter bits and platform architecture, the return value may vary between C:\Windows\(System32|SysWOW64|Sysnative). Note that "Sysnative" is just an alias (doesn't really exist on disk). More info about this can be found in documentation. """ if sysnative and self.check_sysnative_dir_exists(): return self.get_sysnative_dir() if not sysnative and self._is_64bit_arch(): return self.get_syswow64_dir() return self.get_system32_dir() def is_nano_server(self): return self._check_server_level("NanoServer") def _check_server_level(self, server_level): try: with winreg.OpenKey( winreg.HKEY_LOCAL_MACHINE, "Software\\Microsoft\\Windows NT\\CurrentVersion\\Server\\" "ServerLevels") as key: return winreg.QueryValueEx(key, server_level)[0] == 1 except WindowsError as ex: if ex.winerror == 2: return False else: raise def execute_powershell_script(self, script_path, sysnative=True): base_dir = self._get_system_dir(sysnative) powershell_path = os.path.join(base_dir, 'WindowsPowerShell\\v1.0\\' 'powershell.exe') args = [powershell_path] if not self.is_nano_server(): args += ['-ExecutionPolicy', 'RemoteSigned', '-NonInteractive', '-File'] args.append(script_path) return self.execute_process(args, shell=False) def execute_system32_process(self, args, shell=True, decode_output=False, sysnative=True): base_dir = self._get_system_dir(sysnative) process_path = os.path.join(base_dir, args[0]) return self.execute_process([process_path] + args[1:], decode_output=decode_output, shell=shell) def get_maximum_password_length(self): return 20 def set_timezone(self, timezone_name): windows_name = windows_tz.tz_win.get(timezone_name) if not windows_name: raise exception.CloudbaseInitException( "The given timezone name is unrecognised: %r" % timezone_name) 
timezone.Timezone(windows_name).set(self)
from suds.sudsobject import Object as SudsObject


class _FactoryKeywords(object):
    """Keywords for creating and manipulating WSDL (suds) objects."""

    def set_wsdl_object_attribute(self, object, name, value):
        """Sets the attribute of a WSDL object.

        Example:
        | ${order search request}= | Create Wsdl Object | OrderSearchRequest |      |
        | Set Wsdl Object Attribute | ${order search request} | id | 4065 |
        """
        self._assert_is_suds_object(object)
        # Probe the attribute first so that a misspelled/unknown name raises
        # (AttributeError) before anything is written to the object.
        getattr(object, name)
        setattr(object, name, value)

    def get_wsdl_object_attribute(self, object, name):
        """Gets the attribute of a WSDL object.

        Extended variable syntax may be used to access attributes; however,
        some WSDL objects may have attribute names that are illegal in
        Python, necessitating this keyword.

        Example:
        | ${sale record}= | Call Soap Method | getLastSale |       |
        | ${price}= | Get Wsdl Object Attribute | ${sale record} | Price |
        """
        self._assert_is_suds_object(object)
        return getattr(object, name)

    def create_wsdl_object(self, type, *name_value_pairs):
        """Creates a WSDL object of the specified `type`.

        Requested `type` must be defined in the WSDL, in an import specified
        by the WSDL, or with `Add Doctor Import`. `type` is case sensitive.

        Example:
        | ${contact}= | Create Wsdl Object | Contact |      |
        | Set Wsdl Object Attribute | ${contact} | Name | Kelly Newman |
        Attribute values can be set by passing the attribute name and value
        in pairs. This is equivalent to the two lines above:
        | ${contact}= | Create Wsdl Object | Contact | Name | Kelly Newman |
        """
        if len(name_value_pairs) % 2:
            raise ValueError("Creating a WSDL object failed. There should be "
                             "an even number of name-value pairs.")
        instance = self._client().factory.create(type)
        # Consume the varargs two at a time: (name, value), (name, value), ...
        pair_iter = iter(name_value_pairs)
        for attr_name, attr_value in zip(pair_iter, pair_iter):
            self.set_wsdl_object_attribute(instance, attr_name, attr_value)
        return instance

    # private

    def _assert_is_suds_object(self, object):
        # Reject anything that is not a suds-generated WSDL object.
        if isinstance(object, SudsObject):
            return
        raise ValueError("Object must be a WSDL object (suds.sudsobject.Object).")
from google.cloud import datacatalog_v1


def sample_list_tags():
    """List all tags under a Data Catalog parent resource and print each one."""
    # Instantiate the Data Catalog API client.
    catalog_client = datacatalog_v1.DataCatalogClient()

    # Build the request; "parent_value" is a placeholder for the real
    # entry/entry-group resource name.
    list_request = datacatalog_v1.ListTagsRequest(
        parent="parent_value",
    )

    # Issue the call; the client returns a pager that transparently
    # fetches successive pages.
    pages = catalog_client.list_tags(request=list_request)

    # Iterate over every tag across all pages and echo it.
    for tag in pages:
        print(tag)
# Scrapy project settings.

# Name of the project/bot; used in logging and as part of the default
# User-Agent string.
BOT_NAME = 'DynamicItemsScrapy'

# Module(s) where Scrapy looks for existing spiders.
SPIDER_MODULES = ['DynamicItemsScrapy.spiders']
# Module in which `scrapy genspider` will create new spiders.
NEWSPIDER_MODULE = 'DynamicItemsScrapy.spiders'
import copy import re import fixtures from jsonschema import exceptions as jsonschema_exc import six from nova.api.openstack import api_version_request as api_version from nova.api import validation from nova.api.validation import parameter_types from nova.api.validation import validators from nova import exception from nova import test from nova.tests.unit.api.openstack import fakes query_schema = { 'type': 'object', 'properties': { 'foo': parameter_types.single_param({'type': 'string', 'format': 'uuid'}), 'foos': parameter_types.multi_params({'type': 'string'}) }, 'patternProperties': { "^_": parameter_types.multi_params({'type': 'string'})}, 'additionalProperties': True } class FakeQueryParametersController(object): @validation.query_schema(query_schema, '2.3') def get(self, req): return list(set(req.GET.keys())) class RegexFormatFakeController(object): schema = { 'type': 'object', 'properties': { 'foo': { 'format': 'regex', }, }, } @validation.schema(request_body_schema=schema) def post(self, req, body): return 'Validation succeeded.' class FakeRequest(object): api_version_request = api_version.APIVersionRequest("2.1") environ = {} legacy_v2 = False def is_legacy_v2(self): return self.legacy_v2 class ValidationRegex(test.NoDBTestCase): def test_build_regex_range(self): # this is much easier to think about if we only use the ascii # subset because it's a printable range we can think # about. The algorithm works for all ranges. def _get_all_chars(): for i in range(0x7F): yield six.unichr(i) self.useFixture(fixtures.MonkeyPatch( 'nova.api.validation.parameter_types._get_all_chars', _get_all_chars)) # note that since we use only the ascii range in the tests # we have to clear the cache to recompute them. 
parameter_types._reset_cache() r = parameter_types._build_regex_range(ws=False) self.assertEqual(r, re.escape('!') + '-' + re.escape('~')) # if we allow whitespace the range starts earlier r = parameter_types._build_regex_range(ws=True) self.assertEqual(r, re.escape(' ') + '-' + re.escape('~')) # excluding a character will give us 2 ranges r = parameter_types._build_regex_range(ws=True, exclude=['A']) self.assertEqual(r, re.escape(' ') + '-' + re.escape('@') + 'B' + '-' + re.escape('~')) # inverting which gives us all the initial unprintable characters. r = parameter_types._build_regex_range(ws=False, invert=True) self.assertEqual(r, re.escape('\x00') + '-' + re.escape(' ')) # excluding characters that create a singleton. Naively this would be: # ' -@B-BD-~' which seems to work, but ' -@BD-~' is more natural. r = parameter_types._build_regex_range(ws=True, exclude=['A', 'C']) self.assertEqual(r, re.escape(' ') + '-' + re.escape('@') + 'B' + 'D' + '-' + re.escape('~')) # ws=True means the positive regex has printable whitespaces, # so the inverse will not. The inverse will include things we # exclude. r = parameter_types._build_regex_range( ws=True, exclude=['A', 'B', 'C', 'Z'], invert=True) self.assertEqual(r, re.escape('\x00') + '-' + re.escape('\x1f') + 'A-CZ') class APIValidationTestCase(test.NoDBTestCase): post_schema = None def setUp(self): super(APIValidationTestCase, self).setUp() self.post = None if self.post_schema is not None: @validation.schema(request_body_schema=self.post_schema) def post(req, body): return 'Validation succeeded.' 
self.post = post def check_validation_error(self, method, body, expected_detail, req=None): if not req: req = FakeRequest() try: method(body=body, req=req) except exception.ValidationError as ex: self.assertEqual(400, ex.kwargs['code']) if isinstance(expected_detail, list): self.assertIn(ex.kwargs['detail'], expected_detail, 'Exception details did not match expected') elif not re.match(expected_detail, ex.kwargs['detail']): self.assertEqual(expected_detail, ex.kwargs['detail'], 'Exception details did not match expected') except Exception as ex: self.fail('An unexpected exception happens: %s' % ex) else: self.fail('Any exception does not happen.') class FormatCheckerTestCase(test.NoDBTestCase): def _format_checker(self, format, value, error_message): format_checker = validators.FormatChecker() exc = self.assertRaises(jsonschema_exc.FormatError, format_checker.check, value, format) self.assertIsInstance(exc.cause, exception.InvalidName) self.assertEqual(error_message, exc.cause.format_message()) def test_format_checker_failed_with_non_string_name(self): error_message = ("An invalid 'name' value was provided. The name must " "be: printable characters. " "Can not start or end with whitespace.") self._format_checker("name", " ", error_message) self._format_checker("name", None, error_message) def test_format_checker_failed_name_with_leading_trailing_spaces(self): error_message = ("An invalid 'name' value was provided. 
" "The name must be: printable characters with at " "least one non space character") self._format_checker("name_with_leading_trailing_spaces", None, error_message) class MicroversionsSchemaTestCase(APIValidationTestCase): def setUp(self): super(MicroversionsSchemaTestCase, self).setUp() schema_v21_int = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer', } } } schema_v20_str = copy.deepcopy(schema_v21_int) schema_v20_str['properties']['foo'] = {'type': 'string'} @validation.schema(schema_v20_str, '2.0', '2.0') @validation.schema(schema_v21_int, '2.1') def post(req, body): return 'Validation succeeded.' self.post = post def test_validate_v2compatible_request(self): req = FakeRequest() req.legacy_v2 = True self.assertEqual(self.post(body={'foo': 'bar'}, req=req), 'Validation succeeded.') detail = ("Invalid input for field/attribute foo. Value: 1. " "1 is not of type 'string'") self.check_validation_error(self.post, body={'foo': 1}, expected_detail=detail, req=req) def test_validate_v21_request(self): req = FakeRequest() self.assertEqual(self.post(body={'foo': 1}, req=req), 'Validation succeeded.') detail = ("Invalid input for field/attribute foo. Value: bar. " "'bar' is not of type 'integer'") self.check_validation_error(self.post, body={'foo': 'bar'}, expected_detail=detail, req=req) def test_validate_v2compatible_request_with_none_min_version(self): schema_none = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer' } } } @validation.schema(schema_none) def post(req, body): return 'Validation succeeded.' req = FakeRequest() req.legacy_v2 = True self.assertEqual('Validation succeeded.', post(body={'foo': 1}, req=req)) detail = ("Invalid input for field/attribute foo. Value: bar. 
                  " 'bar' is not of type 'integer'")
        self.check_validation_error(post, body={'foo': 'bar'},
                                    expected_detail=detail, req=req)


# Exercises JSON-schema validation of request query parameters, including
# version-dependent stripping of additional (unknown) parameters.
class QueryParamsSchemaTestCase(test.NoDBTestCase):

    def setUp(self):
        super(QueryParamsSchemaTestCase, self).setUp()
        self.controller = FakeQueryParametersController()

    def test_validate_request(self):
        req = fakes.HTTPRequest.blank("/tests?foo=%s" % fakes.FAKE_UUID)
        req.api_version_request = api_version.APIVersionRequest("2.3")
        self.assertEqual(['foo'], self.controller.get(req))

    def test_validate_request_failed(self):
        # parameter 'foo' expect a UUID
        req = fakes.HTTPRequest.blank("/tests?foo=abc")
        req.api_version_request = api_version.APIVersionRequest("2.3")
        ex = self.assertRaises(exception.ValidationError,
                               self.controller.get, req)
        # the repr of a unicode string differs between py2 (u'...') and py3
        if six.PY3:
            self.assertEqual("Invalid input for query parameters foo. Value: "
                             "abc. 'abc' is not a 'uuid'", six.text_type(ex))
        else:
            self.assertEqual("Invalid input for query parameters foo. Value: "
                             "abc. u'abc' is not a 'uuid'", six.text_type(ex))

    def test_validate_request_with_multiple_values(self):
        req = fakes.HTTPRequest.blank("/tests?foos=abc")
        req.api_version_request = api_version.APIVersionRequest("2.3")
        self.assertEqual(['foos'], self.controller.get(req))
        req = fakes.HTTPRequest.blank("/tests?foos=abc&foos=def")
        self.assertEqual(['foos'], self.controller.get(req))

    def test_validate_request_with_multiple_values_fails(self):
        req = fakes.HTTPRequest.blank(
            "/tests?foo=%s&foo=%s" % (fakes.FAKE_UUID, fakes.FAKE_UUID))
        req.api_version_request = api_version.APIVersionRequest("2.3")
        self.assertRaises(exception.ValidationError, self.controller.get, req)

    def test_validate_request_unicode_decode_failure(self):
        req = fakes.HTTPRequest.blank("/tests?foo=%88")
        req.api_version_request = api_version.APIVersionRequest("2.1")
        ex = self.assertRaises(
            exception.ValidationError, self.controller.get, req)
        self.assertIn("Query string is not UTF-8 encoded", six.text_type(ex))

    def test_strip_out_additional_properties(self):
        req = fakes.HTTPRequest.blank(
            "/tests?foos=abc&foo=%s&bar=123&-bar=456" % fakes.FAKE_UUID)
        req.api_version_request = api_version.APIVersionRequest("2.3")
        res = self.controller.get(req)
        res.sort()
        self.assertEqual(['foo', 'foos'], res)

    def test_no_strip_out_additional_properties_when_not_match_version(self):
        req = fakes.HTTPRequest.blank(
            "/tests?foos=abc&foo=%s&bar=123&bar=456" % fakes.FAKE_UUID)
        # The JSON-schema matches to the API version 2.3 and above. Request
        # with version 2.1 to ensure there isn't no strip out for additional
        # parameters when schema didn't match the request version.
        req.api_version_request = api_version.APIVersionRequest("2.1")
        res = self.controller.get(req)
        res.sort()
        self.assertEqual(['bar', 'foo', 'foos'], res)

    def test_strip_out_correct_pattern_retained(self):
        req = fakes.HTTPRequest.blank(
            "/tests?foos=abc&foo=%s&bar=123&_foo_=456" % fakes.FAKE_UUID)
        req.api_version_request = api_version.APIVersionRequest("2.3")
        res = self.controller.get(req)
        res.sort()
        self.assertEqual(['_foo_', 'foo', 'foos'], res)


# No 'required' keyword: bodies with or without 'foo' both validate.
class RequiredDisableTestCase(APIValidationTestCase):

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': 'integer',
            },
        },
    }

    def test_validate_required_disable(self):
        self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()),
                         'Validation succeeded.')
        self.assertEqual(self.post(body={'abc': 1}, req=FakeRequest()),
                         'Validation succeeded.')


# 'required': ['foo'] present: a body missing 'foo' must be rejected.
class RequiredEnableTestCase(APIValidationTestCase):

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': 'integer',
            },
        },
        'required': ['foo']
    }

    def test_validate_required_enable(self):
        self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()),
                         'Validation succeeded.')

    def test_validate_required_enable_fails(self):
        detail = "'foo' is a required property"
        self.check_validation_error(self.post, body={'abc': 1},
                                    expected_detail=detail)


# additionalProperties left at its default (True): unknown keys accepted.
class AdditionalPropertiesEnableTestCase(APIValidationTestCase):

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': 'integer',
            },
        },
        'required': ['foo'],
    }

    def test_validate_additionalProperties_enable(self):
        self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()),
                         'Validation succeeded.')
        self.assertEqual(self.post(body={'foo': 1, 'ext': 1},
                                   req=FakeRequest()),
                         'Validation succeeded.')


# additionalProperties False: unknown keys must be rejected.
class AdditionalPropertiesDisableTestCase(APIValidationTestCase):

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': 'integer',
            },
        },
        'required': ['foo'],
        'additionalProperties': False,
    }

    def test_validate_additionalProperties_disable(self):
        self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()),
                         'Validation succeeded.')

    def test_validate_additionalProperties_disable_fails(self):
        detail = "Additional properties are not allowed ('ext' was unexpected)"
        self.check_validation_error(self.post, body={'foo': 1, 'ext': 1},
                                    expected_detail=detail)


# patternProperties: keys must match the regex; multiple acceptable error
# messages are listed because jsonschema's wording varies across versions.
class PatternPropertiesTestCase(APIValidationTestCase):

    post_schema = {
        'patternProperties': {
            '^[a-zA-Z0-9]{1,10}$': {
                'type': 'string'
            },
        },
        'additionalProperties': False,
    }

    def test_validate_patternProperties(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'bar'}, req=FakeRequest()))

    def test_validate_patternProperties_fails(self):
        details = [
            "Additional properties are not allowed ('__' was unexpected)",
            "'__' does not match any of the regexes: '^[a-zA-Z0-9]{1,10}$'"
        ]
        self.check_validation_error(self.post, body={'__': 'bar'},
                                    expected_detail=details)

        details = [
            "'' does not match any of the regexes: '^[a-zA-Z0-9]{1,10}$'",
            "Additional properties are not allowed ('' was unexpected)"
        ]
        self.check_validation_error(self.post, body={'': 'bar'},
                                    expected_detail=details)

        details = [
            ("'0123456789a' does not match any of the regexes: "
             "'^[a-zA-Z0-9]{1,10}$'"),
            ("Additional properties are not allowed ('0123456789a' was"
             " unexpected)")
        ]
        self.check_validation_error(self.post, body={'0123456789a': 'bar'},
                                    expected_detail=details)

        # Note(jrosenboom): This is referencing an internal python error
        # string, which is no stable interface. We need a patch in the
        # jsonschema library in order to fix this properly.
        if six.PY3:
            detail = "expected string or bytes-like object"
        else:
            detail = "expected string or buffer"
        self.check_validation_error(self.post, body={None: 'bar'},
                                    expected_detail=detail)


# Plain 'string' type checking.
class StringTestCase(APIValidationTestCase):

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': 'string',
            },
        },
    }

    def test_validate_string(self):
        self.assertEqual(self.post(body={'foo': 'abc'}, req=FakeRequest()),
                         'Validation succeeded.')
        self.assertEqual(self.post(body={'foo': '0'}, req=FakeRequest()),
                         'Validation succeeded.')
        self.assertEqual(self.post(body={'foo': ''}, req=FakeRequest()),
                         'Validation succeeded.')

    def test_validate_string_fails(self):
        detail = ("Invalid input for field/attribute foo. Value: 1."
                  " 1 is not of type 'string'")
        self.check_validation_error(self.post, body={'foo': 1},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. Value: 1.5."
                  " 1.5 is not of type 'string'")
        self.check_validation_error(self.post, body={'foo': 1.5},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. Value: True."
                  " True is not of type 'string'")
        self.check_validation_error(self.post, body={'foo': True},
                                    expected_detail=detail)


# minLength/maxLength constraints on a string.
class StringLengthTestCase(APIValidationTestCase):

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': 'string',
                'minLength': 1,
                'maxLength': 10,
            },
        },
    }

    def test_validate_string_length(self):
        self.assertEqual(self.post(body={'foo': '0'}, req=FakeRequest()),
                         'Validation succeeded.')
        self.assertEqual(self.post(body={'foo': '0123456789'},
                                   req=FakeRequest()),
                         'Validation succeeded.')

    def test_validate_string_length_fails(self):
        detail = ("Invalid input for field/attribute foo. Value: ."
                  " '' is too short")
        self.check_validation_error(self.post, body={'foo': ''},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. Value: 0123456789a."
                  " '0123456789a' is too long")
        self.check_validation_error(self.post, body={'foo': '0123456789a'},
                                    expected_detail=detail)


# Integer accepted as an int or a digits-only string (API compat).
class IntegerTestCase(APIValidationTestCase):

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': ['integer', 'string'],
                'pattern': '^[0-9]+$',
            },
        },
    }

    def test_validate_integer(self):
        self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()),
                         'Validation succeeded.')
        self.assertEqual(self.post(body={'foo': '1'}, req=FakeRequest()),
                         'Validation succeeded.')
        self.assertEqual(self.post(body={'foo': '0123456789'},
                                   req=FakeRequest()),
                         'Validation succeeded.')

    def test_validate_integer_fails(self):
        detail = ("Invalid input for field/attribute foo. Value: abc."
                  " 'abc' does not match '^[0-9]+$'")
        self.check_validation_error(self.post, body={'foo': 'abc'},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. Value: True."
                  " True is not of type 'integer', 'string'")
        self.check_validation_error(self.post, body={'foo': True},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. Value: 0xffff."
                  " '0xffff' does not match '^[0-9]+$'")
        self.check_validation_error(self.post, body={'foo': '0xffff'},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. Value: 1.0."
                  " 1.0 is not of type 'integer', 'string'")
        self.check_validation_error(self.post, body={'foo': 1.0},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. Value: 1.0."
                  " '1.0' does not match '^[0-9]+$'")
        self.check_validation_error(self.post, body={'foo': '1.0'},
                                    expected_detail=detail)


# Integer with minimum/maximum bounds; '(.0)?' in expected details tolerates
# jsonschema reporting the value as a float.
class IntegerRangeTestCase(APIValidationTestCase):

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': ['integer', 'string'],
                'pattern': '^[0-9]+$',
                'minimum': 1,
                'maximum': 10,
            },
        },
    }

    def test_validate_integer_range(self):
        self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()),
                         'Validation succeeded.')
        self.assertEqual(self.post(body={'foo': 10}, req=FakeRequest()),
                         'Validation succeeded.')
        self.assertEqual(self.post(body={'foo': '1'}, req=FakeRequest()),
                         'Validation succeeded.')

    def test_validate_integer_range_fails(self):
        detail = ("Invalid input for field/attribute foo. Value: 0."
                  " 0(.0)? is less than the minimum of 1")
        self.check_validation_error(self.post, body={'foo': 0},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. Value: 11."
                  " 11(.0)? is greater than the maximum of 10")
        self.check_validation_error(self.post, body={'foo': 11},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. Value: 0."
                  " 0(.0)? is less than the minimum of 1")
        self.check_validation_error(self.post, body={'foo': '0'},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. Value: 11."
                  " 11(.0)? is greater than the maximum of 10")
        self.check_validation_error(self.post, body={'foo': '11'},
                                    expected_detail=detail)


# Boolean accepts the full enum of truthy/falsy spellings.
class BooleanTestCase(APIValidationTestCase):

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': parameter_types.boolean,
        },
    }

    def test_validate_boolean(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': True}, req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': False}, req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'True'}, req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'False'}, req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': '1'}, req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': '0'}, req=FakeRequest()))

    def test_validate_boolean_fails(self):
        enum_boolean = ("[True, 'True', 'TRUE', 'true', '1', 'ON', 'On',"
                        " 'on', 'YES', 'Yes', 'yes',"
                        " False, 'False', 'FALSE', 'false', '0', 'OFF', 'Off',"
                        " 'off', 'NO', 'No', 'no']")

        detail = ("Invalid input for field/attribute foo. Value: bar."
                  " 'bar' is not one of %s") % enum_boolean
        self.check_validation_error(self.post, body={'foo': 'bar'},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. Value: 2."
                  " '2' is not one of %s") % enum_boolean
        self.check_validation_error(self.post, body={'foo': '2'},
                                    expected_detail=detail)


# Hostname pattern (letters, digits, '-', '_', '.').
class HostnameTestCase(APIValidationTestCase):

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': parameter_types.hostname,
        },
    }

    def test_validate_hostname(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'localhost'},
                                   req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'localhost.localdomain.com'},
                                   req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'my-host'},
                                   req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'my_host'},
                                   req=FakeRequest()))

    def test_validate_hostname_fails(self):
        detail = ("Invalid input for field/attribute foo. Value: True."
                  " True is not of type 'string'")
        self.check_validation_error(self.post, body={'foo': True},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. Value: 1."
                  " 1 is not of type 'string'")
        self.check_validation_error(self.post, body={'foo': 1},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. Value: my$host."
                  " 'my$host' does not match '^[a-zA-Z0-9-._]*$'")
        self.check_validation_error(self.post, body={'foo': 'my$host'},
                                    expected_detail=detail)


# Hostname-or-IP pattern additionally allows ':' for IPv6 literals.
class HostnameIPaddressTestCase(APIValidationTestCase):

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': parameter_types.hostname_or_ip_address,
        },
    }

    def test_validate_hostname_or_ip_address(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'localhost'},
                                   req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'localhost.localdomain.com'},
                                   req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'my-host'},
                                   req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'my_host'},
                                   req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': '192.168.10.100'},
                                   req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': '2001:db8::9abc'},
                                   req=FakeRequest()))

    def test_validate_hostname_or_ip_address_fails(self):
        detail = ("Invalid input for field/attribute foo. Value: True."
                  " True is not of type 'string'")
        self.check_validation_error(self.post, body={'foo': True},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. Value: 1."
                  " 1 is not of type 'string'")
        self.check_validation_error(self.post, body={'foo': 1},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. Value: my$host."
                  " 'my$host' does not match '^[a-zA-Z0-9-_.:]*$'")
        self.check_validation_error(self.post, body={'foo': 'my$host'},
                                    expected_detail=detail)


# 'name' type: printable characters, no leading/trailing whitespace.
class NameTestCase(APIValidationTestCase):

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': parameter_types.name,
        },
    }

    def test_validate_name(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'm1.small'},
                                   req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'my server'},
                                   req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'a'}, req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': u'\u0434'}, req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': u'\u0434\u2006\ufffd'},
                                   req=FakeRequest()))

    def test_validate_name_fails(self):
        error = ("An invalid 'name' value was provided. The name must be: "
                 "printable characters. "
                 "Can not start or end with whitespace.")

        should_fail = (' ',
                       ' server',
                       'server ',
                       u'a\xa0',  # trailing unicode space
                       u'\uffff',  # non-printable unicode
                       )

        for item in should_fail:
            self.check_validation_error(self.post, body={'foo': item},
                                        expected_detail=error)

        # four-byte unicode, if supported by this python build
        try:
            self.check_validation_error(self.post, body={'foo': u'\U00010000'},
                                        expected_detail=error)
        except ValueError:
            pass


# Variant of 'name' that permits leading/trailing whitespace as long as at
# least one non-space character is present.
class NameWithLeadingTrailingSpacesTestCase(APIValidationTestCase):

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': parameter_types.name_with_leading_trailing_spaces,
        },
    }

    def test_validate_name(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'm1.small'},
                                   req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'my server'},
                                   req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'a'}, req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': u'\u0434'}, req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': u'\u0434\u2006\ufffd'},
                                   req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': ' abc '}, req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'abc  abc  abc'},
                                   req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': '  abc  abc  abc  '},
                                   req=FakeRequest()))
        # leading unicode space
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': '\xa0abc'},
                                   req=FakeRequest()))

    def test_validate_name_fails(self):
        error = ("An invalid 'name' value was provided. The name must be: "
                 "printable characters with at least one non space character")

        should_fail = (
            ' ',
            u'\xa0',  # unicode space
            u'\uffff',  # non-printable unicode
        )

        for item in should_fail:
            self.check_validation_error(self.post, body={'foo': item},
                                        expected_detail=error)

        # four-byte unicode, if supported by this python build
        try:
            self.check_validation_error(self.post, body={'foo': u'\U00010000'},
                                        expected_detail=error)
        except ValueError:
            pass


# 'none' type: only 'None', None and {} are accepted.
class NoneTypeTestCase(APIValidationTestCase):

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': parameter_types.none
        }
    }

    def test_validate_none(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'None'},
                                   req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': None},
                                   req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': {}},
                                   req=FakeRequest()))

    def test_validate_none_fails(self):
        detail = ("Invalid input for field/attribute foo. Value: ."
                  " '' is not one of ['None', None, {}]")
        self.check_validation_error(self.post, body={'foo': ''},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. Value: "
                  "{'key': 'val'}. {'key': 'val'} is not one of "
                  "['None', None, {}]")
        self.check_validation_error(self.post, body={'foo': {'key': 'val'}},
                                    expected_detail=detail)


# A name or None; anything else fails the anyOf schema.
class NameOrNoneTestCase(APIValidationTestCase):

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': parameter_types.name_or_none
        }
    }

    def test_valid(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': None},
                                   req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': '1'},
                                   req=FakeRequest()))

    def test_validate_fails(self):
        detail = ("Invalid input for field/attribute foo. Value: 1234. 1234 "
                  "is not valid under any of the given schemas")
        self.check_validation_error(self.post, body={'foo': 1234},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. Value: . '' "
                  "is not valid under any of the given schemas")
        self.check_validation_error(self.post, body={'foo': ''},
                                    expected_detail=detail)

        too_long_name = 256 * "k"
        detail = ("Invalid input for field/attribute foo. Value: %s. "
                  "'%s' is not valid under any of the "
                  "given schemas") % (too_long_name, too_long_name)
        self.check_validation_error(self.post, body={'foo': too_long_name},
                                    expected_detail=detail)


# TCP/UDP port number (0-65535), as int or digits-only string.
class TcpUdpPortTestCase(APIValidationTestCase):

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': parameter_types.tcp_udp_port,
        },
    }

    def test_validate_tcp_udp_port(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 1024}, req=FakeRequest()))
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': '1024'}, req=FakeRequest()))

    def test_validate_tcp_udp_port_fails(self):
        detail = ("Invalid input for field/attribute foo. Value: True."
                  " True is not of type 'integer', 'string'")
        self.check_validation_error(self.post, body={'foo': True},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. Value: 65536."
                  " 65536(.0)? is greater than the maximum of 65535")
        self.check_validation_error(self.post, body={'foo': 65536},
                                    expected_detail=detail)


# 'cidr' format checker: requires a valid address plus prefix length.
class CidrFormatTestCase(APIValidationTestCase):

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': 'string',
                'format': 'cidr',
            },
        },
    }

    def test_validate_cidr(self):
        self.assertEqual('Validation succeeded.',
                         self.post(
                             body={'foo': '192.168.10.0/24'},
                             req=FakeRequest()
                         ))

    def test_validate_cidr_fails(self):
        detail = ("Invalid input for field/attribute foo."
                  " Value: bar."
                  " 'bar' is not a 'cidr'")
        self.check_validation_error(self.post, body={'foo': 'bar'},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo."
                  " Value: . '' is not a 'cidr'")
        self.check_validation_error(self.post, body={'foo': ''},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo."
                  " Value: 192.168.1.0. '192.168.1.0' is not a 'cidr'")
        self.check_validation_error(self.post, body={'foo': '192.168.1.0'},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo."
                  " Value: 192.168.1.0 /24."
                  " '192.168.1.0 /24' is not a 'cidr'")
        self.check_validation_error(self.post, body={'foo': '192.168.1.0 /24'},
                                    expected_detail=detail)


# ISO 8601 'date-time' format checker.
class DatetimeTestCase(APIValidationTestCase):

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': 'string',
                'format': 'date-time',
            },
        },
    }

    def test_validate_datetime(self):
        self.assertEqual('Validation succeeded.',
                         self.post(
                             body={'foo': '2014-01-14T01:00:00Z'},
                             req=FakeRequest()
                         ))

    def test_validate_datetime_fails(self):
        detail = ("Invalid input for field/attribute foo."
                  " Value: 2014-13-14T01:00:00Z."
                  " '2014-13-14T01:00:00Z' is not a 'date-time'")
        self.check_validation_error(self.post,
                                    body={'foo': '2014-13-14T01:00:00Z'},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo."
                  " Value: bar. 'bar' is not a 'date-time'")
        self.check_validation_error(self.post, body={'foo': 'bar'},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. Value: 1."
                  " '1' is not a 'date-time'")
        self.check_validation_error(self.post, body={'foo': '1'},
                                    expected_detail=detail)


# 'uuid' format checker.
class UuidTestCase(APIValidationTestCase):

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': 'string',
                'format': 'uuid',
            },
        },
    }

    def test_validate_uuid(self):
        self.assertEqual('Validation succeeded.',
                         self.post(
                             body={'foo': '70a599e0-31e7-49b7-b260-868f441e862b'},
                             req=FakeRequest()
                         ))

    def test_validate_uuid_fails(self):
        detail = ("Invalid input for field/attribute foo."
                  " Value: 70a599e031e749b7b260868f441e862."
                  " '70a599e031e749b7b260868f441e862' is not a 'uuid'")
        self.check_validation_error(self.post,
                                    body={'foo': '70a599e031e749b7b260868f441e862'},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. Value: 1."
                  " '1' is not a 'uuid'")
        self.check_validation_error(self.post, body={'foo': '1'},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. Value: abc."
                  " 'abc' is not a 'uuid'")
        self.check_validation_error(self.post, body={'foo': 'abc'},
                                    expected_detail=detail)


# 'uri' format checker.
class UriTestCase(APIValidationTestCase):

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': 'string',
                'format': 'uri',
            },
        },
    }

    def test_validate_uri(self):
        self.assertEqual('Validation succeeded.',
                         self.post(
                             body={'foo': 'http://localhost:8774/v2/servers'},
                             req=FakeRequest()
                         ))
        self.assertEqual('Validation succeeded.',
                         self.post(
                             body={'foo': 'http://[::1]:8774/v2/servers'},
                             req=FakeRequest()
                         ))

    def test_validate_uri_fails(self):
        base_detail = ("Invalid input for field/attribute foo. Value: {0}. "
                       "'{0}' is not a 'uri'")
        invalid_uri = 'http://localhost:8774/v2/servers##'
        self.check_validation_error(self.post, body={'foo': invalid_uri},
                                    expected_detail=base_detail.format(
                                        invalid_uri))

        invalid_uri = 'http://[fdf8:01]:8774/v2/servers'
        self.check_validation_error(self.post, body={'foo': invalid_uri},
                                    expected_detail=base_detail.format(
                                        invalid_uri))

        invalid_uri = '1'
        self.check_validation_error(self.post, body={'foo': invalid_uri},
                                    expected_detail=base_detail.format(
                                        invalid_uri))

        invalid_uri = 'abc'
        self.check_validation_error(self.post, body={'foo': invalid_uri},
                                    expected_detail=base_detail.format(
                                        invalid_uri))


# 'ipv4' format checker.
class Ipv4TestCase(APIValidationTestCase):

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': 'string',
                'format': 'ipv4',
            },
        },
    }

    def test_validate_ipv4(self):
        self.assertEqual('Validation succeeded.',
                         self.post(
                             body={'foo': '192.168.0.100'},
                             req=FakeRequest()
                         ))

    def test_validate_ipv4_fails(self):
        detail = ("Invalid input for field/attribute foo. Value: abc."
                  " 'abc' is not a 'ipv4'")
        self.check_validation_error(self.post, body={'foo': 'abc'},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. Value: localhost."
                  " 'localhost' is not a 'ipv4'")
        self.check_validation_error(self.post, body={'foo': 'localhost'},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo."
                  " Value: 2001:db8::1234:0:0:9abc."
                  " '2001:db8::1234:0:0:9abc' is not a 'ipv4'")
        self.check_validation_error(self.post,
                                    body={'foo': '2001:db8::1234:0:0:9abc'},
                                    expected_detail=detail)


# 'ipv6' format checker.
class Ipv6TestCase(APIValidationTestCase):

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': 'string',
                'format': 'ipv6',
            },
        },
    }

    def test_validate_ipv6(self):
        self.assertEqual('Validation succeeded.',
                         self.post(
                             body={'foo': '2001:db8::1234:0:0:9abc'},
                             req=FakeRequest()
                         ))

    def test_validate_ipv6_fails(self):
        detail = ("Invalid input for field/attribute foo. Value: abc."
                  " 'abc' is not a 'ipv6'")
        self.check_validation_error(self.post, body={'foo': 'abc'},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo. Value: localhost."
                  " 'localhost' is not a 'ipv6'")
        self.check_validation_error(self.post, body={'foo': 'localhost'},
                                    expected_detail=detail)

        detail = ("Invalid input for field/attribute foo."
                  " Value: 192.168.0.100. '192.168.0.100' is not a 'ipv6'")
        self.check_validation_error(self.post, body={'foo': '192.168.0.100'},
                                    expected_detail=detail)


# 'base64' format checker.
class Base64TestCase(APIValidationTestCase):

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': 'string',
                'format': 'base64',
            },
        },
    }

    def test_validate_base64(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'aGVsbG8gd29ybGQ='},
                                   req=FakeRequest()))
        # 'aGVsbG8gd29ybGQ=' is the base64 code of 'hello world'

    def test_validate_base64_fails(self):
        value = 'A random string'
        detail = ("Invalid input for field/attribute foo. "
                  "Value: %s. '%s' is not a 'base64'") % (value, value)
        self.check_validation_error(self.post, body={'foo': value},
                                    expected_detail=detail)


# 'regex' format checker: value must be a compilable regular expression.
class RegexFormatTestCase(APIValidationTestCase):

    def setUp(self):
        super(RegexFormatTestCase, self).setUp()
        self.controller = RegexFormatFakeController()

    def test_validate_regex(self):
        req = fakes.HTTPRequest.blank("")
        self.assertEqual('Validation succeeded.',
                         self.controller.post(req, body={'foo': u'Myserver'}))

    def test_validate_regex_fails(self):
        value = 1
        req = fakes.HTTPRequest.blank("")
        detail = ("Invalid input for field/attribute foo. "
                  "Value: %s. %s is not a 'regex'") % (value, value)
        self.check_validation_error(self.controller.post, req=req,
                                    body={'foo': value},
                                    expected_detail=detail)
import unittest
import subprocess
import os
import platform
import shutil
from os.path import join, normpath, abspath, split
import sys

# Make the littlechef package under test importable regardless of how the
# suite is launched.
env_path = "/".join(os.path.dirname(os.path.abspath(__file__)).split('/')[:-1])
sys.path.insert(0, env_path)
import littlechef

test_path = split(normpath(abspath(__file__)))[0]
littlechef_top = normpath(join(test_path, '..'))

# The 'fix' entry point has a platform-specific wrapper on Windows.
if platform.system() == 'Windows':
    fix = join(littlechef_top, 'fix.cmd')
    WIN32 = True
else:
    fix = join(littlechef_top, 'fix')
    WIN32 = False


class BaseTest(unittest.TestCase):
    def setUp(self):
        """Change to the test directory"""
        self.set_location()

    def set_location(self, location=test_path):
        """Change directories to a known location"""
        os.chdir(location)

    def execute(self, call):
        """Executes a command and returns stdout and stderr"""
        # The former WIN32 branch was byte-identical to the non-Windows one,
        # so the conditional was dead code; a single Popen suffices.
        # NOTE(review): under Python 3 communicate() returns bytes; consider
        # universal_newlines=True if the suite is ever run there.
        proc = subprocess.Popen(call,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        return proc.communicate()


class TestConfig(BaseTest):

    def tearDown(self):
        self.set_location()

    def test_not_a_kitchen(self):
        """Should exit with error when not a kitchen directory"""
        # Change to parent dir, which has no nodes/cookbooks/roles dir
        self.set_location(littlechef_top)
        # Call fix from the current directory above "tests/"
        resp, error = self.execute([fix, 'node:a'])
        self.assertTrue("Fatal error" in error, resp)
        self.assertTrue(
            'No {0} file found'.format(littlechef.CONFIGFILE) in error, error)
        self.assertEqual(resp, "", resp)

    def test_version(self):
        """Should output the correct Little Chef version"""
        resp, error = self.execute([fix, '-v'])
        self.assertEqual(resp, "",
                         "Response should be empty, version should be in stderr")
        self.assertTrue(
            'LittleChef {0}'.format(littlechef.__version__) in error)

    def test_list_commands(self):
        """Should output a list of available commands"""
        resp, error = self.execute([fix, '-l'])
        self.assertEqual(error, "")
        expected = "Starts a Chef Solo configuration run"
        self.assertTrue(expected in resp)
        commands = resp.split('\nAvailable commands:\n')[-1]
        # Materialize to a list so len() also works where filter() is lazy.
        commands = list(filter(None, commands.split('\n')))
        self.assertEqual(len(commands), 21)

    def test_verbose(self):
        """Should turn on verbose output"""
        resp, error = self.execute([fix, '--verbose', 'list_nodes'])
        self.assertEqual(error, "", error)
        self.assertTrue('Verbose output on' in resp, resp)

    def test_debug(self):
        """Should turn on debug loglevel"""
        resp, error = self.execute([fix, '--debug', 'list_nodes'])
        self.assertEqual(error, "", error)
        self.assertTrue('Debug level on' in resp, resp)


class TestEnvironment(BaseTest):
    def test_no_valid_value(self):
        """Should error out when the env value is empty or is a fabric task"""
        resp, error = self.execute([fix, 'list_nodes', '--env'])
        self.assertEqual(resp, "")
        self.assertTrue(
            "error: argument -e/--env: expected one argument" in error, error)

        resp, error = self.execute([fix, '--env', 'list_nodes'])
        self.assertEqual(resp, "")
        self.assertTrue("error: No value given for --env" in error, error)

        cmd = [fix, '--env', 'nodes_with_role:base', 'role:base']
        resp, error = self.execute(cmd)
        self.assertEqual(resp, "")
        self.assertTrue("error: No value given for --env" in error, error)

    def test_valid_environment(self):
        """Should set the chef_environment value when one is given"""
        resp, error = self.execute([fix, 'list_nodes', '--env', 'staging'])
        self.assertEqual(error, "", error)
        self.assertTrue("Environment: staging" in resp, resp)


class TestRunner(BaseTest):
    def test_no_node_given(self):
        """Should abort when no node is given"""
        resp, error = self.execute([fix, 'node:'])
        self.assertTrue("Fatal error: No node was given" in error)

    def test_plugin(self):
        """Should execute the given plugin"""
        resp, error = self.execute([fix, 'node:testnode1', 'plugin:notthere'])
        expected = ", could not find 'notthere.py' in the plugin directory"
        self.assertTrue(expected in error, resp + error)

        resp, error = self.execute([fix, 'node:testnode1', 'plugin:bad'])
        expected = "Found plugin 'bad', but it seems to have a syntax error:"
        expected += " invalid syntax (bad.py, line 6)"
        self.assertTrue(expected in error, resp + error)

        resp, error = self.execute([fix, 'node:testnode1', 'plugin:dummy'])
        expected = "Executing plugin '{0}' on {1}".format("dummy", "testnode1")
        self.assertTrue(expected in resp, resp + error)

    def test_list_plugins(self):
        """Should print a list of available plugins"""
        resp, error = self.execute([fix, 'list_plugins'])
        self.assertTrue("List of available plugins:" in resp, resp)
        self.assertTrue("bad: Plugin has a syntax error" in resp, resp)
        self.assertTrue("dummy: Dummy LittleChef plugin" in resp, resp)


class TestCookbooks(BaseTest):
    def test_list_recipes(self):
        """Should list available recipes"""
        resp, error = self.execute([fix, 'list_recipes'])
        self.assertEqual(error, "")
        self.assertTrue('subversion::client' in resp)
        self.assertTrue('subversion::server' in resp)

    def test_list_recipes_site_cookbooks(self):
        """Should give priority to site-cookbooks information"""
        resp, error = self.execute([fix, 'list_recipes'])
        self.assertTrue('Modified by site-cookbooks' in resp)

    def test_list_recipes_detailed(self):
        """Should show a detailed list of available recipes"""
        resp, error = self.execute([fix, 'list_recipes_detailed'])
        self.assertTrue('subversion::client' in resp)
        for field in ['description', 'version', 'dependencies', 'attributes']:
            self.assertTrue(field in resp)

    def test_list_recipes_detailed_site_cookbooks(self):
        """Should show a detailed list of available recipes with site-cookbook
        priority

        """
        resp, error = self.execute([fix, 'list_recipes_detailed'])
        self.assertTrue('0.8.4' in resp)

    def test_no_metadata(self):
        """Should abort if cookbook has no metadata.json"""
        bad_cookbook = join(test_path, 'cookbooks', 'bad_cookbook')
        os.mkdir(bad_cookbook)
        try:
            resp, error = self.execute([fix, 'list_recipes'])
        except OSError:
            self.fail("Couldn't execute {0}".format(fix))
        finally:
            # always clean up the temporary cookbook directory
            os.rmdir(bad_cookbook)
        expected = 'Fatal error: Cookbook "bad_cookbook" has no metadata.json'
        self.assertTrue(expected in error)


class TestListRoles(BaseTest):
    def test_list_roles(self):
        """Should list all roles"""
        resp, error = self.execute([fix, 'list_roles'])
        self.assertTrue('base' in resp and 'example aplication' in resp)

    def test_list_roles_detailed(self):
        """Should show a detailed list of all roles"""
        resp, error = self.execute([fix, 'list_roles_detailed'])
        self.assertTrue('base' in resp and 'example aplication' in resp)


class TestListNodes(BaseTest):
    def test_list_nodes(self):
        """Should list all nodes"""
        resp, error = self.execute([fix, 'list_nodes'])
        for node in ['testnode1', 'testnode2', 'testnode3.mydomain.com']:
            self.assertTrue(node in resp)
        self.assertTrue('Recipes: subversion' in resp)

    def test_list_nodes_in_env(self):
        """Should list all nodes in an environment"""
        resp, error = self.execute([fix, '--env', 'staging', 'list_nodes'])
        self.assertTrue('testnode2' in resp)
        self.assertFalse('testnode1' in resp)
        self.assertFalse('testnode3.mydomain.com' in resp)

    def test_list_nodes_detailed(self):
        """Should show a detailed list of all nodes"""
        resp, error = self.execute([fix, 'list_nodes_detailed'])
        self.assertTrue('testnode1' in resp)
        self.assertTrue('Recipe: subversion' in resp)

    def test_list_nodes_with_recipe(self):
        """Should list all nodes with a recipe in the run list"""
        resp, error = self.execute([fix, 'list_nodes_with_recipe:subversion'])
        self.assertTrue('testnode1' in resp)
        self.assertTrue('Recipes: subversion' in resp)

        resp, error = self.execute([fix, 'list_nodes_with_recipe:apache2'])
        self.assertFalse('testnode1' in resp)


class TestNewKitchen(BaseTest):

    def setUp(self):
        self.new_kitchen = join(test_path, 'test_new_kitchen')
        os.mkdir(self.new_kitchen)
        self.set_location(self.new_kitchen)

    def tearDown(self):
        shutil.rmtree(self.new_kitchen)
        self.set_location()

    def test_new_kitchen_creates_required_directories(self):
        resp, error = self.execute([fix, 'new_kitchen'])
        kitchen_contents = os.listdir(os.getcwd())
        self.assertTrue('roles' in kitchen_contents)
        self.assertTrue('cookbooks' in kitchen_contents)
        self.assertTrue('site-cookbooks' in kitchen_contents)
        self.assertTrue('data_bags' in kitchen_contents)
        self.assertTrue('nodes' in kitchen_contents)
        self.assertTrue('environments' in kitchen_contents)
        self.assertTrue(littlechef.CONFIGFILE in kitchen_contents)

    def test_new_kitchen_can_list_nodes(self):
        self.execute([fix, 'new_kitchen'])
        with open(littlechef.CONFIGFILE, "w") as configfh:
            # 'print >> fh' is Python-2-only syntax (a SyntaxError on py3);
            # write the config lines portably instead.
            configfh.write("[userinfo]\n")
            configfh.write("user = testuser\n")
            configfh.write("password = testpassword\n")
        resp, error = self.execute([fix, 'list_nodes'])
        self.assertFalse(error)
        self.assertTrue('Found 0 nodes' in resp)
        self.assertEqual('', error)
"""Compose ACLs on ports.""" from faucet import valve_of from faucet.conf import InvalidConfigError def push_vlan(vlan_vid): """Push a VLAN tag with optional selection of eth type.""" vid = vlan_vid vlan_eth_type = None if isinstance(vlan_vid, dict): vid = vlan_vid['vid'] if 'eth_type' in vlan_vid: vlan_eth_type = vlan_vid['eth_type'] if vlan_eth_type is None: return valve_of.push_vlan_act(vid) return valve_of.push_vlan_act(vid, eth_type=vlan_eth_type) def rewrite_vlan(output_dict): """Implement actions to rewrite VLAN headers.""" vlan_actions = [] if 'pop_vlans' in output_dict: for _ in range(output_dict['pop_vlans']): vlan_actions.append(valve_of.pop_vlan()) # if vlan tag is specified, push it. if 'vlan_vid' in output_dict: vlan_actions.extend(push_vlan(output_dict['vlan_vid'])) # swap existing VID elif 'swap_vid' in output_dict: vlan_actions.append( valve_of.set_vlan_vid(output_dict['swap_vid'])) # or, if a list, push them all (all with type Q). elif 'vlan_vids' in output_dict: for vlan_vid in output_dict['vlan_vids']: vlan_actions.extend(push_vlan(vlan_vid)) return vlan_actions def build_output_actions(output_dict): """Implement actions to alter packet/output.""" output_actions = [] output_port = None ofmsgs = [] # rewrite any VLAN headers first always vlan_actions = rewrite_vlan(output_dict) if vlan_actions: output_actions.extend(vlan_actions) if 'set_fields' in output_dict: for set_fields in output_dict['set_fields']: output_actions.append(valve_of.set_field(**set_fields)) if 'port' in output_dict: output_port = output_dict['port'] output_actions.append(valve_of.output_port(output_port)) if 'ports' in output_dict: for output_port in output_dict['ports']: output_actions.append(valve_of.output_port(output_port)) if 'failover' in output_dict: failover = output_dict['failover'] group_id = failover['group_id'] buckets = [] for port in failover['ports']: buckets.append(valve_of.bucket( watch_port=port, actions=[valve_of.output_port(port)])) 
ofmsgs.append(valve_of.groupdel(group_id=group_id)) ofmsgs.append(valve_of.groupadd_ff(group_id=group_id, buckets=buckets)) output_actions.append(valve_of.group_act(group_id=group_id)) return (output_port, output_actions, ofmsgs) def build_acl_entry(rule_conf, meters, acl_allow_inst, acl_force_port_vlan_inst, port_num=None, vlan_vid=None): acl_inst = [] acl_act = [] acl_match_dict = {} acl_ofmsgs = [] acl_cookie = None allow_inst = acl_allow_inst for attrib, attrib_value in list(rule_conf.items()): if attrib == 'in_port': continue if attrib == 'cookie': acl_cookie = attrib_value continue if attrib == 'description': continue if attrib == 'actions': allow = False allow_specified = False if 'allow' in attrib_value: allow_specified = True if attrib_value['allow'] == 1: allow = True if 'force_port_vlan' in attrib_value: if attrib_value['force_port_vlan'] == 1: allow_inst = acl_force_port_vlan_inst if 'meter' in attrib_value: meter_name = attrib_value['meter'] acl_inst.append(valve_of.apply_meter(meters[meter_name].meter_id)) if 'mirror' in attrib_value: port_no = attrib_value['mirror'] acl_act.append(valve_of.output_port(port_no)) if not allow_specified: allow = True if 'output' in attrib_value: output_port, output_actions, output_ofmsgs = build_output_actions( attrib_value['output']) acl_act.extend(output_actions) acl_ofmsgs.extend(output_ofmsgs) # if port specified, output packet now and exit pipeline. 
if output_port is not None: continue if allow: acl_inst.append(allow_inst) else: acl_match_dict[attrib] = attrib_value if port_num is not None: acl_match_dict['in_port'] = port_num if vlan_vid is not None: acl_match_dict['vlan_vid'] = valve_of.vid_present(vlan_vid) try: acl_match = valve_of.match_from_dict(acl_match_dict) except TypeError: raise InvalidConfigError('invalid type in ACL') if acl_act: acl_inst.append(valve_of.apply_actions(acl_act)) return (acl_match, acl_inst, acl_cookie, acl_ofmsgs) def build_acl_ofmsgs(acls, acl_table, acl_allow_inst, acl_force_port_vlan_inst, highest_priority, meters, exact_match, port_num=None, vlan_vid=None): ofmsgs = [] acl_rule_priority = highest_priority for acl in acls: for rule_conf in acl.rules: acl_match, acl_inst, acl_cookie, acl_ofmsgs = build_acl_entry( rule_conf, meters, acl_allow_inst, acl_force_port_vlan_inst, port_num, vlan_vid) ofmsgs.extend(acl_ofmsgs) if exact_match: flowmod = acl_table.flowmod( acl_match, priority=highest_priority, inst=acl_inst, cookie=acl_cookie) else: flowmod = acl_table.flowmod( acl_match, priority=acl_rule_priority, inst=acl_inst, cookie=acl_cookie) ofmsgs.append(flowmod) acl_rule_priority -= 1 return ofmsgs
from resolwe.flow.models import Data
from resolwe.test import tag_process, with_resolwe_host

from resolwe_bio.utils.test import KBBioProcessTestCase


class MicroRNATestCase(KBBioProcessTestCase):
    """End-to-end check of the miRNA workflow against a small fake genome."""

    @with_resolwe_host
    @tag_process("workflow-mirna")
    def test_mirna_workflow(self):
        # Stage the reference sequence, bowtie2 index, reads and annotation
        # that the workflow consumes.
        with self.preparation_stage():
            genome_inputs = {
                "src": "genome_rsem.fa.gz",
                "species": "Homo sapiens",
                "build": "fake_genome_RSEM",
            }
            ref_seq = self.run_process("upload-fasta-nucl", genome_inputs)
            bowtie2_index = self.run_process("bowtie2-index", {"ref_seq": ref_seq.id})
            single_reads = self.prepare_reads(["reads rsem.fq.gz"])
            annotation = self.prepare_annotation(
                "annotation_rsem.gtf.gz",
                species="Homo sapiens",
                build="fake_genome_RSEM",
            )

        # Assemble the workflow inputs from named sections.
        preprocessing = {
            "reads": single_reads.pk,
            "adapters": {"down_primers_seq": ["TAATGAACAATGCAAGTTTGA"]},
            "filtering": {"minlen": 15, "maxlen": 35, "error_rate": 0.2},
        }
        alignment = {
            "genome": bowtie2_index.pk,
            "alignment_options": {
                "mode": "--local",
                "speed": "--very-sensitive",
                "L": 8,
                "rep_mode": "k",
                "k_reports": 5,
            },
        }
        quant_options = {
            "annotation": annotation.pk,
            "id_attribute": "gene_id",
            "feature_class": "exon",
            "normalization_type": "CPM",
            "count_multi_mapping_reads": True,
            "allow_multi_overlap": True,
        }
        workflow_inputs = {
            "preprocessing": preprocessing,
            "alignment": alignment,
            "quant_options": quant_options,
            "assay_type": "non_specific",
        }

        # Run the workflow, then verify the featureCounts outputs.
        self.run_process("workflow-mirna", workflow_inputs)
        feature_counts = Data.objects.filter(process__slug="feature_counts").last()

        # Raw counts and CPM-normalized expression must match the fixtures.
        self.assertFile(
            feature_counts, "rc", "mirna_featurecounts_rc.tab.gz", compression="gzip"
        )
        self.assertFile(
            feature_counts, "exp", "mirna_featurecounts_cpm.tab.gz", compression="gzip"
        )
"""Functional tests deploying murano applications and checking the results."""

import os
import socket
import time
import uuid

import testresources
import testtools

from heatclient import client as heatclient
from keystoneclient.v2_0 import client as ksclient
from muranoclient import client as mclient
import muranoclient.common.exceptions as exceptions

import murano.tests.functional.engine.config as cfg


CONF = cfg.cfg.CONF


class MuranoBase(testtools.TestCase, testtools.testcase.WithAttributes,
                 testresources.ResourcedTestCase):
    """Base class: sets up keystone/heat/murano clients, uploads the test
    packages once per class, and provides deploy/teardown helpers."""

    @classmethod
    def setUpClass(cls):
        super(MuranoBase, cls).setUpClass()

        cfg.load_config()

        # Authenticate once; the token is reused for the heat and murano clients.
        keystone_client = ksclient.Client(username=CONF.murano.user,
                                          password=CONF.murano.password,
                                          tenant_name=CONF.murano.tenant,
                                          auth_url=CONF.murano.auth_url)

        heat_url = keystone_client.service_catalog.url_for(
            service_type='orchestration', endpoint_type='publicURL')

        cls.heat_client = heatclient.Client('1', endpoint=heat_url,
                                            token=keystone_client.auth_token)

        # Strip any '/v1...' suffix so the client can append its own version path.
        url = CONF.murano.murano_url
        murano_url = url if 'v1' not in url else "/".join(
            url.split('/')[:url.split('/').index('v1')])

        cls.muranoclient = mclient.Client('1',
                                          endpoint=murano_url,
                                          token=keystone_client.auth_token)

        cls.linux = CONF.murano.linux_image

        # Packages live in the sibling 'murano-app-incubator' checkout.
        cls.pkgs_path = os.path.abspath(os.path.join(
            os.path.dirname(__file__),
            os.path.pardir,
            'murano-app-incubator'
        ))

        def upload_package(package_name, body, app):
            # NOTE(review): the file handle is never explicitly closed here.
            files = {'%s' % package_name: open(app, 'rb')}
            return cls.muranoclient.packages.create(body, files)

        upload_package(
            'PostgreSQL',
            {"categories": ["Databases"], "tags": ["tag"]},
            os.path.join(cls.pkgs_path, 'io.murano.databases.PostgreSql.zip')
        )
        upload_package(
            'SqlDatabase',
            {"categories": ["Databases"], "tags": ["tag"]},
            os.path.join(cls.pkgs_path, 'io.murano.databases.SqlDatabase.zip')
        )
        upload_package(
            'Apache',
            {"categories": ["Application Servers"], "tags": ["tag"]},
            os.path.join(cls.pkgs_path,
                         'io.murano.apps.apache.ApacheHttpServer.zip')
        )
        upload_package(
            'Tomcat',
            {"categories": ["Application Servers"], "tags": ["tag"]},
            os.path.join(cls.pkgs_path, 'io.murano.apps.apache.Tomcat.zip')
        )
        upload_package(
            'Telnet',
            {"categories": ["Web"], "tags": ["tag"]},
            os.path.join(cls.pkgs_path, 'io.murano.apps.linux.Telnet.zip')
        )

    def setUp(self):
        super(MuranoBase, self).setUp()
        # Environments created by a test; torn down in tearDown.
        self.environments = []

    def tearDown(self):
        super(MuranoBase, self).tearDown()
        for env in self.environments:
            try:
                self.environment_delete(env)
            except Exception:
                # best-effort cleanup; a failed delete must not fail the test.
                pass

    def environment_delete(self, environment_id, timeout=180):
        """Delete an environment and poll until it is gone or timeout elapses.

        Raises:
            Exception: if the environment still exists after `timeout` seconds.
        """
        self.muranoclient.environments.delete(environment_id)

        start_time = time.time()
        while time.time() - start_time < timeout:
            try:
                self.muranoclient.environments.get(environment_id)
            except exceptions.HTTPNotFound:
                # 404 means the delete completed.
                return
        raise Exception(
            'Environment {0} was not deleted in {1} seconds'.format(
                environment_id, timeout))

    def wait_for_environment_deploy(self, environment):
        """Poll every 5s until the environment reaches 'ready' (20 min cap)."""
        start_time = time.time()

        while environment.manager.get(environment.id).status != 'ready':
            if time.time() - start_time > 1200:
                self.fail(
                    'Environment deployment is not finished in 1200 seconds')
            time.sleep(5)

        return environment.manager.get(environment.id)

    def check_port_access(self, ip, port):
        """Assert that a TCP connect to ip:port succeeds within 5 minutes."""
        result = 1
        start_time = time.time()
        while time.time() - start_time < 300:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            result = sock.connect_ex((str(ip), port))
            sock.close()

            if result == 0:
                break
            time.sleep(5)

        self.assertEqual(0, result, '%s port is closed on instance' % port)

    def deployment_success_check(self, environment, port):
        """Assert the last deployment succeeded and the service port is open."""
        deployment = self.muranoclient.deployments.list(environment.id)[-1]

        self.assertEqual('success', deployment.state,
                         'Deployment status is {0}'.format(deployment.state))

        # The deployed app is the last service added to the environment.
        ip = environment.services[-1]['instance']['floatingIpAddress']

        if ip:
            self.check_port_access(ip, port)
        else:
            self.fail('Instance does not have floating IP')

    def test_deploy_telnet(self):
        # Telnet app: expect port 23 reachable after deploy.
        post_body = {
            "instance": {
                "flavor": "m1.medium",
                "image": self.linux,
                "assignFloatingIp": True,
                "?": {
                    "type": "io.murano.resources.LinuxMuranoInstance",
                    "id": str(uuid.uuid4())
                },
                "name": "testMurano"
            },
            "name": "teMurano",
            "?": {
                "type": "io.murano.apps.linux.Telnet",
                "id": str(uuid.uuid4())
            }
        }

        environment_name = 'Telnetenv' + uuid.uuid4().hex[:5]

        env = self._quick_deploy(environment_name, post_body)

        self.deployment_success_check(env, 23)

    def test_deploy_apache(self):
        # Apache HTTP server: expect port 80 reachable after deploy.
        post_body = {
            "instance": {
                "flavor": "m1.medium",
                "image": self.linux,
                "assignFloatingIp": True,
                "?": {
                    "type": "io.murano.resources.LinuxMuranoInstance",
                    "id": str(uuid.uuid4())
                },
                "name": "testMurano"
            },
            "name": "teMurano",
            "?": {
                "type": "io.murano.apps.apache.ApacheHttpServer",
                "id": str(uuid.uuid4())
            }
        }

        environment_name = 'Apacheenv' + uuid.uuid4().hex[:5]

        env = self._quick_deploy(environment_name, post_body)

        self.deployment_success_check(env, 80)

    def test_deploy_postgresql(self):
        # PostgreSQL: expect port 5432 reachable after deploy.
        post_body = {
            "instance": {
                "flavor": "m1.medium",
                "image": self.linux,
                "assignFloatingIp": True,
                "?": {
                    "type": "io.murano.resources.LinuxMuranoInstance",
                    "id": str(uuid.uuid4())
                },
                "name": "testMurano"
            },
            "name": "teMurano",
            "database": "test_db",
            "username": "test_usr",
            "password": "test_pass",
            "?": {
                "type": "io.murano.databases.PostgreSql",
                "id": str(uuid.uuid4())
            }
        }

        environment_name = 'Postgreenv' + uuid.uuid4().hex[:5]

        env = self._quick_deploy(environment_name, post_body)

        self.deployment_success_check(env, 5432)

    def test_deploy_tomcat(self):
        # Tomcat: expect port 8080 reachable after deploy.
        post_body = {
            "instance": {
                "flavor": "m1.medium",
                "image": self.linux,
                "assignFloatingIp": True,
                "?": {
                    "type": "io.murano.resources.LinuxMuranoInstance",
                    "id": str(uuid.uuid4())
                },
                "name": "testMurano"
            },
            "name": "teMurano",
            "?": {
                "type": "io.murano.apps.apache.Tomcat",
                "id": str(uuid.uuid4())
            }
        }

        environment_name = 'Tomcatenv' + uuid.uuid4().hex[:5]

        env = self._quick_deploy(environment_name, post_body)

        self.deployment_success_check(env, 8080)

    def _get_telnet_app(self):
        """Return a telnet app body with fresh random ids/names."""
        return {
            "instance": {
                "?": {
                    "type": "io.murano.resources.LinuxMuranoInstance",
                    "id": str(uuid.uuid4())
                },
                "flavor": "m1.medium",
                "image": self.linux,
                "name": "instance{0}".format(uuid.uuid4().hex[:5]),
            },
            "name": "app{0}".format(uuid.uuid4().hex[:5]),
            "?": {
                "type": "io.murano.apps.linux.Telnet",
                "id": str(uuid.uuid4())
            }
        }

    def _quick_deploy(self, name, *apps):
        """Create an environment, add all apps in one session, deploy, wait."""
        environment = self.muranoclient.environments.create({'name': name})
        self.environments.append(environment.id)

        session = self.muranoclient.sessions.configure(environment.id)

        for app in apps:
            self.muranoclient.services.post(environment.id,
                                            path='/',
                                            data=app,
                                            session_id=session.id)

        self.muranoclient.sessions.deploy(environment.id, session.id)

        return self.wait_for_environment_deploy(environment)

    def _get_stack(self, environment_id):
        # Heat stacks created by murano embed the environment id in their
        # description; returns None if no stack matches.
        for stack in self.heat_client.stacks.list():
            if environment_id in stack.description:
                return stack

    def test_instance_refs_are_removed_after_application_is_removed(self):
        # FIXME(sergmelikyan): Revise this as part of proper fix for #1359998
        self.skipTest('Skipped until proper fix for #1359998 is proposed')
        name = 'e' + uuid.uuid4().hex

        # create environment with telnet application
        application1 = self._get_telnet_app()
        application2 = self._get_telnet_app()
        application_id = application1['?']['id']
        instance_name = application1['instance']['name']
        apps = [application1, application2]
        environment = self._quick_deploy(name, *apps)

        # delete telnet application
        session = self.muranoclient.sessions.configure(environment.id)
        self.muranoclient.services.delete(environment.id,
                                          '/' + application_id,
                                          session.id)
        self.muranoclient.sessions.deploy(environment.id, session.id)
        self.wait_for_environment_deploy(environment)

        # The deleted app's instance must no longer appear in the heat template.
        stack_name = self._get_stack(environment.id).stack_name
        template = self.heat_client.stacks.template(stack_name)
        ip_addresses = '{0}-assigned-ip'.format(instance_name)
        floating_ip = '{0}-FloatingIPaddress'.format(instance_name)

        self.assertNotIn(ip_addresses, template['outputs'])
        self.assertNotIn(floating_ip, template['outputs'])
        self.assertNotIn(instance_name, template['resources'])

    def test_stack_deletion_after_env_is_deleted(self):
        name = 'e' + uuid.uuid4().hex

        application = self._get_telnet_app()
        environment = self._quick_deploy(name, application)

        stack = self._get_stack(environment.id)
        self.assertIsNotNone(stack)

        self.muranoclient.environments.delete(environment.id)

        # Poll up to 5 minutes for the backing heat stack to disappear.
        start_time = time.time()
        while stack is not None:
            if time.time() - start_time > 300:
                break
            time.sleep(5)
            stack = self._get_stack(environment.id)

        self.assertIsNone(stack, 'stack is not deleted')
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    # Replaces the CourseMajor model with CourseOfferingMajor, which scopes
    # major counts to a specific term via an extra Term foreign key.

    dependencies = [
        ('coursedashboards', '0005_auto_20170915_2036'),
    ]

    operations = [
        # New model: how many students of a given major are in an offering.
        migrations.CreateModel(
            name='CourseOfferingMajor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True,
                                        serialize=False, verbose_name='ID')),
                ('count', models.IntegerField()),
            ],
            options={'db_table': 'CourseOfferingMajor'},
        ),
        # Drop the old model's constraints and FKs before deleting it.
        migrations.AlterUniqueTogether(
            name='coursemajor',
            unique_together=set(),
        ),
        migrations.RemoveField(model_name='coursemajor', name='course'),
        migrations.RemoveField(model_name='coursemajor', name='major'),
        migrations.AlterField(
            model_name='course',
            name='curriculum',
            field=models.CharField(max_length=20),
        ),
        migrations.DeleteModel(name='CourseMajor'),
        # Wire the new model to Course, Major and Term (PROTECT on delete).
        migrations.AddField(
            model_name='courseofferingmajor',
            name='course',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT,
                                    to='coursedashboards.Course'),
        ),
        migrations.AddField(
            model_name='courseofferingmajor',
            name='major',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT,
                                    to='coursedashboards.Major'),
        ),
        migrations.AddField(
            model_name='courseofferingmajor',
            name='term',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT,
                                    to='coursedashboards.Term'),
        ),
        # One row per (major, term, course).
        migrations.AlterUniqueTogether(
            name='courseofferingmajor',
            unique_together={('major', 'term', 'course')},
        ),
    ]
from __future__ import print_function

import sys

from paradrop.base import settings
from paradrop.lib.utils.pd_storage import PDStorage

from .chute import Chute


class ChuteStorage(PDStorage):
    """
        ChuteStorage class.

        This class holds onto the list of Chutes on this AP.

        It implements the PDStorage class which allows us to save the chuteList
        to disk transparently.
    """
    # Class variable of chute list so all instances see the same thing.
    # Keyed by chute name -> Chute object.
    chuteList = dict()

    def __init__(self, filename=None, save_timer=settings.FC_CHUTESTORAGE_SAVE_TIMER):
        if(not filename):
            filename = settings.FC_CHUTESTORAGE_FILE
        PDStorage.__init__(self, filename, save_timer)

        # Has it been loaded?  Only hit the disk once per process.
        if(len(ChuteStorage.chuteList) == 0):
            self.loadFromDisk()

    def setAttr(self, attr):
        """Save our attr however we want (as class variable for all to see)"""
        ChuteStorage.chuteList = attr

    def getAttr(self):
        """Get our attr (as class variable for all to see)"""
        return ChuteStorage.chuteList

    def getChuteList(self):
        """Return a list of the names of the chutes we know of."""
        # NOTE(review): despite the docstring this returns the Chute objects
        # (dict values), not the names -- confirm against callers.
        return ChuteStorage.chuteList.values()

    def getChute(self, name):
        """Returns a reference to a chute we have in our cache, or None."""
        return ChuteStorage.chuteList.get(name, None)

    def deleteChute(self, ch):
        """Deletes a chute from the chute storage. Can be sent the chute object,
           or the chute name.

           Raises KeyError if the chute is not present.
        """
        if (isinstance(ch, Chute)):
            del ChuteStorage.chuteList[ch.name]
        else:
            del ChuteStorage.chuteList[ch]
        self.saveToDisk()

    def saveChute(self, ch):
        """
            Saves the chute provided in our internal chuteList.
            Also since we just received a new chute to hold onto we should save
            our ChuteList to disk.
        """
        # check if there is a version of the chute already
        oldch = ChuteStorage.chuteList.get(ch.name, None)
        if(oldch != None):
            # we should merge these chutes so we don't lose any data
            # (attribute-level merge: new values overwrite old ones)
            oldch.__dict__.update(ch.__dict__)
            # TODO: do we need to deal with cache separate? Old code we did
        else:
            ChuteStorage.chuteList[ch.name] = ch

        self.saveToDisk()

    def clearChuteStorage(self):
        # Empty the shared cache and persist the empty state.
        ChuteStorage.chuteList.clear()
        self.saveToDisk()

    #
    # Functions we override to implement PDStorage Properly
    #
    def attrSaveable(self):
        """Returns True if we should save the ChuteList, otherwise False."""
        return (type(ChuteStorage.chuteList) == dict)

    @classmethod
    def get_chute(cls, name):
        # Like getChute but raises KeyError when the chute is missing.
        return cls.chuteList[name]


if(__name__ == '__main__'):  # pragma: no cover
    def usage():
        print('Usage: $0 -ls : print chute storage details')
        exit(0)

    try:
        if(sys.argv[1] != '-ls'):
            usage()
    except Exception as e:
        # No argument given (IndexError) or other failure: show usage.
        print(e)
        usage()

    cs = ChuteStorage()

    chutes = cs.getChuteList()
    for ch in chutes:
        print(ch)
"""Workspace emulator for the H8 architecture."""

import envi.archs.h8.emu as h8_emu
import envi.archs.h8.regs as h8_regs

import vivisect.impemu.emulator as v_i_emulator


class H8WorkspaceEmulator(v_i_emulator.WorkspaceEmulator, h8_emu.H8Emulator):
    """Combine the generic vivisect workspace emulator with the H8 CPU emulator."""

    # Registers treated as tainted (argument-carrying) by the workspace
    # emulation heuristics: ER0-ER2.
    taintregs = [h8_regs.REG_ER0, h8_regs.REG_ER1, h8_regs.REG_ER2]

    def __init__(self, vw, logwrite=False, logread=False):
        # Initialize the CPU emulator first, then the workspace layer on top
        # of it (WorkspaceEmulator needs a functional emulator underneath).
        h8_emu.H8Emulator.__init__(self)
        v_i_emulator.WorkspaceEmulator.__init__(self, vw, logwrite=logwrite, logread=logread)
from django.conf import settings

from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.PDB import *
from Bio.PDB.PDBIO import Select
from common.definitions import *
from protein.models import Protein, ProteinSegment
from residue.models import Residue
from structure.functions import BlastSearch, MappedResidue, StructureSeqNumOverwrite
from structure.sequence_parser import *
import Bio.PDB.Polypeptide as polypeptide
import os,logging
from collections import OrderedDict

logger = logging.getLogger("protwis")


class GenericNumbering(object):
    """Assign generic residue numbers (BW/GPCRdb) to residues of a PDB
    structure, either via a local BLAST search against the protwis database
    or via the SequenceParser mapping."""

    # Three-letter codes accepted when reading residues from the PDB file.
    residue_list = ["ARG","ASP","GLU","HIS","ASN","GLN","LYS","SER","THR","HID","PHE","LEU","ILE","TYR","TRP","VAL","MET","PRO","CYS","ALA","GLY"]
    # Structures needing special-cased residue offsets:
    # pdb code -> [first affected residue number, number of positions to skip].
    exceptions = {'6GDG':[255, 10]}

    def __init__ (self, pdb_file=None, pdb_filename=None, structure=None, pdb_code=None, blast_path='blastp',
        blastdb=os.sep.join([settings.STATICFILES_DIRS[0], 'blast', 'protwis_blastdb']),top_results=1, sequence_parser=False, signprot=False):

        # pdb_file can be either a name/path or a handle to an open file
        self.pdb_file = pdb_file
        self.pdb_filename = pdb_filename

        # if pdb 4 letter code is specified
        self.pdb_code = pdb_code

        # dictionary of 'MappedResidue' object storing information about alignments and bw numbers
        self.residues = {}
        self.pdb_seq = {} #Seq('')

        # list of uniprot ids returned from blast
        self.prot_id_list = []

        #setup for local blast search
        self.blast = BlastSearch(blast_path=blast_path, blastdb=blastdb,top_results=top_results)

        # calling sequence parser
        if sequence_parser:
            if pdb_code:
                struct = Structure.objects.get(pdb_code__index=self.pdb_code)
            if not signprot:
                if pdb_code:
                    s = SequenceParser(pdb_file=self.pdb_file, wt_protein_id=struct.protein_conformation.protein.parent.id)
                else:
                    s = SequenceParser(pdb_file=self.pdb_file)#, wt_protein_id=struct.protein_conformation.protein.parent.id)
            else:
                # signal protein path: map against the signprot's own sequence
                s = SequenceParser(pdb_file=self.pdb_file, wt_protein_id=signprot.id)
            self.pdb_structure = s.pdb_struct
            self.mapping = s.mapping
            self.wt = s.wt
        else:
            # plain Bio.PDB path: parse the structure (first model only: [0])
            if self.pdb_file:
                self.pdb_structure = PDBParser(PERMISSIVE=True, QUIET=True).get_structure('ref', self.pdb_file)[0]
            elif self.pdb_filename:
                self.pdb_structure = PDBParser(PERMISSIVE=True, QUIET=True).get_structure('ref', self.pdb_filename)[0]
            else:
                self.pdb_structure = structure

            self.parse_structure(self.pdb_structure)

    def parse_structure(self, pdb_struct):
        """
        extracting sequence and preparing dictionary of residues
        bio.pdb reads pdb in the following cascade: model->chain->residue->atom
        """
        for chain in pdb_struct:
            self.residues[chain.id] = {}
            self.pdb_seq[chain.id] = Seq('')

            for res in chain:
            #in bio.pdb the residue's id is a tuple of (hetatm flag, residue number, insertion code)
                if res.resname == "HID":
                    # NOTE(review): resname is computed here but the call below
                    # still passes res.resname ("HID"), which is not a standard
                    # code for three_to_one -- looks like a latent KeyError;
                    # confirm whether HID structures are ever parsed this way.
                    resname = polypeptide.three_to_one('HIS')
                else:
                    if res.resname not in self.residue_list:
                        continue
                self.residues[chain.id][res.id[1]] = MappedResidue(res.id[1], polypeptide.three_to_one(res.resname))

            self.pdb_seq[chain.id] = ''.join([self.residues[chain.id][x].name for x in sorted(self.residues[chain.id].keys())])

            # pos_in_aln is the 1-based position of the residue in the chain
            # sequence used for the BLAST query.
            for pos, res in enumerate(sorted(self.residues[chain.id].keys()), start=1):
                self.residues[chain.id][res].pos_in_aln = pos

    def locate_res_by_pos (self, chain, pos):
        """Return the PDB residue number at alignment position pos (0 if absent)."""
        for res in self.residues[chain].keys():
            if self.residues[chain][res].pos_in_aln == pos:
                return res
        return 0

    def map_blast_seq (self, prot_id, hsps, chain):
        """Walk one BLAST HSP and copy segment/generic-number annotations from
        the matched database protein onto the mapped PDB residues.

        prot_id: database protein id of the BLAST hit.
        hsps: a single HSP (query = PDB-derived sequence, sbjct = db sequence).
        chain: chain id the HSP belongs to.
        """
        #find uniprot residue numbers corresponding to those in pdb file
        q_seq = list(hsps.query)
        tmp_seq = list(hsps.sbjct)
        subj_counter = hsps.sbjct_start
        q_counter = hsps.query_start

        logger.info("{}\n{}".format(hsps.query, hsps.sbjct))
        logger.info("{:d}\t{:d}".format(hsps.query_start, hsps.sbjct_start))

        # Prefetch all residues of the hit protein keyed by sequence number.
        rs = Residue.objects.prefetch_related('display_generic_number', 'protein_segment').filter(
            protein_conformation__protein=prot_id)
        residues = {}
        for r in rs:
            residues[r.sequence_number] = r

        while tmp_seq:
            #skipping position if there is a gap in either of sequences
            if q_seq[0] == '-' or q_seq[0] == 'X' or q_seq[0] == ' ':
                subj_counter += 1
                tmp_seq.pop(0)
                q_seq.pop(0)
                continue
            if tmp_seq[0] == '-' or tmp_seq[0] == 'X' or tmp_seq[0] == ' ':
                q_counter += 1
                tmp_seq.pop(0)
                q_seq.pop(0)
                continue
            # only annotate exact residue matches
            if tmp_seq[0] == q_seq[0]:
                resn = self.locate_res_by_pos(chain, q_counter)
                if resn != 0:
                    if subj_counter in residues:
                        db_res = residues[subj_counter]

                        if db_res.protein_segment:
                            segment = db_res.protein_segment.slug
                            self.residues[chain][resn].add_segment(segment)

                        if db_res.display_generic_number:
                            num = db_res.display_generic_number.label
                            bw, gpcrdb = num.split('x')
                            gpcrdb = "{}.{}".format(bw.split('.')[0], gpcrdb)
                            self.residues[chain][resn].add_bw_number(bw)
                            self.residues[chain][resn].add_gpcrdb_number(gpcrdb)
                            self.residues[chain][resn].add_gpcrdb_number_id(db_res.display_generic_number.id)
                            self.residues[chain][resn].add_display_number(num)
                            self.residues[chain][resn].add_residue_record(db_res)
                    else:
                        logger.warning("Could not find residue {} {} in the database.".format(resn, subj_counter))

                    if prot_id not in self.prot_id_list:
                        self.prot_id_list.append(prot_id)
            q_counter += 1
            subj_counter += 1
            tmp_seq.pop(0)
            q_seq.pop(0)

    def get_substructure_mapping_dict(self):
        """Return a dict of segment slug -> list of PDB residue numbers."""
        mapping_dict = {}
        for chain in self.residues.keys():
            for res in self.residues[chain].keys():
                if self.residues[chain][res].segment in mapping_dict.keys():
                    mapping_dict[self.residues[chain][res].segment].append(self.residues[chain][res].number)
                else:
                    mapping_dict[self.residues[chain][res].segment] = [self.residues[chain][res].number,]
        return mapping_dict

    def get_annotated_structure(self):
        """Write generic numbers into B-factors (CA=gpcrdb, N=bw) and return
        the annotated Bio.PDB structure."""
        for chain in self.pdb_structure:
            for residue in chain:
                if residue.id[1] in self.residues[chain.id].keys():
                    if self.residues[chain.id][residue.id[1]].gpcrdb != 0.:
                        residue["CA"].set_bfactor(float(self.residues[chain.id][residue.id[1]].gpcrdb))
                    if self.residues[chain.id][residue.id[1]].bw != 0.:
                        residue["N"].set_bfactor(float(self.residues[chain.id][residue.id[1]].bw))

        return self.pdb_structure

    def save_gn_to_pdb(self):
        """Replace B-factors of CA/N atoms with generic numbers and write the
        structure to <original name>_GPCRDB<ext>."""
        # replace bfactor field of CA atoms with b-w numbers
        for chain in self.pdb_structure:
            for residue in chain:
                if residue.id[1] in self.residues[chain.id].keys():
                    if self.residues[chain.id][residue.id[1]].gpcrdb != 0.:
                        residue["CA"].set_bfactor(float(self.residues[chain.id][residue.id[1]].gpcrdb))
                    if self.residues[chain.id][residue.id[1]].bw != 0.:
                        residue["N"].set_bfactor(float(self.residues[chain.id][residue.id[1]].bw))
                    r = self.residues[chain.id][residue.id[1]]
        #get the basename, extension and export the pdb structure with b-w numbers
        root, ext = os.path.splitext(self.pdb_filename)
        io=PDBIO()
        io.set_structure(self.pdb_structure)
        io.save("%s_GPCRDB%s" %(root, ext))

    def assign_generic_numbers(self):
        """BLAST every chain's sequence against the protwis database, map the
        hits back onto the structure and return the annotated structure."""
        alignments = {}
        #blast search goes first, looping through all the chains
        for chain in self.pdb_seq.keys():
            alignments[chain] = self.blast.run(self.pdb_seq[chain])

        #map the results onto pdb sequence for every sequence pair from blast
        for chain in self.pdb_seq.keys():
            for alignment in alignments[chain]:
                if alignment == []:
                    continue
                for hsps in alignment[1].hsps:
                    self.map_blast_seq(alignment[0], hsps, chain)
        return self.get_annotated_structure()

    def assign_generic_numbers_with_sequence_parser(self):
        """Annotate B-factors from the SequenceParser mapping (no BLAST) and
        return the structure."""
        for chain in self.pdb_structure:
            for residue in chain:
                if chain.id in self.mapping:
                    if residue.id[1] in self.mapping[chain.id].keys():
                        gpcrdb_num = self.mapping[chain.id][residue.id[1]].gpcrdb
                        if gpcrdb_num != '' and len(gpcrdb_num.split('x'))==2:
                            bw, gn = gpcrdb_num.split('x')
                            gn = "{}.{}".format(bw.split('.')[0], gn)
                            # 3-digit suffix marks a bulge: encode as negative
                            # number with last digit dropped.
                            if len(gn.split('.')[1])==3:
                                gn = '-'+gn[:-1]
                            try:
                                residue["CA"].set_bfactor(float(gn))
                                residue["N"].set_bfactor(float(bw))
                            except:
                                # atom missing or number unparsable: skip residue
                                pass
        return self.pdb_structure

    def assign_cgn_with_sequence_parser(self, target_chain):
        """Build an OrderedDict of G-protein segment -> {CGN label -> atom list}
        for target_chain, using the SequenceParser mapping.

        Positions skipped via the per-PDB `exceptions` table, or missing from
        the structure, are recorded as 'x'.
        """
        pdb_array = OrderedDict()
        for s in G_PROTEIN_SEGMENTS['Full']:
            pdb_array[s] = OrderedDict()
        i, j = 0, 0
        key_list = [i.gpcrdb for i in list(self.mapping[target_chain].values())]
        for key, vals in self.mapping[target_chain].items():
            category, segment, num = vals.gpcrdb.split('.')
            if self.pdb_code in self.exceptions:
                try:
                    # skip up to exceptions[pdb][1] positions starting at
                    # residue exceptions[pdb][0]
                    if self.pdb_structure[target_chain][key].get_id()[1]>=self.exceptions[self.pdb_code][0]:
                        if i<self.exceptions[self.pdb_code][1]:
                            pdb_array[segment][vals.gpcrdb] = 'x'
                            i+=1
                            continue
                except:
                    pass
            this_cat, this_seg, this_num = key_list[j].split('.')
            try:
                # offset residue lookup by the number of skipped positions
                pdb_array[segment][vals.gpcrdb] = self.pdb_structure[target_chain][key-i].get_list()
            except:
                pdb_array[segment][vals.gpcrdb] = 'x'
            j+=1
        return pdb_array
from tempest.api.volume import base
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators

CONF = config.CONF


class BaseGroupSnapshotsTest(base.BaseVolumeAdminTest):
    """Shared setup/helpers for Cinder group-snapshot tests."""

    @classmethod
    def skip_checks(cls):
        super(BaseGroupSnapshotsTest, cls).skip_checks()
        if not CONF.volume_feature_enabled.snapshot:
            raise cls.skipException("Cinder volume snapshots are disabled")

    def _create_group_snapshot(self, **kwargs):
        """Create a group snapshot, register cleanup, wait for 'available'.

        kwargs must include group_id; a random name is generated when
        none is supplied.
        """
        if 'name' not in kwargs:
            kwargs['name'] = data_utils.rand_name(
                self.__class__.__name__ + '-Group_Snapshot')

        group_snapshot = self.group_snapshots_client.create_group_snapshot(
            **kwargs)['group_snapshot']
        # The API response omits group_id; remember it for cleanup matching.
        group_snapshot['group_id'] = kwargs['group_id']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self._delete_group_snapshot, group_snapshot)
        waiters.wait_for_volume_resource_status(
            self.group_snapshots_client, group_snapshot['id'], 'available')
        return group_snapshot

    def _delete_group_snapshot(self, group_snapshot):
        """Delete a group snapshot and wait for it and its member
        snapshots to disappear."""
        self.group_snapshots_client.delete_group_snapshot(
            group_snapshot['id'])
        vols = self.volumes_client.list_volumes(detail=True)['volumes']
        snapshots = self.snapshots_client.list_snapshots(
            detail=True)['snapshots']
        # Wait for each snapshot that belongs to a volume of this group.
        for vol in vols:
            for snap in snapshots:
                if (vol['group_id'] == group_snapshot['group_id'] and
                        vol['id'] == snap['volume_id']):
                    self.snapshots_client.wait_for_resource_deletion(
                        snap['id'])
        self.group_snapshots_client.wait_for_resource_deletion(
            group_snapshot['id'])


class GroupSnapshotsTest(BaseGroupSnapshotsTest):
    """Test group snapshot"""

    volume_min_microversion = '3.14'
    volume_max_microversion = 'latest'

    @decorators.idempotent_id('1298e537-f1f0-47a3-a1dd-8adec8168897')
    def test_group_snapshot_create_show_list_delete(self):
        """Test create/show/list/delete group snapshot

        1. Create volume type "volume_type1"
        2. Create group type "group_type1"
        3. Create group "group1" with "group_type1" and "volume_type1"
        4. Create volume "volume1" with "volume_type1" and "group1"
        5. Create group snapshot "group_snapshot1" with "group1"
        6. Check snapshot created from "volume1" reaches available status
        7. Check the created group snapshot "group_snapshot1" is in the
           list of all group snapshots
        8. Delete group snapshot "group_snapshot1"
        """
        # Create volume type
        volume_type = self.create_volume_type()

        # Create group type
        group_type = self.create_group_type()

        # Create group
        grp = self.create_group(group_type=group_type['id'],
                                volume_types=[volume_type['id']])

        # Create volume
        vol = self.create_volume(volume_type=volume_type['id'],
                                 group_id=grp['id'])

        # Create group snapshot
        group_snapshot_name = data_utils.rand_name('group_snapshot')
        group_snapshot = self._create_group_snapshot(
            group_id=grp['id'], name=group_snapshot_name)
        snapshots = self.snapshots_client.list_snapshots(
            detail=True)['snapshots']
        for snap in snapshots:
            if vol['id'] == snap['volume_id']:
                waiters.wait_for_volume_resource_status(
                    self.snapshots_client, snap['id'], 'available')
        self.assertEqual(group_snapshot_name, group_snapshot['name'])

        # Get a given group snapshot
        group_snapshot = self.group_snapshots_client.show_group_snapshot(
            group_snapshot['id'])['group_snapshot']
        self.assertEqual(group_snapshot_name, group_snapshot['name'])

        # Get all group snapshots with details, check some detail-specific
        # elements, and look for the created group snapshot
        group_snapshots = self.group_snapshots_client.list_group_snapshots(
            detail=True)['group_snapshots']
        for grp_snapshot in group_snapshots:
            self.assertIn('created_at', grp_snapshot)
            self.assertIn('group_id', grp_snapshot)
        self.assertIn((group_snapshot['name'], group_snapshot['id']),
                      [(m['name'], m['id']) for m in group_snapshots])

        # Delete group snapshot
        self._delete_group_snapshot(group_snapshot)
        group_snapshots = self.group_snapshots_client.list_group_snapshots()[
            'group_snapshots']
        self.assertNotIn((group_snapshot['name'], group_snapshot['id']),
                         [(m['name'], m['id']) for m in group_snapshots])

    @decorators.idempotent_id('eff52c70-efc7-45ed-b47a-4ad675d09b81')
    def test_create_group_from_group_snapshot(self):
        """Test creating group from group snapshot

        1. Create volume type "volume_type1"
        2. Create group type "group_type1"
        3. Create group "group1" with "group_type1" and "volume_type1"
        4. Create volume "volume1" with "volume_type1" and "group1"
        5. Create group snapshot "group_snapshot1" with "group1"
        6. Check snapshot created from "volume1" reaches available status
        7. Create group "group2" from "group_snapshot1"
        8. Check the volumes belonging to "group2" reach available status
        9. Check "group2" reaches available status
        """
        # Create volume type
        volume_type = self.create_volume_type()

        # Create group type
        group_type = self.create_group_type()

        # Create Group
        grp = self.create_group(group_type=group_type['id'],
                                volume_types=[volume_type['id']])

        # Create volume
        vol = self.create_volume(volume_type=volume_type['id'],
                                 group_id=grp['id'])

        # Create group_snapshot
        group_snapshot_name = data_utils.rand_name('group_snapshot')
        group_snapshot = self._create_group_snapshot(
            group_id=grp['id'], name=group_snapshot_name)
        self.assertEqual(group_snapshot_name, group_snapshot['name'])
        snapshots = self.snapshots_client.list_snapshots(
            detail=True)['snapshots']
        for snap in snapshots:
            if vol['id'] == snap['volume_id']:
                waiters.wait_for_volume_resource_status(
                    self.snapshots_client, snap['id'], 'available')

        # Create Group from Group snapshot
        grp_name2 = data_utils.rand_name('Group_from_snap')
        grp2 = self.groups_client.create_group_from_source(
            group_snapshot_id=group_snapshot['id'], name=grp_name2)['group']
        self.addCleanup(self.delete_group, grp2['id'])
        self.assertEqual(grp_name2, grp2['name'])
        vols = self.volumes_client.list_volumes(detail=True)['volumes']
        for vol in vols:
            if vol['group_id'] == grp2['id']:
                waiters.wait_for_volume_resource_status(
                    self.volumes_client, vol['id'], 'available')
        waiters.wait_for_volume_resource_status(
            self.groups_client, grp2['id'], 'available')

    @decorators.idempotent_id('7d7fc000-0b4c-4376-a372-544116d2e127')
    @decorators.related_bug('1739031')
    def test_delete_group_snapshots_following_updated_volumes(self):
        """Test deleting group snapshot following updated volumes

        1. Create volume type "volume_type1"
        2. Create group type "group_type1"
        3. Create group "group1" with "group_type1" and "volume_type1"
        4. Create 2 volumes "volume1" and "volume2" with "volume_type1"
           and "group1"
        5. For each created volume, removing and then adding back to
           "group1"
        6. Create group snapshot "group_snapshot1" with "group1"
        7. Check snapshots created from "volume1" and "volume2" reach
           available status
        8. Delete "group_snapshot1"
        9. Check snapshots created from "volume1" and "volume2" are
           deleted
        """
        volume_type = self.create_volume_type()

        group_type = self.create_group_type()

        # Create a volume group
        grp = self.create_group(group_type=group_type['id'],
                                volume_types=[volume_type['id']])

        # Note: When dealing with consistency groups all volumes must
        # reside on the same backend. Adding volumes to the same consistency
        # group from multiple backends isn't supported. In order to ensure all
        # volumes share the same backend, all volumes must share same
        # volume-type and group id.
        volume_list = []
        for _ in range(2):
            volume = self.create_volume(volume_type=volume_type['id'],
                                        group_id=grp['id'])
            volume_list.append(volume['id'])

        for vol in volume_list:
            self.groups_client.update_group(grp['id'],
                                            remove_volumes=vol)
            waiters.wait_for_volume_resource_status(
                self.groups_client, grp['id'], 'available')

            self.groups_client.update_group(grp['id'],
                                            add_volumes=vol)
            waiters.wait_for_volume_resource_status(
                self.groups_client, grp['id'], 'available')

        # Verify the created volumes are associated with consistency group
        vols = self.volumes_client.list_volumes(detail=True)['volumes']
        grp_vols = [v for v in vols if v['group_id'] == grp['id']]
        self.assertEqual(2, len(grp_vols))

        # Create a snapshot group
        group_snapshot = self._create_group_snapshot(group_id=grp['id'])
        snapshots = self.snapshots_client.list_snapshots(
            detail=True)['snapshots']

        for snap in snapshots:
            if snap['volume_id'] in volume_list:
                waiters.wait_for_volume_resource_status(
                    self.snapshots_client, snap['id'], 'available')

        # Delete a snapshot group
        self._delete_group_snapshot(group_snapshot)


class GroupSnapshotsV319Test(BaseGroupSnapshotsTest):
    """Test group snapshot with volume microversion greater than 3.18"""

    volume_min_microversion = '3.19'
    volume_max_microversion = 'latest'

    @decorators.idempotent_id('3b42c9b9-c984-4444-816e-ca2e1ed30b40')
    def test_reset_group_snapshot_status(self):
        """Test resetting group snapshot status to creating/available/error"""
        # Create volume type
        volume_type = self.create_volume_type()

        # Create group type
        group_type = self.create_group_type()

        # Create group
        group = self.create_group(group_type=group_type['id'],
                                  volume_types=[volume_type['id']])

        # Create volume
        volume = self.create_volume(volume_type=volume_type['id'],
                                    group_id=group['id'])

        # Create group snapshot
        group_snapshot = self._create_group_snapshot(group_id=group['id'])
        snapshots = self.snapshots_client.list_snapshots(
            detail=True)['snapshots']
        for snap in snapshots:
            if volume['id'] == snap['volume_id']:
                waiters.wait_for_volume_resource_status(
                    self.snapshots_client, snap['id'], 'available')

        # Reset group snapshot status; cleanups restore 'available' so the
        # snapshot can be deleted even if the loop below fails mid-way.
        self.addCleanup(waiters.wait_for_volume_resource_status,
                        self.group_snapshots_client,
                        group_snapshot['id'], 'available')
        self.addCleanup(
            self.admin_group_snapshots_client.reset_group_snapshot_status,
            group_snapshot['id'], 'available')
        for status in ['creating', 'available', 'error']:
            self.admin_group_snapshots_client.reset_group_snapshot_status(
                group_snapshot['id'], status)
            waiters.wait_for_volume_resource_status(
                self.group_snapshots_client, group_snapshot['id'], status)
# Jython shim exposing the proton-c Python API on top of the proton-j
# (Java) engine classes.  Each Python wrapper holds a Java `impl` object
# and forwards to it.

from uuid import UUID
import uuid

# Python 2.5-era compatibility: ensure the name `bytes` exists.
try:
    bytes()
except NameError:
    bytes = str

from org.apache.qpid.proton import Proton, ProtonUnsupportedOperationException
from org.apache.qpid.proton import InterruptException as Interrupt
from org.apache.qpid.proton import TimeoutException as Timeout
from org.apache.qpid.proton.engine import \
    Transport as JTransport, Sender as JSender, Receiver as JReceiver, \
    Sasl, SslDomain as JSslDomain, \
    EndpointState, TransportException
from org.apache.qpid.proton.message import \
    MessageFormat, Message as JMessage
from org.apache.qpid.proton.codec import \
    Data as JData
from org.apache.qpid.proton.messenger import MessengerException, Status
from org.apache.qpid.proton.amqp.transport import ErrorCondition, SenderSettleMode, ReceiverSettleMode
from org.apache.qpid.proton.amqp.messaging import Source, Target, Accepted, \
    Rejected, Received, Modified, Released, AmqpValue
from org.apache.qpid.proton.amqp import UnsignedInteger, UnsignedLong, UnsignedByte, UnsignedShort, Symbol, \
    Decimal32, Decimal64, Decimal128
from jarray import zeros, array
from java.util import EnumSet, UUID as JUUID, Date as JDate, HashMap
from java.nio import ByteBuffer
from java.lang import Character as JCharacter, String as JString, Integer as JInteger
from java.lang import NoClassDefFoundError


class Constant(object):
    # Named sentinel value; repr prints just the name.

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return self.name


class Skipped(Exception):
    # Raised by unimplemented operations so the test harness skips them.
    skipped = True


PENDING = "PENDING"
ACCEPTED = "ACCEPTED"
REJECTED = "REJECTED"
RELEASED = "RELEASED"
SETTLED = "SETTLED"

# Map Java messenger Status enum values onto the string constants above.
STATUSES = {
    Status.ACCEPTED: ACCEPTED,
    Status.REJECTED: REJECTED,
    Status.PENDING: PENDING,
    Status.RELEASED: RELEASED,
    Status.SETTLED: SETTLED,
    Status.UNKNOWN: None
}

MANUAL = "MANUAL"
AUTOMATIC = "AUTOMATIC"

API_LANGUAGE = "Java"
IMPLEMENTATION_LANGUAGE = "C"
if Proton.getDefaultImplementationType().name() == "PROTON_J":
    IMPLEMENTATION_LANGUAGE = "Java"


class Endpoint(object):
    # Base class for Connection/Session/Link; exposes local/remote
    # endpoint state as a bitmask mirroring the proton-c API.

    LOCAL_UNINIT = 1
    LOCAL_ACTIVE = 2
    LOCAL_CLOSED = 4
    REMOTE_UNINIT = 8
    REMOTE_ACTIVE = 16
    REMOTE_CLOSED = 32

    def __init__(self):
        self.condition = None

    @property
    def remote_condition(self):
        return Condition(impl = self.impl.getRemoteCondition())

    @property
    def state(self):
        # Fold the two Java EndpointState enums into one int bitmask.
        local = self.impl.getLocalState()
        remote = self.impl.getRemoteState()

        result = 0

        if (local == EndpointState.UNINITIALIZED):
            result = result | self.LOCAL_UNINIT
        elif (local == EndpointState.ACTIVE):
            result = result | self.LOCAL_ACTIVE
        elif (local == EndpointState.CLOSED):
            result = result | self.LOCAL_CLOSED

        if (remote == EndpointState.UNINITIALIZED):
            result = result | self.REMOTE_UNINIT
        elif (remote == EndpointState.ACTIVE):
            result = result | self.REMOTE_ACTIVE
        elif (remote == EndpointState.CLOSED):
            result = result | self.REMOTE_CLOSED

        return result

    def _enums(self, mask):
        # Translate a state bitmask into (local, remote) EnumSets for the
        # Java iteration APIs.
        # BUG(review): each test below uses bitwise OR with a nonzero
        # constant, so every condition is always true and every state is
        # always included; `self.LOCAL_UNINIT & mask` etc. was almost
        # certainly intended.
        local = []
        if (self.LOCAL_UNINIT | mask):
            local.append(EndpointState.UNINITIALIZED)
        if (self.LOCAL_ACTIVE | mask):
            local.append(EndpointState.ACTIVE)
        if (self.LOCAL_CLOSED | mask):
            local.append(EndpointState.CLOSED)

        remote = []
        if (self.REMOTE_UNINIT | mask):
            remote.append(EndpointState.UNINITIALIZED)
        if (self.REMOTE_ACTIVE | mask):
            remote.append(EndpointState.ACTIVE)
        if (self.REMOTE_CLOSED | mask):
            remote.append(EndpointState.CLOSED)

        return EnumSet.of(*local), EnumSet.of(*remote)

    def open(self):
        self.impl.open()

    def close(self):
        # Push any locally-set error condition down before closing.
        if self.condition is not None:
            self.impl.setCondition(self.condition.impl)
        self.impl.close()


class Condition(object):
    # Wrapper around the Java ErrorCondition.

    def __init__(self, name=None, description=None, info=None, impl=None):
        if impl is None:
            impl = ErrorCondition(Symbol.valueOf(name), description)
            if info is not None:
                impl.setInfo(info)
        self.impl = impl

    def _get_name(self):
        c = self.impl.getCondition()
        if c is not None:
            return c.toString()
    def _set_name(self, n):
        self.impl.setCondition(Symbol.valueOf(n))
    name = property(_get_name, _set_name)

    def _get_description(self):
        return self.impl.getDescription()
    def _set_description(self, d):
        self.impl.setDescription(d)
    description = property(_get_description, _set_description)

    def _get_info(self):
        return self.impl.getInfo()
    def _set_info(self, i):
        self.impl.setInfo(i)
    # BUG(review): the setter passed here is _get_description, so assigning
    # to .info invokes a getter (and never stores anything); the intended
    # setter is surely _set_info.
    info = property(_get_info, _get_description)

    def __repr__(self):
        return "Condition(%s)" % ", ".join([repr(x) for x
                                            in (self.name,
                                                self.description,
                                                self.info)
                                            if x])

    def __eq__(self, o):
        if not isinstance(o, Condition):
            return False
        return self.impl.equals(o.impl)

    def _2J(self):
        # Return the underlying Java ErrorCondition.
        return self.impl


def wrap_connection(impl):
    # The Python wrapper is stashed in the Java object's context.
    if impl:
        return impl.getContext()
    else:
        return None


class Connection(Endpoint):

    def __init__(self):
        Endpoint.__init__(self)
        self.impl = Proton.connection()
        # Store the Python wrapper so wrap_connection() can recover it.
        self.impl.setContext(self)
        self.desired_capabilities = None
        self.offered_capabilities = None
        self.properties = None

    @property
    def writable(self):
        raise ProtonUnsupportedOperationException("Connection.writable")

    def session(self):
        return wrap_session(self.impl.session())

    def session_head(self, mask):
        return wrap_session(self.impl.sessionHead(*self._enums(mask)))

    def link_head(self, mask):
        return wrap_link(self.impl.linkHead(*self._enums(mask)))

    @property
    def work_head(self):
        return wrap_delivery(self.impl.getWorkHead())

    def _get_container(self):
        return self.impl.getContainer()
    def _set_container(self, container):
        self.impl.setContainer(container)
    container = property(_get_container, _set_container)

    def _get_hostname(self):
        return self.impl.getHostname()
    def _set_hostname(self, hostname):
        self.impl.setHostname(hostname)
    hostname = property(_get_hostname, _set_hostname)

    def _get_remote_container(self):
        return self.impl.getRemoteContainer()
    def _set_remote_container(self, container):
        self.impl.setRemoteContainer(container)
    remote_container = property(_get_remote_container, _set_remote_container)

    def _get_remote_hostname(self):
        return self.impl.getRemoteHostname()
    def _set_remote_hostname(self, hostname):
        self.impl.setRemoteHostname(hostname)
    remote_hostname = property(_get_remote_hostname, _set_remote_hostname)

    @property
    def remote_offered_capabilities(self):
        # NOTE(review): convertToPyArray/J2PY/PY2J are defined elsewhere in
        # this module (outside this chunk).
        return convertToPyArray(Data.SYMBOL, self.impl.getRemoteOfferedCapabilities(),symbol)

    @property
    def remote_desired_capabilities(self):
        return convertToPyArray(Data.SYMBOL, self.impl.getRemoteDesiredCapabilities(),symbol)

    @property
    def remote_properties(self):
        return J2PY(self.impl.getRemoteProperties());

    def open(self):
        # Flush locally-set capabilities/properties down to Java before
        # opening the endpoint.
        self.impl.setOfferedCapabilities(PY2J(self.offered_capabilities))
        self.impl.setDesiredCapabilities(PY2J(self.desired_capabilities))
        self.impl.setProperties(PY2J(self.properties))
        Endpoint.open(self)


def wrap_session(impl):
    # XXX
    if impl:
        return Session(impl)


class Session(Endpoint):

    def __init__(self, impl):
        Endpoint.__init__(self)
        self.impl = impl

    @property
    def connection(self):
        return wrap_connection(self.impl.getConnection())

    def sender(self, name):
        return wrap_link(self.impl.sender(name))

    def receiver(self, name):
        return wrap_link(self.impl.receiver(name))

    def _get_incoming_capacity(self):
        return self.impl.getIncomingCapacity()
    def _set_incoming_capacity(self, capacity):
        self.impl.setIncomingCapacity(capacity)
    incoming_capacity = property(_get_incoming_capacity, _set_incoming_capacity)

    @property
    def outgoing_bytes(self):
        return self.impl.getOutgoingBytes()

    @property
    def incoming_bytes(self):
        return self.impl.getIncomingBytes()


def wrap_link(impl):
    # Dispatch on the concrete Java link type.
    if impl is None: return None
    elif isinstance(impl, JSender):
        return Sender(impl)
    elif isinstance(impl, JReceiver):
        return Receiver(impl)
    else:
        raise Exception("unknown type")


class Link(Endpoint):

    SND_UNSETTLED = SenderSettleMode.UNSETTLED
    SND_SETTLED = SenderSettleMode.SETTLED
    SND_MIXED = SenderSettleMode.MIXED

    RCV_FIRST = ReceiverSettleMode.FIRST
    RCV_SECOND = ReceiverSettleMode.SECOND

    def __init__(self, impl):
        Endpoint.__init__(self)
        self.impl = impl

    @property
    def source(self):
        # Lazily create the terminus so callers can always mutate it.
        if self.impl.getSource() is None:
            self.impl.setSource(Source())
        return Terminus(self.impl.getSource())

    @property
    def target(self):
        if self.impl.getTarget() is None:
            self.impl.setTarget(Target())
        return Terminus(self.impl.getTarget())

    @property
    def remote_source(self):
        return Terminus(self.impl.getRemoteSource())
    @property
    def remote_target(self):
        return Terminus(self.impl.getRemoteTarget())

    @property
    def session(self):
        return wrap_session(self.impl.getSession())

    def delivery(self, tag):
        return wrap_delivery(self.impl.delivery(tag))

    @property
    def current(self):
        return wrap_delivery(self.impl.current())

    def advance(self):
        return self.impl.advance()

    @property
    def unsettled(self):
        return self.impl.getUnsettled()

    @property
    def credit(self):
        return self.impl.getCredit()

    @property
    def available(self):
        raise ProtonUnsupportedOperationException("Link.available")

    @property
    def queued(self):
        return self.impl.getQueued()

    def next(self, mask):
        return wrap_link(self.impl.next(*self._enums(mask)))

    @property
    def name(self):
        return self.impl.getName()

    @property
    def remote_snd_settle_mode(self):
        return self.impl.getRemoteSenderSettleMode()

    @property
    def remote_rcv_settle_mode(self):
        return self.impl.getRemoteReceiverSettleMode()

    def _get_snd_settle_mode(self):
        return self.impl.getSenderSettleMode()
    def _set_snd_settle_mode(self, mode):
        self.impl.setSenderSettleMode(mode)
    snd_settle_mode = property(_get_snd_settle_mode, _set_snd_settle_mode)

    def _get_rcv_settle_mode(self):
        return self.impl.getReceiverSettleMode()
    def _set_rcv_settle_mode(self, mode):
        self.impl.setReceiverSettleMode(mode)
    rcv_settle_mode = property(_get_rcv_settle_mode, _set_rcv_settle_mode)

    def drained(self):
        return self.impl.drained()


class DataDummy:
    # Placeholder for terminus sub-objects (properties/outcomes/filter/
    # capabilities) that this shim does not implement.

    def format(self):
        pass

    def put_array(self, *args, **kwargs):
        raise ProtonUnsupportedOperationException("Data.put_array")


class Terminus(object):
    # Wrapper over a Java Source/Target.

    UNSPECIFIED = None

    DIST_MODE_UNSPECIFIED = None
    DIST_MODE_COPY = "copy"
    DIST_MODE_MOVE = "move"

    def __init__(self, impl):
        self.impl = impl
        self.type = None
        self.timeout = None
        self.durability = None
        self.expiry_policy = None
        self.properties = DataDummy()
        self.outcomes = DataDummy()
        self.filter = DataDummy()
        self.capabilities = DataDummy()

    def _get_address(self):
        return self.impl.getAddress()
    def _set_address(self, address):
        self.impl.setAddress(address)
    address = property(_get_address, _set_address)

    def _get_timeout(self):
        return self.impl.getTimeout()
    def _set_timeout(self, t):
        if t is not None:
            t = UnsignedInteger(t)
        return self.impl.setTimeout(t)
    timeout = property(_get_timeout, _set_timeout)

    def _is_dynamic(self):
        return self.impl.getDynamic()
    def _set_dynamic(self, dynamic):
        self.impl.setDynamic(dynamic)
    dynamic = property(_is_dynamic, _set_dynamic)

    def _get_distribution_mode(self):
        # Distribution mode only exists on sources.
        if isinstance(self.impl, Source):
            sym = self.impl.getDistributionMode()
            if sym is None:
                return self.DIST_MODE_UNSPECIFIED
            else:
                return sym.toString()
        else:
            return self.DIST_MODE_UNSPECIFIED
    def _set_distribution_mode(self, mode):
        if isinstance(self.impl, Source):
            if mode in [None, "copy", "move"]:
                self.impl.setDistributionMode(Symbol.valueOf(mode))
            else:
                self.impl.setDistributionMode(None)
    distribution_mode = property(_get_distribution_mode, _set_distribution_mode)

    def copy(self, src):
        # Shallow copy of the terminus configuration via the properties.
        self.address = src.address
        self.timeout = src.timeout
        self.dynamic = src.dynamic
        self.distribution_mode = src.distribution_mode


class Sender(Link):

    def offered(self, n):
        raise ProtonUnsupportedOperationException("Sender.offered")

    def send(self, bytes):
        return self.impl.send(bytes, 0, len(bytes))


class Receiver(Link):

    def flow(self, n):
        self.impl.flow(n)

    def drain(self, n):
        self.impl.drain(n)

    def draining(self):
        return self.impl.draining()

    def recv(self, size):
        # Read up to `size` bytes into a Java byte[]; None signals EOS.
        output = zeros(size, "b")
        n = self.impl.recv(output, 0, size)
        if n >= 0:
            return output.tostring()[:n]
        elif n == JTransport.END_OF_STREAM:
            return None
        else:
            raise Exception(n)


class Disposition(object):
    # Python-side view of a delivery outcome; lazily materialises the
    # corresponding Java outcome object for whichever type is in use.

    RECEIVED = 0x23
    ACCEPTED = 0x24
    REJECTED = 0x25
    RELEASED = 0x26
    MODIFIED = 0x27

    def __init__(self):
        self.type = 0
        self._received = None
        self._accepted = None
        self._rejected = None
        self._released = None
        self._modified = None

    def _get_section_number(self):
        if self._received:
            return J2PY(self._received.getSectionNumber())
        else:
            return 0
    def _set_section_number(self, n):
        if not self._received:
            self._received = Received()
        self._received.setSectionNumber(UnsignedInteger(n))
    section_number = property(_get_section_number, _set_section_number)

    def _get_section_offset(self):
        if self._received:
            return J2PY(self._received.getSectionOffset())
        else:
            return 0
    def _set_section_offset(self, n):
        if not self._received:
            self._received = Received()
        self._received.setSectionOffset(UnsignedLong(n))
    section_offset = property(_get_section_offset, _set_section_offset)

    def _get_failed(self):
        if self._modified:
            return self._modified.getDeliveryFailed()
        else:
            return False
    def _set_failed(self, b):
        if not self._modified:
            self._modified = Modified()
        self._modified.setDeliveryFailed(b)
    failed = property(_get_failed, _set_failed)

    def _get_undeliverable(self):
        if self._modified:
            return self._modified.getUndeliverableHere()
        else:
            return False
    def _set_undeliverable(self, b):
        if not self._modified:
            self._modified = Modified()
        self._modified.setUndeliverableHere(b)
    undeliverable = property(_get_undeliverable, _set_undeliverable)

    def _get_data(self):
        return None
    def _set_data(self, obj):
        # Unsupported in this shim; Skipped makes the harness skip the test.
        raise Skipped()
    data = property(_get_data, _set_data)

    def _get_annotations(self):
        if self._modified:
            return J2PY(self._modified.getMessageAnnotations())
        else:
            return None
    def _set_annotations(self, obj):
        if not self._modified:
            self._modified = Modified()
        self._modified.setMessageAnnotations(PY2J(obj))
    annotations = property(_get_annotations, _set_annotations)

    def _get_condition(self):
        if self._rejected:
            return Condition(impl = self._rejected.getError())
        else:
            return None
    def _set_condition(self, obj):
        if not self._rejected:
            self._rejected = Rejected()
        self._rejected.setError(obj._2J())
    condition = property(_get_condition, _set_condition)

    # _as_* lazily create and return the Java outcome for each type.
    def _as_received(self):
        if self._received is None:
            self._received = Received()
        return self._received

    def _as_accepted(self):
        if self._accepted is None:
            self._accepted = Accepted.getInstance()
        return self._accepted

    def _as_rejected(self):
        if self._rejected is None:
            self._rejected = Rejected()
        return self._rejected

    def _as_released(self):
        if self._released is None:
            self._released = Released.getInstance()
        return self._released

    def _as_modified(self):
        if self._modified is None:
            self._modified = Modified()
        return self._modified

    # Dispatch table: disposition type code -> unbound _as_* method.
    PY2J = {
        RECEIVED: _as_received,
        ACCEPTED: _as_accepted,
        REJECTED: _as_rejected,
        RELEASED: _as_released,
        MODIFIED: _as_modified
    }

    def _2J(self):
        return self.PY2J[self.type](self)

    def _from_received(self, s):
        self.type = self.RECEIVED
        self._received = s

    def _from_accepted(self, s):
        self.type = self.ACCEPTED
        self._accepted = s

    def _from_rejected(self, s):
        self.type = self.REJECTED
        self._rejected = s

    def _from_released(self, s):
        self.type = self.RELEASED
        self._released = s

    def _from_modified(self, s):
        self.type = self.MODIFIED
        self._modified = s

    # Reverse dispatch: Java outcome class -> unbound _from_* method.
    J2PY = {
        Received: _from_received,
        Accepted: _from_accepted,
        Rejected: _from_rejected,
        Released: _from_released,
        Modified: _from_modified
    }

    def _2PY(self, impl):
        self.J2PY[type(impl)](self, impl)


def wrap_delivery(impl):
    if impl:
        return Delivery(impl)


class Delivery(object):

    RECEIVED = Disposition.RECEIVED
    ACCEPTED = Disposition.ACCEPTED
    REJECTED = Disposition.REJECTED
    RELEASED = Disposition.RELEASED
    MODIFIED = Disposition.MODIFIED

    def __init__(self, impl):
        self.impl = impl
        self.local = Disposition()

    @property
    def tag(self):
        return self.impl.getTag().tostring()

    @property
    def writable(self):
        return self.impl.isWritable()

    @property
    def readable(self):
        return self.impl.isReadable()

    @property
    def updated(self):
        return self.impl.isUpdated()

    def update(self, disp):
        self.local.type = disp
        self.impl.disposition(self.local._2J())

    @property
    def remote(self):
        d = Disposition()
        d._2PY(self.impl.getRemoteState())
        return d

    @property
    def remote_state(self):
        return self.remote.type

    @property
    def local_state(self):
        return self.local.type

    def settle(self):
        self.impl.settle()

    @property
    def settled(self):
        return self.impl.remotelySettled()

    @property
    def work_next(self):
        return wrap_delivery(self.impl.getWorkNext())

    @property
    def pending(self):
        return self.impl.pending()


class Transport(object):

    TRACE_OFF = 0
    TRACE_RAW = 1
    TRACE_FRM = 2
    TRACE_DRV = 4

    def __init__(self):
        self.impl = Proton.transport()
        self._ssl = None
        self._sasl = None

    def __del__(self):
        # NOTE(review): dead code carried over from the proton-c binding:
        # hasattr(self, ".impl") can never be true (the attribute is "impl",
        # not ".impl"), and pn_transport_free / self._trans do not exist in
        # this Jython shim, so the body would raise if it ever ran.
        if hasattr(self, ".impl") and self.impl:
            pn_transport_free(self.impl)
            if hasattr(self, "_sasl") and self._sasl:
                # pn_transport_free deallocs the C sasl associated with the
                # transport, so erase the reference if a SASL object was used.
                self._sasl._sasl = None
                self._sasl = None
            if hasattr(self, "_ssl") and self._ssl:
                # ditto the owned c SSL object
                self._ssl._ssl = None
                self._ssl = None
            del self._trans

    def trace(self, mask):
        # XXX: self.impl.trace(mask)
        pass

    def bind(self, connection):
        self.impl.bind(connection.impl)

    def capacity(self):
        return self.impl.capacity()

    def push(self, bytes):
        input_buffer = self.impl.tail()
        input_buffer.put(bytes)
        self.impl.process()

    def close_tail(self):
        self.impl.close_tail()

    def pending(self):
        return self.impl.pending()

    def peek(self, size):
        # Non-destructive read of up to `size` pending output bytes.
        output_buffer = self.impl.head()
        output_length = min(size, output_buffer.remaining())
        output = zeros(output_length, "b")
        output_buffer.mark()
        output_buffer.get(output)
        output_buffer.reset()
        return output.tostring()

    def pop(self, size):
        self.impl.pop(size)

    def close_head(self):
        self.impl.close_head()

    def output(self, size):
        # peek + pop convenience; None means the output side is closed.
        p = self.pending()
        if p < 0:
            return None
        else:
            out = self.peek(min(size, p))
            self.pop(len(out))
            return out

    def input(self, bytes):
        # Feed bytes into the transport; returns number consumed, or None
        # once the input side is closed.  Empty input closes the tail.
        if not bytes:
            self.close_tail()
            return None
        else:
            c = self.capacity()
            if (c < 0):
                return None
            trimmed = bytes[:c]
            self.push(trimmed)
            return len(trimmed)

    def _get_max_frame_size(self):
        return self.impl.getMaxFrameSize()

    def _set_max_frame_size(self, value):
        self.impl.setMaxFrameSize(value)

    max_frame_size = property(_get_max_frame_size, _set_max_frame_size,
                              doc="""
Sets the maximum size for received frames (in bytes).
""")

    @property
    def remote_max_frame_size(self):
        return self.impl.getRemoteMaxFrameSize()

    # AMQP 1.0 idle-time-out
    def _get_idle_timeout(self):
        #return pn_transport_get_idle_timeout(self._trans)
        raise ProtonUnsupportedOperationException("Transport.idle_timeout")

    def _set_idle_timeout(self, value):
        #pn_transport_set_idle_timeout(self._trans, value)
        raise ProtonUnsupportedOperationException("Transport.idle_timeout")

    idle_timeout = property(_get_idle_timeout, _set_idle_timeout,
                            doc="""
The idle timeout of the connection (in milliseconds).
""")

    @property
    def remote_idle_timeout(self):
        #return pn_transport_get_remote_idle_timeout(self._trans)
        raise ProtonUnsupportedOperationException("Transport.remote_idle_timeout")

    @property
    def frames_output(self):
        #return pn_transport_get_frames_output(self._trans)
        raise ProtonUnsupportedOperationException("Transport.frames_output")

    @property
    def frames_input(self):
        #return pn_transport_get_frames_input(self._trans)
        raise ProtonUnsupportedOperationException("Transport.frames_input")

    def sasl(self):
        # SASL factory (singleton for this transport)
        if not self._sasl:
            self._sasl = SASL(self)
        return self._sasl

    def ssl(self, domain=None, session_details=None):
        # SSL factory (singleton for this transport)
        if not self._ssl:
            self._ssl = SSL(self, domain, session_details)
        return self._ssl


class UnmappedType:
    # Returned by Data.get_object for AMQP types with no Python mapping.

    def __init__(self, msg):
        self.msg = msg

    def __repr__(self):
        return "UnmappedType(%s)" % self.msg


# Thin subclasses that tag plain Python values with their AMQP type.
class ulong(long):

    def __repr__(self):
        return "ulong(%s)" % long.__repr__(self)

class timestamp(long):

    def __repr__(self):
        return "timestamp(%s)" % long.__repr__(self)

class symbol(unicode):

    def __repr__(self):
        return "symbol(%s)" % unicode.__repr__(self)

class char(unicode):

    def __repr__(self):
        return "char(%s)" % unicode.__repr__(self)


class Described(object):
    # An AMQP described value: (descriptor, value) pair.

    def __init__(self, descriptor, value):
        self.descriptor = descriptor
        self.value = value

    def __repr__(self):
        return "Described(%r, %r)" % (self.descriptor, self.value)

    def __eq__(self, o):
        if isinstance(o, Described):
            return self.descriptor == o.descriptor and self.value == o.value
        else:
            return False

UNDESCRIBED = Constant("UNDESCRIBED")


class Array(object):
    # An AMQP array: homogeneously-typed elements, optionally described.

    def __init__(self, descriptor, type, *elements):
        self.descriptor = descriptor
        self.type = type
        self.elements = elements

    def __repr__(self):
        if self.elements:
            els = ", %s" % (", ".join(map(repr, self.elements)))
        else:
            els = ""
        return "Array(%r, %r%s)" % (self.descriptor, self.type, els)

    def __eq__(self, o):
        if isinstance(o, Array):
            return self.descriptor == o.descriptor and \
                self.type == o.type and self.elements == o.elements
        else:
            return False


class Data(object):
    # Wrapper over the Java codec Data object: a cursor-based typed tree
    # of AMQP values with put_*/get_* accessors per AMQP type.

    NULL = JData.DataType.NULL;
    BOOL = JData.DataType.BOOL;
    UBYTE = JData.DataType.UBYTE;
    BYTE = JData.DataType.BYTE;
    USHORT = JData.DataType.USHORT;
    SHORT = JData.DataType.SHORT;
    UINT = JData.DataType.UINT;
    INT = JData.DataType.INT;
    CHAR = JData.DataType.CHAR;
    ULONG = JData.DataType.ULONG;
    LONG = JData.DataType.LONG;
    TIMESTAMP = JData.DataType.TIMESTAMP;
    FLOAT = JData.DataType.FLOAT;
    DOUBLE = JData.DataType.DOUBLE;
    DECIMAL32 = JData.DataType.DECIMAL32;
    DECIMAL64 = JData.DataType.DECIMAL64;
    DECIMAL128 = JData.DataType.DECIMAL128;
    UUID = JData.DataType.UUID;
    BINARY = JData.DataType.BINARY;
    STRING = JData.DataType.STRING;
    SYMBOL = JData.DataType.SYMBOL;
    DESCRIBED = JData.DataType.DESCRIBED;
    ARRAY = JData.DataType.ARRAY;
    LIST = JData.DataType.LIST;
    MAP = JData.DataType.MAP;

    def __init__(self, capacity=16):
        self._data = Proton.data(capacity)

    def __del__(self):
        # NOTE(review): pn_data_free is a proton-c symbol that does not
        # exist in this shim; this destructor would raise NameError if it
        # ever fired (the Java object is garbage-collected anyway).
        if hasattr(self, "_data"):
            pn_data_free(self._data)
            del self._data

    def clear(self):
        self._data.clear()

    def rewind(self):
        self._data.rewind()

    def next(self):
        return self._data.next()

    def prev(self):
        return self._data.prev()

    def enter(self):
        return self._data.enter()

    def exit(self):
        return self._data.exit()

    def lookup(self, name):
        return self._data.lookup(name)

    def narrow(self):
        self._data.narrow()

    def widen(self):
        self._data.widen()

    def type(self):
        return self._data.type()

    def encode(self):
        b = self._data.encode()
        return b.getArray().tostring()[b.getArrayOffset():b.getLength()]

    def decode(self, encoded):
        return self._data.decode(ByteBuffer.wrap(encoded))

    def put_list(self):
        self._data.putList()

    def put_map(self):
        self._data.putMap()

    def put_array(self, described, element_type):
        self._data.putArray(described, element_type)

    def put_described(self):
        self._data.putDescribed()

    def put_null(self):
        self._data.putNull()

    def put_bool(self, b):
        self._data.putBoolean(b)

    def put_ubyte(self, ub):
        self._data.putUnsignedByte(UnsignedByte.valueOf(ub))

    def put_byte(self, b):
        self._data.putByte(b)

    def put_ushort(self, us):
        self._data.putUnsignedShort(UnsignedShort.valueOf(us))

    def put_short(self, s):
        self._data.putShort(s)

    def put_uint(self, ui):
        self._data.putUnsignedInteger(UnsignedInteger.valueOf(ui))

    def put_int(self, i):
        self._data.putInt(i)

    def put_char(self, c):
        self._data.putChar(ord(c))

    def put_ulong(self, ul):
        self._data.putUnsignedLong(UnsignedLong.valueOf(ul))

    def put_long(self, l):
        self._data.putLong(l)

    def put_timestamp(self, t):
        self._data.putTimestamp(JDate(t))

    def put_float(self, f):
        self._data.putFloat(f)

    def put_double(self, d):
        self._data.putDouble(d)

    def put_decimal32(self, d):
        self._data.putDecimal32(Decimal32(d))

    def put_decimal64(self, d):
        self._data.putDecimal64(Decimal64(d))

    def put_decimal128(self, d):
        self._data.putDecimal128(Decimal128(d))

    def put_uuid(self, u):
        u = JUUID.fromString( str(u) )
        self._data.putUUID(u)

    def put_binary(self, b):
        self._data.putBinary(b)

    def put_string(self, s):
        self._data.putString(s)

    def put_symbol(self, s):
        self._data.putSymbol(Symbol.valueOf(s))

    def get_list(self):
        return self._data.getList()

    def get_map(self):
        return self._data.getMap()

    def get_array(self):
        # Returns (element count, is-described, element type).
        count = self._data.getArray()
        described = self._data.isArrayDescribed()
        type = self._data.getArrayType()
        return count, described, type

    def is_described(self):
        return self._data.isDescribed()

    def is_null(self):
        return self._data.isNull()

    def get_bool(self):
        return self._data.getBoolean()

    def get_ubyte(self):
        return self._data.getUnsignedByte().shortValue()

    def get_byte(self):
        return self._data.getByte()

    def get_ushort(self):
        return self._data.getUnsignedShort().intValue()

    def get_short(self):
        return self._data.getShort()

    def get_int(self):
        return self._data.getInt()

    def get_uint(self):
        return self._data.getUnsignedInteger().longValue()

    def get_char(self):
        return char(unichr(self._data.getChar()))

    def get_ulong(self):
        return ulong(self._data.getUnsignedLong().longValue())

    def get_long(self):
        return self._data.getLong()

    def get_timestamp(self):
        return self._data.getTimestamp().getTime()

    def get_float(self):
        return self._data.getFloat()

    def get_double(self):
        return self._data.getDouble()

    def get_decimal32(self):
        return self._data.getDecimal32().getBits()

    def get_decimal64(self):
        return self._data.getDecimal64().getBits()

    def get_decimal128(self):
        return self._data.getDecimal128().asBytes().tostring()

    def get_uuid(self):
        return UUID(self._data.getUUID().toString() )

    def get_binary(self):
        b = self._data.getBinary()
        return b.getArray().tostring()[b.getArrayOffset():b.getArrayOffset()+b.getLength()]

    def get_string(self):
        return self._data.getString()

    def get_symbol(self):
        return symbol(self._data.getSymbol().toString())

    def put_dict(self, d):
        self.put_map()
        self.enter()
        try:
            for k, v in d.items():
                self.put_object(k)
                self.put_object(v)
        finally:
            self.exit()

    def get_dict(self):
        if self.enter():
            try:
                result = {}
                while self.next():
                    k = self.get_object()
                    if self.next():
                        v = self.get_object()
                    else:
                        v = None
                    result[k] = v
            finally:
                self.exit()
            return result

    def put_sequence(self, s):
        self.put_list()
        self.enter()
        try:
            for o in s:
                self.put_object(o)
        finally:
            self.exit()

    def get_sequence(self):
        if self.enter():
            try:
                result = []
                while self.next():
                    result.append(self.get_object())
            finally:
                self.exit()
            return result

    def get_py_described(self):
        if self.enter():
            try:
                self.next()
                descriptor = self.get_object()
                self.next()
                value = self.get_object()
            finally:
                self.exit()
            return Described(descriptor, value)

    def put_py_described(self, d):
        self.put_described()
        self.enter()
        try:
            self.put_object(d.descriptor)
            self.put_object(d.value)
        finally:
            self.exit()

    def get_py_array(self):
        count, described, type = self.get_array()
        if self.enter():
            try:
                if described:
                    self.next()
                    descriptor = self.get_object()
                else:
                    descriptor = UNDESCRIBED
                elements = []
                while self.next():
                    elements.append(self.get_object())
            finally:
                self.exit()
            return Array(descriptor, type, *elements)

    def put_py_array(self, a):
        self.put_array(a.descriptor != UNDESCRIBED, a.type)
        self.enter()
        try:
            for e in a.elements:
                self.put_object(e)
        finally:
            self.exit()

    # Python class -> unbound putter method (used by put_object).
    put_mappings = {
        None.__class__: lambda s, _: s.put_null(),
        bool: put_bool,
        dict: put_dict,
        list: put_sequence,
        tuple: put_sequence,
        unicode: put_string,
        bytes: put_binary,
        symbol: put_symbol,
        int: put_int,
        char: put_char,
        long: put_long,
        ulong: put_ulong,
        timestamp: put_timestamp,
        float: put_double,
        uuid.UUID: put_uuid,
        Described: put_py_described,
        Array: put_py_array
    }
    # AMQP DataType -> unbound getter method (used by get_object).
    get_mappings = {
        NULL: lambda s: None,
        BOOL: get_bool,
        BYTE: get_byte,
        UBYTE: get_ubyte,
        SHORT: get_short,
        USHORT: get_ushort,
        INT: get_int,
        UINT: get_uint,
        CHAR: get_char,
        LONG: get_long,
        ULONG: get_ulong,
        TIMESTAMP: get_timestamp,
        FLOAT: get_float,
        DOUBLE: get_double,
        DECIMAL32: get_decimal32,
        DECIMAL64: get_decimal64,
        DECIMAL128: get_decimal128,
        UUID: get_uuid,
        BINARY: get_binary,
        STRING: get_string,
        SYMBOL: get_symbol,
        DESCRIBED: get_py_described,
        ARRAY: get_py_array,
        LIST: get_sequence,
        MAP: get_dict
    }

    def put_object(self, obj):
        putter = self.put_mappings[obj.__class__]
        putter(self, obj)

    def get_object(self):
        type = self.type()
        if type is None: return None
        getter = self.get_mappings.get(type)
        if getter:
            return getter(self)
        else:
            self.dump()
            return UnmappedType(str(type))

    def copy(self, src):
self._data.copy(src._data) def format(self): return self._data.toString() class Messenger(object): def __init__(self, name=None): if name: self.impl = Proton.messenger(name) else: self.impl = Proton.messenger() def route(self, pattern, address): self.impl.route(pattern, address) def rewrite(self, pattern, address): self.impl.rewrite(pattern, address) def start(self): self.impl.start() def stop(self): self.impl.stop() @property def stopped(self): return self.impl.stopped() def subscribe(self, source): self.impl.subscribe(source) def put(self, message): self.impl.put(message.impl) return self.impl.outgoingTracker() def send(self, n=-1): self.impl.send(n) def recv(self, n=-1): self.impl.recv(n) @property def receiving(self): return self.impl.receiving() def work(self, timeout=None): if timeout is None: t = -1 else: t = long(1000*timeout) try: err = self.impl.work(t) except Timeout, e: return False return err def interrupt(self): self.impl.interrupt() def get(self, message=None): result = self.impl.get() if message and result: message.impl = result return self.impl.incomingTracker() @property def outgoing(self): return self.impl.outgoing() @property def incoming(self): return self.impl.incoming() def _get_timeout(self): t = self.impl.getTimeout() if t == -1: return None else: return float(t)/1000 def _set_timeout(self, timeout): if timeout is None: t = -1 else: t = long(1000*timeout) self.impl.setTimeout(t) timeout = property(_get_timeout, _set_timeout) def _is_blocking(self): return self.impl.isBlocking() def _set_blocking(self, b): self.impl.setBlocking(b) blocking = property(_is_blocking, _set_blocking) def accept(self, tracker=None): if tracker is None: tracker = self.impl.incomingTracker() flags = self.impl.CUMULATIVE else: flags = 0 self.impl.accept(tracker, flags) def reject(self, tracker=None): if tracker is None: tracker = self.impl.incomingTracker() flags = self.impl.CUMULATIVE else: flags = 0 self.impl.reject(tracker, flags) def settle(self, tracker=None): 
if tracker is None: tracker = self.impl.outgoingTracker() flags = self.impl.CUMULATIVE else: flags = 0 self.impl.settle(tracker, flags) def status(self, tracker): return STATUSES[self.impl.getStatus(tracker)] def _get_incoming_window(self): return self.impl.getIncomingWindow() def _set_incoming_window(self, window): self.impl.setIncomingWindow(window) incoming_window = property(_get_incoming_window, _set_incoming_window) def _get_outgoing_window(self): return self.impl.getOutgoingWindow() def _set_outgoing_window(self, window): self.impl.setOutgoingWindow(window) outgoing_window = property(_get_outgoing_window, _set_outgoing_window) def _get_certificate(self): raise Skipped() def _set_certificate(self, xxx): raise Skipped() certificate = property(_get_certificate, _set_certificate) def buffered(self, tracker): raise Skipped() class Message(object): AMQP = MessageFormat.AMQP TEXT = MessageFormat.TEXT DATA = MessageFormat.DATA JSON = MessageFormat.JSON DEFAULT_PRIORITY = JMessage.DEFAULT_PRIORITY def __init__(self): self.impl = Proton.message() def clear(self): self.impl.clear() def save(self): saved = self.impl.save() if saved is None: saved = "" elif not isinstance(saved, unicode): saved = saved.tostring() return saved def load(self, data): self.impl.load(data) def encode(self): size = 1024 output = zeros(size, "b") while True: n = self.impl.encode(output, 0, size) # XXX: need to check for overflow if n > 0: return output.tostring()[:n] else: raise Exception(n) def decode(self, data): self.impl.decode(data,0,len(data)) def _get_id(self): id = self.impl.getMessageId() if isinstance(id, JUUID): id = UUID( id.toString() ) return id def _set_id(self, value): if isinstance(value, UUID): value = JUUID.fromString( str(value) ) return self.impl.setMessageId(value) id = property(_get_id, _set_id) def _get_correlation_id(self): id = self.impl.getCorrelationId() if isinstance(id, JUUID): id = UUID( id.toString() ) return id def _set_correlation_id(self, value): if 
isinstance(value, UUID): value = JUUID.fromString( str(value) ) return self.impl.setCorrelationId(value) correlation_id = property(_get_correlation_id, _set_correlation_id) def _get_ttl(self): return self.impl.getTtl() def _set_ttl(self, ttl): self.impl.setTtl(ttl) ttl = property(_get_ttl, _set_ttl) def _get_priority(self): return self.impl.getPriority() def _set_priority(self, priority): self.impl.setPriority(priority) priority = property(_get_priority, _set_priority) def _get_address(self): return self.impl.getAddress() def _set_address(self, address): self.impl.setAddress(address) address = property(_get_address, _set_address) def _get_subject(self): return self.impl.getSubject() def _set_subject(self, subject): self.impl.setSubject(subject) subject = property(_get_subject, _set_subject) def _get_user_id(self): u = self.impl.getUserId() if u is None: return "" else: return u.tostring() def _set_user_id(self, user_id): self.impl.setUserId(user_id) user_id = property(_get_user_id, _set_user_id) def _get_reply_to(self): return self.impl.getReplyTo() def _set_reply_to(self, reply_to): self.impl.setReplyTo(reply_to) reply_to = property(_get_reply_to, _set_reply_to) def _get_reply_to_group_id(self): return self.impl.getReplyToGroupId() def _set_reply_to_group_id(self, reply_to_group_id): self.impl.setReplyToGroupId(reply_to_group_id) reply_to_group_id = property(_get_reply_to_group_id, _set_reply_to_group_id) def _get_group_id(self): return self.impl.getGroupId() def _set_group_id(self, group_id): self.impl.setGroupId(group_id) group_id = property(_get_group_id, _set_group_id) def _get_group_sequence(self): return self.impl.getGroupSequence() def _set_group_sequence(self, group_sequence): self.impl.setGroupSequence(group_sequence) group_sequence = property(_get_group_sequence, _set_group_sequence) def _is_first_acquirer(self): return self.impl.isFirstAcquirer() def _set_first_acquirer(self, b): self.impl.setFirstAcquirer(b) first_acquirer = 
property(_is_first_acquirer, _set_first_acquirer) def _get_expiry_time(self): return self.impl.getExpiryTime() def _set_expiry_time(self, expiry_time): self.impl.setExpiryTime(expiry_time) expiry_time = property(_get_expiry_time, _set_expiry_time) def _is_durable(self): return self.impl.isDurable() def _set_durable(self, durable): self.impl.setDurable(durable) durable = property(_is_durable, _set_durable) def _get_delivery_count(self): return self.impl.getDeliveryCount() def _set_delivery_count(self, delivery_count): self.impl.setDeliveryCount(delivery_count) delivery_count = property(_get_delivery_count, _set_delivery_count) def _get_creation_time(self): return self.impl.getCreationTime() def _set_creation_time(self, creation_time): self.impl.setCreationTime(creation_time) creation_time = property(_get_creation_time, _set_creation_time) def _get_content_type(self): return self.impl.getContentType() def _set_content_type(self, content_type): self.impl.setContentType(content_type) content_type = property(_get_content_type, _set_content_type) def _get_content_encoding(self): return self.impl.getContentEncoding() def _set_content_encoding(self, content_encoding): self.impl.setContentEncoding(content_encoding) content_encoding = property(_get_content_encoding, _set_content_encoding) def _get_format(self): return self.impl.getFormat() def _set_format(self, format): self.impl.setMessageFormat(format) format = property(_get_format, _set_format) def _get_body(self): body = self.impl.getBody() if isinstance(body, AmqpValue): return body.getValue() else: return body def _set_body(self, body): self.impl.setBody(AmqpValue(body)) body = property(_get_body, _set_body) class SASL(object): OK = Sasl.PN_SASL_OK AUTH = Sasl.PN_SASL_AUTH def __new__(cls, transport): """Enforce a singleton SASL object per Transport""" if not transport._sasl: obj = super(SASL, cls).__new__(cls) obj._sasl = transport.impl.sasl() transport._sasl = obj return transport._sasl def mechanisms(self, 
mechanisms): self._sasl.setMechanisms(mechanisms.split()) def client(self): self._sasl.client() def server(self): self._sasl.server() def send(self, data): self._sasl.send(data, 0, len(data)) def recv(self): size = 4096 output = zeros(size, "b") n = self._sasl.recv(output, 0, size) if n >= 0: return output.tostring()[:n] elif n == JTransport.END_OF_STREAM: return None else: raise Exception(n) def _get_outcome(self): value = self._sasl.getOutcome() if value == Sasl.PN_SASL_NONE: return None else: return value def _set_outcome(self, outcome): self.impl.setOutcome(outcome) outcome = property(_get_outcome, _set_outcome) def done(self, outcome): self._sasl.done(outcome) def plain(self, user, password): self._sasl.plain(user,password) class SSLException(Exception): pass class SSLUnavailable(SSLException): pass class SSLDomain(object): MODE_SERVER = JSslDomain.Mode.SERVER MODE_CLIENT = JSslDomain.Mode.CLIENT VERIFY_PEER = JSslDomain.VerifyMode.VERIFY_PEER VERIFY_PEER_NAME = JSslDomain.VerifyMode.VERIFY_PEER_NAME ANONYMOUS_PEER = JSslDomain.VerifyMode.ANONYMOUS_PEER def __init__(self, mode): try: self._domain = Proton.sslDomain() except NoClassDefFoundError, e: raise SSLUnavailable() self._domain.init(mode) def set_credentials(self, cert_file, key_file, password): self._domain.setCredentials(cert_file, key_file, password) def set_trusted_ca_db(self, certificate_db): self._domain.setTrustedCaDb(certificate_db) def set_peer_authentication(self, verify_mode, trusted_CAs=None): # TODO the method calls (setTrustedCaDb/setPeerAuthentication) have to occur in # that order otherwise tests fail with proton-jni. It is not clear yet why. 
if trusted_CAs is not None: self._domain.setTrustedCaDb(trusted_CAs) self._domain.setPeerAuthentication(verify_mode) def allow_unsecured_client(self, allow_unsecured = True): self._domain.allowUnsecuredClient(allow_unsecured) class SSLSessionDetails(object): def __init__(self, session_id): self._session_details = Proton.sslPeerDetails(session_id, 1) class SSL(object): def __new__(cls, transport, domain, session_details=None): """Enforce a singleton SSL object per Transport""" if transport._ssl: # unfortunately, we've combined the allocation and the configuration in a # single step. So catch any attempt by the application to provide what # may be a different configuration than the original (hack) ssl = transport._ssl if (domain and (ssl._domain is not domain) or session_details and (ssl._session_details is not session_details)): raise SSLException("Cannot re-configure existing SSL object!") else: obj = super(SSL, cls).__new__(cls) obj._domain = domain obj._session_details = session_details internal_session_details = None if session_details: internal_session_details = session_details._session_details obj._ssl = transport.impl.ssl(domain._domain, internal_session_details) transport._ssl = obj return transport._ssl def __init__(self, transport, domain, session_details=None): internal_session_details = None if session_details: internal_session_details = session_details._session_details self._ssl = transport.impl.ssl(domain._domain, internal_session_details) self._session_details = session_details def get_session_details(self): return self._session_details def cipher_name(self): return self._ssl.getCipherName() def protocol_name(self): return self._ssl.getProtocolName() def _set_peer_hostname(self, hostname): self._ssl.setPeerHostname(hostname) def _get_peer_hostname(self): return self._ssl.getPeerHostname() peer_hostname = property(_get_peer_hostname, _set_peer_hostname) class Driver(object): """ Proton-c platform abstraction - not needed.""" def __init__(self, *args, 
**kwargs): raise ProtonUnsupportedOperationException("Driver") class Connector(object): """ Proton-c platform abstraction - not needed.""" def __init__(self, *args, **kwargs): raise ProtonUnsupportedOperationException("Connector") class Listener(object): """ Proton-c platform abstraction - not needed.""" def __init__(self, *args, **kwargs): raise ProtonUnsupportedOperationException("Listener") def convertToPyArray(t,a,f): if a == None or len(a) == 0: return None return Array(UNDESCRIBED, t, *map(f,a)) arrayElementMappings = { JData.DataType.SYMBOL: lambda s: Symbol.valueOf(s) } arrayTypeMappings = { JData.DataType.SYMBOL: Symbol } conversions_J2PY = { dict: lambda d: dict([(J2PY(k), J2PY(v)) for k, v in d.items()]), HashMap: lambda m: dict([(J2PY(e.getKey()), J2PY(e.getValue())) for e in m.entrySet()]), list: lambda l: [J2PY(x) for x in l], Symbol: lambda s: symbol(s.toString()), UnsignedInteger: lambda n: n.longValue(), UnsignedLong: lambda n: n.longValue() } conversions_PY2J = { dict: lambda d: dict([(PY2J(k), PY2J(v)) for k, v in d.items()]), list: lambda l: [PY2J(x) for x in l], symbol: lambda s: Symbol.valueOf(s), Array: lambda a: array(map(arrayElementMappings[a.type], a.elements), arrayTypeMappings[a.type]) } def identity(x): return x def J2PY(obj): result = conversions_J2PY.get(type(obj), identity)(obj) return result def PY2J(obj): result = conversions_PY2J.get(type(obj), identity)(obj) return result __all__ = [ "ACCEPTED", "Array", "API_LANGUAGE", "IMPLEMENTATION_LANGUAGE", "MANUAL", "PENDING", "REJECTED", "RELEASED", "SETTLED", "char", "Condition", "Connection", "Connector", "Data", "Delivery", "Disposition", "Described", "Driver", "Endpoint", "Link", "Listener", "Message", "MessageException", "Messenger", "MessengerException", "ProtonException", "Receiver", "SASL", "Sender", "Session", "SSL", "SSLDomain", "SSLException", "SSLSessionDetails", "SSLUnavailable", "symbol", "timestamp", "Terminus", "Timeout", "Interrupt", "Transport", "TransportException", 
"ulong", "UNDESCRIBED"]
"""Built-in loss functions. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import six from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.framework import ops from tensorflow.python.framework import smart_cond from tensorflow.python.framework import tensor_util from tensorflow.python.keras import backend as K from tensorflow.python.keras.utils import losses_utils from tensorflow.python.keras.utils import tf_utils from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object from tensorflow.python.keras.utils.generic_utils import serialize_keras_object from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops.losses import losses_impl from tensorflow.python.ops.losses import util as tf_losses_util from tensorflow.python.util.tf_export import keras_export from tensorflow.tools.docs import doc_controls @keras_export('keras.losses.Loss') class Loss(object): """Loss base class. To be implemented by subclasses: * `call()`: Contains the logic for loss calculation using `y_true`, `y_pred`. Example subclass implementation: ```python class MeanSquaredError(Loss): def call(self, y_true, y_pred): y_pred = ops.convert_to_tensor(y_pred) y_true = math_ops.cast(y_true, y_pred.dtype) return K.mean(math_ops.square(y_pred - y_true), axis=-1) ``` When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, please use 'SUM' or 'NONE' reduction types, and reduce losses explicitly in your training loop. Using 'AUTO' or 'SUM_OVER_BATCH_SIZE' will raise an error. Please see https://www.tensorflow.org/tutorials/distribute/custom_training for more details on this. 
You can implement 'SUM_OVER_BATCH_SIZE' using global batch size like: ```python with strategy.scope(): loss_obj = tf.keras.losses.CategoricalCrossentropy( reduction=tf.keras.losses.Reduction.NONE) .... loss = (tf.reduce_sum(loss_obj(labels, predictions)) * (1. / global_batch_size)) ``` Args: reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see https://www.tensorflow.org/tutorials/distribute/custom_training for more details on this. name: Optional name for the op. """ def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name=None): losses_utils.ReductionV2.validate(reduction) self.reduction = reduction self.name = name # SUM_OVER_BATCH is only allowed in losses managed by `fit` or # CannedEstimators. self._allow_sum_over_batch_size = False def __call__(self, y_true, y_pred, sample_weight=None): """Invokes the `Loss` instance. Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`, except sparse loss functions such as sparse categorical crossentropy where shape = `[batch_size, d0, .. dN-1]` y_pred: The predicted values. shape = `[batch_size, d0, .. dN]` sample_weight: Optional `sample_weight` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `sample_weight` is a tensor of size `[batch_size]`, then the total loss for each sample of the batch is rescaled by the corresponding element in the `sample_weight` vector. If the shape of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be broadcasted to this shape), then each loss element of `y_pred` is scaled by the corresponding value of `sample_weight`. 
(Note on`dN-1`: all loss functions reduce by 1 dimension, usually axis=-1.) Returns: Weighted loss float `Tensor`. If `reduction` is `NONE`, this has shape `[batch_size, d0, .. dN-1]`; otherwise, it is scalar. (Note `dN-1` because all loss functions reduce by 1 dimension, usually axis=-1.) Raises: ValueError: If the shape of `sample_weight` is invalid. """ # If we are wrapping a lambda function strip '<>' from the name as it is not # accepted in scope name. scope_name = 'lambda' if self.name == '<lambda>' else self.name graph_ctx = tf_utils.graph_context_for_symbolic_tensors( y_true, y_pred, sample_weight) with K.name_scope(scope_name or self.__class__.__name__), graph_ctx: losses = self.call(y_true, y_pred) return losses_utils.compute_weighted_loss( losses, sample_weight, reduction=self._get_reduction()) @classmethod def from_config(cls, config): """Instantiates a `Loss` from its config (output of `get_config()`). Args: config: Output of `get_config()`. Returns: A `Loss` instance. """ return cls(**config) def get_config(self): return {'reduction': self.reduction, 'name': self.name} @abc.abstractmethod @doc_controls.for_subclass_implementers def call(self, y_true, y_pred): """Invokes the `Loss` instance. Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`, except sparse loss functions such as sparse categorical crossentropy where shape = `[batch_size, d0, .. dN-1]` y_pred: The predicted values. shape = `[batch_size, d0, .. dN]` Returns: Loss values with the shape `[batch_size, d0, .. dN-1]`. 
""" NotImplementedError('Must be implemented in subclasses.') def _get_reduction(self): """Handles `AUTO` reduction cases and returns the reduction value.""" if (not self._allow_sum_over_batch_size and distribution_strategy_context.has_strategy() and (self.reduction == losses_utils.ReductionV2.AUTO or self.reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE)): raise ValueError( 'Please use `tf.keras.losses.Reduction.SUM` or ' '`tf.keras.losses.Reduction.NONE` for loss reduction when losses are ' 'used with `tf.distribute.Strategy` outside of the built-in training ' 'loops. You can implement ' '`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` using global batch ' 'size like:\n```\nwith strategy.scope():\n' ' loss_obj = tf.keras.losses.CategoricalCrossentropy(' 'reduction=tf.keras.losses.Reduction.NONE)\n....\n' ' loss = tf.reduce_sum(loss_obj(labels, predictions)) * ' '(1. / global_batch_size)\n```\nPlease see ' 'https://www.tensorflow.org/tutorials/distribute/custom_training' ' for more details.') if self.reduction == losses_utils.ReductionV2.AUTO: return losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE return self.reduction class LossFunctionWrapper(Loss): """Wraps a loss function in the `Loss` class. Args: fn: The loss function to wrap, with signature `fn(y_true, y_pred, **kwargs)`. reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see https://www.tensorflow.org/tutorials/distribute/custom_training for more details on this. name: (Optional) name for the loss. **kwargs: The keyword arguments that are passed on to `fn`. 
""" def __init__(self, fn, reduction=losses_utils.ReductionV2.AUTO, name=None, **kwargs): super(LossFunctionWrapper, self).__init__(reduction=reduction, name=name) self.fn = fn self._fn_kwargs = kwargs def call(self, y_true, y_pred): """Invokes the `LossFunctionWrapper` instance. Args: y_true: Ground truth values. y_pred: The predicted values. Returns: Loss values per sample. """ if tensor_util.is_tensor(y_pred) and tensor_util.is_tensor(y_true): y_pred, y_true = tf_losses_util.squeeze_or_expand_dimensions( y_pred, y_true) return self.fn(y_true, y_pred, **self._fn_kwargs) def get_config(self): config = {} for k, v in six.iteritems(self._fn_kwargs): config[k] = K.eval(v) if tf_utils.is_tensor_or_variable(v) else v base_config = super(LossFunctionWrapper, self).get_config() return dict(list(base_config.items()) + list(config.items())) @keras_export('keras.losses.MeanSquaredError') class MeanSquaredError(LossFunctionWrapper): """Computes the mean of squares of errors between labels and predictions. `loss = square(y_true - y_pred)` Usage: >>> mse = tf.keras.losses.MeanSquaredError() >>> loss = mse([[0., 1.], [0., 0.]], [[1., 1.], [1., 0.]]) >>> loss.numpy() 0.5 >>> loss = mse([[0., 1.], [0., 0.]], [[1., 1.], [1., 0.]], ... sample_weight=[0.7, 0.3]) >>> loss.numpy() 0.25 Usage with the `compile` API: ```python model = tf.keras.Model(inputs, outputs) model.compile('sgd', loss=tf.keras.losses.MeanSquaredError()) ``` """ def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='mean_squared_error'): super(MeanSquaredError, self).__init__( mean_squared_error, name=name, reduction=reduction) @keras_export('keras.losses.MeanAbsoluteError') class MeanAbsoluteError(LossFunctionWrapper): """Computes the mean of absolute difference between labels and predictions. 
`loss = abs(y_true - y_pred)` Usage: >>> mae = tf.keras.losses.MeanAbsoluteError() >>> loss = mae([[0., 1.], [0., 0.]], [[1., 1.], [1., 0.]]) >>> loss.numpy() 0.5 >>> loss = mae([[0., 1.], [0., 0.]], [[1., 1.], [1., 0.]], ... sample_weight=[0.7, 0.3]) >>> loss.numpy() 0.25 Usage with the `compile` API: ```python model = tf.keras.Model(inputs, outputs) model.compile('sgd', loss=tf.keras.losses.MeanAbsoluteError()) ``` """ def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='mean_absolute_error'): super(MeanAbsoluteError, self).__init__( mean_absolute_error, name=name, reduction=reduction) @keras_export('keras.losses.MeanAbsolutePercentageError') class MeanAbsolutePercentageError(LossFunctionWrapper): """Computes the mean absolute percentage error between `y_true` and `y_pred`. `loss = 100 * abs(y_true - y_pred) / y_true` Usage: >>> mape = tf.keras.losses.MeanAbsolutePercentageError() >>> loss = mape([[0., 1.], [0., 0.]], [[1., 1.], [1., 0.]]) >>> loss.numpy() 500000000.0 >>> loss = mape([[0., 1.], [0., 0.]], [[1., 1.], [1., 0.]], ... sample_weight=[0.7, 0.3]) >>> loss.numpy() 250000000.0 Usage with the `compile` API: ```python model = tf.keras.Model(inputs, outputs) model.compile('sgd', loss=tf.keras.losses.MeanAbsolutePercentageError()) ``` """ def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='mean_absolute_percentage_error'): super(MeanAbsolutePercentageError, self).__init__( mean_absolute_percentage_error, name=name, reduction=reduction) @keras_export('keras.losses.MeanSquaredLogarithmicError') class MeanSquaredLogarithmicError(LossFunctionWrapper): """Computes the mean squared logarithmic error between `y_true` and `y_pred`. `loss = square(log(y_true) - log(y_pred))` Usage: >>> msle = tf.keras.losses.MeanSquaredLogarithmicError() >>> loss = msle([[0., 1.], [0., 0.]], [[1., 1.], [1., 0.]]) >>> loss.numpy() 0.24022643 >>> loss = msle([[0., 1.], [0., 0.]], [[1., 1.], [1., 0.]], ... 
sample_weight=[0.7, 0.3]) >>> loss.numpy() 0.12011322 Usage with the `compile` API: ```python model = tf.keras.Model(inputs, outputs) model.compile('sgd', loss=tf.keras.losses.MeanSquaredLogarithmicError()) ``` """ def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='mean_squared_logarithmic_error'): super(MeanSquaredLogarithmicError, self).__init__( mean_squared_logarithmic_error, name=name, reduction=reduction) @keras_export('keras.losses.BinaryCrossentropy') class BinaryCrossentropy(LossFunctionWrapper): """Computes the cross-entropy loss between true labels and predicted labels. Use this cross-entropy loss when there are only two label classes (assumed to be 0 and 1). For each example, there should be a single floating-point value per prediction. In the snippet below, each of the four examples has only a single floating-pointing value, and both `y_pred` and `y_true` have the shape `[batch_size]`. Usage: >>> bce = tf.keras.losses.BinaryCrossentropy() >>> loss = bce([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) >>> loss.numpy() 0.81492424 >>> loss = bce([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], ... sample_weight=[1, 0]) >>> loss.numpy() 0.45814526 Usage with the `tf.keras` API: ```python model = tf.keras.Model(inputs, outputs) model.compile('sgd', loss=tf.keras.losses.BinaryCrossentropy()) ``` Args: from_logits: Whether to interpret `y_pred` as a tensor of [logit](https://en.wikipedia.org/wiki/Logit) values. By default, we assume that `y_pred` contains probabilities (i.e., values in [0, 1]). Note: Using from_logits=True may be more numerically stable. label_smoothing: Float in [0, 1]. When 0, no smoothing occurs. When > 0, we compute the loss between the predicted labels and a smoothed version of the true labels, where the smoothing squeezes the labels towards 0.5. Larger values of `label_smoothing` correspond to heavier smoothing. reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. 
`AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see https://www.tensorflow.org/tutorials/distribute/custom_training for more details on this. name: (Optional) Name for the op. """ def __init__(self, from_logits=False, label_smoothing=0, reduction=losses_utils.ReductionV2.AUTO, name='binary_crossentropy'): super(BinaryCrossentropy, self).__init__( binary_crossentropy, name=name, reduction=reduction, from_logits=from_logits, label_smoothing=label_smoothing) self.from_logits = from_logits @keras_export('keras.losses.CategoricalCrossentropy') class CategoricalCrossentropy(LossFunctionWrapper): """Computes the crossentropy loss between the labels and predictions. Use this crossentropy loss function when there are two or more label classes. We expect labels to be provided in a `one_hot` representation. If you want to provide labels as integers, please use `SparseCategoricalCrossentropy` loss. There should be `# classes` floating point values per feature. In the snippet below, there is `# classes` floating pointing values per example. The shape of both `y_pred` and `y_true` are `[batch_size, num_classes]`. Usage: >>> cce = tf.keras.losses.CategoricalCrossentropy() >>> loss = cce([[0, 1, 0], [0, 0, 1]], ... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]) >>> loss.numpy() 1.1769392 >>> loss = cce([[0, 1, 0], [0, 0, 1]], ... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]], ... sample_weight=tf.constant([0.3, 0.7])) >>> loss.numpy() 0.8135988 Usage with the `compile` API: ```python model = tf.keras.Model(inputs, outputs) model.compile('sgd', loss=tf.keras.losses.CategoricalCrossentropy()) ``` Args: from_logits: Whether `y_pred` is expected to be a logits tensor. 
By default, we assume that `y_pred` encodes a probability distribution. **Note: Using from_logits=True is more numerically stable.** label_smoothing: Float in [0, 1]. When > 0, label values are smoothed, meaning the confidence on label values are relaxed. e.g. `label_smoothing=0.2` means that we will use a value of `0.1` for label `0` and `0.9` for label `1`" reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see https://www.tensorflow.org/tutorials/distribute/custom_training for more details on this. name: Optional name for the op. """ def __init__(self, from_logits=False, label_smoothing=0, reduction=losses_utils.ReductionV2.AUTO, name='categorical_crossentropy'): super(CategoricalCrossentropy, self).__init__( categorical_crossentropy, name=name, reduction=reduction, from_logits=from_logits, label_smoothing=label_smoothing) @keras_export('keras.losses.SparseCategoricalCrossentropy') class SparseCategoricalCrossentropy(LossFunctionWrapper): """Computes the crossentropy loss between the labels and predictions. Use this crossentropy loss function when there are two or more label classes. We expect labels to be provided as integers. If you want to provide labels using `one-hot` representation, please use `CategoricalCrossentropy` loss. There should be `# classes` floating point values per feature for `y_pred` and a single floating point value per feature for `y_true`. In the snippet below, there is a single floating point value per example for `y_true` and `# classes` floating pointing values per example for `y_pred`. 
The shape of `y_true` is `[batch_size]` and the shape of `y_pred` is `[batch_size, num_classes]`. Usage: >>> scce = tf.keras.losses.SparseCategoricalCrossentropy() >>> loss = scce([1, 2], [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]) >>> loss.numpy() 1.1769392 >>> loss = scce([1, 2], [[0.05, 0.95, 0], [0.1, 0.8, 0.1]], ... sample_weight=tf.constant([0.3, 0.7])) >>> loss.numpy() 0.8135988 Usage with the `compile` API: ```python model = tf.keras.Model(inputs, outputs) model.compile('sgd', loss=tf.keras.losses.SparseCategoricalCrossentropy()) ``` Args: from_logits: Whether `y_pred` is expected to be a logits tensor. By default, we assume that `y_pred` encodes a probability distribution. Note: Using from_logits=True may be more numerically stable. reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see https://www.tensorflow.org/tutorials/distribute/custom_training for more details on this. name: Optional name for the op. """ def __init__(self, from_logits=False, reduction=losses_utils.ReductionV2.AUTO, name='sparse_categorical_crossentropy'): super(SparseCategoricalCrossentropy, self).__init__( sparse_categorical_crossentropy, name=name, reduction=reduction, from_logits=from_logits) @keras_export('keras.losses.Hinge') class Hinge(LossFunctionWrapper): """Computes the hinge loss between `y_true` and `y_pred`. `loss = maximum(1 - y_true * y_pred, 0)` `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are provided we will convert them to -1 or 1. 
Usage: >>> h = tf.keras.losses.Hinge() >>> loss = h([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) >>> loss.numpy() 1.3 >>> loss = h([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], sample_weight=[1, 0]) >>> loss.numpy() 0.55 Usage with the `compile` API: ```python model = tf.keras.Model(inputs, outputs) model.compile('sgd', loss=tf.keras.losses.Hinge()) ``` """ def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='hinge'): super(Hinge, self).__init__(hinge, name=name, reduction=reduction) @keras_export('keras.losses.SquaredHinge') class SquaredHinge(LossFunctionWrapper): """Computes the squared hinge loss between `y_true` and `y_pred`. `loss = square(maximum(1 - y_true * y_pred, 0))` `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are provided we will convert them to -1 or 1. Usage: >>> h = tf.keras.losses.SquaredHinge() >>> loss = h([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) >>> loss.numpy() 1.86 >>> loss = h([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], sample_weight=[1, 0]) >>> loss.numpy() 0.73 Usage with the `compile` API: ```python model = tf.keras.Model(inputs, outputs) model.compile('sgd', loss=tf.keras.losses.SquaredHinge()) ``` """ def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='squared_hinge'): super(SquaredHinge, self).__init__( squared_hinge, name=name, reduction=reduction) @keras_export('keras.losses.CategoricalHinge') class CategoricalHinge(LossFunctionWrapper): """Computes the categorical hinge loss between `y_true` and `y_pred`. 
`loss = maximum(neg - pos + 1, 0)`
  where `pos = sum(y_true * y_pred)` and
  `neg = maximum((1. - y_true) * y_pred)`

  Usage:

  >>> h = tf.keras.losses.CategoricalHinge()
  >>> loss = h([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
  >>> loss.numpy()
  1.4000001
  >>> loss = h([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], sample_weight=[1, 0])
  >>> loss.numpy()
  0.6

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.CategoricalHinge())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='categorical_hinge'):
    super(CategoricalHinge, self).__init__(
        categorical_hinge, name=name, reduction=reduction)


@keras_export('keras.losses.Poisson')
class Poisson(LossFunctionWrapper):
  """Computes the Poisson loss between `y_true` and `y_pred`.

  `loss = y_pred - y_true * log(y_pred)`

  Usage:

  >>> p = tf.keras.losses.Poisson()
  >>> loss = p([[0., 1.], [0., 0.]], [[1., 1.], [0., 0.]])
  >>> loss.numpy()
  0.49999997
  >>> loss = p([[0., 1.], [0., 0.]], [[1., 1.], [0., 0.]],
  ...          sample_weight=[1., 0.])
  >>> loss.numpy()
  0.49999997

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.Poisson())
  ```
  """

  def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='poisson'):
    super(Poisson, self).__init__(poisson, name=name, reduction=reduction)


@keras_export('keras.losses.LogCosh')
class LogCosh(LossFunctionWrapper):
  """Computes the logarithm of the hyperbolic cosine of the prediction error.

  `logcosh = log((exp(x) + exp(-x))/2)`,
  where x is the error `y_pred - y_true`.

  Usage:

  >>> l = tf.keras.losses.LogCosh()
  >>> loss = l([[0., 1.], [0., 0.]], [[1., 1.], [0., 0.]])
  >>> loss.numpy()
  0.10844523
  >>> loss = l([[0., 1.], [0., 0.]], [[1., 1.], [0., 0.]],
  ...
sample_weight=[1., 0.]) >>> loss.numpy() 0.10844523 Usage with the `compile` API: ```python model = tf.keras.Model(inputs, outputs) model.compile('sgd', loss=tf.keras.losses.LogCosh()) ``` """ def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='logcosh'): super(LogCosh, self).__init__(logcosh, name=name, reduction=reduction) @keras_export('keras.losses.KLDivergence') class KLDivergence(LossFunctionWrapper): """Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`. `loss = y_true * log(y_true / y_pred)` See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence Usage: >>> kl = tf.keras.losses.KLDivergence() >>> loss = kl([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) >>> loss.numpy() 0.45814306 >>> loss = kl([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], ... sample_weight=[1, 0]) >>> loss.numpy() 0.4581446 Usage with the `compile` API: ```python model = tf.keras.Model(inputs, outputs) model.compile('sgd', loss=tf.keras.losses.KLDivergence()) ``` """ def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='kullback_leibler_divergence'): super(KLDivergence, self).__init__( kullback_leibler_divergence, name=name, reduction=reduction) @keras_export('keras.losses.Huber') class Huber(LossFunctionWrapper): """Computes the Huber loss between `y_true` and `y_pred`. For each value x in `error = y_true - y_pred`: ``` loss = 0.5 * x^2 if |x| <= d loss = 0.5 * d^2 + d * (|x| - d) if |x| > d ``` where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss Usage: >>> h = tf.keras.losses.Huber() >>> loss = h([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) >>> loss.numpy() 0.155 >>> loss = h([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], ... sample_weight=[1, 0]) >>> loss.numpy() 0.09 Usage with the `compile` API: ```python model = tf.keras.Model(inputs, outputs) model.compile('sgd', loss=tf.keras.losses.Huber()) ``` Args: delta: A float, the point where the Huber loss function changes from a quadratic to linear. 
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see https://www.tensorflow.org/tutorials/distribute/custom_training for more details on this. name: Optional name for the op. """ def __init__(self, delta=1.0, reduction=losses_utils.ReductionV2.AUTO, name='huber_loss'): super(Huber, self).__init__( huber_loss, name=name, reduction=reduction, delta=delta) @keras_export('keras.metrics.mean_squared_error', 'keras.metrics.mse', 'keras.metrics.MSE', 'keras.losses.mean_squared_error', 'keras.losses.mse', 'keras.losses.MSE') def mean_squared_error(y_true, y_pred): """Computes the mean squared error between labels and predictions. After computing the squared distance between the inputs, the mean value over the last dimension is returned. `loss = mean(square(y_true - y_pred), axis=-1)` Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Mean squared error values. shape = `[batch_size, d0, .. dN-1]`. """ y_pred = ops.convert_to_tensor(y_pred) y_true = math_ops.cast(y_true, y_pred.dtype) return K.mean(math_ops.squared_difference(y_pred, y_true), axis=-1) @keras_export('keras.metrics.mean_absolute_error', 'keras.metrics.mae', 'keras.metrics.MAE', 'keras.losses.mean_absolute_error', 'keras.losses.mae', 'keras.losses.MAE') def mean_absolute_error(y_true, y_pred): """Computes the mean absolute error between labels and predictions. `loss = abs(y_true - y_pred)` Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. 
Returns:
    Mean absolute error values. shape = `[batch_size, d0, .. dN-1]`.
  """
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  return K.mean(math_ops.abs(y_pred - y_true), axis=-1)


@keras_export('keras.metrics.mean_absolute_percentage_error',
              'keras.metrics.mape',
              'keras.metrics.MAPE',
              'keras.losses.mean_absolute_percentage_error',
              'keras.losses.mape',
              'keras.losses.MAPE')
def mean_absolute_percentage_error(y_true, y_pred):
  """Computes the mean absolute percentage error between `y_true` and `y_pred`.

  `loss = 100 * abs((y_true - y_pred) / y_true)`

  Note that to avoid dividing by zero, `abs(y_true)` is clipped below at
  `K.epsilon()` in the denominator.

  Args:
    y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
    y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

  Returns:
    Mean absolute percentage error values. shape = `[batch_size, d0, .. dN-1]`.
  """
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  diff = math_ops.abs(
      (y_true - y_pred) / K.maximum(math_ops.abs(y_true), K.epsilon()))
  return 100. * K.mean(diff, axis=-1)


@keras_export('keras.metrics.mean_squared_logarithmic_error',
              'keras.metrics.msle',
              'keras.metrics.MSLE',
              'keras.losses.mean_squared_logarithmic_error',
              'keras.losses.msle',
              'keras.losses.MSLE')
def mean_squared_logarithmic_error(y_true, y_pred):
  """Computes the mean squared logarithmic error between `y_true` and `y_pred`.

  `loss = square(log(y_true + 1.) - log(y_pred + 1.))`

  Note that `y_true` and `y_pred` are clipped below at `K.epsilon()` before
  the logarithm is taken.

  Args:
    y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
    y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

  Returns:
    Mean squared logarithmic error values. shape = `[batch_size, d0, .. dN-1]`.
  """
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  first_log = math_ops.log(K.maximum(y_pred, K.epsilon()) + 1.)
  second_log = math_ops.log(K.maximum(y_true, K.epsilon()) + 1.)
return K.mean(math_ops.squared_difference(first_log, second_log), axis=-1) def _maybe_convert_labels(y_true): """Converts binary labels into -1/1.""" are_zeros = math_ops.equal(y_true, 0) are_ones = math_ops.equal(y_true, 1) is_binary = math_ops.reduce_all(math_ops.logical_or(are_zeros, are_ones)) def _convert_binary_labels(): # Convert the binary labels to -1 or 1. return 2. * y_true - 1. updated_y_true = smart_cond.smart_cond(is_binary, _convert_binary_labels, lambda: y_true) return updated_y_true @keras_export('keras.metrics.squared_hinge', 'keras.losses.squared_hinge') def squared_hinge(y_true, y_pred): """Computes the squared hinge loss between `y_true` and `y_pred`. `loss = square(maximum(1 - y_true * y_pred, 0))` Args: y_true: The ground truth values. `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are provided we will convert them to -1 or 1. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Squared hinge loss values. shape = `[batch_size, d0, .. dN-1]`. """ y_pred = ops.convert_to_tensor(y_pred) y_true = math_ops.cast(y_true, y_pred.dtype) y_true = _maybe_convert_labels(y_true) return K.mean( math_ops.square(math_ops.maximum(1. - y_true * y_pred, 0.)), axis=-1) @keras_export('keras.metrics.hinge', 'keras.losses.hinge') def hinge(y_true, y_pred): """Computes the hinge loss between `y_true` and `y_pred`. `loss = maximum(1 - y_true * y_pred, 0)` Args: y_true: The ground truth values. `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are provided they will be converted to -1 or 1. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Hinge loss values. shape = `[batch_size, d0, .. dN-1]`. """ y_pred = ops.convert_to_tensor(y_pred) y_true = math_ops.cast(y_true, y_pred.dtype) y_true = _maybe_convert_labels(y_true) return K.mean(math_ops.maximum(1. 
- y_true * y_pred, 0.), axis=-1)


@keras_export('keras.losses.categorical_hinge')
def categorical_hinge(y_true, y_pred):
  """Computes the categorical hinge loss between `y_true` and `y_pred`.

  `loss = maximum(neg - pos + 1, 0)`
  where `pos = sum(y_true * y_pred)` and
  `neg = maximum((1. - y_true) * y_pred)`

  Args:
    y_true: The ground truth values. `y_true` is expected to be a one-hot
      (or otherwise {0, 1}-valued) encoding of the target class; no label
      conversion is performed.
    y_pred: The predicted values.

  Returns:
    Categorical hinge loss values.
  """
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  # Score assigned to the true class(es).
  pos = math_ops.reduce_sum(y_true * y_pred, axis=-1)
  # Highest score among the remaining classes.
  neg = math_ops.reduce_max((1. - y_true) * y_pred, axis=-1)
  return math_ops.maximum(0., neg - pos + 1.)


def huber_loss(y_true, y_pred, delta=1.0):
  """Computes Huber loss value.

  For each value x in `error = y_true - y_pred`:

  ```
  loss = 0.5 * x^2                  if |x| <= d
  loss = 0.5 * d^2 + d * (|x| - d)  if |x| > d
  ```
  where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss

  Args:
    y_true: tensor of true targets.
    y_pred: tensor of predicted targets.
    delta: A float, the point where the Huber loss function changes from a
      quadratic to linear.

  Returns:
    Tensor with one scalar loss entry per sample.
  """
  y_pred = math_ops.cast(y_pred, dtype=K.floatx())
  y_true = math_ops.cast(y_true, dtype=K.floatx())
  error = math_ops.subtract(y_pred, y_true)
  abs_error = math_ops.abs(error)
  # Split |x| into the quadratic part (capped at delta) and the linear rest,
  # so loss = 0.5 * quadratic^2 + delta * linear covers both branches.
  quadratic = math_ops.minimum(abs_error, delta)
  linear = math_ops.subtract(abs_error, quadratic)
  return math_ops.add(
      math_ops.multiply(
          ops.convert_to_tensor(0.5, dtype=quadratic.dtype),
          math_ops.multiply(quadratic, quadratic)),
      math_ops.multiply(delta, linear))


@keras_export('keras.losses.logcosh')
def logcosh(y_true, y_pred):
  """Logarithm of the hyperbolic cosine of the prediction error.

  `log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and
  to `abs(x) - log(2)` for large `x`.
This means that 'logcosh' works mostly like the mean squared error, but will not be so strongly affected by the occasional wildly incorrect prediction. Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Logcosh error values. shape = `[batch_size, d0, .. dN-1]`. """ y_pred = ops.convert_to_tensor(y_pred) y_true = math_ops.cast(y_true, y_pred.dtype) def _logcosh(x): return x + nn.softplus(-2. * x) - math_ops.cast(math_ops.log(2.), x.dtype) return K.mean(_logcosh(y_pred - y_true), axis=-1) @keras_export('keras.metrics.categorical_crossentropy', 'keras.losses.categorical_crossentropy') def categorical_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0): """Computes the categorical crossentropy loss. Args: y_true: tensor of true targets. y_pred: tensor of predicted targets. from_logits: Whether `y_pred` is expected to be a logits tensor. By default, we assume that `y_pred` encodes a probability distribution. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. Returns: Categorical crossentropy loss value. """ y_pred = ops.convert_to_tensor(y_pred) y_true = math_ops.cast(y_true, y_pred.dtype) label_smoothing = ops.convert_to_tensor(label_smoothing, dtype=K.floatx()) def _smooth_labels(): num_classes = math_ops.cast(array_ops.shape(y_true)[1], y_pred.dtype) return y_true * (1.0 - label_smoothing) + (label_smoothing / num_classes) y_true = smart_cond.smart_cond(label_smoothing, _smooth_labels, lambda: y_true) return K.categorical_crossentropy(y_true, y_pred, from_logits=from_logits) @keras_export('keras.metrics.sparse_categorical_crossentropy', 'keras.losses.sparse_categorical_crossentropy') def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1): """Computes the sparse categorical crossentropy loss. Args: y_true: Ground truth values. y_pred: The predicted values. from_logits: Whether `y_pred` is expected to be a logits tensor. 
By default, we assume that `y_pred` encodes a probability distribution. axis: (Optional) Defaults to -1. The dimension along which the entropy is computed. Returns: Sparse categorical crossentropy loss value. """ y_pred = ops.convert_to_tensor(y_pred) y_true = math_ops.cast(y_true, y_pred.dtype) return K.sparse_categorical_crossentropy( y_true, y_pred, from_logits=from_logits, axis=axis) @keras_export('keras.metrics.binary_crossentropy', 'keras.losses.binary_crossentropy') def binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0): """Computes the binary crossentropy loss. Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. from_logits: Whether `y_pred` is expected to be a logits tensor. By default, we assume that `y_pred` encodes a probability distribution. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. Returns: Binary crossentropy loss value. shape = `[batch_size, d0, .. dN-1]`. """ y_pred = ops.convert_to_tensor(y_pred) y_true = math_ops.cast(y_true, y_pred.dtype) label_smoothing = ops.convert_to_tensor(label_smoothing, dtype=K.floatx()) def _smooth_labels(): return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing y_true = smart_cond.smart_cond(label_smoothing, _smooth_labels, lambda: y_true) return K.mean( K.binary_crossentropy(y_true, y_pred, from_logits=from_logits), axis=-1) @keras_export('keras.metrics.kullback_leibler_divergence', 'keras.metrics.kld', 'keras.metrics.KLD', 'keras.losses.kullback_leibler_divergence', 'keras.losses.kld', 'keras.losses.KLD') def kullback_leibler_divergence(y_true, y_pred): """Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`. 
`loss = y_true * log(y_true / y_pred)` See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence Usage: ```python loss = tf.keras.losses.KLD([.4, .9, .2], [.5, .8, .12]) print('Loss: ', loss.numpy()) # Loss: 0.11891246 ``` Args: y_true: Tensor of true targets. y_pred: Tensor of predicted targets. Returns: A `Tensor` with loss. Raises: TypeError: If `y_true` cannot be cast to the `y_pred.dtype`. """ y_pred = ops.convert_to_tensor(y_pred) y_true = math_ops.cast(y_true, y_pred.dtype) y_true = K.clip(y_true, K.epsilon(), 1) y_pred = K.clip(y_pred, K.epsilon(), 1) return math_ops.reduce_sum(y_true * math_ops.log(y_true / y_pred), axis=-1) @keras_export('keras.metrics.poisson', 'keras.losses.poisson') def poisson(y_true, y_pred): """Computes the Poisson loss between y_true and y_pred. The Poisson loss is the mean of the elements of the `Tensor` `y_pred - y_true * log(y_pred)`. Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Poisson loss value. shape = `[batch_size, d0, .. dN-1]`. Raises: InvalidArgumentError: If `y_true` and `y_pred` have incompatible shapes. """ y_pred = ops.convert_to_tensor(y_pred) y_true = math_ops.cast(y_true, y_pred.dtype) return K.mean(y_pred - y_true * math_ops.log(y_pred + K.epsilon()), axis=-1) @keras_export( 'keras.losses.cosine_similarity', v1=[ 'keras.metrics.cosine_proximity', 'keras.metrics.cosine', 'keras.losses.cosine_proximity', 'keras.losses.cosine', 'keras.losses.cosine_similarity', ]) def cosine_similarity(y_true, y_pred, axis=-1): """Computes the cosine similarity between labels and predictions. Note that it is a negative quantity between -1 and 0, where 0 indicates orthogonality and values closer to -1 indicate greater similarity. This makes it usable as a loss function in a setting where you try to maximize the proximity between predictions and targets. `loss = -sum(y_true * y_pred)` Args: y_true: Tensor of true targets. 
y_pred: Tensor of predicted targets.
    axis: Axis along which to determine similarity.

  Returns:
    Cosine similarity tensor.
  """
  y_true = nn.l2_normalize(y_true, axis=axis)
  y_pred = nn.l2_normalize(y_pred, axis=axis)
  return -math_ops.reduce_sum(y_true * y_pred, axis=axis)


@keras_export('keras.losses.CosineSimilarity')
class CosineSimilarity(LossFunctionWrapper):
  """Computes the cosine similarity between `y_true` and `y_pred`.

  `loss = -sum(y_true * y_pred)`

  Usage:

  >>> cosine_loss = tf.keras.losses.CosineSimilarity(axis=1)
  >>> loss = cosine_loss([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]])
  >>> # l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]]
  >>> # l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]]
  >>> # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]]
  >>> # loss = -mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1))
  >>> #      = -((0. + 0.) + (0.5 + 0.5)) / 2
  >>> loss.numpy()
  -0.49999997

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.CosineSimilarity(axis=1))
  ```

  Args:
    axis: (Optional) Defaults to -1. The dimension along which the cosine
      similarity is computed.
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
      Default value is `AUTO`. `AUTO` indicates that the reduction option will
      be determined by the usage context. For almost all cases this defaults to
      `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of
      built-in training loops such as `tf.keras` `compile` and `fit`, using
      `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see
      https://www.tensorflow.org/tutorials/distribute/custom_training
      for more details on this.
    name: Optional name for the op.
"""

  def __init__(self,
               axis=-1,
               reduction=losses_utils.ReductionV2.AUTO,
               name='cosine_similarity'):
    super(CosineSimilarity, self).__init__(
        cosine_similarity, reduction=reduction, name=name, axis=axis)


# Shorthand aliases for the loss functions above, exported under both the
# lowercase and uppercase spellings.
bce = BCE = binary_crossentropy
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
kld = KLD = kullback_leibler_divergence


def is_categorical_crossentropy(loss):
  """Returns whether `loss` refers to categorical crossentropy in any form."""
  # Matches the Loss subclass, a wrapper around the function, the bare
  # function (via __name__, so decorated/rebound callables still match),
  # or the plain string identifier.
  result = ((isinstance(loss, CategoricalCrossentropy) or
             (isinstance(loss, LossFunctionWrapper) and
              loss.fn == categorical_crossentropy) or
             (hasattr(loss, '__name__') and
              loss.__name__ == 'categorical_crossentropy') or
             (loss == 'categorical_crossentropy')))
  return result


@keras_export('keras.losses.serialize')
def serialize(loss):
  """Serializes a loss function or `Loss` instance to a Keras config."""
  return serialize_keras_object(loss)


@keras_export('keras.losses.deserialize')
def deserialize(name, custom_objects=None):
  """Recovers a loss function or `Loss` instance from a config or name."""
  return deserialize_keras_object(
      name,
      module_objects=globals(),
      custom_objects=custom_objects,
      printable_module_name='loss function')


@keras_export('keras.losses.get')
def get(identifier):
  """Resolves a loss identifier: None, string name, dict config or callable."""
  if identifier is None:
    return None
  if isinstance(identifier, six.string_types):
    identifier = str(identifier)
    return deserialize(identifier)
  if isinstance(identifier, dict):
    return deserialize(identifier)
  elif callable(identifier):
    return identifier
  else:
    raise ValueError('Could not interpret '
                     'loss function identifier:', identifier)


# Losses whose integer labels are cast to int32 before being fed in.
LABEL_DTYPES_FOR_LOSSES = {
    losses_impl.sparse_softmax_cross_entropy: 'int32',
    sparse_categorical_crossentropy: 'int32'
}
from django.db import migrations, models


class Migration(migrations.Migration):
    """Creates the FormDesignerLink content-item table for fluent_contents."""

    # "__first__" orders this after whatever form_designer's initial
    # migration happens to be named.
    dependencies = [("form_designer", "__first__"), ("fluent_contents", "0001_initial")]

    operations = [
        migrations.CreateModel(
            name="FormDesignerLink",
            fields=[
                (
                    "contentitem_ptr",
                    # Multi-table-inheritance link to the base ContentItem.
                    models.OneToOneField(
                        parent_link=True,
                        on_delete=models.CASCADE,
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        to="fluent_contents.ContentItem",
                    ),
                ),
                (
                    "form_definition",
                    # PROTECT: a FormDefinition cannot be deleted while a
                    # page still links to it.
                    models.ForeignKey(
                        verbose_name="Form",
                        on_delete=models.PROTECT,
                        to="form_designer.FormDefinition",
                    ),
                ),
            ],
            options={
                "db_table": "contentitem_formdesignerlink_formdesignerlink",
                "verbose_name": "Form link",
                "verbose_name_plural": "Form links",
            },
            bases=("fluent_contents.contentitem",),
        )
    ]
import sqlite3 as sqlite
import sys
import os.path
import json


class cookie_reader():
    """Reads saved Pokemon Showdown teams out of Chrome's HTML5 local storage.

    play.pokemonshowdown.com stores teams under the ``showdown_teams`` key of
    a SQLite-backed localstorage file inside the Chrome profile directory.
    """

    def __init__(self, cookie_location=None, browser_type='chrome'):
        """Initializes the reader.

        Args:
            cookie_location: Directory containing Chrome's "Local Storage"
                files; any '~' is expanded. Defaults to the platform-specific
                Chrome profile location.
            browser_type: Browser the storage belongs to. Only Chrome layouts
                are currently handled; stored for future use.
        """
        if cookie_location is None:
            # Fall back to the default Chrome location for this platform.
            # (Previously both arguments were required, which made the
            # __main__ block below crash with a TypeError.)
            cookie_location = self._get_cookie_location()
        self.cookie_location = cookie_location
        self.browser_type = browser_type
        self._expand_tilde()
        self.filename = "http_play.pokemonshowdown.com_0.localstorage"

    def _get_cookie_location(self):
        """Returns the default Chrome local-storage directory for this OS.

        Raises:
            ValueError: If the platform is not recognized.
        """
        platform = sys.platform
        if platform == 'darwin':
            return "~/Library/Application Support/Google/Chrome/Default/Local Storage/"
        elif platform.startswith('linux'):  # 'linux2' on Py2, 'linux' on Py3
            return "~/.config/google-chrome/Default/Cookies"
        elif platform in ('win32', 'win64'):
            # The original literal contained a stray space before "Default",
            # producing a path that never exists on disk.
            return "~/AppData/Local/Google/Chrome/User Data/Default/Local Storage/"
        raise ValueError("Platform not recognized: %s" % platform)

    def _expand_tilde(self):
        """Expands '~' in the configured cookie location in place."""
        self.cookie_location = self.cookie_location.replace(
            '~', os.path.expanduser('~'))

    def _read_from_database(self):
        """Returns the raw 'showdown_teams' row from the localstorage DB."""
        conn = sqlite.connect(self.cookie_location + self.filename)
        try:
            conn.text_factory = str
            c = conn.cursor()
            c.execute(
                """SELECT value FROM ItemTable WHERE key='showdown_teams'""")
            return c.fetchone()
        finally:
            # The original leaked the connection; always close it.
            conn.close()

    def _get_json(self):
        """Decodes the raw database row into a Python object."""
        raw_json = str(self._read_from_database())
        # Strip the tuple/bytes repr wrapping and the embedded NULs left
        # over from the localstorage value encoding.
        raw_json = raw_json[3:-3]
        raw_json = raw_json.replace('\\x00', '')
        return json.loads(raw_json)

    def read_teams(self):
        """Yields (name, format, [pokemon, ...]) for each saved team."""
        # Imported lazily so this module can be loaded even when the sibling
        # `pokemon` module is not importable.
        from pokemon import pokemon
        decoded = self._get_json()
        for team in decoded:
            yield (team['name'], team['format'],
                   [pokemon(t) for t in team['team']])


if __name__ == '__main__':
    c = cookie_reader()
    for t in c.read_teams():
        print(t)
"""Main package of the python bindings for oDesk API.

For convenience some most commonly used functionalities are imported here,
so you can use::

    from odesk import Client
    from odesk import raise_http_error

"""

# Package version, exposed both as a constant and via get_version().
VERSION = '0.5.8'


def get_version():
    """Returns the package version string."""
    return VERSION


# NOTE(review): these imports sit below the definitions above, presumably so
# that odesk.client / odesk.http can themselves import names from this module
# without a circular-import error -- confirm before moving them to the top.
from odesk.client import Client
from odesk.http import raise_http_error


__all__ = ["get_version", "Client", "raise_http_error"]
"""Plugin to parse the OLECF summary/document summary information items.""" from plaso.lib import event from plaso.lib import eventdata from plaso.parsers.olecf_plugins import interface class OleCfSummaryInfoEvent(event.FiletimeEvent): """Convenience class for an OLECF Summary info event.""" DATA_TYPE = 'olecf:summary_info' def __init__(self, timestamp, usage, attributes): """Initializes the event. Args: timestamp: The FILETIME timestamp value. usage: The usage string, describing the timestamp value. attributes: A dict object containing all extracted attributes. """ super(OleCfSummaryInfoEvent, self).__init__( timestamp, usage) self.name = u'Summary Information' for attribute_name, attribute_value in attributes.iteritems(): setattr(self, attribute_name, attribute_value) class OleCfSummaryInfo(object): """An OLECF Summary Info object.""" _CLASS_IDENTIFIER = 'f29f85e0-4ff9-1068-ab91-08002b27b3d9' _PROPERTY_NAMES_INT32 = { 0x000e: 'number_of_pages', # PIDSI_PAGECOUNT 0x000f: 'number_of_words', # PIDSI_WORDCOUNT 0x0010: 'number_of_characters', # PIDSI_CHARCOUNT 0x0013: 'security', # PIDSI_SECURITY } _PROPERTY_NAMES_STRING = { 0x0002: 'title', # PIDSI_TITLE 0x0003: 'subject', # PIDSI_SUBJECT 0x0004: 'author', # PIDSI_AUTHOR 0x0005: 'keywords', # PIDSI_KEYWORDS 0x0006: 'comments', # PIDSI_COMMENTS 0x0007: 'template', # PIDSI_TEMPLATE 0x0008: 'last_saved_by', # PIDSI_LASTAUTHOR 0x0009: 'revision_number', # PIDSI_REVNUMBER 0x0012: 'application', # PIDSI_APPNAME } PIDSI_CODEPAGE = 0x0001 PIDSI_EDITTIME = 0x000a PIDSI_LASTPRINTED = 0x000b PIDSI_CREATE_DTM = 0x000c PIDSI_LASTSAVE_DTM = 0x000d PIDSI_THUMBNAIL = 0x0011 def __init__(self, olecf_item, root_creation_time, root_modification_time): """Initialize the OLECF summary object. Args: olecf_item: The OLECF item (instance of pyolecf.property_set_stream). root_creation_time: The creation time of the root OLECF item. root_modification_time: The modification time of the root OLECF item. 
"""
    super(OleCfSummaryInfo, self).__init__()
    self._root_creation_time = root_creation_time
    self._root_modification_time = root_modification_time
    self._events = []
    # Attributes shared by every event this object later yields.
    self.attributes = {}

    self._InitFromPropertySet(olecf_item.set)

  def _InitFromPropertySet(self, property_set):
    """Initializes the object from a property set.

    Args:
      property_set: The OLECF property set (pyolecf.property_set).
    """
    # Combine the values of multiple property sections
    # but do not override properties that are already set.
    for property_section in property_set.sections:
      # Only sections carrying the summary-information class GUID matter.
      if property_section.class_identifier != self._CLASS_IDENTIFIER:
        continue
      for property_value in property_section.properties:
        self._InitFromPropertyValue(property_value)

  def _InitFromPropertyValue(self, property_value):
    """Initializes the object from a property value.

    Args:
      property_value: The OLECF property value (pyolecf.property_value).
    """
    # Dispatch on the OLE variant type of the value.
    if property_value.type == interface.OleDefinitions.VT_I2:
      self._InitFromPropertyValueTypeInt16(property_value)

    elif property_value.type == interface.OleDefinitions.VT_I4:
      self._InitFromPropertyValueTypeInt32(property_value)

    elif (property_value.type == interface.OleDefinitions.VT_LPSTR or
          property_value.type == interface.OleDefinitions.VT_LPWSTR):
      self._InitFromPropertyValueTypeString(property_value)

    elif property_value.type == interface.OleDefinitions.VT_FILETIME:
      self._InitFromPropertyValueTypeFiletime(property_value)

  def _InitFromPropertyValueTypeInt16(self, property_value):
    """Initializes the object from a 16-bit int type property value.

    Args:
      property_value: The OLECF property value (pyolecf.property_value
                      of type VT_I2).
    """
    if property_value.identifier == self.PIDSI_CODEPAGE:
      # TODO: can the codepage vary per property section?
      # And is it needed to interpret the ASCII strings?
      # codepage = property_value.data_as_integer
      pass

  def _InitFromPropertyValueTypeInt32(self, property_value):
    """Initializes the object from a 32-bit int type property value.
Args: property_value: The OLECF property value (pyolecf.property_value of type VT_I4). """ property_name = self._PROPERTY_NAMES_INT32.get( property_value.identifier, None) if property_name and not property_name in self.attributes: self.attributes[property_name] = property_value.data_as_integer def _InitFromPropertyValueTypeString(self, property_value): """Initializes the object from a string type property value. Args: property_value: The OLECF property value (pyolecf.property_value of type VT_LPSTR or VT_LPWSTR). """ property_name = self._PROPERTY_NAMES_STRING.get( property_value.identifier, None) if property_name and not property_name in self.attributes: self.attributes[property_name] = property_value.data_as_string def _InitFromPropertyValueTypeFiletime(self, property_value): """Initializes the object from a filetime type property value. Args: property_value: The OLECF property value (pyolecf.property_value of type VT_FILETIME). """ if property_value.identifier == self.PIDSI_LASTPRINTED: self._events.append( (property_value.data_as_integer, 'Document Last Printed Time')) elif property_value.identifier == self.PIDSI_CREATE_DTM: self._events.append( (property_value.data_as_integer, 'Document Creation Time')) elif property_value.identifier == self.PIDSI_LASTSAVE_DTM: self._events.append( (property_value.data_as_integer, 'Document Last Save Time')) elif property_value.identifier == self.PIDSI_EDITTIME: # property_name = 'total_edit_time' # TODO: handle duration. 
pass def GetEventObjects(self): """Yields extracted event objects.""" for timestamp, timestamp_description in self._events: yield OleCfSummaryInfoEvent( timestamp, timestamp_description, self.attributes) if self._root_creation_time: yield OleCfSummaryInfoEvent( self._root_creation_time, eventdata.EventTimestamp.CREATION_TIME, self.attributes) if self._root_modification_time: yield OleCfSummaryInfoEvent( self._root_modification_time, eventdata.EventTimestamp.MODIFICATION_TIME, self.attributes) class OleCfDocumentSummaryInfoEvent(event.FiletimeEvent): """Convenience class for an OLECF Document Summary info event.""" DATA_TYPE = 'olecf:document_summary_info' _CLASS_IDENTIFIER = 'd5cdd502-2e9c-101b-9397-08002b2cf9ae' _PROPERTY_NAMES_BOOL = { 0x0013: 'shared_document', # PIDDSI_SHAREDDOC } _PROPERTY_NAMES_INT32 = { 0x0004: 'number_of_bytes', # PIDDSI_BYTECOUNT 0x0005: 'number_of_lines', # PIDDSI_LINECOUNT 0x0006: 'number_of_paragraphs', # PIDDSI_PARCOUNT 0x0007: 'number_of_slides', # PIDDSI_SLIDECOUNT 0x0008: 'number_of_notes', # PIDDSI_NOTECOUNT 0x0009: 'number_of_hidden_slides', # PIDDSI_HIDDENCOUNT 0x000a: 'number_of_clips', # PIDDSI_MMCLIPCOUNT 0x0011: 'number_of_characters_with_white_space', # PIDDSI_CCHWITHSPACES 0x0017: 'application_version', # PIDDSI_VERSION } _PROPERTY_NAMES_STRING = { 0x000e: 'manager', # PIDDSI_MANAGER 0x000f: 'company', # PIDDSI_COMPANY 0x001a: 'content_type', # PIDDSI_CONTENTTYPE 0x001b: 'content_status', # PIDDSI_CONTENTSTATUS 0x001c: 'language', # PIDDSI_LANGUAGE 0x001d: 'document_version', # PIDDSI_DOCVERSION } PIDDSI_CODEPAGE = 0x0001 PIDDSI_CATEGORY = 0x0002 PIDDSI_PRESFORMAT = 0x0003 PIDDSI_SCALE = 0x000b PIDDSI_HEADINGPAIR = 0x000c PIDDSI_DOCPARTS = 0x000d PIDDSI_LINKSDIRTY = 0x0010 PIDDSI_VERSION = 0x0017 def __init__(self, timestamp, usage, olecf_item): """Initializes the event. Args: timestamp: The FILETIME timestamp value. usage: The usage string, describing the timestamp value. 
olecf_item: The OLECF item (pyolecf.property_set_stream). """ super(OleCfDocumentSummaryInfoEvent, self).__init__( timestamp, usage) self.name = u'Document Summary Information' self._InitFromPropertySet(olecf_item.set) def _InitFromPropertySet(self, property_set): """Initializes the event from a property set. Args: property_set: The OLECF property set (pyolecf.property_set). """ # Combine the values of multiple property sections # but do not override properties that are already set. for property_section in property_set.sections: if property_section.class_identifier != self._CLASS_IDENTIFIER: continue for property_value in property_section.properties: self._InitFromPropertyValue(property_value) def _InitFromPropertyValue(self, property_value): """Initializes the event from a property value. Args: property_value: The OLECF property value (pyolecf.property_value). """ if property_value.type == interface.OleDefinitions.VT_I2: self._InitFromPropertyValueTypeInt16(property_value) elif property_value.type == interface.OleDefinitions.VT_I4: self._InitFromPropertyValueTypeInt32(property_value) elif property_value.type == interface.OleDefinitions.VT_BOOL: self._InitFromPropertyValueTypeBool(property_value) elif (property_value.type == interface.OleDefinitions.VT_LPSTR or property_value.type == interface.OleDefinitions.VT_LPWSTR): self._InitFromPropertyValueTypeString(property_value) def _InitFromPropertyValueTypeInt16(self, property_value): """Initializes the event from a 16-bit int type property value. Args: property_value: The OLECF property value (pyolecf.property_value of type VT_I2). """ if property_value.identifier == self.PIDDSI_CODEPAGE: # TODO: can the codepage vary per property section? # And is it needed to interpret the ASCII strings? # codepage = property_value.data_as_integer pass def _InitFromPropertyValueTypeInt32(self, property_value): """Initializes the event from a 32-bit int type property value. 
Args: property_value: The OLECF property value (pyolecf.property_value of type VT_I4). """ property_name = self._PROPERTY_NAMES_INT32.get( property_value.identifier, None) # The application version consists of 2 16-bit values that make up # the version number. Where the upper 16-bit is the major number # and the lower 16-bit the minor number. if property_value.identifier == self.PIDDSI_VERSION: application_version = property_value.data_as_integer setattr(self, property_name, u'{0:d}.{1:d}'.format( application_version >> 16, application_version & 0xffff)) elif property_name and not hasattr(self, property_name): setattr(self, property_name, property_value.data_as_integer) def _InitFromPropertyValueTypeBool(self, property_value): """Initializes the event from a boolean type property value. Args: property_value: The OLECF property value (pyolecf.property_value of type VT_BOOL). """ property_name = self._PROPERTY_NAMES_BOOL.get( property_value.identifier, None) if property_name and not hasattr(self, property_name): setattr(self, property_name, property_value.data_as_boolean) def _InitFromPropertyValueTypeString(self, property_value): """Initializes the event from a string type property value. Args: property_value: The OLECF property value (pyolecf.property_value of type VT_LPSTR or VT_LPWSTR). """ property_name = self._PROPERTY_NAMES_STRING.get( property_value.identifier, None) if property_name and not hasattr(self, property_name): setattr(self, property_name, property_value.data_as_string) class DocumentSummaryPlugin(interface.OlecfPlugin): """Plugin that parses DocumentSummary information from an OLECF file.""" NAME = 'olecf_document_summary' REQUIRED_ITEMS = frozenset(['\005DocumentSummaryInformation']) def GetEntries(self, root_item, items, **unused_kwargs): """Generate event based on the document summary item. Args: root_item: The root item of the OLECF file. item_names: A list of all items discovered in the root. 
Yields: Event objects (instance of OleCfDocumentSummaryInfoEvent). """ creation_time, modification_time = self.GetTimestamps(root_item) for item in items: if creation_time: yield OleCfDocumentSummaryInfoEvent( creation_time, eventdata.EventTimestamp.CREATION_TIME, item) if modification_time: yield OleCfDocumentSummaryInfoEvent( modification_time, eventdata.EventTimestamp.MODIFICATION_TIME, item) class SummaryInfoPlugin(interface.OlecfPlugin): """Plugin that parses the SummaryInformation item from an OLECF file.""" NAME = 'olecf_summary' REQUIRED_ITEMS = frozenset(['\005SummaryInformation']) def GetEntries(self, root_item, items, **unused_kwargs): """Generate event based on the summary information item. Args: root_item: The root item of the OLECF file. item_names: A list of all items discovered in the root. Yields: Event objects (instance of OleCfSummaryInfoEvent). """ root_creation_time, root_modification_time = self.GetTimestamps(root_item) for item in items: summary_information_object = OleCfSummaryInfo( item, root_creation_time, root_modification_time) for event_object in summary_information_object.GetEventObjects(): yield event_object
from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'TwitterRecentEntriesItem' db.create_table(u'contentitem_fluentcms_twitterfeed_twitterrecententriesitem', ( (u'contentitem_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fluent_contents.ContentItem'], unique=True, primary_key=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)), ('twitter_user', self.gf('django.db.models.fields.CharField')(max_length=75)), ('amount', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=5)), ('widget_id', self.gf('django.db.models.fields.CharField')(max_length=75)), ('footer_text', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)), ('include_replies', self.gf('django.db.models.fields.BooleanField')(default=False)), )) db.send_create_signal(u'fluentcms_twitterfeed', ['TwitterRecentEntriesItem']) # Adding model 'TwitterSearchItem' db.create_table(u'contentitem_fluentcms_twitterfeed_twittersearchitem', ( (u'contentitem_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fluent_contents.ContentItem'], unique=True, primary_key=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)), ('query', self.gf('django.db.models.fields.CharField')(default='', max_length=200)), ('amount', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=5)), ('widget_id', self.gf('django.db.models.fields.CharField')(max_length=75)), ('footer_text', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)), ('include_replies', self.gf('django.db.models.fields.BooleanField')(default=False)), )) db.send_create_signal(u'fluentcms_twitterfeed', ['TwitterSearchItem']) def backwards(self, orm): # Deleting model 'TwitterRecentEntriesItem' 
db.delete_table(u'contentitem_fluentcms_twitterfeed_twitterrecententriesitem') # Deleting model 'TwitterSearchItem' db.delete_table(u'contentitem_fluentcms_twitterfeed_twittersearchitem') models = { u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'fluent_contents.contentitem': { 'Meta': {'ordering': "('placeholder', 'sort_order')", 'object_name': 'ContentItem'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '15', 'db_index': 'True'}), 'parent_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), 'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contentitems'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['fluent_contents.Placeholder']"}), 'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_fluent_contents.contentitem_set+'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}), 'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'}) }, 'fluent_contents.placeholder': { 'Meta': {'unique_together': "(('parent_type', 'parent_id', 'slot'),)", 'object_name': 'Placeholder'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'parent_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'parent_type': 
('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}), 'role': ('django.db.models.fields.CharField', [], {'default': "'m'", 'max_length': '1'}), 'slot': ('django.db.models.fields.SlugField', [], {'max_length': '50'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}) }, u'fluentcms_twitterfeed.twitterrecententriesitem': { 'Meta': {'ordering': "('placeholder', 'sort_order')", 'object_name': 'TwitterRecentEntriesItem', 'db_table': "u'contentitem_fluentcms_twitterfeed_twitterrecententriesitem'", '_ormbases': ['fluent_contents.ContentItem']}, 'amount': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '5'}), u'contentitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fluent_contents.ContentItem']", 'unique': 'True', 'primary_key': 'True'}), 'footer_text': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}), 'include_replies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}), 'twitter_user': ('django.db.models.fields.CharField', [], {'max_length': '75'}), 'widget_id': ('django.db.models.fields.CharField', [], {'max_length': '75'}) }, u'fluentcms_twitterfeed.twittersearchitem': { 'Meta': {'ordering': "('placeholder', 'sort_order')", 'object_name': 'TwitterSearchItem', 'db_table': "u'contentitem_fluentcms_twitterfeed_twittersearchitem'", '_ormbases': ['fluent_contents.ContentItem']}, 'amount': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '5'}), u'contentitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fluent_contents.ContentItem']", 'unique': 'True', 'primary_key': 'True'}), 'footer_text': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}), 'include_replies': 
('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'query': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}), 'widget_id': ('django.db.models.fields.CharField', [], {'max_length': '75'}) } } complete_apps = ['fluentcms_twitterfeed']
'''------------------------------------------------------------------------- Copyright IBM Corp. 2015, 2015 All Rights Reserved Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and Limitations under the License. -------------------------------------------------------------------------''' import StringIO import os import socket from dragon.engine.clients import Clients from dragon.openstack.common import log as logging from dragon.openstack.common import exception from dragon.workload_policy.actions import action from dragon.workload_policy.actions import action_execution as ae from oslo.config import cfg import dragon.openstack.common.uuidutils as uuidutils from dragon.template.heat_template import InstanceResource LOG = logging.getLogger(__name__) instance_image_opts = [ cfg.IntOpt('backup_image_object_size', default=52428800, help='The size in bytes of instance image objects') ] CONF = cfg.CONF CONF.register_opts(instance_image_opts) class InstanceImageAction(action.Action): is_global = True # store in global container def __init__(self, context): self.clients = Clients(context) self._image_id = None self._name = None self._resource_id = None self.data_block_size_bytes = CONF.backup_image_object_size # super(action.Action, self).__init__(workload_action_excution_id) def pre_protect(self, cntx, workload_action_excution_id, resource_id): pass def post_protect(self, cntx, workload_action_excution_id, resource_id): pass def protect(self, cntx, workload_action_excution_id, resource_id, container_name): LOG.debug("protecting instance (image 
copied) %s" % (resource_id)) instance = self.clients.nova().servers.get(resource_id) self._image_id = instance.image['id'] self._name = instance.name self._resource_id = resource_id instance_copy_execution =\ ae.ActionExecution(workload_action_excution_id, resource_id, self.id) result = self._imagecopy(cntx, instance, container_name, instance_copy_execution) return result def generate_template(self, context, template_gen): instance = InstanceResource(self._image_id, self._name, resource_id=self._resource_id) template_gen.add_instance(instance) def failover(self, context, resource_id, resource_data, container_name): return self._import_from_swift(context, resource_id, resource_data, container_name) def _import_from_swift(self, context, resource_id, resource_data, container_name): LOG.debug("resource %s data %s container %s" % (resource_id, resource_data, container_name)) swift_client = self.clients.swift() data_chunks = resource_data["chunks"] image_id = resource_data["image_id"] image_response_data = StringIO.StringIO() for chunk in range(data_chunks): swift_meta, image_response =\ swift_client.get_object(container_name, image_id + "_" + str(chunk)) image_response_data.write(image_response) try: image = {} image['name'] = resource_data["meta"]["name"] image['size'] = resource_data["meta"]["size"] image['disk_format'] = resource_data["meta"]["disk_format"] image['container_format'] =\ resource_data["meta"]["container_format"] image['id'] = uuidutils.generate_uuid() image_response_data.seek(0, os.SEEK_SET) self.clients.glance().images.create(data=image_response_data, **image) self._image_id = image['id'] self._name = resource_data["instance_name"] return True # except ImageAlreadyPresentException: except Exception, e: LOG.error(e) return False def _imagecopy(self, context, instance, container_name, action_excution): backup_rec = {} action_excution.set_status(context, 'uploaded to swift') swift_conn = Clients(context).swift() headers = {'X-Container-Meta-dr_state': 
'processing'} image = self.clients.glance().images.get(self._image_id) # take the checksum as unique id global_container_image_id = image._info['checksum'] image_response = image.data() image_response_data = StringIO.StringIO() for chunk in image_response: image_response_data.write(chunk) image_response_data.seek(0, os.SEEK_SET) chunks = 0 while True: data = image_response_data.read(self.data_block_size_bytes) data_offset = image_response_data.tell() LOG.debug("uploading image offset %s chunks %s" % (data_offset, chunks)) if data == '': break try: swift_conn.put_object(container_name, global_container_image_id + "_" + str(chunks), data, content_length=len(data)) chunks += 1 except socket.error as err: dr_state = 'DR image backup failed' action_excution.set_status(context, dr_state) raise exception.SwiftConnectionFailed(reason=str(err)) dr_state = 'Protected' backup_rec["metadata"] = instance.metadata backup_rec["image_id"] = global_container_image_id backup_rec["instance_name"] = self._name backup_rec["meta"] = image.to_dict() backup_rec["chunks"] = chunks action_excution.set_status(context, dr_state) return dr_state, backup_rec
class InternalFlowException(Exception): pass class ReturnException(InternalFlowException): def __init__(self, value): self._value = value @property def value(self): return self._value class BreakException(InternalFlowException): pass class ContinueException(InternalFlowException): pass class DslInvalidOperationError(Exception): pass class NoMethodFound(Exception): def __init__(self, name): super(NoMethodFound, self).__init__('Method "%s" is not found' % name) class NoClassFound(Exception): def __init__(self, name): super(NoClassFound, self).__init__('Class "%s" is not found' % name) class NoPackageFound(Exception): def __init__(self, name): super(NoPackageFound, self).__init__( 'Package "%s" is not found' % name) class NoPackageForClassFound(Exception): def __init__(self, name): super(NoPackageForClassFound, self).__init__('Package for class "%s" ' 'is not found' % name) class NoObjectFoundError(Exception): def __init__(self, object_id): super(NoObjectFoundError, self).__init__( 'Object "%s" is not found in object store' % object_id) class AmbiguousMethodName(Exception): def __init__(self, name): super(AmbiguousMethodName, self).__init__( 'Found more that one method "%s"' % name) class DslContractSyntaxError(Exception): pass class ContractViolationException(Exception): pass class ValueIsMissingError(Exception): pass class DslSyntaxError(Exception): pass class PropertyAccessError(Exception): pass class AmbiguousPropertyNameError(PropertyAccessError): def __init__(self, name): super(AmbiguousPropertyNameError, self).__init__( 'Found more that one property "%s"' % name) class NoWriteAccess(PropertyAccessError): def __init__(self, name): super(NoWriteAccess, self).__init__( 'Property "%s" is immutable to the caller' % name) class NoWriteAccessError(PropertyAccessError): def __init__(self, name): super(NoWriteAccessError, self).__init__( 'Property "%s" is immutable to the caller' % name) class PropertyReadError(PropertyAccessError): def __init__(self, name, 
murano_class): super(PropertyAccessError, self).__init__( 'Property "%s" in class "%s" cannot be read' % (name, murano_class.name)) class PropertyWriteError(PropertyAccessError): def __init__(self, name, murano_class): super(PropertyAccessError, self).__init__( 'Property "%s" in class "%s" cannot be written' % (name, murano_class.name)) class UninitializedPropertyAccessError(PropertyAccessError): def __init__(self, name, murano_class): super(PropertyAccessError, self).__init__( 'Access to uninitialized property ' '"%s" in class "%s" is forbidden' % (name, murano_class.name))
"""Tests for learn.io.graph_io.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import random import tempfile import tensorflow as tf from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.platform import gfile _VALID_FILE_PATTERN = "VALID" _FILE_NAMES = [b"abc", b"def", b"ghi", b"jkl"] _INVALID_FILE_PATTERN = "INVALID" class GraphIOTest(tf.test.TestCase): def _mock_glob(self, pattern): if _VALID_FILE_PATTERN == pattern: return _FILE_NAMES self.assertEqual(_INVALID_FILE_PATTERN, pattern) return [] def setUp(self): super(GraphIOTest, self).setUp() random.seed(42) self._orig_glob = gfile.Glob gfile.Glob = self._mock_glob def tearDown(self): gfile.Glob = self._orig_glob super(GraphIOTest, self).tearDown() def test_dequeue_batch_value_errors(self): default_batch_size = 17 queue_capacity = 1234 num_threads = 3 name = "my_batch" self.assertRaisesRegexp( ValueError, "No files match", tf.contrib.learn.io.read_batch_examples, _INVALID_FILE_PATTERN, default_batch_size, tf.TFRecordReader, False, num_epochs=None, queue_capacity=queue_capacity, num_threads=num_threads, name=name) self.assertRaisesRegexp( ValueError, "Invalid batch_size", tf.contrib.learn.io.read_batch_examples, _VALID_FILE_PATTERN, None, tf.TFRecordReader, False, num_epochs=None, queue_capacity=queue_capacity, num_threads=num_threads, name=name) self.assertRaisesRegexp( ValueError, "Invalid batch_size", tf.contrib.learn.io.read_batch_examples, _VALID_FILE_PATTERN, -1, tf.TFRecordReader, False, num_epochs=None, queue_capacity=queue_capacity, num_threads=num_threads, name=name) self.assertRaisesRegexp( ValueError, "Invalid queue_capacity", tf.contrib.learn.io.read_batch_examples, _VALID_FILE_PATTERN, default_batch_size, tf.TFRecordReader, False, num_epochs=None, queue_capacity=None, num_threads=num_threads, name=name) self.assertRaisesRegexp( ValueError, "Invalid num_threads", 
tf.contrib.learn.io.read_batch_examples, _VALID_FILE_PATTERN, default_batch_size, tf.TFRecordReader, False, num_epochs=None, queue_capacity=queue_capacity, num_threads=None, name=name) self.assertRaisesRegexp( ValueError, "Invalid num_threads", tf.contrib.learn.io.read_batch_examples, _VALID_FILE_PATTERN, default_batch_size, tf.TFRecordReader, False, num_epochs=None, queue_capacity=queue_capacity, num_threads=-1, name=name) self.assertRaisesRegexp( ValueError, "Invalid batch_size", tf.contrib.learn.io.read_batch_examples, _VALID_FILE_PATTERN, queue_capacity + 1, tf.TFRecordReader, False, num_epochs=None, queue_capacity=queue_capacity, num_threads=1, name=name) self.assertRaisesRegexp( ValueError, "Invalid num_epochs", tf.contrib.learn.io.read_batch_examples, _VALID_FILE_PATTERN, default_batch_size, tf.TFRecordReader, False, num_epochs=-1, queue_capacity=queue_capacity, num_threads=1, name=name) self.assertRaisesRegexp( ValueError, "Invalid read_batch_size", tf.contrib.learn.io.read_batch_examples, _VALID_FILE_PATTERN, default_batch_size, tf.TFRecordReader, False, num_epochs=None, queue_capacity=queue_capacity, num_threads=1, read_batch_size=0, name=name) def test_batch_record_features(self): batch_size = 17 queue_capacity = 1234 name = "my_batch" features = {"feature": tf.FixedLenFeature(shape=[0], dtype=tf.float32)} with tf.Graph().as_default() as g, self.test_session(graph=g) as sess: features = tf.contrib.learn.io.read_batch_record_features( _VALID_FILE_PATTERN, batch_size, features, randomize_input=False, queue_capacity=queue_capacity, reader_num_threads=2, parser_num_threads=2, name=name) self.assertEqual("%s/fifo_queue_1_Dequeue:0" % name, features["feature"].name) file_name_queue_name = "%s/file_name_queue" % name file_names_name = "%s/input" % file_name_queue_name example_queue_name = "%s/fifo_queue" % name parse_example_queue_name = "%s/fifo_queue" % name op_nodes = test_util.assert_ops_in_graph({ file_names_name: "Const", file_name_queue_name: 
"FIFOQueue", "%s/read/TFRecordReader" % name: "TFRecordReader", example_queue_name: "FIFOQueue", parse_example_queue_name: "FIFOQueue", name: "QueueDequeueMany" }, g) self.assertAllEqual(_FILE_NAMES, sess.run(["%s:0" % file_names_name])[0]) self.assertEqual( queue_capacity, op_nodes[example_queue_name].attr["capacity"].i) def test_one_epoch(self): batch_size = 17 queue_capacity = 1234 name = "my_batch" with tf.Graph().as_default() as g, self.test_session(graph=g) as sess: inputs = tf.contrib.learn.io.read_batch_examples( _VALID_FILE_PATTERN, batch_size, reader=tf.TFRecordReader, randomize_input=True, num_epochs=1, queue_capacity=queue_capacity, name=name) self.assertEqual("%s:1" % name, inputs.name) file_name_queue_name = "%s/file_name_queue" % name file_name_queue_limit_name = ( "%s/limit_epochs/epochs" % file_name_queue_name) file_names_name = "%s/input" % file_name_queue_name example_queue_name = "%s/random_shuffle_queue" % name op_nodes = test_util.assert_ops_in_graph({ file_names_name: "Const", file_name_queue_name: "FIFOQueue", "%s/read/TFRecordReader" % name: "TFRecordReader", example_queue_name: "RandomShuffleQueue", name: "QueueDequeueUpTo", file_name_queue_limit_name: "Variable" }, g) self.assertEqual( set(_FILE_NAMES), set(sess.run(["%s:0" % file_names_name])[0])) self.assertEqual( queue_capacity, op_nodes[example_queue_name].attr["capacity"].i) def test_batch_randomized(self): batch_size = 17 queue_capacity = 1234 name = "my_batch" with tf.Graph().as_default() as g, self.test_session(graph=g) as sess: inputs = tf.contrib.learn.io.read_batch_examples( _VALID_FILE_PATTERN, batch_size, reader=tf.TFRecordReader, randomize_input=True, queue_capacity=queue_capacity, name=name) self.assertEqual("%s:1" % name, inputs.name) file_name_queue_name = "%s/file_name_queue" % name file_names_name = "%s/input" % file_name_queue_name example_queue_name = "%s/random_shuffle_queue" % name op_nodes = test_util.assert_ops_in_graph({ file_names_name: "Const", 
file_name_queue_name: "FIFOQueue", "%s/read/TFRecordReader" % name: "TFRecordReader", example_queue_name: "RandomShuffleQueue", name: "QueueDequeueMany" }, g) self.assertEqual( set(_FILE_NAMES), set(sess.run(["%s:0" % file_names_name])[0])) self.assertEqual( queue_capacity, op_nodes[example_queue_name].attr["capacity"].i) def _create_temp_file(self, lines): tempdir = tempfile.mkdtemp() filename = os.path.join(tempdir, "temp_file") gfile.Open(filename, "w").write(lines) return filename def _create_sorted_temp_files(self, lines_list): tempdir = tempfile.mkdtemp() filenames = [] for i, lines in enumerate(lines_list): filename = os.path.join(tempdir, "temp_file%05d" % i) gfile.Open(filename, "w").write(lines) filenames.append(filename) return filenames def test_read_text_lines(self): gfile.Glob = self._orig_glob filename = self._create_temp_file("ABC\nDEF\nGHK\n") batch_size = 1 queue_capacity = 5 name = "my_batch" with tf.Graph().as_default() as g, self.test_session(graph=g) as session: inputs = tf.contrib.learn.io.read_batch_examples( filename, batch_size, reader=tf.TextLineReader, randomize_input=False, num_epochs=1, queue_capacity=queue_capacity, name=name) session.run(tf.initialize_local_variables()) coord = tf.train.Coordinator() tf.train.start_queue_runners(session, coord=coord) self.assertAllEqual(session.run(inputs), [b"ABC"]) self.assertAllEqual(session.run(inputs), [b"DEF"]) self.assertAllEqual(session.run(inputs), [b"GHK"]) with self.assertRaises(errors.OutOfRangeError): session.run(inputs) coord.request_stop() def test_read_text_lines_multifile(self): gfile.Glob = self._orig_glob filenames = self._create_sorted_temp_files(["ABC\n", "DEF\nGHK\n"]) batch_size = 1 queue_capacity = 5 name = "my_batch" with tf.Graph().as_default() as g, self.test_session(graph=g) as session: inputs = tf.contrib.learn.io.read_batch_examples( filenames, batch_size, reader=tf.TextLineReader, randomize_input=False, num_epochs=1, queue_capacity=queue_capacity, name=name) 
session.run(tf.initialize_local_variables()) coord = tf.train.Coordinator() tf.train.start_queue_runners(session, coord=coord) self.assertAllEqual(session.run(inputs), [b"ABC"]) self.assertAllEqual(session.run(inputs), [b"DEF"]) self.assertAllEqual(session.run(inputs), [b"GHK"]) with self.assertRaises(errors.OutOfRangeError): session.run(inputs) coord.request_stop() def test_batch_text_lines(self): gfile.Glob = self._orig_glob filename = self._create_temp_file("A\nB\nC\nD\nE\n") batch_size = 3 queue_capacity = 10 name = "my_batch" with tf.Graph().as_default() as g, self.test_session(graph=g) as session: inputs = tf.contrib.learn.io.read_batch_examples( [filename], batch_size, reader=tf.TextLineReader, randomize_input=False, num_epochs=1, queue_capacity=queue_capacity, read_batch_size=10, name=name) session.run(tf.initialize_local_variables()) coord = tf.train.Coordinator() tf.train.start_queue_runners(session, coord=coord) self.assertAllEqual(session.run(inputs), [b"A", b"B", b"C"]) self.assertAllEqual(session.run(inputs), [b"D", b"E"]) with self.assertRaises(errors.OutOfRangeError): session.run(inputs) coord.request_stop() def test_keyed_read_text_lines(self): gfile.Glob = self._orig_glob filename = self._create_temp_file("ABC\nDEF\nGHK\n") batch_size = 1 queue_capacity = 5 name = "my_batch" with tf.Graph().as_default() as g, self.test_session(graph=g) as session: keys, inputs = tf.contrib.learn.io.read_keyed_batch_examples( filename, batch_size, reader=tf.TextLineReader, randomize_input=False, num_epochs=1, queue_capacity=queue_capacity, name=name) session.run(tf.initialize_local_variables()) coord = tf.train.Coordinator() tf.train.start_queue_runners(session, coord=coord) self.assertAllEqual(session.run([keys, inputs]), [[filename.encode("utf-8") + b":1"], [b"ABC"]]) self.assertAllEqual(session.run([keys, inputs]), [[filename.encode("utf-8") + b":2"], [b"DEF"]]) self.assertAllEqual(session.run([keys, inputs]), [[filename.encode("utf-8") + b":3"], [b"GHK"]]) with 
self.assertRaises(errors.OutOfRangeError): session.run(inputs) coord.request_stop() def test_keyed_parse_json(self): gfile.Glob = self._orig_glob filename = self._create_temp_file( '{"features": {"feature": {"age": {"int64_list": {"value": [0]}}}}}\n' '{"features": {"feature": {"age": {"int64_list": {"value": [1]}}}}}\n' '{"features": {"feature": {"age": {"int64_list": {"value": [2]}}}}}\n' ) batch_size = 1 queue_capacity = 5 name = "my_batch" with tf.Graph().as_default() as g, self.test_session(graph=g) as session: dtypes = {"age": tf.FixedLenFeature([1], tf.int64)} parse_fn = lambda example: tf.parse_single_example( # pylint: disable=g-long-lambda tf.decode_json_example(example), dtypes) keys, inputs = tf.contrib.learn.io.read_keyed_batch_examples( filename, batch_size, reader=tf.TextLineReader, randomize_input=False, num_epochs=1, queue_capacity=queue_capacity, parse_fn=parse_fn, name=name) session.run(tf.initialize_local_variables()) coord = tf.train.Coordinator() tf.train.start_queue_runners(session, coord=coord) key, age = session.run([keys, inputs["age"]]) self.assertAllEqual(age, [[0]]) self.assertAllEqual(key, [filename.encode("utf-8") + b":1"]) key, age = session.run([keys, inputs["age"]]) self.assertAllEqual(age, [[1]]) self.assertAllEqual(key, [filename.encode("utf-8") + b":2"]) key, age = session.run([keys, inputs["age"]]) self.assertAllEqual(age, [[2]]) self.assertAllEqual(key, [filename.encode("utf-8") + b":3"]) with self.assertRaises(errors.OutOfRangeError): session.run(inputs) coord.request_stop() if __name__ == "__main__": tf.test.main()
import asyncio

from aio_pika import connect, IncomingMessage, ExchangeType

loop = asyncio.get_event_loop()


async def on_message(message: IncomingMessage):
    """Print one broadcast log message, acknowledging it via process()."""
    async with message.process():
        print("[x] %r" % message.body)


async def main():
    """Connect to RabbitMQ and subscribe an exclusive queue to 'logs'."""
    # Open the AMQP connection on the module-level event loop.
    connection = await connect(
        "amqp://guest:guest@localhost/", loop=loop
    )

    # One channel, at most one unacknowledged message in flight.
    channel = await connection.channel()
    await channel.set_qos(prefetch_count=1)

    # Fan-out exchange: every bound queue receives every message.
    exchange = await channel.declare_exchange(
        "logs", ExchangeType.FANOUT
    )

    # Exclusive, server-named queue bound to that exchange.
    queue = await channel.declare_queue(exclusive=True)
    await queue.bind(exchange)

    # Route deliveries to on_message until the loop is stopped.
    await queue.consume(on_message)


if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    loop.create_task(main())

    # Never-ending loop: callbacks fire as messages arrive.
    print(" [*] Waiting for logs. To exit press CTRL+C")
    loop.run_forever()
from django.shortcuts import render, redirect
from django.http import HttpResponseRedirect

from .models import SupportProject


def index(request):
    """Show the support-project overview, or jump straight to a lone project."""
    projects = SupportProject.objects.all()

    # With exactly one project there is nothing to choose from: redirect.
    if projects.count() == 1:
        return HttpResponseRedirect(projects.first().project.get_absolute_url())

    # Otherwise render the index template with every project.
    return render(request, 'support/index.html', {'sps': projects})
import time
import shutil

from configobj import ConfigObj

NOVA_API_CONF = "/etc/nova/api-paste.ini"

OS_API_SEC = "composite:openstack_compute_api_v2"
DR_FILTER_TARGET_KEY = "keystone_nolimit"
DR_FILTER_TARGET_KEY_VALUE = "compute_req_id faultwrap sizelimit " \
                             "authtoken keystonecontext drfilter " \
                             "osapi_compute_app_v2"

DR_SEC = "filter:drfilter"
DR_KEY = "paste.filter_factory"
DR_KEY_VALUE = "drfilter.urlforwarding:url_forwarding_factory"

# Keep a timestamped backup of the original file before touching it.
now = time.strftime('%Y%m%d%H%M%S')
target = "%s.%s.bak" % (NOVA_API_CONF, now)
shutil.copyfile(NOVA_API_CONF, target)

# Insert drfilter into the v2 compute pipeline and register its factory.
conf = ConfigObj(NOVA_API_CONF)
conf[OS_API_SEC][DR_FILTER_TARGET_KEY] = DR_FILTER_TARGET_KEY_VALUE
conf[DR_SEC] = {}
conf[DR_SEC][DR_KEY] = DR_KEY_VALUE
conf.write()

# Echo the resulting configuration so the operator can verify the change.
for sec in conf:
    print(sec)
    for key in conf[sec]:
        print("\t" + key + " = " + conf[sec][key])
from absl import app
from absl.testing import absltest

from grr_response_server.databases import db_time_test
from grr_response_server.databases import mysql_test
from grr.test_lib import test_lib


class MysqlClientsTest(
    db_time_test.DatabaseTimeTestMixin,
    mysql_test.MysqlTestBase,
    absltest.TestCase,
):
  """Runs the shared database-time test mixin against the MySQL backend."""


if __name__ == "__main__":
  app.run(test_lib.main)
from __future__ import absolute_import, division, print_function, unicode_literals import time import logging import ujson as json from elasticsearch import Elasticsearch from elasticsearch.client import IndicesClient from elasticsearch.exceptions import ConnectionTimeout from .config import config from .es_mappings import ES_MAPPINGS, ES_SIMILARITIES class ElasticsearchBulkIndexer(object): """ Bulk indexer for Elasticsearch """ servers = { "docs": [config["ELASTICSEARCHDOCS"]], "text": [config["ELASTICSEARCHTEXT"]] } def __init__(self, index_name, batch_size=500): self.index_name = index_name self.buffer = [] self.batch_size = batch_size self.total_size = 0 self.connected = False self.client = None def connect(self): """ Establish the ES connection if not already done """ if self.connected: return self.connected = True self.client = Elasticsearch(self.servers[self.index_name], timeout=60) def index(self, _id, hit): """ Queue one document for indexing. """ if not self.connected: self.connect() # https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html self.buffer.append('{"index":{"_id":"%s"}}\n%s\n' % ( _id, json.dumps(hit) # pylint: disable=no-member )) if len(self.buffer) >= self.batch_size: self.flush() def empty(self): """ Empty the ES index. Dangerous operation! 
""" if config["ENV"] not in ("local", "ci"): raise Exception("empty() not allowed in env %s" % config["ENV"]) if self.indices().exists(index=self.index_name): self.indices().delete(index=self.index_name) def refresh(self): """ Sends a "refresh" to the ES index, forcing the actual indexing of what was sent up until now """ if not self.connected: return if config["ENV"] not in ("local", "ci"): raise Exception("refresh() not allowed in env %s" % config["ENV"]) self.indices().refresh(index=self.index_name) def flush(self, retries=10): """ Sends the current indexing batch to ES """ if len(self.buffer) == 0: return if not self.connected: self.connect() self.total_size += len(self.buffer) logging.debug( "ES: Flushing %s docs to index=%s (total: %s)", len(self.buffer), self.index_name, self.total_size ) try: self.bulk_index() except ConnectionTimeout, e: if retries == 0: raise e time.sleep(60) return self.flush(retries=retries - 1) self.buffer = [] def bulk_index(self): """ Indexes the current buffer to Elasticsearch, bypassing the bulk() helper for performance """ connection = self.client.transport.get_connection() bulk_url = "/%s/page/_bulk" % self.index_name body = "".join(self.buffer) # TODO retries # status, headers, data status, _, _ = connection.perform_request("POST", bulk_url, body=body) if status != 200: raise Exception("Elasticsearch returned status=%s" % status) # TODO: look for errors there? # parsed = json.loads(data) def indices(self): """ Returns an elasticsearch.client.IndicesClient instance """ if not self.connected: self.connect() return IndicesClient(self.client) def create(self, empty=False): """ Creates the ES index """ if empty: self.empty() mappings = ES_MAPPINGS[self.index_name] self.indices().create(index=self.index_name, body={ "settings": { # TODO: this configuration should be set somewhere else! (cosr-ops?) 
"number_of_shards": 5, "number_of_replicas": 0, # In prod we don't refresh manually so this is the only setting # that will make ES periodically refresh to avoid storing only in temporary files # as we index "refresh_interval": "60s", "similarity": ES_SIMILARITIES }, "mappings": mappings })
import urllib.parse

from openstack import exceptions
from openstack import resource


class Resource(resource.Resource):
    # Service-specific base resource: customizes name/id lookup and
    # link-based pagination on top of openstack.resource.Resource.

    @classmethod
    def find(cls, session, name_or_id, ignore_missing=True, **params):
        """Find a resource by its name or id.

        :param session: The session to use for making this request.
        :type session: :class:`~keystoneauth1.adapter.Adapter`
        :param name_or_id: This resource's identifier, if needed by
            the request. The default is ``None``.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.ResourceNotFound` will be raised
            when the resource does not exist.
            When set to ``True``, None will be returned when attempting to
            find a nonexistent resource.
        :param dict params: Any additional parameters to be passed into
            underlying methods, such as to
            :meth:`~openstack.resource.Resource.existing` in order to pass on
            URI parameters.

        :return: The :class:`Resource` object matching the given name or id
            or None if nothing matches.
        :raises: :class:`openstack.exceptions.DuplicateResource` if more
            than one resource is found for this request.
        :raises: :class:`openstack.exceptions.ResourceNotFound` if nothing
            is found and ignore_missing is ``False``.
        """
        session = cls._get_session(session)

        # Try to short-circuit by looking directly for a matching ID.
        try:
            match = cls.existing(
                id=name_or_id,
                connection=session._get_connection(),
                **params)
            return match.fetch(session)
        except exceptions.SDKException:
            # DNS may return 400 when we try to do GET with name
            pass

        # Fall back to listing, filtered by name when the resource type
        # supports a 'name' query parameter.
        if ('name' in cls._query_mapping._mapping.keys()
                and 'name' not in params):
            params['name'] = name_or_id

        data = cls.list(session, **params)

        result = cls._get_one_match(name_or_id, data)
        if result is not None:
            return result

        if ignore_missing:
            return None
        raise exceptions.ResourceNotFound(
            "No %s found for %s" % (cls.__name__, name_or_id))

    @classmethod
    def _get_next_link(cls, uri, response, data, marker, limit, total_yielded):
        """Compute the URL and query params for the next page of results.

        Returns a ``(next_link, params)`` tuple; ``next_link`` is ``None``
        when pagination is exhausted.
        """
        next_link = None
        params = {}

        if isinstance(data, dict):
            links = data.get('links')
            if links:
                next_link = links.get('next')

            total = data.get('metadata', {}).get('total_count')
            if total:
                # We have a kill switch
                total_count = int(total)
                if total_count <= total_yielded:
                    return None, params

        # Parse params from Link (next page URL) into params.
        # This prevents duplication of query parameters that with large
        # number of pages result in HTTP 414 error eventually.
        if next_link:
            parts = urllib.parse.urlparse(next_link)
            query_params = urllib.parse.parse_qs(parts.query)
            params.update(query_params)
            next_link = urllib.parse.urljoin(next_link, parts.path)

        # If we still have no link, and limit was given and is non-zero,
        # and the number of records yielded equals the limit, then the user
        # is playing pagination ball so we should go ahead and try once more.
        if not next_link and limit:
            next_link = uri
            params['marker'] = marker
            params['limit'] = limit

        return next_link, params
"""This file contains the tests for the generic text parser."""

import os
import unittest

from dfvfs.lib import definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import resolver as path_spec_resolver

import pyparsing

from plaso.lib import errors
from plaso.lib import event
from plaso.lib import eventdata
from plaso.lib import lexer
from plaso.lib import parser
from plaso.lib import text_parser


class TestTextEvent(event.TextEvent):
  """Test text event."""
  DATA_TYPE = 'test:parser:text'


class TestTextEventFormatter(eventdata.EventFormatter):
  """Test text event formatter."""
  DATA_TYPE = 'test:parser:text'
  FORMAT_STRING = u'{body}'
  SOURCE_LONG = 'Test Text Parser'


class TestTextParser(text_parser.SlowLexicalTextParser):
  """Implement a text parser object that can successfully parse a text file.

  To be able to achieve that one function has to be implemented, the ParseDate
  one.
  """
  NAME = 'test_text'

  # Lexer state machine: INITIAL -> TIME -> STRING_HOST -> STRING, and back
  # to INITIAL when the newline ends the message.
  tokens = [
      lexer.Token('INITIAL', r'^([\d\/]+) ', 'SetDate', 'TIME'),
      lexer.Token('TIME', r'([0-9:\.]+) ', 'SetTime', 'STRING_HOST'),
      lexer.Token('STRING_HOST', r'([^\-]+)- ', 'ParseStringHost', 'STRING'),
      lexer.Token('STRING', '([^\n]+)', 'ParseString', ''),
      lexer.Token('STRING', '\n', 'ParseMessage', 'INITIAL')]

  def ParseStringHost(self, match, **_):
    """Stores the "user:host" capture as username/hostname attributes."""
    user, host = match.group(1).split(':')
    self.attributes['hostname'] = host
    self.attributes['username'] = user

  def SetDate(self, match, **_):
    """Splits a "month/day/year" capture into integer date attributes."""
    month, day, year = match.group(1).split('/')
    self.attributes['imonth'] = int(month)
    self.attributes['iyear'] = int(year)
    self.attributes['iday'] = int(day)

  def Scan(self, unused_file_entry):
    """No-op: scanning is not needed by this test parser."""
    pass

  def CreateEvent(self, timestamp, offset, attributes):
    """Builds a TestTextEvent carrying the parsed attributes and offset."""
    event_object = TestTextEvent(timestamp, attributes)
    event_object.offset = offset
    return event_object


class BaseParserTest(unittest.TestCase):
  """An unit test for the plaso parser library."""

  def testParserNotImplemented(self):
    """Test the base class Parse function."""
    # BaseParser is abstract; direct instantiation must raise TypeError.
    self.assertRaises(TypeError, parser.BaseParser)


class TextParserTest(unittest.TestCase):
  """An unit test for the plaso parser library."""

  _TEST_DATA_PATH = os.path.join(os.getcwd(), 'test_data')

  # Show full diff results, part of TestCase so does not follow our naming
  # conventions.
  maxDiff = None

  def _GetTestFilePath(self, path_segments):
    """Retrieves the path of a test file relative to the test data directory.

    Args:
      path_segments: the path segments inside the test data directory.

    Returns:
      A path of the test file.
    """
    # Note that we need to pass the individual path segments to os.path.join
    # and not a list.
    return os.path.join(self._TEST_DATA_PATH, *path_segments)

  def _GetTestFileEntry(self, path):
    """Retrieves the test file entry.

    Args:
      path: the path of the test file.

    Returns:
      The test file entry (instance of dfvfs.FileEntry).
    """
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=path)
    return path_spec_resolver.Resolver.OpenFileEntry(path_spec)

  def setUp(self):
    """Creates the parser under test with an empty preprocess object."""
    pre_obj = event.PreprocessObject()
    self._parser = TestTextParser(pre_obj, None)

  def testTextParserFail(self):
    """Test a text parser that will not match against content."""
    test_file = self._GetTestFilePath(['text_parser', 'test1.txt'])
    file_entry = self._GetTestFileEntry(test_file)
    text_generator = self._parser.Parse(file_entry)

    # Parse() is a generator; the failure only surfaces when it is consumed.
    self.assertRaises(errors.UnableToParseFile, list, text_generator)

  def testTextParserSuccess(self):
    """Test a text parser that will match against content."""
    test_file = self._GetTestFilePath(['text_parser', 'test2.txt'])
    file_entry = self._GetTestFileEntry(test_file)
    text_generator = self._parser.Parse(file_entry)

    first_entry = text_generator.next()
    second_entry = text_generator.next()

    msg1, _ = eventdata.EventFormatterManager.GetMessageStrings(first_entry)
    self.assertEquals(first_entry.timestamp, 1293859395000000)
    self.assertEquals(msg1, 'first line.')
    self.assertEquals(first_entry.hostname, 'myhost')
    self.assertEquals(first_entry.username, 'myuser')

    msg2, _ = eventdata.EventFormatterManager.GetMessageStrings(second_entry)
    self.assertEquals(second_entry.timestamp, 693604686000000)
    self.assertEquals(msg2, 'second line.')
    self.assertEquals(second_entry.hostname, 'myhost')
    self.assertEquals(second_entry.username, 'myuser')


class PyParserTest(unittest.TestCase):
  """Few unit tests for the pyparsing unit."""

  def testPyConstantIPv4(self):
    """Run few tests to make sure the constants are working."""
    self.assertTrue(self._CheckIPv4('123.51.234.52'))
    self.assertTrue(self._CheckIPv4('255.254.23.1'))
    self.assertTrue(self._CheckIPv4('1.1.34.2'))
    self.assertFalse(self._CheckIPv4('1.1.34.258'))
    self.assertFalse(self._CheckIPv4('a.1.34.258'))
    self.assertFalse(self._CheckIPv4('.34.258'))
    self.assertFalse(self._CheckIPv4('34.258'))
    self.assertFalse(self._CheckIPv4('10.52.34.258'))

  def testPyConstantOctet(self):
    """Octets above 255 or containing non-digits must fail to parse."""
    with self.assertRaises(pyparsing.ParseException):
      text_parser.PyparsingConstants.IPV4_OCTET.parseString('526')

    with self.assertRaises(pyparsing.ParseException):
      text_parser.PyparsingConstants.IPV4_OCTET.parseString('1026')

    with self.assertRaises(pyparsing.ParseException):
      text_parser.PyparsingConstants.IPV4_OCTET.parseString(
          'a9', parseAll=True)

  def testPyConstantOthers(self):
    """MONTH accepts three-letter abbreviations; COMMENT strips the hash."""
    with self.assertRaises(pyparsing.ParseException):
      text_parser.PyparsingConstants.MONTH.parseString('MMo')
    with self.assertRaises(pyparsing.ParseException):
      text_parser.PyparsingConstants.MONTH.parseString('M')
    with self.assertRaises(pyparsing.ParseException):
      text_parser.PyparsingConstants.MONTH.parseString('March', parseAll=True)

    self.assertTrue(text_parser.PyparsingConstants.MONTH.parseString('Jan'))

    line = '# This is a comment.'
    parsed_line = text_parser.PyparsingConstants.COMMENT_LINE_HASH.parseString(
        line)
    self.assertEquals(parsed_line[-1], 'This is a comment.')
    self.assertEquals(len(parsed_line), 2)

  def _CheckIPv4(self, ip_address):
    # TODO: Add a similar IPv6 check.
    try:
      text_parser.PyparsingConstants.IPV4_ADDRESS.parseString(ip_address)
      return True
    except pyparsing.ParseException:
      return False


if __name__ == '__main__':
  unittest.main()
from manilaclient import api_versions
from manilaclient import base
from manilaclient.openstack.common.apiclient import base as common_base


class ShareInstance(common_base.Resource):
    """A share is an extra block level storage to the OpenStack instances."""
    def __repr__(self):
        return "<Share: %s>" % self.id

    def force_delete(self):
        """Delete the specified share ignoring its current state."""
        self.manager.force_delete(self)

    def reset_state(self, state):
        """Update the share with the provided state."""
        self.manager.reset_state(self, state)


class ShareInstanceManager(base.ManagerWithFind):
    """Manage :class:`ShareInstances` resources."""
    resource_class = ShareInstance

    @api_versions.wraps("2.3")
    def get(self, instance):
        """Get a share instance.

        :param instance: either share object or text with its ID.
        :rtype: :class:`ShareInstance`
        """
        share_id = common_base.getid(instance)
        return self._get("/share_instances/%s" % share_id, "share_instance")

    @api_versions.wraps("2.3")
    def list(self):
        """List all share instances."""
        return self._list('/share_instances', 'share_instances')

    def _action(self, action, instance, info=None, **kwargs):
        """Perform a share instance 'action'.

        :param action: text with action name.
        :param instance: either share object or text with its ID.
        :param info: dict with data for specified 'action'.
        :param kwargs: dict with data to be provided for action hooks.
        """
        body = {action: info}
        self.run_hooks('modify_body_for_action', body, **kwargs)
        url = '/share_instances/%s/action' % common_base.getid(instance)
        return self.api.client.post(url, body=body)

    def _do_force_delete(self, instance, action_name="force_delete"):
        """Delete a share instance forcibly - share status will be avoided.

        :param instance: either share instance object or text with its ID.
        """
        return self._action(action_name, common_base.getid(instance))

    # Versioned pair: api_versions.wraps dispatches to the right definition
    # depending on the negotiated microversion (pre-2.7 uses "os-" names).
    @api_versions.wraps("2.3", "2.6")
    def force_delete(self, instance):
        return self._do_force_delete(instance, "os-force_delete")

    @api_versions.wraps("2.7")  # noqa
    def force_delete(self, instance):
        return self._do_force_delete(instance, "force_delete")

    def _do_reset_state(self, instance, state, action_name):
        """Update the provided share instance with the provided state.

        :param instance: either share object or text with its ID.
        :param state: text with new state to set for share.
        """
        return self._action(action_name, instance, {"status": state})

    @api_versions.wraps("2.3", "2.6")
    def reset_state(self, instance, state):
        return self._do_reset_state(instance, state, "os-reset_status")

    @api_versions.wraps("2.7")  # noqa
    def reset_state(self, instance, state):
        return self._do_reset_state(instance, state, "reset_status")
"""Create / interact with a batch of updates / deletes."""

from gcloud._localstack import _LocalStack
from gcloud.datastore import _implicit_environ
from gcloud.datastore import helpers
from gcloud.datastore.key import _dataset_ids_equal
from gcloud.datastore import _datastore_v1_pb2 as datastore_pb


# Stack of currently-active batches / transactions; the innermost is on top.
_BATCHES = _LocalStack()


class Batch(object):
    """An abstraction representing a collected group of updates / deletes.

    Used to build up a bulk mutation.  For example, the following snippet of
    code will put the two ``save`` operations and the delete operation into
    the same mutation, and send them to the server in a single API request::

      >>> from gcloud.datastore.batch import Batch
      >>> batch = Batch()
      >>> batch.put(entity1)
      >>> batch.put(entity2)
      >>> batch.delete(key3)
      >>> batch.commit()

    You can also use a batch as a context manager, in which case the
    ``commit`` will be called automatically if its block exits without
    raising an exception::

      >>> with Batch() as batch:
      ...     batch.put(entity1)
      ...     batch.put(entity2)
      ...     batch.delete(key3)

    By default, no updates will be sent if the block exits with an error::

      >>> from gcloud import datastore
      >>> dataset = datastore.get_dataset('dataset-id')
      >>> with Batch() as batch:
      ...   do_some_work(batch)
      ...   raise Exception()  # rolls back
    """

    def __init__(self, dataset_id=None, connection=None):
        """Construct a batch.

        :type dataset_id: :class:`str`.
        :param dataset_id: The ID of the dataset.

        :type connection: :class:`gcloud.datastore.connection.Connection`
        :param connection: The connection used to connect to datastore.

        :raises: :class:`ValueError` if either a connection or dataset ID
                 are not set.
        """
        # Fall back to the implicit environment when either is omitted.
        self._connection = connection or _implicit_environ.CONNECTION
        self._dataset_id = dataset_id or _implicit_environ.DATASET_ID

        if self._connection is None or self._dataset_id is None:
            raise ValueError('A batch must have a connection and '
                             'a dataset ID set.')

        self._mutation = datastore_pb.Mutation()
        self._auto_id_entities = []

    @staticmethod
    def current():
        """Return the topmost batch / transaction, or None."""
        return _BATCHES.top

    @property
    def dataset_id(self):
        """Getter for dataset ID in which the batch will run.

        :rtype: :class:`str`
        :returns: The dataset ID in which the batch will run.
        """
        return self._dataset_id

    @property
    def connection(self):
        """Getter for connection over which the batch will run.

        :rtype: :class:`gcloud.datastore.connection.Connection`
        :returns: The connection over which the batch will run.
        """
        return self._connection

    @property
    def mutation(self):
        """Getter for the current mutation.

        Every batch is committed with a single Mutation representing the
        'work' to be done as part of the batch.  Inside a batch, calling
        ``batch.put()`` with an entity, or ``batch.delete`` with a key,
        builds up the mutation.  This getter returns the Mutation protobuf
        that has been built-up so far.

        :rtype: :class:`gcloud.datastore._datastore_v1_pb2.Mutation`
        :returns: The Mutation protobuf to be sent in the commit request.
        """
        return self._mutation

    def add_auto_id_entity(self, entity):
        """Adds an entity to the list of entities to update with IDs.

        When an entity has a partial key, calling ``save()`` adds an
        insert_auto_id entry in the mutation.  In order to make sure we
        update the Entity once the transaction is committed, we need to
        keep track of which entities to update (and the order is
        important).

        When you call ``save()`` on an entity inside a transaction, if the
        entity has a partial key, it adds itself to the list of entities to
        be updated once the transaction is committed by calling this method.

        :type entity: :class:`gcloud.datastore.entity.Entity`
        :param entity: The entity to be updated with a completed key.

        :raises: ValueError if the entity's key is already completed.
        """
        if not entity.key.is_partial:
            raise ValueError("Entity has a completed key")

        self._auto_id_entities.append(entity)

    def put(self, entity):
        """Remember an entity's state to be saved during ``commit``.

        .. note::
           Any existing properties for the entity will be replaced by those
           currently set on this instance.  Already-stored properties which
           do not correspond to keys set on this instance will be removed
           from the datastore.

        .. note::
           Property values which are "text" ('unicode' in Python2, 'str' in
           Python3) map to 'string_value' in the datastore;  values which are
           "bytes" ('str' in Python2, 'bytes' in Python3) map to 'blob_value'.

        :type entity: :class:`gcloud.datastore.entity.Entity`
        :param entity: the entity to be saved.

        :raises: ValueError if entity has no key assigned, or if the key's
                 ``dataset_id`` does not match ours.
        """
        if entity.key is None:
            raise ValueError("Entity must have a key")

        if not _dataset_ids_equal(self._dataset_id, entity.key.dataset_id):
            raise ValueError("Key must be from same dataset as batch")

        _assign_entity_to_mutation(
            self.mutation, entity, self._auto_id_entities)

    def delete(self, key):
        """Remember a key to be deleted during ``commit``.

        :type key: :class:`gcloud.datastore.key.Key`
        :param key: the key to be deleted.

        :raises: ValueError if key is not complete, or if the key's
                 ``dataset_id`` does not match ours.
        """
        if key.is_partial:
            raise ValueError("Key must be complete")

        if not _dataset_ids_equal(self._dataset_id, key.dataset_id):
            raise ValueError("Key must be from same dataset as batch")

        key_pb = key.to_protobuf()
        helpers._add_keys_to_request(self.mutation.delete, [key_pb])

    def begin(self):
        """No-op

        Overridden by :class:`gcloud.datastore.transaction.Transaction`.
        """
        pass

    def commit(self):
        """Commits the batch.

        This is called automatically upon exiting a with statement,
        however it can be called explicitly if you don't want to use a
        context manager.
        """
        response = self.connection.commit(self._dataset_id, self.mutation)
        # If the back-end returns without error, we are guaranteed that
        # the response's 'insert_auto_id_key' will match (length and order)
        # the request's 'insert_auto_id` entities, which are derived from
        # our '_auto_id_entities' (no partial success).
        for new_key_pb, entity in zip(response.insert_auto_id_key,
                                      self._auto_id_entities):
            new_id = new_key_pb.path_element[-1].id
            entity.key = entity.key.completed_key(new_id)

    def rollback(self):
        """No-op

        Overridden by :class:`gcloud.datastore.transaction.Transaction`.
        """
        pass

    def __enter__(self):
        # Make this batch discoverable via Batch.current() while active.
        _BATCHES.push(self)
        self.begin()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            if exc_type is None:
                self.commit()
            else:
                self.rollback()
        finally:
            _BATCHES.pop()


def _assign_entity_to_mutation(mutation_pb, entity, auto_id_entities):
    """Copy ``entity`` into appropriate slot of ``mutation_pb``.

    If ``entity.key`` is incomplete, append ``entity`` to
    ``auto_id_entities`` for later fixup during ``commit``.

    Helper method for ``Batch.put``.

    :type mutation_pb: :class:`gcloud.datastore._datastore_v1_pb2.Mutation`
    :param mutation_pb: the Mutation protobuf for the batch / transaction.

    :type entity: :class:`gcloud.datastore.entity.Entity`
    :param entity: the entity being updated within the batch / transaction.

    :type auto_id_entities: list of :class:`gcloud.datastore.entity.Entity`
    :param auto_id_entities: entities with partial keys, to be fixed up
                             during commit.
    """
    auto_id = entity.key.is_partial

    key_pb = entity.key.to_protobuf()
    key_pb = helpers._prepare_key_for_request(key_pb)

    if auto_id:
        insert = mutation_pb.insert_auto_id.add()
        auto_id_entities.append(entity)
    else:
        # We use ``upsert`` for entities with completed keys, rather than
        # ``insert`` or ``update``, in order not to create race conditions
        # based on prior existence / removal of the entity.
        insert = mutation_pb.upsert.add()

    insert.key.CopyFrom(key_pb)

    for name, value in entity.items():

        value_is_list = isinstance(value, list)
        if value_is_list and len(value) == 0:
            continue

        prop = insert.property.add()
        # Set the name of the property.
        prop.name = name

        # Set the appropriate value.
        helpers._set_protobuf_value(prop.value, value)

        if name in entity.exclude_from_indexes:
            if not value_is_list:
                prop.value.indexed = False

            for sub_value in prop.value.list_value:
                sub_value.indexed = False
'''
@author: sheng
@license:
'''

import unittest

from meridian.acupoints import zhimai44


class TestZhimai44Functions(unittest.TestCase):
    """Placeholder test case for the zhimai44 acupoint module."""

    def setUp(self):
        # No fixtures needed yet.
        pass

    def test_xxx(self):
        # TODO: add real assertions against zhimai44.
        pass


if __name__ == '__main__':
    unittest.main()
from collections import OrderedDict
import os

import fixtures

from jacket.compute import exception
from jacket.compute import test
from jacket.tests.compute.unit.virt.disk.vfs import fakeguestfs
from jacket.compute.virt.disk import api as diskapi
from jacket.compute.virt.disk.vfs import guestfs as vfsguestfs
from jacket.compute.virt.image import model as imgmodel


class VirtDiskTest(test.NoDBTestCase):
    """Tests disk-image data injection against a faked guestfs backend."""

    def setUp(self):
        super(VirtDiskTest, self).setUp()
        # Replace the real guestfs module with the in-memory fake so the
        # injection helpers operate on a dict of files, not a real image.
        self.useFixture(
            fixtures.MonkeyPatch('compute.virt.disk.vfs.guestfs.guestfs',
                                 fakeguestfs))
        self.file = imgmodel.LocalFileImage("/some/file",
                                            imgmodel.FORMAT_QCOW2)

    def test_inject_data(self):
        """inject_data succeeds/fails per injection kind and 'mandatory'."""
        self.assertTrue(diskapi.inject_data(
            imgmodel.LocalFileImage("/some/file", imgmodel.FORMAT_QCOW2)))

        self.assertTrue(diskapi.inject_data(
            imgmodel.LocalFileImage("/some/file", imgmodel.FORMAT_RAW),
            mandatory=('files',)))

        self.assertTrue(diskapi.inject_data(
            imgmodel.LocalFileImage("/some/file", imgmodel.FORMAT_RAW),
            key="mysshkey",
            mandatory=('key',)))

        os_name = os.name
        os.name = 'nt'  # Cause password injection to fail
        # Mandatory password injection failing must raise ...
        self.assertRaises(exception.NovaException,
                          diskapi.inject_data,
                          imgmodel.LocalFileImage("/some/file",
                                                  imgmodel.FORMAT_RAW),
                          admin_password="p",
                          mandatory=('admin_password',))
        # ... while a non-mandatory failure just returns False.
        self.assertFalse(diskapi.inject_data(
            imgmodel.LocalFileImage("/some/file", imgmodel.FORMAT_RAW),
            admin_password="p"))
        os.name = os_name

        self.assertFalse(diskapi.inject_data(
            imgmodel.LocalFileImage("/some/fail/file", imgmodel.FORMAT_RAW),
            key="mysshkey"))

    def test_inject_data_key(self):
        """Key injection creates /root/.ssh and appends authorized_keys."""
        vfs = vfsguestfs.VFSGuestFS(self.file)
        vfs.setup()

        diskapi._inject_key_into_fs("mysshkey", vfs)

        self.assertIn("/root/.ssh", vfs.handle.files)
        self.assertEqual(vfs.handle.files["/root/.ssh"],
                         {'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0o700})
        self.assertIn("/root/.ssh/authorized_keys", vfs.handle.files)
        self.assertEqual(vfs.handle.files["/root/.ssh/authorized_keys"],
                         {'isdir': False,
                          'content': "Hello World\n# The following ssh " +
                                     "key was injected by Nova\nmysshkey\n",
                          'gid': 100,
                          'uid': 100,
                          'mode': 0o600})

        vfs.teardown()

    def test_inject_data_key_with_selinux(self):
        """With /etc/selinux present, rc.local gains a restorecon stanza."""
        vfs = vfsguestfs.VFSGuestFS(self.file)
        vfs.setup()

        vfs.make_path("etc/selinux")
        vfs.make_path("etc/rc.d")
        diskapi._inject_key_into_fs("mysshkey", vfs)

        self.assertIn("/etc/rc.d/rc.local", vfs.handle.files)
        self.assertEqual(vfs.handle.files["/etc/rc.d/rc.local"],
                         {'isdir': False,
                          'content': "Hello World#!/bin/sh\n# Added by " +
                                     "Nova to ensure injected ssh keys " +
                                     "have the right context\nrestorecon " +
                                     "-RF root/.ssh 2>/dev/null || :\n",
                          'gid': 100,
                          'uid': 100,
                          'mode': 0o700})

        self.assertIn("/root/.ssh", vfs.handle.files)
        self.assertEqual(vfs.handle.files["/root/.ssh"],
                         {'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0o700})
        self.assertIn("/root/.ssh/authorized_keys", vfs.handle.files)
        self.assertEqual(vfs.handle.files["/root/.ssh/authorized_keys"],
                         {'isdir': False,
                          'content': "Hello World\n# The following ssh " +
                                     "key was injected by Nova\nmysshkey\n",
                          'gid': 100,
                          'uid': 100,
                          'mode': 0o600})

        vfs.teardown()

    def test_inject_data_key_with_selinux_append_with_newline(self):
        """An existing rc.local gets the stanza appended on a new line."""
        vfs = vfsguestfs.VFSGuestFS(self.file)
        vfs.setup()

        vfs.replace_file("/etc/rc.d/rc.local", "#!/bin/sh\necho done")
        vfs.make_path("etc/selinux")
        vfs.make_path("etc/rc.d")
        diskapi._inject_key_into_fs("mysshkey", vfs)

        self.assertIn("/etc/rc.d/rc.local", vfs.handle.files)
        self.assertEqual(vfs.handle.files["/etc/rc.d/rc.local"],
                         {'isdir': False,
                          'content': "#!/bin/sh\necho done\n# Added "
                                     "by Nova to ensure injected ssh keys have "
                                     "the right context\nrestorecon -RF "
                                     "root/.ssh 2>/dev/null || :\n",
                          'gid': 100,
                          'uid': 100,
                          'mode': 0o700})
        vfs.teardown()

    def test_inject_net(self):
        """Network config is written verbatim to /etc/network/interfaces."""
        vfs = vfsguestfs.VFSGuestFS(self.file)
        vfs.setup()

        diskapi._inject_net_into_fs("mynetconfig", vfs)

        self.assertIn("/etc/network/interfaces", vfs.handle.files)
        self.assertEqual(vfs.handle.files["/etc/network/interfaces"],
                         {'content': 'mynetconfig',
                          'gid': 100,
                          'isdir': False,
                          'mode': 0o700,
                          'uid': 100})
        vfs.teardown()

    def test_inject_metadata(self):
        """Metadata is serialized to sorted JSON in /meta.js."""
        vfs = vfsguestfs.VFSGuestFS(self.file)
        vfs.setup()
        metadata = {"foo": "bar", "eek": "wizz"}
        metadata = OrderedDict(sorted(metadata.items()))
        diskapi._inject_metadata_into_fs(metadata, vfs)

        self.assertIn("/meta.js", vfs.handle.files)
        self.assertEqual({'content': '{"eek": "wizz", ' +
                                     '"foo": "bar"}',
                          'gid': 100,
                          'isdir': False,
                          'mode': 0o700,
                          'uid': 100},
                         vfs.handle.files["/meta.js"])
        vfs.teardown()

    def test_inject_admin_password(self):
        """Root's shadow entry is rewritten; passwd is left untouched."""
        vfs = vfsguestfs.VFSGuestFS(self.file)
        vfs.setup()

        def fake_salt():
            # Deterministic salt so the expected hash is stable.
            return "1234567890abcdef"

        self.stubs.Set(diskapi, '_generate_salt', fake_salt)

        vfs.handle.write("/etc/shadow",
                         "root:$1$12345678$xxxxx:14917:0:99999:7:::\n" +
                         "bin:*:14495:0:99999:7:::\n" +
                         "daemon:*:14495:0:99999:7:::\n")

        vfs.handle.write("/etc/passwd",
                         "root:x:0:0:root:/root:/bin/bash\n" +
                         "bin:x:1:1:bin:/bin:/sbin/nologin\n" +
                         "daemon:x:2:2:daemon:/sbin:/sbin/nologin\n")

        diskapi._inject_admin_password_into_fs("123456", vfs)

        self.assertEqual(vfs.handle.files["/etc/passwd"],
                         {'content': "root:x:0:0:root:/root:/bin/bash\n" +
                                     "bin:x:1:1:bin:/bin:/sbin/nologin\n" +
                                     "daemon:x:2:2:daemon:/sbin:" +
                                     "/sbin/nologin\n",
                          'gid': 100,
                          'isdir': False,
                          'mode': 0o700,
                          'uid': 100})
        shadow = vfs.handle.files["/etc/shadow"]

        # if the encrypted password is only 13 characters long, then
        # compute.virt.disk.api:_set_password fell back to DES.
        if len(shadow['content']) == 91:
            self.assertEqual(shadow,
                             {'content': "root:12tir.zIbWQ3c" +
                                         ":14917:0:99999:7:::\n" +
                                         "bin:*:14495:0:99999:7:::\n" +
                                         "daemon:*:14495:0:99999:7:::\n",
                              'gid': 100,
                              'isdir': False,
                              'mode': 0o700,
                              'uid': 100})
        else:
            self.assertEqual(shadow,
                             {'content': "root:$1$12345678$a4ge4d5iJ5vw" +
                                         "vbFS88TEN0:14917:0:99999:7:::\n" +
                                         "bin:*:14495:0:99999:7:::\n" +
                                         "daemon:*:14495:0:99999:7:::\n",
                              'gid': 100,
                              'isdir': False,
                              'mode': 0o700,
                              'uid': 100})
        vfs.teardown()

    def test_inject_files_into_fs(self):
        """Missing parent directories are created before writing the file."""
        vfs = vfsguestfs.VFSGuestFS(self.file)
        vfs.setup()

        diskapi._inject_files_into_fs([("/path/to/not/exists/file",
                                        "inject-file-contents")],
                                      vfs)

        self.assertIn("/path/to/not/exists", vfs.handle.files)
        shadow_dir = vfs.handle.files["/path/to/not/exists"]
        self.assertEqual(shadow_dir,
                         {"isdir": True,
                          "gid": 0,
                          "uid": 0,
                          "mode": 0o744})

        shadow_file = vfs.handle.files["/path/to/not/exists/file"]
        self.assertEqual(shadow_file,
                         {"isdir": False,
                          "content": "inject-file-contents",
                          "gid": 100,
                          "uid": 100,
                          "mode": 0o700})
        vfs.teardown()

    def test_inject_files_into_fs_dir_exists(self):
        """make_path is skipped when the target directory already exists."""
        vfs = vfsguestfs.VFSGuestFS(self.file)
        vfs.setup()

        called = {'make_path': False}

        def fake_has_file(*args, **kwargs):
            return True

        def fake_make_path(*args, **kwargs):
            called['make_path'] = True

        self.stubs.Set(vfs, 'has_file', fake_has_file)
        self.stubs.Set(vfs, 'make_path', fake_make_path)

        # test for already exists dir
        diskapi._inject_files_into_fs([("/path/to/exists/file",
                                        "inject-file-contents")],
                                      vfs)

        self.assertIn("/path/to/exists/file", vfs.handle.files)
        self.assertFalse(called['make_path'])

        # test for root dir
        diskapi._inject_files_into_fs([("/inject-file",
                                        "inject-file-contents")],
                                      vfs)

        self.assertIn("/inject-file", vfs.handle.files)
        self.assertFalse(called['make_path'])

        # test for null dir
        vfs.handle.files.pop("/inject-file")
        diskapi._inject_files_into_fs([("inject-file",
                                        "inject-file-contents")],
                                      vfs)

        self.assertIn("/inject-file", vfs.handle.files)
        self.assertFalse(called['make_path'])

        vfs.teardown()
from ajenti.api import * from ajenti.plugins import * info = PluginInfo( title='Hosts', icon='sitemap', dependencies=[ PluginDependency('main'), ], ) def init(): import main
import json import numpy as np import cPickle as pickle with open('../validation/v_xgboost_word_tfidf.csv') as train_file: content = train_file.readlines() testData = [] scores = [] element = content[1].strip("\r\n").split(",") for i in range(1, len(content)): element = content[i].strip("\r\n").split(",") testData.append([element[0],element[1]]) scores.append(float(element[2])) predictions = [] maxscore = max(scores) minscore = min(scores) for score in scores: predictions.append((score-minscore)/float(maxscore-minscore)) ypred = predictions with open('../validation/v_xgboost_word_tfidf_0-1.csv', 'w') as f1: f1.write('qid,uid,label\n') for i in range(0, len(ypred)): f1.write(testData[i][0]+','+testData[i][1]+','+str(ypred[i])+'\n')
"""List and compare most used OpenStack cloud resources.""" import argparse import json import subprocess import sys from rally.common.plugin import discover from rally import consts from rally import osclients class ResourceManager(object): REQUIRED_SERVICE = None REPR_KEYS = ("id", "name", "tenant_id", "zone", "zoneName", "pool") def __init__(self, clients): self.clients = clients def is_available(self): if self.REQUIRED_SERVICE: return self.REQUIRED_SERVICE in self.clients.services().values() return True @property def client(self): return getattr(self.clients, self.__class__.__name__.lower())() def get_resources(self): all_resources = [] cls = self.__class__.__name__.lower() for prop in dir(self): if not prop.startswith("list_"): continue f = getattr(self, prop) resources = f() or [] resource_name = prop[5:][:-1] for res in resources: res_repr = [] for key in self.REPR_KEYS + (resource_name,): if isinstance(res, dict): value = res.get(key) else: value = getattr(res, key, None) if value: res_repr.append("%s:%s" % (key, value)) if not res_repr: raise ValueError("Failed to represent resource %r" % res) all_resources.append( "%s %s %s" % (cls, resource_name, " ".join(res_repr))) return all_resources class Keystone(ResourceManager): def list_users(self): return self.client.users.list() def list_tenants(self): return self.client.tenants.list() def list_roles(self): return self.client.roles.list() class Nova(ResourceManager): def list_flavors(self): return self.client.flavors.list() def list_floating_ip_pools(self): return self.client.floating_ip_pools.list() def list_floating_ips(self): return self.client.floating_ips.list() def list_images(self): return self.client.images.list() def list_keypairs(self): return self.client.keypairs.list() def list_networks(self): return self.client.networks.list() def list_security_groups(self): return self.client.security_groups.list( search_opts={"all_tenants": True}) def list_servers(self): return self.client.servers.list( 
search_opts={"all_tenants": True}) def list_services(self): return self.client.services.list() def list_availability_zones(self): return self.client.availability_zones.list() class Neutron(ResourceManager): REQUIRED_SERVICE = consts.Service.NEUTRON def has_extension(self, name): extensions = self.client.list_extensions().get("extensions", []) return any(ext.get("alias") == name for ext in extensions) def list_networks(self): return self.client.list_networks()["networks"] def list_subnets(self): return self.client.list_subnets()["subnets"] def list_routers(self): return self.client.list_routers()["routers"] def list_ports(self): return self.client.list_ports()["ports"] def list_floatingips(self): return self.client.list_floatingips()["floatingips"] def list_security_groups(self): return self.client.list_security_groups()["security_groups"] def list_health_monitors(self): if self.has_extension("lbaas"): return self.client.list_health_monitors()["health_monitors"] def list_pools(self): if self.has_extension("lbaas"): return self.client.list_pools()["pools"] def list_vips(self): if self.has_extension("lbaas"): return self.client.list_vips()["vips"] class Glance(ResourceManager): def list_images(self): return self.client.images.list() class Heat(ResourceManager): REQUIRED_SERVICE = consts.Service.HEAT def list_resource_types(self): return self.client.resource_types.list() def list_stacks(self): return self.client.stacks.list() class Cinder(ResourceManager): def list_availability_zones(self): return self.client.availability_zones.list() def list_backups(self): return self.client.backups.list() def list_volume_snapshots(self): return self.client.volume_snapshots.list() def list_volume_types(self): return self.client.volume_types.list() def list_volumes(self): return self.client.volumes.list( search_opts={"all_tenants": True}) class CloudResources(object): """List and compare cloud resources. resources = CloudResources(auth_url=..., ...) 
saved_list = resources.list() # Do something with the cloud ... changes = resources.compare(saved_list) has_changed = any(changes) removed, added = changes """ def __init__(self, **kwargs): endpoint = osclients.objects.Endpoint(**kwargs) self.clients = osclients.Clients(endpoint) def _deduplicate(self, lst): """Change list duplicates to make all items unique. >>> resources._deduplicate(["a", "b", "c", "b", "b"]) >>> ['a', 'b', 'c', 'b (duplicate 1)', 'b (duplicate 2)' """ deduplicated_list = [] for value in lst: if value in deduplicated_list: ctr = 0 try_value = value while try_value in deduplicated_list: ctr += 1 try_value = "%s (duplicate %i)" % (value, ctr) value = try_value deduplicated_list.append(value) return deduplicated_list def list(self): managers_classes = discover.itersubclasses(ResourceManager) resources = [] for cls in managers_classes: manager = cls(self.clients) if manager.is_available(): resources.extend(manager.get_resources()) return sorted(self._deduplicate(resources)) def compare(self, with_list): saved_resources = set(with_list) current_resources = set(self.list()) removed = saved_resources - current_resources added = current_resources - saved_resources return sorted(list(removed)), sorted(list(added)) def main(): parser = argparse.ArgumentParser( description=("Save list of OpenStack cloud resources or compare " "with previously saved list.")) parser.add_argument("--credentials", type=argparse.FileType("r"), metavar="<path/to/credentials.json>", help="cloud credentials in JSON format") group = parser.add_mutually_exclusive_group(required=True) group.add_argument("--dump-list", type=argparse.FileType("w"), metavar="<path/to/output/list.json>", help="dump resources to given file in JSON format") group.add_argument("--compare-with-list", type=argparse.FileType("r"), metavar="<path/to/existent/list.json>", help=("compare current resources with a list from " "given JSON file")) args = parser.parse_args() if args.credentials: config = 
json.load(args.credentials) else: config = json.loads(subprocess.check_output(["rally", "deployment", "config"])) config.update(config.pop("admin")) del config["type"] resources = CloudResources(**config) if args.dump_list: resources_list = resources.list() json.dump(resources_list, args.dump_list, indent=2) elif args.compare_with_list: given_list = json.load(args.compare_with_list) changes = resources.compare(with_list=given_list) removed, added = changes sys.stdout.write( json.dumps({"removed": removed, "added": added}, indent=2)) if any(changes): return 0 # `1' will fail gate job return 0 if __name__ == "__main__": sys.exit(main())
import gc import enum import functools import os import os.path import abc import sys import textwrap import re import inspect import copy import contextlib import itertools import types import warnings from operator import attrgetter from datetime import datetime from collections import OrderedDict, ChainMap from collections.abc import Mapping from inspect import signature import IPython.display from devlib.collector.dmesg import KernelLogEntry from devlib import TargetStableError from lisa.analysis.tasks import TasksAnalysis from lisa.analysis.rta import RTAEventsAnalysis from lisa.trace import requires_events, TraceEventCheckerBase, AndTraceEventChecker from lisa.trace import Trace, TaskID from lisa.wlgen.rta import RTA, PeriodicWload, RTAPhase, leaf_precedence from lisa.target import Target from lisa.utils import ( Serializable, memoized, lru_memoized, ArtifactPath, non_recursive_property, update_wrapper_doc, ExekallTaggable, annotations_from_signature, get_sphinx_name, optional_kwargs, group_by_value, kwargs_dispatcher, dispatch_kwargs, Loggable, kwargs_forwarded_to, docstring_update, is_running_ipython, ) from lisa.datautils import df_filter_task_ids from lisa.trace import FtraceCollector, FtraceConf, DmesgCollector, ComposedCollector from lisa.conf import ( SimpleMultiSrcConf, KeyDesc, TopLevelKeyDesc, ) from lisa._generic import TypedList from lisa.pelt import pelt_settling_time def _nested_formatter(multiline): def sort_mapping(data): if isinstance(data, Mapping): # Ensure stable ordering of keys if possible try: data = OrderedDict(sorted(data.items())) except TypeError: data = data return data if multiline: def format_data(data, level=0): idt = '\n' + ' ' * 4 * level def indent(s): stripped = s.strip() if '\n' in stripped: return idt + stripped.replace('\n', idt) else: return stripped if isinstance(data, TestMetric): out = data.pretty_format(multiline=multiline) out = indent(out) if '\n' in out else out elif isinstance(data, Mapping): data = 
sort_mapping(data) body = '\n'.join( f'{key}: {format_data(data, level + 1)}' for key, data in data.items() ) out = indent(body) else: out = str(data) return out else: def format_data(data): # Handle recursive mappings, like metrics of AggregatedResultBundle if isinstance(data, Mapping): data = sort_mapping(data) return '{' + ', '.join( f'{key}={format_data(data)}' for key, data in data.items() ) + '}' else: return str(data) return format_data class TestMetric: """ A storage class for metrics used by tests :param data: The data to store. Can be any base type or dict(TestMetric) :param units: The data units :type units: str """ def __init__(self, data, units=None): self.data = data self.units = units def __str__(self): return self.pretty_format(multiline=False) def pretty_format(self, multiline=True): """ Pretty print the metrics. :param multiline: If ``True``, use a multiline format. :type multiline: bool """ format_data = _nested_formatter(multiline=multiline) result = format_data(self.data) if self.units: result += ' ' + self.units return result def __repr__(self): return f'{type(self).__name__}({self.data}, {self.units})' @enum.unique class Result(enum.Enum): """ A classification of a test result """ PASSED = 1 """ The test has passed """ FAILED = 2 """ The test has failed """ UNDECIDED = 3 """ The test data could not be used to decide between :attr:`PASSED` or :attr:`FAILED` """ SKIPPED = 4 """ The test does not make sense on this platform and should therefore be skipped. .. note:: :attr:`UNDECIDED` should be used when the data are inconclusive but the test still makes sense on the target. """ @property def lower_name(self): """Return the name in lower case""" return self.name.lower() class ResultBundleBase(Exception): """ Base class for all result bundles. .. note:: ``__init__`` is not provided as some classes uses properties to provide some of the attributes. """ def __bool__(self): """ ``True`` if the ``result`` is :attr:`Result.PASSED`, ``False`` otherwise. 
""" return self.result is Result.PASSED def __str__(self): return self.pretty_format(multiline=False) def pretty_format(self, multiline=True): format_data = _nested_formatter(multiline=multiline) metrics_str = format_data(self.metrics) if '\n' in metrics_str: idt = '\n' + ' ' * 4 metrics_str = metrics_str.replace('\n', idt) else: metrics_str = ': ' + metrics_str return self.result.name + metrics_str def _repr_pretty_(self, p, cycle): "Pretty print instances in Jupyter notebooks" p.text(self.pretty_format()) def add_metric(self, name, data, units=None): """ Lets you append several test :class:`TestMetric` to the bundle. :Parameters: :class:`TestMetric` parameters """ self.metrics[name] = TestMetric(data, units) def display_and_exit(self) -> type(None): print(f"Test result: {self}") if self: sys.exit(0) else: sys.exit(1) class ResultBundle(ResultBundleBase): """ Bundle for storing test results :param result: Indicates whether the associated test passed. It will also be used as the truth-value of a ResultBundle. :type result: :class:`Result` :param utc_datetime: UTC time at which the result was collected, or ``None`` to record the current datetime. :type utc_datetime: datetime.datetime :param context: Contextual information to attach to the bundle. Keep the content small, as size of :class:`ResultBundle` instances matters a lot for storing long test sessions results. :type context: dict(str, object) :class:`TestMetric` can be added to an instance of this class. This can make it easier for users of your tests to understand why a certain test passed or failed. For instance:: def test_is_noon(): now = time.localtime().tm_hour res = ResultBundle(Result.PASSED if now == 12 else Result.FAILED) res.add_metric("current time", now) return res >>> res_bundle = test_is_noon() >>> print(res_bundle.result.name) FAILED # At this point, the user can wonder why the test failed. 
# Metrics are here to help, and are printed along with the result: >>> print(res_bundle) FAILED: current time=11 """ def __init__(self, result, utc_datetime=None, context=None): self.result = result self.metrics = {} self.utc_datetime = utc_datetime or datetime.utcnow() self.context = context if context is not None else {} @classmethod def from_bool(cls, cond, *args, **kwargs): """ Alternate constructor where ``ResultBundle.result`` is determined from a bool """ result = Result.PASSED if cond else Result.FAILED return cls(result, *args, **kwargs) @classmethod def raise_skip(cls, msg, from_=None, **kwargs): """ Raise an :class:`ResultBundle` with the :attr:`Result.SKIPPED` result, thereby short-circuiting the rest of the test. :param msg: Reason why the test is skipped :type msg: str :param from_: Other exception that lead to the test being skipped. It will be used as the ``Y`` in ``raise X from Y``. :type from_: Exception or None This is typically used as a way to bail out while indicating to the user that the test has essentially been skipped because the target does not support what the test is testing. """ res = cls(Result.SKIPPED, **kwargs) res.add_metric('skipped-reason', msg) raise res from from_ class AggregatedResultBundle(ResultBundleBase): """ Aggregates many :class:`ResultBundle` into one. :param result_bundles: List of :class:`ResultBundle` to aggregate. :type result_bundles: list(ResultBundle) :param name_metric: Metric to use as the "name" of each result bundle. The value of that metric will be used as top-level key in the aggregated metrics. If not provided, the index in the ``result_bundles`` list will be used. :type name_metric: str :param result: Optionally, force the ``self.result`` attribute to that value. This is useful when the way of combining the result bundles is not the default one, without having to make a whole new subclass. :type result: Result :param context: Contextual information to attach to the bundle. 
Keep the content small, as size of :class:`ResultBundle` instances matters a lot for storing long test sessions results. :type context: dict(str, object) This is useful for some tests that are naturally decomposed in subtests. .. note:: Metrics of aggregated bundles will always be shown, but can be augmented with new metrics using the usual API. """ def __init__(self, result_bundles, name_metric=None, result=None, context=None): self.result_bundles = result_bundles self.name_metric = name_metric self.extra_metrics = {} self.extra_context = context if context is not None else {} self._forced_result = result @property def utc_datetime(self): """ Use the earliest ``utc_datetime`` among the aggregated bundles. """ return min( result_bundle.utc_datetime for result_bundle in self.result_bundles ) @property def context(self): """ Merge the context of all the aggregated bundles, with priority given to last in the list. """ # All writes will be done in that first layer bases = [self.extra_context] bases.extend( result_bundle.context for result_bundle in self.result_bundles ) return ChainMap(*bases) @property def result(self): forced_result = self._forced_result if forced_result is not None: return forced_result def predicate(combinator, result): return combinator( res_bundle.result is result for res_bundle in self.result_bundles ) if predicate(all, Result.UNDECIDED): return Result.UNDECIDED elif predicate(any, Result.FAILED): return Result.FAILED elif predicate(any, Result.PASSED): return Result.PASSED else: return Result.UNDECIDED @result.setter def _(self, result): self._forced_result = result @property def metrics(self): def get_name(res_bundle, i): if self.name_metric: return res_bundle.metrics[self.name_metric] else: return str(i) names = { res_bundle: get_name(res_bundle, i) for i, res_bundle in enumerate(self.result_bundles) } def get_metrics(res_bundle): metrics = copy.copy(res_bundle.metrics) # Since we already show it at the top-level, we can remove it from # the 
nested level to remove some clutter metrics.pop(self.name_metric, None) return metrics base = { names[res_bundle]: get_metrics(res_bundle) for res_bundle in self.result_bundles } if 'failed' not in base: base['failed'] = TestMetric([ names[res_bundle] for res_bundle in self.result_bundles if res_bundle.result is Result.FAILED ]) top = self.extra_metrics return ChainMap(top, base) class TestBundleMeta(abc.ABCMeta): """ Metaclass of :class:`TestBundleBase`. Method with a return annotation of :class:`ResultBundleBase` are wrapped to: * Update the ``context`` attribute of a returned :class:`ResultBundleBase` * Add an ``undecided_filter`` attribute, with :meth:`add_undecided_filter` decorator, so that any test method can be used as a pre-filter for another one right away. * Wrap ``_from_target`` to provide a single ``collector`` parameter, built from the composition of the collectors provided by ``_make_collector`` methods in the base class tree. If ``_from_target`` is defined in the class but ``from_target`` is not, a stub is created and the annotation of ``_from_target`` is copied to the stub. The annotation is then removed from ``_from_target`` so that it is not picked up by exekall. The signature of ``from_target`` is the result of merging the original ``cls.from_target`` parameters with the ones defined in ``_from_target``. """ @classmethod def test_method(metacls, func): """ Decorator to intercept returned :class:`ResultBundle` and attach some contextual information. """ def update_res(test_bundle, res): plat_info = test_bundle.plat_info # Map context keys to PlatformInfo nested keys keys = { 'board-name': ['name'], 'kernel-version': ['kernel', 'version'] } context = {} for context_key, plat_info_key in keys.items(): try: val = plat_info.get_nested_key(plat_info_key) except KeyError: continue else: context[context_key] = val # Only update what is strictly necessary here, so that # AggregatedResultBundle ends up with a minimal context state. 
res_context = res.context for key, val in context.items(): if key not in res_context: res_context[key] = val @functools.wraps(func) def wrapper(self, *args, **kwargs): try: res = func(self, *args, **kwargs) except ResultBundleBase as res: update_res(self, res) raise else: if isinstance(res, ResultBundleBase): update_res(self, res) return res wrapper = metacls.add_undecided_filter(wrapper) return wrapper @classmethod def collector_factory(cls, f): @functools.wraps(f) def wrapper(*args, **kwargs): return f(*args, **kwargs) wrapper._COLLECTOR_FACTORY = True return wrapper @staticmethod def add_undecided_filter(func): """ Turn any method returning a :class:`ResultBundleBase` into a decorator that can be used as a test method filter. The filter decorator is accessible as the ``undecided_filter`` attribute of the decorated method. Once a test is decorated, the filter method will be run in addition to the wrapped test, and if the filter does not succeed, the :class:`ResultBundleBase` result will be set to :attr:`Result.UNDECIDED`. :Example: .. code-block:: python class Foo(TestBundle): @TestBundle.add_undecided_filter def test_foo(self, xxx=42, ...): ... # Alternatively, ResultBundle return annotation will # automatically decorate the method with TestBundleMeta # metaclass. def test_foo(self, xxx=42, ...) -> ResultBundle: ... class Bar(Foo): # Set xxx=55 as default, but this can be overriden when # test_bar() is called. @Foo.test_foo.undecided_filter(xxx=77) def test_bar(self, yyy=43, ...) -> ResultBundle: ... The resulting decorated method can take the union of keyword parameters:: bar = Bar() bar.test_bar(xxx=33, yyy=55) # Same as bar.test_bar(33, yyy=55) # But this fails, since only keyword arguments can be passed to the # wrapping pre-test bar.test_bar(33, 55) If there is a parameter conflict, it is detected at import time and will result in a :exc:`TypeError`. .. 
note:: Even if the pre-test does not succeed, the wrapped test is still executed, so that the ResultBundle metrics are updated and the artifacts still produced. This can be important in order to manually analyse results in case the pre-filter was overly conservative and marked a usable result as UNDECIDED. """ @optional_kwargs def decorator(wrapped_test, **preset_kwargs): # Propagate the events used by the filter try: used_events = func.used_events except AttributeError: used_events = lambda x: x @used_events @update_wrapper_doc( wrapped_test, added_by=func, sig_from=func, description=textwrap.dedent( """ The returned ``ResultBundle.result`` will be changed to :attr:`~lisa.tests.base.Result.UNDECIDED` if {} does not succeed (i.e. either :attr:`~lisa.tests.base.Result.UNDECIDED` or :attr:`~lisa.tests.base.Result.FAILED`). {} """).strip().format( get_sphinx_name(func, style='rst', abbrev=True), inspect.getdoc(func), ), ) @kwargs_dispatcher( { func.__get__(0): 'filter_kwargs', }, # Better safe than sorry, there is no guarantee that the tests # won't step on each other's toes allow_overlap=False, ) @functools.wraps(wrapped_test) def filter_wrapper(self, *args, filter_kwargs=None, **kwargs): # Merge-in the presets filter_kwargs = { **preset_kwargs, **filter_kwargs, } # Run the wrapped test no matter what, so we get the metrics # and also the artifacts res = wrapped_test(self, *args, **kwargs) filter_res = func(self, **filter_kwargs) res.metrics.update(filter_res.metrics) if not filter_res: res.result = Result.UNDECIDED res.add_metric('undecided-reason', f'{func.__qualname__} failed') return res return filter_wrapper func.undecided_filter = decorator return func @classmethod def __prepare__(metacls, cls_name, bases, **kwargs): # Decorate each method when it is bound to its name in the class' # namespace, so that other methods can use e.g. 
undecided_filter # If we do that from __new__, the decoration will happen after all # methods are defined, just before the class object is created. class NS(dict): def __setitem__(self, name, f): if isinstance(f, types.FunctionType): # Wrap the test methods to add contextual information sig = signature(f) annotation = sig.return_annotation if isinstance(annotation, type) and issubclass(annotation, ResultBundleBase): f = metacls.test_method(f) super().__setitem__(name, f) return NS() @staticmethod def _make_collector_cm_factory(cls): """ Create the method in charge of creating the collector for the test. This method is created by aggregating the ``_make_collector`` of all base classes into one :class:`lisa.trace.ComposedCollector`. The resulting method is then used to consume the user-level parameters exposed by each ``_make_collector`` and turn it into a single ``collector`` parameter passed to :meth:`_from_target`. """ def find_factories(cls): def predicate(f): if isinstance(f, (classmethod, staticmethod)): _f = f.__func__ else: _f = f return ( getattr(_f, '_COLLECTOR_FACTORY', False) or ( hasattr(_f, '__wrapped__') and find_factories(_f.__wrapped__) ) ) factories = inspect.getmembers(cls, predicate) return list(map( # Unbind the method and turn it again into an unbound # classmethod lambda member: classmethod(member[1].__func__), factories )) factories_f = find_factories(cls) # Bind the classmethods to remove the first parameter from their # signature factories = [ f.__get__(None, cls) for f in factories_f ] params = { param: param.name for f in factories for param in inspect.signature(f).parameters.values() if param.kind == param.KEYWORD_ONLY } for _name, _params in group_by_value(params, key_sort=attrgetter('name')).items(): if len(_params) > 1: _params = ', '.join(map(str, _params)) raise TypeError(f'Conflicting parameters for {cls.__qualname__} collectors factory: {_params}') params = sorted(params.keys(), key=attrgetter('name')) @classmethod def factory(cls, 
**kwargs): factories = [ f.__get__(None, cls) for f in factories_f ] dispatched = dispatch_kwargs( factories, kwargs, call=True, allow_overlap=True, ) cms = [ cm for cm in dispatched.values() if cm is not None ] cms = sorted( cms, key=attrgetter('_COMPOSITION_ORDER'), reverse=True, ) cm = ComposedCollector(cms) return cm first_param = list(inspect.signature(factory.__func__).parameters.values())[0] factory.__func__.__signature__ = inspect.Signature( parameters=[first_param] + params, ) factory.__name__ = '_make_collector_cm' factory.__qualname__ = f'{cls.__qualname__}.{factory.__name__}' factory.__module__ = cls.__module__ return factory def __new__(metacls, cls_name, bases, dct, **kwargs): new_cls = super().__new__(metacls, cls_name, bases, dct, **kwargs) # Merge the collectors available for that class and pass the # composed collector to _from_target new_cls._make_collector_cm = metacls._make_collector_cm_factory(new_cls) # If that class defines _from_target, stub from_target and move the # annotations of _from_target to from_target. If from_target was # already defined on that class, it's wrapped by the stub, otherwise # super().from_target is used. 
if '_from_target' in dct and not getattr(new_cls._from_target, '__isabstractmethod__', False): assert isinstance(dct['_from_target'], classmethod) _from_target = new_cls._from_target # Sanity check on _from_target signature for name, param in signature(_from_target).parameters.items(): if name != 'target' and param.kind is not inspect.Parameter.KEYWORD_ONLY: raise TypeError(f'Non keyword parameters "{name}" are not allowed in {_from_target.__qualname__} signature') # This is necessary since _from_target is then reassigned, and the # closure refers to it by name _real_from_target = _from_target @classmethod @kwargs_dispatcher( { _from_target: 'from_target_kwargs', new_cls._make_collector_cm: 'collector_kwargs', }, ignore=['collector'], ) def wrapper(cls, target, from_target_kwargs, collector_kwargs): cm = cls._make_collector_cm(**collector_kwargs) return _real_from_target.__func__(cls, collector=cm, **from_target_kwargs) # Make sure to get the return annotation from _real_from_target wrapper.__func__.__signature__ = inspect.signature(wrapper.__func__).replace( return_annotation=inspect.signature(_real_from_target.__func__).return_annotation ) wrapper.__func__.__annotations__ = annotations_from_signature(wrapper.__func__.__signature__) new_cls._from_target = wrapper _from_target = new_cls._from_target def get_keyword_only_names(f): return { param.name for param in signature(f).parameters.values() if param.kind is inspect.Parameter.KEYWORD_ONLY } try: missing_params = ( get_keyword_only_names(super(bases[0], new_cls)._from_target) - get_keyword_only_names(_from_target) ) except AttributeError: pass else: if missing_params: raise TypeError('{}._from_target() must at least implement all the parameters of {}._from_target(). 
Missing parameters: {}'.format( new_cls.__qualname__, bases[0].__qualname__, ', '.join(sorted(missing_params)) )) if 'from_target' in dct: # Bind the classmethod object to the class orig_from_target = dct['from_target'] def get_orig_from_target(cls): return orig_from_target.__get__(cls, cls) else: def get_orig_from_target(cls): return super(new_cls, cls).from_target # Make a stub that we can freely update # Merge the signatures to get the base signature of # super().from_target. @kwargs_forwarded_to(_from_target.__func__) @functools.wraps(new_cls.from_target.__func__) def from_target(cls, *args, **kwargs): from_target = get_orig_from_target(cls) return from_target(*args, **kwargs) # Hide the fact that we wrapped the function, so exekall does not # get confused del from_target.__wrapped__ # Fixup the names, so it is not displayed as `_from_target` from_target.__name__ = 'from_target' from_target.__qualname__ = new_cls.__qualname__ + '.' + from_target.__name__ # Stich the relevant docstrings func = new_cls.from_target.__func__ from_target_doc = inspect.cleandoc(func.__doc__ or '') _from_target_doc = inspect.cleandoc(_from_target.__doc__ or '') if _from_target_doc: doc = f'{from_target_doc}\n\n(**above inherited from** :meth:`{func.__module__}.{func.__qualname__}`)\n\n{_from_target_doc}\n' else: doc = from_target_doc from_target.__doc__ = doc # Make sure the annotation points to an actual class object if it # was set, as most of the time they will be strings for factories. # Since the wrapper's __globals__ (read-only) attribute is not # going to contain the necessary keys to resolve that string, we # take care of it here. 
if inspect.signature(_from_target).return_annotation != inspect.Signature.empty: # Since we set the signature manually, we also need to update # the annotations in it from_target.__signature__ = from_target.__signature__.replace(return_annotation=new_cls) # Keep the annotations and the signature in sync from_target.__annotations__ = annotations_from_signature(from_target.__signature__) # De-annotate the _from_target function so it is not picked up by exekall del _from_target.__func__.__annotations__ new_cls.from_target = classmethod(from_target) return new_cls class TestBundleBase( Serializable, ExekallTaggable, abc.ABC, docstring_update('.. note:: As a subclass of :class:`lisa.tests.base.TestBundleBase`, this class is considered as "application" and its API is therefore more subject to change than other parts of :mod:`lisa`.'), metaclass=TestBundleMeta ): """ A LISA test bundle. :param res_dir: Directory in which the target execution artifacts reside. This will also be used to dump any artifact generated in the test code. :type res_dir: str :param plat_info: Various informations about the platform, that is available to all tests. :type plat_info: :class:`lisa.platforms.platinfo.PlatformInfo` The point of a :class:`TestBundleBase` is to bundle in a single object all of the required data to run some test assertion (hence the name). When inheriting from this class, you can define test methods that use this data, and return a :class:`ResultBundle`. Thanks to :class:`~lisa.utils.Serializable`, instances of this class can be serialized with minimal effort. As long as some information is stored within an object's member, it will be automagically handled. Please refrain from monkey-patching the object in :meth:`from_target`. Data required by the object to run test assertions should be exposed as ``__init__`` parameters. .. note:: All subclasses are considered as "application" code, as opposed to most of the rest of :mod:`lisa` which is treated as a library. 
This means that the classes and their API is subject to change when needs evolve, which is not always backward compatible. It's rarely an issue since these classes are used "manually" mostly for debugging, which is a version-specific activity. Likewise, the set of tests will evolve as existing tests are replaced by more general implementations, that could be organized and named differently. **Design notes:** * :meth:`from_target` will collect whatever artifacts are required from a given target, and will then return a :class:`TestBundleBase`. Note that a default implementation is provided out of ``_from_target``. * :meth:`from_dir` will use whatever artifacts are available in a given directory (which should have been created by an earlier call to :meth:`from_target` and then :meth:`to_dir`), and will then return a :class:`TestBundleBase`. * :attr:`VERIFY_SERIALIZATION` is there to ensure the instances can serialized and deserialized without error. * ``res_dir`` parameter of ``__init__`` must be stored as an attribute without further processing, in order to support result directory relocation. * Test methods should have a return annotation for the :class:`ResultBundle` to be picked up by the test runners. 
**Implementation example**:: from lisa.target import Target from lisa.platforms.platinfo import PlatformInfo from lisa.utils import ArtifactPath class DummyTestBundle(TestBundle): def __init__(self, res_dir, plat_info, shell_output): super().__init__(res_dir, plat_info) self.shell_output = shell_output @classmethod def _from_target(cls, target:Target, *, res_dir:ArtifactPath) -> 'DummyTestBundle': output = target.execute('echo $((21+21))').split() return cls(res_dir, target.plat_info, output) def test_output(self) -> ResultBundle: return ResultBundle.from_bool( any( '42' in line for line in self.shell_output ) ) **Usage example**:: # Creating a Bundle from a live target bundle = TestBundle.from_target(target, plat_info=plat_info, res_dir="/my/res/dir") # Running some test on the bundle res_bundle = bundle.test_foo() # Saving the bundle on the disk bundle.to_dir("/my/res/dir") # Reloading the bundle from the disk bundle = TestBundle.from_dir("/my/res/dir") # The reloaded object can be used just like the original one. # Keep in mind that serializing/deserializing this way will have a # similar effect than a deepcopy. res_bundle = bundle.test_foo() """ VERIFY_SERIALIZATION = True """ When True, this enforces a serialization/deserialization step in :meth:`from_target`. .. note:: The deserialized instance is thrown away in order to avoid using what is in effect a deepcopy of the original bundle. Using that deepcopy greatly increases the memory consumption of long running processes. """ def __init__(self, res_dir, plat_info): # It is important that res_dir is directly stored as an attribute, so # it can be replaced by a relocated res_dir after the object is # deserialized on another host. 
# See exekall_customization.LISAAdaptor.load_db self.res_dir = res_dir self.plat_info = plat_info def get_tags(self): try: return {'board': self.plat_info['name']} except KeyError: return {} @classmethod @abc.abstractmethod def _from_target(cls, target, *, res_dir): """ :meta public: Internals of the target factory method. .. note:: This must be a classmethod, and all parameters except ``target`` must be keyword-only, i.e. appearing after `args*` or a lonely `*`:: @classmethod def _from_target(cls, target, *, foo=33, bar): ... """ @classmethod def check_from_target(cls, target): """ Check whether the given target can be used to create an instance of this class :raises: :class:`lisa.tests.base.ResultBundleBase` with ``result`` as :attr:`lisa.tests.base.Result.SKIPPED` if the check fails This method should be overriden to check your implementation requirements """ @classmethod def can_create_from_target(cls, target): """ :returns: Whether the given target can be used to create an instance of this class :rtype: bool :meth:`check_from_target` is used internally, so there shouldn't be any need to override this. """ try: cls.check_from_target(target) return True except ResultBundleBase: return False @classmethod def from_target(cls, target: Target, *, res_dir: ArtifactPath = None, **kwargs): """ Factory method to create a bundle using a live target :param target: Target to connect to. :type target: lisa.target.Target :param res_dir: Host result directory holding artifacts. :type res_dir: str or lisa.utils.ArtifactPath :param custom_collector: Custom collector that will be used as a context manager when calling the workload. :type custom_collector: lisa.trace.CollectorBase This is mostly boiler-plate code around :meth:`~lisa.tests.base.TestBundleBase._from_target`, which lets us introduce common functionalities for daughter classes. 
Unless you know what you are doing, you should not override this method, but the internal :meth:`lisa.tests.base.TestBundleBase._from_target` instead. """ cls.check_from_target(target) res_dir = res_dir or target.get_res_dir( name=cls.__qualname__, symlink=True, ) # Make sure that all the relevant dmesg warnings will fire when running # things on the target, even if we already hit some warn_once warnings. with contextlib.suppress(TargetStableError): target.write_value('/sys/kernel/debug/clear_warn_once', '1', verify=False) bundle = cls._from_target(target, res_dir=res_dir, **kwargs) # We've created the bundle from the target, and have all of # the information we need to execute the test code. However, # we enforce the use of the offline reloading path to ensure # it does not get broken. if cls.VERIFY_SERIALIZATION: bundle.to_dir(res_dir) # Updating the res_dir breaks deserialization for some use cases cls.from_dir(res_dir, update_res_dir=False) return bundle @classmethod @TestBundleMeta.collector_factory def _make_custom_collector(cls, *, custom_collector=None): return custom_collector @classmethod def _get_filepath(cls, res_dir): """ :meta public: Returns the path of the file containing the serialized object in ``res_dir`` folder. """ return ArtifactPath.join(res_dir, f"{cls.__qualname__}.yaml") def _save_debug_plot(self, fig, name): """ Save a holoviews debug plot using the bokeh backend and show it in the notebook cell. """ self.trace.ana.notebook.save_plot( fig, filepath=ArtifactPath.join( self.res_dir, f'{name}.html', ), backend='bokeh', ) # Check before calling display(), as running it outside a notebook will # just print the structure of the element, which is useless # # TODO: See if we can capture this side effect and re-run it when a # memoized test method is called again. 
if is_running_ipython(): IPython.display.display(fig) return fig @classmethod def _get_referred_objs(cls, obj, predicate=lambda x: True): visited = set() objs = [] def update_refs(obj): obj_id = id(obj) # Avoid cycles. Use the id() of the objects directly since the # inclusion check is orders of magnitude faster than checking for # inclusing on the object directly. It also handles well non hashable # objects and broken __eq__ implementations. if obj_id in visited: return else: visited.add(obj_id) # Filter-out weird objects that end up in the list and that can # trigger a coredump on the interpreter with warnings.catch_warnings(): warnings.simplefilter("ignore") has_class = hasattr(obj, '__class__') if has_class and predicate(obj): objs.append(obj) for sub in gc.get_referents(obj): update_refs(sub) update_refs(obj) return objs @property def _children_test_bundles(self): """ :meta public: List of references to :class:`TestBundleBase` instances ``self`` relies on (directly *and* indirectly). This is used for some post-deserialization fixup that need to walk the whole graph of :class:`TestBundleBase`. """ # Work around: # https://github.com/pallets/werkzeug/issues/2188 def predicate(x): try: return isinstance(x, TestBundleBase) except Exception: return False objs = set(self._get_referred_objs(self, predicate)) objs.discard(self) return objs def _fixup_res_dir(self, new): orig_root = self.res_dir def fixup(obj): rel = os.path.relpath(obj.res_dir, orig_root) absolute = os.path.abspath(os.path.join(new, rel)) obj.res_dir = absolute for child in self._children_test_bundles | {self}: fixup(child) @classmethod def from_dir(cls, res_dir, update_res_dir=True): """ Wrapper around :meth:`lisa.utils.Serializable.from_path`. It uses :meth:`_get_filepath` to get the name of the serialized file to reload. 
""" res_dir = ArtifactPath(root=res_dir, relative='') bundle = super().from_path(cls._get_filepath(res_dir)) # We need to update the res_dir to the one we were given if update_res_dir: bundle._fixup_res_dir(res_dir) return bundle def to_dir(self, res_dir): """ See :meth:`lisa.utils.Serializable.to_path` """ super().to_path(self._get_filepath(res_dir)) class FtraceTestBundleBase(TestBundleBase): """ Base class for test bundles needing ftrace traces. Optionally, an ``FTRACE_CONF`` class attribute can be defined to hold additional FTrace configuration used to record a trace while the synthetic workload is being run. By default, the required events are extracted from decorated test methods. This base class ensures that each subclass will get its own copy of ``FTRACE_CONF`` attribute, and that the events specified in that configuration are a superset of what is needed by methods using the family of decorators :func:`lisa.trace.requires_events`. This makes sure that the default set of events is always enough to run all defined methods, without duplicating that information. That means that trace events are "inherited" at the same time as the methods that need them. The ``FTRACE_CONF`` attribute is typically built by merging these sources: * Existing ``FTRACE_CONF`` class attribute on the :class:`RTATestBundle` subclass * Events required by methods using :func:`lisa.trace.requires_events` decorator (and equivalents). * :class:`lisa.trace.FtraceConf` specified by the user and passed to :meth:`lisa.tests.base.TestBundleBase.from_target` as ``ftrace_conf`` parameter. """ TRACE_PATH = 'trace.dat' """ Path to the ``trace-cmd`` trace.dat file in the result directory. """ @classmethod def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) # Collect all the events that can be used by all methods available on # that class. 
ftrace_events = [] for name, obj in inspect.getmembers(cls, callable): try: used_events = obj.used_events except AttributeError: continue else: ftrace_events.append(used_events) ftrace_events = AndTraceEventChecker(ftrace_events) # Get the ftrace_conf attribute of the class, and make sure it is # unique to that class (i.e. not shared with any other parent or # sibling classes) try: ftrace_conf = cls.FTRACE_CONF except AttributeError: ftrace_conf = None else: # If the ftrace_conf attribute has been defined in a base # class, make sure that class gets its own copy since we are # going to modify it if 'ftrace_conf' not in cls.__dict__: ftrace_conf = copy.copy(ftrace_conf) # Re-wrap into an FtraceConf so we get a change to set a correct source # name. ftrace_conf = FtraceConf( conf=ftrace_conf or None, src=cls.__qualname__, # Let the original object decide of that. add_default_src=False, ) # Merge-in a new source to FtraceConf that contains the events we # collected ftrace_conf.add_merged_src( src=f'{cls.__qualname__}(required)', conf={ 'events': ftrace_events, }, ) cls.FTRACE_CONF = ftrace_conf # Deprecated, for backward compat only, all new code uses the # capitalized version cls.ftrace_conf = ftrace_conf @classmethod @TestBundleBase.collector_factory def _make_ftrace_collector(cls, *, target: Target, res_dir: ArtifactPath = None, ftrace_conf: FtraceConf = None): cls_conf = cls.FTRACE_CONF or FtraceConf() user_conf = ftrace_conf or FtraceConf() # Make a copy of the conf, since it may be shared by multiple classes conf = copy.copy(cls_conf) # Merge user configuration with the test's configuration conf.add_merged_src( src=f'user+{cls.__qualname__}', conf=user_conf, optional_events=True, ) # If there is no event, do not collect the trace unless the user asked # for it. 
This can happen for classes that inherit from # FtraceTestBundle as a convenience to users without actually needing # it internally if conf.get('events'): path = ArtifactPath.join(res_dir, cls.TRACE_PATH) return FtraceCollector.from_conf( target=target, conf=conf, output_path=path, ) else: return None @property def trace_path(self): """ Path to the ``trace-cmd report`` trace.dat file. """ return ArtifactPath.join(self.res_dir, self.TRACE_PATH) # Guard before the cache, so we don't accidentally start depending on the # LRU cache for functionnal correctness. @non_recursive_property # Only cache the trace of N bundles at a time, to avoid running out of memory. # This should not really impact the test when ran with exekall, since they # are sequenced one after another. It would have some speed impact on # scripts/notebooks that try to do something with a bunch of # FtraceTestBundle. @lru_memoized(first_param_maxsize=5) def trace(self): """ :returns: a :class:`lisa.trace.TraceView` All events specified in ``FTRACE_CONF`` are parsed from the trace, so it is suitable for direct use in methods. Having the trace as a property lets us defer the loading of the actual trace to when it is first used. Also, this prevents it from being serialized when calling :meth:`lisa.utils.Serializable.to_path` and allows updating the underlying path before it is actually loaded to match a different folder structure. """ return self.get_trace( events=self.FTRACE_CONF["events"], normalize_time=True, # Soft limit on the amount of memory used by dataframes kept around # in memory by Trace, so that we don't blow up the memory when we # have a large-ish number of FTraceTestBundle alive at the same # time. max_mem_size=500e6, # TODO: revisit that. As of pyarrow 2.0.0 and pandas 1.1.4, reading # (and maybe writing) parquet fils seem to leak memory. This can # take the consumption in the order of tens of gigabytes for a few # iterations of the tests with exekall, leading to crashes. 
# Therefore, disable the on-disk swap. enable_swap=False, ) def get_trace(self, events=None, **kwargs): """ :returns: a :class:`lisa.trace.Trace` collected in the standard location. :Variable keyword arguments: Forwarded to :class:`lisa.trace.Trace`. """ return Trace(self.trace_path, self.plat_info, events=events, **kwargs) class FtraceTestBundle(FtraceTestBundleBase): """ Dummy subclass of :class:`FtraceTestBundleBase` to be inherited from to override :class:`OptionalFtraceTestBundle` in the inheritance tree. """ _make_ftrace_collector = FtraceTestBundleBase._make_ftrace_collector class OptionalFtraceTestBundle(FtraceTestBundleBase, Loggable): @classmethod @TestBundleBase.collector_factory @kwargs_forwarded_to(FtraceTestBundleBase._make_ftrace_collector) def _make_ftrace_collector(cls, **kwargs): try: return super()._make_ftrace_collector(**kwargs) except Exception as e: cls.get_logger().warning(f'Could not create ftrace collector: {e}') return None class TestConfBase(SimpleMultiSrcConf): """ Base class for test configurations. This class will add a ``test-conf`` top-level key above the level specified by the class, so that if the class specifies a ``TopLevelKeyDesc('foo')``, the actual top-level key will be ``test-conf/foo``. """ def __init_subclass__(cls, **kwargs): structure = copy.copy(cls.STRUCTURE) structure.levels = ['test-conf', *structure.levels] cls.STRUCTURE = structure super().__init_subclass__(**kwargs) class DmesgTestConf(TestConfBase): """ Configuration class for :meth:`lisa.tests.base.DmesgTestBundle.test_dmesg`. {generated_help} {yaml_example} """ STRUCTURE = TopLevelKeyDesc('dmesg', 'Dmesg test configuration', ( KeyDesc('ignored-patterns', 'List of Python regex matching dmesg entries *content* to be ignored (see :class:`devlib.collector.dmesg.KernelLogEntry` for how the message is split)', [TypedList[str]]), )) class DmesgTestBundleBase(TestBundleBase): """ Abstract Base Class for TestBundles based on dmesg output. .. 
seealso: Test subclasses should inherit from :class:`DmesgTestBundle` in order to require the features. """ DMESG_PATH = 'dmesg.log' """ Path to the dmesg log in the result directory. """ CANNED_DMESG_IGNORED_PATTERNS = { 'EAS-schedutil': 'Disabling EAS, schedutil is mandatory', # On kernel >= 5.6, executable stack will trigger this issue: # kern: warn: [555.927466] process 'root/devlib-target/bin/busybox' started with executable stack 'executable-stack': 'started with executable stack', } """ Mapping of canned patterns to avoid repetition while defining :attr:`lisa.tests.base.DmesgTestBundleBase.DMESG_IGNORED_PATTERNS` in subclasses. """ DMESG_IGNORED_PATTERNS = [ CANNED_DMESG_IGNORED_PATTERNS['executable-stack'], ] """ List of patterns to ignore in addition to the ones passed to :meth:`~lisa.tests.base.DmesgTestBundle.test_dmesg`. """ @classmethod @TestBundleBase.collector_factory def _make_dmesg_collector(cls, *, target: Target, res_dir: ArtifactPath = None): path = ArtifactPath.join(res_dir, cls.DMESG_PATH) return DmesgCollector( target, output_path=path, ) @property def dmesg_path(self): """ Path to the dmesg output log file """ return ArtifactPath.join(self.res_dir, self.DMESG_PATH) @property def dmesg_entries(self): """ List of parsed dmesg output entries :class:`devlib.collector.dmesg.KernelLogEntry`. """ with open(self.dmesg_path) as f: return list(KernelLogEntry.from_dmesg_output(f.read())) def test_dmesg(self, level='warn', facility=None, ignored_patterns: DmesgTestConf.IgnoredPatterns = None) -> ResultBundle: """ Basic test on kernel dmesg output. :param level: Any dmesg entr with a level more critical than (and including) that will make the test fail. :type level: str :param facility: Only select entries emitted by the given dmesg facility like `kern`. Note that not all versions of `dmesg` are able to print it, so specifying it may lead to no entry being inspected at all. If ``None``, the facility is ignored. 
:type facility: str or None :param ignored_patterns: List of regexes to ignore some messages. The pattern list is combined with :attr:`~lisa.tests.base.DmesgTestBundleBase.DMESG_IGNORED_PATTERNS` class attribute. :type ignored_patterns: list or None """ levels = DmesgCollector.LOG_LEVELS # Consider as an issue all levels more critical than `level` issue_levels = levels[:levels.index(level) + 1] ignored_patterns = ( (ignored_patterns or []) + (self.DMESG_IGNORED_PATTERNS or []) ) logger = self.logger if ignored_patterns: logger.info(f'Will ignore patterns in dmesg output: {ignored_patterns}') ignored_regex = [ re.compile(pattern) for pattern in ignored_patterns ] else: ignored_regex = [] issues = [ entry for entry in self.dmesg_entries if ( entry.msg.strip() and (entry.facility == facility if facility else True) and (entry.level in issue_levels) and not any(regex.search(entry.msg.strip()) for regex in ignored_regex) ) ] res = ResultBundle.from_bool(not issues) multiline = len(issues) > 1 res.add_metric('dmesg output', ('\n' if multiline else '') + '\n'.join(str(entry) for entry in issues)) return res class DmesgTestBundle(DmesgTestBundleBase): """ Dummy subclass of :class:`DmesgTestBundleBase` to be inherited from to override :class:`OptionalDmesgTestBundle` in the inheritance tree. 
""" test_dmesg = DmesgTestBundleBase.test_dmesg _make_dmesg_collector = DmesgTestBundleBase._make_dmesg_collector class OptionalDmesgTestBundle(DmesgTestBundleBase, Loggable): @functools.wraps(DmesgTestBundleBase.test_dmesg) def test_dmesg(self, *args, **kwargs): try: return super().test_dmesg(*args, **kwargs) except FileNotFoundError: self.logger.warning('Could not check dmesg content, as it was not collected') return ResultBundle(result=Result.UNDECIDED) @classmethod @TestBundleBase.collector_factory @kwargs_forwarded_to(DmesgTestBundleBase._make_dmesg_collector) def _make_dmesg_collector(cls, **kwargs): try: return super()._make_dmesg_collector(**kwargs) except Exception as e: cls.get_logger().warning(f'Could not create dmesg collector: {e}') return None class RTATestBundle(FtraceTestBundle, DmesgTestBundle): """ Abstract Base Class for :class:`lisa.wlgen.rta.RTA`-powered TestBundles .. seealso: :class:`lisa.tests.base.FtraceTestBundle` for default ``FTRACE_CONF`` content. """ TASK_PERIOD = 16e-3 """ A task period in seconds you can re-use for your :class:`lisa.wlgen.rta.RTATask` definitions. """ NOISE_ACCOUNTING_THRESHOLDS = { # Idle task - ignore completely # note: since it has multiple comms, we need to ignore them TaskID(pid=0, comm=None): 100, # Feeble boards like Juno/TC2 spend a while in sugov r"^sugov:\d+$": 5, # Some boards like Hikey960 have noisy threaded IRQs (thermal sensor # mailbox ...) r"^irq/\d+-.*$": 1.5, } """ PID/comm specific tuning for :meth:`test_noisy_tasks` * **keys** can be PIDs, comms, or regexps for comms. * **values** are noisiness thresholds (%), IOW below that runtime threshold the associated task will be ignored in the noise accounting. """ # Roughly 330*2 ms for PELT half life~=32ms # This allows enough time for scheduler signals to converge. 
_BUFFER_PHASE_DURATION_S = pelt_settling_time() * 2 """ Duration of the initial buffer phase; this is a phase that copies the first phase of each task, and that is prepended to the relevant task - this means all task in the profile get a buffer phase. """ _BUFFER_PHASE_PROPERTIES = { 'name': 'buffer', } """ Properties of the buffer phase, see :attr:`_BUFFER_PHASE_DURATION_S` """ @RTAEventsAnalysis.df_rtapp_phases_start.used_events @RTAEventsAnalysis.df_rtapp_phases_end.used_events @requires_events('sched_switch') def trace_window(self, trace): """ The time window to consider for this :class:`RTATestBundle` :returns: a (start, stop) tuple Since we're using rt-app profiles, we know the name of tasks we are interested in, so we can trim our trace scope to filter out the setup/teardown events we don't care about. Override this method if you need a different trace trimming. .. warning:: Calling ``self.trace`` here will raise an :exc:`AttributeError` exception, to avoid entering infinite recursion. """ swdf = trace.df_event('sched_switch') def get_first_switch(row): comm, pid, _ = row.name start_time = row['Time'] task = TaskID(comm=comm, pid=pid) start_swdf = df_filter_task_ids(swdf, [task], pid_col='next_pid', comm_col='next_comm') pre_phase_swdf = start_swdf[start_swdf.index < start_time] # The task with that comm and PID was never switched-in, which # means it was still on the current CPU when it was renamed, so we # just report phase-start. 
if pre_phase_swdf.empty: return start_time # Otherwise, we return the timestamp of the switch else: return pre_phase_swdf.index[-1] profile = self.rtapp_profile # Find when the first rtapp phase starts, and take the associated # sched_switch that is immediately preceding phase_start_df = trace.ana.rta.df_rtapp_phases_start( wlgen_profile=profile, ) # Get rid of the buffer phase we don't care about phase_start_df = phase_start_df[ phase_start_df['properties'].transform(lambda props: props['meta']['from_test']) ] rta_start = phase_start_df.apply(get_first_switch, axis=1).min() # Find when the last rtapp phase ends rta_stop = trace.ana.rta.df_rtapp_phases_end()['Time'].max() return (rta_start, rta_stop) @property def rtapp_profile(self): """ Compute the RTapp profile based on ``plat_info``. """ return self.get_rtapp_profile(self.plat_info) _rtapp_tasks_events = requires_events('sched_switch') @property @_rtapp_tasks_events @memoized def rtapp_task_ids_map(self): """ Mapping of task names as specified in the rtapp profile to list of :class:`lisa.trace.TaskID` names found in the trace. If the task forked, the list will contain more than one item. """ trace = self.get_trace(events=['sched_switch']) names = self.rtapp_profile.keys() return { name: task_ids for name, task_ids in RTA.resolve_trace_task_names(trace, names).items() } @property @_rtapp_tasks_events def rtapp_task_ids(self): """ The rtapp task :class:`lisa.trace.TaskID` as found from the trace in this bundle. :return: the list of actual trace task :class:`lisa.trace.TaskID` """ return sorted(itertools.chain.from_iterable(self.rtapp_task_ids_map.values())) @property @_rtapp_tasks_events def rtapp_tasks_map(self): """ Same as :func:`rtapp_task_ids_map` but with list of strings for values. """ return { name: [task_id.comm for task_id in task_ids] for name, task_ids in self.rtapp_task_ids_map.items() } @property @_rtapp_tasks_events def rtapp_tasks(self): """ Same as :func:`rtapp_task_ids` but as a list of string. 
:return: the list of actual trace task names """ return [task_id.comm for task_id in self.rtapp_task_ids] @property def cgroup_configuration(self): """ Compute the cgroup configuration based on ``plat_info`` """ return self.get_cgroup_configuration(self.plat_info) @non_recursive_property @lru_memoized(first_param_maxsize=5) def trace(self): """ :returns: a :class:`lisa.trace.TraceView` cropped to the window given by :meth:`trace_window`. .. seealso:: :attr:`FtraceTestBundleBase.trace` """ trace = super().trace return trace.get_view(self.trace_window(trace), clear_base_cache=True) def df_noisy_tasks(self, with_threshold_exclusion=True): """ :returns: a DataFrame containing all tasks that participate to the test noise. i.e. all non rt-app tasks. :param with_threshold_exclusion: When set to True, known noisy services will be ignored. """ df = self.trace.ana.tasks.df_tasks_runtime() df = df.copy(deep=False) # We don't want to account the test tasks ignored_ids = copy.copy(self.rtapp_task_ids) df['runtime_pct'] = df['runtime'] * (100 / self.trace.time_range) df['pid'] = df.index threshold_exclusion = self.NOISE_ACCOUNTING_THRESHOLDS if with_threshold_exclusion else {} # Figure out which PIDs to exclude from the thresholds for key, threshold in threshold_exclusion.items(): # Find out which task(s) this threshold is about if isinstance(key, str): comms = df.loc[df['comm'].str.match(key), 'comm'] task_ids = comms.apply(self.trace.get_task_id) else: # Use update=False to let None fields propagate, as they are # used to indicate a "dont care" value task_ids = [self.trace.get_task_id(key, update=False)] # For those tasks, check the cumulative threshold runtime_pct_sum = df_filter_task_ids(df, task_ids)['runtime_pct'].sum() if runtime_pct_sum <= threshold: ignored_ids.extend(task_ids) self.logger.info(f"Ignored PIDs for noise contribution: {', '.join(map(str, ignored_ids))}") # Filter out unwanted tasks (rt-app tasks + thresholds) df = df_filter_task_ids(df, ignored_ids, 
invert=True) return df.loc[df['runtime'] > 0] @TestBundleBase.add_undecided_filter @TasksAnalysis.df_tasks_runtime.used_events def test_noisy_tasks(self, *, noise_threshold_pct=None, noise_threshold_ms=None): """ Test that no non-rtapp ("noisy") task ran for longer than the specified thresholds :param noise_threshold_pct: The maximum allowed runtime for noisy tasks in percentage of the total rt-app execution time :type noise_threshold_pct: float :param noise_threshold_ms: The maximum allowed runtime for noisy tasks in ms :type noise_threshold_ms: float If both are specified, the smallest threshold (in seconds) will be used. """ if noise_threshold_pct is None and noise_threshold_ms is None: raise ValueError('Both "noise_threshold_pct" and "noise_threshold_ms" cannot be None') # No task can run longer than the recorded duration threshold_s = self.trace.time_range if noise_threshold_pct is not None: threshold_s = noise_threshold_pct * self.trace.time_range / 100 if noise_threshold_ms is not None: threshold_s = min(threshold_s, noise_threshold_ms * 1e3) df_noise = self.df_noisy_tasks() if df_noise.empty: return ResultBundle.from_bool(True) res = ResultBundle.from_bool(df_noise['runtime'].sum() < threshold_s) pid = df_noise.index[0] comm = df_noise['comm'].iloc[0] duration_s = df_noise['runtime'].iloc[0] duration_pct = df_noise['runtime_pct'].iloc[0] metric = {"pid": pid, "comm": comm, "duration (abs)": TestMetric(duration_s, "s"), "duration (rel)": TestMetric(duration_pct, "%")} res.add_metric("noisiest task", metric) return res @classmethod def unscaled_utilization(cls, plat_info, cpu, utilization_pct): """ Convert utilization scaled to a CPU to a 'raw', unscaled one. :param capacity: The CPU against which ``utilization_pct``` is scaled :type capacity: int :param utilization_pct: The scaled utilization in % :type utilization_pct: int .. seealso: In most cases, `PeriodicWload(scale_for_cpu=..., scale_for_freq=...)` is easier to use and leads to clearer code. 
""" return PeriodicWload( duty_cycle_pct=utilization_pct, scale_for_cpu=cpu, ).unscaled_duty_cycle_pct(plat_info) @classmethod def get_rtapp_profile(cls, plat_info, **kwargs): """ Returns a :class:`dict` with task names as keys and :class:`lisa.wlgen.rta.RTATask` as values. The following modifications are done on the profile returned by :meth:`_get_rtapp_profile`: * A buffer phase may be inserted at the beginning of each task in order to stabilize some kernel signals. * A ``from_test`` meta key is added to each :class:`lisa.wlgen.rta.RTAPhase` with a boolean value that is ``True`` if the phase comes from the test itself and ``False`` if it was added here (e.g. the buffer phase). This allows future-proof filtering of phases in the test code when inspecting the profile by looking at ``phase['meta']['from_test']``. .. note:: If you want to override the method in a subclass, override :meth:`_get_rtapp_profile` instead. """ def add_buffer(task): template_phase = task.phases[0] wload = template_phase['wload'] task = task.with_props(meta=leaf_precedence({'from_test': True})) if 'name' not in task: task = task.with_props(name='test') # Don't add the buffer phase if it has a nil duration if not cls._BUFFER_PHASE_DURATION_S: return task elif isinstance(wload, PeriodicWload): # Notes: # # Using a small period to allow the util_avg to be very close # to duty_cycle, but that also makes the duty_cycle converge to # a wrong value (rtapp looses fidelity with small periods, # maybe due to tracing overhead). Therefore we just replicate # the period. ref_wload = PeriodicWload( duration=cls._BUFFER_PHASE_DURATION_S, ) buffer_phase = RTAPhase( # Override some parameters with the reference ones prop_wload=ref_wload & wload, # Pin to the same CPUs and NUMA nodes if any, so that we # also let the runqueue signals converge and things like # that, if it's going to matter later. 
prop_cpus=template_phase.get('cpus'), prop_numa_nodes_membind=template_phase.get('numa_nodes_membind'), prop_meta={'from_test': False}, properties=cls._BUFFER_PHASE_PROPERTIES, ) # Prepend the buffer task return buffer_phase + task else: return task profile = cls._get_rtapp_profile(plat_info, **kwargs) return { name: add_buffer(task) for name, task in profile.items() } @classmethod @abc.abstractmethod def _get_rtapp_profile(cls, plat_info): """ :meta public: :returns: a :class:`dict` with task names as keys and :class:`lisa.wlgen.rta.RTATask` as values This is the method you want to override to specify what is your synthetic workload. """ @classmethod def get_cgroup_configuration(cls, plat_info): """ :returns: a :class:`dict` representing the configuration of a particular cgroup. This is a method you may optionally override to configure a cgroup for the synthetic workload. Example of return value:: { 'name': 'lisa_test', 'controller': 'schedtune', 'attributes' : { 'prefer_idle' : 1, 'boost': 50 } } """ return {} @classmethod def _target_configure_cgroup(cls, target, cfg): if not cfg: return None try: cgroups = target.cgroups except AttributeError: ResultBundle.raise_skip('cgroups are not available on this target') kind = cfg['controller'] try: ctrl = cgroups.controllers[kind] except KeyError: ResultBundle.raise_skip(f'"{kind}" cgroup controller unavailable') cg = ctrl.cgroup(cfg['name']) cg.set(**cfg['attributes']) return '/' + cg.name @classmethod def run_rtapp(cls, target, res_dir, profile=None, collector=None, cg_cfg=None, wipe_run_dir=True, update_cpu_capacities=None): """ Run the given RTA profile on the target, and collect an ftrace trace. :param target: target to execute the workload on. :type target: lisa.target.Target :param res_dir: Artifact folder where the artifacts will be stored. :type res_dir: str or lisa.utils.ArtifactPath :param profile: ``rt-app`` profile, as a dictionary of ``dict(task_name, RTATask)``. 
If ``None``, :meth:`~lisa.tests.base.RTATestBundle.get_rtapp_profile` is called with ``target.plat_info``. :type profile: dict(str, lisa.wlgen.rta.RTATask) :param collector: Context manager collector to use while running rt-app. :type collector: lisa.trace.ComposedCollector :param cg_cfg: CGroup configuration dictionary. If ``None``, :meth:`lisa.tests.base.RTATestBundle.get_cgroup_configuration` is called with ``target.plat_info``. :type cg_cfg: dict :param wipe_run_dir: Remove the run directory on the target after execution of the workload. :type wipe_run_dir: bool :param update_cpu_capacities: Attempt to update the CPU capacities based on the calibration values of rtapp to get the most accurate reproduction of duty cycles. :type update_cpu_capacities: bool """ logger = cls.get_logger() trace_path = ArtifactPath.join(res_dir, cls.TRACE_PATH) profile = profile or cls.get_rtapp_profile(target.plat_info) cg_cfg = cg_cfg or cls.get_cgroup_configuration(target.plat_info) try: ftrace_coll = collector['ftrace'] except KeyError: trace_events = [] else: trace_events = [ event.replace('userspace@rtapp_', '') for event in ftrace_coll.events if event.startswith('userspace@rtapp_') ] # Coarse-grained detection, but that should be enough for our use try: target.execute('ls /sys/kernel/debug/tracing/') except TargetStableError: debugfs_needs_root = True else: debugfs_needs_root = False wload = RTA.from_profile( target=target, profile=profile, res_dir=res_dir, name=f"rta_{cls.__name__.casefold()}", trace_events=trace_events, # Force the default value for all settings so that the test does # not depend on the environment setup. force_defaults=True, no_force_default_keys=[ # Since "taskgroup" cannot be always expected to work in case # cgroupfs is not mounted at all, we will not force a default # value for it. 
'taskgroup' ], ) profile_str = '\n'.join( 'Task {}:\n{}'.format( task, textwrap.indent(str(phase), ' ' * 4) ) for task, phase in profile.items() ) logger.info(f'rt-app workload:\n{profile_str}') logger.debug(f'rt-app JSON:\n{wload.conf.json}') cgroup = cls._target_configure_cgroup(target, cg_cfg) as_root = bool( cgroup is not None or (trace_events and debugfs_needs_root) ) wload = wload( wipe_run_dir=wipe_run_dir, cgroup=cgroup, as_root=as_root, update_cpu_capacities=update_cpu_capacities, ) with target.freeze_userspace(), wload, collector: wload.run() return collector # Keep compat with existing code @classmethod def _run_rtapp(cls, *args, **kwargs): """ :meta public: Has been renamed to :meth:`~lisa.tests.base.RTATestBundle.run_rtapp`, as it really is part of the public API. """ return cls.run_rtapp(*args, **kwargs) @classmethod def _from_target(cls, target: Target, *, res_dir: ArtifactPath, collector=None) -> 'RTATestBundle': """ :meta public: Factory method to create a bundle using a live target This will execute the rt-app workload described in :meth:`~lisa.tests.base.RTATestBundle.get_rtapp_profile` """ cls.run_rtapp(target, res_dir, collector=collector) plat_info = target.plat_info return cls(res_dir, plat_info) class TestBundle(OptionalFtraceTestBundle, OptionalDmesgTestBundle, TestBundleBase): """ Dummy class used as a base class for all tests. """ @classmethod def check_from_target(cls, target): super().check_from_target(target) online = set(target.list_online_cpus()) cpus = set(range(target.plat_info['cpus-count'])) if not online <= cpus: raise ValueError('Online CPUs ({online}) are not a subset of detected CPUs ({cpus})') elif online != cpus: offline = sorted(cpus - online) raise ResultBundle.raise_skip(f'All CPUs must be online (aka not hotplugged) before creating a TestBundle. Offline CPUs: {offline}')
from django.utils.translation import ugettext_lazy as _

import horizon

from openstack_dashboard.dashboards.tasks import dashboard


class History(horizon.Panel):
    """Horizon panel showing the history view of the Tasks dashboard."""

    # NOTE(review): ugettext_lazy was deprecated in Django 3.0 and removed in
    # 4.0 in favour of gettext_lazy -- confirm the Django version pinned by
    # this deployment before upgrading.
    name = _("History")  # translated title shown in the dashboard navigation
    slug = "history"     # URL fragment identifying this panel


# Register the panel with the Tasks dashboard so Horizon discovers it.
dashboard.Tasks.register(History)
"""
Contains common test fixtures used to run unit tests.
"""

import sys

# Make the shared test_tools package (three directories up from this file)
# importable without installing it.
sys.path.append('../../..')

# Re-export every shared fixture at module level so the test framework can
# discover them here (presumably this file acts as a pytest conftest --
# verify against the test layout).
from test_tools.fixtures.common import *
from __future__ import absolute_import, division, print_function, unicode_literals # just in case, for py2 to be py3-ish import pkgutil, io import numpy as np from matplotlib import image, cm from matplotlib import pyplot as plt __all__ = ['get_cat_num', 'n_cats', 'catter'] # N_cats x 72 x 72, 0 is transparent, 1 is full-cat _CAT_DATA = np.load(io.BytesIO(pkgutil.get_data('catterplot', 'data/cats.npy'))) def get_cat_num(i): return _CAT_DATA[i] def n_cats(): return len(_CAT_DATA) def catter(x, y, s=40, c=None, cat='random', alpha=1, ax=None, cmap=None, aspects='auto'): """ A catter plot (scatter plot with cats). Most arguments are interpreted the same as the matplotlib `scatter` function, except that ``s`` is the *data* size of the symbol (not pixel). Additional kwargs include: ``cat`` can be: * int : the index of the cat symbol to use - you can use ``catterplot.n_cats()`` to get the number of cats available * a squence of ints : must match the data, but otherwise interpreted as for a scalar * 'random'/'rand' : random cats ``ax`` can be: * None: use the current default (pyplot) axes * an `Axes` : random cats ``aspects`` can be: * 'auto': the cats length-to-width is set to be square given the spread of inputs * a float: the height/width of the cats. 
If not 1, ``s`` is interpreted as the geometric mean of the sizes * a sequence of floats: much match data, gives height/width """ if ax is None: ax = plt.gca() if c is not None: if cmap is None: cmap = plt.rcParams['image.cmap'] smap = cm.ScalarMappable(cmap=cmap) rgba = smap.to_rgba(c) else: rgba = np.ones((len(x), 4)) rgba[:, 3] *= alpha if np.isscalar(s) or s.shape==tuple(): s = np.ones(len(x))*s # otherwise assume shapes match if cat in ('rand', 'random'): cats = np.random.randint(n_cats(), size=len(x)) else: try: cats = np.ones(len(x)) * cat except TypeError as e: raise TypeError('`cat` argument needs to be "random", a scalar, or match the input.', e) if aspects == 'auto': aspects = np.ptp(y)/np.ptp(x) if np.isscalar(aspects) or aspects.shape==tuple(): aspects = np.ones(len(x)) * aspects ims = [] for xi, yi, si, ci, cati, aspecti in zip(x, y, s, rgba, cats, aspects): data = get_cat_num(cati) offsetx = si * aspecti**-0.5 / (2 * data.shape[0]) offsety = si * aspecti**0.5 / (2 * data.shape[1]) im = image.AxesImage(ax, extent=(xi - offsetx, xi + offsetx, yi - offsety, yi + offsety)) if c is None: # defaults to fading "black" cdata = 1-data else: # leave just the alpha to control the fading cdata = np.ones(data.shape) imarr = np.transpose([cdata*ci[0], cdata*ci[1], cdata*ci[2], data*ci[3]], (1, 2, 0)) im.set_data(imarr) ims.append(im) for im in ims: ax.add_image(im) #ax.autoscale_view() # for some reason autoscaling fails for images. So we'll just force it via # scatter... sc = plt.scatter(x, y) sc.remove() return ims
from typing import Any, List, Tuple, Dict  # cast

from sphinx.application import Sphinx
from sphinx.ext.autodoc import ModuleLevelDocumenter
from sphinx.pycode import ModuleAnalyzer, PycodeError
from sphinx.locale import __
from sphinx.domains.python import PyObject
from sphinx import addnodes
from sphinx.util.inspect import signature as Signature
from sphinx.util.inspect import stringify_signature

import logging
logger = logging.getLogger(__name__)

import inspect

from fontbakery.callable import (
    FontbakeryCallable
    , FontBakeryCondition
    , FontBakeryCheck
    , Disabled
    , FontBakeryExpectedValue
)

# Reference the imported names so linters don't flag them as unused;
# this branch never executes.
if False:  # pylint: disable=using-constant-test
    FontbakeryCallable
    FontBakeryCondition
    FontBakeryCheck
    Disabled
    FontBakeryExpectedValue

__version__ = '0.0.1'


class FontBakeryCallableDocumenter(ModuleLevelDocumenter):
    """
    Specialized autodoc Documenter subclass for instances of
    FontbakeryCallable (FontBakery checks and conditions).
    """
    objtype = 'fontbakerycallable'
    can_doc_cls = FontbakeryCallable
    member_order = 30

    @classmethod
    def can_document_member(cls, member, membername, isattr, parent):
        # type: (Any, str, bool, Any) -> bool
        # Only document objects wrapped by our callable class (or subclass).
        return isinstance(member, cls.can_doc_cls)

    def format_args(self):  # pylint: disable=arguments-differ
        # I am really not sure what went wrong here...
        # type: () -> str
        # We use the original signature from the wrapped _func, not the
        # wrapper itself.  Conditions have a meaningful return value.
        has_retval = isinstance(self.object, FontBakeryCondition)
        if not hasattr(self.object, '_func'):
            # FIXME! Unclear when this happens -- bail out rather than crash.
            return None
        sig = Signature(self.object._func, bound_method=False, has_retval=has_retval)
        args = stringify_signature(sig)
        # escape backslashes for reST
        args = args.replace('\\', '\\\\')
        return args

    def format_name(self):
        # Injects extra info (the check id and its first source line number)
        # into the formatted name, joined by the ":::" separator.  The
        # receiving end is PyFontBakeryObject.handle_signature below, which
        # splits it out again.  This is not a clean solution!
        #
        # Per the autodoc docs, the result "normally should be something that
        # can be parsed by the generated directive, but doesn't need to be
        # (Sphinx will display it unparsed then)".
        #
        # NOTE(review): observed side effect of this rename hack is that the
        # [source] view is removed for these entries, while genindex.html
        # anchors keep working.
        res = super().format_name()
        if self.objtype == 'fontbakerycheck':
            # A bit hackish: pass the check id plus the first source line
            # number of self.object along with the python name.
            lineno = inspect.getsourcelines(self.object)[1]
            res = self.object.id + ':::' + f'{lineno}' + ':::' + res
        # e.g.:
        # > com.google.fonts/check/xavgcharwidth:::59:::com_google_fonts_check_xavgcharwidth
        return res

    def generate(self, more_content=None, real_modname=None,
                 check_module=False, all_members=False):
        # type: (Any, str, bool, bool) -> None
        """Generate reST for the object given by *self.name*, and possibly for
        its members.

        If *more_content* is given, include that content. If *real_modname* is
        given, use that module name to find attribute docs. If *check_module* is
        True, only generate if the object is defined in the module name it is
        imported from. If *all_members* is True, document all members.
        """
        # e.g. self.name == 'fontbakery.profiles.post::com_google_fonts_check_post_table_version'
        if not self.parse_name():
            # need a module to import
            logger.warning(
                __('don\'t know which module to import for autodocumenting '
                   '%r (try placing a "module" or "currentmodule" directive '
                   'in the document, or giving an explicit module name)') %
                self.name, type='autodoc')
            return

        # now, import the module and get object to document
        if not self.import_object():
            return

        # If there is no real module defined, figure out which to use.
        # The real module is used in the module analyzer to look up the module
        # where the attribute documentation would actually be found in.
        # This is used for situations where you have a module that collects the
        # functions and classes of internal submodules.
        self.real_modname = real_modname or self.get_real_modname()  # type: str

        # try to also get a source code analyzer for attribute docs
        try:
            self.analyzer = ModuleAnalyzer.for_module(self.real_modname)
            # parse right now, to get PycodeErrors on parsing (results will
            # be cached anyway)
            self.analyzer.find_attr_docs()
        except PycodeError as err:
            logger.debug('[autodoc] module analyzer failed: %s', err)
            # no source file -- e.g. for builtin and C modules
            self.analyzer = None
            # at least add the module.__file__ as a dependency
            if hasattr(self.module, '__file__') and self.module.__file__:
                self.directive.filename_set.add(self.module.__file__)
        else:
            self.directive.filename_set.add(self.analyzer.srcname)

        # check __module__ of object (for members not given explicitly)
        if check_module:
            if not self.check_module():
                return

        sourcename = self.get_sourcename()

        # make sure that the result starts with an empty line.  This is
        # necessary for some situations where another directive preprocesses
        # reST and no starting newline is present
        self.add_line('', sourcename)

        # format the object's signature, if any
        sig = self.format_signature()

        # generate the directive header and options, if applicable
        self.add_directive_header(sig)
        self.add_line('', sourcename)

        # e.g. the module directive doesn't have content
        self.indent += self.content_indent

        # add all content (from docstrings, attribute docs etc.)
        self.add_content(more_content)

        # document members, if possible
        self.document_members(all_members)


class FontBakeryCheckDocumenter(FontBakeryCallableDocumenter):
    # Documents FontBakeryCheck instances.
    objtype = 'fontbakerycheck'
    can_doc_cls = FontBakeryCheck


class FontBakeryConditionDocumenter(FontBakeryCallableDocumenter):
    # Documents FontBakeryCondition instances.
    objtype = 'fontbakerycondition'
    can_doc_cls = FontBakeryCondition


from sphinx.domains.python import _pseudo_parse_arglist

import re

# Same shape as sphinx.domains.python.py_sig_re: optional dotted class
# prefix, thing name, optional argument list and return annotation.
py_sig_re = re.compile(
    r'''^ ([\w.]*\.)?            # class name(s)
          (\w+)  \s*             # thing name
          (?: \(\s*(.*)\s*\)     # optional: arguments
          (?:\s* -> \s* (.*))?   # return annotation
          )? $                   # and nothing more
          ''', re.VERBOSE)


class PyFontBakeryObject(PyObject):
    """
    Description of a class-like object (classes, interfaces, exceptions).
    """

    allow_nesting = True

    @property
    def pretty_objtype(self):
        # 'fontbakerycheck' -> 'FontBakeryCheck', etc.
        if self.objtype.startswith('fontbakery'):
            suffix = self.objtype[len('fontbakery'):]
            return 'FontBakery' + suffix[0].upper() + suffix[1:]
        return self.objtype

    def get_signature_prefix(self, sig):
        # type: (str) -> str
        # e.g. sig == 'com_google_fonts_check_all_glyphs_have_codepoints(ttFont)'
        return self.pretty_objtype + ' '

    # NOTE(review, upstream behavior): this returns two values but also
    # mutates `signode` massively, which is undocumented upstream.
    # signode is an instance of sphinx.addnodes.desc_signature; see
    # https://github.com/sphinx-doc/sphinx/blob/master/sphinx/domains/python.py#L237
    def _handle_signature(self, cid, lineno, sig, signode):
        # type: (str, str, str, addnodes.desc_signature) -> Tuple[str, str]
        """Transform a Python signature into RST nodes.

        Return (fully qualified name of the thing, classname if any).

        If inside a class, the current class name is handled intelligently:
        * it is stripped from the displayed name if present
        * it is added to the full name (return value) if not present

        The resulting signode (roughly, whitespace is not equivalent):

        <desc_signature class="" first="False"
            fullname="com.google.fonts/check/all_glyphs_have_codepoints"
            module="fontbakery.profiles.cmap">
          <desc_annotation xml:space="preserve">FontBakeryCheck </desc_annotation>
          <desc_addname xml:space="preserve">fontbakery.profiles.cmap.</desc_addname>
          <desc_name xml:space="preserve">com_google_fonts_check_all_glyphs_have_codepoints</desc_name>
          <desc_parameterlist xml:space="preserve">
            <desc_parameter xml:space="preserve">ttFont</desc_parameter>
          </desc_parameterlist>
        </desc_signature>
        """
        m = py_sig_re.match(sig)
        if m is None:
            # this is the immediate fail when the signature can't be parsed
            raise ValueError
        prefix, name, arglist, retann = m.groups()
        # e.g.: prefix, name, arglist, retann =
        #       None com_google_fonts_check_all_glyphs_have_codepoints ttFont None

        # determine module and class name (if applicable), as well as full name
        modname = self.options.get('module', self.env.ref_context.get('py:module'))
        classname = self.env.ref_context.get('py:class')

        if classname:
            add_module = False
            if prefix and (prefix == classname or prefix.startswith(classname + ".")):
                fullname = prefix + name
                # class name is given again in the signature
                prefix = prefix[len(classname):].lstrip('.')
            elif prefix:
                # class name is given in the signature, but different
                # (shouldn't happen)
                fullname = classname + '.' + prefix + name
            else:
                # class name is not given in the signature
                fullname = classname + '.' + name
        else:
            add_module = True
            if prefix:
                classname = prefix.rstrip('.')
                fullname = prefix + name
            else:
                classname = ''
                fullname = name

        signode['module'] = modname
        signode['class'] = classname
        signode['fullname'] = fullname
        # carry the source line number through for later consumers
        signode.attributes['lineno'] = lineno

        #sig_prefix = self.get_signature_prefix(sig)
        #if sig_prefix:
        #    signode += addnodes.desc_annotation(sig_prefix, sig_prefix)
        if prefix:
            signode += addnodes.desc_addname(prefix, prefix)
        elif add_module and self.env.config.add_module_names:
            if modname and modname != 'exceptions':
                # exceptions are a special case, since they are documented in
                # the 'exceptions' module.
                #nodetext = modname + ' ID: '
                #signode += addnodes.desc_addname(nodetext, nodetext)
                pass

        # the check id (cid), not the python name, becomes the displayed name
        signode += addnodes.desc_name(name, cid)
        if arglist:
            _pseudo_parse_arglist(signode, arglist)
        else:
            if self.needs_arglist():
                # for callables, add an empty parameter list
                signode += addnodes.desc_parameterlist()

        if retann:
            signode += addnodes.desc_returns(retann, retann)

        anno = self.options.get('annotation')
        if anno:
            signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)

        return cid, prefix

    def handle_signature(self, sig, signode):
        # sig arrives looking like:
        #   com.google.fonts/check/x:::36:::com_google_fonts_check_x(ttFont)
        # where the ":::"-separated extras were injected by
        # FontBakeryCallableDocumenter.format_name above.
        cid = None
        if ':::' in sig:
            cid, lineno, sig = sig.split(':::')
        res = '(NONE!)'
        try:
            res = self._handle_signature(cid, lineno, sig, signode) if cid is not None\
                                else super().handle_signature(sig, signode)
        except Exception as e:
            print('!!!', e)
            raise e
        return res

    # This ends up in: path-to-docs/html/genindex.html
    def get_index_text(self, modname, name):
        # type: (str, Tuple[str, str]) -> str
        # e.g. name == ('com_google_fonts_check_all_glyphs_have_codepoints', None)
        return f'{name[0]} ({self.pretty_objtype} in {modname})'

    def add_target_and_index(self, name_cls, sig, signode):
        # type: (Tuple[str, str], str, addnodes.desc_signature) -> None
        modname = self.options.get('module', self.env.ref_context.get('py:module'))
        # fullname = (modname and modname + '.' or '') + name_cls[0]
        fullname = name_cls[0]
        # note target
        if fullname not in self.state.document.ids:
            signode['names'].append(fullname)
            signode['ids'].append(fullname)
            signode['first'] = (not self.names)
            self.state.document.note_explicit_target(signode)
            # note, there will be a change to this in a future sphinx release
            # https://github.com/sphinx-doc/sphinx/commit/259be8716ad4b2332aa4d7693d73400eb06fa7d7
            ## in the past (now)
            objects = self.env.domaindata['py']['objects']
            if fullname in objects:
                self.state_machine.reporter.warning(
                    'duplicate object description of %s, ' % fullname +
                    'other instance in ' +
                    self.env.doc2path(objects[fullname][0]) +
                    ', use :noindex: for one of them',
                    line=self.lineno)
            objects[fullname] = (self.env.docname, self.objtype)
            ## in the future
            # domain = cast(PythonDomain, self.env.get_domain('py'))
            # domain.note_object(fullname, self.objtype)

        indextext = self.get_index_text(modname, name_cls)
        if indextext:
            self.indexnode['entries'].append(('single', indextext, fullname, '', None))


def setup(app):
    # type: (Sphinx) -> Dict[str, Any]
    """Sphinx extension setup function.

    When the extension is loaded, Sphinx imports this module and executes
    the ``setup()`` function, which in turn notifies Sphinx of everything
    the extension offers.

    Parameters
    ----------
    app : sphinx.application.Sphinx
        Application object representing the Sphinx process

    See Also
    --------
    `The Sphinx documentation on Extensions
    <http://sphinx-doc.org/extensions.html>`_

    `The Extension Tutorial <http://sphinx-doc.org/extdev/tutorial.html>`_

    `The Extension API <http://sphinx-doc.org/extdev/appapi.html>`_
    """
    if not isinstance(app, Sphinx):
        # probably called by tests
        return {'version': __version__, 'parallel_read_safe': True}

    # _patch_python_domain()

    #=> this:
    app.add_autodocumenter(FontBakeryCallableDocumenter)
    app.add_autodocumenter(FontBakeryCheckDocumenter)
    app.add_autodocumenter(FontBakeryConditionDocumenter)

    # https://github.com/sphinx-doc/sphinx/blob/master/sphinx/domains/python.py
    app.add_directive_to_domain('py', 'fontbakerycallable', PyFontBakeryObject, override=False)
    app.add_directive_to_domain('py', 'fontbakerycheck', PyFontBakeryObject, override=False)
    app.add_directive_to_domain('py', 'fontbakerycondition', PyFontBakeryObject, override=False)

    # => see e.g.: https://github.com/sphinx-doc/sphinx/blob/master/sphinx/ext/autodoc/__init__.py#L984
    app.setup_extension('sphinx.ext.autodoc')
    app.connect('autodoc-process-docstring', _process_docstring)
    app.connect('autodoc-skip-member', _skip_member)

    #for name, (default, rebuild) in Config._config_values.items():
    #    app.add_config_value(name, default, rebuild)

    return {'version': __version__, 'parallel_read_safe': True}


def _skip_member(app, what, name, obj, skip, options):
    # type: (Sphinx, str, str, Any, bool, Any) -> bool
    """Determine if private and special class members are included in docs.

    Parameters
    ----------
    app : sphinx.application.Sphinx
        Application object representing the Sphinx process
    what : str
        A string specifying the type of the object to which the member
        belongs. Valid values: "module", "class", "exception", "function",
        "method", "attribute".
    name : str
        The name of the member.
    obj : module, class, exception, function, method, or attribute.
        For example, if the member is the __init__ method of class A, then
        `obj` will be `A.__init__`.
    skip : bool
        A boolean indicating if autodoc will skip this member if `_skip_member`
        does not override the decision
    options : sphinx.ext.autodoc.Options
        The options given to the directive: an object with attributes
        inherited_members, undoc_members, show_inheritance and noindex that
        are True if the flag option of same name was given to the auto
        directive.

    Returns
    -------
    bool
        True if the member should be skipped during creation of the docs,
        False if it should be included in the docs.
    """
    # Hide the FontbakeryCallable metadata attributes from the generated docs;
    # their contents are rendered via _process_docstring instead.
    if name in ['check_skip_filter',
                'conditions',
                'configs',
                'description',
                'documentation',
                'force',
                'id',
                'is_librebarcode',
                'name',
                'proposal',
                'rationale',
                'severity']:
        return True
    else:
        # None defers the decision back to autodoc's default behavior.
        return None


def _process_docstring(app, what, name, obj, options, lines):
    # type: (Sphinx, str, str, Any, Any, List[str]) -> None
    """Process the docstring for a given python object.

    Called when autodoc has read and processed a docstring.  `lines` is a
    list of docstring lines that `_process_docstring` modifies in place to
    change what Sphinx outputs.

    Appends the check's ``rationale`` and ``proposal`` metadata (when
    present on *obj*) to the rendered docstring.

    Parameters
    ----------
    app : sphinx.application.Sphinx
        Application object representing the Sphinx process.
    what : str
        A string specifying the type of the object to which the docstring
        belongs. Valid values: "module", "class", "exception", "function",
        "method", "attribute".
    name : str
        The fully qualified name of the object.
    obj : module, class, exception, function, method, or attribute
        The object to which the docstring belongs.
    options : sphinx.ext.autodoc.Options
        The options given to the directive: an object with attributes
        inherited_members, undoc_members, show_inheritance and noindex that
        are True if the flag option of same name was given to the auto
        directive.
    lines : list of str
        The lines of the docstring, see above.

        .. note:: `lines` is modified *in place*
    """
    if hasattr(obj, 'rationale') and obj.rationale:
        lines.append("**Rationale:**")
        for line in obj.rationale.split('\n'):
            lines.append(line)

    if hasattr(obj, 'proposal') and obj.proposal:
        proposal = obj.proposal
        if not isinstance(obj.proposal, list):
            proposal = [obj.proposal]
        # 'legacy:'-tagged entries carry the historic check name instead of a URL.
        proposals = [p for p in proposal if "legacy:" not in p]
        legacy_name = [p.split('legacy:')[1] for p in proposal if "legacy:" in p]
        if legacy_name:
            lines.append(f"**Legacy check** originally simply called '{legacy_name[0]}'."
                         f" We used to lack richer metadata back in 2015. We're open to"
                         f" further improvements to this description.")
        else:
            if proposals:
                lines.append(f"**Originally proposed at** {proposals.pop(0)}")
                if proposals:
                    proposals = ' / '.join(proposals)
                    lines.append(f"**Some additional changes** were proposed at {proposals}")
"""Monorail client."""
import json

from google.oauth2 import service_account
from google.auth.transport import requests as google_requests
import requests

_API_BASE = 'https://api-dot-monorail-prod.appspot.com/prpc'
_TARGET_AUDIENCE = 'https://monorail-prod.appspot.com'
_XSSI_PREFIX = ')]}\'\n'


class Client:
  """Thin client for the Monorail issue tracker pRPC API."""

  def __init__(self, project, service_account_info):
    self.project = project
    self._service_account_info = service_account_info

  def _auth_headers(self):
    """Build JSON request headers carrying a fresh ID-token credential."""
    credentials = service_account.IDTokenCredentials.from_service_account_info(
        self._service_account_info, target_audience=_TARGET_AUDIENCE)
    credentials.refresh(google_requests.Request())

    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json',
    }
    credentials.apply(headers)
    return headers

  def get_issue(self, issue_id):
    """Get issue data."""
    url = f'{_API_BASE}/monorail.v3.Issues/GetIssue'
    body = {'name': f'projects/{self.project}/issues/{issue_id}'}
    response = requests.post(url, json=body, headers=self._auth_headers())
    response.raise_for_status()

    # Strip the XSSI protection prefix before decoding the JSON payload.
    payload = response.text
    if payload.startswith(_XSSI_PREFIX):
      payload = payload[len(_XSSI_PREFIX):]
    return json.loads(payload)
"""Contains functions for performing actions on rooms.""" import itertools import logging import math import random import string from collections import OrderedDict from typing import ( TYPE_CHECKING, Any, Awaitable, Collection, Dict, List, Optional, Tuple, ) import attr from typing_extensions import TypedDict from synapse.api.constants import ( EventContentFields, EventTypes, GuestAccess, HistoryVisibility, JoinRules, Membership, RoomCreationPreset, RoomEncryptionAlgorithms, RoomTypes, ) from synapse.api.errors import ( AuthError, Codes, HttpResponseException, LimitExceededError, NotFoundError, StoreError, SynapseError, ) from synapse.api.filtering import Filter from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion from synapse.event_auth import validate_event_for_room_version from synapse.events import EventBase from synapse.events.utils import copy_power_levels_contents from synapse.federation.federation_client import InvalidResponseError from synapse.handlers.federation import get_domains_from_state from synapse.rest.admin._base import assert_user_is_admin from synapse.storage.databases.main.relations import BundledAggregations from synapse.storage.state import StateFilter from synapse.streams import EventSource from synapse.types import ( JsonDict, MutableStateMap, Requester, RoomAlias, RoomID, RoomStreamToken, StateMap, StreamToken, UserID, create_requester, ) from synapse.util import stringutils from synapse.util.async_helpers import Linearizer from synapse.util.caches.response_cache import ResponseCache from synapse.util.stringutils import parse_and_validate_server_name from synapse.visibility import filter_events_for_client if TYPE_CHECKING: from synapse.server import HomeServer logger = logging.getLogger(__name__) id_server_scheme = "https://" FIVE_MINUTES_IN_MS = 5 * 60 * 1000 @attr.s(slots=True, frozen=True, auto_attribs=True) class EventContext: events_before: List[EventBase] event: EventBase events_after: List[EventBase] state: 
List[EventBase] aggregations: Dict[str, BundledAggregations] start: str end: str class RoomCreationHandler: def __init__(self, hs: "HomeServer"): self.store = hs.get_datastores().main self.auth = hs.get_auth() self.clock = hs.get_clock() self.hs = hs self.spam_checker = hs.get_spam_checker() self.event_creation_handler = hs.get_event_creation_handler() self.room_member_handler = hs.get_room_member_handler() self._event_auth_handler = hs.get_event_auth_handler() self.config = hs.config self.request_ratelimiter = hs.get_request_ratelimiter() # Room state based off defined presets self._presets_dict: Dict[str, Dict[str, Any]] = { RoomCreationPreset.PRIVATE_CHAT: { "join_rules": JoinRules.INVITE, "history_visibility": HistoryVisibility.SHARED, "original_invitees_have_ops": False, "guest_can_join": True, "power_level_content_override": {"invite": 0}, }, RoomCreationPreset.TRUSTED_PRIVATE_CHAT: { "join_rules": JoinRules.INVITE, "history_visibility": HistoryVisibility.SHARED, "original_invitees_have_ops": True, "guest_can_join": True, "power_level_content_override": {"invite": 0}, }, RoomCreationPreset.PUBLIC_CHAT: { "join_rules": JoinRules.PUBLIC, "history_visibility": HistoryVisibility.SHARED, "original_invitees_have_ops": False, "guest_can_join": False, "power_level_content_override": {}, }, } # Modify presets to selectively enable encryption by default per homeserver config for preset_name, preset_config in self._presets_dict.items(): encrypted = ( preset_name in self.config.room.encryption_enabled_by_default_for_room_presets ) preset_config["encrypted"] = encrypted self._replication = hs.get_replication_data_handler() # linearizer to stop two upgrades happening at once self._upgrade_linearizer = Linearizer("room_upgrade_linearizer") # If a user tries to update the same room multiple times in quick # succession, only process the first attempt and return its result to # subsequent requests self._upgrade_response_cache: ResponseCache[Tuple[str, str]] = ResponseCache( 
hs.get_clock(), "room_upgrade", timeout_ms=FIVE_MINUTES_IN_MS ) self._server_notices_mxid = hs.config.servernotices.server_notices_mxid self.third_party_event_rules = hs.get_third_party_event_rules() async def upgrade_room( self, requester: Requester, old_room_id: str, new_version: RoomVersion ) -> str: """Replace a room with a new room with a different version Args: requester: the user requesting the upgrade old_room_id: the id of the room to be replaced new_version: the new room version to use Returns: the new room id Raises: ShadowBanError if the requester is shadow-banned. """ await self.request_ratelimiter.ratelimit(requester) user_id = requester.user.to_string() # Check if this room is already being upgraded by another person for key in self._upgrade_response_cache.keys(): if key[0] == old_room_id and key[1] != user_id: # Two different people are trying to upgrade the same room. # Send the second an error. # # Note that this of course only gets caught if both users are # on the same homeserver. raise SynapseError( 400, "An upgrade for this room is currently in progress" ) # Upgrade the room # # If this user has sent multiple upgrade requests for the same room # and one of them is not complete yet, cache the response and # return it to all subsequent requests ret = await self._upgrade_response_cache.wrap( (old_room_id, user_id), self._upgrade_room, requester, old_room_id, new_version, # args for _upgrade_room ) return ret async def _upgrade_room( self, requester: Requester, old_room_id: str, new_version: RoomVersion ) -> str: """ Args: requester: the user requesting the upgrade old_room_id: the id of the room to be replaced new_versions: the version to upgrade the room to Raises: ShadowBanError if the requester is shadow-banned. 
""" user_id = requester.user.to_string() assert self.hs.is_mine_id(user_id), "User must be our own: %s" % (user_id,) # start by allocating a new room id r = await self.store.get_room(old_room_id) if r is None: raise NotFoundError("Unknown room id %s" % (old_room_id,)) new_room_id = await self._generate_room_id( creator_id=user_id, is_public=r["is_public"], room_version=new_version, ) logger.info("Creating new room %s to replace %s", new_room_id, old_room_id) # we create and auth the tombstone event before properly creating the new # room, to check our user has perms in the old room. ( tombstone_event, tombstone_context, ) = await self.event_creation_handler.create_event( requester, { "type": EventTypes.Tombstone, "state_key": "", "room_id": old_room_id, "sender": user_id, "content": { "body": "This room has been replaced", "replacement_room": new_room_id, }, }, ) old_room_version = await self.store.get_room_version(old_room_id) validate_event_for_room_version(old_room_version, tombstone_event) await self._event_auth_handler.check_auth_rules_from_context( old_room_version, tombstone_event, tombstone_context ) await self.clone_existing_room( requester, old_room_id=old_room_id, new_room_id=new_room_id, new_room_version=new_version, tombstone_event_id=tombstone_event.event_id, ) # now send the tombstone await self.event_creation_handler.handle_new_client_event( requester=requester, event=tombstone_event, context=tombstone_context, ) old_room_state = await tombstone_context.get_current_state_ids() # We know the tombstone event isn't an outlier so it has current state. assert old_room_state is not None # update any aliases await self._move_aliases_to_new_room( requester, old_room_id, new_room_id, old_room_state ) # Copy over user push rules, tags and migrate room directory state await self.room_member_handler.transfer_room_state_on_room_upgrade( old_room_id, new_room_id ) # finally, shut down the PLs in the old room, and update them in the new # room. 
await self._update_upgraded_room_pls( requester, old_room_id, new_room_id, old_room_state, ) return new_room_id async def _update_upgraded_room_pls( self, requester: Requester, old_room_id: str, new_room_id: str, old_room_state: StateMap[str], ) -> None: """Send updated power levels in both rooms after an upgrade Args: requester: the user requesting the upgrade old_room_id: the id of the room to be replaced new_room_id: the id of the replacement room old_room_state: the state map for the old room Raises: ShadowBanError if the requester is shadow-banned. """ old_room_pl_event_id = old_room_state.get((EventTypes.PowerLevels, "")) if old_room_pl_event_id is None: logger.warning( "Not supported: upgrading a room with no PL event. Not setting PLs " "in old room." ) return old_room_pl_state = await self.store.get_event(old_room_pl_event_id) # we try to stop regular users from speaking by setting the PL required # to send regular events and invites to 'Moderator' level. That's normally # 50, but if the default PL in a room is 50 or more, then we set the # required PL above that. 
pl_content = dict(old_room_pl_state.content) users_default = int(pl_content.get("users_default", 0)) restricted_level = max(users_default + 1, 50) updated = False for v in ("invite", "events_default"): current = int(pl_content.get(v, 0)) if current < restricted_level: logger.debug( "Setting level for %s in %s to %i (was %i)", v, old_room_id, restricted_level, current, ) pl_content[v] = restricted_level updated = True else: logger.debug("Not setting level for %s (already %i)", v, current) if updated: try: await self.event_creation_handler.create_and_send_nonmember_event( requester, { "type": EventTypes.PowerLevels, "state_key": "", "room_id": old_room_id, "sender": requester.user.to_string(), "content": pl_content, }, ratelimit=False, ) except AuthError as e: logger.warning("Unable to update PLs in old room: %s", e) await self.event_creation_handler.create_and_send_nonmember_event( requester, { "type": EventTypes.PowerLevels, "state_key": "", "room_id": new_room_id, "sender": requester.user.to_string(), "content": old_room_pl_state.content, }, ratelimit=False, ) async def clone_existing_room( self, requester: Requester, old_room_id: str, new_room_id: str, new_room_version: RoomVersion, tombstone_event_id: str, ) -> None: """Populate a new room based on an old room Args: requester: the user requesting the upgrade old_room_id : the id of the room to be replaced new_room_id: the id to give the new room (should already have been created with _gemerate_room_id()) new_room_version: the new room version to use tombstone_event_id: the ID of the tombstone event in the old room. 
""" user_id = requester.user.to_string() if not await self.spam_checker.user_may_create_room(user_id): raise SynapseError( 403, "You are not permitted to create rooms", Codes.FORBIDDEN ) creation_content: JsonDict = { "room_version": new_room_version.identifier, "predecessor": {"room_id": old_room_id, "event_id": tombstone_event_id}, } # Check if old room was non-federatable # Get old room's create event old_room_create_event = await self.store.get_create_event_for_room(old_room_id) # Check if the create event specified a non-federatable room if not old_room_create_event.content.get(EventContentFields.FEDERATE, True): # If so, mark the new room as non-federatable as well creation_content[EventContentFields.FEDERATE] = False initial_state = {} # Replicate relevant room events types_to_copy: List[Tuple[str, Optional[str]]] = [ (EventTypes.JoinRules, ""), (EventTypes.Name, ""), (EventTypes.Topic, ""), (EventTypes.RoomHistoryVisibility, ""), (EventTypes.GuestAccess, ""), (EventTypes.RoomAvatar, ""), (EventTypes.RoomEncryption, ""), (EventTypes.ServerACL, ""), (EventTypes.RelatedGroups, ""), (EventTypes.PowerLevels, ""), ] # If the old room was a space, copy over the room type and the rooms in # the space. if ( old_room_create_event.content.get(EventContentFields.ROOM_TYPE) == RoomTypes.SPACE ): creation_content[EventContentFields.ROOM_TYPE] = RoomTypes.SPACE types_to_copy.append((EventTypes.SpaceChild, None)) old_room_state_ids = await self.store.get_filtered_current_state_ids( old_room_id, StateFilter.from_types(types_to_copy) ) # map from event_id to BaseEvent old_room_state_events = await self.store.get_events(old_room_state_ids.values()) for k, old_event_id in old_room_state_ids.items(): old_event = old_room_state_events.get(old_event_id) if old_event: # If the event is an space child event with empty content, it was # removed from the space and should be ignored. 
if k[0] == EventTypes.SpaceChild and not old_event.content: continue initial_state[k] = old_event.content # deep-copy the power-levels event before we start modifying it # note that if frozen_dicts are enabled, `power_levels` will be a frozen # dict so we can't just copy.deepcopy it. initial_state[ (EventTypes.PowerLevels, "") ] = power_levels = copy_power_levels_contents( initial_state[(EventTypes.PowerLevels, "")] ) # Resolve the minimum power level required to send any state event # We will give the upgrading user this power level temporarily (if necessary) such that # they are able to copy all of the state events over, then revert them back to their # original power level afterwards in _update_upgraded_room_pls # Copy over user power levels now as this will not be possible with >100PL users once # the room has been created # Calculate the minimum power level needed to clone the room event_power_levels = power_levels.get("events", {}) if not isinstance(event_power_levels, dict): event_power_levels = {} state_default = power_levels.get("state_default", 50) try: state_default_int = int(state_default) # type: ignore[arg-type] except (TypeError, ValueError): state_default_int = 50 ban = power_levels.get("ban", 50) try: ban = int(ban) # type: ignore[arg-type] except (TypeError, ValueError): ban = 50 needed_power_level = max( state_default_int, ban, max(event_power_levels.values()) ) # Get the user's current power level, this matches the logic in get_user_power_level, # but without the entire state map. 
user_power_levels = power_levels.setdefault("users", {}) if not isinstance(user_power_levels, dict): user_power_levels = {} users_default = power_levels.get("users_default", 0) current_power_level = user_power_levels.get(user_id, users_default) try: current_power_level_int = int(current_power_level) # type: ignore[arg-type] except (TypeError, ValueError): current_power_level_int = 0 # Raise the requester's power level in the new room if necessary if current_power_level_int < needed_power_level: user_power_levels[user_id] = needed_power_level await self._send_events_for_new_room( requester, new_room_id, # we expect to override all the presets with initial_state, so this is # somewhat arbitrary. preset_config=RoomCreationPreset.PRIVATE_CHAT, invite_list=[], initial_state=initial_state, creation_content=creation_content, ratelimit=False, ) # Transfer membership events old_room_member_state_ids = await self.store.get_filtered_current_state_ids( old_room_id, StateFilter.from_types([(EventTypes.Member, None)]) ) # map from event_id to BaseEvent old_room_member_state_events = await self.store.get_events( old_room_member_state_ids.values() ) for old_event in old_room_member_state_events.values(): # Only transfer ban events if ( "membership" in old_event.content and old_event.content["membership"] == "ban" ): await self.room_member_handler.update_membership( requester, UserID.from_string(old_event.state_key), new_room_id, "ban", ratelimit=False, content=old_event.content, ) # XXX invites/joins # XXX 3pid invites async def _move_aliases_to_new_room( self, requester: Requester, old_room_id: str, new_room_id: str, old_room_state: StateMap[str], ) -> None: # check to see if we have a canonical alias. 
        """Move local room aliases from the old room to its replacement.

        Re-points the local alias mappings at the new room, then — if the old
        room has an ``m.room.canonical_alias`` event — splits its content
        between the two rooms: aliases this homeserver owns move to a new
        canonical-alias event in the new room, the remainder stays in the old
        room's event.  Failures to send either event are logged, not raised.
        """
        canonical_alias_event = None
        canonical_alias_event_id = old_room_state.get((EventTypes.CanonicalAlias, ""))
        if canonical_alias_event_id:
            canonical_alias_event = await self.store.get_event(canonical_alias_event_id)

        await self.store.update_aliases_for_room(old_room_id, new_room_id)

        if not canonical_alias_event:
            return

        # If there is a canonical alias we need to update the one in the old
        # room and set one in the new one.
        old_canonical_alias_content = dict(canonical_alias_event.content)
        new_canonical_alias_content = {}

        canonical = canonical_alias_event.content.get("alias")
        if canonical and self.hs.is_mine_id(canonical):
            new_canonical_alias_content["alias"] = canonical
            old_canonical_alias_content.pop("alias", None)

        # We convert to a list as it will be a Tuple.
        old_alt_aliases = list(old_canonical_alias_content.get("alt_aliases", []))
        if old_alt_aliases:
            old_canonical_alias_content["alt_aliases"] = old_alt_aliases
            new_alt_aliases = new_canonical_alias_content.setdefault("alt_aliases", [])
            for alias in canonical_alias_event.content.get("alt_aliases", []):
                try:
                    if self.hs.is_mine_id(alias):
                        new_alt_aliases.append(alias)
                        old_alt_aliases.remove(alias)
                except Exception:
                    # is_mine_id can throw on a malformed alias; skip it
                    logger.info(
                        "Invalid alias %s in canonical alias event %s",
                        alias,
                        canonical_alias_event_id,
                    )

            if not old_alt_aliases:
                old_canonical_alias_content.pop("alt_aliases")

        # If a canonical alias event existed for the old room, fire a canonical
        # alias event for the new room with a copy of the information.
        try:
            await self.event_creation_handler.create_and_send_nonmember_event(
                requester,
                {
                    "type": EventTypes.CanonicalAlias,
                    "state_key": "",
                    "room_id": old_room_id,
                    "sender": requester.user.to_string(),
                    "content": old_canonical_alias_content,
                },
                ratelimit=False,
            )
        except SynapseError as e:
            # again I'm not really expecting this to fail, but if it does, I'd rather
            # we returned the new room to the client at this point.
            logger.error("Unable to send updated alias events in old room: %s", e)

        try:
            await self.event_creation_handler.create_and_send_nonmember_event(
                requester,
                {
                    "type": EventTypes.CanonicalAlias,
                    "state_key": "",
                    "room_id": new_room_id,
                    "sender": requester.user.to_string(),
                    "content": new_canonical_alias_content,
                },
                ratelimit=False,
            )
        except SynapseError as e:
            # again I'm not really expecting this to fail, but if it does, I'd rather
            # we returned the new room to the client at this point.
            logger.error("Unable to send updated alias events in new room: %s", e)

    async def create_room(
        self,
        requester: Requester,
        config: JsonDict,
        ratelimit: bool = True,
        creator_join_profile: Optional[JsonDict] = None,
    ) -> Tuple[dict, int]:
        """Creates a new room.

        Args:
            requester: The user who requested the room creation.
            config : A dict of configuration options.
            ratelimit: set to False to disable the rate limiter

            creator_join_profile:
                Set to override the displayname and avatar for the creating
                user in this room. If unset, displayname and avatar will be
                derived from the user's profile. If set, should contain the
                values to go in the body of the 'join' event (typically
                `avatar_url` and/or `displayname`.

        Returns:
            First, a dict containing the keys `room_id` and, if an alias
            was requested, `room_alias`. Secondly, the stream_id of the
            last persisted event.
        Raises:
            SynapseError if the room ID couldn't be stored, or something went
            horribly wrong.
            ResourceLimitError if server is blocked to some resource being
            exceeded
        """
        user_id = requester.user.to_string()

        await self.auth.check_auth_blocking(requester=requester)

        if (
            self._server_notices_mxid is not None
            and requester.user.to_string() == self._server_notices_mxid
        ):
            # allow the server notices mxid to create rooms
            is_requester_admin = True
        else:
            is_requester_admin = await self.auth.is_server_admin(requester.user)

        # Let the third party rules modify the room creation config if needed, or abort
        # the room creation entirely with an exception.
        await self.third_party_event_rules.on_create_room(
            requester, config, is_requester_admin=is_requester_admin
        )

        invite_3pid_list = config.get("invite_3pid", [])
        invite_list = config.get("invite", [])

        if not is_requester_admin and not (
            await self.spam_checker.user_may_create_room(user_id)
        ):
            raise SynapseError(
                403, "You are not permitted to create rooms", Codes.FORBIDDEN
            )

        if ratelimit:
            await self.request_ratelimiter.ratelimit(requester)

        room_version_id = config.get(
            "room_version", self.config.server.default_room_version.identifier
        )

        if not isinstance(room_version_id, str):
            raise SynapseError(400, "room_version must be a string", Codes.BAD_JSON)

        room_version = KNOWN_ROOM_VERSIONS.get(room_version_id)
        if room_version is None:
            raise SynapseError(
                400,
                "Your homeserver does not support this room version",
                Codes.UNSUPPORTED_ROOM_VERSION,
            )

        room_alias = None
        if "room_alias_name" in config:
            # aliases may not contain whitespace
            for wchar in string.whitespace:
                if wchar in config["room_alias_name"]:
                    raise SynapseError(400, "Invalid characters in room alias")

            room_alias = RoomAlias(config["room_alias_name"], self.hs.hostname)
            mapping = await self.store.get_association_from_room_alias(room_alias)

            if mapping:
                raise SynapseError(400, "Room alias already taken", Codes.ROOM_IN_USE)

        # validate each invited user ID parses and has a well-formed server name
        for i in invite_list:
            try:
                uid = UserID.from_string(i)
                parse_and_validate_server_name(uid.domain)
            except Exception:
                raise SynapseError(400, "Invalid user_id: %s" % (i,))

        if (invite_list or invite_3pid_list) and requester.shadow_banned:
            # We randomly sleep a bit just to annoy the requester.
            await self.clock.sleep(random.randint(1, 10))

            # Allow the request to go through, but remove any associated invites.
            invite_3pid_list = []
            invite_list = []

        if invite_list or invite_3pid_list:
            try:
                # If there are invites in the request, see if the ratelimiting settings
                # allow that number of invites to be sent from the current user.
                await self.room_member_handler.ratelimit_multiple_invites(
                    requester,
                    room_id=None,
                    n_invites=len(invite_list) + len(invite_3pid_list),
                    update=False,
                )
            except LimitExceededError:
                raise SynapseError(400, "Cannot invite so many users at once")

        await self.event_creation_handler.assert_accepted_privacy_policy(requester)

        power_level_content_override = config.get("power_level_content_override")
        if (
            power_level_content_override
            and "users" in power_level_content_override
            and user_id not in power_level_content_override["users"]
        ):
            raise SynapseError(
                400,
                "Not a valid power_level_content_override: 'users' did not contain %s"
                % (user_id,),
            )

        visibility = config.get("visibility", None)
        is_public = visibility == "public"

        room_id = await self._generate_room_id(
            creator_id=user_id,
            is_public=is_public,
            room_version=room_version,
        )

        # Check whether this visibility value is blocked by a third party module
        allowed_by_third_party_rules = await (
            self.third_party_event_rules.check_visibility_can_be_modified(
                room_id, visibility
            )
        )
        if not allowed_by_third_party_rules:
            raise SynapseError(403, "Room visibility value not allowed.")

        if is_public:
            room_aliases = []
            if room_alias:
                room_aliases.append(room_alias.to_string())
            if not self.config.roomdirectory.is_publishing_room_allowed(
                user_id, room_id, room_aliases
            ):
                # Let's just return a generic message, as there may be all sorts of
                # reasons why we said no. TODO: Allow configurable error messages
                # per alias creation rule?
                raise SynapseError(403, "Not allowed to publish room")

        directory_handler = self.hs.get_directory_handler()
        if room_alias:
            await directory_handler.create_association(
                requester=requester,
                room_id=room_id,
                room_alias=room_alias,
                servers=[self.hs.hostname],
                check_membership=False,
            )

        preset_config = config.get(
            "preset",
            RoomCreationPreset.PRIVATE_CHAT
            if visibility == "private"
            else RoomCreationPreset.PUBLIC_CHAT,
        )

        raw_initial_state = config.get("initial_state", [])

        initial_state = OrderedDict()
        for val in raw_initial_state:
            initial_state[(val["type"], val.get("state_key", ""))] = val["content"]

        creation_content = config.get("creation_content", {})

        # override any attempt to set room versions via the creation_content
        creation_content["room_version"] = room_version.identifier

        last_stream_id = await self._send_events_for_new_room(
            requester,
            room_id,
            preset_config=preset_config,
            invite_list=invite_list,
            initial_state=initial_state,
            creation_content=creation_content,
            room_alias=room_alias,
            power_level_content_override=power_level_content_override,
            creator_join_profile=creator_join_profile,
            ratelimit=ratelimit,
        )

        if "name" in config:
            name = config["name"]
            (
                _,
                last_stream_id,
            ) = await self.event_creation_handler.create_and_send_nonmember_event(
                requester,
                {
                    "type": EventTypes.Name,
                    "room_id": room_id,
                    "sender": user_id,
                    "state_key": "",
                    "content": {"name": name},
                },
                ratelimit=False,
            )

        if "topic" in config:
            topic = config["topic"]
            (
                _,
                last_stream_id,
            ) = await self.event_creation_handler.create_and_send_nonmember_event(
                requester,
                {
                    "type": EventTypes.Topic,
                    "room_id": room_id,
                    "sender": user_id,
                    "state_key": "",
                    "content": {"topic": topic},
                },
                ratelimit=False,
            )

        # we avoid dropping the lock between invites, as otherwise joins can
        # start coming in and making the createRoom slow.
        #
        # we also don't need to check the requester's shadow-ban here, as we
        # have already done so above (and potentially emptied invite_list).
        with (await self.room_member_handler.member_linearizer.queue((room_id,))):
            content = {}
            is_direct = config.get("is_direct", None)
            if is_direct:
                content["is_direct"] = is_direct

            for invitee in invite_list:
                (
                    _,
                    last_stream_id,
                ) = await self.room_member_handler.update_membership_locked(
                    requester,
                    UserID.from_string(invitee),
                    room_id,
                    "invite",
                    ratelimit=False,
                    content=content,
                    new_room=True,
                )

            for invite_3pid in invite_3pid_list:
                id_server = invite_3pid["id_server"]
                id_access_token = invite_3pid.get("id_access_token")  # optional
                address = invite_3pid["address"]
                medium = invite_3pid["medium"]
                # Note that do_3pid_invite can raise a ShadowBanError, but this was
                # handled above by emptying invite_3pid_list.
                last_stream_id = await self.hs.get_room_member_handler().do_3pid_invite(
                    room_id,
                    requester.user,
                    medium,
                    address,
                    id_server,
                    requester,
                    txn_id=None,
                    id_access_token=id_access_token,
                )

        result = {"room_id": room_id}

        if room_alias:
            result["room_alias"] = room_alias.to_string()

        # Always wait for room creation to propagate before returning
        await self._replication.wait_for_stream_position(
            self.hs.config.worker.events_shard_config.get_instance(room_id),
            "events",
            last_stream_id,
        )

        return result, last_stream_id

    async def _send_events_for_new_room(
        self,
        creator: Requester,
        room_id: str,
        preset_config: str,
        invite_list: List[str],
        initial_state: MutableStateMap,
        creation_content: JsonDict,
        room_alias: Optional[RoomAlias] = None,
        power_level_content_override: Optional[JsonDict] = None,
        creator_join_profile: Optional[JsonDict] = None,
        ratelimit: bool = True,
    ) -> int:
        """Sends the initial events into a new room.

        `power_level_content_override` doesn't apply when initial state has
        power level state event content.

        Returns:
            The stream_id of the last event persisted.
        """
        creator_id = creator.user.to_string()

        # fields common to every event we send below
        event_keys = {"room_id": room_id, "sender": creator_id, "state_key": ""}

        def create(etype: str, content: JsonDict, **kwargs: Any) -> JsonDict:
            # build an event dict from the common keys plus per-event overrides
            e = {"type": etype, "content": content}

            e.update(event_keys)
            e.update(kwargs)

            return e

        async def send(etype: str, content: JsonDict, **kwargs: Any) -> int:
            event = create(etype, content, **kwargs)
            logger.debug("Sending %s in new room", etype)
            # Allow these events to be sent even if the user is shadow-banned to
            # allow the room creation to complete.
            (
                _,
                last_stream_id,
            ) = await self.event_creation_handler.create_and_send_nonmember_event(
                creator,
                event,
                ratelimit=False,
                ignore_shadow_ban=True,
            )
            return last_stream_id

        try:
            config = self._presets_dict[preset_config]
        except KeyError:
            raise SynapseError(
                400, f"'{preset_config}' is not a valid preset", errcode=Codes.BAD_JSON
            )

        creation_content.update({"creator": creator_id})
        await send(etype=EventTypes.Create, content=creation_content)

        logger.debug("Sending %s in new room", EventTypes.Member)
        await self.room_member_handler.update_membership(
            creator,
            creator.user,
            room_id,
            "join",
            ratelimit=ratelimit,
            content=creator_join_profile,
            new_room=True,
        )

        # We treat the power levels override specially as this needs to be one
        # of the first events that get sent into a room.
        pl_content = initial_state.pop((EventTypes.PowerLevels, ""), None)
        if pl_content is not None:
            last_sent_stream_id = await send(
                etype=EventTypes.PowerLevels, content=pl_content
            )
        else:
            power_level_content: JsonDict = {
                "users": {creator_id: 100},
                "users_default": 0,
                "events": {
                    EventTypes.Name: 50,
                    EventTypes.PowerLevels: 100,
                    EventTypes.RoomHistoryVisibility: 100,
                    EventTypes.CanonicalAlias: 50,
                    EventTypes.RoomAvatar: 50,
                    EventTypes.Tombstone: 100,
                    EventTypes.ServerACL: 100,
                    EventTypes.RoomEncryption: 100,
                },
                "events_default": 0,
                "state_default": 50,
                "ban": 50,
                "kick": 50,
                "redact": 50,
                "invite": 50,
                "historical": 100,
            }

            if config["original_invitees_have_ops"]:
                for invitee in invite_list:
                    power_level_content["users"][invitee] = 100

            # Power levels overrides are defined per chat preset
            power_level_content.update(config["power_level_content_override"])

            if power_level_content_override:
                power_level_content.update(power_level_content_override)

            last_sent_stream_id = await send(
                etype=EventTypes.PowerLevels, content=power_level_content
            )

        if room_alias and (EventTypes.CanonicalAlias, "") not in initial_state:
            last_sent_stream_id = await send(
                etype=EventTypes.CanonicalAlias,
                content={"alias": room_alias.to_string()},
            )

        if (EventTypes.JoinRules, "") not in initial_state:
            last_sent_stream_id = await send(
                etype=EventTypes.JoinRules, content={"join_rule": config["join_rules"]}
            )

        if (EventTypes.RoomHistoryVisibility, "") not in initial_state:
            last_sent_stream_id = await send(
                etype=EventTypes.RoomHistoryVisibility,
                content={"history_visibility": config["history_visibility"]},
            )

        if config["guest_can_join"]:
            if (EventTypes.GuestAccess, "") not in initial_state:
                last_sent_stream_id = await send(
                    etype=EventTypes.GuestAccess,
                    content={EventContentFields.GUEST_ACCESS: GuestAccess.CAN_JOIN},
                )

        for (etype, state_key), content in initial_state.items():
            last_sent_stream_id = await send(
                etype=etype, state_key=state_key, content=content
            )

        if config["encrypted"]:
            last_sent_stream_id = await send(
                etype=EventTypes.RoomEncryption,
                state_key="",
                content={"algorithm": RoomEncryptionAlgorithms.DEFAULT},
            )

        return last_sent_stream_id

    async def _generate_room_id(
        self,
        creator_id: str,
        is_public: bool,
        room_version: RoomVersion,
    ) -> str:
        """Generate a random room ID and persist the room under it.

        Raises:
            StoreError: if no unused room ID was found after 5 attempts.
        """
        # autogen room IDs and try to create it. We may clash, so just
        # try a few times till one goes through, giving up eventually.
        attempts = 0
        while attempts < 5:
            try:
                random_string = stringutils.random_string(18)
                gen_room_id = RoomID(random_string, self.hs.hostname).to_string()
                await self.store.store_room(
                    room_id=gen_room_id,
                    room_creator_user_id=creator_id,
                    is_public=is_public,
                    room_version=room_version,
                )
                return gen_room_id
            except StoreError:
                attempts += 1
        raise StoreError(500, "Couldn't generate a room ID.")


class RoomContextHandler:
    """Fetches the events, pagination tokens and state surrounding an event."""

    def __init__(self, hs: "HomeServer"):
        self.hs = hs
        self.auth = hs.get_auth()
        self.store = hs.get_datastores().main
        self.storage = hs.get_storage()
        self.state_store = self.storage.state

    async def get_event_context(
        self,
        requester: Requester,
        room_id: str,
        event_id: str,
        limit: int,
        event_filter: Optional[Filter],
        use_admin_priviledge: bool = False,
    ) -> Optional[EventContext]:
        """Retrieves events, pagination tokens and state around a given event
        in a room.

        Args:
            requester
            room_id
            event_id
            limit: The maximum number of events to return in total
                (excluding state).
            event_filter: the filter to apply to the events returned
                (excluding the target event_id)
            use_admin_priviledge: if `True`, return all events, regardless
                of whether `user` has access to them. To be used **ONLY**
                from the admin API.
        Returns:
            dict, or None if the event isn't found
        """
        user = requester.user
        if use_admin_priviledge:
            await assert_user_is_admin(self.auth, requester.user)

        # split the limit between events before and after the target event
        before_limit = math.floor(limit / 2.0)
        after_limit = limit - before_limit

        users = await self.store.get_users_in_room(room_id)
        is_peeking = user.to_string() not in users

        async def filter_evts(events: List[EventBase]) -> List[EventBase]:
            if use_admin_priviledge:
                return events
            return await filter_events_for_client(
                self.storage, user.to_string(), events, is_peeking=is_peeking
            )

        event = await self.store.get_event(
            event_id, get_prev_content=True, allow_none=True
        )
        if not event:
            return None

        filtered = await filter_evts([event])
        if not filtered:
            raise AuthError(403, "You don't have permission to access that event.")

        results = await self.store.get_events_around(
            room_id, event_id, before_limit, after_limit, event_filter
        )
        events_before = results.events_before
        events_after = results.events_after

        if event_filter:
            events_before = await event_filter.filter(events_before)
            events_after = await event_filter.filter(events_after)

        events_before = await filter_evts(events_before)
        events_after = await filter_evts(events_after)
        # filter_evts can return a pruned event in case the user is allowed to see that
        # there's something there but not see the content, so use the event that's in
        # `filtered` rather than the event we retrieved from the datastore.
        event = filtered[0]

        # Fetch the aggregations.
        aggregations = await self.store.get_bundled_aggregations(
            itertools.chain(events_before, (event,), events_after),
            user.to_string(),
        )

        if events_after:
            last_event_id = events_after[-1].event_id
        else:
            last_event_id = event_id

        if event_filter and event_filter.lazy_load_members:
            state_filter = StateFilter.from_lazy_load_member_list(
                ev.sender
                for ev in itertools.chain(
                    events_before,
                    (event,),
                    events_after,
                )
            )
        else:
            state_filter = StateFilter.all()

        # XXX: why do we return the state as of the last event rather than the
        # first? Shouldn't we be consistent with /sync?
        # https://github.com/matrix-org/matrix-doc/issues/687

        state = await self.state_store.get_state_for_events(
            [last_event_id], state_filter=state_filter
        )

        state_events = list(state[last_event_id].values())
        if event_filter:
            state_events = await event_filter.filter(state_events)

        # We use a dummy token here as we only care about the room portion of
        # the token, which we replace.
        token = StreamToken.START

        return EventContext(
            events_before=events_before,
            event=event,
            events_after=events_after,
            state=await filter_evts(state_events),
            aggregations=aggregations,
            start=await token.copy_and_replace("room_key", results.start).to_string(
                self.store
            ),
            end=await token.copy_and_replace("room_key", results.end).to_string(
                self.store
            ),
        )


class TimestampLookupHandler:
    """Finds the event closest to a given timestamp, locally or via federation."""

    def __init__(self, hs: "HomeServer"):
        self.server_name = hs.hostname
        self.store = hs.get_datastores().main
        self.state_handler = hs.get_state_handler()
        self.federation_client = hs.get_federation_client()

    async def get_event_for_timestamp(
        self,
        requester: Requester,
        room_id: str,
        timestamp: int,
        direction: str,
    ) -> Tuple[str, int]:
        """Find the closest event to the given timestamp in the given direction.
        If we can't find an event locally or the event we have locally is next to a gap,
        it will ask other federated homeservers for an event.

        Args:
            requester: The user making the request according to the access token
            room_id: Room to fetch the event from
            timestamp: The point in time (inclusive) we should navigate from in
                the given direction to find the closest event.
            direction: ["f"|"b"] to indicate whether we should navigate forward
                or backward from the given timestamp to find the closest event.

        Returns:
            A tuple containing the `event_id` closest to the given timestamp in
            the given direction and the `origin_server_ts`.

        Raises:
            SynapseError if unable to find any event locally in the given direction
        """

        local_event_id = await self.store.get_event_id_for_timestamp(
            room_id, timestamp, direction
        )
        logger.debug(
            "get_event_for_timestamp: locally, we found event_id=%s closest to timestamp=%s",
            local_event_id,
            timestamp,
        )

        # Check for gaps in the history where events could be hiding in between
        # the timestamp given and the event we were able to find locally
        is_event_next_to_backward_gap = False
        is_event_next_to_forward_gap = False
        if local_event_id:
            local_event = await self.store.get_event(
                local_event_id, allow_none=False, allow_rejected=False
            )

            if direction == "f":
                # We only need to check for a backward gap if we're looking forwards
                # to ensure there is nothing in between.
                is_event_next_to_backward_gap = (
                    await self.store.is_event_next_to_backward_gap(local_event)
                )
            elif direction == "b":
                # We only need to check for a forward gap if we're looking backwards
                # to ensure there is nothing in between
                is_event_next_to_forward_gap = (
                    await self.store.is_event_next_to_forward_gap(local_event)
                )

        # If we found a gap, we should probably ask another homeserver first
        # about more history in between
        if (
            not local_event_id
            or is_event_next_to_backward_gap
            or is_event_next_to_forward_gap
        ):
            logger.debug(
                "get_event_for_timestamp: locally, we found event_id=%s closest to timestamp=%s which is next to a gap in event history so we're asking other homeservers first",
                local_event_id,
                timestamp,
            )

            # Find other homeservers from the given state in the room
            curr_state = await self.state_handler.get_current_state(room_id)
            curr_domains = get_domains_from_state(curr_state)
            likely_domains = [
                domain for domain, depth in curr_domains if domain != self.server_name
            ]

            # Loop through each homeserver candidate until we get a succesful response
            for domain in likely_domains:
                try:
                    remote_response = await self.federation_client.timestamp_to_event(
                        domain, room_id, timestamp, direction
                    )
                    logger.debug(
                        "get_event_for_timestamp: response from domain(%s)=%s",
                        domain,
                        remote_response,
                    )

                    # TODO: Do we want to persist this as an extremity?
                    # TODO: I think ideally, we would try to backfill from
                    # this event and run this whole
                    # `get_event_for_timestamp` function again to make sure
                    # they didn't give us an event from their gappy history.
                    remote_event_id = remote_response.event_id
                    origin_server_ts = remote_response.origin_server_ts

                    # Only return the remote event if it's closer than the local event
                    if not local_event or (
                        abs(origin_server_ts - timestamp)
                        < abs(local_event.origin_server_ts - timestamp)
                    ):
                        return remote_event_id, origin_server_ts
                except (HttpResponseException, InvalidResponseError) as ex:
                    # Let's not put a high priority on some other homeserver
                    # failing to respond or giving a random response
                    logger.debug(
                        "Failed to fetch /timestamp_to_event from %s because of exception(%s) %s args=%s",
                        domain,
                        type(ex).__name__,
                        ex,
                        ex.args,
                    )
                except Exception as ex:
                    # But we do want to see some exceptions in our code
                    logger.warning(
                        "Failed to fetch /timestamp_to_event from %s because of exception(%s) %s args=%s",
                        domain,
                        type(ex).__name__,
                        ex,
                        ex.args,
                    )

        if not local_event_id:
            raise SynapseError(
                404,
                "Unable to find event from %s in direction %s"
                % (timestamp, direction),
                errcode=Codes.NOT_FOUND,
            )

        return local_event_id, local_event.origin_server_ts


class RoomEventSource(EventSource[RoomStreamToken, EventBase]):
    def __init__(self, hs: "HomeServer"):
        self.store = hs.get_datastores().main

    async def get_new_events(
        self,
        user: UserID,
        from_key: RoomStreamToken,
        limit: Optional[int],
        room_ids: Collection[str],
        is_guest: bool,
        explicit_room_id: Optional[str] = None,
    ) -> Tuple[List[EventBase], RoomStreamToken]:
        # We just ignore the key for now.

        to_key = self.get_current_key()

        if from_key.topological:
            logger.warning("Stream has topological part!!!! %r", from_key)
            from_key = RoomStreamToken(None, from_key.stream)

        app_service = self.store.get_app_service_by_user_id(user.to_string())
        if app_service:
            # We no longer support AS users using /sync directly.
            # See https://github.com/matrix-org/matrix-doc/issues/1144
            raise NotImplementedError()
        else:
            room_events = await self.store.get_membership_changes_for_user(
                user.to_string(), from_key, to_key
            )

            room_to_events = await self.store.get_room_events_stream_for_rooms(
                room_ids=room_ids,
                from_key=from_key,
                to_key=to_key,
                limit=limit or 10,
                order="ASC",
            )

            events = list(room_events)
            events.extend(e for evs, _ in room_to_events.values() for e in evs)
            events.sort(key=lambda e: e.internal_metadata.order)

            if limit:
                events[:] = events[:limit]

            if events:
                end_key = events[-1].internal_metadata.after
            else:
                end_key = to_key

        return events, end_key

    def get_current_key(self) -> RoomStreamToken:
        return self.store.get_room_max_token()

    def get_current_key_for_room(self, room_id: str) -> Awaitable[str]:
        return self.store.get_room_events_max_id(room_id)


class ShutdownRoomResponse(TypedDict):
    """
    Attributes:
        kicked_users: An array of users (`user_id`) that were kicked.
        failed_to_kick_users:
            An array of users (`user_id`) that that were not kicked.
        local_aliases:
            An array of strings representing the local aliases that were
            migrated from the old room to the new.
        new_room_id: A string representing the room ID of the new room.
    """

    kicked_users: List[str]
    failed_to_kick_users: List[str]
    local_aliases: List[str]
    new_room_id: Optional[str]


class RoomShutdownHandler:
    DEFAULT_MESSAGE = (
        "Sharing illegal content on this server is not permitted and rooms in"
        " violation will be blocked."
) DEFAULT_ROOM_NAME = "Content Violation Notification" def __init__(self, hs: "HomeServer"): self.hs = hs self.room_member_handler = hs.get_room_member_handler() self._room_creation_handler = hs.get_room_creation_handler() self._replication = hs.get_replication_data_handler() self.event_creation_handler = hs.get_event_creation_handler() self.store = hs.get_datastores().main async def shutdown_room( self, room_id: str, requester_user_id: str, new_room_user_id: Optional[str] = None, new_room_name: Optional[str] = None, message: Optional[str] = None, block: bool = False, ) -> ShutdownRoomResponse: """ Shuts down a room. Moves all local users and room aliases automatically to a new room if `new_room_user_id` is set. Otherwise local users only leave the room without any information. The new room will be created with the user specified by the `new_room_user_id` parameter as room administrator and will contain a message explaining what happened. Users invited to the new room will have power level `-10` by default, and thus be unable to speak. The local server will only have the power to move local user and room aliases to the new room. Users on other servers will be unaffected. Args: room_id: The ID of the room to shut down. requester_user_id: User who requested the action and put the room on the blocking list. new_room_user_id: If set, a new room will be created with this user ID as the creator and admin, and all users in the old room will be moved into that room. If not set, no new room will be created and the users will just be removed from the old room. new_room_name: A string representing the name of the room that new users will be invited to. Defaults to `Content Violation Notification` message: A string containing the first message that will be sent as `new_room_user_id` in the new room. Ideally this will clearly convey why the original room was shut down. 
failed_to_kick_users: An array of users (`user_id`) that were not kicked.
return { "kicked_users": [], "failed_to_kick_users": [], "local_aliases": [], "new_room_id": None, } if new_room_user_id is not None: if not self.hs.is_mine_id(new_room_user_id): raise SynapseError( 400, "User must be our own: %s" % (new_room_user_id,) ) room_creator_requester = create_requester( new_room_user_id, authenticated_entity=requester_user_id ) info, stream_id = await self._room_creation_handler.create_room( room_creator_requester, config={ "preset": RoomCreationPreset.PUBLIC_CHAT, "name": new_room_name, "power_level_content_override": {"users_default": -10}, }, ratelimit=False, ) new_room_id = info["room_id"] logger.info( "Shutting down room %r, joining to new room: %r", room_id, new_room_id ) # We now wait for the create room to come back in via replication so # that we can assume that all the joins/invites have propagated before # we try and auto join below. await self._replication.wait_for_stream_position( self.hs.config.worker.events_shard_config.get_instance(new_room_id), "events", stream_id, ) else: new_room_id = None logger.info("Shutting down room %r", room_id) users = await self.store.get_users_in_room(room_id) kicked_users = [] failed_to_kick_users = [] for user_id in users: if not self.hs.is_mine_id(user_id): continue logger.info("Kicking %r from %r...", user_id, room_id) try: # Kick users from room target_requester = create_requester( user_id, authenticated_entity=requester_user_id ) _, stream_id = await self.room_member_handler.update_membership( requester=target_requester, target=target_requester.user, room_id=room_id, action=Membership.LEAVE, content={}, ratelimit=False, require_consent=False, ) # Wait for leave to come in over replication before trying to forget. 
await self._replication.wait_for_stream_position( self.hs.config.worker.events_shard_config.get_instance(room_id), "events", stream_id, ) await self.room_member_handler.forget(target_requester.user, room_id) # Join users to new room if new_room_user_id: await self.room_member_handler.update_membership( requester=target_requester, target=target_requester.user, room_id=new_room_id, action=Membership.JOIN, content={}, ratelimit=False, require_consent=False, ) kicked_users.append(user_id) except Exception: logger.exception( "Failed to leave old room and join new room for %r", user_id ) failed_to_kick_users.append(user_id) # Send message in new room and move aliases if new_room_user_id: await self.event_creation_handler.create_and_send_nonmember_event( room_creator_requester, { "type": "m.room.message", "content": {"body": message, "msgtype": "m.text"}, "room_id": new_room_id, "sender": new_room_user_id, }, ratelimit=False, ) aliases_for_room = await self.store.get_aliases_for_room(room_id) await self.store.update_aliases_for_room( room_id, new_room_id, requester_user_id ) else: aliases_for_room = [] return { "kicked_users": kicked_users, "failed_to_kick_users": failed_to_kick_users, "local_aliases": aliases_for_room, "new_room_id": new_room_id, }
from __future__ import unicode_literals from datetime import datetime import logging from types import NoneType from google.appengine.ext import ndb, deferred, db from google.appengine.ext.ndb.query import Cursor from typing import Optional, List, Union, Tuple from mcfw.rpc import returns, arguments from rogerthat.bizz.communities.communities import get_community from rogerthat.bizz.jobs.matching import rebuild_matches_check_current from rogerthat.bizz.jobs.notifications import calculate_next_reminder from rogerthat.bizz.jobs.translations import localize as localize_jobs from rogerthat.capi.jobs import newJobs from rogerthat.consts import JOBS_WORKER_QUEUE from rogerthat.dal.mobile import get_mobile_key_by_account from rogerthat.dal.profile import get_user_profile from rogerthat.models import NdbUserProfile from rogerthat.models.jobs import JobOffer, JobMatchingCriteria, JobMatchingCriteriaNotifications, JobMatch, \ JobMatchStatus, JobNotificationSchedule, JobOfferSourceType from rogerthat.rpc import users from rogerthat.rpc.models import RpcCAPICall, RpcException from rogerthat.rpc.rpc import mapping, logError, CAPI_KEYWORD_ARG_PRIORITY, \ PRIORITY_HIGH from rogerthat.service.api.messaging import add_chat_members from rogerthat.to.jobs import GetJobsResponseTO, JobOfferTO, NewJobsResponseTO, \ NewJobsRequestTO, SaveJobsCriteriaResponseTO, GetJobsCriteriaResponseTO, \ JobKeyLabelTO, JobCriteriaLocationTO, JobCriteriaNotificationsTO, JobCriteriaGeoLocationTO, \ SaveJobsCriteriaRequestTO, JobOfferChatActionTO, JobOfferOpenActionTO, GetJobChatInfoResponseTO, JobChatAnonymousTO, \ CreateJobChatResponseTO, CreateJobChatRequestTO, JobsInfoTO, JobOfferProviderTO from rogerthat.translations import localize from rogerthat.utils import now, get_epoch_from_datetime from rogerthat.utils.location import coordinates_to_city from solutions.common.jobs.models import JobSolicitation TAG_JOB_CHAT = '__rt__.jobs_chat' CONTRACT_TYPES = [ 'contract_type_001', 'contract_type_002', 
'contract_type_003', 'contract_type_004', 'contract_type_005', 'contract_type_006', 'contract_type_007', ] JOB_DOMAINS = [ 'job_domain_001', 'job_domain_002', 'job_domain_003', 'job_domain_004', 'job_domain_005', 'job_domain_006', 'job_domain_007', 'job_domain_008', 'job_domain_009', 'job_domain_010', 'job_domain_011', 'job_domain_012', 'job_domain_013', 'job_domain_014', 'job_domain_015', 'job_domain_016', 'job_domain_017', 'job_domain_018', 'job_domain_019', 'job_domain_020', 'job_domain_021', 'job_domain_022', 'job_domain_023', 'job_domain_024', ] def get_job_criteria(app_user): # type: (users.User) -> GetJobsCriteriaResponseTO user_profile = get_user_profile(app_user) response = GetJobsCriteriaResponseTO() response.location = JobCriteriaLocationTO() response.location.address = None response.location.geo = None response.location.distance = 20000 # 20 Km response.contract_types = [] response.job_domains = [] response.keywords = [] response.notifications = JobCriteriaNotificationsTO() response.notifications.timezone = None response.notifications.how_often = JobNotificationSchedule.NEVER response.notifications.delivery_day = 'monday' response.notifications.delivery_time = 64800 # 18:00 job_criteria = JobMatchingCriteria.create_key(app_user).get() # type: JobMatchingCriteria for contract_type in CONTRACT_TYPES: to = JobKeyLabelTO() to.key = contract_type to.label = localize_jobs(user_profile.language, contract_type) to.enabled = contract_type in job_criteria.contract_types if job_criteria else False response.contract_types.append(to) response.contract_types.sort(key=lambda item: item.label) for domain in JOB_DOMAINS: to = JobKeyLabelTO() to.key = domain to.label = localize_jobs(user_profile.language, domain) to.enabled = domain in job_criteria.job_domains if job_criteria else False response.job_domains.append(to) response.job_domains.sort(key=lambda item: item.label) if job_criteria: response.active = job_criteria.active response.location = JobCriteriaLocationTO() 
response.location.address = job_criteria.address response.location.geo = JobCriteriaGeoLocationTO() response.location.geo.latitude = job_criteria.geo_location.lat response.location.geo.longitude = job_criteria.geo_location.lon response.location.distance = job_criteria.distance response.keywords = job_criteria.keywords if job_criteria.notifications: response.notifications.how_often = job_criteria.notifications.how_often if job_criteria.notifications.delivery_day: response.notifications.delivery_day = job_criteria.notifications.delivery_day if job_criteria.notifications.delivery_time: response.notifications.delivery_time = job_criteria.notifications.delivery_time else: response.active = True # user first usage return response @returns(SaveJobsCriteriaResponseTO) @arguments(app_user=users.User, request=SaveJobsCriteriaRequestTO) def save_job_criteria(app_user, request): # type: (users.User, SaveJobsCriteriaRequestTO) -> SaveJobsCriteriaResponseTO job_criteria_key = JobMatchingCriteria.create_key(app_user) job_criteria = job_criteria_key.get() # type: JobMatchingCriteria new_job_profile = not job_criteria if new_job_profile: if not request.criteria: return SaveJobsCriteriaResponseTO(active=False, new_profile=new_job_profile) job_criteria = JobMatchingCriteria(key=job_criteria_key) job_criteria.last_load_request = datetime.utcnow() job_criteria.demo = get_community(get_user_profile(app_user).community_id).demo original_job_criteria = None else: original_job_criteria = job_criteria.to_dict(exclude=['notifications', 'active']) notifications = None job_criteria.active = request.active if request.criteria: location = request.criteria.location notifications = request.criteria.notifications if location.geo: job_criteria.geo_location = ndb.GeoPt(location.geo.latitude, location.geo.longitude) if location.address: job_criteria.address = location.address else: job_criteria.address = coordinates_to_city(job_criteria.geo_location.lat, job_criteria.geo_location.lon) 
job_criteria.distance = location.distance job_criteria.contract_types = sorted(request.criteria.contract_types) job_criteria.job_domains = sorted(request.criteria.job_domains) job_criteria.keywords = sorted(request.criteria.keywords) if not job_criteria.job_domains: raise RpcException('at_least_one_job_domain_required', app_user) if not job_criteria.contract_types: raise RpcException('at_least_one_contract_type_required', app_user) updated_criteria = job_criteria.to_dict(exclude=['notifications', 'active']) should_build_matches = original_job_criteria != updated_criteria should_calculate_reminder = should_build_matches should_clear_notifications = should_build_matches og_notifications = job_criteria.notifications and job_criteria.notifications.to_dict() if not job_criteria.notifications: job_criteria.notifications = JobMatchingCriteriaNotifications() job_criteria.notifications.how_often = JobNotificationSchedule.NEVER if notifications and notifications.timezone: job_criteria.notifications.timezone = notifications.timezone if job_criteria.notifications.how_often != notifications.how_often: delayed_notification_types = (JobNotificationSchedule.AT_MOST_ONCE_A_DAY, JobNotificationSchedule.AT_MOST_ONCE_A_WEEK) if job_criteria.notifications.how_often in delayed_notification_types and \ notifications.how_often not in delayed_notification_types: should_clear_notifications = True job_criteria.notifications.how_often = notifications.how_often job_criteria.notifications.delivery_day = notifications.delivery_day job_criteria.notifications.delivery_time = notifications.delivery_time if not should_calculate_reminder: should_calculate_reminder = job_criteria.notifications.to_dict() != og_notifications job_criteria.put() if should_build_matches: deferred.defer(rebuild_matches_check_current, app_user, _queue=JOBS_WORKER_QUEUE) if should_calculate_reminder: deferred.defer(calculate_next_reminder, app_user, should_clear_notifications, _queue=JOBS_WORKER_QUEUE) return 
def get_oca_logo_url(language):
    """Return the URL of the OCA jobs logo image.

    Dutch-language users ('nl*') get the localized variant; everyone
    else gets the generic logo.
    """
    is_dutch = language.startswith('nl')
    return ('https://storage.googleapis.com/oca-files/jobs/OCA-nl.png'
            if is_dutch
            else 'https://storage.googleapis.com/oca-files/jobs/OCA.png')
@arguments(context=RpcCAPICall, result=NewJobsResponseTO) def new_jobs_response_handler(context, result): pass def _get_jobs(activity_type, app_user, cursor, language, ids): # type: (str, users.User, Optional[str], str, List[int]) -> Tuple[List[JobOfferTO], Optional[str], bool] fetch_size = 20 start_cursor = Cursor.from_websafe_string(cursor) if cursor else None if activity_type == JobOfferTO.ACTIVITY_TYPE_NEW: qry = JobMatch.list_new_by_app_user(app_user) elif activity_type == JobOfferTO.ACTIVITY_TYPE_HISTORY: qry = JobMatch.list_by_app_user_and_status(app_user, JobMatchStatus.DELETED) elif activity_type == JobOfferTO.ACTIVITY_TYPE_STARRED: qry = JobMatch.list_by_app_user_and_status(app_user, JobMatchStatus.STARRED) else: raise Exception('Unknown activity type %s' % activity_type) job_matches_keys, new_cursor, has_more = qry.fetch_page( fetch_size, start_cursor=start_cursor, keys_only=True) # type: List[ndb.Key], Cursor, bool match_keys = [JobMatch.create_key(app_user, job_id) for job_id in ids if job_id] + \ [key for key in job_matches_keys if key.id() not in ids] offer_keys = [JobOffer.create_key(match_key.id()) for match_key in match_keys] models = ndb.get_multi(match_keys + offer_keys) # type: List[Union[JobMatch, JobOffer]] job_matches = models[0: len(models) / 2] job_offers = models[len(models) / 2:] items = [] to_put = [] for match, job_offer in zip(job_matches, job_offers): # type: JobMatch, JobOffer if not match: # this should only happen when the job was requested using the 'ids' property # like when the jobs activity is opened via a button on a news item if job_offer.id not in ids: logging.warning('Expected JobMatch to exist, creating it anyway...') logging.debug('Creating manual JobMatch entry for job %d', job_offer.id) match = JobMatch.manually_create(app_user, job_offer.id) to_put.append(match) timestamp = get_epoch_from_datetime(match.update_date) items.append(JobOfferTO.from_job_offer(job_offer, timestamp, language, get_job_offer_actions(job_offer, 
def get_job_offer_actions(job_offer, match, language):
    # type: (JobOffer, JobMatch, str) -> List[Union[JobOfferChatActionTO, JobOfferOpenActionTO]]
    """Build the list of action buttons shown with a job offer.

    Only offers sourced from OCA get a chat action; offers from other
    sources currently yield no actions at all.
    """
    actions = []
    if job_offer.source.type == JobOfferSourceType.OCA:
        action = JobOfferChatActionTO()
        action.label = localize(language, 'open_chat')
        action.chat_key = match.chat_key  # possibly None
        action.icon = 'fa-comment'
        actions.append(action)
    return actions
def create_job_chat(app_user, request):
    # type: (users.User, CreateJobChatRequestTO) -> CreateJobChatResponseTO
    """Return the chat key for a job match, creating the solicitation chat if needed.

    A new solicitation chat is only created when the match has no chat
    key yet; otherwise the existing key is reused as-is.
    """
    keys = [JobMatch.create_key(app_user, request.job_id), JobOffer.create_key(request.job_id)]
    job_match, job_offer = ndb.get_multi(keys)  # type: JobMatch, JobOffer
    if not job_match.chat_key:
        # If you ever want to create a separate service for jobs, you'll have to create a service api callback for this
        from solutions.common.jobs.solicitations import create_job_solicitation
        message_key = create_job_solicitation(app_user, job_offer, request)
        job_match.chat_key = message_key
        job_match.put()
    response = CreateJobChatResponseTO()
    response.message_key = job_match.chat_key
    return response
    def __init__(self, configs, collector_prefix):
        """Load per-subnet boot configs and set up the URL routes.

        configs: iterable of paths to JSON config files, one per subnet.
        collector_prefix: URL prefix under which the collector is served;
            stored in the default config used when no subnet matches.
        """
        self.configs = []
        self.default_config = dict()
        self.default_config['image_method'] = IMAGE_METHOD
        self.default_config['collector_prefix'] = collector_prefix
        for config in configs:
            self.configs.append(self.load_config(config))
        # '/' serves the normal boot script; '/wipe' forces a disk wipe.
        self.router = Map([
            Rule('/', methods=['GET'], endpoint='default'),
            Rule('/wipe', methods=['GET'], endpoint='wipe')
        ])
as fl: for line in fl: m.update(line) cfg['sha224'] = m.hexdigest() cfg['subnet'] = ipaddress.ip_network(cfg['subnet']) cfg['image_method'] = IMAGE_METHOD return cfg @responder def __call__(self, environ, start_response): try: return self.wsgi_app(environ, start_response) except: traceback.print_exc() return InternalServerError() def wsgi_app(self, environ, start_response): route = self.router.bind_to_environ(environ) try: endpoint, args = route.match() except RequestRedirect as e: return e except HTTPException: return NotFound() request = Request(environ) get_args = dict(request.args) if endpoint == 'wipe': get_args['wipe'] = 'pixie_wipe=force' else: get_args['wipe'] = "" response = Response() response.mimetype = 'text/plain' response.status_code = 200 config = None if 'ip' in get_args: ip_addr = ipaddress.ip_address(get_args['ip'][0]) for cfg in self.configs: if ip_addr in cfg['subnet']: config = cfg if config is None: response.data = CONFIGSCRIPT.format(**self.default_config) else: for (k, v) in config.items(): get_args[k] = v response.data = BOOTSCRIPT.format(**get_args) return response if __name__ == '__main__': parser = argparse.ArgumentParser( description="pixied", formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument("configs", action="store", type=str, nargs="+", help="config files to load") parser.add_argument("-a", "--addr", action="store", type=str, default="0.0.0.0", help="address to bind to (default '0.0.0.0')") parser.add_argument("-p", "--port", action="store", type=int, default=8080, help="port to bind to (default 8080)") parser.add_argument("-c", "--collector-prefix", action="store", type=str, default="/pixie_collector", help="prefix on which the collector is served") args = parser.parse_args() server = gevent.wsgi.WSGIServer( (args.addr, args.port), ScriptHandler(args.configs, args.collector_prefix)) gevent.spawn(server.serve_forever).join()
class FaultWrapper(base_wsgi.Middleware):
    """Calls down the middleware stack, making exceptions into faults."""

    # Lazily-built map of HTTP status code -> webob HTTPError subclass.
    _status_to_type = {}

    @staticmethod
    def status_to_type(status):
        """Return a fresh webob HTTPError instance for *status* (500 if unknown)."""
        # Populate the lookup table on first use by walking webob's
        # HTTPError class hierarchy.
        if not FaultWrapper._status_to_type:
            for clazz in utils.walk_class_hierarchy(webob.exc.HTTPError):
                FaultWrapper._status_to_type[clazz.code] = clazz
        return FaultWrapper._status_to_type.get(
            status, webob.exc.HTTPInternalServerError)()

    def _error(self, inner, req):
        """Convert exception *inner*, raised while handling *req*, into a wsgi.Fault."""
        # Undecodable request data gets a 400 with a fixed, safe message.
        if isinstance(inner, UnicodeDecodeError):
            msg = _("Error decoding your request. Either the URL or the "
                    "request body contained characters that could not be "
                    "decoded by Manila.")
            return wsgi.Fault(webob.exc.HTTPBadRequest(explanation=msg))

        LOG.exception("Caught error: %s", inner)

        # Exceptions may carry extra hints: 'safe' (message may be shown to
        # the user), 'headers', and 'code' (HTTP status, default 500).
        safe = getattr(inner, 'safe', False)
        headers = getattr(inner, 'headers', None)
        status = getattr(inner, 'code', 500)
        if status is None:
            status = 500

        msg_dict = dict(url=req.url, status=status)
        LOG.info("%(url)s returned with HTTP %(status)d", msg_dict)
        outer = self.status_to_type(status)
        if headers:
            outer.headers = headers
        # NOTE(johannes): We leave the explanation empty here on
        # purpose. It could possibly have sensitive information
        # that should not be returned back to the user. See
        # bugs 868360 and 874472
        # NOTE(eglynn): However, it would be over-conservative and
        # inconsistent with the EC2 API to hide every exception,
        # including those that are safe to expose, see bug 1021373
        if safe:
            outer.explanation = '%s: %s' % (inner.__class__.__name__,
                                            six.text_type(inner))
        return wsgi.Fault(outer)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Pass the request down the stack; any exception becomes a Fault.
        try:
            return req.get_response(self.application)
        except Exception as ex:
            return self._error(ex, req)
import constants
from construct import Byte, Struct, Enum, Bytes, Const, Array, Renamed, Int16ul

# The GameController wire format uses little-endian unsigned 16-bit shorts.
Short = Int16ul

# State of a single robot as reported by the GameController.
RobotInfo = "robot_info" / Struct(
    "penalty" / Enum(Byte, constants.SPLPenalty),
    "secs_till_unpenalised" / Byte,  # seconds until the penalty expires
    "number_of_yellow_cards" / Byte,
    "number_of_red_cards" / Byte
)

# Per-team block: score, coach data and the fixed-size player list.
TeamInfo = "team" / Struct(
    "team_number" / Byte,
    "team_color" / Enum(Byte, constants.SPLTeamColor),
    "score" / Byte,
    "penalty_shot" / Byte,  # penalty shot counter
    "single_shots" / Short,  # bits represent penalty shot success
    "coach_sequence" / Byte,
    "coach_message" / Bytes(253),
    "coach"/ RobotInfo,
    "players" / Array(11, RobotInfo)
)

# Full GameController state packet broadcast to the robots.
GameState = "gamedata" / Struct(
    "header" / Const(constants.GAMECONTROLLER_STRUCT_HEADER, Bytes(4)),
    "version" / Const(constants.GAMECONTROLLER_STRUCT_VERSION, Short),
    "packet_number" / Byte,
    "players_per_team" / Byte,
    "game_type" / Byte,
    "game_state" / Enum(Byte, constants.State),
    "first_half" / Byte,
    "kick_of_team" / Byte,
    "secondary_state" / Enum(Byte, constants.State2),
    "secondary_state_info" / Bytes(4),
    "drop_in_team" / Byte,
    "drop_in_time" / Short,
    "seconds_remaining" / Short,
    "secondary_seconds_remaining" / Short,
    "teams" / Array(2, TeamInfo)
)

# Response packet each robot sends back to the GameController.
ReturnData = "returndata" / Struct(
    "header" / Const(b"RGrt", Bytes(4)),
    "version" / Const(constants.GAMECONTROLLER_RESPONSE_VERSION, Byte),
    "team" / Byte,
    "player" / Byte,
    "message" / Byte
)
"""A library of random helper functionality.""" import platform, subprocess, operator, os, shutil, re, sys from glob import glob class MesonException(Exception): def __init__(self, *args, **kwargs): Exception.__init__(self, *args, **kwargs) class File: def __init__(self, is_built, subdir, fname): self.is_built = is_built self.subdir = subdir self.fname = fname def __str__(self): return os.path.join(self.subdir, self.fname) def __repr__(self): ret = '<File: {0}' if not self.is_built: ret += ' (not built)' ret += '>' return ret.format(os.path.join(self.subdir, self.fname)) @staticmethod def from_source_file(source_root, subdir, fname): if not os.path.isfile(os.path.join(source_root, subdir, fname)): raise MesonException('File %s does not exist.' % fname) return File(False, subdir, fname) @staticmethod def from_built_file(subdir, fname): return File(True, subdir, fname) @staticmethod def from_absolute_file(fname): return File(False, '', fname) def rel_to_builddir(self, build_to_src): if self.is_built: return os.path.join(self.subdir, self.fname) else: return os.path.join(build_to_src, self.subdir, self.fname) def endswith(self, ending): return self.fname.endswith(ending) def split(self, s): return self.fname.split(s) def __eq__(self, other): return (self.fname, self.subdir, self.is_built) == (other.fname, other.subdir, other.is_built) def __hash__(self): return hash((self.fname, self.subdir, self.is_built)) def get_compiler_for_source(compilers, src): for comp in compilers: if comp.can_compile(src): return comp raise RuntimeError('No specified compiler can handle file {!s}'.format(src)) def classify_unity_sources(compilers, sources): compsrclist = {} for src in sources: comp = get_compiler_for_source(compilers, src) if comp not in compsrclist: compsrclist[comp] = [src] else: compsrclist[comp].append(src) return compsrclist def flatten(item): if not isinstance(item, list): return item result = [] for i in item: if isinstance(i, list): result += flatten(i) else: 
def grab_leading_numbers(vstr):
    """Return the leading dot-separated components of a version string
    as a list of ints, stopping at the first non-numeric component."""
    numbers = []
    for component in vstr.split('.'):
        try:
            numbers.append(int(component))
        except ValueError:
            # First non-numeric component ends the version prefix.
            break
    return numbers
def default_libdir():
    """Guess the platform's install libdir: Debian multiarch, lib64, or lib."""
    if is_debianlike():
        try:
            # Ask dpkg for the multiarch triplet (e.g. x86_64-linux-gnu).
            pc = subprocess.Popen(['dpkg-architecture', '-qDEB_HOST_MULTIARCH'],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.DEVNULL)
            (stdo, _) = pc.communicate()
            if pc.returncode == 0:
                archpath = stdo.decode().strip()
                return 'lib/' + archpath
        except Exception:
            # Best-effort: dpkg-architecture missing or failing means we
            # just fall through to the generic defaults below.
            pass
    # A real (non-symlink) /usr/lib64 indicates a lib64-style distro.
    if os.path.isdir('/usr/lib64') and not os.path.islink('/usr/lib64'):
        return 'lib64'
    return 'lib'
def do_replacement(regex, line, confdata):
    """Expand every @name@ placeholder in *line* using *confdata*.

    Unknown names expand to the empty string; int values are converted
    to their decimal representation. Keeps re-scanning until no
    placeholder matches remain.
    """
    while True:
        found = re.search(regex, line)
        if not found:
            return line
        name = found.group(1)
        if name in confdata.keys():
            (value, _desc) = confdata.get(name)
            if isinstance(value, str):
                replacement = value
            elif isinstance(value, int):
                replacement = str(value)
            else:
                raise RuntimeError('Tried to replace a variable with something other than a string or int.')
        else:
            replacement = ''
        line = line.replace('@' + name + '@', replacement)
def dump_conf_header(ofilename, cdata):
    """Write configuration data *cdata* to *ofilename* as a C header.

    Booleans become #define/#undef lines, ints and strings become
    plain #define lines; each entry's description (if any) precedes it
    as a block comment.
    """
    with open(ofilename, 'w') as ofile:
        ofile.write('''/*
 * Autogenerated by the Meson build system.
 * Do not edit, your changes will be lost.
 */
''')
        for key in sorted(cdata.keys()):
            (value, desc) = cdata.get(key)
            if desc:
                ofile.write('/* %s */\n' % desc)
            if isinstance(value, bool):
                ofile.write('#define %s\n\n' % key if value else '#undef %s\n\n' % key)
            elif isinstance(value, (int, str)):
                ofile.write('#define %s %s\n\n' % (key, value))
            else:
                raise MesonException('Unknown data type in configuration file entry: ' + key)
def stringlistify(item):
    """Return *item* as a list of strings.

    A lone string becomes a one-element list; a list is validated to
    contain only strings. Anything else raises MesonException.
    """
    if isinstance(item, str):
        return [item]
    if not isinstance(item, list):
        raise MesonException('Item is not an array')
    for element in item:
        if not isinstance(element, str):
            raise MesonException('List item not a string.')
    return item
import unittest

import mock

from greenpithumb import light_sensor


class LightSensorTest(unittest.TestCase):
    """Unit tests for light_sensor.LightSensor backed by a mocked ADC."""

    def setUp(self):
        self.mock_adc = mock.Mock()
        self.light_sensor = light_sensor.LightSensor(self.mock_adc, 1)

    def test_light_50_pct(self):
        """A reading near the ADC midpoint should map to roughly 50%."""
        self.mock_adc.read_adc.return_value = 511
        self.assertAlmostEqual(self.light_sensor.light(), 50.0, places=1)

    def test_ambient_light_too_low(self):
        """A reading below the sensor minimum should raise LightSensorLowError."""
        self.mock_adc.read_adc.return_value = (
            light_sensor._LIGHT_SENSOR_MIN_VALUE - 1)
        with self.assertRaises(light_sensor.LightSensorLowError):
            self.light_sensor.light()
""" Copyright 2011, 2012 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os, time, logging, threading, Queue class Threadable: def __init__(self,name="tvod.util.Threadable"): self._exit = False self._name = name # properties def get_exit(self): return self._exit def set_exit(self,value): assert isinstance(value,bool) self._exit = value def get_name(self): return self._name exit = property(get_exit,set_exit) name = property(get_name) # methods def runloop(self): while not self.exit: self.dowork() time.sleep(0.5) def dowork(self): pass class WatchFolder(Threadable): def __init__(self,path,timedelta=10.0): assert os.path.isdir(path) assert timedelta > 0 Threadable.__init__(self,name="<WatchFolder %s>" % os.path.basename(path)) self._path = path self._files = dict() self._timedelta = timedelta self._scantime = None # properties def get_path(self): return self._path path = property(get_path) # methods def scan(self): # scan for new files new_files = dict([(f, None) for f in os.listdir(self._path)]) added_files = [f for f in new_files if (not f in self._files and self.valid_filename(f))] deleted_files = [f for f in self._files if (not f in new_files and self.valid_filename(f))] # report on changes if added_files: self.added(added_files) if deleted_files: self.deleted(deleted_files) # reset files self._files = new_files def dowork(self): if self._scantime==None or (time.time() - self._scantime) > self._timedelta: logging.debug("SCANNING") self.scan() self._scantime = 
time.time() # These are the messages which need to be overridden def valid_filename(self,filename): if filename.startswith('.') or filename.endswith('~'): return False return True def added(self,paths): pass def deleted(self,paths): pass class XMLWatchFolder(WatchFolder): def __init__(self,path,queue): assert isinstance(queue,Queue.Queue) WatchFolder.__init__(self,path) self.queue = queue def valid_filename(self,filename): if not WatchFolder.valid_filename(self,filename): return False (basename,extension) = os.path.splitext(filename) if extension.lower() != ".xml": return False return True def added(self,filenames): for f in filenames: path = os.path.join(self.path,f) if not (os.path.exists(path) and os.path.getsize(path)): logging.debug("Ignoring non-existent or empty file: %s" % path) continue if not os.path.isfile(path): continue logging.debug("ADDED: %s/%s" % (os.path.basename(self.path),f)) self.queue.put(WorkerItem(path)) def deleted(self,filenames): for f in filenames: logging.debug("DELETED: %s/%s" % (os.path.basename(self.path),f)) class Worker(Threadable): def __init__(self,queue,number=0): assert isinstance(queue,Queue.Queue) Threadable.__init__(self,name="<Worker %d>" % number) self._queue = queue # methods def dowork(self): try: item = self._queue.get(True,0.5) assert isinstance(item,WorkerItem) self.process(item) except Queue.Empty, e: pass def process(self,item): pass class WorkerItem: def __init__(self,path): assert path self._path = path # properties def get_path(self): return self._path path = property(get_path) # convert into a string def __str__(self): return "<WorkerItem %s>" % self._path
import unreal_engine as ue
import json


class FilmActor:
    # Python component attached to an Unreal Engine actor; serializes the
    # actor's location/forward vector to JSON and restores location from JSON.

    def begin_play(self):
        # Cache the owning actor when gameplay starts.
        self.pawn = self.uobject.get_owner()

    def getjson(self):
        # Serialize the current location and forward vector as a JSON object.
        ue.log("@@@@video getting json:")
        loc = self.uobject.get_actor_location()
        rot = self.uobject.get_actor_forward()
        data = {
            "x": loc.x, "y": loc.y, "z": loc.z,
            "rx": rot.x, "ry": rot.y, "rz": rot.z
        }
        return json.dumps(data)

    def addtoworld(self):
        # Currently only logs; returns an empty string.
        ue.log("@@@@video add to world")
        return ""

    def setjson(self, js):
        # Restore the actor's location from a JSON payload produced by
        # getjson().  Always returns True.
        ue.log("@@@@video setting json:")
        data = json.loads(js)
        loc = self.uobject.get_actor_location()
        loc.x = data["x"]
        loc.y = data["y"]
        loc.z = data["z"]
        self.uobject.set_actor_location(loc)
        # NOTE(review): the rx/ry/rz values in the payload are never applied;
        # `rot` below is read but unused -- orientation restore was presumably
        # intended here.  TODO: confirm and either apply the rotation or drop
        # this line.
        rot = self.uobject.get_actor_forward()
        return True

    def tick(self, delta_time):
        pass
""" Example from pybedtools documentation (:ref:`third example`) to count \ reads in introns and exons using multiple CPUs. """ from __future__ import print_function import pybedtools import argparse import os import sys import multiprocessing def featuretype_filter(feature, featuretype): """ Only passes features with the specified *featuretype* """ if feature[2] == featuretype: return True return False def subset_featuretypes(featuretype): return g.filter(featuretype_filter, featuretype).saveas() def count_reads_in_features(features): """ Callback function to count reads in features """ return features.intersect(abam=bam, b=features.fn, s=stranded, bed=True, stream=True).count() def main(): """ Third quick example from the documentation -- count reads introns and exons, in parallel """ ap = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]), usage=__doc__) ap.add_argument('--gff', required=True, help='GFF or GTF file containing annotations') ap.add_argument('--bam', required=True, help='BAM file containing reads to be counted') ap.add_argument('--stranded', action='store_true', help='Use strand-specific merging and overlap. ' 'Default is to ignore strand') ap.add_argument('--no-parallel', dest='noparallel', action='store_true', help='Disables parallel computation') ap.add_argument('-o', '--output', help='Optional file to which results will be written; ' 'default is stdout') ap.add_argument('-v', '--verbose', action='store_true', help='Verbose (goes to stderr)') args = ap.parse_args() gff = args.gff bam = args.bam stranded = args.stranded parallel = not args.noparallel # Some GFF files have invalid entries -- like chromosomes with negative # coords or features of length = 0. This line removes them and saves the # result in a tempfile g = pybedtools.BedTool(gff).remove_invalid().saveas() # Decide which version of map to use. If parallel, we only need 3 # processes. 
pool = multiprocessing.Pool(processes=3) # Get separate files for introns and exons in parallel (if specified) featuretypes = ('intron', 'exon') introns, exons = pool.map(subset_featuretypes, featuretypes) # Perform some genome algebra to get unique and shared regions exon_only = exons.subtract(introns).merge().remove_invalid().saveas() intron_only = introns.subtract(exons).merge().remove_invalid().saveas() intron_and_exon = exons\ .intersect(introns).merge().remove_invalid().saveas() # Do intersections with BAM file in parallel features = (exon_only, intron_only, intron_and_exon) results = pool.map(count_reads_in_features, features) labels = (' exon only:', ' intron only:', 'intron and exon:') for label, reads in zip(labels, results): print('%s %s' % (label, reads)) pybedtools.cleanup(verbose=False) if __name__ == "__main__": main()
from __future__ import print_function, unicode_literals, absolute_import
import json
import sys
import os
import re
import time
import shutil
import random
import mimetypes
import imghdr
import traceback
import json
import redis
import logging
import requests
from requests.exceptions import RequestException

# Image categories; each maps to a sub-directory of SOURCE_ROOT.
TYPE_CAT = 'cats'
TYPE_DOG = 'dogs'
TYPE_OTHER = 'others'
SOURCE_ROOT = os.path.join('..', 'images')
TWO_HOUR_EXPIRE = 60*60*2  # in seconds
MEDIA_ID_EXPIRE = TWO_HOUR_EXPIRE * 35  # in seconds
# Redis key templates (versioned) and local backup-file locations.
ACCESS_TOKEN_KEY = 'wechat:token:v1:%s'
MEDIA_ID_KEY = 'wechat:media_ids:v1:%s'
MEDIA_ID_OUTPUT = 'data'
MEDIA_ID_USER_KEY = 'wechat:media_ids:user:v1:%s:%s'
MEDIA_ID_FILE = 'media_ids_v1_%s.txt'
UPLOAD_IMAGE_URL = 'https://api.weixin.qq.com/cgi-bin/media/upload?access_token=%s&type=image'
GET_TOKEN_URL = 'https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=%s&secret=%s'

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('MediaStore')


def get_wechat_access_token(app_id, app_secret):
    # Fetch a fresh access token from the WeChat API (no caching here;
    # see MediaStore._get_access_token for the cached path).
    url = GET_TOKEN_URL % (app_id, app_secret)
    logger.info('get_wechat_access_token url=%s' % url)
    response = requests.get(url)
    response.encoding = 'utf-8'
    logger.info('get_wechat_access_token result=%s' % response.json())
    return response.json()['access_token']


class MediaStore(object):
    # Uploads images to WeChat and stores the returned media ids in Redis,
    # keyed per image category, so they can later be served at random
    # (optionally de-duplicated per user).

    # Shared Redis connection.  NOTE(review): created at class-definition
    # (import) time, so importing this module requires Redis defaults to be
    # reachable unless a custom `r` is passed to __init__.
    _redis = redis.StrictRedis(decode_responses=True)

    def __init__(self, name, app_id, app_secret, r=_redis, expire=MEDIA_ID_EXPIRE):
        assert name, 'name can not be None'
        assert app_id, 'app_id can not be None'
        assert app_secret, 'app_secret can not be None'
        self.name = name
        self.app_id = app_id
        self.app_secret = app_secret
        self.expire = expire
        self.r = r
        logger.debug('__init__ name=%s app_id=%s, app_secret=%s' % (name, app_id, app_secret))

    def _get_media_key(self, type_name=''):
        # Redis set holding all media ids for a category.
        return MEDIA_ID_KEY % type_name

    def _get_media_file(self, type_name=''):
        # Local text-file backup of the media ids for a category.
        return os.path.join(MEDIA_ID_OUTPUT, MEDIA_ID_FILE % type_name)

    def _get_user_key(self, user_id, type_name=''):
        # Redis set of media ids already shown to a given user.
        return MEDIA_ID_USER_KEY % (type_name, user_id)

    def _get_access_token(self):
        # Return a cached access token, refreshing via the WeChat API and
        # caching for two hours when missing or expired.
        token = self.r.get(ACCESS_TOKEN_KEY % self.app_id)
        if not token:
            token = get_wechat_access_token(self.app_id, self.app_secret)
            logger.info('get_wechat_access_token token=%s' % token)
            if token:
                self.r.set(ACCESS_TOKEN_KEY % self.app_id, token)
                self.r.expire(ACCESS_TOKEN_KEY % self.app_id, TWO_HOUR_EXPIRE)
        return token

    def clear_media_ids(self, type_name=''):
        logger.info('clear_media_ids type=%s' % type_name)
        self.r.delete(self._get_media_key(type_name))

    def save_media_ids(self, media_ids, type_name='', replace=True):
        # Persist media ids both to a local file and to the Redis set
        # (optionally replacing the existing set).  Returns media_ids.
        if media_ids:
            with open(self._get_media_file(type_name), 'w') as f:
                f.write('\n'.join(media_ids))
            key = self._get_media_key(type_name)
            if replace:
                self.r.delete(key)
            rt = self.r.sadd(key, *media_ids)
            self.r.expire(key, self.expire)
            logger.info('save_media_ids %s media ids saved %s' % (self.media_ids_length(type_name), rt))
        return media_ids

    def upload_image(self, filepath):
        # Upload a single image; returns the WeChat media_id, or None when
        # the HTTP request fails.
        token = self._get_access_token()
        if not token:
            raise IOError('token is None')
        url = UPLOAD_IMAGE_URL % token
        # NOTE(review): the file handle is never closed explicitly; a
        # `with open(...)` block would avoid relying on GC.
        files = {'media': open(filepath, 'rb')}
        try:
            response = requests.post(url, files=files)
            response.encoding = 'utf-8'
            return response.json()['media_id']
        except RequestException as e:
            logger.error('upload_image error=%s' % e)

    def upload_images(self, source_dir, type_name='', max_count=100):
        # Upload up to max_count images (randomly sampled) from source_dir
        # and record the resulting media ids under type_name.
        if not source_dir or not os.path.isdir(source_dir):
            return
        logger.info('upload_images [%s] for type [%s]' % (source_dir, type_name))
        names = os.listdir(source_dir)
        if len(names) > max_count:
            names = random.sample(names, max_count)
        count = 0
        mids = []
        for name in names:
            filepath = os.path.join(source_dir, name)
            filepath = os.path.abspath(filepath)
            mime_type, _ = mimetypes.guess_type(name)
            if mime_type not in ['image/jpeg', 'image/png', 'image/gif']:
                logger.warning('upload_images invalid=%s' % filepath)
                continue
            logger.info('upload_images file=%s' % filepath)
            media_id = self.upload_image(filepath)
            if media_id:
                logger.info('upload_images result=%s' % media_id)
                mids.append(media_id)
                count += 1
            # NOTE(review): `names` was already capped at max_count above, so
            # this break (with `>` rather than `>=`) can never trigger.
            if count > max_count:
                break
        self.save_media_ids(mids, type_name)

    def random_user_media_id(self, user_id=None, type_name=''):
        # Pick a media id the given user has not seen yet; once the user has
        # seen all ids, reset their seen-set and fall back to a pure random
        # pick.
        if not user_id:
            return self.random_media_id(type_name)
        media_key = self._get_media_key(type_name)
        user_key = self._get_user_key(user_id, type_name)
        mids = self.r.sdiff(media_key, user_key)
        mid = None
        if mids:
            mid = random.choice(list(mids))
        if mid:
            self.r.sadd(user_key, mid)
            self.r.expire(user_key, self.expire)
        if not mid:
            self.r.delete(user_key)
            mid = self.random_media_id(type_name)
        logger.debug('random_user_media_id user_id=%s result=%s' % (user_id, mid))
        return mid

    def all_media_ids(self, type_name=''):
        return self.r.smembers(self._get_media_key(type_name))

    def media_ids_length(self, type_name=''):
        return self.r.scard(self._get_media_key(type_name))

    def random_media_id(self, type_name=''):
        return self.r.srandmember(self._get_media_key(type_name))


from config import WECHAT_APPID, WECHAT_APPSECRET, WECHAT2_APPID, WECHAT2_APPSECRET

# Two application instances, one per WeChat account.
store1 = MediaStore('Cat', WECHAT_APPID, WECHAT_APPSECRET)
store2 = MediaStore('Miu', WECHAT2_APPID, WECHAT2_APPSECRET)


def update_app(store, root=SOURCE_ROOT):
    # Upload all three categories for one MediaStore/app.
    for type_name in (TYPE_CAT, TYPE_DOG, TYPE_OTHER):
        source_dir = os.path.join(root, type_name)
        store.upload_images(source_dir, type_name)


def update_all(root=SOURCE_ROOT):
    # Validate the source layout, then upload for both apps.
    check_all(root)
    update_app(store1, root)
    update_app(store2, root)


def check_all(root=SOURCE_ROOT):
    # Validate that every category directory exists and is non-empty;
    # exits the process on the first problem found.
    for type_name in (TYPE_CAT, TYPE_DOG, TYPE_OTHER):
        source_dir = os.path.abspath(os.path.join(root, type_name))
        if not os.path.exists(source_dir):
            print('ERROR: check_all source dir [%s] not exists' % source_dir)
            exit(1)
        if not os.path.isdir(source_dir):
            print('ERROR: check_all source dir [%s] not directory' % source_dir)
            exit(2)
        if not os.listdir(source_dir):
            print('ERROR: check_all source dir [%s] is empty' % source_dir)
            exit(2)
    print('all directories exists, check passed.')


def test_all():
    # Smoke-test reads for both stores/categories.
    for store in [store1, store2]:
        for type_name in (TYPE_CAT, TYPE_DOG, TYPE_OTHER):
            print('\n[Store:%s] found %s values for type %s, read test:' % (store.name, store.media_ids_length(type_name), type_name))
            # NOTE(review): the calls below use store1 regardless of which
            # store the outer loop selected, so store2 is never read-tested.
            for i in range(0, 10):
                print(store1.random_user_media_id('test', type_name))
            for i in range(0,10):
                assert store1.random_user_media_id('test', type_name), 'No media id found'
            assert store1.random_media_id(type_name), 'No media id found'
    print('all tests passed.')


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(
        prog='wechat_uploader',
        description='WeChat Images Uploader v0.1.0')
    parser.add_argument('-c', '--check', action="store_true", help='check source dir')
    parser.add_argument('-t', '--test', action="store_true", help='test read media id')
    parser.add_argument('-u', '--upload', action="store_true", help='upload all images')
    parser.add_argument('-s', '--source', help='images source directory')
    args = parser.parse_args()
    # print(args)
    source_dir = args.source or SOURCE_ROOT
    if args.check:
        check_all(source_dir)
    elif args.upload:
        update_all(source_dir)
    elif args.test:
        test_all()
    else:
        parser.print_help()
"""The auto-tuning module of tvm This module includes: * Tuning space definition API * Efficient auto-tuners * Tuning result and database support * Distributed measurement to scale up tuning """ from . import database from . import feature from . import measure from . import record from . import task from . import tuner from . import util from . import env from . import tophub from .measure import measure_option, MeasureInput, MeasureResult, MeasureErrorNo, \ LocalBuilder, LocalRunner, RPCRunner from .tuner import callback from .task import template, get_config, create, ConfigSpace, ConfigEntity, \ register_topi_compute, register_topi_schedule, \ DispatchContext, FallbackContext, ApplyHistoryBest as apply_history_best from .env import GLOBAL_SCOPE
# Project Euler problem 1: sum of all natural numbers below 1000 that are
# divisible by 3 or 5.  The accumulator is named `total` (the original used
# `sum`, shadowing the built-in), and the manual loop is replaced by the
# built-in sum() over a generator expression.
total = sum(x for x in range(1000) if x % 3 == 0 or x % 5 == 0)
print(total)
from __future__ import annotations import hashlib import json from dataclasses import dataclass from enum import Enum from typing import Any, Callable, Iterable, Set, TypeVar from pkg_resources import Requirement from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints from pants.util.ordered_set import FrozenOrderedSet BEGIN_LOCKFILE_HEADER = b"# --- BEGIN PANTS LOCKFILE METADATA: DO NOT EDIT OR REMOVE ---" END_LOCKFILE_HEADER = b"# --- END PANTS LOCKFILE METADATA ---" _concrete_metadata_classes: dict[int, type[LockfileMetadata]] = {} def _lockfile_metadata_version( version: int, ) -> Callable[[type[LockfileMetadata]], type[LockfileMetadata]]: """Decorator to register a Lockfile metadata version subclass with a given version number. The class must be a frozen dataclass """ def _dec(cls: type[LockfileMetadata]) -> type[LockfileMetadata]: # Only frozen dataclasses may be registered as lockfile metadata: cls_dataclass_params = getattr(cls, "__dataclass_params__", None) if not cls_dataclass_params or not cls_dataclass_params.frozen: raise ValueError( "Classes registered with `_lockfile_metadata_version` may only be " "frozen dataclasses" ) _concrete_metadata_classes[version] = cls return cls return _dec class InvalidLockfileError(Exception): pass @dataclass(frozen=True) class LockfileMetadata: """Base class for metadata that is attached to a given lockfiles. This class, and provides the external API for serializing, deserializing, and validating the contents of individual lockfiles. New versions of metadata implement a concrete subclass and provide deserialization and validation logic, along with specialist serialization logic. To construct an instance of the most recent concrete subclass, call `LockfileMetadata.new()`. 
""" _LockfileMetadataSubclass = TypeVar("_LockfileMetadataSubclass", bound="LockfileMetadata") valid_for_interpreter_constraints: InterpreterConstraints @staticmethod def new( valid_for_interpreter_constraints: InterpreterConstraints, requirements: set[Requirement], ) -> LockfileMetadata: """Call the most recent version of the `LockfileMetadata` class to construct a concrete instance. This static method should be used in place of the `LockfileMetadata` constructor. This gives calling sites a predictable method to call to construct a new `LockfileMetadata` for writing, while still allowing us to support _reading_ older, deprecated metadata versions. """ return LockfileMetadataV2(valid_for_interpreter_constraints, requirements) @staticmethod def from_lockfile( lockfile: bytes, lockfile_path: str | None = None, resolve_name: str | None = None ) -> LockfileMetadata: """Parse all relevant metadata from the lockfile's header.""" in_metadata_block = False metadata_lines = [] for line in lockfile.splitlines(): if line == BEGIN_LOCKFILE_HEADER: in_metadata_block = True elif line == END_LOCKFILE_HEADER: break elif in_metadata_block: metadata_lines.append(line[2:]) error_suffix = ( "To resolve this error, you will need to regenerate the lockfile by running " "`./pants generate-lockfiles" ) if resolve_name: error_suffix += " --resolve={tool_name}" error_suffix += "`." if lockfile_path is not None and resolve_name is not None: lockfile_description = f"the lockfile `{lockfile_path}` for `{resolve_name}`" elif lockfile_path is not None: lockfile_description = f"the lockfile `{lockfile_path}`" elif resolve_name is not None: lockfile_description = f"the lockfile for `{resolve_name}`" else: lockfile_description = "this lockfile" if not metadata_lines: raise InvalidLockfileError( f"Could not find a Pants metadata block in {lockfile_description}. 
{error_suffix}" ) try: metadata = json.loads(b"\n".join(metadata_lines)) except json.decoder.JSONDecodeError: raise InvalidLockfileError( f"Metadata header in {lockfile_description} is not a valid JSON string and can't " "be decoded. " + error_suffix ) concrete_class = _concrete_metadata_classes[metadata["version"]] return concrete_class._from_json_dict(metadata, lockfile_description, error_suffix) @classmethod def _from_json_dict( cls: type[_LockfileMetadataSubclass], json_dict: dict[Any, Any], lockfile_description: str, error_suffix: str, ) -> _LockfileMetadataSubclass: """Construct a `LockfileMetadata` subclass from the supplied JSON dict. *** Not implemented. Subclasses should override. *** `lockfile_description` is a detailed, human-readable description of the lockfile, which can be read by the user to figure out which lockfile is broken in case of an error. `error_suffix` is a string describing how to fix the lockfile. """ raise NotImplementedError( "`LockfileMetadata._from_json_dict` should not be directly " "called." ) def add_header_to_lockfile(self, lockfile: bytes, *, regenerate_command: str) -> bytes: metadata_dict = self._header_dict() metadata_json = json.dumps(metadata_dict, ensure_ascii=True, indent=2).splitlines() metadata_as_a_comment = "\n".join(f"# {l}" for l in metadata_json).encode("ascii") header = b"%b\n%b\n%b" % (BEGIN_LOCKFILE_HEADER, metadata_as_a_comment, END_LOCKFILE_HEADER) regenerate_command_bytes = ( f"# This lockfile was autogenerated by Pants. To regenerate, run:\n#\n" f"# {regenerate_command}" ).encode() return b"%b\n#\n%b\n\n%b" % (regenerate_command_bytes, header, lockfile) def _header_dict(self) -> dict[Any, Any]: """Produce a dictionary to be serialized into the lockfile header. Subclasses should call `super` and update the resulting dictionary. 
""" version: int for ver, cls in _concrete_metadata_classes.items(): if isinstance(self, cls): version = ver break else: raise ValueError("Trying to serialize an unregistered `LockfileMetadata` subclass.") return { "version": version, "valid_for_interpreter_constraints": [ str(ic) for ic in self.valid_for_interpreter_constraints ], } def is_valid_for( self, expected_invalidation_digest: str | None, user_interpreter_constraints: InterpreterConstraints, interpreter_universe: Iterable[str], user_requirements: Iterable[Requirement] | None, ) -> LockfileMetadataValidation: """Returns Truthy if this `LockfileMetadata` can be used in the current execution context.""" raise NotImplementedError("call `is_valid_for` on subclasses only") @_lockfile_metadata_version(1) @dataclass(frozen=True) class LockfileMetadataV1(LockfileMetadata): requirements_invalidation_digest: str @classmethod def _from_json_dict( cls: type[LockfileMetadataV1], json_dict: dict[Any, Any], lockfile_description: str, error_suffix: str, ) -> LockfileMetadataV1: metadata = _get_metadata(json_dict, lockfile_description, error_suffix) interpreter_constraints = metadata( "valid_for_interpreter_constraints", InterpreterConstraints, InterpreterConstraints ) requirements_digest = metadata("requirements_invalidation_digest", str, None) return LockfileMetadataV1(interpreter_constraints, requirements_digest) def _header_dict(self) -> dict[Any, Any]: d = super()._header_dict() d["requirements_invalidation_digest"] = self.requirements_invalidation_digest return d def is_valid_for( self, expected_invalidation_digest: str | None, user_interpreter_constraints: InterpreterConstraints, interpreter_universe: Iterable[str], _: Iterable[Requirement] | None, # User requirements are not used by V1 ) -> LockfileMetadataValidation: failure_reasons: set[InvalidLockfileReason] = set() if expected_invalidation_digest is None: return LockfileMetadataValidation(failure_reasons) if self.requirements_invalidation_digest != 
expected_invalidation_digest: failure_reasons.add(InvalidLockfileReason.INVALIDATION_DIGEST_MISMATCH) if not self.valid_for_interpreter_constraints.contains( user_interpreter_constraints, interpreter_universe ): failure_reasons.add(InvalidLockfileReason.INTERPRETER_CONSTRAINTS_MISMATCH) return LockfileMetadataValidation(failure_reasons) @_lockfile_metadata_version(2) @dataclass(frozen=True) class LockfileMetadataV2(LockfileMetadata): """Lockfile version that permits specifying a requirements as a set rather than a digest. Validity is tested by the set of requirements strings being the same in the user requirements as those in the stored requirements. """ requirements: set[Requirement] @classmethod def _from_json_dict( cls: type[LockfileMetadataV2], json_dict: dict[Any, Any], lockfile_description: str, error_suffix: str, ) -> LockfileMetadataV2: metadata = _get_metadata(json_dict, lockfile_description, error_suffix) requirements = metadata( "generated_with_requirements", Set[Requirement], lambda l: {Requirement.parse(i) for i in l}, ) interpreter_constraints = metadata( "valid_for_interpreter_constraints", InterpreterConstraints, InterpreterConstraints ) return LockfileMetadataV2(interpreter_constraints, requirements) def _header_dict(self) -> dict[Any, Any]: out = super()._header_dict() # Requirements need to be stringified then sorted so that tests are deterministic. Sorting # followed by stringifying does not produce a meaningful result. 
out["generated_with_requirements"] = ( sorted(str(i) for i in self.requirements) if self.requirements is not None else None ) return out def is_valid_for( self, _: str | None, # Validation digests are not used by V2; this param will be deprecated user_interpreter_constraints: InterpreterConstraints, interpreter_universe: Iterable[str], user_requirements: Iterable[Requirement] | None, ) -> LockfileMetadataValidation: failure_reasons: set[InvalidLockfileReason] = set() if user_requirements is None: return LockfileMetadataValidation(failure_reasons) if self.requirements != set(user_requirements): failure_reasons.add(InvalidLockfileReason.REQUIREMENTS_MISMATCH) if not self.valid_for_interpreter_constraints.contains( user_interpreter_constraints, interpreter_universe ): failure_reasons.add(InvalidLockfileReason.INTERPRETER_CONSTRAINTS_MISMATCH) return LockfileMetadataValidation(failure_reasons) def calculate_invalidation_digest(requirements: Iterable[str]) -> str: """Returns an invalidation digest for the given requirements.""" m = hashlib.sha256() inputs = { # `FrozenOrderedSet` deduplicates while keeping ordering, which speeds up the sorting if # the input was already sorted. 
"requirements": sorted(FrozenOrderedSet(requirements)), } m.update(json.dumps(inputs).encode("utf-8")) return m.hexdigest() class InvalidLockfileReason(Enum): INVALIDATION_DIGEST_MISMATCH = "invalidation_digest_mismatch" INTERPRETER_CONSTRAINTS_MISMATCH = "interpreter_constraints_mismatch" REQUIREMENTS_MISMATCH = "requirements_mismatch" class LockfileMetadataValidation: """Boolean-like value which additionally carries reasons why a validation failed.""" failure_reasons: set[InvalidLockfileReason] def __init__(self, failure_reasons: Iterable[InvalidLockfileReason] = ()): self.failure_reasons = set(failure_reasons) def __bool__(self): return not self.failure_reasons T = TypeVar("T") def _get_metadata( metadata: dict[Any, Any], lockfile_description: str, error_suffix: str, ) -> Callable[[str, type[T], Callable[[Any], T] | None], T]: """Returns a function that will get a given key from the `metadata` dict, and optionally do some verification and post-processing to return a value of the correct type.""" def get_metadata(key: str, type_: type[T], coerce: Callable[[Any], T] | None) -> T: val: Any try: val = metadata[key] except KeyError: raise InvalidLockfileError( f"Required key `{key}` is not present in metadata header for " f"{lockfile_description}. {error_suffix}" ) if not coerce: if isinstance(val, type_): return val raise InvalidLockfileError( f"Metadata value `{key}` in {lockfile_description} must " f"be a {type(type_).__name__}. {error_suffix}" ) else: try: return coerce(val) except Exception: raise InvalidLockfileError( f"Metadata value `{key}` in {lockfile_description} must be able to " f"be converted to a {type(type_).__name__}. {error_suffix}" ) return get_metadata
# Schema-migration helpers for the dragon database, built on sqlalchemy-migrate.
import distutils.version as dist_version
import os
import sys

from dragon.db.sqlalchemy.session import get_engine
from dragon.db import migration

import sqlalchemy
import migrate
from migrate.versioning import util as migrate_util

from dragon.openstack.common import exception
from dragon.openstack.common.gettextutils import _

# Lazily-created singleton Repository; see _find_migrate_repo().
_REPOSITORY = None


@migrate_util.decorator
def patched_with_engine(f, *a, **kw):
    """Call ``f`` with a freshly constructed SQLAlchemy engine, disposing it
    afterwards.

    Replacement for ``migrate_util.with_engine`` on old sqlalchemy-migrate
    versions (see the version check below), which did not dispose the engine
    it created.
    """
    url = a[0]
    engine = migrate_util.construct_engine(url, **kw)

    try:
        kw['engine'] = engine
        return f(*a, **kw)
    finally:
        # Only dispose engines constructed here; if the caller passed an
        # engine object in place of a URL, the caller owns its lifecycle.
        if isinstance(engine, migrate_util.Engine) and engine is not url:
            migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine)
            engine.dispose()


# Monkey-patch the engine-dispose fix into sqlalchemy-migrate only when the
# installed version predates 0.7.3 (or does not report a version at all).
MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3')
if (not hasattr(migrate, '__version__') or
        dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
    migrate_util.with_engine = patched_with_engine


# NOTE: these imports are placed after the monkey-patch above so the
# versioning API picks up the patched ``with_engine``.
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
try:
    # Older migrate layout.
    from migrate.versioning import exceptions as versioning_exceptions
except ImportError:
    try:
        # Newer migrate layout moved the exceptions module.
        from migrate import exceptions as versioning_exceptions
    except ImportError:
        sys.exit(_("python-migrate is not installed. Exiting."))


def db_sync(version=None):
    """Upgrade or downgrade the database schema to ``version``.

    :param version: target schema version (int or int-like string);
        ``None`` means "migrate to latest".
    :raises exception.Error: if ``version`` cannot be converted to int.
    """
    if version is not None:
        try:
            version = int(version)
        except ValueError:
            raise exception.Error(_("version should be an integer"))

    current_version = db_version()
    repository = _find_migrate_repo()
    # Moving forward (or to latest) is an upgrade; otherwise a downgrade.
    if version is None or version > current_version:
        return versioning_api.upgrade(get_engine(), repository, version)
    else:
        return versioning_api.downgrade(get_engine(), repository, version)


def db_version():
    """Return the current schema version of the database.

    An empty database that is not yet under version control is placed under
    version control at ``migration.INIT_VERSION`` first; a non-empty,
    unversioned database re-raises the original error rather than guessing.
    """
    repository = _find_migrate_repo()
    try:
        return versioning_api.db_version(get_engine(), repository)
    except versioning_exceptions.DatabaseNotControlledError as exc:
        # If we aren't version controlled there may be an existing,
        # non-version controlled database present.
        meta = sqlalchemy.MetaData()
        engine = get_engine()
        meta.reflect(bind=engine)
        tables = meta.tables
        if len(tables):
            # Tables exist but no version info: refuse to assume a version.
            raise exc
        db_version_control(migration.INIT_VERSION)
        return versioning_api.db_version(get_engine(), repository)


def db_version_control(version=None):
    """Mark the database as being at ``version`` without running migrations."""
    repository = _find_migrate_repo()
    versioning_api.version_control(get_engine(), repository, version)
    return version


def _find_migrate_repo():
    """Get the path for the migrate repository."""
    # The 'migrate_repo' directory is expected to live next to this module.
    path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                        'migrate_repo')
    assert os.path.exists(path)
    global _REPOSITORY
    if _REPOSITORY is None:
        _REPOSITORY = Repository(path)
    return _REPOSITORY
"""Analysis for discounting_chain.""" from typing import Optional, Sequence from bsuite.experiments.discounting_chain import sweep from bsuite.utils import plotting import numpy as np import pandas as pd import plotnine as gg NUM_EPISODES = sweep.NUM_EPISODES BASE_REGRET = 0.08 TAGS = sweep.TAGS _HORIZONS = np.array([1, 3, 10, 30, 100]) def score(df: pd.DataFrame) -> float: """Output a single score for discounting_chain.""" n_eps = np.minimum(df.episode.max(), sweep.NUM_EPISODES) ave_return = df.loc[df.episode == n_eps, 'total_return'].mean() / n_eps raw_score = 1. - 10. * (1.1 - ave_return) return np.clip(raw_score, 0, 1) def _mapping_seed_compatibility(df: pd.DataFrame) -> pd.DataFrame: """Utility function to maintain compatibility with old bsuite runs.""" # Discounting chain kwarg "seed" was renamed to "mapping_seed" if 'mapping_seed' in df.columns: nan_seeds = df.mapping_seed.isna() if np.any(nan_seeds): df.loc[nan_seeds, 'mapping_seed'] = df.loc[nan_seeds, 'seed'] print('WARNING: seed renamed to "mapping_seed" for compatibility.') else: if 'seed' in df.columns: print('WARNING: seed renamed to "mapping_seed" for compatibility.') df['mapping_seed'] = df.seed else: print('ERROR: outdated bsuite run, please relaunch.') return df def dc_preprocess(df_in: pd.DataFrame) -> pd.DataFrame: """Preprocess discounting chain data for use with regret metrics.""" df = df_in.copy() df = _mapping_seed_compatibility(df) df['optimal_horizon'] = _HORIZONS[ (df.mapping_seed % len(_HORIZONS)).astype(int)] df['total_regret'] = 1.1 * df.episode - df.total_return df['optimal_horizon'] = df.optimal_horizon.astype('category') return df def plot_learning(df: pd.DataFrame, sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot: """Plots the average regret through time by optimal_horizon.""" df = dc_preprocess(df_in=df) p = plotting.plot_regret_learning( df_in=df, group_col='optimal_horizon', sweep_vars=sweep_vars, max_episode=sweep.NUM_EPISODES ) p += 
gg.geom_hline(gg.aes(yintercept=BASE_REGRET), linetype='dashed', alpha=0.4, size=1.75) p += gg.coord_cartesian(ylim=(0, 0.1)) return p def plot_average(df: pd.DataFrame, sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot: """Plots the average regret at 1k episodes by optimal_horizon.""" df = dc_preprocess(df_in=df) p = plotting.plot_regret_average( df_in=df, group_col='optimal_horizon', episode=sweep.NUM_EPISODES, sweep_vars=sweep_vars ) p += gg.geom_hline(gg.aes(yintercept=BASE_REGRET), linetype='dashed', alpha=0.4, size=1.75) return p def plot_seeds(df_in: pd.DataFrame, sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot: """Plot the returns through time individually by run.""" df = dc_preprocess(df_in) df['average_return'] = 1.1 - (df.total_regret.diff() / df.episode.diff()) p = plotting.plot_individual_returns( df_in=df, max_episode=NUM_EPISODES, return_column='average_return', colour_var='optimal_horizon', yintercept=1.1, sweep_vars=sweep_vars, ) return p + gg.ylab('average episodic return')
""" Copyright 2015 SmartBear Software Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Ref: https://github.com/swagger-api/swagger-codegen """ from pprint import pformat from six import iteritems class Thermal100Temperature(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self): """ Thermal100Temperature - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. """ self.swagger_types = { 'member_id': 'str', 'oem': 'ResourceOem', 'physical_context': 'PhysicalContext100PhysicalContext', 'related_item': 'list[Odata400IdRef]', 'related_itemodata_count': 'Odata400Count', 'related_itemodata_navigation_link': 'Odata400IdRef', 'status': 'ResourceStatus' } self.attribute_map = { 'member_id': 'MemberId', 'oem': 'Oem', 'physical_context': 'PhysicalContext', 'related_item': 'RelatedItem', 'related_itemodata_count': 'RelatedItem@odata.count', 'related_itemodata_navigation_link': 'RelatedItem@odata.navigationLink', 'status': 'Status' } self._member_id = None self._oem = None self._physical_context = None self._related_item = None self._related_itemodata_count = None self._related_itemodata_navigation_link = None self._status = None @property def member_id(self): """ Gets the member_id of this Thermal100Temperature. This is the identifier for the member within the collection. 
:return: The member_id of this Thermal100Temperature. :rtype: str """ return self._member_id @member_id.setter def member_id(self, member_id): """ Sets the member_id of this Thermal100Temperature. This is the identifier for the member within the collection. :param member_id: The member_id of this Thermal100Temperature. :type: str """ self._member_id = member_id @property def oem(self): """ Gets the oem of this Thermal100Temperature. This is the manufacturer/provider specific extension moniker used to divide the Oem object into sections. :return: The oem of this Thermal100Temperature. :rtype: ResourceOem """ return self._oem @oem.setter def oem(self, oem): """ Sets the oem of this Thermal100Temperature. This is the manufacturer/provider specific extension moniker used to divide the Oem object into sections. :param oem: The oem of this Thermal100Temperature. :type: ResourceOem """ self._oem = oem @property def physical_context(self): """ Gets the physical_context of this Thermal100Temperature. Describes the area or device to which this temperature measurement applies. :return: The physical_context of this Thermal100Temperature. :rtype: PhysicalContext100PhysicalContext """ return self._physical_context @physical_context.setter def physical_context(self, physical_context): """ Sets the physical_context of this Thermal100Temperature. Describes the area or device to which this temperature measurement applies. :param physical_context: The physical_context of this Thermal100Temperature. :type: PhysicalContext100PhysicalContext """ self._physical_context = physical_context @property def related_item(self): """ Gets the related_item of this Thermal100Temperature. Describes the areas or devices to which this temperature measurement applies. :return: The related_item of this Thermal100Temperature. :rtype: list[Odata400IdRef] """ return self._related_item @related_item.setter def related_item(self, related_item): """ Sets the related_item of this Thermal100Temperature. 
Describes the areas or devices to which this temperature measurement applies. :param related_item: The related_item of this Thermal100Temperature. :type: list[Odata400IdRef] """ self._related_item = related_item @property def related_itemodata_count(self): """ Gets the related_itemodata_count of this Thermal100Temperature. :return: The related_itemodata_count of this Thermal100Temperature. :rtype: Odata400Count """ return self._related_itemodata_count @related_itemodata_count.setter def related_itemodata_count(self, related_itemodata_count): """ Sets the related_itemodata_count of this Thermal100Temperature. :param related_itemodata_count: The related_itemodata_count of this Thermal100Temperature. :type: Odata400Count """ self._related_itemodata_count = related_itemodata_count @property def related_itemodata_navigation_link(self): """ Gets the related_itemodata_navigation_link of this Thermal100Temperature. :return: The related_itemodata_navigation_link of this Thermal100Temperature. :rtype: Odata400IdRef """ return self._related_itemodata_navigation_link @related_itemodata_navigation_link.setter def related_itemodata_navigation_link(self, related_itemodata_navigation_link): """ Sets the related_itemodata_navigation_link of this Thermal100Temperature. :param related_itemodata_navigation_link: The related_itemodata_navigation_link of this Thermal100Temperature. :type: Odata400IdRef """ self._related_itemodata_navigation_link = related_itemodata_navigation_link @property def status(self): """ Gets the status of this Thermal100Temperature. :return: The status of this Thermal100Temperature. :rtype: ResourceStatus """ return self._status @status.setter def status(self, status): """ Sets the status of this Thermal100Temperature. :param status: The status of this Thermal100Temperature. 
:type: ResourceStatus """ self._status = status def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Add an indexed boolean ``queued`` flag to the notification model."""

    dependencies = [
        ('main', '0001_initial'),
    ]

    operations = [
        # New flag defaults to False for existing rows; db_index=True so the
        # column can be filtered efficiently.
        migrations.AddField(
            model_name='notification',
            name='queued',
            field=models.BooleanField(db_index=True, default=False),
        ),
    ]
from __future__ import unicode_literals

from django import template
from django.conf import settings
from django.utils import timezone

from ads.models import Ad

register = template.Library()


@register.inclusion_tag('ads/tags/render_ads_zone.html', takes_context=True)
def render_ads_zone(context, zone):
    """
    Returns an advertise for a ``zone``.

    Tag usage:
    {% load ads_tags %}
    {% render_zone 'zone' %}
    """
    extra_context = {
        'google_adsense_client': settings.ADS_GOOGLE_ADSENSE_CLIENT,
        'zone': zone,
        'zone_info': settings.ADS_ZONES.get(zone, None),
    }
    context.update(extra_context)
    return context


@register.simple_tag
def get_ads_count(zone):
    """
    Returns ads count for ``zone``.

    Tag usage:
    {% load ads_tags %}
    {% get_ads_count 'zone' as ads_count %}
    {% get_ads_count 'zone1,zone2,zone3' as ads_count %}
    """
    # A comma-separated argument means "count across all listed zones".
    zone_names = zone.split(',')
    return Ad.objects.public().filter(zone__in=zone_names).count()
'''
Created on Sep 15, 2012

Agent classes. Contains references to instances of classes containing observer
handlers and code

Agent Instances are created automatically. Create a named Handler instance
under the Agent, as an instance of the desired handler class, by create (POST)
of a JSON object containing a dictionary of settings for example

Agent.create({'resourceCName': 'addHandler_1','resourceClass': 'addHandler'})

@author: mjkoster
'''
# NOTE: Python 2 module (print statement, dict.has_key).
from RESTfulResource import RESTfulResource
from LinkFormatProxy import LinkFormatProxy
import subprocess


class Handler(RESTfulResource):
    # single base class for handlers to extend directly, contains convenience
    # methods for linking resources
    def __init__(self, parentObject=None, resourceDescriptor = {}):
        RESTfulResource.__init__(self, parentObject, resourceDescriptor)
        # use the constructor descriptor for the initial settings
        self._settings = self._resourceDescriptor
        # link cache keeps endpoints hashed by pathFromBase string, only need
        # to walk the path one time
        self._linkBaseDict = self.Resources.get('baseObject').resources
        self._linkCache = {}
        self._init()

    def _init(self):
        # subclass hook called at the end of construction; default no-op
        pass

    def get(self, Key=None):
        # return one setting by key, or the whole settings dict when no key
        if Key != None :
            return self._settings[Key]
        else :
            return self._settings

    def set(self, newSettings):
        # create an instance of a handler from settings dictionary
        self._settings.update(newSettings)

    def handleNotify(self, updateRef=None):
        # external method to call from Observer-Notifier
        self._handleNotify(updateRef)

    def _handleNotify(self, updateRef=None ):
        # override this for handling state changes from an observer
        pass

    def linkToRef(self, linkPath):
        '''
        takes a path string and walks the object tree from a base dictionary
        returns a ref to the resource at the path endpoint
        store translations in a hash cache for fast lookup after the first walk
        '''
        self._linkPath = linkPath
        if self._linkPath in self._linkCache.keys() :
            return self._linkCache[self._linkPath]
        # cache miss, walk path and update cache at end
        self._currentDict = self._linkBaseDict
        self._pathElements = linkPath.split('/')
        # all but the last element are containers; descend through their
        # .resources dicts
        for pathElement in self._pathElements[:-1] : # all but the last, which should be the endpoint
            self._currentDict = self._currentDict[pathElement].resources
        self._resource = self._currentDict[self._pathElements[-1] ]
        self._linkCache.update({ self._linkPath : self._resource })
        return self._resource

    def getByLink(self, linkPath):
        # read the value of the resource at linkPath
        return self.linkToRef(linkPath).get()

    def setByLink(self, linkPath, newValue):
        # write newValue to the resource at linkPath
        self.linkToRef(linkPath).set(newValue)


class addHandler(Handler):
    # an example appHandler that adds two values together and stores the result
    # define a method for handling state changes in observed resources
    def _handleNotify(self, updateRef = None ):
        # get the 2 addends, add them, and set the sum location
        self._addend1 = self.getByLink(self._settings['addendLink1'])
        self._addend2 = self.getByLink(self._settings['addendLink2'])
        self.setByLink( self._settings['sumOutLink'], self._addend1 + self._addend2 )


class logPrintHandler(Handler):
    # debug handler: print the observed resource's name and current value
    def _handleNotify(self, resource) :
        print resource.Properties.get('resourceName'), ' = ', resource.get()


class BLE_ColorLED_handler(Handler):
    # writes the observed resource's value to a BLE characteristic by shelling
    # out to gatttool, using connection settings from the descriptor
    def _handleNotify(self, resource = None ):
        subprocess.call([("/usr/local/bin/gatttool"),\
                         ("--device="+self._settings['MACaddress']),\
                         ("--addr-type="+self._settings['MACtype']),\
                         ("--char-write"),\
                         ("--handle="+self._settings['charHandle']),\
                         ("--value=0x"+resource.get())])


class Agent(RESTfulResource):
    # Agent is a container for Handlers and daemons, instantiated as a
    # resource of a SmartObject
    def __init__(self, parentObject=None, resourceDescriptor = {}):
        RESTfulResource.__init__(self, parentObject, resourceDescriptor)
        # maps handler name -> handler class name (see create())
        self._handlers = {}

    def get(self, handlerName=None):
        if handlerName == None:
            return self._handlers # to get the list of names
        else:
            if self._handlers.has_key(handlerName) :
                return self._handlers[handlerName] # to get reference to handler resources by handler name
        return None

    # new create takes dictionary built from JSON object POSTed to parent resource
    def create(self, resourceDescriptor):
        resourceName = resourceDescriptor['resourceName']
        resourceClass = resourceDescriptor['resourceClass']
        # import the module if it's specified in the descriptor
        if resourceDescriptor.has_key('resourceClassPath') :
            resourceClassPath = resourceDescriptor['resourceClassPath']
            self.importByPath(resourceClassPath)
        if resourceName not in self.resources:
            # create new instance of the named class and add to resources directory, return the ref
            self.resources.update({resourceName : globals()[resourceClass](self, resourceDescriptor)})
            #pass the constructor the entire descriptor for creating the properties object
            #self.resources.update({resourceName : globals()[resourceClass](self, resourceDescriptor)})
            self._handlers.update({resourceName: resourceClass})
        return self.resources[resourceName] # returns a reference to the created instance

    # need to destroy instance of code module
    # FIXME Doesn't seem to work. Need to look at this and recursive import
    # issue, devise dynamic import system
    def importByPath(self,classPath):
        # separate the module path from the class,import the module, and return the class name
        self._components = classPath.split('.')
        self._module = __import__( '.'.join(self._components[:-1]) )
        return self._module
"""Deploy a model in AI Platform.""" import logging import json import time import subprocess from googleapiclient import discovery from googleapiclient import errors _WAIT_FOR_COMPLETION_SLEEP_SECONDS = 10 _PYTHON_VERSION = '3.5' _RUN_TIME_VERSION = '1.15' def _create_service(): """Gets service instance to start API searches. :return: """ return discovery.build('ml', 'v1') def copy_artifacts(source_path, destination_path): """ :param source_path: :param destination_path: :return: """ logging.info( 'Moving model directory from {} to {}'.format(source_path, destination_path)) subprocess.call( "gsutil -m cp -r {} {}".format(source_path, destination_path), shell=True) class AIPlatformModel(object): def __init__(self, project_id): self._project_id = project_id self._service = _create_service() def model_exists(self, model_name): """ :param model_name: :return: """ models = self._service.projects().models() try: response = models.list( parent='projects/{}'.format(self._project_id)).execute() if response: for model in response['models']: if model['name'].rsplit('/', 1)[1] == model_name: return True else: return False except errors.HttpError as err: logging.error('%s', json.loads(err.content)['error']['message']) def _list_model_versions(self, model_name): """Lists existing model versions in the project. Args: model_name: Model name to list versions for. Returns: Dictionary of model versions. 
""" versions = self._service.projects().models().versions() try: return versions.list( parent='projects/{}/models/{}'.format(self._project_id, model_name)).execute() except errors.HttpError as err: logging.error('%s', json.loads(err.content)['error']['message']) def create_model(self, model_name, model_region='us-central1'): """ :param model_name: :param model_region: :return: """ if not self.model_exists(model_name): body = { 'name': model_name, 'regions': model_region, 'description': 'MLflow model' } parent = 'projects/{}'.format(self._project_id) try: self._service.projects().models().create( parent=parent, body=body).execute() logging.info('Model "%s" has been created.', model_name) except errors.HttpError as err: logging.error('"%s". Skipping model creation.', json.loads(err.content)['error']['message']) else: logging.warning('Model "%s" already exists.', model_name) def deploy_model(self, bucket_name, model_name, model_version, runtime_version=_RUN_TIME_VERSION): """Deploys model on AI Platform. Args: bucket_name: Cloud Storage Bucket name that stores saved model. model_name: Model name to deploy. model_version: Model version. runtime_version: Runtime version. Raises: RuntimeError if deployment completes with errors. 
""" # For details on request body, refer to: # https://cloud.google.com/ml-engine/reference/rest/v1/projects # .models.versions/create model_version_exists = False model_versions_list = self._list_model_versions(model_name) # Field: version.name Error: A name should start with a letter and # contain only letters, numbers and underscores model_version = 'mlflow_{}'.format(model_version) if model_versions_list: for version in model_versions_list['versions']: if version['name'].rsplit('/', 1)[1] == model_version: model_version_exists = True if not model_version_exists: request_body = { 'name': model_version, 'deploymentUri': '{}'.format(bucket_name), 'framework': 'TENSORFLOW', 'runtimeVersion': runtime_version, 'pythonVersion': _PYTHON_VERSION } parent = 'projects/{}/models/{}'.format(self._project_id, model_name) response = self._service.projects().models().versions().create( parent=parent, body=request_body).execute() op_name = response['name'] while True: deploy_status = ( self._service.projects().operations().get( name=op_name).execute()) if deploy_status.get('done'): logging.info('Model "%s" with version "%s" deployed.', model_name, model_version) break if deploy_status.get('error'): logging.error(deploy_status['error']) raise RuntimeError( 'Failed to deploy model for serving: {}'.format( deploy_status['error'])) logging.info( 'Waiting for %d seconds for "%s" with "%s" version to be ' 'deployed.', _WAIT_FOR_COMPLETION_SLEEP_SECONDS, model_name, model_version) time.sleep(_WAIT_FOR_COMPLETION_SLEEP_SECONDS) else: logging.info('Model "%s" with version "%s" already exists.', model_name, model_version)
from typing import Any, Callable, Tuple, Union

from packed import pack, unpack

import jj
from jj import default_app, default_handler
from jj.apps import BaseApp, create_app
from jj.http.codes import BAD_REQUEST, OK
from jj.http.methods import ANY, DELETE, GET, POST
from jj.matchers import LogicalMatcher, RequestMatcher, ResolvableMatcher, exists
from jj.requests import Request
from jj.resolvers import Registry, Resolver
from jj.responses import RelayResponse, Response, StreamResponse

from ._history import HistoryRepository
from ._remote_response import RemoteResponseType

__all__ = ("Mock",)

# A handler can be registered against either a single request matcher or a
# logical combination of matchers.
MatcherType = Union[RequestMatcher, LogicalMatcher]


class Mock(jj.App):
    """Remote-controllable mock server.

    Requests carrying the ``x-jj-remote-mock`` header administer the mock
    (register/deregister handlers, fetch history); all other requests are
    resolved against the registered handlers.
    """

    def __init__(self,
                 app_factory: Callable[..., BaseApp] = create_app,
                 resolver_factory: Callable[..., Resolver] = Resolver) -> None:
        self._resolver = resolver_factory(Registry(), default_app, default_handler)
        self._app = app_factory(resolver=self._resolver)
        # Stores request/response pairs tagged by handler id (see resolve()).
        self._repo = HistoryRepository()

    def _decode(self, payload: bytes) -> Tuple[str, MatcherType, RemoteResponseType]:
        """Unpack a remote-mock payload into (handler_id, matcher, response).

        Raises (AssertionError or unpack errors) on malformed payloads;
        callers translate that into a 400 response.
        """
        def resolver(cls: Any, **kwargs: Any) -> Any:
            # Re-bind unpacked matchers to this app's resolver.
            return cls.__unpacked__(**kwargs, resolver=self._resolver)

        decoded = unpack(payload, {ResolvableMatcher: resolver})

        handler_id = decoded.get("id")
        assert isinstance(handler_id, str)

        matcher = decoded.get("request")
        assert isinstance(matcher, (RequestMatcher, LogicalMatcher))

        response = decoded.get("response")
        assert isinstance(response, (Response, RelayResponse))

        return handler_id, matcher, response

    @jj.match(POST, headers={"x-jj-remote-mock": exists})
    async def register(self, request: Request) -> Response:
        """Register a matcher/response pair under the payload's handler id."""
        payload = await request.read()
        try:
            handler_id, matcher, response = self._decode(payload)
        except Exception:
            return Response(status=BAD_REQUEST, json={"status": BAD_REQUEST})

        async def handler(request: Request) -> RemoteResponseType:
            # Copy so each served response is independent of the registered one.
            return response.copy()

        self._resolver.register_attribute("handler_id", handler_id, handler)

        # Handlers are attached as class attributes so the resolver picks
        # them up like statically declared ones.
        setattr(self._app.__class__, handler_id, matcher(handler))
        return Response(status=OK, json={"status": OK})

    @jj.match(DELETE, headers={"x-jj-remote-mock": exists})
    async def deregister(self, request: Request) -> Response:
        """Remove the handler named in the payload and purge its history."""
        payload = await request.read()
        try:
            handler_id, *_ = self._decode(payload)
        except Exception:
            return Response(status=BAD_REQUEST, json={"status": BAD_REQUEST})

        try:
            delattr(self._app.__class__, handler_id)
        except AttributeError:
            # Deregistering an unknown handler is a no-op, not an error.
            pass

        await self._repo.delete_by_tag(handler_id)
        return Response(status=OK, json={"status": OK})

    @jj.match(GET, headers={"x-jj-remote-mock": exists})
    async def history(self, request: Request) -> Response:
        """Return the packed request/response history for a handler id."""
        payload = await request.read()
        try:
            handler_id, *_ = self._decode(payload)
        except Exception:
            return Response(status=BAD_REQUEST, json={"status": BAD_REQUEST})

        history = await self._repo.get_by_tag(handler_id)
        packed = pack(history)
        return Response(status=OK, body=packed)

    @jj.match(ANY)
    async def resolve(self, request: Request) -> StreamResponse:
        """Serve a normal request via the registered handlers, recording it."""
        handler = await self._resolver.resolve(request, self._app)
        response = await handler(request)

        # Only requests that hit a remotely registered handler are recorded.
        handler_id = self._resolver.get_attribute("handler_id", handler, default=None)
        if handler_id:
            await self._repo.add(request, response, tags=[handler_id])

        return response
""" EPYNET Classes """ from . import epanet2 from .objectcollection import ObjectCollection from .baseobject import BaseObject, lazy_property from .pattern import Pattern class Node(BaseObject): """ Base EPANET Node class """ static_properties = {'elevation': epanet2.EN_ELEVATION} properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE} def __init__(self, uid, network): super(Node, self).__init__(uid, network) self.links = ObjectCollection() def get_index(self, uid): if not self._index: self._index = self.network().ep.ENgetnodeindex(uid) return self._index def set_object_value(self, code, value): return self.network().ep.ENsetnodevalue(self.index, code, value) def get_object_value(self, code): return self.network().ep.ENgetnodevalue(self.index, code) @property def index(self): return self.get_index(self.uid) @lazy_property def coordinates(self): return self.network().ep.ENgetcoord(self.index) # extra functionality @lazy_property def upstream_links(self): """ return a list of upstream links """ if self.results != {}: raise ValueError("This method is only supported for steady state simulations") links = ObjectCollection() for link in self.links: if (link.to_node == self and link.flow >= 1e-3) or (link.from_node == self and link.flow < -1e-3): links[link.uid] = link return links @lazy_property def downstream_links(self): """ return a list of downstream nodes """ if self.results != {}: raise ValueError("This method is only supported for steady state simulations") links = ObjectCollection() for link in self.links: if (link.from_node == self and link.flow >= 1e-3) or (link.to_node == self and link.flow < 1e-3): links[link.uid] = link return links @lazy_property def inflow(self): outflow = 0 for link in self.upstream_links: outflow += abs(link.flow) return outflow @lazy_property def outflow(self): outflow = 0 for link in self.downstream_links: outflow += abs(link.flow) return outflow """ calculates all the water flowing out of the node """ class 
Reservoir(Node): """ EPANET Reservoir Class """ node_type = "Reservoir" class Junction(Node): """ EPANET Junction Class """ static_properties = {'elevation': epanet2.EN_ELEVATION, 'basedemand': epanet2.EN_BASEDEMAND, 'emitter': epanet2.EN_EMITTER} properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE, 'demand': epanet2.EN_DEMAND} node_type = "Junction" @property def pattern(self): pattern_index = int(self.get_property(epanet2.EN_PATTERN)) uid = self.network().ep.ENgetpatternid(pattern_index) return Pattern(uid, self.network()) @pattern.setter def pattern(self, value): if isinstance(value, int): pattern_index = value elif isinstance(value, str): pattern_index = self.network().ep.ENgetpatternindex(value) else: pattern_index = value.index self.network().solved = False self.set_object_value(epanet2.EN_PATTERN, pattern_index) class Tank(Node): """ EPANET Tank Class """ node_type = "Tank" static_properties = {'elevation': epanet2.EN_ELEVATION, 'basedemand': epanet2.EN_BASEDEMAND, 'initvolume': epanet2.EN_INITVOLUME, 'diameter': epanet2.EN_TANKDIAM, 'minvolume': epanet2.EN_MINVOLUME, 'minlevel': epanet2.EN_MINLEVEL, 'maxlevel': epanet2.EN_MAXLEVEL, 'maxvolume': 25, 'tanklevel': epanet2.EN_TANKLEVEL} properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE, 'demand': epanet2.EN_DEMAND, 'volume': 24, 'level': epanet2.EN_TANKLEVEL}
"""Integration tests for ray's plasma_unlimited mode: spilling to disk and
filesystem fallback allocation when the object store is full."""
import numpy as np
import random
import os
import shutil
import platform

import pytest
import ray
from ray.test_utils import wait_for_condition
from ray.internal.internal_api import memory_summary

MB = 1024 * 1024


def _init_ray():
    # 700MB store: two 400MB objects cannot coexist in memory, forcing
    # spilling or fallback allocation in the tests below.
    return ray.init(
        num_cpus=2,
        object_store_memory=700e6,
        _system_config={"plasma_unlimited": True})


def _check_spilled_mb(address, spilled=None, restored=None, fallback=None):
    """Poll the cluster memory summary until the exact spilled/restored/
    fallback MiB figures appear (or assert absence when a kwarg is None)."""
    def ok():
        s = memory_summary(address=address["redis_address"], stats_only=True)
        print(s)
        if restored:
            if "Restored {} MiB".format(restored) not in s:
                return False
        else:
            if "Restored" in s:
                return False
        if spilled:
            if "Spilled {} MiB".format(spilled) not in s:
                return False
        else:
            if "Spilled" in s:
                return False
        if fallback:
            if "Plasma filesystem mmap usage: {} MiB".format(
                    fallback) not in s:
                return False
        else:
            if "Plasma filesystem mmap usage:" in s:
                return False
        return True

    wait_for_condition(ok, timeout=3, retry_interval_ms=1000)


@pytest.mark.skipif(
    platform.system() == "Windows", reason="Need to fix up for Windows.")
def test_fallback_when_spilling_impossible_on_put():
    # Objects pinned by in-scope get() results cannot be spilled, so the
    # second put must fall back to a filesystem allocation.
    try:
        address = _init_ray()
        x1 = ray.put(np.zeros(400 * MB, dtype=np.uint8))
        x1p = ray.get(x1)
        # x2 will be fallback allocated on the filesystem.
        x2 = ray.put(np.zeros(400 * MB, dtype=np.uint8))
        x2p = ray.get(x2)
        del x1p
        del x2p
        _check_spilled_mb(address, spilled=None, fallback=400)
    finally:
        ray.shutdown()


@pytest.mark.skipif(
    platform.system() == "Windows", reason="Need to fix up for Windows.")
def test_spilling_when_possible_on_put():
    # Unpinned objects should be spilled rather than fallback-allocated.
    try:
        address = _init_ray()
        results = []
        for _ in range(5):
            results.append(ray.put(np.zeros(400 * MB, dtype=np.uint8)))
        _check_spilled_mb(address, spilled=1600)
    finally:
        ray.shutdown()


@pytest.mark.skipif(
    platform.system() == "Windows", reason="Need to fix up for Windows.")
def test_fallback_when_spilling_impossible_on_get():
    try:
        address = _init_ray()
        x1 = ray.put(np.zeros(400 * MB, dtype=np.uint8))
        # x1 will be spilled.
        x2 = ray.put(np.zeros(400 * MB, dtype=np.uint8))
        _check_spilled_mb(address, spilled=400)
        # x1 will be restored, x2 will be spilled.
        x1p = ray.get(x1)
        _check_spilled_mb(address, spilled=800, restored=400)
        # x2 will be restored, triggering a fallback allocation.
        x2p = ray.get(x2)
        _check_spilled_mb(address, spilled=800, restored=800, fallback=400)
        del x1p
        del x2p
    finally:
        ray.shutdown()


@pytest.mark.skipif(
    platform.system() == "Windows", reason="Need to fix up for Windows.")
def test_spilling_when_possible_on_get():
    try:
        address = _init_ray()
        x1 = ray.put(np.zeros(400 * MB, dtype=np.uint8))
        # x1 will be spilled.
        x2 = ray.put(np.zeros(400 * MB, dtype=np.uint8))
        _check_spilled_mb(address, spilled=400)
        # x1 will be restored, x2 will be spilled.
        ray.get(x1)
        _check_spilled_mb(address, spilled=800, restored=400)
        # x2 will be restored, spilling x1.
        ray.get(x2)
        _check_spilled_mb(address, spilled=800, restored=800)
    finally:
        ray.shutdown()


@pytest.mark.skipif(
    platform.system() == "Windows", reason="Need to fix up for Windows.")
def test_task_unlimited():
    # Fallback allocation must also work from inside remote tasks.
    try:
        address = _init_ray()
        x1 = ray.put(np.zeros(400 * MB, dtype=np.uint8))
        refs = [x1]
        # x1 is spilled.
        x2 = ray.put(np.zeros(400 * MB, dtype=np.uint8))
        x2p = ray.get(x2)
        sentinel = ray.put(np.zeros(100 * MB, dtype=np.uint8))
        _check_spilled_mb(address, spilled=400)

        @ray.remote
        def consume(refs):
            # triggers fallback allocation, spilling of the sentinel
            ray.get(refs[0])
            # triggers fallback allocation.
            return ray.put(np.zeros(400 * MB, dtype=np.uint8))

        # round 1
        ray.get(consume.remote(refs))
        _check_spilled_mb(address, spilled=500, restored=400, fallback=400)
        del x2p
        del sentinel
    finally:
        ray.shutdown()


@pytest.mark.skipif(
    platform.system() == "Windows", reason="Need to fix up for Windows.")
def test_task_unlimited_multiget_args():
    try:
        address = _init_ray()
        # Too many refs to fit into memory.
        refs = []
        for _ in range(10):
            refs.append(ray.put(np.zeros(200 * MB, dtype=np.uint8)))
        x2 = ray.put(np.zeros(600 * MB, dtype=np.uint8))
        x2p = ray.get(x2)
        _check_spilled_mb(address, spilled=2000)

        @ray.remote
        def consume(refs):
            # Should work without thrashing.
            ray.get(refs)
            return os.getpid()

        ray.get([consume.remote(refs) for _ in range(1000)])
        _check_spilled_mb(address, spilled=2000, restored=2000, fallback=2000)
        del x2p
    finally:
        ray.shutdown()


@pytest.mark.skipif(
    platform.system() == "Windows", reason="Need to fix up for Windows.")
def test_fd_reuse_no_memory_corruption(shutdown_only):
    # Regression test: round-tripping many randomly sized objects between two
    # actors must not corrupt object contents when mmap fds are reused.
    @ray.remote
    class Actor:
        def produce(self, i):
            # Random size triggers diverse allocation/reuse patterns.
            s = int(random.random() * 200)
            z = np.ones(s * 1024 * 1024)
            z[0] = i
            return z

        def consume(self, x, i):
            print(x)
            assert x[0] == i, x

    ray.init(object_store_memory=100e6)
    a = Actor.remote()
    b = Actor.remote()
    for i in range(20):
        x_id = a.produce.remote(i)
        ray.get(b.consume.remote(x_id, i))


@pytest.mark.skipif(
    platform.system() != "Linux",
    reason="Only Linux handles fallback allocation disk full error.")
def test_fallback_allocation_failure(shutdown_only):
    # With the store rooted in /dev/shm, fallback allocations share the same
    # (small) filesystem, so repeated puts must eventually raise.
    ray.init(
        object_store_memory=100e6,
        _temp_dir="/dev/shm",
        _system_config={"plasma_unlimited": True})
    shm_size = shutil.disk_usage("/dev/shm").total
    object_size = max(100e6, shm_size // 5)
    num_exceptions = 0
    refs = []
    for i in range(8):
        try:
            refs.append(ray.put(np.zeros(object_size, dtype=np.uint8)))
        except ray.exceptions.ObjectStoreFullError:
            num_exceptions = num_exceptions + 1
    assert num_exceptions > 0


if __name__ == "__main__":
    import sys
    sys.exit(pytest.main(["-v", __file__]))
from __future__ import print_function import pandas as pd from sklearn.base import TransformerMixin class FamilyCounter(TransformerMixin): def __init__(self, use=True): self.use = use def transform(self, features_raw, **transform_params): if self.use: features = features_raw.copy(deep=True) family = features_raw[['SibSp', 'Parch']]\ .apply(lambda x: x[0] + x[1], axis=1) features.drop('SibSp', axis=1, inplace=True) features.drop('Parch', axis=1, inplace=True) return pd.concat([features, pd.DataFrame({'Family': family})], axis=1) return features_raw def fit(self, X, y=None, **fit_params): return self def get_params(self, *args, **kwargs): return { 'use': self.use } def set_params(self, **params): if 'use' in params: self.use = params.get('use')
__author__ = 'horacioibrahim'

# NOTE(review): Python 2 test suite (md5().update(str(...)) and
# types.StringType only work on py2) exercising the live Iugu API.
import unittest, os
import datetime
from time import sleep, ctime, time
from random import randint
from hashlib import md5
from types import StringType

# python-iugu package modules
import merchant, customers, config, invoices, errors, plans, subscriptions


def check_tests_environment():
    """ For tests is need environment variables to instantiate merchant. Or Edit
    tests file to instantiate merchant.IuguMerchant(account_id=YOUR_ID)
    """
    try:
        # Exported as a module-level global so every TestCase can reuse it.
        global ACCOUNT_ID
        ACCOUNT_ID = os.environ["ACCOUNT_ID"]
    except KeyError:
        raise errors.IuguConfigTestsErrors("Only for tests is required an environment " \
                    "variable ACCOUNT_ID or edit file tests.py")


class TestMerchant(unittest.TestCase):
    # Charges and payment tokens against the live (test-mode) API.

    check_tests_environment() # Checks if enviroment variables defined (runs at class-definition time)

    def setUp(self):
        self.EMAIL_CUSTOMER = "anyperson@ap.com"
        self.client = merchant.IuguMerchant(account_id=ACCOUNT_ID,
                                            api_mode_test=True)

    def tearDown(self):
        pass

    def test_create_payment_token_is_test(self):
        response = self.client.create_payment_token('4111111111111111', 'JA',
                                                    'Silva', '12', '2010', '123')
        self.assertTrue(response.is_test)

    def test_create_payment_token(self):
        response = self.client.create_payment_token('4111111111111111', 'JA',
                                                    'Silva', '12', '2010', '123')
        self.assertEqual(response.status, 200)

    def test_create_charge_credit_card(self):
        item = merchant.Item("Produto My Test", 1, 10000)
        token = self.client.create_payment_token('4111111111111111', 'JA',
                                                 'Silva', '12', '2010', '123')
        charge = self.client.create_charge(self.EMAIL_CUSTOMER, item,
                                           token=token)
        self.assertEqual(charge.is_success(), True)

    def test_create_charge_bank_slip(self):
        # No token => bank-slip charge.
        item = merchant.Item("Produto Bank Slip", 1, 1000)
        charge = self.client.create_charge(self.EMAIL_CUSTOMER, item)
        self.assertEqual(charge.is_success(), True)


class TestCustomer(unittest.TestCase):
    # CRUD round-trips for a single customer.

    def setUp(self):
        # Randomized e-mail so repeated runs don't collide on the server.
        hash_md5 = md5()
        number = randint(1, 50)
        hash_md5.update(str(number))
        email = "{email}@test.com".format(email=hash_md5.hexdigest())
        self.random_user_email = email

    def test_create_customer_basic_info(self):
        consumer = customers.IuguCustomer(api_mode_test=True,
                                          email=self.random_user_email)
        c = consumer.create()
        c.remove()
        self.assertEqual(consumer.email, c.email)

    def test_create_customer_basic_email(self):
        consumer = customers.IuguCustomer()
        c = consumer.create(email=self.random_user_email)
        c.remove()
        self.assertEqual(consumer.email, c.email)

    def test_create_customer_extra_attrs(self):
        consumer = customers.IuguCustomer(api_mode_test=True,
                                          email=self.random_user_email)
        c = consumer.create(name="Mario Lago", notes="It's the man",
                            custom_variables={'local': 'cup'})
        c.remove()
        # API returns custom variables as a list of name/value dicts.
        self.assertEqual(c.custom_variables[0]['name'], "local")
        self.assertEqual(c.custom_variables[0]['value'], "cup")

    def test_get_customer(self):
        consumer = customers.IuguCustomer(api_mode_test=True,
                                          email=self.random_user_email)
        consumer_new = consumer.create()
        c = consumer.get(customer_id=consumer_new.id)
        consumer_new.remove()
        self.assertEqual(consumer.email, c.email)

    def test_set_customer(self):
        consumer = customers.IuguCustomer(api_mode_test=True,
                                          email=self.random_user_email)
        consumer_new = consumer.create(name="Mario Lago", notes="It's the man",
                                       custom_variables={'local': 'cup'})
        c = consumer.set(consumer_new.id, name="Lago Mario")
        consumer_new.remove()
        self.assertEqual(c.name, "Lago Mario")

    def test_customer_save(self):
        consumer = customers.IuguCustomer(api_mode_test=True,
                                          email=self.random_user_email)
        consumer_new = consumer.create(name="Mario Lago", notes="It's the man",
                                       custom_variables={'local': 'cup'})
        # Edit info
        consumer_new.name = "Ibrahim Horacio"
        # Save as instance
        consumer_new.save()
        # verify results
        check_user = consumer.get(consumer_new.id)
        consumer_new.remove()
        self.assertEqual(check_user.name, "Ibrahim Horacio")

    def test_customer_delete_by_id(self):
        consumer = customers.IuguCustomer(api_mode_test=True,
                                          email=self.random_user_email)
        consumer_new = consumer.create(name="Mario Lago", notes="It's the man",
                                       custom_variables={'local': 'cup'})
        consumer.delete(consumer_new.id)
        # Fetching a deleted customer must raise.
        self.assertRaises(errors.IuguGeneralException, consumer.get,
                          consumer_new.id)

    def test_customer_delete_instance(self):
        consumer = customers.IuguCustomer(api_mode_test=True,
                                          email=self.random_user_email)
        consumer_new = consumer.create(name="Mario Lago", notes="It's the man",
                                       custom_variables={'local': 'cup'})
        r = consumer_new.remove()
        self.assertRaises(errors.IuguGeneralException, consumer.get,
                          consumer_new.id)


class TestCustomerLists(unittest.TestCase):
    # getitems() filtering: limit/skip/query/sort/date ranges.

    def setUp(self):
        hash_md5 = md5()
        number = randint(1, 50)
        hash_md5.update(str(number))
        email = "{email}@test.com".format(email=hash_md5.hexdigest())
        self.random_user_email = email
        self.c = customers.IuguCustomer(api_mode_test=True,
                                        email=self.random_user_email)
        # creating customers for tests with lists
        p1, p2, p3 = "Andrea", "Bruna", "Carol"
        self.one = self.c.create(name=p1, notes="It's the man",
                                 custom_variables={'local': 'cup'})
        # I'm not happy with it (sleep), but was need. This certainly occurs because
        # time data is not a timestamp.
        sleep(1)
        self.two = self.c.create(name=p2, notes="It's the man",
                                 custom_variables={'local': 'cup'})
        sleep(1)
        self.three = self.c.create(name=p3, notes="It's the man",
                                   custom_variables={'local': 'cup'})
        sleep(1)
        self.p1, self.p2, self.p3 = p1, p2, p3

    def tearDown(self):
        self.one.remove()
        self.two.remove()
        self.three.remove()

    def test_getitems(self):
        customers_list = self.c.getitems()
        self.assertEqual(type(customers_list), list)

    def test_getitems_limit(self):
        # get items with auto DESC order
        customers_list = self.c.getitems(limit=2)
        self.assertEqual(len(customers_list), 2)

    def test_getitems_start(self):
        # get items with auto DESC order
        sleep(2)
        customers_list = self.c.getitems(limit=3) # get latest three customers
        reference_customer = customers_list[2].name
        customers_list = self.c.getitems(skip=2)
        self.assertEqual(customers_list[0].name, reference_customer)

    def test_getitems_query_by_match_in_name(self):
        # Salted improbable term so the query can only match what we create.
        hmd5 = md5()
        hmd5.update(ctime(time()))
        salt = hmd5.hexdigest()
        term = 'name_inexistent_or_improbable_here_{salt}'.format(salt=salt)
        # test value/term in >>name<<
        customer = self.c.create(name=term)
        sleep(2)
        items = self.c.getitems(query=term) # assert valid because name
        customer.remove()
        self.assertEqual(items[0].name, term)

    def test_getitems_query_by_match_in_notes(self):
        hmd5 = md5()
        hmd5.update(ctime(time()))
        salt = hmd5.hexdigest()
        term = 'name_inexistent_or_improbable_here_{salt}'.format(salt=salt)
        # test value/term in >>notes<<
        customer = self.c.create(name="Sub Zero", notes=term)
        sleep(2)
        items = self.c.getitems(query=term)
        customer.remove()
        self.assertEqual(items[0].notes, term)

    def test_getitems_query_by_match_in_email(self):
        hmd5 = md5()
        hmd5.update(ctime(time()))
        salt = hmd5.hexdigest()
        term = 'name_inexistent_or_improbable_here_{salt}'.format(salt=salt)
        # test value/term in >>email<<
        email = term + '@email.com'
        self.c.email = email
        customer = self.c.create()
        sleep(2)
        items = self.c.getitems(query=term)
        customer.remove()
        self.assertIn(term, items[0].email)
# Uncomment/comment the next one line to disable/enable the test @unittest.skip("Database of webservice is not empty") def test_getitems_sort(self): sleep(1) # Again. It's need # Useful to test database with empty data (previous data, old tests) customers_list = self.c.getitems(sort="name") # monkey skip if len(customers_list) < 4: self.assertEqual(customers_list[0].name, self.p1) else: raise TypeError("API Database is not empty. This test isn't useful. " \ "Use unittest.skip() before this method.") def test_getitems_created_at_from(self): sleep(1) customers_list = self.c.getitems(created_at_from=self.three.created_at) self.assertEqual(customers_list[0].id, self.three.id) # Uncomment the next one line to disable the test # @unittest.skip("Real-time interval not reached") def test_getitems_created_at_to(self): sleep(1) customers_list = self.c.getitems(created_at_to=self.one.created_at) self.assertEqual(customers_list[0].id, self.three.id) def test_getitems_updated_since(self): # get items with auto DESC order sleep(1) customers_list = self.c.getitems(updated_since=self.three.created_at) self.assertEqual(customers_list[0].id, self.three.id) class TestCustomerPayments(unittest.TestCase): def setUp(self): hash_md5 = md5() number = randint(1, 50) hash_md5.update(str(number)) email = "{email}@test.com".format(email=hash_md5.hexdigest()) self.random_user_email = email self.client = customers.IuguCustomer(email="test@testmail.com") self.customer = self.client.create() self.instance_payment = self.customer.payment.create(description="New payment method", number='4111111111111111', verification_value=123, first_name="Joao", last_name="Maria", month=12, year=2014) def tearDown(self): self.instance_payment.remove() self.customer.remove() # if you remove customer also get payment def test_create_payment_method_new_user_by_create(self): """ Test create payment method to new recent user returned by create() of IuguCustomer """ instance_payment = 
self.customer.payment.create(description="New payment method", number='4111111111111111', verification_value=123, first_name="Joao", last_name="Maria", month=12, year=2014) instance_payment.remove() self.assertTrue(isinstance(instance_payment, customers.IuguPaymentMethod)) def test_create_payment_method_existent_user_by_get(self): """ Test create payment method of existent user returned by get() of IuguCustomer. """ new_customer = self.client.create() # Test with user from get() existent_customer = self.client.get(new_customer.id) instance_payment = existent_customer.payment.create(description="New payment method", number='4111111111111111', verification_value=123, first_name="Joao", last_name="Maria", month=12, year=2015) instance_payment.remove() self.assertTrue(isinstance(instance_payment, customers.IuguPaymentMethod)) def test_create_payment_method_existent_user_by_getitems(self): """ Test create payment method of existent user returned by getitems() of IuguCustomer """ # Test with user from getitems() customers_list = self.client.getitems() c_0 = customers_list[0] instance_payment = c_0.payment.create(description="New payment method", number='4111111111111111', verification_value=123, first_name="Joao", last_name="Maria", month=12, year=2016) instance_payment.remove() self.assertTrue(isinstance(instance_payment, customers.IuguPaymentMethod)) def test_create_payment_method_non_existent_user_by_instance(self): """ Test create payment method to instance's user before it was created in API. So without ID. """ create = self.client.payment.create self.assertRaises(errors.IuguPaymentMethodException, create, description="New payment method", number='4111111111111111', verification_value=123, first_name="Joao", last_name="Maria", month=12, year=2016) def test_create_payment_method_raise_general(self): # Create payment method without data{} where API returns error. 
customer = self.client.create() self.assertRaises(errors.IuguGeneralException, customer.payment.create, description="Second payment method") customer.remove() def test_get_payment_method_by_payment_id_customer_id(self): # Test get payment based payment_id and customer_id id = self.instance_payment.id # two args passed payment = self.client.payment.get(id, customer_id=self.customer.id) self.assertTrue(isinstance(payment, customers.IuguPaymentMethod)) def test_get_payment_by_customer(self): # Test get payment by instance's customer (existent in API) id = self.instance_payment.id # one arg passed. user is implicit to customer payment = self.customer.payment.get(id) self.assertTrue(isinstance(payment, customers.IuguPaymentMethod)) def test_set_payment_by_payment_id_customer_id(self): # Changes payment method base payment_id and customer_id id = self.instance_payment.id # two args passed payment = self.client.payment.set(id, "New Card Name", customer_id=self.customer.id) self.assertTrue(isinstance(payment, customers.IuguPaymentMethod)) payment_test = self.customer.payment.get(payment.id) self.assertEqual(payment_test.description, payment.description) def test_set_payment_by_customer(self): # Changes payment method base payment_id of an intance's customer id = self.instance_payment.id # one arg passed. user is implicit to customer payment = self.customer.payment.set(id, "New Card Name") self.assertTrue(isinstance(payment, customers.IuguPaymentMethod)) payment_test = self.customer.payment.get(payment.id) self.assertEqual(payment_test.description, payment.description) def test_set_payment_by_customer_by_save(self): """ Changes payment method of an instance's payment no payment_id or no customer_id is need""" self.instance_payment.description = "New Card Name" # no args passed. 
To payment method instance this is implicit payment = self.instance_payment.save() self.assertTrue(isinstance(payment, customers.IuguPaymentMethod)) payment_test = self.customer.payment.get(payment.id) self.assertEqual(payment_test.description, payment.description) def test_set_payment_remove(self): """ Changes payment method of an instance's payment no payment_id or no customer_id is need""" instance_payment = self.customer.payment.create(description="New payment method", number='4111111111111111', verification_value=123, first_name="Joao", last_name="Maria", month=12, year=2014) instance_payment.remove() # Try get payment already removed payment_test = self.customer.payment.get # copy method self.assertRaises(errors.IuguGeneralException, payment_test, instance_payment.id) def test_set_payment_remove_by_attrs(self): """ """ instance_payment = self.customer.payment instance_payment.payment_data.description = "New payment method" instance_payment.payment_data.number = number='4111111111111111' instance_payment.payment_data.verification_value = 123 instance_payment.payment_data.first_name = "Joao" instance_payment.payment_data.last_name = "Silva" instance_payment.payment_data.month = 12 instance_payment.payment_data.year = 2015 instance_payment = instance_payment.create(description="Meu cartao") instance_payment.remove() self.assertRaises(errors.IuguGeneralException, instance_payment.get, instance_payment.id) def test_getitems_payments(self): payment_one = self.customer.payment.create(description="New payment One", number='4111111111111111', verification_value=123, first_name="World", last_name="Cup", month=12, year=2014) payment_two = self.customer.payment.create(description="New payment Two", number='4111111111111111', verification_value=123, first_name="Is a ", last_name="Problem", month=12, year=2015) payment_three = self.customer.payment.create(description="New payment Three", number='4111111111111111', verification_value=123, first_name="To Brazil", 
            last_name="Worry", month=12, year=2015)
        list_of_payments = self.customer.payment.getitems()
        self.assertTrue(isinstance(list_of_payments, list))
        self.assertTrue(isinstance(list_of_payments[0],
                                   customers.IuguPaymentMethod))


class TestInvoice(unittest.TestCase):
    # Invoice lifecycle against the live API: create/edit/cancel/refund/list.

    # Due date in the API's expected dd/mm/yyyy format, fixed at class load.
    TODAY = datetime.date.today().strftime("%d/%m/%Y")
    check_tests_environment() # Checks if enviroment variables defined (runs at class-definition time)

    def setUp(self):
        hash_md5 = md5()
        number = randint(1, 50)
        hash_md5.update(str(number))
        email = "{email}@test.com".format(email=hash_md5.hexdigest())
        self.customer_email = email
        # create a customer for tests
        c = customers.IuguCustomer()
        self.consumer = c.create(email="client@customer.com")
        # create a invoice
        item = merchant.Item("Prod 1", 1, 1190)
        self.item = item
        self.invoice_obj = invoices.IuguInvoice(email=self.customer_email,
                                                item=item, due_date=self.TODAY)
        self.invoice = self.invoice_obj.create(draft=True)
        # to tests for refund
        self.EMAIL_CUSTOMER = "anyperson@ap.com"
        self.client = merchant.IuguMerchant(account_id=ACCOUNT_ID,
                                            api_mode_test=True)

    def tearDown(self):
        if self.invoice.id: # if id is None already was removed
            self.invoice.remove()
        self.consumer.remove()

    def test_invoice_raise_required_email(self):
        i = invoices.IuguInvoice()
        self.assertRaises(errors.IuguInvoiceException, i.create,
                          due_date="30/11/2020", items=self.item)

    def test_invoice_raise_required_due_date(self):
        i = invoices.IuguInvoice()
        self.assertRaises(errors.IuguInvoiceException, i.create,
                          email="h@gmail.com", items=self.item)

    def test_invoice_raise_required_items(self):
        i = invoices.IuguInvoice()
        self.assertRaises(errors.IuguInvoiceException, i.create,
                          due_date="30/11/2020", email="h@gmail.com")

    def test_invoice_create_basic(self):
        self.assertTrue(isinstance(self.invoice, invoices.IuguInvoice))

    def test_invoice_with_customer_id(self):
        res = self.invoice_obj.create(customer_id=self.consumer.id)
        self.assertEqual(res.customer_id, self.consumer.id)

    def test_invoice_create_all_fields_as_draft(self):
        response = self.invoice_obj.create(
            draft=True, return_url='http://hipy.co/success',
            expired_url='http://hipy.co/expired',
            notification_url='http://hipy.co/webhooks',
            tax_cents=200, discount_cents=500,
            customer_id=self.consumer.id, ignore_due_email=True)
        self.assertTrue(isinstance(response, invoices.IuguInvoice))
        existent_invoice = invoices.IuguInvoice.get(response.id)
        self.assertEqual(existent_invoice.expiration_url,
                         response.expiration_url)
        response.remove()

    def test_invoice_create_all_fields_as_pending(self):
        response = self.invoice_obj.create(
            draft=False, return_url='http://example.com/success',
            expired_url='http://example.com/expired',
            notification_url='http://example.com/webhooks',
            tax_cents=200, discount_cents=500,
            customer_id=self.consumer.id, ignore_due_email=True)
        self.assertTrue(isinstance(response, invoices.IuguInvoice))
        # The comments below was put because API don't exclude
        # an invoice that was paid. So only does refund.
        # response.remove()

    def test_invoice_created_check_id(self):
        self.assertIsNotNone(self.invoice.id)

    def test_invoice_create_with_custom_variables_in_create(self):
        invoice = self.invoice_obj.create(draft=True,
                                          custom_variables={'city': 'Brasilia'})
        self.assertEqual(invoice.custom_variables[0]["name"], "city")
        self.assertEqual(invoice.custom_variables[0]["value"], "Brasilia")
        invoice.remove()

    def test_invoice_create_with_custom_variables_in_set(self):
        invoice = self.invoice_obj.set(invoice_id=self.invoice.id,
                                       custom_variables={'city': 'Brasilia'})
        self.assertEqual(invoice.custom_variables[0]["name"], "city")
        self.assertEqual(invoice.custom_variables[0]["value"], "Brasilia")

    def test_invoice_get_one(self):
        # test start here
        res = invoices.IuguInvoice.get(self.invoice.id)
        self.assertEqual(res.items[0].description, "Prod 1")

    def test_invoice_create_as_draft(self):
        self.assertEqual(self.invoice.status, 'draft')

    def test_invoice_edit_email_with_set(self):
        id = self.invoice.id
        invoice_edited = self.invoice_obj.set(invoice_id=id,
                                              email="now@now.com")
        self.assertEqual(invoice_edited.email, u"now@now.com")

    def test_invoice_edit_return_url_with_set(self):
        return_url = "http://hipy.co"
        id = self.invoice.id
        invoice_edited = self.invoice_obj.set(invoice_id=id,
                                              return_url=return_url)
        self.assertEqual(invoice_edited.return_url, return_url)

    @unittest.skip("It isn't support by API")
    def test_invoice_edit_expired_url_with_set(self):
        expired_url = "http://hipy.co"
        id = self.invoice.id
        invoice_edited = self.invoice_obj.set(invoice_id=id,
                                              expired_url=expired_url)
        self.assertEqual(invoice_edited.expiration_url, expired_url)

    def test_invoice_edit_notification_url_with_set(self):
        notification_url = "http://hipy.co"
        id = self.invoice.id
        invoice_edited = self.invoice_obj.set(invoice_id=id,
                                              notification_url=notification_url)
        self.assertEqual(invoice_edited.notification_url, notification_url)

    def test_invoice_edit_tax_cents_with_set(self):
        tax_cents = 200
        id = self.invoice.id
        invoice_edited = self.invoice_obj.set(invoice_id=id,
                                              tax_cents=tax_cents)
        self.assertEqual(invoice_edited.tax_cents, tax_cents)

    def test_invoice_edit_discount_cents_with_set(self):
        discount_cents = 500
        id = self.invoice.id
        invoice_edited = self.invoice_obj.set(invoice_id=id,
                                              discount_cents=discount_cents)
        self.assertEqual(invoice_edited.discount_cents, discount_cents)

    def test_invoice_edit_customer_id_with_set(self):
        customer_id = self.consumer.id
        id = self.invoice.id
        invoice_edited = self.invoice_obj.set(invoice_id=id,
                                              customer_id=customer_id)
        self.assertEqual(invoice_edited.customer_id, customer_id)

    @unittest.skip("without return from API of the field/attribute ignore_due_email")
    def test_invoice_edit_ignore_due_email_with_set(self):
        ignore_due_email = True
        id = self.invoice.id
        invoice_edited = self.invoice_obj.set(invoice_id=id,
                                              ignore_due_email=ignore_due_email)
        self.assertEqual(invoice_edited.ignore_due_email, ignore_due_email)

    # TODO: def test_invoice_edit_subscription_id_with_set(self):
    # TODO: test_invoice_edit_credits_with_set(self):

    def test_invoice_edit_due_date_with_set(self):
        due_date = self.TODAY
        # API echoes the due date back in ISO (yyyy-mm-dd) format.
        response_from_api = str(datetime.date.today())
        id = self.invoice.id
        invoice_edited = self.invoice_obj.set(invoice_id=id, due_date=due_date)
        self.assertEqual(invoice_edited.due_date, response_from_api)

    def test_invoice_edit_items_with_set(self):
        self.invoice.items[0].description = "Prod Fixed Text and Value"
        id = self.invoice.id
        items = self.invoice.items[0]
        invoice_edited = self.invoice_obj.set(invoice_id=id, items=items)
        self.assertEqual(invoice_edited.items[0].description,
                         "Prod Fixed Text and Value")

    def test_invoice_changed_items_with_save(self):
        self.invoice.items[0].description = "Prod Saved by Instance"
        # inv_one is instance not saved. Now, we have invoice saved
        # and invoice_edited that is the response of webservice
        res = self.invoice.save()
        self.assertEqual(res.items[0].description, "Prod Saved by Instance")

    def test_invoice_destroy_item(self):
        # Removes one item, the unique, created in invoice
        self.invoice.items[0].remove()
        re_invoice = self.invoice.save()
        self.assertEqual(re_invoice.items, None)

    def test_invoice_remove(self):
        # wait webservice response time
        sleep(3)
        self.invoice.remove()
        self.assertEqual(self.invoice.id, None)

    def test_invoice_get_and_save(self):
        inv = invoices.IuguInvoice.get(self.invoice.id)
        inv.email = "test_save@save.com"
        obj = inv.save()
        self.assertEqual(obj.email, inv.email)

    def test_invoice_getitems_and_save(self):
        sleep(2) # wating...API to persist data
        inv = None
        invs = invoices.IuguInvoice.getitems()
        for i in invs:
            if i.id == self.invoice.id:
                inv = i
        inv.email = "test_save@save.com"
        obj = inv.save()
        self.assertEqual(obj.email, inv.email)

    def test_invoice_cancel(self):
        invoice = self.invoice_obj.create(draft=False)
        re_invoice = invoice.cancel()
        self.assertEqual(re_invoice.status, "canceled")
        invoice.remove()

    #@unittest.skip("Support only invoice paid") # TODO
    def test_invoice_refund(self):
        # Pays an invoice via a credit-card charge, then refunds it.
        item = merchant.Item("Produto My Test", 1, 10000)
        token = self.client.create_payment_token('4111111111111111', 'JA',
                                                 'Silva', '12', '2010', '123')
        charge = self.client.create_charge(self.EMAIL_CUSTOMER, item,
                                           token=token)
        invoice = invoices.IuguInvoice.get(charge.invoice_id)
        re_invoice = invoice.refund()
        self.assertEqual(re_invoice.status, "refunded")

    def test_invoice_getitems(self):
        # wait webservice response time
        sleep(3)
        l = invoices.IuguInvoice.getitems()
        self.assertIsInstance(l, list)
        self.assertIsInstance(l[0], invoices.IuguInvoice)

    def test_invoice_getitems_limit(self):
        invoice_2 = self.invoice_obj.create()
        sleep(3)
        l = invoices.IuguInvoice.getitems(limit=2)
        # The comments below was put because API don't exclude
        # an invoice that was paid. So only does refund.
        # invoice_2.remove()
        self.assertEqual(len(l), 2)

    def test_invoice_getitems_skip(self):
        invoice_1 = self.invoice_obj.create()
        invoice_2 = self.invoice_obj.create()
        invoice_3 = self.invoice_obj.create()
        sleep(3)
        l1 = invoices.IuguInvoice.getitems(limit=3)
        keep_checker = l1[2]
        l2 = invoices.IuguInvoice.getitems(skip=2)
        skipped = l2[0]
        # after skip 2 the first must be keep_checker
        # The comments below was put because API don't exclude
        # an invoice that was paid. So only does refund.
        # invoice_1.remove()
        # invoice_2.remove()
        # invoice_3.remove()
        self.assertEqual(keep_checker.id, skipped.id)

    # TODO: def test_invoice_getitems_created_at_from(self):
    # TODO:def test_invoice_getitems_created_at_to(self):
    # TODO: def test_invoice_getitems_updated_since(self):

    def test_invoice_getitems_query(self):
        res = self.invoice_obj.create(customer_id=self.consumer.id)
        sleep(3)
        queryset = invoices.IuguInvoice.getitems(query=res.id)
        self.assertEqual(queryset[0].customer_id, res.customer_id)
        # The comments below was put because API don't exclude
        # an invoice that was paid. So only does refund.
        # res.remove()

    def test_invoice_getitems_customer_id(self):
        res = self.invoice_obj.create(customer_id=self.consumer.id)
        sleep(3)
        queryset = invoices.IuguInvoice.getitems(query=res.id)
        self.assertEqual(queryset[0].customer_id, res.customer_id)
        # The comments below was put because API don't exclude
        # an invoice that was paid. So only does refund.
        # res.remove()

    @unittest.skip("API no support sort (in moment)")
    def test_invoice_getitems_sort(self):
        invoice_1 = self.invoice_obj.create()
        invoice_2 = self.invoice_obj.create()
        invoice_3 = self.invoice_obj.create()
        sleep(3)
        l1 = invoices.IuguInvoice.getitems(limit=3)
        keep_checker = l1[2]
        l2 = invoices.IuguInvoice.getitems(limit=3, sort="id")
        skipped = l2[0]
        # after skip 2 the first must be keep_checker
        # The comments below was put because API don't exclude
        # an invoice that was paid. So only does refund.
        # invoice_1.remove()
        # invoice_2.remove()
        # invoice_3.remove()
        self.assertEqual(keep_checker.id, skipped.id)


class TestPlans(unittest.TestCase):
    # Plan CRUD plus nested Feature editing, via set() and save().

    def setUp(self):
        # Random identifier (4-8 hex chars) because plan identifiers can't repeat.
        hash_md5 = md5()
        seed = randint(1, 199)
        variation = randint(4, 8)
        hash_md5.update(str(seed))
        identifier = hash_md5.hexdigest()[:variation]
        self.identifier = identifier # random because can't be repeated
        plan = plans.IuguPlan()
        self.plan = plan.create(name="My SetUp Plan",
                                identifier=self.identifier, interval=1,
                                interval_type="months", currency="BRL",
                                value_cents=1500)
        # features
        self.features = plans.Feature()
        self.features.name = "Add feature %s" % self.identifier
        self.features.identifier = self.identifier
        self.features.value = 11

    def tearDown(self):
        self.plan.remove()

    def test_plan_create(self):
        plan = plans.IuguPlan()
        identifier = self.identifier + "salt"
        new_plan = plan.create(name="My first lib Plan",
                               identifier=identifier, interval=1,
                               interval_type="months", currency="BRL",
                               value_cents=1000)
        self.assertIsInstance(new_plan, plans.IuguPlan)
        self.assertTrue(new_plan.id)
        new_plan.remove()

    def test_plan_create_without_required_fields(self):
        plan = plans.IuguPlan()
        self.assertRaises(errors.IuguPlansException, plan.create)

    def test_plan_create_features(self):
        salt = randint(1, 99)
        identifier = self.identifier + str(salt)
        # init object
        plan = plans.IuguPlan(name="Plan with features",
                              identifier=identifier, interval=1,
                              interval_type="months", currency="BRL",
                              value_cents=1000)
        plan.features = [self.features,]
        new_plan_with_features = plan.create()
        self.assertIsInstance(new_plan_with_features.features[0],
                              plans.Feature)
        self.assertEqual(new_plan_with_features.features[0].value,
                         self.features.value)
        new_plan_with_features.remove()

    def test_plan_get(self):
        plan_id = self.plan.id
        plan = plans.IuguPlan.get(plan_id)
        self.assertEqual(self.identifier, plan.identifier)

    def test_plan_get_identifier(self):
        plan = plans.IuguPlan.get_by_identifier(self.identifier)
        self.assertEqual(self.identifier, plan.identifier)

    def test_plan_remove(self):
        plan = plans.IuguPlan()
        new_plan = plan.create(name="Remove me", identifier="to_remove",
                               interval=1, interval_type="months",
                               currency="BRL", value_cents=2000)
        removed_id = new_plan.id
        new_plan.remove()
        self.assertRaises(errors.IuguGeneralException, plans.IuguPlan.get,
                          removed_id)

    def test_plan_edit_changes_name_by_set(self):
        plan_id = self.plan.id
        new_name = "New name %s" % self.identifier
        modified_plan = self.plan.set(plan_id, name=new_name)
        self.assertEqual(new_name, modified_plan.name)

    def test_plan_edit_changes_identifier_by_set(self):
        plan_id = self.plan.id
        new_identifier = "New identifier %s" % self.identifier
        modified_plan = self.plan.set(plan_id, identifier=new_identifier)
        self.assertEqual(new_identifier, modified_plan.identifier)

    def test_plan_edit_changes_interval_by_set(self):
        plan_id = self.plan.id
        new_interval = 3
        modified_plan = self.plan.set(plan_id, interval=new_interval)
        self.assertEqual(new_interval, modified_plan.interval)

    def test_plan_edit_changes_currency_by_set(self):
        # API only supports BRL, so a foreign currency must raise.
        plan_id = self.plan.id
        new_currency = "US"
        self.assertRaises(errors.IuguPlansException,
                          self.plan.set, plan_id, currency=new_currency)

    def test_plan_edit_changes_value_cents_by_set(self):
        plan_id = self.plan.id
        value_cents = 3000
        modified_plan = self.plan.set(plan_id, value_cents=value_cents)
        self.assertEqual(value_cents, modified_plan.prices[0].value_cents)

    def test_plan_edit_changes_features_name_by_set(self):
        salt = randint(1, 99)
        identifier = self.identifier + str(salt)
        # creating a plan with features
        plan = plans.IuguPlan()
        plan.features = [self.features,]
        plan.name = "Changes Features Name"
        plan.identifier = identifier # workaround: setUp already creates
        plan.interval = 2
        plan.interval_type = "weeks"
        plan.currency = "BRL"
        plan.value_cents = 3000
        plan_returned = plan.create()
        # to change features name where features already has an id
        changed_features = plan_returned.features
        changed_features[0].name = "Changed Name of Features"
        # return plan changed
        plan_changed = plan.set(plan_returned.id,
                                features=[changed_features[0]])
        self.assertEqual(plan_changed.features[0].name,
                         plan_returned.features[0].name)
        plan_returned.remove()

    def test_plan_edit_changes_features_identifier_by_set(self):
        salt = randint(1, 99)
        identifier = self.identifier + str(salt)
        # creating a plan with features
        plan = plans.IuguPlan()
        plan.features = [self.features,]
        plan.name = "Changes Features Identifier"
        plan.identifier = identifier # workaround: setUp already creates
        plan.interval = 2
        plan.interval_type = "weeks"
        plan.currency = "BRL"
        plan.value_cents = 3000
        plan_returned = plan.create()
        # to change features name where features already has an id
        changed_features = plan_returned.features
        changed_features[0].identifier = "Crazy_Change"
        # return plan changed
        plan_changed = plan.set(plan_returned.id,
                                features=[changed_features[0]])
        self.assertEqual(plan_changed.features[0].identifier,
                         plan_returned.features[0].identifier)
        plan_returned.remove()

    def test_plan_edit_changes_features_value_by_set(self):
        salt = randint(1, 99)
        identifier = self.identifier + str(salt)
        # creating a plan with features
        plan = plans.IuguPlan()
        plan.features = [self.features,]
        plan.name = "Changes Features Identifier"
        plan.identifier = identifier # workaround: setUp already creates
        plan.interval = 2
        plan.interval_type = "weeks"
        plan.currency = "BRL"
        plan.value_cents = 3000
        plan_returned = plan.create()
        # to change features name where features already has an id
        changed_features = plan_returned.features
        changed_features[0].value = 10000
        # return plan changed
        plan_changed = plan.set(plan_returned.id,
                                features=[changed_features[0]])
        self.assertEqual(plan_changed.features[0].value,
                         plan_returned.features[0].value)
        plan_returned.remove()

    def test_plan_edit_changes_name_by_save(self):
        self.plan.name = "New name %s" % self.identifier
        response = self.plan.save()
        self.assertEqual(response.name, self.plan.name)

    def test_plan_edit_changes_identifier_by_save(self):
        seed = randint(1, 999)
        self.plan.identifier = "New_identifier_%s_%s" % (self.identifier, seed)
        response = self.plan.save()
        self.assertEqual(response.identifier, self.plan.identifier)

    def test_plan_edit_changes_interval_by_save(self):
        self.plan.interval = 4
        response = self.plan.save()
        self.assertEqual(response.interval, 4)

    def test_plan_edit_changes_currency_by_save(self):
        # API only support BRL
        self.plan.currency = "US"
        # response = self.plan.save()
        self.assertRaises(errors.IuguPlansException, self.plan.save)

    def test_plan_edit_changes_value_cents_by_save(self):
        self.plan.value_cents = 4000
        response = self.plan.save()
        self.assertEqual(response.prices[0].value_cents, 4000)

    # TODO: test prices attribute of plan in level one

    def test_plan_edit_changes_features_name_by_save(self):
        salt = randint(1, 99)
        identifier = self.identifier + str(salt)
        # creating a plan with features
        plan = plans.IuguPlan()
        plan.features = [self.features,]
        plan.name = "Changes Features by Save"
        plan.identifier = identifier # workaround: setUp already creates
        plan.interval = 2
        plan.interval_type = "weeks"
        plan.currency = "BRL"
        plan.value_cents = 3000
        plan_returned = plan.create()
        # to change features name where features already has an id
        to_change_features = plan_returned.features
        to_change_features[0].name = "Features New by Save"
        # return plan changed and to save instance
        plan_returned.features = [to_change_features[0]]
        plan_changed = plan_returned.save()
        self.assertEqual(plan_changed.features[0].name,
                         "Features New by Save")
        plan_returned.remove()

    def test_plan_edit_changes_features_identifier_by_save(self):
        salt = randint(1, 99)
        identifier = self.identifier + str(salt)
        # creating a plan with features
        plan = plans.IuguPlan()
        plan.features = [self.features,]
        plan.name = "Changes Features by Save"
        plan.identifier = identifier # workaround: setUp already creates
        plan.interval = 2
        plan.interval_type = "weeks"
        plan.currency = "BRL"
        plan.value_cents = 3000
        plan_returned = plan.create()
        # to change features name where features already has an id
        to_change_features = plan_returned.features
        to_change_features[0].identifier = "Crazy_Changed"
        # return plan changed and to save instance
        plan_returned.features = [to_change_features[0]]
        plan_changed = plan_returned.save()
        self.assertEqual(plan_changed.features[0].identifier,
                         "Crazy_Changed")
        plan_returned.remove()

    def test_plan_edit_changes_features_value_by_save(self):
        salt = randint(1, 99)
        identifier = self.identifier + str(salt)
        # creating a plan with features
        plan = plans.IuguPlan()
        plan.features = [self.features,]
        plan.name = "Changes Features by Save"
        plan.identifier = identifier # workaround: setUp already creates
        plan.interval = 2
        plan.interval_type = "weeks"
        plan.currency = "BRL"
        plan.value_cents = 3000
        plan_returned = plan.create()
        # to change features name where features already has an id
        to_change_features = plan_returned.features
        to_change_features[0].value = 8000
        # return plan changed and to save instance
        plan_returned.features = [to_change_features[0]]
        plan_changed = plan_returned.save()
        self.assertEqual(plan_changed.features[0].value, 8000)
        plan_returned.remove()

    def test_plan_getitems_filter_limit(self):
        # creating a plan with features
        salt = str(randint(1, 199)) + self.identifier
        plan = plans.IuguPlan()
        plan_a = plan.create(name="Get Items...", identifier=salt, interval=2,
                             interval_type="weeks", currency="BRL",
                             value_cents=1000)
        salt = str(randint(1, 199)) + self.identifier
        plan_b = plan.create(name="Get Items...", identifier=salt, interval=2,
                             interval_type="weeks", currency="BRL",
                             value_cents=2000)
        salt = str(randint(1, 199)) + self.identifier
        plan_c = plan.create(name="Get Items...", identifier=salt, interval=2,
                             interval_type="weeks", currency="BRL",
                             value_cents=3000)
        all_plans = plans.IuguPlan.getitems(limit=3)
        self.assertEqual(len(all_plans), 3)
        plan_a.remove()
        plan_b.remove()
        plan_c.remove()

    def test_plan_getitems_filter_skip(self):
        # creating a plan with features
        salt = str(randint(1, 199)) + self.identifier
        plan = plans.IuguPlan()
        plan_a = plan.create(name="Get Items...", identifier=salt, interval=2,
                             interval_type="weeks", currency="BRL",
                             value_cents=1000)
        salt = str(randint(1, 199)) + self.identifier
        plan_b = plan.create(name="Get Items...", identifier=salt, interval=2,
                             interval_type="weeks", currency="BRL",
                             value_cents=2000)
        salt = str(randint(1, 199)) + self.identifier
        plan_c = plan.create(name="Get Items...", identifier=salt, interval=2,
                             interval_type="weeks", currency="BRL",
                             value_cents=3000)
        sleep(2)
        all_plans_limit = plans.IuguPlan.getitems(limit=3)
        all_plans_skip = plans.IuguPlan.getitems(skip=2, limit=3)
        self.assertEqual(all_plans_limit[2].id, all_plans_skip[0].id)
        plan_a.remove()
        plan_b.remove()
        plan_c.remove()

    def test_plan_getitems_filter_query(self):
        salt = str(randint(1, 199)) + self.identifier
        name_repeated = salt
        plan = plans.IuguPlan()
        plan_a = plan.create(name=name_repeated, identifier=salt, interval=2,
                             interval_type="weeks", currency="BRL",
                             value_cents=1000)
        salt = str(randint(1, 199)) + self.identifier
        plan_b = plan.create(name=name_repeated, identifier=salt, interval=2,
interval_type="weeks", currency="BRL", value_cents=2000) salt = str(randint(1, 199)) + self.identifier plan_c = plan.create(name=name_repeated, identifier=salt, interval=2, interval_type="weeks", currency="BRL", value_cents=3000) sleep(3) # waiting API to keep data all_filter_query = plans.IuguPlan.getitems(query=name_repeated) self.assertEqual(all_filter_query[0].name, name_repeated) self.assertEqual(len(all_filter_query), 3) plan_a.remove() plan_b.remove() plan_c.remove() #@unittest.skip("TODO support this test") # TODO: def test_plan_getitems_filter_updated_since(self): #@unittest.skip("Sort not work fine. Waiting support of API providers") #def test_plan_getitems_filter_sort(self): class TestSubscriptions(unittest.TestCase): def clean_invoices(self, recent_invoices): """ Removes invoices created in backgrounds of tests """ # The comments below was put because API don't exclude # an invoice that was paid. So only does refund. (API CHANGED) # #if recent_invoices: # invoice = recent_invoices[0] # invoices.IuguInvoice().remove(invoice_id=invoice["id"]) pass def setUp(self): # preparing object... 
seed = randint(1, 10000) md5_hash = md5() md5_hash.update(str(seed)) plan_id_random = md5_hash.hexdigest()[:12] plan_name = "Subs Plan %s" % plan_id_random name = "Ze %s" % plan_id_random email = "{name}@example.com".format(name=plan_id_random) # plans for multiple tests self.plan_new = plans.IuguPlan().create(name=plan_name, identifier=plan_id_random, interval=1, interval_type="weeks", currency="BRL", value_cents=9900) plan_identifier = "plan_for_changes_%s" % plan_id_random self.plan_two = plans.IuguPlan().create(name="Plan Two", identifier=plan_identifier, interval=1, interval_type="weeks", currency="BRL", value_cents=8800) # one client self.customer = customers.IuguCustomer().create(name=name, email=email) # for tests to edit subscriptions subs_obj = subscriptions.IuguSubscription() self.subscription = subs_obj.create(customer_id=self.customer.id, plan_identifier=self.plan_two.identifier) def tearDown(self): # Attempt to delete the invoices created by subscriptions cases # But this not remove all invoices due not recognizable behavior # as the API no forever return recents_invoices for created # invoices # The comments below was put because API don't exclude # an invoice that was paid. So only does refund. (API CHANGED) #### if self.subscription.recent_invoices: #### invoice = self.subscription.recent_invoices[0] #### # to instanciate invoice from list of the invoices returned by API #### invoice_obj = invoices.IuguInvoice.get(invoice["id"]) #### # The comments below was put because API don't exclude #### # an invoice that was paid. So only does refund. 
#### invoice_obj.remove() self.plan_new.remove() self.plan_two.remove() self.subscription.remove() self.customer.remove() def test_subscription_create(self): # Test to create a subscription only client_id and plan_identifier p_obj = subscriptions.IuguSubscription() subscription_new = p_obj.create(self.customer.id, self.plan_new.identifier) self.assertIsInstance(subscription_new, subscriptions.IuguSubscription) self.assertEqual(subscription_new.plan_identifier, self.plan_new.identifier) self.clean_invoices(subscription_new.recent_invoices) subscription_new.remove() def test_subscription_create_with_custom_variables(self): p_obj = subscriptions.IuguSubscription() subscription_new = p_obj.create(self.customer.id, self.plan_new.identifier, custom_variables={'city':'Recife'}) self.assertEqual(subscription_new.custom_variables[0]["name"], "city") self.assertEqual(subscription_new.custom_variables[0]["value"], "Recife") self.clean_invoices(subscription_new.recent_invoices) subscription_new.remove() def test_subscription_set_with_custom_variables(self): p_obj = subscriptions.IuguSubscription() subscription_new = p_obj.set(sid=self.subscription.id, custom_variables={'city':'Recife'}) self.assertEqual(subscription_new.custom_variables[0]["name"], "city") self.assertEqual(subscription_new.custom_variables[0]["value"], "Recife") # self.clean_invoices(subscription_new.recent_invoices) @unittest.skip("API does not support this only_on_charge_success. 
CHANGED") def test_subscription_create_only_on_charge_success_with_payment(self): # Test to create subscriptions with charge only customer = customers.IuguCustomer().create(name="Pay now", email="pay_now@local.com") pay = customer.payment.create(description="Payment X", number="4111111111111111", verification_value='123', first_name="Romario", last_name="Baixo", month=12, year=2018) p_obj = subscriptions.IuguSubscription() new_subscription = p_obj.create(customer.id, self.plan_new.identifier, only_on_charge_success=True) self.assertEqual(new_subscription.recent_invoices[0]["status"], "paid") self.clean_invoices(new_subscription.recent_invoices) new_subscription.remove() customer.remove() def test_subscription_create_only_on_charge_success_less_payment(self): # Test to create subscriptions with charge only p_obj = subscriptions.IuguSubscription() self.assertRaises(errors.IuguGeneralException, p_obj.create, self.customer.id, self.plan_new.identifier, only_on_charge_success=True) def test_subscription_remove(self): # Test to remove subscription p_obj = subscriptions.IuguSubscription() subscription_new = p_obj.create(self.customer.id, self.plan_new.identifier) sid = subscription_new.id self.clean_invoices(subscription_new.recent_invoices) subscription_new.remove() self.assertRaises(errors.IuguGeneralException, subscriptions.IuguSubscription.get, sid) def test_subscription_get(self): subscription = subscriptions.IuguSubscription.get(self.subscription.id) self.assertIsInstance(subscription, subscriptions.IuguSubscription) def test_subscription_getitems(self): subscription_list = subscriptions.IuguSubscription.getitems() self.assertIsInstance(subscription_list[0], subscriptions.IuguSubscription) def test_subscription_getitem_limit(self): client_subscriptions = subscriptions.IuguSubscription() sub_1 = client_subscriptions.create(self.customer.id, self.plan_new.identifier) sub_2 = client_subscriptions.create(self.customer.id, self.plan_new.identifier) sub_3 = 
client_subscriptions.create(self.customer.id, self.plan_new.identifier) sub_4 = client_subscriptions.create(self.customer.id, self.plan_new.identifier) sleep(3) # slower API subscriptions_list = subscriptions.IuguSubscription.getitems(limit=1) self.assertEqual(len(subscriptions_list), 1) self.assertEqual(subscriptions_list[0].id, sub_4.id) self.clean_invoices(sub_1.recent_invoices) self.clean_invoices(sub_2.recent_invoices) self.clean_invoices(sub_3.recent_invoices) self.clean_invoices(sub_4.recent_invoices) a, b, c, d = sub_1.remove(), sub_2.remove(), sub_3.remove(), sub_4.remove() def test_subscription_getitem_skip(self): client_subscriptions = subscriptions.IuguSubscription() sub_1 = client_subscriptions.create(self.customer.id, self.plan_new.identifier) sub_2 = client_subscriptions.create(self.customer.id, self.plan_new.identifier) sub_3 = client_subscriptions.create(self.customer.id, self.plan_new.identifier) sub_4 = client_subscriptions.create(self.customer.id, self.plan_new.identifier) sleep(2) subscriptions_list = subscriptions.IuguSubscription.getitems(skip=1) self.assertEqual(subscriptions_list[0].id, sub_3.id) self.clean_invoices(sub_1.recent_invoices) self.clean_invoices(sub_2.recent_invoices) self.clean_invoices(sub_3.recent_invoices) self.clean_invoices(sub_4.recent_invoices) a, b, c, d = sub_1.remove(), sub_2.remove(), sub_3.remove(), sub_4.remove() # TODO: def test_subscription_getitem_created_at_from(self): def test_subscription_getitem_query(self): term = self.customer.name sleep(3) # very slow API! waiting... subscriptions_list = subscriptions.IuguSubscription.getitems(query=term) self.assertGreaterEqual(len(subscriptions_list), 1) # TODO: def test_subscription_getitem_updated_since(self): @unittest.skip("API not support this. 
No orders is changed") def test_subscription_getitem_sort(self): client_subscriptions = subscriptions.IuguSubscription() sub_1 = client_subscriptions.create(self.customer.id, self.plan_new.identifier) sub_2 = client_subscriptions.create(self.customer.id, self.plan_new.identifier) sub_3 = client_subscriptions.create(self.customer.id, self.plan_new.identifier) sub_4 = client_subscriptions.create(self.customer.id, self.plan_new.identifier) subscriptions_list = subscriptions.IuguSubscription.getitems(sort="-created_at") #self.assertEqual(subscriptions_list[0].id, sub_3.id) self.clean_invoices(sub_1.recent_invoices) self.clean_invoices(sub_2.recent_invoices) self.clean_invoices(sub_3.recent_invoices) self.clean_invoices(sub_4.recent_invoices) a, b, c, d = sub_1.remove(), sub_2.remove(), sub_3.remove(), sub_4.remove() def test_subscription_getitem_customer_id(self): client_subscriptions = subscriptions.IuguSubscription() # previous subscription was created in setUp sub_1 = client_subscriptions.create(self.customer.id, self.plan_new.identifier) sub_2 = client_subscriptions.create(self.customer.id, self.plan_new.identifier) sleep(3) subscriptions_list = subscriptions.IuguSubscription.\ getitems(customer_id=self.customer.id) self.assertEqual(len(subscriptions_list), 3) # sub_1 + sub_2 + setUp self.clean_invoices(sub_1.recent_invoices) self.clean_invoices(sub_2.recent_invoices) a, b = sub_1.remove(), sub_2.remove() def test_subscription_set_plan(self): # Test to change an existent plan in subscription subs = subscriptions.IuguSubscription() subscription = subs.create(self.customer.id, self.plan_new.identifier) sid = subscription.id plan_identifier = self.plan_new.identifier + str("_Newest_ID") # changes to this new plan plan_newest = plans.IuguPlan().create("Plan Name: Newest", plan_identifier, 1, "months", "BRL", 5000) # editing... 
subscription = subscriptions.IuguSubscription().set(sid, plan_identifier=plan_newest.identifier) self.assertEqual(subscription.plan_identifier, plan_identifier) self.clean_invoices(subscription.recent_invoices) subscription.remove() plan_newest.remove() @unittest.skip("API does not support. It returns error 'Subscription Not Found'") def test_subscription_set_customer_id(self): # Test if customer_id changed. Iugu's support (number 782) customer = customers.IuguCustomer().create(name="Cortella", email="mcortella@usp.br") subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, customer_id=customer.id) self.assertEqual(subscription.customer_id, customer.id) customer.remove() def test_subscription_set_expires_at(self): # Test if expires_at was changed subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, expires_at="12/12/2014") self.assertEqual(subscription.expires_at, "2014-12-12") def test_subscription_set_suspended(self): # Test if suspended was changed subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, suspended=True) self.assertEqual(subscription.suspended, True) @unittest.skip("Waiting API developers to support this question") def test_subscription_set_skip_charge(self): # Test if skip_charge was marked print self.subscription.id subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, skip_charge=True) self.assertEqual(subscription.suspended, True) def test_subscription_set_subitems(self): # Test if to insert a new item subitem = merchant.Item("Subitems", 1, 2345) subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, subitems=[subitem,]) self.assertEqual(subscription.subitems[0].description, subitem.description) def test_subscription_set_subitems_description(self): # Test if subitem/item descriptions was changed subitem = merchant.Item("Subitems", 1, 2345) subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, subitems=[subitem,]) 
item_with_id = subscription.subitems[0] item_with_id.description = "Subitems Edited" subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, subitems=[item_with_id,] ) self.assertEqual(subscription.subitems[0].description, item_with_id.description) def test_subscription_set_subitems_price_cents(self): # Test if subitem/item price_cents was changed subitem = merchant.Item("Subitems", 1, 2345) subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, subitems=[subitem,]) item_with_id = subscription.subitems[0] item_with_id.price_cents = 2900 subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, subitems=[item_with_id,] ) self.assertEqual(subscription.subitems[0].price_cents, item_with_id.price_cents) def test_subscription_set_subitems_quantity(self): # Test if subitem/item quantity was changed subitem = merchant.Item("Subitems", 1, 2345) subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, subitems=[subitem,]) item_with_id = subscription.subitems[0] item_with_id.quantity = 4 subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, subitems=[item_with_id,] ) self.assertEqual(subscription.subitems[0].quantity, item_with_id.quantity) def test_subscription_set_subitems_recurrent(self): # Test if subitem/item recurrent was changed subitem = merchant.Item("Subitems", 1, 2345) subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, subitems=[subitem,]) item_with_id = subscription.subitems[0] item_with_id.recurrent = True subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, subitems=[item_with_id,]) self.assertEqual(subscription.subitems[0].recurrent, item_with_id.recurrent) def test_subscription_set_subitems_destroy(self): # Test if subitem/item was erased subitem = merchant.Item("Subitems", 1, 2345) subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, subitems=[subitem,]) item_with_id = 
subscription.subitems[0] item_with_id.destroy = True subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, subitems=[item_with_id,]) self.assertEqual(subscription.subitems, []) def test_subscription_create_credit_based_with_custom_variables(self): # Test if price_cents changed subscription = subscriptions.SubscriptionCreditsBased().\ create(self.customer.id, credits_cycle=2, price_cents=10, custom_variables={'city':"Recife"}) self.assertEqual(subscription.custom_variables[0]['name'], "city") self.assertEqual(subscription.custom_variables[0]['value'], "Recife") self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_set_credit_based_with_custom_variables(self): # Test if price_cents changed subscription = subscriptions.SubscriptionCreditsBased().\ create(self.customer.id, credits_cycle=2, price_cents=10) subscription = subscriptions.SubscriptionCreditsBased().\ set(subscription.id, custom_variables={'city':"Madrid"}) self.assertEqual(subscription.custom_variables[0]['name'], "city") self.assertEqual(subscription.custom_variables[0]['value'], "Madrid") self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_create_credit_based(self): # Test if price_cents changed subscription = subscriptions.SubscriptionCreditsBased().\ create(self.customer.id, credits_cycle=2, price_cents=10) self.assertIsInstance(subscription, subscriptions.SubscriptionCreditsBased) self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_create_credit_based_error_price_cents(self): # Test if price_cents changed subscription = subscriptions.SubscriptionCreditsBased() self.assertRaises(errors.IuguSubscriptionsException, subscription.create, self.customer.id, credits_cycle=2, price_cents=0) def test_subscription_create_credit_based_error_price_cents_empty(self): # Test if price_cents changed subscription = subscriptions.SubscriptionCreditsBased() 
self.assertRaises(errors.IuguSubscriptionsException, subscription.create, self.customer.id, credits_cycle=2, price_cents=None) def test_subscription_create_credit_based_price_cents(self): # Test if price_cents changed subscription = subscriptions.SubscriptionCreditsBased().\ create(self.customer.id, credits_cycle=2, price_cents=2000) self.assertEqual(subscription.price_cents, 2000) self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_create_credit_based_credits_cycle(self): # Test if price_cents changed subscription = subscriptions.SubscriptionCreditsBased().\ create(self.customer.id, credits_cycle=2, price_cents=2000) self.assertEqual(subscription.credits_cycle, 2) self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_create_credit_based_credits_min(self): # Test if price_cents changed subscription = subscriptions.SubscriptionCreditsBased().\ create(self.customer.id, credits_cycle=2, price_cents=2000, credits_min=4000) self.assertEqual(subscription.credits_min, 4000) self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_set_credit_based_price_cents(self): # Test if price_cents changed subscription = subscriptions.SubscriptionCreditsBased().\ create(self.customer.id, credits_cycle=2, price_cents=1200) subscription = subscriptions.SubscriptionCreditsBased().\ set(subscription.id, price_cents=3249) self.assertEqual(subscription.price_cents, 3249) self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_set_credits_cycle(self): # Test if credits_cycle changed subscription = subscriptions.SubscriptionCreditsBased().\ create(self.customer.id, credits_cycle=2, price_cents=1300) subscription = subscriptions.SubscriptionCreditsBased().\ set(subscription.id, credits_cycle=10) self.assertEqual(subscription.credits_cycle, 10) self.clean_invoices(subscription.recent_invoices) subscription.remove() def 
test_subscription_set_credits_min(self): # Test if credits_min changed subscription = subscriptions.SubscriptionCreditsBased().\ create(self.customer.id, credits_cycle=2, price_cents=1400) subscription = subscriptions.SubscriptionCreditsBased().\ set(subscription.id, credits_min=2000) self.assertEqual(subscription.credits_min, 2000) self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_credit_based_get(self): # Test if credits_min changed subscription = subscriptions.SubscriptionCreditsBased().\ create(self.customer.id, credits_cycle=2, price_cents=2000) subscription = subscriptions.SubscriptionCreditsBased().\ get(subscription.id) self.assertIsInstance(subscription, subscriptions.SubscriptionCreditsBased) self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_credit_based_getitems(self): # Test if credits_min changed subscription = subscriptions.SubscriptionCreditsBased().\ create(self.customer.id, credits_cycle=2, price_cents=2000) sleep(2) subscription_list = subscriptions.SubscriptionCreditsBased().\ getitems() self.assertIsInstance(subscription_list[0], subscriptions.SubscriptionCreditsBased) self.clean_invoices(subscription.recent_invoices) subscription.remove() # Test save method @unittest.skip("This is not support by API. 
Return not found") def test_subscription_save_customer_id(self): # Iugu's support (number 782) customer = customers.IuguCustomer().create(name="Subs save", email="subs_save@local.com") self.subscription.customer_id = customer.id obj = self.subscription.save() self.assertEqual(customer.id, obj.customer_id) customer.remove() def test_subscription_save_expires_at(self): self.subscription.expires_at = "12/12/2020" obj = self.subscription.save() self.assertEqual(obj.expires_at, "2020-12-12") def test_subscription_save_subitems(self): # Test if to save a new item subitem = merchant.Item("Subitems", 1, 2345) self.subscription.subitems = [subitem,] obj = self.subscription.save() self.assertEqual(obj.subitems[0].description, subitem.description) def test_subscription_save_subitems_description(self): # Test if subitem/item descriptions was changed subitem = merchant.Item("Subitems", 1, 2345) self.subscription.subitems = [subitem,] new_subscription = self.subscription.save() item_with_id = new_subscription.subitems[0] item_with_id.description = "Subitems Edited" self.subscription.subitems = [item_with_id] obj = self.subscription.save() self.assertEqual(obj.subitems[0].description, item_with_id.description) def test_subscription_save_subitems_price_cents(self): # Test if subitem/item price_cents was changed subitem = merchant.Item("Subitems", 1, 2345) self.subscription.subitems = [subitem,] new_subscription = self.subscription.save() item_with_id = new_subscription.subitems[0] item_with_id.price_cents = 2900 self.subscription.subitems = [item_with_id,] obj = self.subscription.save() self.assertEqual(obj.subitems[0].price_cents, item_with_id.price_cents) def test_subscription_save_subitems_quantity(self): # Test if subitem/item quantity was changed subitem = merchant.Item("Subitems", 1, 2345) self.subscription.subitems = [subitem,] new_subscription = self.subscription.save() item_with_id = new_subscription.subitems[0] item_with_id.quantity = 4 self.subscription.subitems = 
[item_with_id,] obj = self.subscription.save() self.assertEqual(obj.subitems[0].quantity, item_with_id.quantity) def test_subscription_save_subitems_recurrent(self): # Test if subitem/item recurrent was changed subitem = merchant.Item("Subitems", 1, 2345) self.subscription.subitems = [subitem,] new_subscription = self.subscription.save() item_with_id = new_subscription.subitems[0] item_with_id.recurrent = True self.subscription.subitems = [item_with_id,] obj = self.subscription.save() self.assertEqual(obj.subitems[0].recurrent, item_with_id.recurrent) def test_subscription_save_subitems__destroy(self): # Test if subitem/item was erased subitem = merchant.Item("Subitems", 1, 2345) self.subscription.subitems = [subitem,] new_subscription = self.subscription.save() item_with_id = new_subscription.subitems[0] item_with_id.destroy = True self.subscription.subitems = [item_with_id,] obj = self.subscription.save() self.assertEqual(obj.subitems, []) def test_subscription_save_suspended(self): self.subscription.suspended = True obj = self.subscription.save() self.assertEqual(obj.suspended, True) # @unittest.skip("Waiting API developers to support this question") # TODO: def test_subscription_save_skip_charge(self): def test_subscription_save_price_cents(self): subscription = subscriptions.SubscriptionCreditsBased() subscription = subscription.create(customer_id=self.customer.id, credits_cycle=2, price_cents=1000) subscription.price_cents = 8188 obj = subscription.save() self.assertEqual(obj.price_cents, 8188) self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_save_credits_cycle(self): subscription = subscriptions.SubscriptionCreditsBased() subscription = subscription.create(customer_id=self.customer.id, credits_cycle=2, price_cents=1000) subscription.credits_cycle = 5 obj = subscription.save() self.assertEqual(obj.credits_cycle, 5) self.clean_invoices(subscription.recent_invoices) subscription.remove() def 
test_subscription_save_credits_min(self): subscription = subscriptions.SubscriptionCreditsBased() subscription = subscription.create(customer_id=self.customer.id, credits_cycle=2, price_cents=1100) subscription.credits_min = 9000 obj = subscription.save() self.assertEqual(obj.credits_min, 9000) self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_suspend(self): obj = subscriptions.IuguSubscription().suspend(self.subscription.id) self.assertEqual(obj.suspended, True) @unittest.skip("API not support this activate by REST .../activate") def test_subscription_activate(self): obj = subscriptions.IuguSubscription().suspend(self.subscription.id) self.subscription.suspended = True self.subscription.save() obj = subscriptions.IuguSubscription().activate(self.subscription.id) self.assertEqual(obj.suspended, False) def test_subscription_change_plan(self): seed = randint(1, 999) identifier = "%s_%s" % (self.plan_new.identifier, str(seed)) plan_again_change = plans.IuguPlan().create(name="Change Test", identifier=identifier, interval=1, interval_type="months", currency="BRL", value_cents=1111) obj = subscriptions.IuguSubscription().change_plan( plan_again_change.identifier, sid=self.subscription.id) self.assertEqual(obj.plan_identifier, identifier) self.clean_invoices(obj.recent_invoices) plan_again_change.remove() def test_subscription_change_plan_by_instance(self): seed = randint(1, 999) identifier = "%s_%s" % (self.plan_new.identifier, str(seed)) plan_again_change = plans.IuguPlan().create(name="Change Test", identifier=identifier, interval=1, interval_type="months", currency="BRL", value_cents=1112) obj = self.subscription.change_plan(plan_again_change.identifier) self.assertEqual(obj.plan_identifier, identifier) self.clean_invoices(obj.recent_invoices) plan_again_change.remove() def test_subscription_add_credits(self): subscription = subscriptions.SubscriptionCreditsBased() subscription = 
subscription.create(customer_id=self.customer.id, credits_cycle=2, price_cents=1100) obj = subscriptions.SubscriptionCreditsBased().add_credits(sid=subscription.id, quantity=20) self.assertEqual(obj.credits, 20) self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_add_credits_by_instance(self): subscription = subscriptions.SubscriptionCreditsBased() subscription = subscription.create(customer_id=self.customer.id, credits_cycle=2, price_cents=1100) obj = subscription.add_credits(sid=subscription.id, quantity=20) self.assertEqual(obj.credits, 20) self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_remove_credits(self): subscription = subscriptions.SubscriptionCreditsBased() subscription = subscription.create(customer_id=self.customer.id, credits_cycle=2, price_cents=1100) subscription.add_credits(quantity=20) obj = subscriptions.SubscriptionCreditsBased().\ remove_credits(sid=subscription.id, quantity=5) self.assertEqual(obj.credits, 15) self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_remove_credits_by_instance(self): subscription = subscriptions.SubscriptionCreditsBased() subscription = subscription.create(customer_id=self.customer.id, credits_cycle=2, price_cents=1100) subscription.add_credits(quantity=20) sleep(2) obj = subscription.remove_credits(quantity=5) self.assertEqual(obj.credits, 15) self.clean_invoices(subscription.recent_invoices) subscription.remove() class TestTransfer(unittest.TestCase): # TODO: to create this tests pass if __name__ == '__main__': unittest.main()
""" Copyright (C) 2017 Open Source Robotics Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import math import numpy as np import latlon import ecef class Enu(object): def __init__(self, e, n, u): self.e = e self.n = n self.u = u def __eq__(self, other): return self.e == other.e and self.n == other.n and self.u == other.u def __hash__(self): return hash((self.e, self.n, self.u)) def to_ecef(self, origin): # this doesn't work at the poles because longitude is not uniquely defined there sin_lon = origin._sin_lon() sin_lat = origin._sin_lat() cos_lon = origin._cos_lon() cos_lat = origin._cos_lat() global_to_ecef_matrix = np.array([[-sin_lon, -cos_lon * sin_lat, cos_lon * cos_lat], [cos_lon, - sin_lon * sin_lat, sin_lon * cos_lat], [0, cos_lat, sin_lat]]) enu_vector = np.array([[self.e], [self.n], [self.u]]) ecef_vector = np.dot(global_to_ecef_matrix, enu_vector) return ecef.Ecef(ecef_vector[0][0], ecef_vector[1][0], ecef_vector[2][0])
"""Package contains helpers for gdata library. """
"""Configure number in a device through MQTT topic.""" from __future__ import annotations import functools import logging import voluptuous as vol from homeassistant.components import number from homeassistant.components.number import ( DEFAULT_MAX_VALUE, DEFAULT_MIN_VALUE, DEFAULT_STEP, NumberEntity, ) from homeassistant.const import ( CONF_NAME, CONF_OPTIMISTIC, CONF_UNIT_OF_MEASUREMENT, CONF_VALUE_TEMPLATE, ) from homeassistant.core import HomeAssistant, callback from homeassistant.helpers import config_validation as cv from homeassistant.helpers.reload import async_setup_reload_service from homeassistant.helpers.restore_state import RestoreEntity from homeassistant.helpers.typing import ConfigType from . import PLATFORMS, MqttCommandTemplate, subscription from .. import mqtt from .const import CONF_COMMAND_TOPIC, CONF_QOS, CONF_RETAIN, CONF_STATE_TOPIC, DOMAIN from .debug_info import log_messages from .mixins import MQTT_ENTITY_COMMON_SCHEMA, MqttEntity, async_setup_entry_helper CONF_COMMAND_TEMPLATE = "command_template" _LOGGER = logging.getLogger(__name__) CONF_MIN = "min" CONF_MAX = "max" CONF_PAYLOAD_RESET = "payload_reset" CONF_STEP = "step" DEFAULT_NAME = "MQTT Number" DEFAULT_OPTIMISTIC = False DEFAULT_PAYLOAD_RESET = "None" MQTT_NUMBER_ATTRIBUTES_BLOCKED = frozenset( { number.ATTR_MAX, number.ATTR_MIN, number.ATTR_STEP, } ) def validate_config(config): """Validate that the configuration is valid, throws if it isn't.""" if config.get(CONF_MIN) >= config.get(CONF_MAX): raise vol.Invalid(f"'{CONF_MAX}' must be > '{CONF_MIN}'") return config _PLATFORM_SCHEMA_BASE = mqtt.MQTT_RW_PLATFORM_SCHEMA.extend( { vol.Optional(CONF_COMMAND_TEMPLATE): cv.template, vol.Optional(CONF_MAX, default=DEFAULT_MAX_VALUE): vol.Coerce(float), vol.Optional(CONF_MIN, default=DEFAULT_MIN_VALUE): vol.Coerce(float), vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean, vol.Optional(CONF_PAYLOAD_RESET, 
default=DEFAULT_PAYLOAD_RESET): cv.string, vol.Optional(CONF_STEP, default=DEFAULT_STEP): vol.All( vol.Coerce(float), vol.Range(min=1e-3) ), vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string, vol.Optional(CONF_VALUE_TEMPLATE): cv.template, }, ).extend(MQTT_ENTITY_COMMON_SCHEMA.schema) PLATFORM_SCHEMA = vol.All( _PLATFORM_SCHEMA_BASE, validate_config, ) DISCOVERY_SCHEMA = vol.All( _PLATFORM_SCHEMA_BASE.extend({}, extra=vol.REMOVE_EXTRA), validate_config, ) async def async_setup_platform( hass: HomeAssistant, config: ConfigType, async_add_entities, discovery_info=None ): """Set up MQTT number through configuration.yaml.""" await async_setup_reload_service(hass, DOMAIN, PLATFORMS) await _async_setup_entity(hass, async_add_entities, config) async def async_setup_entry(hass, config_entry, async_add_entities): """Set up MQTT number dynamically through MQTT discovery.""" setup = functools.partial( _async_setup_entity, hass, async_add_entities, config_entry=config_entry ) await async_setup_entry_helper(hass, number.DOMAIN, setup, DISCOVERY_SCHEMA) async def _async_setup_entity( hass, async_add_entities, config, config_entry=None, discovery_data=None ): """Set up the MQTT number.""" async_add_entities([MqttNumber(hass, config, config_entry, discovery_data)]) class MqttNumber(MqttEntity, NumberEntity, RestoreEntity): """representation of an MQTT number.""" _entity_id_format = number.ENTITY_ID_FORMAT _attributes_extra_blocked = MQTT_NUMBER_ATTRIBUTES_BLOCKED def __init__(self, hass, config, config_entry, discovery_data): """Initialize the MQTT Number.""" self._config = config self._optimistic = False self._sub_state = None self._current_number = None NumberEntity.__init__(self) MqttEntity.__init__(self, hass, config, config_entry, discovery_data) @staticmethod def config_schema(): """Return the config schema.""" return DISCOVERY_SCHEMA def _setup_from_config(self, config): """(Re)Setup the entity.""" self._optimistic = config[CONF_OPTIMISTIC] self._templates = { 
CONF_COMMAND_TEMPLATE: MqttCommandTemplate( config.get(CONF_COMMAND_TEMPLATE), self.hass ).async_render, CONF_VALUE_TEMPLATE: config.get(CONF_VALUE_TEMPLATE), } value_template = self._templates[CONF_VALUE_TEMPLATE] if value_template is None: self._templates[CONF_VALUE_TEMPLATE] = lambda value: value else: value_template.hass = self.hass self._templates[ CONF_VALUE_TEMPLATE ] = value_template.async_render_with_possible_json_value async def _subscribe_topics(self): """(Re)Subscribe to topics.""" @callback @log_messages(self.hass, self.entity_id) def message_received(msg): """Handle new MQTT messages.""" payload = self._templates[CONF_VALUE_TEMPLATE](msg.payload) try: if payload == self._config[CONF_PAYLOAD_RESET]: num_value = None elif payload.isnumeric(): num_value = int(payload) else: num_value = float(payload) except ValueError: _LOGGER.warning("Payload '%s' is not a Number", msg.payload) return if num_value is not None and ( num_value < self.min_value or num_value > self.max_value ): _LOGGER.error( "Invalid value for %s: %s (range %s - %s)", self.entity_id, num_value, self.min_value, self.max_value, ) return self._current_number = num_value self.async_write_ha_state() if self._config.get(CONF_STATE_TOPIC) is None: # Force into optimistic mode. 
self._optimistic = True else: self._sub_state = await subscription.async_subscribe_topics( self.hass, self._sub_state, { "state_topic": { "topic": self._config.get(CONF_STATE_TOPIC), "msg_callback": message_received, "qos": self._config[CONF_QOS], } }, ) if self._optimistic and (last_state := await self.async_get_last_state()): self._current_number = last_state.state @property def min_value(self) -> float: """Return the minimum value.""" return self._config[CONF_MIN] @property def max_value(self) -> float: """Return the maximum value.""" return self._config[CONF_MAX] @property def step(self) -> float: """Return the increment/decrement step.""" return self._config[CONF_STEP] @property def unit_of_measurement(self) -> str | None: """Return the unit of measurement.""" return self._config.get(CONF_UNIT_OF_MEASUREMENT) @property def value(self): """Return the current value.""" return self._current_number async def async_set_value(self, value: float) -> None: """Update the current value.""" current_number = value if value.is_integer(): current_number = int(value) payload = self._templates[CONF_COMMAND_TEMPLATE](current_number) if self._optimistic: self._current_number = current_number self.async_write_ha_state() await mqtt.async_publish( self.hass, self._config[CONF_COMMAND_TOPIC], payload, self._config[CONF_QOS], self._config[CONF_RETAIN], ) @property def assumed_state(self): """Return true if we do optimistic updates.""" return self._optimistic
import datetime
import uuid

import freezegun
import pretend
import pytest

from pyramid.httpexceptions import HTTPMovedPermanently, HTTPSeeOther

from warehouse.accounts import views
from warehouse.accounts.interfaces import IUserService, TooManyFailedLogins

from ...common.db.accounts import UserFactory


class TestFailedLoginView:
    # BUG FIX: this code previously sat directly in the class body, so it
    # executed at import/collection time and was never collected by pytest
    # as a test. It is now a proper test method.
    def test_too_many_failed_logins(self):
        """The 429 view reports the lockout and a Retry-After header."""
        exc = TooManyFailedLogins(resets_in=datetime.timedelta(seconds=600))
        request = pretend.stub()

        resp = views.failed_logins(exc, request)

        assert resp.status == "429 Too Many Failed Login Attempts"
        assert resp.detail == (
            "There have been too many unsuccessful login attempts. Please try "
            "again later."
        )
        assert dict(resp.headers).get("Retry-After") == "600"


class TestUserProfile:
    def test_user_redirects_username(self, db_request):
        """A differently-cased username 301-redirects to the canonical one."""
        user = UserFactory.create()

        # Build a username that differs from the stored one only by case.
        if user.username.upper() != user.username:
            username = user.username.upper()
        else:
            username = user.username.lower()

        db_request.current_route_path = pretend.call_recorder(
            lambda username: "/user/the-redirect/"
        )
        db_request.matchdict = {"username": username}

        result = views.profile(user, db_request)

        assert isinstance(result, HTTPMovedPermanently)
        assert result.headers["Location"] == "/user/the-redirect/"
        assert db_request.current_route_path.calls == [
            pretend.call(username=user.username),
        ]

    def test_returns_user(self, db_request):
        user = UserFactory.create()
        assert views.profile(user, db_request) == {
            "user": user,
            "projects": [],
        }


class TestLogin:
    @pytest.mark.parametrize("next_url", [None, "/foo/bar/", "/wat/"])
    def test_get_returns_form(self, pyramid_request, next_url):
        user_service = pretend.stub()
        pyramid_request.find_service = pretend.call_recorder(
            lambda iface, context: user_service
        )
        form_obj = pretend.stub()
        form_class = pretend.call_recorder(lambda d, user_service: form_obj)

        if next_url is not None:
            pyramid_request.GET["next"] = next_url

        result = views.login(pyramid_request, _form_class=form_class)

        assert result == {
            "form": form_obj,
            "redirect": {"field": "next", "data": next_url},
        }
        assert pyramid_request.find_service.calls == [
            pretend.call(IUserService, context=None),
        ]
        assert form_class.calls == [
            pretend.call(pyramid_request.POST, user_service=user_service),
        ]

    @pytest.mark.parametrize("next_url", [None, "/foo/bar/", "/wat/"])
    def test_post_invalid_returns_form(self, pyramid_request, next_url):
        user_service = pretend.stub()
        pyramid_request.find_service = pretend.call_recorder(
            lambda iface, context: user_service
        )
        pyramid_request.method = "POST"
        if next_url is not None:
            pyramid_request.POST["next"] = next_url

        form_obj = pretend.stub(validate=pretend.call_recorder(lambda: False))
        form_class = pretend.call_recorder(lambda d, user_service: form_obj)

        result = views.login(pyramid_request, _form_class=form_class)

        assert result == {
            "form": form_obj,
            "redirect": {"field": "next", "data": next_url},
        }
        assert pyramid_request.find_service.calls == [
            pretend.call(IUserService, context=None),
        ]
        assert form_class.calls == [
            pretend.call(pyramid_request.POST, user_service=user_service),
        ]
        assert form_obj.validate.calls == [pretend.call()]

    @pytest.mark.parametrize("with_user", [True, False])
    def test_post_validate_redirects(self, monkeypatch, pyramid_request, with_user):
        remember = pretend.call_recorder(
            lambda request, user_id: [("foo", "bar")]
        )
        monkeypatch.setattr(views, "remember", remember)

        new_session = {}

        user_id = uuid.uuid4()
        user_service = pretend.stub(
            find_userid=pretend.call_recorder(lambda username: user_id),
            update_user=pretend.call_recorder(lambda *a, **kw: None),
        )
        pyramid_request.find_service = pretend.call_recorder(
            lambda iface, context: user_service
        )
        pyramid_request.method = "POST"
        pyramid_request.session = pretend.stub(
            items=lambda: [("a", "b"), ("foo", "bar")],
            update=new_session.update,
            invalidate=pretend.call_recorder(lambda: None),
            new_csrf_token=pretend.call_recorder(lambda: None),
        )
        pyramid_request.set_property(
            lambda r: str(uuid.uuid4()) if with_user else None,
            name="unauthenticated_userid",
        )

        form_obj = pretend.stub(
            validate=pretend.call_recorder(lambda: True),
            username=pretend.stub(data="theuser"),
        )
        form_class = pretend.call_recorder(lambda d, user_service: form_obj)

        # Freeze time so the recorded last_login matches exactly.
        now = datetime.datetime.utcnow()

        with freezegun.freeze_time(now):
            result = views.login(pyramid_request, _form_class=form_class)

        assert isinstance(result, HTTPSeeOther)

        assert result.headers["Location"] == "/"
        assert result.headers["foo"] == "bar"

        assert form_class.calls == [
            pretend.call(pyramid_request.POST, user_service=user_service),
        ]
        assert form_obj.validate.calls == [pretend.call()]

        assert user_service.find_userid.calls == [pretend.call("theuser")]
        assert user_service.update_user.calls == [
            pretend.call(user_id, last_login=now),
        ]

        # When the user was already authenticated the session is not
        # carried over; otherwise its items survive the invalidation.
        if with_user:
            assert new_session == {}
        else:
            assert new_session == {"a": "b", "foo": "bar"}

        assert remember.calls == [pretend.call(pyramid_request, str(user_id))]
        assert pyramid_request.session.invalidate.calls == [pretend.call()]
        assert pyramid_request.find_service.calls == [
            pretend.call(IUserService, context=None),
            pretend.call(IUserService, context=None),
        ]
        assert pyramid_request.session.new_csrf_token.calls == [pretend.call()]

    @pytest.mark.parametrize(
        # The set of all possible next URLs. Since this set is infinite, we
        # test only a finite set of reasonable URLs.
        ("expected_next_url, observed_next_url"),
        [
            ("/security/", "/security/"),
            ("http://example.com", "/"),
        ],
    )
    def test_post_validate_no_redirects(self, pyramid_request,
                                        expected_next_url, observed_next_url):
        user_service = pretend.stub(
            find_userid=pretend.call_recorder(lambda username: 1),
            update_user=lambda *a, **k: None,
        )
        pyramid_request.find_service = pretend.call_recorder(
            lambda iface, context: user_service
        )
        pyramid_request.method = "POST"
        pyramid_request.POST["next"] = expected_next_url

        form_obj = pretend.stub(
            validate=pretend.call_recorder(lambda: True),
            username=pretend.stub(data="theuser"),
        )
        form_class = pretend.call_recorder(lambda d, user_service: form_obj)

        result = views.login(pyramid_request, _form_class=form_class)

        assert isinstance(result, HTTPSeeOther)
        assert result.headers["Location"] == observed_next_url


class TestLogout:
    @pytest.mark.parametrize("next_url", [None, "/foo/bar/", "/wat/"])
    def test_get_returns_empty(self, pyramid_request, next_url):
        if next_url is not None:
            pyramid_request.GET["next"] = next_url

        assert views.logout(pyramid_request) == \
            {"redirect": {"field": "next", "data": next_url}}

    def test_post_forgets_user(self, monkeypatch, pyramid_request):
        forget = pretend.call_recorder(lambda request: [("foo", "bar")])
        monkeypatch.setattr(views, "forget", forget)

        pyramid_request.method = "POST"
        pyramid_request.session = pretend.stub(
            invalidate=pretend.call_recorder(lambda: None),
        )

        result = views.logout(pyramid_request)

        assert isinstance(result, HTTPSeeOther)
        assert result.headers["Location"] == "/"
        assert result.headers["foo"] == "bar"
        assert forget.calls == [pretend.call(pyramid_request)]
        assert pyramid_request.session.invalidate.calls == [pretend.call()]

    @pytest.mark.parametrize(
        # The set of all possible next URLs. Since this set is infinite, we
        # test only a finite set of reasonable URLs.
        ("expected_next_url, observed_next_url"),
        [
            ("/security/", "/security/"),
            ("http://example.com", "/"),
        ],
    )
    def test_post_redirects_user(self, pyramid_request, expected_next_url,
                                 observed_next_url):
        pyramid_request.method = "POST"
        pyramid_request.POST["next"] = expected_next_url

        result = views.logout(pyramid_request)

        assert isinstance(result, HTTPSeeOther)
        assert result.headers["Location"] == observed_next_url


class TestRegister:
    def test_get(self, pyramid_request):
        form_inst = pretend.stub()
        form = pretend.call_recorder(lambda *args, **kwargs: form_inst)
        pyramid_request.find_service = pretend.call_recorder(
            lambda *args, **kwargs: pretend.stub(
                enabled=False,
                csp_policy=pretend.stub(),
                merge=lambda _: None,
            )
        )
        result = views.register(pyramid_request, _form_class=form)
        assert result["form"] is form_inst

    def test_redirect_authenticated_user(self):
        result = views.register(pretend.stub(authenticated_userid=1))
        assert isinstance(result, HTTPSeeOther)
        assert result.headers["Location"] == "/"

    def test_register_redirect(self, pyramid_request):
        pyramid_request.method = "POST"

        pyramid_request.find_service = pretend.call_recorder(
            lambda *args, **kwargs: pretend.stub(
                csp_policy={},
                merge=lambda _: {},
                enabled=False,
                verify_response=pretend.call_recorder(lambda _: None),
                find_userid=pretend.call_recorder(lambda _: None),
                find_userid_by_email=pretend.call_recorder(lambda _: None),
                create_user=pretend.call_recorder(
                    lambda *args, **kwargs: pretend.stub(id=1),
                ),
                update_user=lambda *args, **kwargs: None,
            )
        )
        pyramid_request.route_path = pretend.call_recorder(lambda name: "/")
        pyramid_request.POST.update({
            "username": "username_value",
            "password": "MyStr0ng!shP455w0rd",
            "password_confirm": "MyStr0ng!shP455w0rd",
            "email": "foo@bar.com",
            "full_name": "full_name",
        })

        result = views.register(pyramid_request)

        assert isinstance(result, HTTPSeeOther)
        assert result.headers["Location"] == "/"


class TestClientSideIncludes:
    def test_edit_gravatar_csi_returns_user(self, db_request):
        user = UserFactory.create()
        assert views.edit_gravatar_csi(user, db_request) == {
            "user": user,
        }


class TestProfileCallout:
    def test_profile_callout_returns_user(self):
        user = pretend.stub()
        request = pretend.stub()
        assert views.profile_callout(user, request) == {"user": user}
'''
Created on Nov 26, 2014

@author: Yury Zhauniarovich <y.zhalnerovich{at}gmail.com>
'''
# NOTE: this is a Python 2 script (print statements, raw_input, xrange).
import os, time

from interfaces.adb_interface import AdbInterface
from bboxcoverage import BBoxCoverage
from running_strategies import IntentInvocationStrategy

import smtplib
import email.utils
from email.mime.text import MIMEText

# Directories containing instrumented APK result folders to process.
APK_DIR_SOURCES = ["", ""]

# SMTP credentials used for completion notifications (left blank on purpose).
SMTP_SERVER = 'smtp.gmail.com'
SMTP_PORT = 587
SENDER = ""
PASSWORD = ""
TO_EMAIL = ""


def sendMessage(subj, email_message):
    # Send a plain-text notification email via SMTP, upgrading to TLS
    # when the server advertises STARTTLS.
    msg = MIMEText(email_message)
    msg['To'] = email.utils.formataddr(('Recipient', TO_EMAIL))
    msg['From'] = email.utils.formataddr(('Author', SENDER))
    msg['Subject'] = subj

    server = smtplib.SMTP(SMTP_SERVER, SMTP_PORT)
    try:
        server.set_debuglevel(True)
        # identify ourselves, prompting server for supported features
        server.ehlo()
        # If we can encrypt this session, do it
        if server.has_extn('STARTTLS'):
            server.starttls()
            server.ehlo()  # re-identify ourselves over TLS connection
        server.login(SENDER, PASSWORD)
        server.sendmail(SENDER, [TO_EMAIL], msg.as_string())
    finally:
        # Always close the connection, even if login/send failed.
        server.quit()


def getExecutionDevice():
    '''
    This method allows a user to select a device that is used to for further
    analysis.

    Returns the selected device serial, the only device's serial when just
    one is attached, or None when no device is detected.
    '''
    dev_list = AdbInterface.getDeviceSerialsList()
    devNum = len(dev_list)

    if devNum <= 0:
        print "No device has been detected! Connect your device and restart the application!"
        return

    if devNum == 1:
        return dev_list[0]

    choice = None
    if devNum > 1:
        print "Select the device to use for analysis:\n"
        for i in xrange(0, devNum):
            print "%d. %s\n" % ((i + 1), dev_list[i])

        # Loop until the user enters a valid 1-based device index.
        while not choice:
            try:
                choice = int(raw_input())
                if choice not in range(1, devNum + 1):
                    choice = None
                    print 'Invalid choice! Choose right number!'
            except ValueError:
                print 'Invalid Number! Choose right number!'

    return dev_list[choice - 1]


def getSubdirs(rootDir):
    # Return absolute paths of the immediate subdirectories of rootDir.
    return [os.path.join(rootDir, name) for name in os.listdir(rootDir)
            if os.path.isdir(os.path.join(rootDir, name))]


def getInstrApkInFolder(folder):
    # Return the first "*_aligned.apk" (instrumented apk) in folder, or None.
    for f in os.listdir(folder):
        if f.endswith("_aligned.apk"):
            filepath = os.path.join(folder, f)
            return filepath
    return None


def runMainIntentsStrategy(adb, androidManifest, delay=10):
    # Trigger the app's components by firing intents from its manifest.
    automaticTriggeringStrategy = IntentInvocationStrategy(
        adbDevice=adb, pathToAndroidManifest=androidManifest)
    automaticTriggeringStrategy.run(delay=delay)


# ---- script entry point: select device, then run the main_intents
# ---- strategy over every instrumented apk found under APK_DIR_SOURCES.
adb = AdbInterface()
device = getExecutionDevice()
if not device:
    exit(1)

adb.setTargetSerial(device)
bboxcoverage = BBoxCoverage()

for apk_dir_source in APK_DIR_SOURCES:
    print "\n\nStarting experiment for directory: [%s]" % apk_dir_source
    result_directories = getSubdirs(apk_dir_source)
    for directory in result_directories:
        apk_file = getInstrApkInFolder(directory)
        if apk_file:
            print "Starting experiment for apk: [%s]" % apk_file
            # NOTE(review): the bare excepts below are deliberate
            # best-effort handling so one bad apk does not abort the whole
            # batch, but they also hide unexpected errors — consider
            # narrowing to Exception and logging the traceback.
            try:
                bboxcoverage.initAlreadyInstrApkEnv(pathToInstrApk=apk_file,
                                                    resultsDir=directory)
            except:
                print "Exception while initialization!"
                continue
            try:
                bboxcoverage.installApkOnDevice()
            except:
                # First install failed: uninstall any stale copy and retry once.
                print "Exception while installation apk on device!"
                bboxcoverage.uninstallPackage()
                try:
                    bboxcoverage.installApkOnDevice()
                except:
                    continue
            package_name = bboxcoverage.getPackageName()
            params = {}
            params["strategy"] = "main_intents"
            params["package_name"] = package_name
            params["main_activity"] = bboxcoverage.androidManifest.getMainActivity()
            try:
                bboxcoverage.startTesting()
            except:
                print "Exception while startTesting!"
                bboxcoverage.uninstallPackage()
                continue
            try:
                runMainIntentsStrategy(adb=adb,
                                       androidManifest=bboxcoverage.androidManifestFile,
                                       delay=10)
            except:
                print "Exception while running strategy!"
                bboxcoverage.uninstallPackage()
                continue
            try:
                bboxcoverage.stopTesting("main_intents", paramsToWrite=params)
            except:
                print "Exception while running strategy!"
                bboxcoverage.uninstallPackage()
                continue
            # Short pauses to let the device settle between apps.
            time.sleep(3)
            bboxcoverage.uninstallPackage()
            time.sleep(5)
    # Notify once per source directory when its batch completes.
    # NOTE(review): indentation reconstructed from a flattened source —
    # confirm this call belongs at the per-directory-source level.
    sendMessage("[BBoxTester]", "Experiments done for directory [%s]!" %
                apk_dir_source)
""" Implements vlans for vmwareapi. """ from nova import db from nova import exception from nova import flags from nova import log as logging from nova import utils from nova.virt.vmwareapi_conn import VMWareAPISession from nova.virt.vmwareapi import network_utils LOG = logging.getLogger("nova.network.vmwareapi_net") FLAGS = flags.FLAGS flags.DEFINE_string('vlan_interface', 'vmnic0', 'Physical network adapter name in VMware ESX host for ' 'vlan networking') def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): """Create a vlan and bridge unless they already exist.""" # Open vmwareapi session host_ip = FLAGS.vmwareapi_host_ip host_username = FLAGS.vmwareapi_host_username host_password = FLAGS.vmwareapi_host_password if not host_ip or host_username is None or host_password is None: raise Exception(_("Must specify vmwareapi_host_ip," "vmwareapi_host_username " "and vmwareapi_host_password to use" "connection_type=vmwareapi")) session = VMWareAPISession(host_ip, host_username, host_password, FLAGS.vmwareapi_api_retry_count) vlan_interface = FLAGS.vlan_interface # Check if the vlan_interface physical network adapter exists on the host if not network_utils.check_if_vlan_interface_exists(session, vlan_interface): raise exception.NetworkAdapterNotFound(adapter=vlan_interface) # Get the vSwitch associated with the Physical Adapter vswitch_associated = network_utils.get_vswitch_for_vlan_interface( session, vlan_interface) if vswitch_associated is None: raise exception.SwicthNotFoundForNetworkAdapter(adapter=vlan_interface) # Check whether bridge already exists and retrieve the the ref of the # network whose name_label is "bridge" network_ref = network_utils.get_network_with_the_name(session, bridge) if network_ref is None: # Create a port group on the vSwitch associated with the vlan_interface # corresponding physical network adapter on the ESX host network_utils.create_port_group(session, bridge, vswitch_associated, vlan_num) else: # Get the vlan id and vswitch 
corresponding to the port group pg_vlanid, pg_vswitch = \ network_utils.get_vlanid_and_vswitch_for_portgroup(session, bridge) # Check if the vswitch associated is proper if pg_vswitch != vswitch_associated: raise exception.InvalidVLANPortGroup(bridge=bridge, expected=vswitch_associated, actual=pg_vswitch) # Check if the vlan id is proper for the port group if pg_vlanid != vlan_num: raise exception.InvalidVLANTag(bridge=bridge, tag=vlan_num, pgroup=pg_vlanid)
import collections import json import math import re import six import uuid from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import units from cinder import context from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder import interface from cinder import utils from cinder.volume import driver from cinder.volume.drivers.huawei import constants from cinder.volume.drivers.huawei import fc_zone_helper from cinder.volume.drivers.huawei import huawei_conf from cinder.volume.drivers.huawei import huawei_utils from cinder.volume.drivers.huawei import hypermetro from cinder.volume.drivers.huawei import replication from cinder.volume.drivers.huawei import rest_client from cinder.volume.drivers.huawei import smartx from cinder.volume import utils as volume_utils from cinder.volume import volume_types from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) huawei_opts = [ cfg.StrOpt('cinder_huawei_conf_file', default='/etc/cinder/cinder_huawei_conf.xml', help='The configuration file for the Cinder Huawei driver.'), cfg.StrOpt('hypermetro_devices', default=None, help='The remote device hypermetro will use.'), cfg.StrOpt('metro_san_user', default=None, help='The remote metro device san user.'), cfg.StrOpt('metro_san_password', default=None, help='The remote metro device san password.'), cfg.StrOpt('metro_domain_name', default=None, help='The remote metro device domain name.'), cfg.StrOpt('metro_san_address', default=None, help='The remote metro device request url.'), cfg.StrOpt('metro_storage_pools', default=None, help='The remote metro device pool names.'), ] CONF = cfg.CONF CONF.register_opts(huawei_opts) snap_attrs = ('id', 'volume_id', 'volume', 'provider_location') Snapshot = collections.namedtuple('Snapshot', snap_attrs) vol_attrs = ('id', 'lun_type', 'provider_location', 'metadata') Volume = collections.namedtuple('Volume', vol_attrs) class 
HuaweiBaseDriver(driver.VolumeDriver): def __init__(self, *args, **kwargs): super(HuaweiBaseDriver, self).__init__(*args, **kwargs) if not self.configuration: msg = _('Configuration is not found.') raise exception.InvalidInput(reason=msg) self.active_backend_id = kwargs.get('active_backend_id') self.configuration.append_config_values(huawei_opts) self.huawei_conf = huawei_conf.HuaweiConf(self.configuration) self.metro_flag = False self.replica = None def get_local_and_remote_dev_conf(self): self.loc_dev_conf = self.huawei_conf.get_local_device() # Now just support one replication_devices. replica_devs = self.huawei_conf.get_replication_devices() self.replica_dev_conf = replica_devs[0] if replica_devs else {} def get_local_and_remote_client_conf(self): if self.active_backend_id: return self.replica_dev_conf, self.loc_dev_conf else: return self.loc_dev_conf, self.replica_dev_conf def do_setup(self, context): """Instantiate common class and login storage system.""" # Set huawei private configuration into Configuration object. 
self.huawei_conf.update_config_value() self.get_local_and_remote_dev_conf() client_conf, replica_client_conf = ( self.get_local_and_remote_client_conf()) # init local client if not client_conf: msg = _('Get active client failed.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) self.client = rest_client.RestClient(self.configuration, **client_conf) self.client.login() # init remote client metro_san_address = self.configuration.safe_get("metro_san_address") metro_san_user = self.configuration.safe_get("metro_san_user") metro_san_password = self.configuration.safe_get("metro_san_password") if metro_san_address and metro_san_user and metro_san_password: metro_san_address = metro_san_address.split(";") self.rmt_client = rest_client.RestClient(self.configuration, metro_san_address, metro_san_user, metro_san_password) self.rmt_client.login() self.metro_flag = True else: self.metro_flag = False LOG.warning(_LW("Remote device not configured in cinder.conf")) # init replication manager if replica_client_conf: self.replica_client = rest_client.RestClient(self.configuration, **replica_client_conf) self.replica_client.try_login() self.replica = replication.ReplicaPairManager(self.client, self.replica_client, self.configuration) def check_for_setup_error(self): pass def get_volume_stats(self, refresh=False): """Get volume status and reload huawei config file.""" self.huawei_conf.update_config_value() stats = self.client.update_volume_stats() stats = self.update_hypermetro_capability(stats) if self.replica: stats = self.replica.update_replica_capability(stats) targets = [self.replica_dev_conf['backend_id']] stats['replication_targets'] = targets stats['replication_enabled'] = True return stats def update_hypermetro_capability(self, stats): if self.metro_flag: version = self.client.find_array_version() rmt_version = self.rmt_client.find_array_version() if (version >= constants.ARRAY_VERSION and rmt_version >= constants.ARRAY_VERSION): for pool in 
stats['pools']: pool['hypermetro'] = True pool['consistencygroup_support'] = True return stats def _get_volume_type(self, volume): volume_type = None type_id = volume.volume_type_id if type_id: ctxt = context.get_admin_context() volume_type = volume_types.get_volume_type(ctxt, type_id) return volume_type def _get_volume_params(self, volume_type): """Return the parameters for creating the volume.""" specs = {} if volume_type: specs = dict(volume_type).get('extra_specs') opts = self._get_volume_params_from_specs(specs) return opts def _get_consistencygroup_type(self, group): specs = {} opts = {} type_id = group.volume_type_id.split(",") if type_id[0] and len(type_id) == 2: ctxt = context.get_admin_context() volume_type = volume_types.get_volume_type(ctxt, type_id[0]) specs = dict(volume_type).get('extra_specs') opts = self._get_volume_params_from_specs(specs) return opts def _get_volume_params_from_specs(self, specs): """Return the volume parameters from extra specs.""" opts_capability = { 'smarttier': False, 'smartcache': False, 'smartpartition': False, 'thin_provisioning_support': False, 'thick_provisioning_support': False, 'hypermetro': False, 'replication_enabled': False, 'replication_type': 'async', } opts_value = { 'policy': None, 'partitionname': None, 'cachename': None, } opts_associate = { 'smarttier': 'policy', 'smartcache': 'cachename', 'smartpartition': 'partitionname', } opts = self._get_opts_from_specs(opts_capability, opts_value, opts_associate, specs) opts = smartx.SmartX().get_smartx_specs_opts(opts) opts = replication.get_replication_opts(opts) LOG.debug('volume opts %(opts)s.', {'opts': opts}) return opts def _get_opts_from_specs(self, opts_capability, opts_value, opts_associate, specs): """Get the well defined extra specs.""" opts = {} opts.update(opts_capability) opts.update(opts_value) for key, value in specs.items(): # Get the scope, if is using scope format. 
scope = None key_split = key.split(':') if len(key_split) > 2 and key_split[0] != "capabilities": continue if len(key_split) == 1: key = key_split[0].lower() else: scope = key_split[0].lower() key = key_split[1].lower() if ((not scope or scope == 'capabilities') and key in opts_capability): words = value.split() if words and len(words) == 2 and words[0] in ('<is>', '<in>'): opts[key] = words[1].lower() elif key == 'replication_type': LOG.error(_LE("Extra specs must be specified as " "replication_type='<in> sync' or " "'<in> async'.")) else: LOG.error(_LE("Extra specs must be specified as " "capabilities:%s='<is> True'."), key) if ((scope in opts_capability) and (key in opts_value) and (scope in opts_associate) and (opts_associate[scope] == key)): opts[key] = value return opts def _get_lun_params(self, volume, opts): pool_name = volume_utils.extract_host(volume.host, level='pool') params = { 'TYPE': '11', 'NAME': huawei_utils.encode_name(volume.id), 'PARENTTYPE': '216', 'PARENTID': self.client.get_pool_id(pool_name), 'DESCRIPTION': volume.name, 'ALLOCTYPE': opts.get('LUNType', self.configuration.lun_type), 'CAPACITY': huawei_utils.get_volume_size(volume), 'WRITEPOLICY': self.configuration.lun_write_type, 'MIRRORPOLICY': self.configuration.lun_mirror_switch, 'PREFETCHPOLICY': self.configuration.lun_prefetch_type, 'PREFETCHVALUE': self.configuration.lun_prefetch_value, 'DATATRANSFERPOLICY': opts.get('policy', self.configuration.lun_policy), 'READCACHEPOLICY': self.configuration.lun_read_cache_policy, 'WRITECACHEPOLICY': self.configuration.lun_write_cache_policy, } LOG.info(_LI('volume: %(volume)s, lun params: %(params)s.'), {'volume': volume.id, 'params': params}) return params def _create_volume(self, volume, lun_params): # Create LUN on the array. 
model_update = {} lun_info = self.client.create_lun(lun_params) model_update['provider_location'] = lun_info['ID'] admin_metadata = huawei_utils.get_admin_metadata(volume) admin_metadata.update({'huawei_lun_wwn': lun_info['WWN']}) model_update['admin_metadata'] = admin_metadata metadata = huawei_utils.get_volume_metadata(volume) model_update['metadata'] = metadata return lun_info, model_update def _create_base_type_volume(self, opts, volume, volume_type): """Create volume and add some base type. Base type is the services won't conflict with the other service. """ lun_params = self._get_lun_params(volume, opts) lun_info, model_update = self._create_volume(volume, lun_params) lun_id = lun_info['ID'] try: qos = smartx.SmartQos.get_qos_by_volume_type(volume_type) if qos: smart_qos = smartx.SmartQos(self.client) smart_qos.add(qos, lun_id) smartpartition = smartx.SmartPartition(self.client) smartpartition.add(opts, lun_id) smartcache = smartx.SmartCache(self.client) smartcache.add(opts, lun_id) except Exception as err: self._delete_lun_with_check(lun_id) msg = _('Create volume error. Because %s.') % six.text_type(err) raise exception.VolumeBackendAPIException(data=msg) return lun_params, lun_info, model_update def _add_extend_type_to_volume(self, opts, lun_params, lun_info, model_update): """Add the extend type. Extend type is the services may conflict with LUNCopy. So add it after the those services. 
""" lun_id = lun_info['ID'] if opts.get('hypermetro') == 'true': metro = hypermetro.HuaweiHyperMetro(self.client, self.rmt_client, self.configuration) try: metro_info = metro.create_hypermetro(lun_id, lun_params) model_update['metadata'].update(metro_info) except exception.VolumeBackendAPIException as err: LOG.error(_LE('Create hypermetro error: %s.'), err) self._delete_lun_with_check(lun_id) raise if opts.get('replication_enabled') == 'true': replica_model = opts.get('replication_type') try: replica_info = self.replica.create_replica(lun_info, replica_model) model_update.update(replica_info) except Exception as err: LOG.exception(_LE('Create replication volume error.')) self._delete_lun_with_check(lun_id) raise return model_update def create_volume(self, volume): """Create a volume.""" volume_type = self._get_volume_type(volume) opts = self._get_volume_params(volume_type) if (opts.get('hypermetro') == 'true' and opts.get('replication_enabled') == 'true'): err_msg = _("Hypermetro and Replication can not be " "used in the same volume_type.") LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) lun_params, lun_info, model_update = ( self._create_base_type_volume(opts, volume, volume_type)) model_update = self._add_extend_type_to_volume(opts, lun_params, lun_info, model_update) return model_update def _delete_volume(self, volume): lun_id = volume.provider_location if not lun_id: return lun_group_ids = self.client.get_lungroupids_by_lunid(lun_id) if lun_group_ids and len(lun_group_ids) == 1: self.client.remove_lun_from_lungroup(lun_group_ids[0], lun_id) self.client.delete_lun(lun_id) def delete_volume(self, volume): """Delete a volume. Three steps: Firstly, remove associate from lungroup. Secondly, remove associate from QoS policy. Thirdly, remove the lun. 
        """
        lun_id = self._check_volume_exist_on_array(
            volume, constants.VOLUME_NOT_EXISTS_WARN)
        if not lun_id:
            # LUN already gone from the array; nothing to clean up.
            return

        # Step 1: detach any QoS policy before the LUN is removed.
        qos_id = self.client.get_qosid_by_lunid(lun_id)
        if qos_id:
            smart_qos = smartx.SmartQos(self.client)
            smart_qos.remove(qos_id, lun_id)

        # Step 2: tear down a hypermetro pair, if one was recorded in the
        # volume metadata. On failure the LUN is still deleted before
        # re-raising, so the array is not left with an orphan LUN.
        metadata = huawei_utils.get_volume_metadata(volume)
        if 'hypermetro_id' in metadata:
            metro = hypermetro.HuaweiHyperMetro(self.client,
                                                self.rmt_client,
                                                self.configuration)
            try:
                metro.delete_hypermetro(volume)
            except exception.VolumeBackendAPIException as err:
                LOG.error(_LE('Delete hypermetro error: %s.'), err)
                # We have checked the LUN WWN above,
                # no need to check again here.
                self._delete_volume(volume)
                raise

        # Step 3: delete a replication relationship, if present. The
        # save_and_reraise block deletes the LUN before propagating.
        replica_data = volume.replication_driver_data
        if replica_data:
            try:
                self.replica.delete_replica(volume)
            except exception.VolumeBackendAPIException as err:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Delete replication error."))
                    self._delete_volume(volume)

        # Step 4: finally remove the LUN itself.
        self._delete_volume(volume)

    def _delete_lun_with_check(self, lun_id, lun_wwn=None):
        """Delete a LUN (detaching QoS first) if it still exists.

        Used for rollback paths; silently returns when *lun_id* is empty
        or the LUN/WWN pair is not found on the array.
        """
        if not lun_id:
            return

        if self.client.check_lun_exist(lun_id, lun_wwn):
            qos_id = self.client.get_qosid_by_lunid(lun_id)
            if qos_id:
                smart_qos = smartx.SmartQos(self.client)
                smart_qos.remove(qos_id, lun_id)

            self.client.delete_lun(lun_id)

    def _is_lun_migration_complete(self, src_id, dst_id):
        """Poll the array's migration task list for src->dst completion.

        Returns True when the task reports MIGRATION_COMPLETE, False when
        it is still running. Raises VolumeBackendAPIException when the
        task reports MIGRATION_FAULT or cannot be found at all.
        """
        result = self.client.get_lun_migration_task()
        found_migration_task = False
        if 'data' not in result:
            return False

        for item in result['data']:
            if (src_id == item['PARENTID']
                    and dst_id == item['TARGETLUNID']):
                found_migration_task = True
                if constants.MIGRATION_COMPLETE == item['RUNNINGSTATUS']:
                    return True
                if constants.MIGRATION_FAULT == item['RUNNINGSTATUS']:
                    msg = _("Lun migration error.")
                    LOG.error(msg)
                    raise exception.VolumeBackendAPIException(data=msg)

        if not found_migration_task:
            err_msg = _("Cannot find migration task.")
            LOG.error(err_msg)
            raise exception.VolumeBackendAPIException(data=err_msg)

        return False

    def _is_lun_migration_exist(self, src_id,
dst_id): try: result = self.client.get_lun_migration_task() except Exception: LOG.error(_LE("Get LUN migration error.")) return False if 'data' in result: for item in result['data']: if (src_id == item['PARENTID'] and dst_id == item['TARGETLUNID']): return True return False def _migrate_lun(self, src_id, dst_id): try: self.client.create_lun_migration(src_id, dst_id) def _is_lun_migration_complete(): return self._is_lun_migration_complete(src_id, dst_id) wait_interval = constants.MIGRATION_WAIT_INTERVAL huawei_utils.wait_for_condition(_is_lun_migration_complete, wait_interval, self.configuration.lun_timeout) # Clean up if migration failed. except Exception as ex: raise exception.VolumeBackendAPIException(data=ex) finally: if self._is_lun_migration_exist(src_id, dst_id): self.client.delete_lun_migration(src_id, dst_id) self._delete_lun_with_check(dst_id) LOG.debug("Migrate lun %s successfully.", src_id) return True def _wait_volume_ready(self, lun_id): wait_interval = self.configuration.lun_ready_wait_interval def _volume_ready(): result = self.client.get_lun_info(lun_id) if (result['HEALTHSTATUS'] == constants.STATUS_HEALTH and result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY): return True return False huawei_utils.wait_for_condition(_volume_ready, wait_interval, wait_interval * 10) def _get_original_status(self, volume): return 'in-use' if volume.volume_attachment else 'available' def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status=None): original_name = huawei_utils.encode_name(volume.id) current_name = huawei_utils.encode_name(new_volume.id) lun_id = self.client.get_lun_id_by_name(current_name) try: self.client.rename_lun(lun_id, original_name) except exception.VolumeBackendAPIException: LOG.error(_LE('Unable to rename lun %s on array.'), current_name) return {'_name_id': new_volume.name_id} LOG.debug("Rename lun from %(current_name)s to %(original_name)s " "successfully.", {'current_name': current_name, 'original_name': 
original_name}) model_update = {'_name_id': None} return model_update def migrate_volume(self, ctxt, volume, host, new_type=None): """Migrate a volume within the same array.""" self._check_volume_exist_on_array(volume, constants.VOLUME_NOT_EXISTS_RAISE) # NOTE(jlc): Replication volume can't migrate. But retype # can remove replication relationship first then do migrate. # So don't add this judgement into _check_migration_valid(). volume_type = self._get_volume_type(volume) opts = self._get_volume_params(volume_type) if opts.get('replication_enabled') == 'true': return (False, None) return self._migrate_volume(volume, host, new_type) def _check_migration_valid(self, host, volume): if 'pool_name' not in host['capabilities']: return False target_device = host['capabilities']['location_info'] # Source and destination should be on same array. if target_device != self.client.device_id: return False # Same protocol should be used if volume is in-use. protocol = self.configuration.san_protocol if (host['capabilities']['storage_protocol'] != protocol and self._get_original_status(volume) == 'in-use'): return False pool_name = host['capabilities']['pool_name'] if len(pool_name) == 0: return False return True def _migrate_volume(self, volume, host, new_type=None): if not self._check_migration_valid(host, volume): return (False, None) type_id = volume.volume_type_id volume_type = None if type_id: volume_type = volume_types.get_volume_type(None, type_id) pool_name = host['capabilities']['pool_name'] pools = self.client.get_all_pools() pool_info = self.client.get_pool_info(pool_name, pools) src_volume_name = huawei_utils.encode_name(volume.id) dst_volume_name = six.text_type(hash(src_volume_name)) src_id = volume.provider_location opts = None qos = None if new_type: # If new type exists, use new type. 
new_specs = new_type['extra_specs'] opts = self._get_volume_params_from_specs(new_specs) if 'LUNType' not in opts: opts['LUNType'] = self.configuration.lun_type qos = smartx.SmartQos.get_qos_by_volume_type(new_type) elif volume_type: qos = smartx.SmartQos.get_qos_by_volume_type(volume_type) if not opts: opts = self._get_volume_params(volume_type) lun_info = self.client.get_lun_info(src_id) policy = lun_info['DATATRANSFERPOLICY'] if opts['policy']: policy = opts['policy'] lun_params = { 'NAME': dst_volume_name, 'PARENTID': pool_info['ID'], 'DESCRIPTION': lun_info['DESCRIPTION'], 'ALLOCTYPE': opts.get('LUNType', lun_info['ALLOCTYPE']), 'CAPACITY': lun_info['CAPACITY'], 'WRITEPOLICY': lun_info['WRITEPOLICY'], 'MIRRORPOLICY': lun_info['MIRRORPOLICY'], 'PREFETCHPOLICY': lun_info['PREFETCHPOLICY'], 'PREFETCHVALUE': lun_info['PREFETCHVALUE'], 'DATATRANSFERPOLICY': policy, 'READCACHEPOLICY': lun_info['READCACHEPOLICY'], 'WRITECACHEPOLICY': lun_info['WRITECACHEPOLICY'], 'OWNINGCONTROLLER': lun_info['OWNINGCONTROLLER'], } lun_info = self.client.create_lun(lun_params) lun_id = lun_info['ID'] if qos: LOG.info(_LI('QoS: %s.'), qos) SmartQos = smartx.SmartQos(self.client) SmartQos.add(qos, lun_id) if opts: smartpartition = smartx.SmartPartition(self.client) smartpartition.add(opts, lun_id) smartcache = smartx.SmartCache(self.client) smartcache.add(opts, lun_id) dst_id = lun_info['ID'] self._wait_volume_ready(dst_id) moved = self._migrate_lun(src_id, dst_id) return moved, {} def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from a snapshot. We use LUNcopy to copy a new volume from snapshot. The time needed increases as volume size does. 
""" volume_type = self._get_volume_type(volume) opts = self._get_volume_params(volume_type) if (opts.get('hypermetro') == 'true' and opts.get('replication_enabled') == 'true'): err_msg = _("Hypermetro and Replication can not be " "used in the same volume_type.") LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) snapshotname = huawei_utils.encode_name(snapshot.id) snapshot_id = snapshot.provider_location if snapshot_id is None: snapshot_id = self.client.get_snapshot_id_by_name(snapshotname) if snapshot_id is None: err_msg = (_( 'create_volume_from_snapshot: Snapshot %(name)s ' 'does not exist.') % {'name': snapshotname}) LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) lun_params, lun_info, model_update = ( self._create_base_type_volume(opts, volume, volume_type)) tgt_lun_id = model_update['provider_location'] luncopy_name = huawei_utils.encode_name(volume.id) LOG.info(_LI( 'create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, ' 'tgt_lun_id: %(tgt_lun_id)s, copy_name: %(copy_name)s.'), {'src_lun_id': snapshot_id, 'tgt_lun_id': tgt_lun_id, 'copy_name': luncopy_name}) wait_interval = self.configuration.lun_ready_wait_interval def _volume_ready(): result = self.client.get_lun_info(tgt_lun_id) if (result['HEALTHSTATUS'] == constants.STATUS_HEALTH and result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY): return True return False huawei_utils.wait_for_condition(_volume_ready, wait_interval, wait_interval * 10) self._copy_volume(volume, luncopy_name, snapshot_id, tgt_lun_id) # NOTE(jlc): Actually, we just only support replication here right # now, not hypermetro. model_update = self._add_extend_type_to_volume(opts, lun_params, lun_info, model_update) return model_update def create_cloned_volume(self, volume, src_vref): """Clone a new volume from an existing volume.""" self._check_volume_exist_on_array(src_vref, constants.VOLUME_NOT_EXISTS_RAISE) # Form the snapshot structure. 
snapshot = Snapshot(id=uuid.uuid4().__str__(), volume_id=src_vref.id, volume=src_vref, provider_location=None) # Create snapshot. self.create_snapshot(snapshot) try: # Create volume from snapshot. model_update = self.create_volume_from_snapshot(volume, snapshot) finally: try: # Delete snapshot. self.delete_snapshot(snapshot) except exception.VolumeBackendAPIException: LOG.warning(_LW( 'Failure deleting the snapshot %(snapshot_id)s ' 'of volume %(volume_id)s.'), {'snapshot_id': snapshot.id, 'volume_id': src_vref.id},) return model_update def _check_volume_exist_on_array(self, volume, action): """Check whether the volume exists on the array. If the volume exists on the array, return the LUN ID. If not exists, raise or log warning. """ # Firstly, try to find LUN ID by volume.provider_location. lun_id = volume.provider_location # If LUN ID not recorded, find LUN ID by LUN NAME. if not lun_id: volume_name = huawei_utils.encode_name(volume.id) lun_id = self.client.get_lun_id_by_name(volume_name) if not lun_id: msg = (_("Volume %s does not exist on the array.") % volume.id) if action == constants.VOLUME_NOT_EXISTS_WARN: LOG.warning(msg) if action == constants.VOLUME_NOT_EXISTS_RAISE: raise exception.VolumeBackendAPIException(data=msg) return metadata = huawei_utils.get_admin_metadata(volume) lun_wwn = metadata.get('huawei_lun_wwn') if metadata else None if not lun_wwn: LOG.debug("No LUN WWN recorded for volume %s.", volume.id) if not self.client.check_lun_exist(lun_id, lun_wwn): msg = (_("Volume %s does not exist on the array.") % volume.id) if action == constants.VOLUME_NOT_EXISTS_WARN: LOG.warning(msg) if action == constants.VOLUME_NOT_EXISTS_RAISE: raise exception.VolumeBackendAPIException(data=msg) return return lun_id def extend_volume(self, volume, new_size): """Extend a volume.""" lun_id = self._check_volume_exist_on_array( volume, constants.VOLUME_NOT_EXISTS_RAISE) volume_type = self._get_volume_type(volume) opts = self._get_volume_params(volume_type) if 
opts.get('replication_enabled') == 'true': msg = (_("Can't extend replication volume, volume: %(id)s") % {"id": volume.id}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) lun_info = self.client.get_lun_info(lun_id) old_size = int(lun_info.get('CAPACITY')) new_size = int(new_size) * units.Gi / 512 if new_size == old_size: LOG.info(_LI("New size is equal to the real size from backend" " storage, no need to extend." " realsize: %(oldsize)s, newsize: %(newsize)s."), {'oldsize': old_size, 'newsize': new_size}) return if new_size < old_size: msg = (_("New size should be bigger than the real size from " "backend storage." " realsize: %(oldsize)s, newsize: %(newsize)s."), {'oldsize': old_size, 'newsize': new_size}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) volume_name = huawei_utils.encode_name(volume.id) LOG.info(_LI( 'Extend volume: %(volumename)s, ' 'oldsize: %(oldsize)s, newsize: %(newsize)s.'), {'volumename': volume_name, 'oldsize': old_size, 'newsize': new_size}) self.client.extend_lun(lun_id, new_size) def create_snapshot(self, snapshot): volume = snapshot.volume if not volume: msg = (_("Can't get volume id from snapshot, snapshot: %(id)s") % {"id": snapshot.id}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) volume_name = huawei_utils.encode_name(snapshot.volume_id) lun_id = self.client.get_lun_id(volume, volume_name) snapshot_name = huawei_utils.encode_name(snapshot.id) snapshot_description = snapshot.id snapshot_info = self.client.create_snapshot(lun_id, snapshot_name, snapshot_description) snapshot_id = snapshot_info['ID'] self.client.activate_snapshot(snapshot_id) return {'provider_location': snapshot_info['ID'], 'lun_info': snapshot_info} def delete_snapshot(self, snapshot): snapshotname = huawei_utils.encode_name(snapshot.id) volume_name = huawei_utils.encode_name(snapshot.volume_id) LOG.info(_LI( 'stop_snapshot: snapshot name: %(snapshot)s, ' 'volume name: %(volume)s.'), {'snapshot': 
snapshotname, 'volume': volume_name},) snapshot_id = snapshot.provider_location if snapshot_id is None: snapshot_id = self.client.get_snapshot_id_by_name(snapshotname) if snapshot_id and self.client.check_snapshot_exist(snapshot_id): self.client.stop_snapshot(snapshot_id) self.client.delete_snapshot(snapshot_id) else: LOG.warning(_LW("Can't find snapshot on the array.")) def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type.""" LOG.debug("Enter retype: id=%(id)s, new_type=%(new_type)s, " "diff=%(diff)s, host=%(host)s.", {'id': volume.id, 'new_type': new_type, 'diff': diff, 'host': host}) self._check_volume_exist_on_array( volume, constants.VOLUME_NOT_EXISTS_RAISE) # Check what changes are needed migration, change_opts, lun_id = self.determine_changes_when_retype( volume, new_type, host) model_update = {} replica_enabled_change = change_opts.get('replication_enabled') replica_type_change = change_opts.get('replication_type') if replica_enabled_change and replica_enabled_change[0] == 'true': try: self.replica.delete_replica(volume) model_update.update({'replication_status': 'disabled', 'replication_driver_data': None}) except exception.VolumeBackendAPIException: LOG.exception(_LE('Retype volume error. ' 'Delete replication failed.')) return False try: if migration: LOG.debug("Begin to migrate LUN(id: %(lun_id)s) with " "change %(change_opts)s.", {"lun_id": lun_id, "change_opts": change_opts}) if not self._migrate_volume(volume, host, new_type): LOG.warning(_LW("Storage-assisted migration failed during " "retype.")) return False else: # Modify lun to change policy self.modify_lun(lun_id, change_opts) except exception.VolumeBackendAPIException: LOG.exception(_LE('Retype volume error.')) return False if replica_enabled_change and replica_enabled_change[1] == 'true': try: # If replica_enabled_change is not None, the # replica_type_change won't be None. See function # determine_changes_when_retype. 
lun_info = self.client.get_lun_info(lun_id) replica_info = self.replica.create_replica( lun_info, replica_type_change[1]) model_update.update(replica_info) except exception.VolumeBackendAPIException: LOG.exception(_LE('Retype volume error. ' 'Create replication failed.')) return False return (True, model_update) def modify_lun(self, lun_id, change_opts): if change_opts.get('partitionid'): old, new = change_opts['partitionid'] old_id = old[0] old_name = old[1] new_id = new[0] new_name = new[1] if old_id: self.client.remove_lun_from_partition(lun_id, old_id) if new_id: self.client.add_lun_to_partition(lun_id, new_id) LOG.info(_LI("Retype LUN(id: %(lun_id)s) smartpartition from " "(name: %(old_name)s, id: %(old_id)s) to " "(name: %(new_name)s, id: %(new_id)s) success."), {"lun_id": lun_id, "old_id": old_id, "old_name": old_name, "new_id": new_id, "new_name": new_name}) if change_opts.get('cacheid'): old, new = change_opts['cacheid'] old_id = old[0] old_name = old[1] new_id = new[0] new_name = new[1] if old_id: self.client.remove_lun_from_cache(lun_id, old_id) if new_id: self.client.add_lun_to_cache(lun_id, new_id) LOG.info(_LI("Retype LUN(id: %(lun_id)s) smartcache from " "(name: %(old_name)s, id: %(old_id)s) to " "(name: %(new_name)s, id: %(new_id)s) successfully."), {'lun_id': lun_id, 'old_id': old_id, "old_name": old_name, 'new_id': new_id, "new_name": new_name}) if change_opts.get('policy'): old_policy, new_policy = change_opts['policy'] self.client.change_lun_smarttier(lun_id, new_policy) LOG.info(_LI("Retype LUN(id: %(lun_id)s) smarttier policy from " "%(old_policy)s to %(new_policy)s success."), {'lun_id': lun_id, 'old_policy': old_policy, 'new_policy': new_policy}) if change_opts.get('qos'): old_qos, new_qos = change_opts['qos'] old_qos_id = old_qos[0] old_qos_value = old_qos[1] if old_qos_id: smart_qos = smartx.SmartQos(self.client) smart_qos.remove(old_qos_id, lun_id) if new_qos: smart_qos = smartx.SmartQos(self.client) smart_qos.add(new_qos, lun_id) 
LOG.info(_LI("Retype LUN(id: %(lun_id)s) smartqos from " "%(old_qos_value)s to %(new_qos)s success."), {'lun_id': lun_id, 'old_qos_value': old_qos_value, 'new_qos': new_qos}) def get_lun_specs(self, lun_id): lun_opts = { 'policy': None, 'partitionid': None, 'cacheid': None, 'LUNType': None, } lun_info = self.client.get_lun_info(lun_id) lun_opts['LUNType'] = int(lun_info['ALLOCTYPE']) if lun_info.get('DATATRANSFERPOLICY'): lun_opts['policy'] = lun_info['DATATRANSFERPOLICY'] if lun_info.get('SMARTCACHEPARTITIONID'): lun_opts['cacheid'] = lun_info['SMARTCACHEPARTITIONID'] if lun_info.get('CACHEPARTITIONID'): lun_opts['partitionid'] = lun_info['CACHEPARTITIONID'] return lun_opts def _check_needed_changes(self, lun_id, old_opts, new_opts, change_opts, new_type): new_cache_id = None new_cache_name = new_opts['cachename'] if new_cache_name: new_cache_id = self.client.get_cache_id_by_name(new_cache_name) if new_cache_id is None: msg = (_( "Can't find cache name on the array, cache name is: " "%(name)s.") % {'name': new_cache_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) new_partition_id = None new_partition_name = new_opts['partitionname'] if new_partition_name: new_partition_id = self.client.get_partition_id_by_name( new_partition_name) if new_partition_id is None: msg = (_( "Can't find partition name on the array, partition name " "is: %(name)s.") % {'name': new_partition_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # smarttier if old_opts['policy'] != new_opts['policy']: change_opts['policy'] = (old_opts['policy'], new_opts['policy']) # smartcache old_cache_id = old_opts['cacheid'] if old_cache_id != new_cache_id: old_cache_name = None if old_cache_id: cache_info = self.client.get_cache_info_by_id(old_cache_id) old_cache_name = cache_info['NAME'] change_opts['cacheid'] = ([old_cache_id, old_cache_name], [new_cache_id, new_cache_name]) # smartpartition old_partition_id = old_opts['partitionid'] if old_partition_id 
!= new_partition_id: old_partition_name = None if old_partition_id: partition_info = self.client.get_partition_info_by_id( old_partition_id) old_partition_name = partition_info['NAME'] change_opts['partitionid'] = ([old_partition_id, old_partition_name], [new_partition_id, new_partition_name]) # smartqos new_qos = smartx.SmartQos.get_qos_by_volume_type(new_type) old_qos_id = self.client.get_qosid_by_lunid(lun_id) old_qos = self._get_qos_specs_from_array(old_qos_id) if old_qos != new_qos: change_opts['qos'] = ([old_qos_id, old_qos], new_qos) return change_opts def determine_changes_when_retype(self, volume, new_type, host): migration = False change_opts = { 'policy': None, 'partitionid': None, 'cacheid': None, 'qos': None, 'host': None, 'LUNType': None, 'replication_enabled': None, 'replication_type': None, } lun_id = volume.provider_location old_opts = self.get_lun_specs(lun_id) new_specs = new_type['extra_specs'] new_opts = self._get_volume_params_from_specs(new_specs) if 'LUNType' not in new_opts: new_opts['LUNType'] = self.configuration.lun_type if volume.host != host['host']: migration = True change_opts['host'] = (volume.host, host['host']) if old_opts['LUNType'] != new_opts['LUNType']: migration = True change_opts['LUNType'] = (old_opts['LUNType'], new_opts['LUNType']) volume_type = self._get_volume_type(volume) volume_opts = self._get_volume_params(volume_type) if (volume_opts['replication_enabled'] == 'true' or new_opts['replication_enabled'] == 'true'): # If replication_enabled changes, # then replication_type in change_opts will be set. change_opts['replication_enabled'] = ( volume_opts['replication_enabled'], new_opts['replication_enabled']) change_opts['replication_type'] = (volume_opts['replication_type'], new_opts['replication_type']) change_opts = self._check_needed_changes(lun_id, old_opts, new_opts, change_opts, new_type) LOG.debug("Determine changes when retype. 
Migration: " "%(migration)s, change_opts: %(change_opts)s.", {'migration': migration, 'change_opts': change_opts}) return migration, change_opts, lun_id def _get_qos_specs_from_array(self, qos_id): qos = {} qos_info = {} if qos_id: qos_info = self.client.get_qos_info(qos_id) for key, value in qos_info.items(): key = key.upper() if key in constants.QOS_KEYS: if key == 'LATENCY' and value == '0': continue else: qos[key] = value return qos def create_export(self, context, volume, connector): """Export a volume.""" pass def ensure_export(self, context, volume): """Synchronously recreate an export for a volume.""" pass def remove_export(self, context, volume): """Remove an export for a volume.""" pass def create_export_snapshot(self, context, snapshot, connector): """Export a snapshot.""" pass def remove_export_snapshot(self, context, snapshot): """Remove an export for a snapshot.""" pass def backup_use_temp_snapshot(self): # This config option has a default to be False, So just return it. return self.configuration.safe_get("backup_use_temp_snapshot") def _copy_volume(self, volume, copy_name, src_lun, tgt_lun): luncopy_id = self.client.create_luncopy(copy_name, src_lun, tgt_lun) wait_interval = self.configuration.lun_copy_wait_interval try: self.client.start_luncopy(luncopy_id) def _luncopy_complete(): luncopy_info = self.client.get_luncopy_info(luncopy_id) if luncopy_info['status'] == constants.STATUS_LUNCOPY_READY: # luncopy_info['status'] means for the running status of # the luncopy. If luncopy_info['status'] is equal to '40', # this luncopy is completely ready. return True elif luncopy_info['state'] != constants.STATUS_HEALTH: # luncopy_info['state'] means for the healthy status of the # luncopy. If luncopy_info['state'] is not equal to '1', # this means that an error occurred during the LUNcopy # operation and we should abort it. err_msg = (_( 'An error occurred during the LUNcopy operation. ' 'LUNcopy name: %(luncopyname)s. ' 'LUNcopy status: %(luncopystatus)s. 
' 'LUNcopy state: %(luncopystate)s.') % {'luncopyname': luncopy_id, 'luncopystatus': luncopy_info['status'], 'luncopystate': luncopy_info['state']},) LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) huawei_utils.wait_for_condition(_luncopy_complete, wait_interval, self.configuration.lun_timeout) except Exception: with excutils.save_and_reraise_exception(): self.client.delete_luncopy(luncopy_id) self.delete_volume(volume) self.client.delete_luncopy(luncopy_id) def _check_lun_valid_for_manage(self, lun_info, external_ref): lun_id = lun_info.get('ID') # Check whether the LUN is already in LUN group. if lun_info.get('ISADD2LUNGROUP') == 'true': msg = (_("Can't import LUN %s to Cinder. Already exists in a LUN " "group.") % lun_id) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check whether the LUN is Normal. if lun_info.get('HEALTHSTATUS') != constants.STATUS_HEALTH: msg = _("Can't import LUN %s to Cinder. LUN status is not " "normal.") % lun_id raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check whether the LUN exists in a HyperMetroPair. try: hypermetro_pairs = self.client.get_hypermetro_pairs() except exception.VolumeBackendAPIException: hypermetro_pairs = [] LOG.debug("Can't get hypermetro info, pass the check.") for pair in hypermetro_pairs: if pair.get('LOCALOBJID') == lun_id: msg = (_("Can't import LUN %s to Cinder. Already exists in a " "HyperMetroPair.") % lun_id) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check whether the LUN exists in a SplitMirror. try: split_mirrors = self.client.get_split_mirrors() except exception.VolumeBackendAPIException as ex: if re.search('License is unavailable', ex.msg): # Can't check whether the LUN has SplitMirror with it, # just pass the check and log it. 
split_mirrors = [] LOG.warning(_LW('No license for SplitMirror.')) else: msg = _("Failed to get SplitMirror.") raise exception.VolumeBackendAPIException(data=msg) for mirror in split_mirrors: try: target_luns = self.client.get_target_luns(mirror.get('ID')) except exception.VolumeBackendAPIException: msg = _("Failed to get target LUN of SplitMirror.") raise exception.VolumeBackendAPIException(data=msg) if (mirror.get('PRILUNID') == lun_id) or (lun_id in target_luns): msg = (_("Can't import LUN %s to Cinder. Already exists in a " "SplitMirror.") % lun_id) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check whether the LUN exists in a migration task. try: migration_tasks = self.client.get_migration_task() except exception.VolumeBackendAPIException as ex: if re.search('License is unavailable', ex.msg): # Can't check whether the LUN has migration task with it, # just pass the check and log it. migration_tasks = [] LOG.warning(_LW('No license for migration.')) else: msg = _("Failed to get migration task.") raise exception.VolumeBackendAPIException(data=msg) for migration in migration_tasks: if lun_id in (migration.get('PARENTID'), migration.get('TARGETLUNID')): msg = (_("Can't import LUN %s to Cinder. Already exists in a " "migration task.") % lun_id) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check whether the LUN exists in a LUN copy task. lun_copy = lun_info.get('LUNCOPYIDS') if lun_copy and lun_copy[1:-1]: msg = (_("Can't import LUN %s to Cinder. Already exists in " "a LUN copy task.") % lun_id) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check whether the LUN exists in a remote replication task. rmt_replication = lun_info.get('REMOTEREPLICATIONIDS') if rmt_replication and rmt_replication[1:-1]: msg = (_("Can't import LUN %s to Cinder. 
Already exists in " "a remote replication task.") % lun_id) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check whether the LUN exists in a LUN mirror. if self.client.is_lun_in_mirror(lun_id): msg = (_("Can't import LUN %s to Cinder. Already exists in " "a LUN mirror.") % lun_id) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) def manage_existing(self, volume, external_ref): """Manage an existing volume on the backend storage.""" # Check whether the LUN is belonged to the specified pool. pool = volume_utils.extract_host(volume.host, 'pool') LOG.debug("Pool specified is: %s.", pool) lun_info = self._get_lun_info_by_ref(external_ref) lun_id = lun_info.get('ID') description = lun_info.get('DESCRIPTION', '') if len(description) <= ( constants.MAX_VOL_DESCRIPTION - len(volume.name) - 1): description = volume.name + ' ' + description lun_pool = lun_info.get('PARENTNAME') LOG.debug("Storage pool of existing LUN %(lun)s is %(pool)s.", {"lun": lun_id, "pool": lun_pool}) if pool != lun_pool: msg = (_("The specified LUN does not belong to the given " "pool: %s.") % pool) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check other stuffs to determine whether this LUN can be imported. self._check_lun_valid_for_manage(lun_info, external_ref) type_id = volume.volume_type_id new_opts = None if type_id: # Handle volume type if specified. old_opts = self.get_lun_specs(lun_id) volume_type = volume_types.get_volume_type(None, type_id) new_specs = volume_type.get('extra_specs') new_opts = self._get_volume_params_from_specs(new_specs) if ('LUNType' in new_opts and old_opts['LUNType'] != new_opts['LUNType']): msg = (_("Can't import LUN %(lun_id)s to Cinder. 
" "LUN type mismatched.") % lun_id) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) if volume_type: change_opts = {'policy': None, 'partitionid': None, 'cacheid': None, 'qos': None} change_opts = self._check_needed_changes(lun_id, old_opts, new_opts, change_opts, volume_type) self.modify_lun(lun_id, change_opts) # Rename the LUN to make it manageable for Cinder. new_name = huawei_utils.encode_name(volume.id) LOG.debug("Rename LUN %(old_name)s to %(new_name)s.", {'old_name': lun_info.get('NAME'), 'new_name': new_name}) self.client.rename_lun(lun_id, new_name, description) metadata = huawei_utils.get_admin_metadata(volume) metadata.update({'huawei_lun_wwn': lun_info['WWN']}) model_update = {} model_update.update({'admin_metadata': metadata}) model_update.update({'provider_location': lun_id}) if new_opts and new_opts.get('replication_enabled'): LOG.debug("Manage volume need to create replication.") try: lun_info = self.client.get_lun_info(lun_id) replica_info = self.replica.create_replica( lun_info, new_opts.get('replication_type')) model_update.update(replica_info) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Manage exist volume failed.")) return model_update def _get_lun_info_by_ref(self, external_ref): LOG.debug("Get external_ref: %s", external_ref) name = external_ref.get('source-name') id = external_ref.get('source-id') if not (name or id): msg = _('Must specify source-name or source-id.') raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) lun_id = id or self.client.get_lun_id_by_name(name) if not lun_id: msg = _("Can't find LUN on the array, please check the " "source-name or source-id.") raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) lun_info = self.client.get_lun_info(lun_id) return lun_info def unmanage(self, volume): """Export Huawei volume from Cinder.""" LOG.debug("Unmanage volume: %s.", volume.id) lun_id = 
self._check_volume_exist_on_array( volume, constants.VOLUME_NOT_EXISTS_WARN) if not lun_id: return lun_name = huawei_utils.encode_name(volume.id) new_name = 'unmged_' + lun_name LOG.debug("Rename LUN %(lun_name)s to %(new_name)s.", {'lun_name': lun_name, 'new_name': new_name}) try: self.client.rename_lun(lun_id, new_name) except Exception: LOG.warning(_LW("Rename lun %(lun_id)s fails when " "unmanaging volume %(volume)s."), {"lun_id": lun_id, "volume": volume.id}) def manage_existing_get_size(self, volume, external_ref): """Get the size of the existing volume.""" lun_info = self._get_lun_info_by_ref(external_ref) size = int(math.ceil(lun_info.get('CAPACITY') / constants.CAPACITY_UNIT)) return size def _check_snapshot_valid_for_manage(self, snapshot_info, external_ref): snapshot_id = snapshot_info.get('ID') # Check whether the snapshot is normal. if snapshot_info.get('HEALTHSTATUS') != constants.STATUS_HEALTH: msg = _("Can't import snapshot %s to Cinder. " "Snapshot status is not normal" " or running status is not online.") % snapshot_id raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) if snapshot_info.get('EXPOSEDTOINITIATOR') != 'false': msg = _("Can't import snapshot %s to Cinder. 
" "Snapshot is exposed to initiator.") % snapshot_id raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) def _get_snapshot_info_by_ref(self, external_ref): LOG.debug("Get snapshot external_ref: %s.", external_ref) name = external_ref.get('source-name') id = external_ref.get('source-id') if not (name or id): msg = _('Must specify snapshot source-name or source-id.') raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) snapshot_id = id or self.client.get_snapshot_id_by_name(name) if not snapshot_id: msg = _("Can't find snapshot on array, please check the " "source-name or source-id.") raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) snapshot_info = self.client.get_snapshot_info(snapshot_id) return snapshot_info def manage_existing_snapshot(self, snapshot, existing_ref): snapshot_info = self._get_snapshot_info_by_ref(existing_ref) snapshot_id = snapshot_info.get('ID') volume = snapshot.volume lun_id = volume.provider_location if lun_id != snapshot_info.get('PARENTID'): msg = (_("Can't import snapshot %s to Cinder. " "Snapshot doesn't belong to volume."), snapshot_id) raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) # Check whether this snapshot can be imported. self._check_snapshot_valid_for_manage(snapshot_info, existing_ref) # Rename the snapshot to make it manageable for Cinder. 
description = snapshot.id snapshot_name = huawei_utils.encode_name(snapshot.id) self.client.rename_snapshot(snapshot_id, snapshot_name, description) if snapshot_info.get('RUNNINGSTATUS') != constants.STATUS_ACTIVE: self.client.activate_snapshot(snapshot_id) LOG.debug("Rename snapshot %(old_name)s to %(new_name)s.", {'old_name': snapshot_info.get('NAME'), 'new_name': snapshot_name}) return {'provider_location': snapshot_id} def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Get the size of the existing snapshot.""" snapshot_info = self._get_snapshot_info_by_ref(existing_ref) size = (float(snapshot_info.get('USERCAPACITY')) // constants.CAPACITY_UNIT) remainder = (float(snapshot_info.get('USERCAPACITY')) % constants.CAPACITY_UNIT) if int(remainder) > 0: msg = _("Snapshot size must be multiple of 1 GB.") raise exception.VolumeBackendAPIException(data=msg) return int(size) def unmanage_snapshot(self, snapshot): """Unmanage the specified snapshot from Cinder management.""" LOG.debug("Unmanage snapshot: %s.", snapshot.id) snapshot_name = huawei_utils.encode_name(snapshot.id) snapshot_id = self.client.get_snapshot_id_by_name(snapshot_name) if not snapshot_id: LOG.warning(_LW("Can't find snapshot on the array: %s."), snapshot_name) return new_name = 'unmged_' + snapshot_name LOG.debug("Rename snapshot %(snapshot_name)s to %(new_name)s.", {'snapshot_name': snapshot_name, 'new_name': new_name}) try: self.client.rename_snapshot(snapshot_id, new_name) except Exception: LOG.warning(_LW("Failed to rename snapshot %(snapshot_id)s, " "snapshot name on array is %(snapshot_name)s."), {'snapshot_id': snapshot.id, 'snapshot_name': snapshot_name}) def remove_host_with_check(self, host_id): wwns_in_host = ( self.client.get_host_fc_initiators(host_id)) iqns_in_host = ( self.client.get_host_iscsi_initiators(host_id)) if not (wwns_in_host or iqns_in_host or self.client.is_host_associated_to_hostgroup(host_id)): self.client.remove_host(host_id) def 
create_consistencygroup(self, context, group):
        """Creates a consistencygroup."""
        model_update = {'status': 'available'}
        opts = self._get_consistencygroup_type(group)
        if (opts.get('hypermetro') == 'true'):
            # Hypermetro CGs are created on the array right away.
            metro = hypermetro.HuaweiHyperMetro(self.client,
                                                self.rmt_client,
                                                self.configuration)
            metro.create_consistencygroup(group)
            return model_update

        # Array will create CG at create_cgsnapshot time. Cinder will
        # maintain the CG and volumes relationship in the db.
        return model_update

    def delete_consistencygroup(self, context, group, volumes):
        """Delete a consistencygroup and all of its member volumes."""
        opts = self._get_consistencygroup_type(group)
        if opts.get('hypermetro') == 'true':
            metro = hypermetro.HuaweiHyperMetro(self.client,
                                                self.rmt_client,
                                                self.configuration)
            return metro.delete_consistencygroup(context, group, volumes)

        model_update = {}
        volumes_model_update = []
        model_update.update({'status': group.status})

        # Best-effort per-volume delete; record failures individually.
        for volume_ref in volumes:
            try:
                self.delete_volume(volume_ref)
                volumes_model_update.append(
                    {'id': volume_ref.id, 'status': 'deleted'})
            except Exception:
                volumes_model_update.append(
                    {'id': volume_ref.id, 'status': 'error_deleting'})

        return model_update, volumes_model_update

    def update_consistencygroup(self, context, group,
                                add_volumes, remove_volumes):
        """Add/remove volumes to/from a consistencygroup."""
        model_update = {'status': 'available'}
        opts = self._get_consistencygroup_type(group)
        if opts.get('hypermetro') == 'true':
            metro = hypermetro.HuaweiHyperMetro(self.client,
                                                self.rmt_client,
                                                self.configuration)
            metro.update_consistencygroup(context, group,
                                          add_volumes,
                                          remove_volumes)
            return model_update, None, None

        # Array will create CG at create_cgsnapshot time. Cinder will
        # maintain the CG and volumes relationship in the db.
        return model_update, None, None

    def create_cgsnapshot(self, context, cgsnapshot, snapshots):
        """Create cgsnapshot."""
        LOG.info(_LI('Create cgsnapshot for consistency group'
                     ': %(group_id)s'),
                 {'group_id': cgsnapshot.consistencygroup_id})
        model_update = {}
        snapshots_model_update = []
        added_snapshots_info = []

        try:
            # Create all member snapshots first (inactive), then activate
            # them together below so they form a consistent point in time.
            for snapshot in snapshots:
                volume = snapshot.volume
                if not volume:
                    msg = (_("Can't get volume id from snapshot, "
                             "snapshot: %(id)s") % {"id": snapshot.id})
                    LOG.error(msg)
                    raise exception.VolumeBackendAPIException(data=msg)

                volume_name = huawei_utils.encode_name(volume.id)

                lun_id = self.client.get_lun_id(volume, volume_name)
                snapshot_name = huawei_utils.encode_name(snapshot.id)
                snapshot_description = snapshot.id
                info = self.client.create_snapshot(lun_id,
                                                   snapshot_name,
                                                   snapshot_description)
                snapshot_model_update = {'id': snapshot.id,
                                         'status': 'available',
                                         'provider_location': info['ID']}
                snapshots_model_update.append(snapshot_model_update)
                added_snapshots_info.append(info)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Create cgsnapshots failed. "
                              "Cgsnapshot id: %s."), cgsnapshot.id)

        snapshot_ids = [added_snapshot['ID']
                        for added_snapshot in added_snapshots_info]
        try:
            # Activating all snapshots in one call gives crash consistency.
            self.client.activate_snapshot(snapshot_ids)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Active cgsnapshots failed. "
                              "Cgsnapshot id: %s."), cgsnapshot.id)

        model_update['status'] = 'available'

        return model_update, snapshots_model_update

    def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
        """Delete consistency group snapshot."""
        LOG.info(_LI('Delete cgsnapshot %(snap_id)s for consistency group: '
                     '%(group_id)s'),
                 {'snap_id': cgsnapshot.id,
                  'group_id': cgsnapshot.consistencygroup_id})

        model_update = {}
        snapshots_model_update = []
        model_update['status'] = cgsnapshot.status

        for snapshot in snapshots:
            try:
                self.delete_snapshot(snapshot)
                snapshots_model_update.append({'id': snapshot.id,
                                               'status': 'deleted'})
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE("Delete cg snapshots failed. "
                                  "Cgsnapshot id: %s"), cgsnapshot.id)

        return model_update, snapshots_model_update

    def _classify_volume(self, volumes):
        """Split volumes into (normal, replication-enabled) lists."""
        normal_volumes = []
        replica_volumes = []

        for v in volumes:
            volume_type = self._get_volume_type(v)
            opts = self._get_volume_params(volume_type)
            if opts.get('replication_enabled') == 'true':
                replica_volumes.append(v)
            else:
                normal_volumes.append(v)

        return normal_volumes, replica_volumes

    def _failback_normal_volumes(self, volumes):
        """Restore the pre-failover status stashed in volume metadata."""
        volumes_update = []
        for v in volumes:
            v_update = {}
            v_update['volume_id'] = v.id
            metadata = huawei_utils.get_volume_metadata(v)
            old_status = 'available'
            if 'old_status' in metadata:
                old_status = metadata['old_status']
                del metadata['old_status']
            v_update['updates'] = {'status': old_status,
                                   'metadata': metadata}
            volumes_update.append(v_update)

        return volumes_update

    def _failback(self, volumes):
        """Fail volumes back to the primary array; returns updates."""
        if self.active_backend_id in ('', None):
            # Already on the primary backend; nothing to fail back.
            return 'default', []

        normal_volumes, replica_volumes = self._classify_volume(volumes)
        volumes_update = []

        replica_volumes_update = self.replica.failback(replica_volumes)
        volumes_update.extend(replica_volumes_update)

        normal_volumes_update = self._failback_normal_volumes(normal_volumes)
        volumes_update.extend(normal_volumes_update)

        self.active_backend_id = ""
        secondary_id = 'default'

        # Switch array connection.
        self.client, self.replica_client = self.replica_client, self.client
        self.replica = replication.ReplicaPairManager(self.client,
                                                      self.replica_client,
                                                      self.configuration)
        return secondary_id, volumes_update

    def _failover_normal_volumes(self, volumes):
        """Mark non-replicated volumes error, remembering old status."""
        volumes_update = []
        for v in volumes:
            v_update = {}
            v_update['volume_id'] = v.id
            metadata = huawei_utils.get_volume_metadata(v)
            # Stash the current status so _failback can restore it.
            metadata.update({'old_status': v.status})
            v_update['updates'] = {'status': 'error',
                                   'metadata': metadata}
            volumes_update.append(v_update)

        return volumes_update

    def _failover(self, volumes):
        """Fail volumes over to the secondary array; returns updates."""
        if self.active_backend_id not in ('', None):
            # Already failed over; report the current secondary.
            return self.replica_dev_conf['backend_id'], []

        normal_volumes, replica_volumes = self._classify_volume(volumes)
        volumes_update = []

        replica_volumes_update = self.replica.failover(replica_volumes)
        volumes_update.extend(replica_volumes_update)

        normal_volumes_update = self._failover_normal_volumes(normal_volumes)
        volumes_update.extend(normal_volumes_update)

        self.active_backend_id = self.replica_dev_conf['backend_id']
        secondary_id = self.active_backend_id

        # Switch array connection.
        self.client, self.replica_client = self.replica_client, self.client
        self.replica = replication.ReplicaPairManager(self.client,
                                                      self.replica_client,
                                                      self.configuration)
        return secondary_id, volumes_update

    def failover_host(self, context, volumes, secondary_id=None):
        """Failover all volumes to secondary."""
        if secondary_id == 'default':
            secondary_id, volumes_update = self._failback(volumes)
        elif (secondary_id == self.replica_dev_conf['backend_id']
                or secondary_id is None):
            secondary_id, volumes_update = self._failover(volumes)
        else:
            msg = _("Invalid secondary id %s.") % secondary_id
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        return secondary_id, volumes_update

    def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
        """Map a snapshot to a host and return target iSCSI information."""
        # From the volume structure.
volume = Volume(id=snapshot.id,
                        provider_location=snapshot.provider_location,
                        lun_type=constants.SNAPSHOT_TYPE,
                        metadata=None)
        # Reuse the volume attach path; lun_type marks it as a snapshot.
        return self.initialize_connection(volume, connector)

    def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
        """Delete map between a snapshot and a host."""
        # From the volume structure.
        volume = Volume(id=snapshot.id,
                        provider_location=snapshot.provider_location,
                        lun_type=constants.SNAPSHOT_TYPE,
                        metadata=None)
        return self.terminate_connection(volume, connector)

    def get_lun_id_and_type(self, volume):
        """Return (array object id, LUN_TYPE or SNAPSHOT_TYPE).

        Objects built by the *_connection_snapshot paths carry a
        ``lun_type`` attribute; real volumes are verified on the array.
        """
        if hasattr(volume, 'lun_type'):
            lun_id = volume.provider_location
            lun_type = constants.SNAPSHOT_TYPE
        else:
            lun_id = self._check_volume_exist_on_array(
                volume, constants.VOLUME_NOT_EXISTS_RAISE)
            lun_type = constants.LUN_TYPE

        return lun_id, lun_type


@interface.volumedriver
class HuaweiISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver):
    """ISCSI driver for Huawei storage arrays.

    Version history:

    .. code-block:: none

        1.0.0 - Initial driver
        1.1.0 - Provide Huawei OceanStor storage 18000 driver
        1.1.1 - Code refactor
                CHAP support
                Multiple pools support
                ISCSI multipath support
                SmartX support
                Volume migration support
                Volume retype support
        2.0.0 - Rename to HuaweiISCSIDriver
        2.0.1 - Manage/unmanage volume support
        2.0.2 - Refactor HuaweiISCSIDriver
        2.0.3 - Manage/unmanage snapshot support
        2.0.5 - Replication V2 support
        2.0.6 - Support iSCSI configuration in Replication
        2.0.7 - Hypermetro support
                Hypermetro consistency group support
                Consistency group support
                Cgsnapshot support
        2.0.8 - Backup snapshot optimal path support
        2.0.9 - Support reporting disk type of pool
    """

    VERSION = "2.0.9"

    def __init__(self, *args, **kwargs):
        super(HuaweiISCSIDriver, self).__init__(*args, **kwargs)

    def get_volume_stats(self, refresh=False):
        """Get volume status."""
        data = HuaweiBaseDriver.get_volume_stats(self, refresh=False)
        backend_name = self.configuration.safe_get('volume_backend_name')
        data['volume_backend_name'] = backend_name or self.__class__.__name__
        data['storage_protocol'] = 'iSCSI'
        data['driver_version'] = self.VERSION
        data['vendor_name'] = 'Huawei'
        return data

    @utils.synchronized('huawei', external=True)
    def initialize_connection(self, volume, connector):
        """Map a volume to a host and return target iSCSI information."""
        lun_id, lun_type = self.get_lun_id_and_type(volume)
        initiator_name = connector['initiator']
        LOG.info(_LI(
            'initiator name: %(initiator_name)s, '
            'LUN ID: %(lun_id)s.'),
            {'initiator_name': initiator_name,
             'lun_id': lun_id})

        (iscsi_iqns,
         target_ips,
         portgroup_id) = self.client.get_iscsi_params(connector)
        LOG.info(_LI('initialize_connection, iscsi_iqn: %(iscsi_iqn)s, '
                     'target_ip: %(target_ip)s, '
                     'portgroup_id: %(portgroup_id)s.'),
                 {'iscsi_iqn': iscsi_iqns,
                  'target_ip': target_ips,
                  'portgroup_id': portgroup_id},)

        # Create hostgroup if not exist.
        original_host_name = connector['host']
        host_name = huawei_utils.encode_host_name(original_host_name)
        host_id = self.client.add_host_with_check(host_name,
                                                  original_host_name)

        # Add initiator to the host.
        self.client.ensure_initiator_added(initiator_name,
                                           host_id)
        hostgroup_id = self.client.add_host_to_hostgroup(host_id)

        # Mapping lungroup and hostgroup to view.
        self.client.do_mapping(lun_id, hostgroup_id,
                               host_id, portgroup_id,
                               lun_type)

        hostlun_id = self.client.get_host_lun_id(host_id, lun_id,
                                                 lun_type)

        LOG.info(_LI("initialize_connection, host lun id is: %s."),
                 hostlun_id)

        chapinfo = self.client.find_chap_info(self.client.iscsi_info,
                                              initiator_name)

        # Return iSCSI properties.
        properties = {}
        properties['target_discovered'] = False
        properties['volume_id'] = volume.id
        multipath = connector.get('multipath', False)
        hostlun_id = int(hostlun_id)
        if not multipath:
            properties['target_portal'] = ('%s:3260' % target_ips[0])
            properties['target_iqn'] = iscsi_iqns[0]
            properties['target_lun'] = hostlun_id
        else:
            # Multipath: expose every portal; the host LUN id is the
            # same behind each portal.
            properties['target_iqns'] = [iqn for iqn in iscsi_iqns]
            properties['target_portals'] = [
                '%s:3260' % ip for ip in target_ips]
            properties['target_luns'] = [hostlun_id] * len(target_ips)

        # If use CHAP, return CHAP info.
        if chapinfo:
            chap_username, chap_password = chapinfo.split(';')
            properties['auth_method'] = 'CHAP'
            properties['auth_username'] = chap_username
            properties['auth_password'] = chap_password

        LOG.info(_LI("initialize_connection success. Return data: %s."),
                 properties)
        return {'driver_volume_type': 'iscsi', 'data': properties}

    @utils.synchronized('huawei', external=True)
    def terminate_connection(self, volume, connector, **kwargs):
        """Delete map between a volume and a host."""
        lun_id, lun_type = self.get_lun_id_and_type(volume)
        initiator_name = connector['initiator']
        host_name = connector['host']
        lungroup_id = None

        LOG.info(_LI(
            'terminate_connection: initiator name: %(ini)s, '
            'LUN ID: %(lunid)s.'),
            {'ini': initiator_name,
             'lunid': lun_id},)

        portgroup = None
        portgroup_id = None
        view_id = None
        left_lunnum = -1

        # Look up a configured portgroup for this initiator, if any.
        for ini in self.client.iscsi_info:
            if ini['Name'] == initiator_name:
                for key in ini:
                    if key == 'TargetPortGroup':
                        portgroup = ini['TargetPortGroup']
                        break

        if portgroup:
            portgroup_id = self.client.get_tgt_port_group(portgroup)

        host_name = huawei_utils.encode_host_name(host_name)
        host_id = self.client.get_host_id_by_name(host_name)
        if host_id:
            mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id
            view_id = self.client.find_mapping_view(mapping_view_name)
            if view_id:
                lungroup_id = self.client.find_lungroup_from_map(view_id)

        # Remove lun from lungroup.
        if lun_id and lungroup_id:
            lungroup_ids = self.client.get_lungroupids_by_lunid(
                lun_id, lun_type)
            if lungroup_id in lungroup_ids:
                self.client.remove_lun_from_lungroup(lungroup_id,
                                                     lun_id, lun_type)
            else:
                LOG.warning(_LW("LUN is not in lungroup. "
                                "LUN ID: %(lun_id)s. "
                                "Lungroup id: %(lungroup_id)s."),
                            {"lun_id": lun_id,
                             "lungroup_id": lungroup_id})

        # Remove portgroup from mapping view if no lun left in lungroup.
        if lungroup_id:
            left_lunnum = self.client.get_obj_count_from_lungroup(lungroup_id)

        if portgroup_id and view_id and (int(left_lunnum) <= 0):
            if self.client.is_portgroup_associated_to_view(view_id,
                                                           portgroup_id):
                self.client.delete_portgroup_mapping_view(view_id,
                                                          portgroup_id)

        # Last LUN gone: tear down CHAP, lungroup, hostgroup, host and
        # finally the mapping view itself.
        if view_id and (int(left_lunnum) <= 0):
            self.client.remove_chap(initiator_name)

            if self.client.lungroup_associated(view_id, lungroup_id):
                self.client.delete_lungroup_mapping_view(view_id,
                                                         lungroup_id)
            self.client.delete_lungroup(lungroup_id)
            if self.client.is_initiator_associated_to_host(initiator_name):
                self.client.remove_iscsi_from_host(initiator_name)
            hostgroup_name = constants.HOSTGROUP_PREFIX + host_id
            hostgroup_id = self.client.find_hostgroup(hostgroup_name)
            if hostgroup_id:
                if self.client.hostgroup_associated(view_id, hostgroup_id):
                    self.client.delete_hostgoup_mapping_view(view_id,
                                                             hostgroup_id)
                self.client.remove_host_from_hostgroup(hostgroup_id,
                                                       host_id)
                self.client.delete_hostgroup(hostgroup_id)
            self.client.remove_host(host_id)
            self.client.delete_mapping_view(view_id)


@interface.volumedriver
class HuaweiFCDriver(HuaweiBaseDriver, driver.FibreChannelDriver):
    """FC driver for Huawei OceanStor storage arrays.

    Version history:

    ..
code-block:: none

        1.0.0 - Initial driver
        1.1.0 - Provide Huawei OceanStor 18000 storage volume driver
        1.1.1 - Code refactor
                Multiple pools support
                SmartX support
                Volume migration support
                Volume retype support
                FC zone enhancement
                Volume hypermetro support
        2.0.0 - Rename to HuaweiFCDriver
        2.0.1 - Manage/unmanage volume support
        2.0.2 - Refactor HuaweiFCDriver
        2.0.3 - Manage/unmanage snapshot support
        2.0.4 - Balanced FC port selection
        2.0.5 - Replication V2 support
        2.0.7 - Hypermetro support
                Hypermetro consistency group support
                Consistency group support
                Cgsnapshot support
        2.0.8 - Backup snapshot optimal path support
        2.0.9 - Support reporting disk type of pool
    """

    VERSION = "2.0.9"

    def __init__(self, *args, **kwargs):
        super(HuaweiFCDriver, self).__init__(*args, **kwargs)
        # FC SAN lookup service; lazily created on first connection.
        self.fcsan = None

    def get_volume_stats(self, refresh=False):
        """Get volume status."""
        data = HuaweiBaseDriver.get_volume_stats(self, refresh=False)
        backend_name = self.configuration.safe_get('volume_backend_name')
        data['volume_backend_name'] = backend_name or self.__class__.__name__
        data['storage_protocol'] = 'FC'
        data['driver_version'] = self.VERSION
        data['vendor_name'] = 'Huawei'
        return data

    @utils.synchronized('huawei', external=True)
    @fczm_utils.AddFCZone
    def initialize_connection(self, volume, connector):
        """Map a volume to a host and return FC connection properties."""
        lun_id, lun_type = self.get_lun_id_and_type(volume)
        wwns = connector['wwpns']
        LOG.info(_LI(
            'initialize_connection, initiator: %(wwpns)s,'
            ' LUN ID: %(lun_id)s.'),
            {'wwpns': wwns,
             'lun_id': lun_id},)

        portg_id = None
        original_host_name = connector['host']
        host_name = huawei_utils.encode_host_name(original_host_name)
        host_id = self.client.add_host_with_check(host_name,
                                                  original_host_name)

        if not self.fcsan:
            self.fcsan = fczm_utils.create_lookup_service()

        if self.fcsan:
            # Use FC switch.
            zone_helper = fc_zone_helper.FCZoneHelper(self.fcsan,
                                                      self.client)
            try:
                (tgt_port_wwns, portg_id, init_targ_map) = (
                    zone_helper.build_ini_targ_map(wwns, host_id, lun_id,
                                                   lun_type))
            except Exception as err:
                # Roll back the (possibly just created) host object.
                self.remove_host_with_check(host_id)
                msg = _('build_ini_targ_map fails. %s') % err
                raise exception.VolumeBackendAPIException(data=msg)

            for ini in init_targ_map:
                self.client.ensure_fc_initiator_added(ini, host_id)
        else:
            # Not use FC switch.
            online_wwns_in_host = (
                self.client.get_host_online_fc_initiators(host_id))
            online_free_wwns = self.client.get_online_free_wwns()
            for wwn in wwns:
                if (wwn not in online_wwns_in_host and
                        wwn not in online_free_wwns):
                    # The initiator is neither on this host nor free to
                    # add; clean up an otherwise empty host and abort.
                    wwns_in_host = (
                        self.client.get_host_fc_initiators(host_id))
                    iqns_in_host = (
                        self.client.get_host_iscsi_initiators(host_id))
                    if not (wwns_in_host or iqns_in_host or
                            self.client.is_host_associated_to_hostgroup(
                                host_id)):
                        self.client.remove_host(host_id)

                    msg = _('No FC initiator can be added to host.')
                    LOG.error(msg)
                    raise exception.VolumeBackendAPIException(data=msg)

            for wwn in wwns:
                if wwn in online_free_wwns:
                    self.client.add_fc_port_to_host(host_id, wwn)

            (tgt_port_wwns, init_targ_map) = (
                self.client.get_init_targ_map(wwns))

        # Add host into hostgroup.
        hostgroup_id = self.client.add_host_to_hostgroup(host_id)

        map_info = self.client.do_mapping(lun_id, hostgroup_id, host_id,
                                          portg_id, lun_type)
        host_lun_id = self.client.get_host_lun_id(host_id, lun_id,
                                                  lun_type)

        # Return FC properties.
        fc_info = {'driver_volume_type': 'fibre_channel',
                   'data': {'target_lun': int(host_lun_id),
                            'target_discovered': True,
                            'target_wwn': tgt_port_wwns,
                            'volume_id': volume.id,
                            'initiator_target_map': init_targ_map,
                            'map_info': map_info}, }

        # Deal with hypermetro connection.
        metadata = huawei_utils.get_volume_metadata(volume)
        LOG.info(_LI("initialize_connection, metadata is: %s."), metadata)
        if 'hypermetro_id' in metadata:
            loc_tgt_wwn = fc_info['data']['target_wwn']
            local_ini_tgt_map = fc_info['data']['initiator_target_map']
            hyperm = hypermetro.HuaweiHyperMetro(self.client,
                                                 self.rmt_client,
                                                 self.configuration)
            rmt_fc_info = hyperm.connect_volume_fc(volume, connector)

            # Merge local and remote target WWNs / initiator maps so the
            # host sees paths to both arrays.
            rmt_tgt_wwn = rmt_fc_info['data']['target_wwn']
            rmt_ini_tgt_map = rmt_fc_info['data']['initiator_target_map']
            fc_info['data']['target_wwn'] = (loc_tgt_wwn + rmt_tgt_wwn)
            wwns = connector['wwpns']
            for wwn in wwns:
                if (wwn in local_ini_tgt_map and
                        wwn in rmt_ini_tgt_map):
                    fc_info['data']['initiator_target_map'][wwn].extend(
                        rmt_ini_tgt_map[wwn])
                elif (wwn not in local_ini_tgt_map and
                        wwn in rmt_ini_tgt_map):
                    fc_info['data']['initiator_target_map'][wwn] = (
                        rmt_ini_tgt_map[wwn])
                # else, do nothing

            # Both arrays must expose the LUN under the same host LUN id.
            loc_map_info = fc_info['data']['map_info']
            rmt_map_info = rmt_fc_info['data']['map_info']
            same_host_id = self._get_same_hostid(loc_map_info,
                                                 rmt_map_info)
            self.client.change_hostlun_id(loc_map_info, same_host_id)
            hyperm.rmt_client.change_hostlun_id(rmt_map_info, same_host_id)

            fc_info['data']['target_lun'] = same_host_id
            hyperm.rmt_client.logout()

        LOG.info(_LI("Return FC info is: %s."), fc_info)
        return fc_info

    def _get_same_hostid(self, loc_fc_info, rmt_fc_info):
        """Pick a host LUN id (1..511) free on both arrays."""
        loc_aval_luns = loc_fc_info['aval_luns']
        loc_aval_luns = json.loads(loc_aval_luns)

        rmt_aval_luns = rmt_fc_info['aval_luns']
        rmt_aval_luns = json.loads(rmt_aval_luns)
        same_host_id = None

        for i in range(1, 512):
            if i in rmt_aval_luns and i in loc_aval_luns:
                same_host_id = i
                break

        LOG.info(_LI("The same hostid is: %s."), same_host_id)
        if not same_host_id:
            msg = _("Can't find the same host id from arrays.")
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        return same_host_id

    @utils.synchronized('huawei', external=True)
    @fczm_utils.RemoveFCZone
    def terminate_connection(self, volume, connector, **kwargs):
        """Delete map between a volume and a host."""
        lun_id, lun_type = self.get_lun_id_and_type(volume)
        wwns = connector['wwpns']
        host_name = connector['host']
        left_lunnum = -1
        lungroup_id = None
        view_id = None
        LOG.info(_LI('terminate_connection: wwpns: %(wwns)s, '
                     'LUN ID: %(lun_id)s.'),
                 {'wwns': wwns, 'lun_id': lun_id})

        host_name = huawei_utils.encode_host_name(host_name)
        host_id = self.client.get_host_id_by_name(host_name)
        if host_id:
            mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id
            view_id = self.client.find_mapping_view(mapping_view_name)
            if view_id:
                lungroup_id = self.client.find_lungroup_from_map(view_id)

        if lun_id and lungroup_id:
            lungroup_ids = self.client.get_lungroupids_by_lunid(lun_id,
                                                                lun_type)
            if lungroup_id in lungroup_ids:
                self.client.remove_lun_from_lungroup(lungroup_id,
                                                     lun_id, lun_type)
            else:
                LOG.warning(_LW("LUN is not in lungroup. "
                                "LUN ID: %(lun_id)s. "
                                "Lungroup id: %(lungroup_id)s."),
                            {"lun_id": lun_id,
                             "lungroup_id": lungroup_id})
        else:
            LOG.warning(_LW("Can't find lun on the array."))

        if lungroup_id:
            left_lunnum = self.client.get_obj_count_from_lungroup(lungroup_id)
        if int(left_lunnum) > 0:
            # Other LUNs still mapped through this view: keep the zone.
            fc_info = {'driver_volume_type': 'fibre_channel',
                       'data': {}}
        else:
            # Last LUN gone: unzone and tear down the whole mapping.
            fc_info, portg_id = self._delete_zone_and_remove_fc_initiators(
                wwns, host_id)
            if lungroup_id:
                if view_id and self.client.lungroup_associated(
                        view_id, lungroup_id):
                    self.client.delete_lungroup_mapping_view(view_id,
                                                             lungroup_id)
                self.client.delete_lungroup(lungroup_id)
            if portg_id:
                if view_id and self.client.is_portgroup_associated_to_view(
                        view_id, portg_id):
                    self.client.delete_portgroup_mapping_view(view_id,
                                                              portg_id)
                    self.client.delete_portgroup(portg_id)

            if host_id:
                hostgroup_name = constants.HOSTGROUP_PREFIX + host_id
                hostgroup_id = self.client.find_hostgroup(hostgroup_name)
                if hostgroup_id:
                    if view_id and self.client.hostgroup_associated(
                            view_id, hostgroup_id):
                        self.client.delete_hostgoup_mapping_view(
                            view_id, hostgroup_id)
                    self.client.remove_host_from_hostgroup(
                        hostgroup_id, host_id)
                    self.client.delete_hostgroup(hostgroup_id)

                if not self.client.check_fc_initiators_exist_in_host(
                        host_id):
                    self.client.remove_host(host_id)

            if view_id:
                self.client.delete_mapping_view(view_id)

        # Deal with hypermetro connection.
        metadata = huawei_utils.get_volume_metadata(volume)
        LOG.info(_LI("Detach Volume, metadata is: %s."), metadata)
        if 'hypermetro_id' in metadata:
            hyperm = hypermetro.HuaweiHyperMetro(self.client,
                                                 self.rmt_client,
                                                 self.configuration)
            hyperm.disconnect_volume_fc(volume, connector)

        LOG.info(_LI("terminate_connection, return data is: %s."),
                 fc_info)

        return fc_info

    def _delete_zone_and_remove_fc_initiators(self, wwns, host_id):
        """Compute zone-removal info and detach FC initiators from host."""
        # Get tgt_port_wwns and init_targ_map to remove zone.
        portg_id = None
        if not self.fcsan:
            self.fcsan = fczm_utils.create_lookup_service()
        if self.fcsan:
            zone_helper = fc_zone_helper.FCZoneHelper(self.fcsan,
                                                      self.client)
            (tgt_port_wwns, portg_id, init_targ_map) = (
                zone_helper.get_init_targ_map(wwns, host_id))
        else:
            (tgt_port_wwns, init_targ_map) = (
                self.client.get_init_targ_map(wwns))

        # Remove the initiators from host if need.
        if host_id:
            fc_initiators = self.client.get_host_fc_initiators(host_id)
            for wwn in wwns:
                if wwn in fc_initiators:
                    self.client.remove_fc_from_host(wwn)

        info = {'driver_volume_type': 'fibre_channel',
                'data': {'target_wwn': tgt_port_wwns,
                         'initiator_target_map': init_targ_map}}
        return info, portg_id
from StringIO import StringIO

import mock

from paasta_tools.cli.cmds.list import paasta_list


@mock.patch('sys.stdout', new_callable=StringIO)
@mock.patch('paasta_tools.cli.cmds.list.list_services', autospec=True)
def test_list_paasta_list(mock_list_services, mock_stdout):
    """paasta_list prints one line per service name."""
    mock_list_services.return_value = ['service_1', 'service_2']
    fake_args = mock.MagicMock()
    fake_args.print_instances = False

    paasta_list(fake_args)

    assert mock_stdout.getvalue() == 'service_1\nservice_2\n'


@mock.patch('sys.stdout', new_callable=StringIO)
@mock.patch('paasta_tools.cli.cmds.list.list_service_instances', autospec=True)
def test_list_paasta_list_instances(mock_list_service_instances, mock_stdout):
    """With --print-instances, paasta_list prints service.instance pairs."""
    mock_list_service_instances.return_value = [
        'service_1.main', 'service_2.canary']
    fake_args = mock.MagicMock()
    fake_args.print_instances = True

    paasta_list(fake_args)

    assert mock_stdout.getvalue() == 'service_1.main\nservice_2.canary\n'
'''
  Insights Forex live source
  --------------------------

  :copyright (c) 2014 Xavier Bruhiere
  :license: Apache 2.0, see LICENSE for more details.
'''

import time

import pandas as pd
import dna.logging

import intuition.data.forex as forex

log = dna.logging.logger(__name__)


class Forex(object):
    '''
    At each event datetime of the provided index, ForexLiveSource fetchs
    live forex data from TrueFX.
    '''

    def __init__(self, pairs, properties):
        # Seconds to wait between attempts at a complete quote set.
        self._wait_retry = properties.get('retry', 10)
        self.forex = forex.TrueFX(pairs=pairs)
        self.forex.connect()

    def get_data(self, sids):
        '''Poll TrueFX until a quote exists for every requested sid.'''
        while True:
            rates = self.forex.query_rates()
            available = len(rates.keys())
            if available >= len(sids):
                log.debug('Data available for {}'.format(rates.keys()))
                return rates
            # Not enough pairs yet: wait, re-establish the session and
            # try again on the next loop iteration.
            log.debug('Incomplete data ({}/{}), retrying in {}s'.format(
                available, len(sids), self._wait_retry))
            time.sleep(self._wait_retry)
            debug_feedback = self.forex.connect()
            log.info('New Truefx connection: {}'.format(debug_feedback))

    @property
    def mapping(self):
        '''Field-mapping table: output key -> (converter, source key).'''
        return {
            'dt': (lambda value: value, 'dt'),
            #TODO Here conversion (weird result for now)
            # Or: (lambda x: pd.tslib.i8_to_pydt(x + '000000'), 'trade_time')
            'trade_time': (lambda stamp: pd.datetime.utcfromtimestamp(
                float(stamp[:-3])), 'timeStamp'),
            'sid': (lambda value: value, 'sid'),
            'price': (float, 'bid'),
            'ask': (float, 'ask'),
            'high': (float, 'high'),
            'low': (float, 'low'),
            # Volume is not provided by TrueFX; use a fixed placeholder.
            'volume': (lambda _unused: 10000, 'bid')
        }