Dataset schema (column, dtype, min .. max):

    max_stars_repo_path    stringlengths    3 .. 269
    max_stars_repo_name    stringlengths    4 .. 119
    max_stars_count        int64            0 .. 191k
    id                     stringlengths    1 .. 7
    content                stringlengths    6 .. 1.05M
    score                  float64          0.23 .. 5.13
    int_score              int64            0 .. 5
src/RIOT/tests/pkg_tensorflow-lite/mnist/mnist_mlp.py
ARte-team/ARte
2
12800
<reponame>ARte-team/ARte<gh_stars>1-10
#!/usr/bin/env python3
import os

# imports for array-handling
import numpy as np
import tensorflow as tf

# keras imports for the dataset and building our neural network
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout

# let's keep our keras backend tensorflow quiet
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

# load mnist dataset
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# building the input vector from the 28x28 pixels
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')

# Split the train set in a train + validation set
X_valid = X_train[50000:]
y_valid = y_train[50000:]
X_train = X_train[:50000]
y_train = y_train[:50000]

# Normalize the data
X_train = X_train / 255.0
X_test = X_test / 255.0
X_valid = X_valid / 255.0

# building a very simple linear stack of layers using a sequential model
model = Sequential([
    Dense(64, activation='relu', input_shape=(784,)),
    Dropout(0.2),
    Dense(10, activation='softmax')
])

# compiling the sequential model
model.compile(loss='sparse_categorical_crossentropy',
              metrics=['accuracy'], optimizer='adam')

batch_size = 32
epochs = 30

# training the model and saving metrics in history
history = model.fit(X_train, y_train,
                    batch_size=batch_size, epochs=epochs,
                    verbose=2,
                    validation_data=(X_valid, y_valid))

# saving the model
# Convert the model to the TensorFlow Lite format without quantization
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()

# Save the basic model to disk
open("model_basic.tflite", "wb").write(tflite_model)

# Convert the model to the TensorFlow Lite format with quantization
converter = tf.lite.TFLiteConverter.from_keras_model(model)

(mnist_train, _), (_, _) = mnist.load_data()
mnist_train = mnist_train.reshape(60000, 784)
mnist_train = mnist_train.astype('float32')
mnist_train = mnist_train / 255.0
mnist_ds = tf.data.Dataset.from_tensor_slices((mnist_train)).batch(1)

def representative_data_gen():
    for input_value in mnist_ds.take(100):
        yield [input_value]

converter.representative_dataset = representative_data_gen
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
tflite_model = converter.convert()

# Save the quantized model to disk
open("model.tflite", "wb").write(tflite_model)

basic_model_size = os.path.getsize("model_basic.tflite")
print("Basic model is %d bytes" % basic_model_size)
quantized_model_size = os.path.getsize("model.tflite")
print("Quantized model is %d bytes" % quantized_model_size)
difference = basic_model_size - quantized_model_size
print("Difference is %d bytes" % difference)

# Now let's verify the model on a few input digits
# Instantiate an interpreter for the model
model_quantized_reloaded = tf.lite.Interpreter('model.tflite')

# Allocate memory for each model
model_quantized_reloaded.allocate_tensors()

# Get the input and output tensors so we can feed in values and get the results
model_quantized_input = model_quantized_reloaded.get_input_details()[0]["index"]
model_quantized_output = model_quantized_reloaded.get_output_details()[0]["index"]

# Create an array to store the results: one slot per test image
# (the original X_test.size would count every pixel, not every image)
model_quantized_predictions = np.empty(len(X_test))

for i in range(10):
    # Invoke the interpreter
    model_quantized_reloaded.set_tensor(model_quantized_input, X_test[i:i+1, :])
    model_quantized_reloaded.invoke()
    model_quantized_prediction = model_quantized_reloaded.get_tensor(model_quantized_output)

    print("Digit: {} - Prediction:\n{}".format(y_test[i], model_quantized_prediction))
    print("")
2.8125
3
lang/Python/random-numbers-1.py
ethansaxenian/RosettaDecode
0
12801
<filename>lang/Python/random-numbers-1.py
import random

values = [random.gauss(1, .5) for i in range(1000)]
2.796875
3
src/display.py
thebruce87/Photobooth
0
12802
<gh_stars>0
class Display():
    def __init__(self, width, height):
        self.width = width
        self.height = height

    def getSize(self):
        return (self.width, self.height)
2.75
3
stock_api_handler.py
Sergix/analyst-server
2
12803
# This python script handles stock api requests from yfinance
# Last Updated: 4/7/2020
# Credits: nóto

# Import yfinance api lib
import yfinance as yf
# Import pandas lib
import pandas as pd
# Import json to manipulate api data
import json
# Import math
import math


class StockApi():
    def __init__(self):
        self.panda = pd

    def request_data(self, t, p='1d', i="5m"):
        # set the stock we would like to search for
        stock = yf.Ticker(t)
        # Retrieve data and store as Panda Data Frame
        self.unclean_data = stock.history(period=p, interval=i)
        # unclean_data selectors stored in an array
        self.data_selectors = list(self.unclean_data.columns)
        # create a list of the index values; these are the time stamps of our data
        self.time_stamps = list(self.unclean_data.index)
        # get the length
        self.time_stamp_total_length = len(self.time_stamps)
        # now let us clean the data
        self.clean_data()
        # lets convert the data and return it back to whatever called us
        return self.convert_data()
        # END

    # function to organize 'clean' the stock data
    def clean_data(self):
        # function to clean panda data returned by Api
        self.new_data = {}
        for count in range(self.time_stamp_total_length):
            # get the next timestamp and store it as a string
            self.new_time_stamp = str(self.time_stamps[count])
            # insert new data here
            if not math.isnan((self.unclean_data.iloc[count].to_list())[0]):
                self.new_data.update({self.new_time_stamp: self.unclean_data.iloc[count].to_list()})
                for i in range(4):
                    self.new_data[self.new_time_stamp][i] = round(self.new_data[self.new_time_stamp][i], 2)
        # return the new data
        return self.new_data
        # END

    # function to convert the data so the front end can read it
    def convert_data(self):
        self.new_data = json.dumps(self.new_data, indent=2)
        return self.new_data
        # END
3.234375
3
hth/shows/tests/factories.py
roperi/myband
1
12804
from datetime import date
from random import randrange

import factory
import factory.fuzzy

from hth.core.tests.utils import from_today


class VenueFactory(factory.django.DjangoModelFactory):

    class Meta:
        model = 'shows.Venue'

    name = factory.Sequence(lambda n: 'Venue %d' % n)
    city = factory.Sequence(lambda n: 'City %d' % n)
    website = factory.Sequence(lambda n: 'http://venue-%d.dev' % n)


class GigFactory(factory.django.DjangoModelFactory):

    class Meta:
        model = 'shows.Gig'

    date = factory.fuzzy.FuzzyDate(date(2000, 1, 1))
    venue = factory.SubFactory(VenueFactory)
    description = factory.fuzzy.FuzzyText(length=100)
    details = factory.fuzzy.FuzzyText(length=100)


class PublishedGigFactory(GigFactory):

    publish = True


class UpcomingGigFactory(PublishedGigFactory):

    # Pick a random date from today through next year
    date = factory.LazyAttribute(lambda obj: from_today(days=randrange(365)))

    @classmethod
    def create_batch(cls, size, **kwargs):
        batch = super().create_batch(size, **kwargs)
        return sorted(batch, key=lambda x: x.date)


class PastGigFactory(PublishedGigFactory):

    # Pick a random date from 10 years ago through yesterday
    date = factory.LazyAttribute(lambda obj: from_today(randrange(-3650, 0)))

    @classmethod
    def create_batch(cls, size, **kwargs):
        batch = super().create_batch(size, **kwargs)
        return sorted(batch, key=lambda x: x.date, reverse=True)
2.359375
2
homeassistant/components/eight_sleep/binary_sensor.py
andersop91/core
22,481
12805
<reponame>andersop91/core
"""Support for Eight Sleep binary sensors."""
from __future__ import annotations

import logging

from pyeight.eight import EightSleep

from homeassistant.components.binary_sensor import (
    BinarySensorDeviceClass,
    BinarySensorEntity,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType

from . import (
    CONF_BINARY_SENSORS,
    DATA_API,
    DATA_EIGHT,
    DATA_HEAT,
    EightSleepBaseEntity,
    EightSleepHeatDataCoordinator,
)

_LOGGER = logging.getLogger(__name__)


async def async_setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    async_add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType = None,
) -> None:
    """Set up the eight sleep binary sensor."""
    if discovery_info is None:
        return

    name = "Eight"
    sensors = discovery_info[CONF_BINARY_SENSORS]
    eight: EightSleep = hass.data[DATA_EIGHT][DATA_API]
    heat_coordinator: EightSleepHeatDataCoordinator = hass.data[DATA_EIGHT][DATA_HEAT]

    all_sensors = [
        EightHeatSensor(name, heat_coordinator, eight, side, sensor)
        for side, sensor in sensors
    ]

    async_add_entities(all_sensors)


class EightHeatSensor(EightSleepBaseEntity, BinarySensorEntity):
    """Representation of an Eight Sleep heat-based sensor."""

    def __init__(
        self,
        name: str,
        coordinator: EightSleepHeatDataCoordinator,
        eight: EightSleep,
        side: str | None,
        sensor: str,
    ) -> None:
        """Initialize the sensor."""
        super().__init__(name, coordinator, eight, side, sensor)
        self._attr_device_class = BinarySensorDeviceClass.OCCUPANCY
        assert self._usrobj
        _LOGGER.debug(
            "Presence Sensor: %s, Side: %s, User: %s",
            self._sensor,
            self._side,
            self._usrobj.userid,
        )

    @property
    def is_on(self) -> bool:
        """Return true if the binary sensor is on."""
        assert self._usrobj
        return bool(self._usrobj.bed_presence)
2.328125
2
tensorbay/opendataset/FLIC/loader.py
rexzheng324-c/tensorbay-python-sdk
0
12806
#!/usr/bin/env python3
#
# Copyright 2021 Graviti. Licensed under MIT License.
#
# pylint: disable=invalid-name
# pylint: disable=missing-module-docstring

import os
from typing import Any, Dict, Iterator, Tuple

from tensorbay.dataset import Data, Dataset
from tensorbay.exception import ModuleImportError
from tensorbay.label import Classification, LabeledBox2D, LabeledKeypoints2D

DATASET_NAME = "FLIC"
_VALID_KEYPOINT_INDICES = [0, 1, 2, 3, 4, 5, 6, 9, 12, 13, 16]


def FLIC(path: str) -> Dataset:
    """`FLIC <https://bensapp.github.io/flic-dataset.html>`_ dataset.

    The folder structure should be like::

        <path>
            examples.mat
            images/
                2-fast-2-furious-00003571.jpg
                ...

    Arguments:
        path: The root directory of the dataset.

    Raises:
        ModuleImportError: When the module "scipy" can not be found.

    Returns:
        Loaded :class:`~tensorbay.dataset.dataset.Dataset` instance.

    """
    try:
        from scipy.io import loadmat  # pylint: disable=import-outside-toplevel
    except ModuleNotFoundError as error:
        raise ModuleImportError(module_name=error.name) from error

    root_path = os.path.abspath(os.path.expanduser(path))

    dataset = Dataset(DATASET_NAME)
    annotations = loadmat(os.path.join(root_path, "examples.mat"))["examples"][0]
    dataset.create_segment("train")
    dataset.create_segment("test")
    dataset.load_catalog(os.path.join(os.path.dirname(__file__), "catalog.json"))

    # check whether the dataset has a bad segment
    try:
        _ = annotations["isbad"]
        flag = True
        dataset.create_segment("bad")
        dataset.catalog.classification.add_attribute(name="isunchecked", type_="boolean")
    except ValueError:
        flag = False

    for data, segment_name in _get_data(root_path, annotations, flag):
        dataset[segment_name].append(data)
    return dataset


def _get_data(path: str, annotations: Any, flag: bool) -> Iterator[Tuple[Data, str]]:
    filepath_to_data: Dict[str, Data] = {}

    for annotation in annotations:
        filepath = annotation["filepath"][0]
        keypoints = LabeledKeypoints2D(
            annotation["coords"].T[_VALID_KEYPOINT_INDICES],
            attributes={"poselet_hit_idx": annotation["poselet_hit_idx"].T.tolist()},
        )
        box2d = LabeledBox2D(*annotation["torsobox"][0].tolist())

        if filepath not in filepath_to_data:
            data = Data(os.path.join(path, "images", filepath))
            data.label.keypoints2d = [keypoints]
            data.label.box2d = [box2d]
            attribute = {"currframe": int(annotation["currframe"][0][0])}
            if flag:
                attribute["isunchecked"] = bool(annotation["isunchecked"])
            data.label.classification = Classification(
                category=annotation["moviename"][0], attributes=attribute
            )
            filepath_to_data[filepath] = data

            if annotation["istrain"]:
                segment_name = "train"
            elif annotation["istest"]:
                segment_name = "test"
            else:
                segment_name = "bad"
            yield data, segment_name
        else:
            image_data = filepath_to_data[filepath]
            image_data.label.keypoints2d.append(keypoints)
            image_data.label.box2d.append(box2d)
2.3125
2
my_answers/homework/OOP/athlete.py
eyalle/python_course
0
12807
<filename>my_answers/homework/OOP/athlete.py
def get_time(time_in_seconds):
    import datetime
    # interpret the value as seconds (a bare positional argument would be days)
    time_str = str(datetime.timedelta(seconds=time_in_seconds))
    time_fractions = time_str.split(":")
    time_fractions[0] = time_fractions[0].replace(",", "")
    time_fractions[-1] += 's'
    time_fractions[-2] += 'm'
    time_fractions[-3] += 'h'
    # print(time_fractions)
    time_str = ":".join(time_fractions)
    # time_str = f'{time_fractions[0]}:{time_fractions[1]}:{time_fractions[2]}s'
    return time_str


class Athlete:
    def __init__(self, name, weight, power, speed, endurance):
        self.name = name
        self.power = float(power)
        self.speed = int(speed)
        self.weight = float(weight)
        self.endurance = int(endurance)
        if (self.endurance < self.speed):
            self.endurance += 3


class Runner(Athlete):
    def __init__(self, name, weight=60.0, power=0, speed=0, endurance=0):
        Athlete.__init__(self, name, weight, float(power), int(speed), int(endurance))
        self.power += (self.weight * 0.1)
        self.speed += 25
        self.endurance += 8

    def get_duration(self, distance):
        acceleration = self.power / self.weight
        top_speed = self.speed
        time_to_reach_top_speed = top_speed / acceleration
        distance_to_top_speed = top_speed * time_to_reach_top_speed / 2
        if distance == distance_to_top_speed:
            duration = time_to_reach_top_speed
        elif distance < distance_to_top_speed:
            duration = (2 * distance / acceleration) ** (1 / 2)
        else:
            deceleration = acceleration
            endurance_speed = self.endurance
            # parenthesize the speed difference before dividing by the deceleration
            time_to_reach_endurance_speed = (top_speed - endurance_speed) / deceleration
            distance_to_endurance_speed = top_speed * time_to_reach_endurance_speed / 2
            if distance == distance_to_top_speed + distance_to_endurance_speed:
                duration = time_to_reach_endurance_speed
            elif distance < distance_to_top_speed + distance_to_endurance_speed:
                duration = time_to_reach_top_speed + (2 * (distance - distance_to_top_speed) / deceleration) ** (1 / 2)
            else:
                time_to_reach_distance = (distance - (distance_to_top_speed + distance_to_endurance_speed)) / endurance_speed
                duration = time_to_reach_top_speed + time_to_reach_endurance_speed + time_to_reach_distance
        return duration

    def run(self, distance):
        import time
        t = self.get_duration(distance)
        time.sleep(t / 2)
        return self.name


class Sprinter(Runner):
    def __init__(self, name, weight=70.0, power=0, speed=0, endurance=0):
        Runner.__init__(self, name, float(weight), int(power), int(speed), int(endurance))
        self.power += (0.75 * self.weight)
        self.speed += 15
        self.endurance += 1


class MarathonRunner(Runner):
    def __init__(self, name, weight=55.0, power=0, speed=0, endurance=0):
        Runner.__init__(self, name, float(weight), int(power), int(speed), int(endurance))
        self.power /= 1.1
        self.speed -= 3
        self.endurance += 7
        self.speed = 8 if (self.speed < 8) else self.speed
        # keep the current speed when it is already above endurance + 1
        self.speed = self.endurance + 1 if (self.speed < (self.endurance + 1)) else self.speed


def get_durations(distances, athletes):
    for distance in distances:
        for athlete in athletes:
            print(f'{athlete.run(distance)} ran {distance} meters in {get_time(athlete.get_duration(distance))}')


if __name__ == "__main__":
    runr = Runner("run", 90, 15, 30)
    sprt1 = Sprinter("sprnt1", 90, 15, 30)
    sprt2 = Sprinter("sprnt2", 80, 10, 25)
    mrtn = MarathonRunner("mrtn", 50, 6, 7)
    # print('getting running time..')
    # print(f'{runr.run(100)} ran for {runr.get_duration(100)}')
    distances = (100, 200, 800, 1600, 5000, 20000)
    athletes = (runr, sprt1, sprt2, mrtn)
    get_durations(distances, athletes)
3.984375
4
osisoft/pidevclub/piwebapi/models/pi_data_server_license.py
jugillar/PI-Web-API-Client-Python
30
12808
# coding: utf-8

"""
Copyright 2018 OSIsoft, LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
<http://www.apache.org/licenses/LICENSE-2.0>

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

from pprint import pformat
from six import iteritems


class PIDataServerLicense(object):
    swagger_types = {
        'amount_left': 'str',
        'amount_used': 'str',
        'name': 'str',
        'total_amount': 'str',
        'links': 'PIDataServerLicenseLinks',
        'web_exception': 'PIWebException',
    }

    attribute_map = {
        'amount_left': 'AmountLeft',
        'amount_used': 'AmountUsed',
        'name': 'Name',
        'total_amount': 'TotalAmount',
        'links': 'Links',
        'web_exception': 'WebException',
    }

    def __init__(self, amount_left=None, amount_used=None, name=None,
                 total_amount=None, links=None, web_exception=None):
        self._amount_left = None
        self._amount_used = None
        self._name = None
        self._total_amount = None
        self._links = None
        self._web_exception = None

        if amount_left is not None:
            self.amount_left = amount_left
        if amount_used is not None:
            self.amount_used = amount_used
        if name is not None:
            self.name = name
        if total_amount is not None:
            self.total_amount = total_amount
        if links is not None:
            self.links = links
        if web_exception is not None:
            self.web_exception = web_exception

    @property
    def amount_left(self):
        return self._amount_left

    @amount_left.setter
    def amount_left(self, amount_left):
        self._amount_left = amount_left

    @property
    def amount_used(self):
        return self._amount_used

    @amount_used.setter
    def amount_used(self, amount_used):
        self._amount_used = amount_used

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, name):
        self._name = name

    @property
    def total_amount(self):
        return self._total_amount

    @total_amount.setter
    def total_amount(self, total_amount):
        self._total_amount = total_amount

    @property
    def links(self):
        return self._links

    @links.setter
    def links(self, links):
        self._links = links

    @property
    def web_exception(self):
        return self._web_exception

    @web_exception.setter
    def web_exception(self, web_exception):
        self._web_exception = web_exception

    def to_dict(self):
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        return pformat(self.to_dict())

    def __repr__(self):
        return self.to_str()

    def __ne__(self, other):
        return not self == other

    def __eq__(self, other):
        if not isinstance(other, PIDataServerLicense):
            return False
        return self.__dict__ == other.__dict__
1.710938
2
bot.py
JavierOramas/scholar_standing_bot
0
12809
<gh_stars>0
#! /root/anaconda3/bin/python
import os
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from pyrogram import Client, filters
from read_config import read_config
import json
import requests
import schedule
import time


def get_value_usd(sum):
    price = requests.get(
        'https://api.coingecko.com/api/v3/simple/token_price/ethereum'
        '?contract_addresses=0xcc8fa225d80b9c7d42f96e9570156c65d6caaa25&vs_currencies=usd'
    ).json()['0xcc8fa225d80b9c7d42f96e9570156c65d6caaa25']['usd']
    return price * sum


def read_data(id):
    id = str(id)
    try:
        with open('./db/' + id + '.json', 'r') as f:
            return json.loads(f.readline())
    except:
        return {}


def write_data(id, db):
    id = str(id)
    with open('./db/' + id + '.json', 'w') as f:
        f.write(json.dumps(db))


config_data = read_config('./config/config_bot.json')

app = Client(config_data['bot_user_name'], config_data['api_id'], config_data['api_hash'])


@app.on_message(filters.command('add'))
def add_scholar(client, message):
    users = message.text.split()
    if len(users) != 3:
        message.reply_text("formato incorrecto, debe ser de la forma: \n /add pedro 0x000000000")
        return  # bail out early instead of indexing into an incomplete command
    name = str(users[-2])
    wallet = str(users[-1])
    os.makedirs("./db", exist_ok=True)
    db = read_data(message.chat.id)
    # db = read_data('1')
    if not name in db:
        db[name] = {
            "wallet": wallet,
            "slp": [0]  # store as a list so that sum(db[name]['slp']) works in /week
        }
        write_data(message.chat.id, db)
        message.reply_text("Añadido con éxito")
    else:
        message.reply_text("Ya tienes un scholar con ese nombre")
    pass


@app.on_message(filters.command('del'))
def del_scholar(client, message):
    users = message.text.split()
    name = str(users[-1])  # /del only takes the name, so it is the last word
    # wallet = str(users[-1])
    os.makedirs("./db", exist_ok=True)
    db = read_data(message.chat.id)
    # db = read_data('1')
    if name in db:
        db.pop(name)
        write_data(message.chat.id, db)
    else:
        message.reply_text("no tienes un scholar con ese nombre")
    pass


@app.on_message(filters.command('standing'))
def see_standing(client, message):
    # owner_id = app.get_users(message.chat.id)
    os.makedirs("./db", exist_ok=True)
    db = read_data(message.chat.id)
    list = []
    if len(db.keys()) > 0:
        for i in db.keys():
            wallet = db[i]['wallet']
            slp = requests.get(f'https://game-api.skymavis.com/game-api/clients/{wallet}/items/1').json()['total']
            list.append((i, slp))
        list.sort(key=lambda x: x[1], reverse=True)
        stand = ''
        for i in list:
            stand += f'{i[0]} : {i[1]} - ${get_value_usd(i[1])}\n'
        message.reply_text(stand)
    else:
        message.reply_text('no tienes scholars :(')
    pass


@app.on_message(filters.command('week'))
def see_week(client, message):
    # owner_id = app.get_users(message.chat.id)
    os.makedirs("./db", exist_ok=True)
    db = read_data(message.chat.id)
    list = []
    if len(db.keys()) > 0:
        for i in db.keys():
            slp = sum(db[i]['slp'])
            list.append((i, slp))
        list.sort(key=lambda x: x[1], reverse=True)
        stand = ''
        for i in list:
            stand += f'{i[0]} : {i[1]} - ${get_value_usd(i[1])}\n'
        message.reply_text(stand)
    else:
        message.reply_text('no tienes scholars :(')
    pass


# @app.on_message(filters.command('help'))
@app.on_message(filters.command('help'))
@app.on_message(filters.command('start'))
def help(client, message):
    message.reply_text("""
/add nombre wallet - añade el usuario a tu lista de scholars, recuerda sustituir ronin: por 0x\n
/del nombre - elimina el usuario de tu lista\n
/standing - muestra todos los scholars ordenados\n
Puedes contribuir con el desarrollo aqui: https://github.com/JavierOramas/scholar_standing_bot\no puedes donar para contribuir al desarrollo: 0x64eF391bb5Feae6023440AD12a9870062dd2B342
""")
    pass


app.run()
2.34375
2
skdecide/discrete_optimization/rcpsp_multiskill/parser/rcpsp_multiskill_parser.py
emilienDespres/scikit-decide
27
12810
# Copyright (c) AIRBUS and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from __future__ import annotations

from typing import Dict, Tuple

from skdecide.discrete_optimization.rcpsp_multiskill.rcpsp_multiskill import (
    Employee,
    MS_RCPSPModel,
    SkillDetail,
)


def parse_imopse(input_data, max_horizon=None):
    # parse the input
    # print('input_data\n', input_data)
    lines = input_data.split("\n")
    # "General characteristics:
    # Tasks: 161
    # Resources: 10
    # Precedence relations: 321
    # Number of skill types: 9
    # ====================================================================================================================
    # ResourceID   Salary   Skills
    # 1   14.2   Q2: 0  Q3: 2  Q1: 0  Q4: 2  Q7: 1  Q8: 2
    # 2   31.2   Q0: 0  Q4: 2  Q7: 1  Q3: 1  Q8: 2  Q2: 0
    # 3   34.4   Q4: 0  Q2: 1  Q6: 2  Q3: 1  Q0: 1  Q5: 0
    # 4   26.0   Q5: 2  Q1: 1  Q4: 1  Q8: 2  Q0: 2  Q2: 2
    # 5   30.8   Q8: 0  Q7: 1  Q3: 1  Q1: 2  Q4: 1  Q5: 1
    # 6   17.3   Q6: 1  Q3: 2  Q4: 2  Q2: 0  Q7: 2  Q1: 0
    # 7   19.8   Q1: 2  Q4: 2  Q5: 0  Q7: 1  Q3: 1  Q6: 2
    # 8   35.8   Q2: 1  Q0: 1  Q3: 2  Q6: 0  Q7: 0  Q8: 1
    # 9   37.6   Q7: 0  Q5: 2  Q2: 0  Q1: 0  Q0: 1  Q3: 1
    # 10  23.5   Q8: 1  Q5: 1  Q1: 2  Q6: 0  Q4: 0  Q3: 2 "
    nb_task = None
    nb_worker = None
    nb_precedence_relation = None
    nb_skills = None
    resource_zone = False
    task_zone = False
    resource_dict = {}
    task_dict = {}
    real_skills_found = set()
    for line in lines:
        words = line.split()
        if len(words) == 2 and words[0] == "Tasks:":
            nb_task = int(words[1])
            continue
        if len(words) == 2 and words[0] == "Resources:":
            nb_worker = int(words[1])
            continue
        if len(words) == 3 and words[0] == "Precedence" and words[1] == "relations:":
            nb_precedence_relation = int(words[2])
            continue
        if len(words) == 5 and words[0] == "Number" and words[1] == "of":
            nb_skills = int(words[4])
            continue
        if len(words) == 0:
            continue
        if words[0] == "ResourceID":
            resource_zone = True
            continue
        if words[0] == "TaskID":
            task_zone = True
            continue
        if resource_zone:
            if words[0][0] == "=":
                resource_zone = False
                continue
            else:
                id_worker = words[0]
                resource_dict[id_worker] = {"salary": float(words[1])}
                for word in words[2:]:
                    if word[0] == "Q":
                        current_skill = word[:-1]
                        continue
                    resource_dict[id_worker][current_skill] = int(word) + 1
                    real_skills_found.add(current_skill)
        if task_zone:
            if words[0][0] == "=":
                task_zone = False
                continue
            else:
                task_id = int(words[0])
                if task_id not in task_dict:
                    task_dict[task_id] = {"id": task_id, "successors": [], "skills": {}}
                task_dict[task_id]["duration"] = int(words[1])
                i = 2
                while i < len(words):
                    if words[i][0] == "Q":
                        current_skill = words[i][:-1]
                        task_dict[task_id]["skills"][current_skill] = int(words[i + 1]) + 1
                        real_skills_found.add(current_skill)
                        i = i + 2
                        continue
                    else:
                        if "precedence" not in task_dict[task_id]:
                            task_dict[task_id]["precedence"] = []
                        task_dict[task_id]["precedence"] += [int(words[i])]
                        if int(words[i]) not in task_dict:
                            task_dict[int(words[i])] = {
                                "id": int(words[i]),
                                "successors": [],
                                "skills": {},
                            }
                        if "successors" not in task_dict[int(words[i])]:
                            task_dict[int(words[i])]["successors"] = []
                        task_dict[int(words[i])]["successors"] += [task_id]
                        i += 1
    # print(resource_dict)
    # print(task_dict)
    sorted_task_names = sorted(task_dict.keys())
    task_id_to_new_name = {
        sorted_task_names[i]: i + 2 for i in range(len(sorted_task_names))
    }
    new_tame_to_original_task_id = {
        task_id_to_new_name[ind]: ind for ind in task_id_to_new_name
    }
    mode_details = {
        task_id_to_new_name[task_id]: {1: {"duration": task_dict[task_id]["duration"]}}
        for task_id in task_dict
    }
    resource_dict = {int(i): resource_dict[i] for i in resource_dict}
    # skills = set(["Q" + str(i) for i in range(nb_skills)])
    skills = real_skills_found
    for task_id in task_dict:
        for skill in skills:
            req_skill = task_dict[task_id]["skills"].get(skill, 0.0)
            mode_details[task_id_to_new_name[task_id]][1][skill] = req_skill
    mode_details[1] = {1: {"duration": 0}}
    for skill in skills:
        mode_details[1][1][skill] = int(0)
    max_t = max(mode_details)
    mode_details[max_t + 1] = {1: {"duration": 0}}
    for skill in skills:
        mode_details[max_t + 1][1][skill] = int(0)
    successors = {
        task_id_to_new_name[task_id]: [
            task_id_to_new_name[t] for t in task_dict[task_id]["successors"]
        ]
        + [max_t + 1]
        for task_id in task_dict
    }
    successors[max_t + 1] = []
    successors[1] = [k for k in successors]
    # max_horizon = 2 * sum([task_dict[task_id]["duration"] for task_id in task_dict])
    max_horizon = 300 if max_horizon is None else max_horizon
    return (
        MS_RCPSPModel(
            skills_set=set(real_skills_found),
            resources_set=set(),
            non_renewable_resources=set(),
            resources_availability={},
            employees={
                res: Employee(
                    dict_skill={
                        skill: SkillDetail(
                            skill_value=resource_dict[res][skill],
                            efficiency_ratio=1.0,
                            experience=1.0,
                        )
                        for skill in resource_dict[res]
                        if skill != "salary"
                    },
                    salary=resource_dict[res]["salary"],
                    calendar_employee=[True] * max_horizon,
                )
                for res in resource_dict
            },
            employees_availability=[len(resource_dict)] * max_horizon,
            mode_details=mode_details,
            successors=successors,
            horizon=max_horizon,
            source_task=1,
            sink_task=max_t + 1,
            one_unit_per_task_max=True,
        ),
        new_tame_to_original_task_id,
    )


def parse_file(file_path, max_horizon=None) -> Tuple[MS_RCPSPModel, Dict]:
    with open(file_path, "r") as input_data_file:
        input_data = input_data_file.read()
        rcpsp_model, new_tame_to_original_task_id = parse_imopse(input_data, max_horizon)
        return rcpsp_model, new_tame_to_original_task_id
1.757813
2
pipe_anchorages/logging_monkeypatch.py
GlobalFishingWatch/anchorages_pipeline
3
12811
import logging

# monkey patch to suppress the annoying warning you get when you import apache_beam
#
#   No handlers could be found for logger "oauth2client.contrib.multistore_file"
#
# This warning is harmless, but annoying when you are using beam from a command line app
# see: https://issues.apache.org/jira/browse/BEAM-1183
# This just creates a null handler for that logger so there is no output

logger = logging.getLogger('oauth2client.contrib.multistore_file')
handler = logging.NullHandler()
logger.addHandler(handler)
1.835938
2
core/managers.py
Bilal815/ecommerce_storee
95
12812
from django.db import models


class SoftDeleteManager(models.Manager):
    def save_soft_delete(self):
        self.is_deleted = True
        self.save()
        return True

    def get_soft_delete(self):
        return self.filter(is_deleted=True)

    def get_unsoft_delete(self):
        return self.filter(is_deleted=False)
2.15625
2
mizani/breaks.py
stillmatic/mizani
0
12813
<reponame>stillmatic/mizani<filename>mizani/breaks.py
"""
All scales have a means by which the values that are mapped
onto the scale are interpreted. Numeric digital scales put
out numbers for direct interpretation, but most scales
cannot do this. What they offer is named markers/ticks that
aid in assessing the values e.g. the common odometer will
have ticks and values to help gauge the speed of the vehicle.

The named markers are what we call breaks. Properly calculated
breaks make interpretation straight forward. These functions
provide ways to calculate good (hopefully) breaks.
"""
from __future__ import division

import numpy as np
import pandas as pd
from matplotlib.dates import MinuteLocator, HourLocator, DayLocator
from matplotlib.dates import WeekdayLocator, MonthLocator, YearLocator
from matplotlib.dates import AutoDateLocator
from matplotlib.dates import num2date, YEARLY
from matplotlib.ticker import MaxNLocator

from .utils import min_max, SECONDS, NANOSECONDS
from .utils import same_log10_order_of_magnitude


__all__ = ['mpl_breaks', 'log_breaks', 'minor_breaks',
           'trans_minor_breaks', 'date_breaks',
           'timedelta_breaks', 'extended_breaks']

# The break calculations rely on MPL locators to do
# the heavy lifting. It may be more convenient to lift
# the calculations out of MPL.


class DateLocator(AutoDateLocator):

    def __init__(self):
        AutoDateLocator.__init__(self, minticks=5,
                                 interval_multiples=True)
        # Remove 4 and 400
        self.intervald[YEARLY] = [
            1, 2, 5, 10, 20, 50, 100, 200, 500, 1000,
            2000, 5000, 10000]
        self.create_dummy_axis()

    def tick_values(self, vmin, vmax):
        # get locator
        # if yearlocator
        # change the vmin to turn of decade or half-decade
        ticks = AutoDateLocator.tick_values(self, vmin, vmax)
        return ticks


class mpl_breaks(object):
    """
    Compute breaks using MPL's default locator

    See :class:`~matplotlib.ticker.MaxNLocator` for the
    parameter descriptions

    Examples
    --------
    >>> x = range(10)
    >>> limits = (0, 9)
    >>> mpl_breaks()(limits)
    array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])
    >>> mpl_breaks(nbins=2)(limits)
    array([ 0.,  5., 10.])
    """
    def __init__(self, *args, **kwargs):
        self.locator = MaxNLocator(*args, **kwargs)

    def __call__(self, limits):
        """
        Compute breaks

        Parameters
        ----------
        limits : tuple
            Minimum and maximum values

        Returns
        -------
        out : array_like
            Sequence of breaks points
        """
        if any(np.isinf(limits)):
            return []

        if limits[0] == limits[1]:
            return np.array([limits[0]])

        return self.locator.tick_values(limits[0], limits[1])


class log_breaks(object):
    """
    Integer breaks on log transformed scales

    Parameters
    ----------
    n : int
        Desired number of breaks
    base : int
        Base of logarithm

    Examples
    --------
    >>> x = np.logspace(3, 7)
    >>> limits = min(x), max(x)
    >>> log_breaks()(limits)
    array([    100,   10000, 1000000])
    >>> log_breaks(2)(limits)
    array([   100, 100000])
    """
    def __init__(self, n=5, base=10):
        self.n = n
        self.base = base

    def __call__(self, limits):
        """
        Compute breaks

        Parameters
        ----------
        limits : tuple
            Minimum and maximum values

        Returns
        -------
        out : array_like
            Sequence of breaks points
        """
        n = self.n
        base = self.base

        if any(np.isinf(limits)):
            return []

        rng = np.log(limits)/np.log(base)

        if base == 10 and same_log10_order_of_magnitude(rng):
            return extended_breaks(n=4)(limits)

        _min = int(np.floor(rng[0]))
        _max = int(np.ceil(rng[1]))

        if _max == _min:
            return base ** _min

        step = (_max-_min)//n + 1
        dtype = float if (_min < 0) else int
        return base ** np.arange(_min, _max+1, step, dtype=dtype)


class minor_breaks(object):
    """
    Compute minor breaks

    Parameters
    ----------
    n : int
        Number of minor breaks between the major breaks.

    Examples
    --------
    >>> major = [1, 2, 3, 4]
    >>> limits = [0, 5]
    >>> minor_breaks()(major, limits)
    array([0.5, 1.5, 2.5, 3.5, 4.5])
    """
    def __init__(self, n=1):
        self.n = n

    def __call__(self, major, limits=None):
        """
        Minor breaks

        Parameters
        ----------
        major : array_like
            Major breaks
        limits : array_like | None
            Limits of the scale. If *array_like*, must be
            of size 2. If **None**, then the minimum and
            maximum of the major breaks are used.

        Returns
        -------
        out : array_like
            Minor breaks
        """
        n = self.n

        if len(major) < 2:
            return np.array([])

        if limits is None:
            limits = min_max(major)

        # Try to infer additional major breaks so that
        # minor breaks can be generated beyond the first
        # and last major breaks
        diff = np.diff(major)
        step = diff[0]
        if len(diff) > 1 and all(diff == step):
            major = np.hstack([major[0]-step,
                               major,
                               major[-1]+step])

        mbreaks = []
        factors = np.arange(1, n+1)
        for lhs, rhs in zip(major[:-1], major[1:]):
            sep = (rhs - lhs)/(n+1)
            mbreaks.append(lhs + factors*sep)

        minor = np.hstack(mbreaks)
        minor = minor.compress((limits[0] <= minor) &
                               (minor <= limits[1]))
        return minor


class trans_minor_breaks(object):
    """
    Compute minor breaks for transformed scales

    The minor breaks are computed in data space.
    This together with major breaks computed in
    transform space reveals the non-linearity of
    a scale. See the log transforms created with
    :func:`log_trans` like :class:`log10_trans`.

    Parameters
    ----------
    trans : trans or type
        Trans object or trans class.
    n : int
        Number of minor breaks between the major breaks.

    Examples
    --------
    >>> from mizani.transforms import sqrt_trans
    >>> major = [1, 2, 3, 4]
    >>> limits = [0, 5]
    >>> sqrt_trans().minor_breaks(major, limits)
    array([0.5, 1.5, 2.5, 3.5, 4.5])
    >>> class sqrt_trans2(sqrt_trans):
    ...     def __init__(self):
    ...         self.minor_breaks = trans_minor_breaks(sqrt_trans2)
    >>> sqrt_trans2().minor_breaks(major, limits)
    array([1.58113883, 2.54950976, 3.53553391])
    """
    def __init__(self, trans, n=1):
        self.trans = trans
        self.n = n

    def __call__(self, major, limits=None):
        """
        Minor breaks for transformed scales

        Parameters
        ----------
        major : array_like
            Major breaks
        limits : array_like | None
            Limits of the scale. If *array_like*, must be
            of size 2. If **None**, then the minimum and
            maximum of the major breaks are used.

        Returns
        -------
        out : array_like
            Minor breaks
        """
        if not self.trans.dataspace_is_numerical:
            raise TypeError(
                "trans_minor_breaks can only be used for data "
                "whose format is numerical.")

        if limits is None:
            limits = min_max(major)

        major = self._extend_breaks(major)
        major = self.trans.inverse(major)
        limits = self.trans.inverse(limits)
        minor = minor_breaks(self.n)(major, limits)
        return self.trans.transform(minor)

    def _extend_breaks(self, major):
        """
        Append 2 extra breaks at either end of major

        If breaks of transform space are non-equidistant,
        :func:`minor_breaks` add minor breaks beyond the first
        and last major breaks. The solution is to extend those
        breaks (in transformed space) before the minor break
        call is made. How the breaks are extended depends on
        the type of transform.
        """
        trans = self.trans
        trans = trans if isinstance(trans, type) else trans.__class__
        # so far we are only certain about this extending stuff
        # making sense for log transform
        is_log = trans.__name__.startswith('log')
        diff = np.diff(major)
        step = diff[0]
        if is_log and all(diff == step):
            major = np.hstack([major[0]-step, major, major[-1]+step])
        return major


# Matplotlib's YearLocator uses different named
# arguments than the others
LOCATORS = {
    'minute': MinuteLocator,
    'hour': HourLocator,
    'day': DayLocator,
    'week': WeekdayLocator,
    'month': MonthLocator,
    'year': lambda interval: YearLocator(base=interval)
}


class date_breaks(object):
    """
    Regularly spaced dates

    Parameters
    ----------
    width : str | None
        An interval specification. Must be one of
        [minute, hour, day, week, month, year]
        If ``None``, the interval is automatic.

    Examples
    --------
    >>> from datetime import datetime
    >>> x = [datetime(year, 1, 1) for year in [2010, 2026, 2015]]

    Default breaks will be regularly spaced but the spacing
    is automatically determined

    >>> limits = min(x), max(x)
    >>> breaks = date_breaks()
    >>> [d.year for d in breaks(limits)]
    [2010, 2012, 2014, 2016, 2018, 2020, 2022, 2024, 2026]

    Breaks at 4 year intervals

    >>> breaks = date_breaks('4 year')
    >>> [d.year for d in breaks(limits)]
    [2008, 2012, 2016, 2020, 2024, 2028]
    """
    def __init__(self, width=None):
        if not width:
            locator = DateLocator()
        else:
            # Parse the width specification
            # e.g. '10 weeks' => (10, week)
            _n, units = width.strip().lower().split()
            interval, units = int(_n), units.rstrip('s')
            locator = LOCATORS[units](interval=interval)
        self.locator = locator

    def __call__(self, limits):
        """
        Compute breaks

        Parameters
        ----------
        limits : tuple
            Minimum and maximum :class:`datetime.datetime` values.

        Returns
        -------
        out : array_like
            Sequence of break points.
        """
        if any(pd.isnull(x) for x in limits):
            return []

        ret = self.locator.tick_values(*limits)
        # MPL returns the tick_values in ordinal format,
        # but we return them in the same space as the
        # inputs.
        return [num2date(val) for val in ret]


class timedelta_breaks(object):
    """
    Timedelta breaks

    Returns
    -------
    out : callable ``f(limits)``
        A function that takes a sequence of two
        :class:`datetime.timedelta` values and returns
        a sequence of break points.

    Examples
    --------
    >>> from datetime import timedelta
    >>> breaks = timedelta_breaks()
    >>> x = [timedelta(days=i*365) for i in range(25)]
    >>> limits = min(x), max(x)
    >>> major = breaks(limits)
    >>> [val.total_seconds()/(365*24*60*60) for val in major]
    [0.0, 5.0, 10.0, 15.0, 20.0, 25.0]
    """
    def __init__(self, n=5, Q=(1, 2, 5, 10)):
        self._breaks_func = extended_breaks(n=n, Q=Q)

    def __call__(self, limits):
        """
        Compute breaks

        Parameters
        ----------
        limits : tuple
            Minimum and maximum :class:`datetime.timedelta` values.

        Returns
        -------
        out : array_like
            Sequence of break points.
        """
        if any(pd.isnull(x) for x in limits):
            return []

        helper = timedelta_helper(limits)
        scaled_limits = helper.scaled_limits()
        scaled_breaks = self._breaks_func(scaled_limits)
        breaks = helper.numeric_to_timedelta(scaled_breaks)
        return breaks


# This could be cleaned up, state overload?
class timedelta_helper(object):
    """
    Helper for computing timedelta breaks and labels.

    How to use - breaks?

    1. Initialise with a timedelta sequence/limits.
    2. Get the scaled limits and use those to calculate
       breaks using a general purpose breaks calculating
       routine. The scaled limits are in numerical format.
    3. Convert the computed breaks from numeric into timedelta.

    See, :func:`timedelta_breaks`

    How to use - formatting?

    1. Call :meth:`format_info` with the timedelta values to be
       formatted and get back a tuple of numeric values and the
       units for those values.
    2. Format the values with a general purpose formatting
       routine.

    See, :func:`timedelta_format`
    """
    def __init__(self, x, units=None):
        self.x = x
        self.type = type(x[0])
        self.package = self.determine_package(x[0])
        _limits = min(x), max(x)
        self.limits = self.value(_limits[0]), self.value(_limits[1])
        self.units = units or self.best_units(_limits)
        self.factor = self.get_scaling_factor(self.units)

    @classmethod
    def determine_package(cls, td):
        if hasattr(td, 'components'):
            package = 'pandas'
        elif hasattr(td, 'total_seconds'):
            package = 'cpython'
        else:
            msg = '{} format not yet supported.'
            raise ValueError(msg.format(td.__class__))
        return package

    @classmethod
    def format_info(cls, x, units=None):
        helper = cls(x, units)
        return helper.timedelta_to_numeric(x), helper.units

    def best_units(self, sequence):
        """
        Determine good units for representing a sequence of timedeltas
        """
        # Read
        #   [(0.9, 's'),
        #    (9, 'm)]
        # as, break ranges between 0.9 seconds (inclusive)
        # and 9 minutes are represented in seconds. And so on.
        ts_range = self.value(max(sequence)) - self.value(min(sequence))
        package = self.determine_package(sequence[0])
        if package == 'pandas':
            cuts = [
                (0.9, 'us'),
                (0.9, 'ms'),
                (0.9, 's'),
                (9, 'm'),
                (6, 'h'),
                (4, 'd'),
                (4, 'w'),
                (4, 'M'),
                (3, 'y')]
            denomination = NANOSECONDS
            base_units = 'ns'
        else:
            cuts = [
                (0.9, 's'),
                (9, 'm'),
                (6, 'h'),
                (4, 'd'),
                (4, 'w'),
                (4, 'M'),
                (3, 'y')]
            denomination = SECONDS
            base_units = 'ms'

        for size, units in reversed(cuts):
            if ts_range >= size*denomination[units]:
                return units

        return base_units

    def value(self, td):
        """
        Return the numeric value representation on a timedelta
        """
        if self.package == 'pandas':
            return td.value
        else:
            return td.total_seconds()

    def scaled_limits(self):
        """
        Minimum and Maximum to use for computing breaks
        """
        _min = self.limits[0]/self.factor
        _max = self.limits[1]/self.factor
        return _min, _max

    def timedelta_to_numeric(self, timedeltas):
        """
        Convert sequence of timedelta to numerics
        """
        return [self.to_numeric(td) for td in timedeltas]

    def numeric_to_timedelta(self, numerics):
        """
        Convert sequence of numerics to timedelta
        """
        if self.package == 'pandas':
            return [self.type(int(x*self.factor), units='ns')
                    for x in numerics]
        else:
            return [self.type(seconds=x*self.factor)
                    for x in numerics]

    def get_scaling_factor(self, units):
        if self.package == 'pandas':
            return NANOSECONDS[units]
        else:
            return SECONDS[units]

    def to_numeric(self, td):
        """
        Convert timedelta to a number corresponding to the
        appropriate units. The appropriate units are those
        determined when the object is initialised.
        """
        if self.package == 'pandas':
            return td.value/NANOSECONDS[self.units]
        else:
            return td.total_seconds()/SECONDS[self.units]


class extended_breaks(object):
    """
    An extension of Wilkinson's tick position algorithm

    Parameters
    ----------
    n : int
        Desired number of ticks
    Q : list
        List of nice numbers
    only_inside : bool
        If ``True``, then all the ticks will be within the given
        range.
    w : list
        Weights applied to the four optimization components
        (simplicity, coverage, density, and legibility). They
        should add up to 1.

    Examples
    --------
    >>> limits = (0, 9)
    >>> extended_breaks()(limits)
    array([ 0. ,  2.5,  5. ,  7.5, 10. ])
    >>> extended_breaks(n=6)(limits)
    array([ 0.,  2.,  4.,  6.,  8., 10.])

    References
    ----------
    - <NAME>., <NAME>., <NAME>. (2010) An Extension of
      Wilkinson's Algorithm for Positioning Tick Labels on Axes,
      InfoVis 2010.

    Additional Credit to <NAME> on whose code this implementation
    is almost entirely based.
    """
    def __init__(self, n=5, Q=[1, 5, 2, 2.5, 4, 3],
                 only_inside=False, w=[0.25, 0.2, 0.5, 0.05]):
        self.Q = Q
        self.only_inside = only_inside
        self.w = w
        self.n = n
        # Used for lookups during the computations
        self.Q_index = {q: i for i, q in enumerate(Q)}

    def coverage(self, dmin, dmax, lmin, lmax):
        p1 = (dmax-lmax)**2
        p2 = (dmin-lmin)**2
        p3 = (0.1*(dmax-dmin))**2
        return 1 - 0.5*(p1+p2)/p3

    def coverage_max(self, dmin, dmax, span):
        range = dmax-dmin
        if span > range:
            half = (span-range)/2.0
            return 1 - (half**2) / (0.1*range)**2
        else:
            return 1

    def density(self, k, dmin, dmax, lmin, lmax):
        r = (k-1.0) / (lmax-lmin)
        rt = (self.n-1) / (max(lmax, dmax) - min(lmin, dmin))
        return 2 - max(r/rt, rt/r)

    def density_max(self, k):
        if k >= self.n:
            return 2 - (k-1.0)/(self.n-1.0)
        else:
            return 1

    def simplicity(self, q, j, lmin, lmax, lstep):
        eps = 1e-10
        n = len(self.Q)
        i = self.Q_index[q]+1

        if ((lmin % lstep < eps or
                (lstep - lmin % lstep) < eps) and
                lmin <= 0 and lmax >= 0):
            v = 1
        else:
            v = 0

        return (n-i)/(n-1.0) + v - j

    def simplicity_max(self, q, j):
        n = len(self.Q)
        i = self.Q_index[q]+1
        v = 1
        return (n-i)/(n-1.0) + v - j

    def legibility(self, lmin, lmax, lstep):
        # Legibility depends on fontsize, rotation, overlap ... i.e.
        # it requires drawing or simulating drawn breaks then
        # calculating a score. Return 1 ignores all that.
        return 1

    def __call__(self, limits):
        """
        Calculate the breaks

        Parameters
        ----------
        limits : array
            Minimum and maximum values.

        Returns
        -------
        out : array_like
            Sequence of break points.
        """
        Q = self.Q
        w = self.w
        only_inside = self.only_inside
        simplicity_max = self.simplicity_max
        density_max = self.density_max
        coverage_max = self.coverage_max
        simplicity = self.simplicity
        coverage = self.coverage
        density = self.density
        legibility = self.legibility
        log10 = np.log10
        ceil = np.ceil
        floor = np.floor
        dmin, dmax = limits

        if dmin > dmax:
            dmin, dmax = dmax, dmin
        elif dmin == dmax:
            return np.array([dmin])

        best_score = -2
        j = 1

        while j < float('inf'):
            for q in Q:
                sm = simplicity_max(q, j)

                if w[0]*sm + w[1] + w[2] + w[3] < best_score:
                    j = float('inf')
                    break

                k = 2
                while k < float('inf'):
                    dm = density_max(k)

                    if w[0]*sm + w[1] + w[2]*dm + w[3] < best_score:
                        break

                    delta = (dmax-dmin)/(k+1)/j/q
                    z = ceil(log10(delta))

                    while z < float('inf'):
                        step = j*q*(10**z)
                        cm = coverage_max(dmin, dmax, step*(k-1))

                        if w[0]*sm + w[1]*cm + w[2]*dm + w[3] < best_score:
                            break

                        min_start = int(floor(dmax/step)*j - (k-1)*j)
                        max_start = int(ceil(dmin/step)*j)

                        if min_start > max_start:
                            z = z+1
                            break

                        for start in range(min_start, max_start+1):
                            lmin = start * (step/j)
                            lmax = lmin + step*(k-1)
                            lstep = step

                            s = simplicity(q, j, lmin, lmax, lstep)
                            c = coverage(dmin, dmax, lmin, lmax)
                            d = density(k, dmin, dmax, lmin, lmax)
                            l = legibility(lmin, lmax, lstep)

                            score = w[0]*s + w[1]*c + w[2]*d + w[3]*l
                            if (score > best_score and
                                    (not only_inside or
                                     (lmin >= dmin and lmax <= dmax))):
                                best_score = score
                                best = (lmin, lmax, lstep, q, k)
                        z = z+1
                    k = k+1
            j = j+1

        try:
            locs = best[0] + np.arange(best[4])*best[2]
        except UnboundLocalError:
            locs = []
        return locs
3.03125
3
examples/04_sweep_wind_directions.py
ElieKadoche/floris
0
12814
# Copyright 2022 NREL

# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.

# See https://floris.readthedocs.io for documentation


import matplotlib.pyplot as plt
import numpy as np

from floris.tools import FlorisInterface
from floris.tools.visualization import visualize_cut_plane

"""
04_sweep_wind_directions

This example demonstrates vectorization of wind direction.
A vector of wind directions is passed to the initialize function
and the powers of the two simulated turbines are computed for all
wind directions in one call.

The power of both turbines for each wind direction is then plotted.
"""

# Instantiate FLORIS using either the GCH or CC model
fi = FlorisInterface("inputs/gch.yaml")  # GCH model matched to the default "legacy_gauss" of V2
# fi = FlorisInterface("inputs/cc.yaml")  # New CumulativeCurl model

# Define a two turbine farm
D = 126.
layout_x = np.array([0, D*6])
layout_y = [0, 0]
fi.reinitialize(layout=[layout_x, layout_y])

# Sweep wind directions but keep the wind speed fixed
wd_array = np.arange(250, 291, 1.)
fi.reinitialize(wind_directions=wd_array)

# Define a matrix of yaw angles to be all 0
# Note that yaw angles is now specified as a matrix whose dimensions are
# wd/ws/turbine
num_wd = len(wd_array)       # Number of wind directions
num_ws = 1                   # Number of wind speeds
num_turbine = len(layout_x)  # Number of turbines
yaw_angles = np.zeros((num_wd, num_ws, num_turbine))

# Calculate
fi.calculate_wake(yaw_angles=yaw_angles)

# Collect the turbine powers
turbine_powers = fi.get_turbine_powers() / 1E3  # In kW

# Pull out the power values per turbine
pow_t0 = turbine_powers[:, :, 0].flatten()
pow_t1 = turbine_powers[:, :, 1].flatten()

# Plot
fig, ax = plt.subplots()
ax.plot(wd_array, pow_t0, color='k', label='Upstream Turbine')
ax.plot(wd_array, pow_t1, color='r', label='Downstream Turbine')
ax.grid(True)
ax.legend()
ax.set_xlabel('Wind Direction (deg)')
ax.set_ylabel('Power (kW)')
plt.show()
2.6875
3
1.main.py
learning-nn/nn_from_scratch
0
12815
<reponame>learning-nn/nn_from_scratch
import numpy
import numpy as np

# converting to a layer with 4 inputs and 3 neurons
inputs = [[1.2, 2.1, 3.4, 1.2],
          [1.2, 2.1, 3.4, 1.2],
          [1.2, 2.1, 3.4, 1.2]]
print(numpy.shape(inputs))

weights = [[4.1, -4.5, 3.1, 2.3],
           [-4.1, 4.5, 2.1, 2.3],
           [4.1, 4.5, 3.1, -2.3]]
print(numpy.shape(weights))

biases = [1, 2, 3]

weights2 = [[4.1, -4.5, 3.1],
            [-4.1, 4.5, 2.1],
            [4.1, 4.5, 3.1]]
biases2 = [1, 2, 3]

layer1_outputs = np.dot(inputs, np.array(weights).T) + biases
layer2_outputs = np.dot(layer1_outputs, np.array(weights2).T) + biases2
print(layer2_outputs)
3.515625
4
backend/grant/task/__init__.py
DSBUGAY2/zcash-grant-system
8
12816
<filename>backend/grant/task/__init__.py
from . import models
from . import views
from . import commands
from . import jobs
1.195313
1
DjangoTry/venv/Lib/site-packages/django_select2/__init__.py
PavelKoksharov/QR-BOOK
0
12817
<filename>DjangoTry/venv/Lib/site-packages/django_select2/__init__.py
"""
This is a Django_ integration of Select2_.

The application includes Select2 driven Django Widgets and Form Fields.

.. _Django: https://www.djangoproject.com/
.. _Select2: https://select2.org/
"""
from django import get_version

if get_version() < '3.2':
    default_app_config = "django_select2.apps.Select2AppConfig"
1.71875
2
Sorting/bubble.py
Krylovsentry/Algorithms
1
12818
# O(n ** 2)
def bubble_sort(slist, asc=True):
    for iteration in range(len(slist))[::-1]:
        # reset per pass so we can stop as soon as a full pass makes no swaps
        need_exchanges = False
        for j in range(iteration):
            if asc:
                if slist[j] > slist[j + 1]:
                    need_exchanges = True
                    slist[j], slist[j + 1] = slist[j + 1], slist[j]
            else:
                if slist[j] < slist[j + 1]:
                    need_exchanges = True
                    slist[j], slist[j + 1] = slist[j + 1], slist[j]
        if not need_exchanges:
            return slist
    return slist


print(bubble_sort([8, 1, 13, 34, 5, 2, 21, 3, 1], False))
print(bubble_sort([1, 2, 3, 4, 5, 6]))
3.765625
4
chapter_13/pymail.py
bimri/programming_python
0
12819
"A Console-Based Email Client" #!/usr/local/bin/python """ ########################################################################## pymail - a simple console email interface client in Python; uses Python poplib module to view POP email messages, smtplib to send new mails, and the email package to extract mail headers and payload and compose mails; ########################################################################## """ import poplib, smtplib, email.utils, mailconfig from email.parser import Parser from email.message import Message fetchEncoding = mailconfig.fetchEncoding def decodeToUnicode(messageBytes, fetchEncoding=fetchEncoding): """ 4E, Py3.1: decode fetched bytes to str Unicode string for display or parsing; use global setting (or by platform default, hdrs inspection, intelligent guess); in Python 3.2/3.3, this step may not be required: if so, return message intact; """ return [line.decode(fetchEncoding) for line in messageBytes] def splitaddrs(field): """ 4E: split address list on commas, allowing for commas in name parts """ pairs = email.utils.getaddresses([field]) # [(name,addr)] return [email.utils.formataddr(pair) for pair in pairs] # [name <addr>] def inputmessage(): import sys From = input('From? ').strip() To = input('To? ').strip() # datetime hdr may be set auto To = splitaddrs(To) # possible many, name+<addr> okay Subj = input('Subj? ').strip() # don't split blindly on ',' or ';' print('Type message text, end with line="."') text = '' while True: line = sys.stdin.readline() if line == '.\n': break text += line return From, To, Subj, text def sendmessage(): From, To, Subj, text = inputmessage() msg = Message() msg['From'] = From msg['To'] = ', '.join(To) # join for hdr, not send msg['Subject'] = Subj msg['Date'] = email.utils.formatdate() # curr datetime, rfc2822 msg.set_payload(text) server = smtplib.SMTP(mailconfig.smtpservername) try: failed = server.sendmail(From, To, str(msg)) # may also raise exc except: print('Error - send failed') else: if failed: print('Failed:', failed) def connect(servername, user, passwd): print('Connecting...') server = poplib.POP3(servername) server.user(user) # connect, log in to mail server server.pass_(passwd) # pass is a reserved word print(server.getwelcome()) # print returned greeting message return server def loadmessages(servername, user, passwd, loadfrom=1): server = connect(servername, user, passwd) try: print(server.list()) (msgCount, msgBytes) = server.stat() print('There are', msgCount, 'mail messages in', msgBytes, 'bytes') print('Retrieving...') msgList = [] # fetch mail now for i in range(loadfrom, msgCount+1): # empty if low >= high (hdr, message, octets) = server.retr(i) # save text on list message = decodeToUnicode(message) # 4E, Py3.1: bytes to str msgList.append('\n'.join(message)) # leave mail on server finally: server.quit() # unlock the mail box assert len(msgList) == (msgCount - loadfrom) + 1 # msg nums start at 1 return msgList def deletemessages(servername, user, passwd, toDelete, verify=True): print('To be deleted:', toDelete) if verify and input('Delete?')[:1] not in ['y', 'Y']: print('Delete cancelled.') else: server = connect(servername, user, passwd) try: print('Deleting messages from server...') for msgnum in toDelete: # reconnect to delete mail server.dele(msgnum) # mbox locked until quit() finally: server.quit() def showindex(msgList): count = 0 # show some mail headers for msgtext in msgList: msghdrs = Parser().parsestr(msgtext, headersonly=True) # expects str in 3.1 count += 1 print('%d:\t%d bytes' 
% (count, len(msgtext))) for hdr in ('From', 'To', 'Date', 'Subject'): try: print('\t%-8s=>%s' % (hdr, msghdrs[hdr])) except KeyError: print('\t%-8s=>(unknown)' % hdr) if count % 5 == 0: input('[Press Enter key]') # pause after each 5 def showmessage(i, msgList): if 1 <= i <= len(msgList): #print(msgList[i-1]) # old: prints entire mail--hdrs+text print('-' * 79) msg = Parser().parsestr(msgList[i-1]) # expects str in 3.1 content = msg.get_payload() # prints payload: string, or [Messages] if isinstance(content, str): # keep just one end-line at end content = content.rstrip() + '\n' print(content) print('-' * 79) # to get text only, see email.parsers else: print('Bad message number') def savemessage(i, mailfile, msgList): if 1 <= i <= len(msgList): savefile = open(mailfile, 'a', encoding=mailconfig.fetchEncoding) # 4E savefile.write('\n' + msgList[i-1] + '-'*80 + '\n') else: print('Bad message number') def msgnum(command): try: return int(command.split()[1]) except: return -1 # assume this is bad helptext = """ Available commands: i - index display l n? - list all messages (or just message n) d n? - mark all messages for deletion (or just message n) s n? - save all messages to a file (or just message n) m - compose and send a new mail message q - quit pymail ? - display this help text """ def interact(msgList, mailfile): showindex(msgList) toDelete = [] while True: try: command = input('[Pymail] Action? (i, l, d, s, m, q, ?) ') except EOFError: command = 'q' if not command: command = '*' # quit if command == 'q': break # index elif command[0] == 'i': showindex(msgList) # list elif command[0] == 'l': if len(command) == 1: for i in range(1, len(msgList)+1): showmessage(i, msgList) else: showmessage(msgnum(command), msgList) # save elif command[0] == 's': if len(command) == 1: for i in range(1, len(msgList)+1): savemessage(i, mailfile, msgList) else: savemessage(msgnum(command), mailfile, msgList) # delete elif command[0] == 'd': if len(command) == 1: # delete all later toDelete = list(range(1, len(msgList)+1)) # 3.x requires list else: delnum = msgnum(command) if (1 <= delnum <= len(msgList)) and (delnum not in toDelete): toDelete.append(delnum) else: print('Bad message number') # mail elif command[0] == 'm': # send a new mail via SMTP sendmessage() #execfile('smtpmail.py', {}) # alt: run file in own namespace elif command[0] == '?': print(helptext) else: print('What? -- type "?" for commands help') return toDelete if __name__ == '__main__': import getpass, mailconfig mailserver = mailconfig.popservername # ex: 'pop.rmi.net' mailuser = mailconfig.popusername # ex: 'lutz' mailfile = mailconfig.savemailfile # ex: r'c:\stuff\savemail' mailpswd = getpass.getpass('Password for %s?' % mailserver) print('[Pymail email client]') msgList = loadmessages(mailserver, mailuser, mailpswd) # load all toDelete = interact(msgList, mailfile) if toDelete: deletemessages(mailserver, mailuser, mailpswd, toDelete) print('Bye.')
2.859375
3
dependencyinjection/internal/param_type_resolver.py
Cologler/dependencyinjection-python
0
12820
<reponame>Cologler/dependencyinjection-python<filename>dependencyinjection/internal/param_type_resolver.py<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017~2999 - cologler <<EMAIL>>
# ----------
#
# ----------

import typing
import inspect

from .errors import ParameterTypeResolveError


class ParameterTypeResolver:
    ''' designed to resolve a type from a parameter. '''

    def __init__(self, name_map: typing.Dict[str, type]):
        self._name_map = name_map.copy()

    def resolve(self, parameter: inspect.Parameter, allow_none: bool):
        if parameter.annotation is inspect.Parameter.empty:
            typ = self._name_map.get(parameter.name)
            if typ is None:
                msg = "cannot resolve parameter type from name: '{}'".format(parameter.name)
                raise ParameterTypeResolveError(msg)
            return typ
        elif isinstance(parameter.annotation, type):
            return parameter.annotation
        elif not allow_none:
            msg = 'cannot parse type from annotation: {}'.format(parameter.annotation)
            raise ParameterTypeResolveError(msg)
        # implicit: a non-type annotation with allow_none=True resolves to None
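# ---------------------------------------------------------------------------
# Added illustration (a minimal usage sketch, not part of the original
# module): resolving one parameter through the name map and another through
# its annotation. Only names defined above are assumed.
# ---------------------------------------------------------------------------
# def handler(repo, count: int):
#     pass
#
# resolver = ParameterTypeResolver({'repo': str})
# params = inspect.signature(handler).parameters
# assert resolver.resolve(params['repo'], allow_none=False) is str   # via name map
# assert resolver.resolve(params['count'], allow_none=False) is int  # via annotation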
2.296875
2
rest-api/server.py
phenomax/resnet50-miml-rest
1
12821
import os

from flask import Flask, request, jsonify

from resnet_model import MyResnetModel

app = Flask(__name__)

# max filesize 2mb
app.config['MAX_CONTENT_LENGTH'] = 2 * 1024 * 1024

# setup resnet model
model = MyResnetModel(os.path.dirname(os.path.abspath(__file__)))


@app.route("/")
def hello():
    return jsonify({"message": "Hello from the API"})


@app.route('/predict', methods=['POST'])
def predict():
    # reject requests without an uploaded file (400 Bad Request)
    if 'image' not in request.files:
        return jsonify({"error": "Missing file in request"}), 400

    img = request.files['image']
    return jsonify({"result": model.predict(img.read())})
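# ---------------------------------------------------------------------------
# Added illustration (a minimal client sketch, not part of the original
# file): exercising the /predict endpoint with the `requests` library. The
# host/port and the image filename are assumptions.
# ---------------------------------------------------------------------------
# import requests
#
# with open('cat.jpg', 'rb') as f:
#     resp = requests.post('http://localhost:5000/predict',
#                          files={'image': f})
# print(resp.status_code, resp.json())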
2.703125
3
authors/apps/profiles/tests/test_follow.py
KabohaJeanMark/ah-backend-invictus
7
12822
from django.urls import reverse from rest_framework import status from .base import BaseTestCase class FollowTestCase(BaseTestCase): """Testcases for following a user.""" def test_follow_user_post(self): """Test start following a user.""" url = reverse('follow', kwargs={'username': 'test2'}) response = self.client.post(url, HTTP_AUTHORIZATION=self.auth_header) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_post_follow_already_followed_user(self): """Test start following a user you already follow.""" url = reverse('follow', kwargs={'username': 'test2'}) self.client.post(url, HTTP_AUTHORIZATION=self.auth_header) response = self.client.post(url, HTTP_AUTHORIZATION=self.auth_header) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_follow_missing_user_post(self): """Test trying to start following a missing user.""" url = reverse('follow', kwargs={'username': 'joel'}) response = self.client.post(url, HTTP_AUTHORIZATION=self.auth_header) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_delete_follow(self): """Test unfollowing a user""" url = reverse('follow', kwargs={'username': 'test2'}) self.client.post(url, HTTP_AUTHORIZATION=self.auth_header) response = self.client.delete(url, HTTP_AUTHORIZATION=self.auth_header) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_delete_follow_of_not_followed_user(self): """Test unfollowing a user you are not following""" url = reverse('follow', kwargs={'username': 'test2'}) response = self.client.delete(url, HTTP_AUTHORIZATION=self.auth_header) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_list_followers_of_user(self): """Test list followers of a user""" url_followers = reverse('getfollowers', kwargs={'username': 'test2'}) self.client.get(url_followers, HTTP_AUTHORIZATION=self.auth_header) url_follow = reverse('follow', kwargs={'username': 'test2'}) self.client.post(url_follow, HTTP_AUTHORIZATION=self.auth_header) response = self.client.get(url_followers, HTTP_AUTHORIZATION=self.auth_header) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_list_user_is_following(self): """Test list users the user is following""" url_following = reverse('getfollowing', kwargs={'username': 'test1'}) self.client.get(url_following, HTTP_AUTHORIZATION=self.auth_header) url_follow = reverse('follow', kwargs={'username': 'test2'}) self.client.post(url_follow, HTTP_AUTHORIZATION=self.auth_header) response = self.client.get(url_following, HTTP_AUTHORIZATION=self.auth_header) self.assertEqual(response.status_code, status.HTTP_200_OK)
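# ---------------------------------------------------------------------------
# Added illustration (hypothetical sketch, not the real base module): the
# tests above rely on `.base.BaseTestCase` providing two registered users
# ('test1' is the authenticated follower, 'test2' is followed) and
# `self.auth_header` holding a token for 'test1'. The helper names below are
# invented for illustration only; the actual base class may differ.
# ---------------------------------------------------------------------------
# class BaseTestCase(APITestCase):
#     def setUp(self):
#         self.user1 = register_user('test1')                   # hypothetical helper
#         self.user2 = register_user('test2')                   # hypothetical helper
#         self.auth_header = 'Bearer ' + token_for(self.user1)  # hypothetical helper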
2.625
3
OpenPNM/Phases/__GenericPhase__.py
thirtywang/OpenPNM
0
12823
<filename>OpenPNM/Phases/__GenericPhase__.py
# -*- coding: utf-8 -*-
"""
===============================================================================
module __GenericPhase__: Base class for building Phase objects
===============================================================================

"""
from OpenPNM.Network import GenericNetwork
import OpenPNM.Phases.models
from OpenPNM.Base import Core, Tools, logging
import scipy as sp
logger = logging.getLogger(__name__)


class GenericPhase(Core):
    r"""
    Base class to generate a generic phase object.  The user must specify
    models and parameters for all the properties they require.  Classes for
    several common phases are included with OpenPNM and can be found under
    OpenPNM.Phases.

    Parameters
    ----------
    network : OpenPNM Network object
        The network to which this Phase should be attached

    components : list of OpenPNM Phase objects
        These Phase objects are fictitious or virtual phases that are the
        pure components from which the mixture is made.  They are used to
        calculate and store any pure component data.  If none are supplied,
        this object will act like either a pure component or a mixture whose
        properties are well known (like air) and need not be found from
        consideration of the pure component properties.

    name : str, optional
        A unique string name to identify the Phase object, typically same as
        instance name but can be anything.
    """
    def __init__(self, network=None, components=[], **kwargs):
        super().__init__(**kwargs)
        logger.name = self.name

        if network is None:
            network = GenericNetwork()
        self.network.update({network.name: network})

        # Initialize label 'all' in the object's own info dictionaries
        self['pore.all'] = self._net['pore.all']
        self['throat.all'] = self._net['throat.all']

        # Set standard conditions on the fluid to get started
        self['pore.temperature'] = 298.0
        self['pore.pressure'] = 101325.0

        # Register Phase object in Network dictionary
        self._net['pore.'+self.name] = True
        self._net['throat.'+self.name] = True

        if components != []:
            for comp in components:
                self.set_component(phase=comp)

        self._net.phases.update({self.name: self})  # Connect Phase to Network

    def __setitem__(self, prop, value):
        for phys in self._physics:
            if (prop in phys.keys()) and ('all' not in prop.split('.')):
                logger.error(prop + ' is already defined in at least one '
                             'associated Physics object')
                return
        super().__setitem__(prop, value)

    def __getitem__(self, key):
        if key.split('.')[-1] == self.name:
            element = key.split('.')[0]
            return self[element+'.all']
        if key not in self.keys():
            logger.debug(key + ' not on Phase, constructing data from Physics')
            return self._interleave_data(key, sources=self._physics)
        else:
            return super().__getitem__(key)

    def props(self, element=None, mode='all', deep=False):
        # TODO: The mode 'deep' is deprecated in favor of the deep argument
        # and should be removed in a future version
        modes = ['all', 'deep', 'models', 'constants']
        mode = self._parse_mode(mode=mode, allowed=modes, single=False)
        prop_list = []
        if ('deep' in mode) or (deep is True):
            if mode.count('deep') > 0:
                mode.remove('deep')
            for phys in self._physics:
                prop_list.extend(phys.props(element=element, mode=mode))
            # Get unique values
            prop_list = Tools.PrintableList(set(prop_list))
        prop_list.extend(super().props(element=element, mode=mode))
        return prop_list

    props.__doc__ = Core.props.__doc__

    def set_component(self, phase, mode='add'):
        r"""
        This method is used to add or remove a fictitious phase object
        to/from this object.
        Parameters
        ----------
        phase : OpenPNM Phase object
            This is the fictitious phase object defining a pure component.

        mode : string
            Indicates whether to 'add' or 'remove' the supplied Phase object
        """
        if mode == 'add':
            if phase.name in self.phases():
                raise Exception('Phase already present')
            else:
                # Associate components with self
                self.phases.update({phase.name: phase})
                # Associate self with components
                phase.phases.update({self.name: self})
                # Add models for components to inherit mixture T and P
                phase.models.add(propname='pore.temperature',
                                 model=OpenPNM.Phases.models.misc.mixture_value)
                phase.models.add(propname='pore.pressure',
                                 model=OpenPNM.Phases.models.misc.mixture_value)
                # Move T and P models to beginning of regeneration order
                phase.models.reorder({'pore.temperature': 0,
                                      'pore.pressure': 1})
        elif mode == 'remove':
            if phase.name in self.phases():
                self.phases.pop(phase.name)
            else:
                raise Exception('Phase not found')

    def check_mixture_health(self):
        r"""
        Query the properties of the 'virtual phases' that make up a mixture
        to ensure they all add up
        """
        mole_sum = sp.zeros((self.Np,))
        for comp in self._phases:
            try:
                mole_sum = mole_sum + comp['pore.mole_fraction']
            except:
                pass
        return mole_sum

    def check_physics_health(self):
        r"""
        Perform a check to find pores which have overlapping or undefined
        Physics
        """
        phys = self.physics()
        Ptemp = sp.zeros((self.Np,))
        Ttemp = sp.zeros((self.Nt,))
        for item in phys:
            Pind = self['pore.'+item]
            Tind = self['throat.'+item]
            Ptemp[Pind] = Ptemp[Pind] + 1
            Ttemp[Tind] = Ttemp[Tind] + 1
        health = Tools.HealthDict()
        health['overlapping_pores'] = sp.where(Ptemp > 1)[0].tolist()
        health['undefined_pores'] = sp.where(Ptemp == 0)[0].tolist()
        health['overlapping_throats'] = sp.where(Ttemp > 1)[0].tolist()
        health['undefined_throats'] = sp.where(Ttemp == 0)[0].tolist()
        return health
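# ---------------------------------------------------------------------------
# Added illustration (a minimal usage sketch, not part of the original
# module), assuming the OpenPNM 1.x API this class is written against; the
# Cubic network constructor signature is an assumption.
# ---------------------------------------------------------------------------
# import OpenPNM
#
# pn = OpenPNM.Network.Cubic(shape=[5, 5, 5])
# phase = OpenPNM.Phases.GenericPhase(network=pn, name='custom_fluid')
# print(phase['pore.temperature'][0])   # -> 298.0, the default set in __init__
# phase['pore.viscosity'] = 0.001       # user-supplied property data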
2.390625
2
rqt_mypkg/src/rqt_mypkg/statistics.py
mounteverset/moveit_path_visualizer
0
12824
<reponame>mounteverset/moveit_path_visualizer<gh_stars>0
#!/usr/bin/env python3

import sys
import copy
from moveit_commander import move_group
import rospy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
from math import pi, sqrt, pow
from std_msgs.msg import String
import io
import shutil
import json
# used to convert the points from the gui in a valid message for ros
from geometry_msgs.msg import Pose, PoseStamped
# used to read out the start points
import os
from nav_msgs.msg import Path
# used for publishing the planned path from start to goal
from visualization_msgs.msg import Marker, MarkerArray
# used to make a service request
from moveit_msgs.srv import GetPositionIKRequest, GetPositionIK
from rqt_mypkg import path_planning_interface
from trajectory_msgs.msg import JointTrajectoryPoint

## StatisticsDefinedPath is used to get the path length of given points/positions generated by the Motion Plan
class StatisticsDefinedPath(object):

    ## Returns the path length
    # @param eef_poses A list of end effector poses derived from the motion between start and goal pose
    def get_path_length(self, eef_poses):
        path_length = 0
        for i in range(len(eef_poses) - 1):
            ## @var posex
            # position x of the given position/point
            posex = eef_poses[i].position.x
            ## @var posey
            # position y of the given position/point
            posey = eef_poses[i].position.y
            ## @var posez
            # position z of the given position/point
            posez = eef_poses[i].position.z
            ## @var posex1
            # position x of the next given position/point
            posex1 = eef_poses[i+1].position.x
            ## @var posey1
            # position y of the next given position/point
            posey1 = eef_poses[i+1].position.y
            ## @var posez1
            # position z of the next given position/point
            posez1 = eef_poses[i+1].position.z
            ## @var path_length
            # Euclidean distance between two consecutive points; all three
            # squared terms belong inside the square root
            path_length += sqrt(pow(posex1 - posex, 2)
                                + pow(posey1 - posey, 2)
                                + pow(posez1 - posez, 2))
        return path_length

    ## Returns the maximum joint acceleration across all accelerations in the motion plan
    # @param motion_plan The motion plan retrieved by the planner
    def get_max_joint_acceleration(self, motion_plan):
        ## @var maxlist
        # This list contains all accelerations given by the motion plan
        maxlist = []
        # collect every acceleration value from every trajectory point once
        for point in motion_plan[1].joint_trajectory.points:
            maxlist.extend(point.accelerations)
        return max(maxlist)
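# ---------------------------------------------------------------------------
# Added illustration (a minimal sanity check, not part of the original
# file): a two-point path along a 3-4-5 triangle in the x/y plane should
# have length 5. Uses only geometry_msgs.Pose, which is imported above.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    start, goal = Pose(), Pose()
    goal.position.x = 3.0
    goal.position.y = 4.0
    print(StatisticsDefinedPath().get_path_length([start, goal]))  # -> 5.0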
2.296875
2
apps/sendmail/admin.py
CasualGaming/studlan
9
12825
# -*- coding: utf-8 -*- from django.contrib import admin from .models import Mail class MailAdmin(admin.ModelAdmin): list_display = ['subject', 'sent_time', 'recipients_total', 'successful_mails', 'failed_mails', 'done_sending'] ordering = ['-sent_time'] # Prevent creation def has_add_permission(self, request, obj=None): return False # Prevent changes def save_model(self, request, obj, form, change): pass # Prevent M2M changes def save_related(self, request, form, formsets, change): pass admin.site.register(Mail, MailAdmin)
2.078125
2
event/test_event.py
Web-Team-IITI-Gymkhana/gymkhana_server
0
12826
from uuid import uuid4

from fastapi.testclient import TestClient

from ..main import app

client = TestClient(app)


class Test_Event:
    record = {
        "name": "<NAME>",
        "description": "It is a coding event held in the month of December by Programming Club",
        "created_on": "2022-01-28T21:33:50.795775",
        "last_update": "2021-01-28T12:33:52.795775",
        "start_time": "2022-02-19T19:33:10.895775",
        "end_time": "2022-02-19T21:00:10.895775",
        "image": "https://www.google.com/search?q=P",
        "website": "",
        "notify": True,
        "is_online": False,
        "meet_link": "",
        "venue": "Carbon Building",
    }

    updated_record = {
        "name": "<NAME>",
        "description": "It is a coding event held in the month of December by Programming Club",
        "created_on": "2022-01-28T21:33:50.795775",
        "last_update": "2021-01-28T12:33:52.795775",
        "start_time": "2022-02-19T19:33:10.895775",
        "end_time": "2022-02-19T21:00:10.895775",
        "image": "https://www.google.com/search?",
        "website": "",
        "notify": False,
        "is_online": True,
        "meet_link": "https://meet.google.com/abc-defg-hij",
        "venue": "",
    }

    def test_create(self):
        response = client.post("/event/", json=self.record)
        assert response.status_code == 201, f"Received {response.status_code}"
        response_record = response.json()
        self.record["id"] = response_record["id"]
        for key in response_record.keys():
            assert self.record[key] == response_record[key]

    def test_get_one(self):
        response = client.get(f"/event/{self.record['id']}")
        assert response.status_code == 200, f"Received {response.status_code}"
        assert response.json() == self.record

    def test_get_non_existing(self):
        response = client.get(f"/event/{uuid4()}")
        assert response.status_code == 404, f"Received {response.status_code}"
        assert response.json() == {"detail": "Event not found"}

    def test_patch(self):
        response = client.patch(
            f"/event/{self.record['id']}", json=self.updated_record
        )
        assert response.status_code == 202, f"Received {response.status_code}"
        assert response.json() == self.updated_record

    def test_get_all(self):
        response = client.get("/event/")
        assert response.status_code == 200, f"Received {response.status_code}"

    def test_delete(self):
        response = client.delete(f"/event/{self.record['id']}")
        assert response.status_code == 204, f"Received {response.status_code}"

    def test_delete_non_existing(self):
        response = client.get(f"/event/{uuid4()}")
        assert response.status_code == 404, f"Received {response.status_code}"
        assert response.json() == {"detail": "Event not found"}
2.71875
3
zigzag_conversion.py
cheng10/leetcode
0
12827
<filename>zigzag_conversion.py
class Solution(object):
    def convert(self, s, numRows):
        """
        :type s: str
        :type numRows: int
        :rtype: str
        """
        # one zigzag cycle covers a full column plus the diagonal back up
        cycle = 2 * (numRows - 1)
        if numRows == 1:
            cycle = 1
        # characters that land on each row of the zigzag
        rows = ['' for _ in range(numRows)]
        for j in range(len(s)):
            mod = j % cycle
            if mod < numRows:
                rows[mod] += s[j]
            else:
                rows[2 * (numRows - 1) - mod] += s[j]
        return ''.join(rows)
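# Added illustration (not part of the original file): the canonical
# LeetCode #6 examples.
if __name__ == '__main__':
    sol = Solution()
    print(sol.convert('PAYPALISHIRING', 3))  # -> 'PAHNAPLSIIGYIR'
    print(sol.convert('PAYPALISHIRING', 4))  # -> 'PINALSIGYAHRPI'
    print(sol.convert('AB', 1))              # -> 'AB' (numRows == 1 path)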
3.46875
3
src/tests/ftest/soak/soak.py
cdurf1/daos
0
12828
<reponame>cdurf1/daos #!/usr/bin/python """ (C) Copyright 2019 Intel Corporation. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE The Government's rights to use, modify, reproduce, release, perform, display, or disclose this software are subject to the terms of the Apache License as provided in Contract No. 8F-30005. Any reproduction of computer software, computer software documentation, or portions thereof marked with this legend must also reproduce the markings. """ from __future__ import print_function import os import time from apricot import TestWithServers from ior_utils import IorCommand import slurm_utils from test_utils_pool import TestPool from test_utils_container import TestContainer from ClusterShell.NodeSet import NodeSet from general_utils import pcmd import socket class SoakTestError(Exception): """Soak exception class.""" class Soak(TestWithServers): """Execute DAOS Soak test cases. :avocado: recursive Args: TestWithServers (AvocadoTest): Unit Test test cases There are currently two types of soak tests. 1) smoke - runs each specified cmdline (job spec) for a single iteration. The smoke test is to verify the environment is configured properly before running the longer soaks 2) 1 hour - this will run a defined set of jobs and continue to submit the jobs until the time has expired. The tests also use an IOR that is compiled with MPICH and is built with both the DAOS and MPI-IO drivers. """ def job_done(self, args): """Call this function when a job is done. Args: args (list):handle --which job, i.e. the job ID, state --string indicating job completion status """ self.soak_results[args["handle"]] = args["state"] def create_pool(self, pools): """Create a pool that the various tests use for storage. Args: pools: list of pool name from yaml file /run/<test_params>/poollist/* Returns: list: list of TestPool object """ pool_obj_list = [] for pool_name in pools: # Create a pool pool_obj_list.append(TestPool(self.context, self.log)) pool_obj_list[-1].namespace = "/".join(["/run", pool_name, "*"]) pool_obj_list[-1].get_params(self) pool_obj_list[-1].create() self.log.info("Valid Pool UUID is %s", pool_obj_list[-1].uuid) # Commented out due to DAOS-3836. ## Check that the pool was created #self.assertTrue( # pool_obj_list[-1].check_files(self.hostlist_servers), # "Pool data not detected on servers") return pool_obj_list def destroy_pool(self, pool): """Destroy the specified pool - TO DO.""" pass def remote_copy(self, hostlist, remote_dir, local_dir): """Copy files from remote dir to local dir. Args: hostlist (list): list of remote nodes remote_dir (str): remote directory of files local_dir (str): local directory Raises: SoakTestError: if there is an error with the remote copy """ this_host = socket.gethostname() result = pcmd( NodeSet.fromlist(hostlist), "if [ ! 
-z '$(ls -A {0})' ]; then " "scp -p -r {0}/ \"{1}:'{2}/'\" && rm -rf {0}/*; fi".format( remote_dir, this_host, local_dir), verbose=False) if len(result) > 1 or 0 not in result: raise SoakTestError( "Error executing remote copy: {}".format( ", ".join( [str(result[key]) for key in result if key != 0]))) def create_ior_cmdline(self, job_params, job_spec, pool): """Create an IOR cmdline to run in slurm batch. Args: job_params (str): job params from yaml file job_spec (str): specific ior job to run pool (obj): TestPool obj Returns: cmd: cmdline string """ command = [] iteration = self.test_iteration ior_params = "/".join(["run", job_spec, "*"]) ior_cmd = IorCommand() ior_cmd.namespace = ior_params ior_cmd.get_params(self) if iteration is not None and iteration < 0: ior_cmd.repetitions.update(1000000) ior_cmd.max_duration.update(self.params.get( "time", "/".join([job_params, "*"]))) # IOR job specs with a list of parameters; update each value # transfer_size # block_size # daos object class tsize_list = ior_cmd.transfer_size.value bsize_list = ior_cmd.block_size.value oclass_list = ior_cmd.daos_oclass.value for b_size in bsize_list: ior_cmd.block_size.update(b_size) for o_type in oclass_list: ior_cmd.daos_oclass.update(o_type) for t_size in tsize_list: ior_cmd.transfer_size.update(t_size) ior_cmd.set_daos_params(self.server_group, pool) # export the user environment to test node exports = ["ALL"] if ior_cmd.api.value == "MPIIO": env = { "CRT_ATTACH_INFO_PATH": os.path.join( self.basepath, "install/tmp"), "DAOS_POOL": str(ior_cmd.daos_pool.value), "MPI_LIB": "\"\"", "DAOS_SVCL": str(ior_cmd.daos_svcl.value), "DAOS_SINGLETON_CLI": 1, "FI_PSM2_DISCONNECT": 1 } exports.extend( ["{}={}".format( key, val) for key, val in env.items()]) cmd = "srun -l --mpi=pmi2 --export={} {}".format( ",".join(exports), ior_cmd) command.append(cmd) self.log.debug("<<IOR cmdline >>: %s \n", cmd) return command def create_dmg_cmdline(self, job_params, job_spec, pool): """Create a dmg cmdline to run in slurm batch. Args: job_params (str): job params from yaml file job_spec (str): specific dmg job to run Returns: cmd: [description] """ cmd = "" return cmd def build_job_script(self, nodesperjob, job, pool): """Create a slurm batch script that will execute a list of jobs. Args: nodesperjob(int): number of nodes executing each job job(str): the job that will be defined in the slurm script with /run/"job"/. 
It is currently defined in the yaml as: Example job: job1: name: job1 - unique name time: 10 - cmdline time in seconds; used in IOR -T param tasks: 1 - number of processes per node --ntaskspernode jobspec: - ior_daos - ior_mpiio pool (obj): TestPool obj Returns: script_list: list of slurm batch scripts """ self.log.info("<<Build Script for job %s >> at %s", job, time.ctime()) script_list = [] # create one batch script per cmdline # get job params job_params = "/run/" + job job_name = self.params.get("name", "/".join([job_params, "*"])) job_specs = self.params.get("jobspec", "/".join([job_params, "*"])) task_list = self.params.get("tasks", "/".join([job_params, "*"])) job_time = self.params.get("time", "/".join([job_params, "*"])) # job_time in minutes:seconds format job_time = str(job_time) + ":00" for job_spec in job_specs: if "ior" in job_spec: # Create IOR cmdline cmd_list = self.create_ior_cmdline(job_params, job_spec, pool) elif "dmg" in job_spec: # create dmg cmdline cmd_list = self.create_dmg_cmdline(job_params, job_spec, pool) else: raise SoakTestError( "<<FAILED: Soak job: {} Job spec {} is invalid>>".format( job, job_spec)) # a single cmdline per batch job; so that a failure is per cmdline # change to multiple cmdlines per batch job later. for cmd in cmd_list: # additional sbatch params for tasks in task_list: output = os.path.join( self.rem_pass_dir, "%N_" + self.test_name + "_" + job_name + "_" + job_spec + "_results.out_%j_%t_" + str(tasks) + "_") num_tasks = nodesperjob * tasks sbatch = { "ntasks-per-node": tasks, "ntasks": num_tasks, "time": job_time, "partition": self.partition_clients, "exclude": NodeSet.fromlist(self.hostlist_servers)} script = slurm_utils.write_slurm_script( self.rem_pass_dir, job_name, output, nodesperjob, [cmd], sbatch) script_list.append(script) return script_list def job_setup(self, test_param, pool): """Create the slurm job batch script . Args: test_param (str): test_param from yaml file pool (obj): TestPool obj Returns: scripts: list of slurm batch scripts """ # Get jobmanager self.job_manager = self.params.get("jobmanager", "/run/*") # Get test params test_params = "".join([test_param, "*"]) self.test_name = self.params.get("name", test_params) self.test_iteration = self.params.get("test_iteration", test_params) self.job_list = self.params.get("joblist", test_params) self.nodesperjob = self.params.get("nodesperjob", test_params) self.soak_results = {} script_list = [] self.log.info( "<<Job_Setup %s >> at %s", self.test_name, time.ctime()) # Create the remote log directories from new loop/pass self.rem_pass_dir = self.log_dir + "/pass" + str(self.loop) self.local_pass_dir = self.outputsoakdir + "/pass" + str(self.loop) result = pcmd( NodeSet.fromlist(self.hostlist_clients), "mkdir -p {}".format(self.rem_pass_dir), verbose=False) if len(result) > 1 or 0 not in result: raise SoakTestError( "<<FAILED: logfile directory not" "created on clients>>: {}".format(", ".join( [str(result[key]) for key in result if key != 0]))) # Create local log directory os.makedirs(self.local_pass_dir) # nodesperjob = -1 indicates to use all nodes in client hostlist if self.nodesperjob < 0: self.nodesperjob = len(self.hostlist_clients) if len(self.hostlist_clients)/self.nodesperjob < 1: raise SoakTestError( "<<FAILED: There are only {} client nodes for this job. " "Job requires {}".format( len(self.hostlist_clients), self.nodesperjob)) if self.job_manager == "slurm": # queue up slurm script and register a callback to retrieve # results. 
The slurm batch script are single cmdline for now. # scripts is a list of slurm batch scripts with a single cmdline for job in self.job_list: scripts = self.build_job_script(self.nodesperjob, job, pool) script_list.extend(scripts) return script_list else: raise SoakTestError( "<<FAILED: Job manager {} is not yet enabled. " "Job requires slurm".format(self.job_manager)) def job_startup(self, scripts): """Submit job batch script. Args: scripts (list): list of slurm batch scripts to submit to queue Returns: job_id_list: IDs of each job submitted to slurm. """ self.log.info( "<<Job Startup - %s >> at %s", self.test_name, time.ctime()) job_id_list = [] # scripts is a list of batch script files for script in scripts: try: job_id = slurm_utils.run_slurm_script(str(script)) except slurm_utils.SlurmFailed as error: self.log.error(error) # Force the test to exit with failure job_id = None if job_id: print( "<<Job {} started with {} >> at {}".format( job_id, script, time.ctime())) slurm_utils.register_for_job_results( job_id, self, maxwait=self.test_timeout) # keep a list of the job_id's job_id_list.append(int(job_id)) else: # one of the jobs failed to queue; exit on first fail for now. err_msg = "Slurm failed to submit job for {}".format(script) job_id_list = [] raise SoakTestError( "<<FAILED: Soak {}: {}>>".format(self.test_name, err_msg)) return job_id_list def job_completion(self, job_id_list): """Wait for job completion and cleanup. Args: job_id_list: IDs of each job submitted to slurm """ self.log.info( "<<Job Completion - %s >> at %s", self.test_name, time.ctime()) # If there is nothing to do; exit if len(job_id_list) > 0: # wait for all the jobs to finish while len(self.soak_results) < len(job_id_list): # print("<<Waiting for results {} >>".format( # self.soak_results)) time.sleep(2) # check for job COMPLETED and remove it from the job queue for job, result in self.soak_results.items(): # The queue include status of "COMPLETING" # sleep to allow job to move to final state if result == "COMPLETED": job_id_list.remove(int(job)) else: self.log.info( "<< Job %s failed with status %s>>", job, result) if len(job_id_list) > 0: self.log.info( "<<Cancel jobs in queue with id's %s >>", job_id_list) for job in job_id_list: status = slurm_utils.cancel_jobs(int(job)) if status == 0: self.log.info("<<Job %s successfully cancelled>>", job) # job_id_list.remove(int(job)) else: self.log.info("<<Job %s could not be killed>>", job) # gather all the logfiles for this pass and cleanup test nodes # If there is a failure the files can be gathered again in Teardown try: self.remote_copy( self.node_list, self.rem_pass_dir, self.outputsoakdir) except SoakTestError as error: self.log.info("Remote copy failed with %s", error) self.soak_results = {} return job_id_list def execute_jobs(self, test_param, pools): """Execute the overall soak test. 
Args: test_param (str): test_params from yaml file pools (list): list of TestPool obj Raise: SoakTestError """ cmdlist = [] # Setup cmdlines for jobs for pool in pools: cmdlist.extend(self.job_setup(test_param, pool)) # Gather the job_ids self.job_id_list = self.job_startup(cmdlist) # Initialize the failed_job_list to job_list so that any # unexpected failures will clear the squeue in tearDown self.failed_job_id_list = self.job_id_list # Wait for jobs to finish and cancel/kill jobs if necessary self.failed_job_id_list = self.job_completion(self.job_id_list) # Test fails on first error but could use continue on error here if len(self.failed_job_id_list) > 0: raise SoakTestError( "<<FAILED: The following jobs failed {} >>".format( " ,".join( str(job_id) for job_id in self.failed_job_id_list))) def run_soak(self, test_param): """Run the soak test specified by the test params. Args: test_param (str): test_params from yaml file """ pool_list = self.params.get("poollist", "".join([test_param, "*"])) self.test_timeout = self.params.get("test_timeout", test_param) self.job_id_list = [] start_time = time.time() rank = self.params.get("rank", "/run/container_reserved/*") obj_class = self.params.get( "object_class", "/run/container_reserved/*") # Create the reserved pool with data self.pool = self.create_pool(["pool_reserved"]) self.pool[0].connect() self.container = TestContainer(self.pool[0]) self.container.namespace = "/run/container_reserved/*" self.container.get_params(self) self.container.create() self.container.write_objects(rank, obj_class) while time.time() < start_time + self.test_timeout: print("<<Soak1 PASS {}: time until done {}>>".format( self.loop, (start_time + self.test_timeout - time.time()))) # Create all specified pools self.pool.extend(self.create_pool(pool_list)) try: self.execute_jobs(test_param, self.pool[1:]) except SoakTestError as error: self.fail(error) errors = self.destroy_pools(self.pool[1:]) # delete the test pools from self.pool; preserving reserved pool self.pool = [self.pool[0]] self.assertEqual(len(errors), 0, "\n".join(errors)) self.loop += 1 # Break out of loop if smoke if "smoke" in self.test_name: break # Commented out due to DAOS-3836. 
## Check that the reserve pool is still allocated #self.assertTrue( # self.pool[0].check_files(self.hostlist_servers), # "Pool data not detected on servers") # Verify the data after soak is done self.assertTrue( self.container.read_objects(), "Data verification error on reserved pool" "after SOAK completed") def setUp(self): """Define test setup to be done.""" print("<<setUp Started>> at {}".format(time.ctime())) super(Soak, self).setUp() # Initialize loop param for all tests self.loop = 1 self.failed_job_id_list = [] # Fail if slurm partition daos_client is not defined if not self.partition_clients: raise SoakTestError( "<<FAILED: Partition is not correctly setup for daos " "slurm partition>>") # Check if the server nodes are in the client list; # this will happen when only one partition is specified for host_server in self.hostlist_servers: if host_server in self.hostlist_clients: self.hostlist_clients.remove(host_server) self.log.info( "<<Updated hostlist_clients %s >>", self.hostlist_clients) # include test node for log cleanup; remove from client list self.test_node = [socket.gethostname().split('.', 1)[0]] if self.test_node[0] in self.hostlist_clients: self.hostlist_clients.remove(self.test_node[0]) self.log.info( "<<Updated hostlist_clients %s >>", self.hostlist_clients) self.node_list = self.hostlist_clients + self.test_node # self.node_list = self.hostlist_clients # Setup logging directories for soak logfiles # self.output dir is an avocado directory .../data/ self.log_dir = "/tmp/soak" self.outputsoakdir = self.outputdir + "/soak" # Create the remote log directories on all client nodes self.rem_pass_dir = self.log_dir + "/pass" + str(self.loop) self.local_pass_dir = self.outputsoakdir + "/pass" + str(self.loop) # cleanup soak log directories before test on all nodes result = pcmd( NodeSet.fromlist(self.node_list), "rm -rf {}".format(self.log_dir), verbose=False) if len(result) > 1 or 0 not in result: raise SoakTestError( "<<FAILED: Soak directories not removed" "from clients>>: {}".format(", ".join( [str(result[key]) for key in result if key != 0]))) def tearDown(self): """Define tearDown and clear any left over jobs in squeue.""" print("<<tearDown Started>> at {}".format(time.ctime())) # clear out any jobs in squeue; errors_detected = False if len(self.failed_job_id_list) > 0: print("<<Cancel jobs in queue with ids {} >>".format( self.failed_job_id_list)) for job_id in self.failed_job_id_list: try: slurm_utils.cancel_jobs(job_id) except slurm_utils.SlurmFailed as error: self.log.info( " Failed to cancel job %s with error %s", job_id, str( error)) errors_detected = True # One last attempt to copy any logfiles from client nodes try: self.remote_copy( self.node_list, self.rem_pass_dir, self.outputsoakdir) except SoakTestError as error: self.log.info("Remote copy failed with %s", error) errors_detected = True super(Soak, self).tearDown() if errors_detected: self.fail("Errors detected cancelling slurm jobs in tearDown()") def test_soak_smoke(self): """Run soak smoke. Test ID: DAOS-2192 Test Description: This will create a slurm batch job that runs IOR with DAOS with the number of processes determined by the number of nodes. For this test a single pool will be created. It will run for ~10 min :avocado: tags=soak,soak_smoke """ test_param = "/run/smoke/" self.run_soak(test_param) def test_soak_ior_daos(self): """Run soak test with IOR -a daos. 
        Test ID: DAOS-2256
        Test Description: This will create a slurm batch job that runs
        various jobs defined in the soak yaml. This test will run for the
        time specified in /run/test_param_test_timeout.

        :avocado: tags=soak,soak_ior,soak_ior_daos
        """
        test_param = "/run/soak_ior_daos/"
        self.run_soak(test_param)

    def test_soak_ior_mpiio(self):
        """Run soak test with IOR -a mpiio.

        Test ID: DAOS-2401
        Test Description: This will create a slurm batch job that runs
        various jobs defined in the soak yaml. This test will run for the
        time specified in /run/test_param_test_timeout.

        :avocado: tags=soak,soak_ior,soak_ior_mpiio
        """
        test_param = "/run/soak_ior_mpiio/"
        self.run_soak(test_param)

    def test_soak_stress(self):
        """Run soak stress.

        Test ID: DAOS-2256
        Test Description: This will create a slurm batch job that runs
        various jobs defined in the soak yaml. This test will run for the
        time specified in /run/test_param_test_timeout.

        :avocado: tags=soak,soak_stress
        """
        test_param = "/run/soak_stress/"
        self.run_soak(test_param)
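# ---------------------------------------------------------------------------
# Added illustration (a sketch, not part of the original file) of the yaml
# job-spec shape that build_job_script() documents above. Keys mirror the
# params the code reads (name, time, tasks, jobspec); values are placeholders.
# ---------------------------------------------------------------------------
# job1:
#   name: job1        # unique name
#   time: 10          # cmdline time in seconds; used in IOR -T param
#   tasks: 1          # processes per node (--ntasks-per-node)
#   jobspec:
#     - ior_daos
#     - ior_mpiio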
1.960938
2
aoc/year_2020/day_06/solver.py
logan-connolly/AoC
2
12829
<filename>aoc/year_2020/day_06/solver.py<gh_stars>1-10 """This is the Solution for Year 2020 Day 06""" import re from aoc.abstracts.solver import Answers, StrLines class Solver: def __init__(self, data: str) -> None: self.data = data def _preprocess(self) -> StrLines: delim = "\n\n" return self.data.split(delim) def _solve_part_one(self, lines: StrLines) -> int: cleaned = [re.sub(r"\n", "", answer).strip() for answer in lines] return sum(len(set(answer)) for answer in cleaned) def _solve_part_two(self, lines: StrLines) -> int: cleaned = [answer.rstrip("\n").split("\n") for answer in lines] shared_answer_count = 0 for group in cleaned: shared_answers = set.intersection(*[set(member) for member in group]) shared_answer_count += len(shared_answers) return shared_answer_count def solve(self) -> Answers: lines = self._preprocess() ans_one = self._solve_part_one(lines) ans_two = self._solve_part_two(lines) return Answers(part_one=ans_one, part_two=ans_two)
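# ---------------------------------------------------------------------------
# Added illustration (not part of the original module): the worked example
# from the 2020 day 6 puzzle statement; the "anyone answered yes" counts sum
# to 11 and the "everyone answered yes" counts sum to 6.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    sample = 'abc\n\na\nb\nc\n\nab\nac\n\na\na\na\na\n\nb'
    print(Solver(sample).solve())  # -> part_one=11, part_two=6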
3.109375
3
setup.py
kuzxnia/typer
0
12830
<gh_stars>0 from setuptools import find_packages, setup setup( name="typer", packages=find_packages(), )
1.101563
1
speedup.py
hjdeheer/malpaca
0
12831
<gh_stars>0 import numpy as np from numba import jit, prange from scipy.stats import mode from sklearn.metrics import accuracy_score __all__ = ['dtw_distance', 'KnnDTW'] @jit(nopython=True, fastmath=True) def cosine_distance(u:np.ndarray, v:np.ndarray): assert(u.shape[0] == v.shape[0]) uv = 0 uu = 0 vv = 0 for i in range(u.shape[0]): uv += u[i]*v[i] uu += u[i]*u[i] vv += v[i]*v[i] cos_theta = 1 if uu!=0 and vv!=0: cos_theta = uv/np.sqrt(uu*vv) return 1 - cos_theta @jit(nopython=True, parallel=True, nogil=True) def dtw_distance(dataset1, dataset2): """ Computes the dataset DTW distance matrix using multiprocessing. Args: dataset1: timeseries dataset of shape [N1, T1] dataset2: timeseries dataset of shape [N2, T2] Returns: Distance matrix of shape [N1, N2] """ n1 = dataset1.shape[0] n2 = dataset2.shape[0] dist = np.empty((n1, n2), dtype=np.float64) for i in prange(n1): for j in prange(n2): dist[i][j] = _dtw_distance(dataset1[i], dataset2[j]) return dist @jit(nopython=True, cache=True) def _dtw_distance(series1, series2): """ Returns the DTW similarity distance between two 1-D timeseries numpy arrays. Args: series1, series2 : array of shape [n_timepoints] Two arrays containing n_samples of timeseries data whose DTW distance between each sample of A and B will be compared. Returns: DTW distance between A and B """ l1 = series1.shape[0] l2 = series2.shape[0] E = np.empty((l1, l2)) # Fill First Cell v = series1[0] - series2[0] E[0][0] = v * v # Fill First Column for i in range(1, l1): v = series1[i] - series2[0] E[i][0] = E[i - 1][0] + v * v # Fill First Row for i in range(1, l2): v = series1[0] - series2[i] E[0][i] = E[0][i - 1] + v * v for i in range(1, l1): for j in range(1, l2): v = series1[i] - series2[j] v = v * v v1 = E[i - 1][j] v2 = E[i - 1][j - 1] v3 = E[i][j - 1] if v1 <= v2 and v1 <= v3: E[i][j] = v1 + v elif v2 <= v1 and v2 <= v3: E[i][j] = v2 + v else: E[i][j] = v3 + v return np.sqrt(E[-1][-1]) # Modified from https://github.com/markdregan/K-Nearest-Neighbors-with-Dynamic-Time-Warping class KnnDTW(object): """K-nearest neighbor classifier using dynamic time warping as the distance measure between pairs of time series arrays Arguments --------- n_neighbors : int, optional (default = 1) Number of neighbors to use by default for KNN """ def __init__(self, n_neighbors=1): self.n_neighbors = n_neighbors def fit(self, x, y): """Fit the model using x as training data and y as class labels Arguments --------- x : array of shape [n_samples, n_timepoints] Training data set for input into KNN classifer y : array of shape [n_samples] Training labels for input into KNN classifier """ self.x = np.copy(x) self.y = np.copy(y) def _dist_matrix(self, x, y): """Computes the M x N distance matrix between the training dataset and testing dataset (y) using the DTW distance measure Arguments --------- x : array of shape [n_samples, n_timepoints] y : array of shape [n_samples, n_timepoints] Returns ------- Distance matrix between each item of x and y with shape [training_n_samples, testing_n_samples] """ dm = dtw_distance(x, y) return dm def predict(self, x): """Predict the class labels or probability estimates for the provided data Arguments --------- x : array of shape [n_samples, n_timepoints] Array containing the testing data set to be classified Returns ------- 2 arrays representing: (1) the predicted class labels (2) the knn label count probability """ np.random.seed(0) dm = self._dist_matrix(x, self.x) # Identify the k nearest neighbors knn_idx = dm.argsort()[:, :self.n_neighbors] # Identify k 
nearest labels knn_labels = self.y[knn_idx] # Model Label mode_data = mode(knn_labels, axis=1) mode_label = mode_data[0] mode_proba = mode_data[1] / self.n_neighbors return mode_label.ravel(), mode_proba.ravel() def evaluate(self, x, y): """ Predict the class labels or probability estimates for the provided data and then evaluates the accuracy score. Arguments --------- x : array of shape [n_samples, n_timepoints] Array containing the testing data set to be classified y : array of shape [n_samples] Array containing the labels of the testing dataset to be classified Returns ------- 1 floating point value representing the accuracy of the classifier """ # Predict the labels and the probabilities pred_labels, pred_probas = self.predict(x) # Ensure labels are integers y = y.astype('int32') pred_labels = pred_labels.astype('int32') # Compute accuracy measure accuracy = accuracy_score(y, pred_labels) return accuracy def predict_proba(self, x): """Predict the class labels probability estimates for the provided data Arguments --------- x : array of shape [n_samples, n_timepoints] Array containing the testing data set to be classified Returns ------- 2 arrays representing: (1) the predicted class probabilities (2) the knn labels """ np.random.seed(0) dm = self._dist_matrix(x, self.x) # Invert the distance matrix dm = -dm classes = np.unique(self.y) class_dm = [] # Partition distance matrix by class for i, cls in enumerate(classes): idx = np.argwhere(self.y == cls)[:, 0] cls_dm = dm[:, idx] # [N_test, N_train_c] # Take maximum distance vector due to softmax probabilities cls_dm = np.max(cls_dm, axis=-1) # [N_test,] class_dm.append([cls_dm]) # Concatenate the classwise distance matrices and transpose class_dm = np.concatenate(class_dm, axis=0) # [C, N_test] class_dm = class_dm.transpose() # [N_test, C] # Compute softmax probabilities class_dm_exp = np.exp(class_dm - class_dm.max()) class_dm = class_dm_exp / np.sum(class_dm_exp, axis=-1, keepdims=True) probabilities = class_dm knn_labels = np.argmax(class_dm, axis=-1) return probabilities, knn_labels
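# ---------------------------------------------------------------------------
# Added illustration (a minimal usage sketch, not part of the original
# file): two well-separated synthetic classes; data shapes follow the
# docstrings above ([n_samples, n_timepoints]).
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    x_train = np.vstack([rng.randn(10, 50), rng.randn(10, 50) + 5.0])
    y_train = np.array([0] * 10 + [1] * 10)
    x_test = np.vstack([rng.randn(5, 50), rng.randn(5, 50) + 5.0])
    y_test = np.array([0] * 5 + [1] * 5)

    clf = KnnDTW(n_neighbors=1)
    clf.fit(x_train, y_train)
    print('accuracy:', clf.evaluate(x_test, y_test))  # expected ~1.0 on this toy data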
2.5625
3
setup.py
colineRamee/UAM_simulator_scitech2021
1
12832
from setuptools import setup setup( name='uam_simulator', version='1.0', description='A tool to simulate different architectures for UAM traffic management', author='<NAME>', author_email='<EMAIL>', packages=['uam_simulator'], install_requires=['numpy', 'scikit-learn', 'gurobipy'] ) # If installing from source the package name is gurobipy, if installing with conda it's gurobi, but when importing it's still gurobipy
1.210938
1
DFS/Leetcode1239.py
Rylie-W/LeetRecord
0
12833
class Solution: def maxLength(self, arr) -> int: def helper(word): temp=[] temp[:0]=word res=set() for w in temp: if w not in res: res.add(w) else: return None return res memo=[] for a in arr: temp=helper(a) if temp is not None: memo.append(temp) memo.sort(key=lambda a:len(a),reverse=True) def dfs(index,path): if index==len(memo): return 0 res=0 for i in range(index,len(memo)): if len(path|memo[i])==len(path)+len(memo[i]): res=max(res,len(memo[i])+dfs(i+1,path|memo[i])) return res return dfs(0,set()) if __name__ == '__main__': sol=Solution() arr = ["un", "iq", "ue"] # arr = ["cha", "r", "act", "ers"] # arr = ["abcdefghijklmnopqrstuvwxyz"] # arr=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p"] print(sol.maxLength(arr))
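    # Added reference (not part of the original file): expected results for
    # the test cases above, per the LeetCode 1239 examples:
    #   ["un", "iq", "ue"]             -> 4   ("uniq" or "ique")
    #   ["cha", "r", "act", "ers"]     -> 6   ("chaers" or "acters")
    #   ["abcdefghijklmnopqrstuvwxyz"] -> 26
    #   ["a" ... "p"]                  -> 16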
3.375
3
apps/addresses/migrations/0002_address_picture.py
skyride/python-docker-compose
0
12834
<reponame>skyride/python-docker-compose<gh_stars>0 # Generated by Django 3.0.6 on 2020-05-25 22:13 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('addresses', '0001_initial'), ] operations = [ migrations.AddField( model_name='address', name='picture', field=models.ImageField(default=None, null=True, upload_to='addresses/images/'), ), ]
1.554688
2
liberapay/payin/common.py
Panquesito7/liberapay.com
1
12835
<reponame>Panquesito7/liberapay.com<gh_stars>1-10 from collections import namedtuple from datetime import timedelta import itertools from operator import attrgetter from pando.utils import utcnow from psycopg2.extras import execute_batch from ..constants import SEPA from ..exceptions import ( AccountSuspended, MissingPaymentAccount, RecipientAccountSuspended, NoSelfTipping, UserDoesntAcceptTips, ) from ..i18n.currencies import Money, MoneyBasket from ..utils import group_by ProtoTransfer = namedtuple( 'ProtoTransfer', 'amount recipient destination context unit_amount period team visibility', ) def prepare_payin(db, payer, amount, route, proto_transfers, off_session=False): """Prepare to charge a user. Args: payer (Participant): the user who will be charged amount (Money): the presentment amount of the charge route (ExchangeRoute): the payment instrument to charge proto_transfers ([ProtoTransfer]): the transfers to prepare off_session (bool): `True` means that the payment is being initiated because it was scheduled, `False` means that the payer has initiated the operation just now Returns: Record: the row created in the `payins` table Raises: AccountSuspended: if the payer's account is suspended """ assert isinstance(amount, Money), type(amount) assert route.participant == payer, (route.participant, payer) assert route.status in ('pending', 'chargeable') if payer.is_suspended or not payer.get_email_address(): raise AccountSuspended() with db.get_cursor() as cursor: payin = cursor.one(""" INSERT INTO payins (payer, amount, route, status, off_session) VALUES (%s, %s, %s, 'pre', %s) RETURNING * """, (payer.id, amount, route.id, off_session)) cursor.run(""" INSERT INTO payin_events (payin, status, error, timestamp) VALUES (%s, %s, NULL, current_timestamp) """, (payin.id, payin.status)) payin_transfers = [] for t in proto_transfers: payin_transfers.append(prepare_payin_transfer( cursor, payin, t.recipient, t.destination, t.context, t.amount, t.visibility, t.unit_amount, t.period, t.team, )) return payin, payin_transfers def update_payin( db, payin_id, remote_id, status, error, amount_settled=None, fee=None, intent_id=None, refunded_amount=None, ): """Update the status and other attributes of a charge. 
Args: payin_id (int): the ID of the charge in our database remote_id (str): the ID of the charge in the payment processor's database status (str): the new status of the charge error (str): if the charge failed, an error message to show to the payer Returns: Record: the row updated in the `payins` table """ with db.get_cursor() as cursor: payin = cursor.one(""" UPDATE payins SET status = %(status)s , error = %(error)s , remote_id = coalesce(remote_id, %(remote_id)s) , amount_settled = coalesce(amount_settled, %(amount_settled)s) , fee = coalesce(fee, %(fee)s) , intent_id = coalesce(intent_id, %(intent_id)s) , refunded_amount = coalesce(%(refunded_amount)s, refunded_amount) WHERE id = %(payin_id)s RETURNING * , (SELECT status FROM payins WHERE id = %(payin_id)s) AS old_status """, locals()) if not payin: return if remote_id and payin.remote_id != remote_id: raise AssertionError(f"the remote IDs don't match: {payin.remote_id!r} != {remote_id!r}") if status != payin.old_status: cursor.run(""" INSERT INTO payin_events (payin, status, error, timestamp) VALUES (%s, %s, %s, current_timestamp) """, (payin_id, status, error)) if status in ('pending', 'succeeded'): cursor.run(""" UPDATE exchange_routes SET status = 'consumed' WHERE id = %s AND one_off IS TRUE """, (payin.route,)) # Lock to avoid concurrent updates cursor.run("SELECT * FROM participants WHERE id = %s FOR UPDATE", (payin.payer,)) # Update scheduled payins, if appropriate if status in ('pending', 'succeeded'): sp = cursor.one(""" SELECT * FROM scheduled_payins WHERE payer = %s AND payin = %s """, (payin.payer, payin.id)) if not sp: # Try to find a scheduled renewal that matches this payin. # It doesn't have to be an exact match. schedule = cursor.all(""" SELECT * FROM scheduled_payins WHERE payer = %s AND payin IS NULL AND mtime < %s """, (payin.payer, payin.ctime)) today = utcnow().date() schedule.sort(key=lambda sp: abs((sp.execution_date - today).days)) payin_tippees = set(cursor.all(""" SELECT coalesce(team, recipient) AS tippee FROM payin_transfers WHERE payer = %s AND payin = %s """, (payin.payer, payin.id))) for sp in schedule: if any((tr['tippee_id'] in payin_tippees) for tr in sp.transfers): cursor.run(""" UPDATE scheduled_payins SET payin = %s , mtime = current_timestamp WHERE id = %s """, (payin.id, sp.id)) break return payin def adjust_payin_transfers(db, payin, net_amount): """Correct a payin's transfers once the net amount is known. Args: payin (Record): a row from the `payins` table net_amount (Money): the amount of money available to transfer """ payer = db.Participant.from_id(payin.payer) route = db.ExchangeRoute.from_id(payer, payin.route) provider = route.network.split('-', 1)[0] payer_country = route.country # We have to update the transfer amounts in a single transaction to # avoid ending up in an inconsistent state. with db.get_cursor() as cursor: payin_transfers = cursor.all(""" SELECT pt.id, pt.amount, pt.status, pt.remote_id, pt.team, pt.recipient, team_p FROM payin_transfers pt LEFT JOIN participants team_p ON team_p.id = pt.team WHERE pt.payin = %s ORDER BY pt.id FOR UPDATE OF pt """, (payin.id,)) assert payin_transfers if any(pt.status == 'succeeded' for pt in payin_transfers): # At least one of the transfers has already been executed, so it's # too complicated to adjust the amounts now. 
return transfers_by_tippee = group_by( payin_transfers, lambda pt: (pt.team or pt.recipient) ) prorated_amounts = resolve_amounts(net_amount, { tippee: MoneyBasket(pt.amount for pt in grouped).fuzzy_sum(net_amount.currency) for tippee, grouped in transfers_by_tippee.items() }) teams = set(pt.team for pt in payin_transfers if pt.team is not None) updates = [] for tippee, prorated_amount in prorated_amounts.items(): transfers = transfers_by_tippee[tippee] if tippee in teams: team = transfers[0].team_p tip = payer.get_tip_to(team) try: team_donations = resolve_team_donation( db, team, provider, payer, payer_country, prorated_amount, tip, sepa_only=True, ) except (MissingPaymentAccount, NoSelfTipping): team_amounts = resolve_amounts(prorated_amount, { pt.id: pt.amount.convert(prorated_amount.currency) for pt in transfers }) for pt in transfers: if pt.amount != team_amounts.get(pt.id): assert pt.remote_id is None and pt.status in ('pre', 'pending') updates.append((team_amounts[pt.id], pt.id)) else: team_donations = {d.recipient.id: d for d in team_donations} for pt in transfers: if pt.status == 'failed': continue d = team_donations.pop(pt.recipient, None) if d is None: assert pt.remote_id is None and pt.status in ('pre', 'pending') cursor.run(""" DELETE FROM payin_transfer_events WHERE payin_transfer = %(pt_id)s AND status = 'pending'; DELETE FROM payin_transfers WHERE id = %(pt_id)s; """, dict(pt_id=pt.id)) elif pt.amount != d.amount: assert pt.remote_id is None and pt.status in ('pre', 'pending') updates.append((d.amount, pt.id)) n_periods = prorated_amount / tip.periodic_amount.convert(prorated_amount.currency) for d in team_donations.values(): unit_amount = (d.amount / n_periods).round(allow_zero=False) prepare_payin_transfer( db, payin, d.recipient, d.destination, 'team-donation', d.amount, tip.visibility, unit_amount, tip.period, team=team.id, ) else: pt = transfers[0] if pt.amount != prorated_amount: assert pt.remote_id is None and pt.status in ('pre', 'pending') updates.append((prorated_amount, pt.id)) if updates: execute_batch(cursor, """ UPDATE payin_transfers SET amount = %s WHERE id = %s AND status <> 'succeeded'; """, updates) def resolve_tip( db, tip, tippee, provider, payer, payer_country, payment_amount, sepa_only=False, excluded_destinations=set(), ): """Prepare to fund a tip. 
Args: tip (Row): a row from the `tips` table tippee (Participant): the intended beneficiary of the donation provider (str): the payment processor ('paypal' or 'stripe') payer (Participant): the donor payer_country (str): the country the money is supposedly coming from payment_amount (Money): the amount of money being sent sepa_only (bool): only consider destination accounts within SEPA excluded_destinations (set): any `payment_accounts.pk` values to exclude Returns: a list of `ProtoTransfer` objects Raises: MissingPaymentAccount: if no suitable destination has been found NoSelfTipping: if the donor would end up sending money to themself RecipientAccountSuspended: if the tippee's account is suspended UserDoesntAcceptTips: if the tippee doesn't accept donations """ assert tip.tipper == payer.id assert tip.tippee == tippee.id if not tippee.accepts_tips: raise UserDoesntAcceptTips(tippee.username) if tippee.is_suspended: raise RecipientAccountSuspended(tippee) if tippee.kind == 'group': return resolve_team_donation( db, tippee, provider, payer, payer_country, payment_amount, tip, sepa_only=sepa_only, excluded_destinations=excluded_destinations, ) else: destination = resolve_destination( db, tippee, provider, payer, payer_country, payment_amount, sepa_only=sepa_only, excluded_destinations=excluded_destinations, ) return [ProtoTransfer( payment_amount, tippee, destination, 'personal-donation', tip.periodic_amount, tip.period, None, tip.visibility, )] def resolve_destination( db, tippee, provider, payer, payer_country, payin_amount, sepa_only=False, excluded_destinations=(), ): """Figure out where to send a payment. Args: tippee (Participant): the intended beneficiary of the payment provider (str): the payment processor ('paypal' or 'stripe') payer (Participant): the user who wants to pay payer_country (str): the country the money is supposedly coming from payin_amount (Money): the payment amount sepa_only (bool): only consider destination accounts within SEPA excluded_destinations (set): any `payment_accounts.pk` values to exclude Returns: Record: a row from the `payment_accounts` table Raises: MissingPaymentAccount: if no suitable destination has been found NoSelfTipping: if the payer would end up sending money to themself """ tippee_id = tippee.id if tippee_id == payer.id: raise NoSelfTipping() currency = payin_amount.currency excluded_destinations = list(excluded_destinations) destination = db.one(""" SELECT * FROM payment_accounts WHERE participant = %(tippee_id)s AND provider = %(provider)s AND is_current AND verified AND coalesce(charges_enabled, true) AND array_position(%(excluded_destinations)s::bigint[], pk) IS NULL AND ( country IN %(SEPA)s OR NOT %(sepa_only)s ) ORDER BY default_currency = %(currency)s DESC , country = %(payer_country)s DESC , connection_ts LIMIT 1 """, dict(locals(), SEPA=SEPA)) if destination: return destination else: raise MissingPaymentAccount(tippee) def resolve_team_donation( db, team, provider, payer, payer_country, payment_amount, tip, sepa_only=False, excluded_destinations=(), ): """Figure out how to distribute a donation to a team's members. 
Args: team (Participant): the team the donation is for provider (str): the payment processor ('paypal' or 'stripe') payer (Participant): the donor payer_country (str): the country code the money is supposedly coming from payment_amount (Money): the amount of money being sent tip (Row): the row from the `tips` table sepa_only (bool): only consider destination accounts within SEPA excluded_destinations (set): any `payment_accounts.pk` values to exclude Returns: a list of `ProtoTransfer` objects Raises: MissingPaymentAccount: if no suitable destination has been found NoSelfTipping: if the payer would end up sending money to themself RecipientAccountSuspended: if the team or all of its members are suspended """ if team.is_suspended: raise RecipientAccountSuspended(team) currency = payment_amount.currency takes = team.get_current_takes_for_payment(currency, tip.amount) if all(t.is_suspended for t in takes): raise RecipientAccountSuspended(takes) takes = [t for t in takes if not t.is_suspended] if len(takes) == 1 and takes[0].member == payer.id: raise NoSelfTipping() member_ids = tuple([t.member for t in takes]) excluded_destinations = list(excluded_destinations) payment_accounts = {row.participant: row for row in db.all(""" SELECT DISTINCT ON (participant) * FROM payment_accounts WHERE participant IN %(member_ids)s AND provider = %(provider)s AND is_current AND verified AND coalesce(charges_enabled, true) AND array_position(%(excluded_destinations)s::bigint[], pk) IS NULL ORDER BY participant , default_currency = %(currency)s DESC , country = %(payer_country)s DESC , connection_ts """, locals())} del member_ids if not payment_accounts: raise MissingPaymentAccount(team) takes = [t for t in takes if t.member in payment_accounts and t.member != payer.id] if not takes: raise NoSelfTipping() takes.sort(key=lambda t: ( -(t.amount / (t.paid_in_advance + payment_amount)), t.paid_in_advance, t.ctime )) # Try to distribute the donation to multiple members. if sepa_only or provider == 'stripe': sepa_accounts = {a.participant: a for a in db.all(""" SELECT DISTINCT ON (a.participant) a.* FROM payment_accounts a WHERE a.participant IN %(member_ids)s AND a.provider = %(provider)s AND a.is_current AND a.verified AND coalesce(a.charges_enabled, true) AND array_position(%(excluded_destinations)s::bigint[], a.pk) IS NULL AND a.country IN %(SEPA)s ORDER BY a.participant , a.default_currency = %(currency)s DESC , a.connection_ts """, dict(locals(), SEPA=SEPA, member_ids={t.member for t in takes}))} if sepa_only or len(sepa_accounts) > 1 and takes[0].member in sepa_accounts: selected_takes = [ t for t in takes if t.member in sepa_accounts and t.amount != 0 ] if selected_takes: resolve_take_amounts(payment_amount, selected_takes) selected_takes.sort(key=attrgetter('member')) n_periods = payment_amount / tip.periodic_amount.convert(currency) return [ ProtoTransfer( t.resolved_amount, db.Participant.from_id(t.member), sepa_accounts[t.member], 'team-donation', (t.resolved_amount / n_periods).round(allow_zero=False), tip.period, team.id, tip.visibility, ) for t in selected_takes if t.resolved_amount != 0 ] elif sepa_only: raise MissingPaymentAccount(team) # Fall back to sending the entire donation to the member who "needs" it most. 
member = db.Participant.from_id(takes[0].member) account = payment_accounts[member.id] return [ProtoTransfer( payment_amount, member, account, 'team-donation', tip.periodic_amount, tip.period, team.id, tip.visibility, )] def resolve_take_amounts(payment_amount, takes): """Compute team transfer amounts. Args: payment_amount (Money): the total amount of money to transfer takes (list): rows returned by `team.get_current_takes_for_payment(...)` This function doesn't return anything, instead it mutates the given takes, adding a `resolved_amount` attribute to each one. """ max_weeks_of_advance = 0 for t in takes: if t.amount == 0: t.weeks_of_advance = 0 continue t.weeks_of_advance = t.paid_in_advance / t.amount if t.weeks_of_advance > max_weeks_of_advance: max_weeks_of_advance = t.weeks_of_advance base_amounts = {t.member: t.amount for t in takes} convergence_amounts = { t.member: ( t.amount * (max_weeks_of_advance - t.weeks_of_advance) ).round_up() for t in takes } tr_amounts = resolve_amounts(payment_amount, base_amounts, convergence_amounts) for t in takes: t.resolved_amount = tr_amounts.get(t.member, payment_amount.zero()) def resolve_amounts(available_amount, base_amounts, convergence_amounts=None, payday_id=1): """Compute transfer amounts. Args: available_amount (Money): the payin amount to split into transfer amounts base_amounts (Dict[Any, Money]): a map of IDs to raw transfer amounts convergence_amounts (Dict[Any, Money]): an optional map of IDs to ideal additional amounts payday_id (int): the ID of the current or next payday, used to rotate who receives the remainder when there is a tie Returns a copy of `base_amounts` with updated values. """ min_transfer_amount = Money.MINIMUMS[available_amount.currency] r = {} amount_left = available_amount # Attempt to converge if convergence_amounts: convergence_sum = Money.sum(convergence_amounts.values(), amount_left.currency) if convergence_sum != 0: convergence_amounts = {k: v for k, v in convergence_amounts.items() if v != 0} if amount_left == convergence_sum: # We have just enough money for convergence. return convergence_amounts elif amount_left > convergence_sum: # We have more than enough money for full convergence, the extra # funds will be allocated in proportion to `base_amounts`. r.update(convergence_amounts) amount_left -= convergence_sum else: # We only have enough for partial convergence, the funds will be # allocated in proportion to `convergence_amounts`. base_amounts = convergence_amounts # Compute the prorated amounts base_sum = Money.sum(base_amounts.values(), amount_left.currency) base_ratio = 0 if base_sum == 0 else amount_left / base_sum for key, base_amount in sorted(base_amounts.items()): if base_amount == 0: continue assert amount_left >= min_transfer_amount amount = min((base_amount * base_ratio).round_down(), amount_left) r[key] = amount + r.get(key, 0) amount_left -= amount # Deal with rounding errors if amount_left > 0: # Try to distribute in a way that doesn't skew the percentages much. # If there's a tie, use the payday ID to rotate the winner every week. 
i = itertools.count(1) n = len(r) def compute_priority(item): key, current_amount = item base_amount = base_amounts[key] * base_ratio return ( (current_amount - base_amount) / base_amount if base_amount else 2, (next(i) - payday_id) % n ) for key, amount in sorted(r.items(), key=compute_priority): r[key] += min_transfer_amount amount_left -= min_transfer_amount if amount_left == 0: break # Final check and return assert amount_left == 0, '%r != 0' % amount_left return r def prepare_payin_transfer( db, payin, recipient, destination, context, amount, visibility, unit_amount=None, period=None, team=None, ): """Prepare the allocation of funds from a payin. Args: payin (Record): a row from the `payins` table recipient (Participant): the user who will receive the money destination (Record): a row from the `payment_accounts` table amount (Money): the amount of money that will be received visibility (int): a copy of `tip.visibility` unit_amount (Money): the `periodic_amount` of a recurrent donation period (str): the period of a recurrent payment team (int): the ID of the project this payment is tied to Returns: Record: the row created in the `payin_transfers` table """ assert recipient.id == destination.participant, (recipient, destination) if recipient.is_suspended: raise RecipientAccountSuspended() if unit_amount: n_units = int(amount / unit_amount.convert(amount.currency)) else: n_units = None return db.one(""" INSERT INTO payin_transfers (payin, payer, recipient, destination, context, amount, unit_amount, n_units, period, team, visibility, status, ctime) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 'pre', clock_timestamp()) RETURNING * """, (payin.id, payin.payer, recipient.id, destination.pk, context, amount, unit_amount, n_units, period, team, visibility)) def update_payin_transfer( db, pt_id, remote_id, status, error, amount=None, fee=None, update_donor=True, reversed_amount=None, ): """Update the status and other attributes of a payment. Args: pt_id (int): the ID of the payment in our database remote_id (str): the ID of the transfer in the payment processor's database status (str): the new status of the payment error (str): if the payment failed, an error message to show to the payer Returns: Record: the row updated in the `payin_transfers` table """ with db.get_cursor() as cursor: pt = cursor.one(""" UPDATE payin_transfers SET status = %(status)s , error = %(error)s , remote_id = coalesce(remote_id, %(remote_id)s) , amount = COALESCE(%(amount)s, amount) , fee = COALESCE(%(fee)s, fee) , reversed_amount = coalesce(%(reversed_amount)s, reversed_amount) WHERE id = %(pt_id)s RETURNING * , (SELECT amount FROM payin_transfers WHERE id = %(pt_id)s) AS old_amount , (SELECT reversed_amount FROM payin_transfers WHERE id = %(pt_id)s) AS old_reversed_amount , (SELECT status FROM payin_transfers WHERE id = %(pt_id)s) AS old_status """, locals()) if not pt: return if remote_id and pt.remote_id != remote_id: raise AssertionError(f"the remote IDs don't match: {pt.remote_id!r} != {remote_id!r}") if status != pt.old_status: cursor.run(""" INSERT INTO payin_transfer_events (payin_transfer, status, error, timestamp) VALUES (%s, %s, %s, current_timestamp) """, (pt_id, status, error)) # If the payment has failed or hasn't been settled yet, then stop here. if status != 'succeeded': return pt # Update the `paid_in_advance` value of the donation. 
params = pt._asdict() params['delta'] = pt.amount if pt.old_status == 'succeeded': params['delta'] -= pt.old_amount if pt.reversed_amount: params['delta'] += -(pt.reversed_amount - (pt.old_reversed_amount or 0)) elif pt.old_reversed_amount: params['delta'] += pt.old_reversed_amount if params['delta'] == 0: return pt updated_tips = cursor.all(""" WITH latest_tip AS ( SELECT * FROM tips WHERE tipper = %(payer)s AND tippee = COALESCE(%(team)s, %(recipient)s) ORDER BY mtime DESC LIMIT 1 ) UPDATE tips t SET paid_in_advance = ( coalesce_currency_amount(t.paid_in_advance, t.amount::currency) + convert(%(delta)s, t.amount::currency) ) , is_funded = true FROM latest_tip lt WHERE t.tipper = lt.tipper AND t.tippee = lt.tippee AND t.mtime >= lt.mtime RETURNING t.* """, params) if not updated_tips: # This transfer isn't linked to a tip. return pt assert len(updated_tips) < 10, updated_tips if any(t.paid_in_advance <= 0 for t in updated_tips): cursor.run(""" UPDATE tips SET is_funded = false WHERE tipper = %(payer)s AND paid_in_advance <= 0 """, params) # If it's a team donation, update the `paid_in_advance` value of the take. if pt.context == 'team-donation': updated_takes = cursor.all(""" WITH latest_take AS ( SELECT * FROM takes WHERE team = %(team)s AND member = %(recipient)s AND amount IS NOT NULL ORDER BY mtime DESC LIMIT 1 ) UPDATE takes t SET paid_in_advance = ( coalesce_currency_amount(lt.paid_in_advance, lt.amount::currency) + convert(%(delta)s, lt.amount::currency) ) FROM latest_take lt WHERE t.team = lt.team AND t.member = lt.member AND t.mtime >= lt.mtime RETURNING t.id """, params) assert 0 < len(updated_takes) < 10, params # Recompute the cached `receiving` amount of the donee. cursor.run(""" WITH our_tips AS ( SELECT t.amount FROM current_tips t WHERE t.tippee = %(p_id)s AND t.is_funded ) UPDATE participants AS p SET receiving = taking + coalesce_currency_amount( (SELECT sum(t.amount, p.main_currency) FROM our_tips t), p.main_currency ) , npatrons = (SELECT count(*) FROM our_tips) WHERE p.id = %(p_id)s """, dict(p_id=(pt.team or pt.recipient))) # Recompute the donor's cached `giving` amount and payment schedule. if update_donor: donor = db.Participant.from_id(pt.payer) donor.update_giving() donor.schedule_renewals() return pt def abort_payin(db, payin, error='aborted by payer'): """Mark a payin as cancelled. Args: payin (Record): a row from the `payins` table error (str): the error message to attach to the payin Returns: Record: the row updated in the `payins` table """ payin = update_payin(db, payin.id, payin.remote_id, 'failed', error) db.run(""" WITH updated_transfers as ( UPDATE payin_transfers SET status = 'failed' , error = %(error)s WHERE payin = %(payin_id)s AND status <> 'failed' RETURNING * ) INSERT INTO payin_transfer_events (payin_transfer, status, error, timestamp) SELECT pt.id, 'failed', pt.error, current_timestamp FROM updated_transfers pt """, dict(error=error, payin_id=payin.id)) return payin def record_payin_refund( db, payin_id, remote_id, amount, reason, description, status, error=None, ctime=None, ): """Record a charge refund. 
Args: payin_id (int): the ID of the refunded payin in our database remote_id (int): the ID of the refund in the payment processor's database amount (Money): the refund amount, must be less or equal to the payin amount reason (str): why this refund was initiated (`refund_reason` SQL type) description (str): details of the circumstances of this refund status (str): the current status of the refund (`refund_status` SQL type) error (str): error message, if the refund has failed ctime (datetime): when the refund was initiated Returns: Record: the row inserted in the `payin_refunds` table """ refund = db.one(""" INSERT INTO payin_refunds (payin, remote_id, amount, reason, description, status, error, ctime) VALUES (%(payin_id)s, %(remote_id)s, %(amount)s, %(reason)s, %(description)s, %(status)s, %(error)s, coalesce(%(ctime)s, current_timestamp)) ON CONFLICT (payin, remote_id) DO UPDATE SET amount = excluded.amount , reason = excluded.reason , description = excluded.description , status = excluded.status , error = excluded.error RETURNING * , ( SELECT old.status FROM payin_refunds old WHERE old.payin = %(payin_id)s AND old.remote_id = %(remote_id)s ) AS old_status """, locals()) notify = ( refund.status in ('pending', 'succeeded') and refund.status != refund.old_status and refund.ctime > (utcnow() - timedelta(hours=24)) ) if notify: payin = db.one("SELECT * FROM payins WHERE id = %s", (refund.payin,)) payer = db.Participant.from_id(payin.payer) payer.notify( 'payin_refund_initiated', payin_amount=payin.amount, payin_ctime=payin.ctime, refund_amount=refund.amount, refund_reason=refund.reason, email_unverified_address=True, ) return refund def record_payin_transfer_reversal( db, pt_id, remote_id, amount, payin_refund_id=None, ctime=None ): """Record a transfer reversal. Args: pt_id (int): the ID of the reversed transfer in our database remote_id (int): the ID of the reversal in the payment processor's database amount (Money): the reversal amount, must be less or equal to the transfer amount payin_refund_id (int): the ID of the associated payin refund in our database ctime (datetime): when the refund was initiated Returns: Record: the row inserted in the `payin_transfer_reversals` table """ return db.one(""" INSERT INTO payin_transfer_reversals (payin_transfer, remote_id, amount, payin_refund, ctime) VALUES (%(pt_id)s, %(remote_id)s, %(amount)s, %(payin_refund_id)s, coalesce(%(ctime)s, current_timestamp)) ON CONFLICT (payin_transfer, remote_id) DO UPDATE SET amount = excluded.amount , payin_refund = excluded.payin_refund RETURNING * """, locals())
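# --- Illustrative sketch (not part of the original module) ---
# A minimal, self-contained rendition of the proration step in
# `resolve_amounts`, using `decimal.Decimal` in place of the project's
# `Money` class. It omits the convergence amounts and the payday-based
# tie rotation that the real function implements.
from decimal import Decimal, ROUND_DOWN

def prorate_sketch(available, base_amounts, minimum=Decimal("0.01")):
    """Split `available` in proportion to `base_amounts`, then hand out
    any leftover cents to the transfers that were rounded down the most."""
    base_sum = sum(base_amounts.values(), Decimal(0))
    ratio = available / base_sum if base_sum else Decimal(0)
    r, left = {}, available
    for key, base in sorted(base_amounts.items()):
        amount = min((base * ratio).quantize(minimum, rounding=ROUND_DOWN), left)
        r[key] = amount
        left -= amount
    for key in sorted(r, key=lambda k: r[k] - base_amounts[k] * ratio):
        if left == 0:
            break
        r[key] += minimum
        left -= minimum
    assert left == 0
    return r

# prorate_sketch(Decimal("10.00"), {"a": Decimal(3), "b": Decimal(3), "c": Decimal(3)})
# -> {'a': Decimal('3.34'), 'b': Decimal('3.33'), 'c': Decimal('3.33')}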
2.234375
2
vendor/func_lib/assert_handle.py
diudiu/featurefactory
0
12836
# -*- coding:utf-8 -*-
# Python 2 module: uses `basestring` and print statements.
from vendor.errors.feature import FeatureProcessError

"""
All helper functions in this package do the same thing:
they validate the passed-in arguments against certain conditions.
**If an argument is invalid, an exception is raised.**
"""


def f_assert_not_null(seq):
    """Check that the value is non-empty, or that a list of values contains no empty elements"""
    if seq in (None, '', [], {}, ()):
        raise FeatureProcessError("value: %s f_assert_not_null Error" % seq)
    if isinstance(seq, list):
        for value in seq:
            if value in (None, '', {}, [], ()):
                raise FeatureProcessError("value: %s f_assert_not_null Error" % seq)
    return seq


def f_assert_jsonpath_true(seq):
    """Assert that the jsonpath query found something; an empty list [] means the field was not found"""
    if seq in ([],):
        raise FeatureProcessError("jsonpath not find field")
    return seq


def f_assert_must_int(value_list):
    """Check that every element in the list is an int"""
    for value in value_list:
        if not isinstance(value, int):
            raise FeatureProcessError('%s f_assert_must_int Error' % value_list)
    return value_list


def f_assert_must_list(value_list):
    """Check that every element in the list is a list"""
    for value in value_list:
        if not isinstance(value, list):
            raise FeatureProcessError('%s f_assert_must_list Error' % value_list)
    return value_list


def f_assert_must_dict(value_list):
    """Check that every element in the list is a dict"""
    for value in value_list:
        if not isinstance(value, dict):
            raise FeatureProcessError('%s f_assert_must_dict Error' % value_list)
    return value_list


def f_assert_must_digit(value_list, args=False):
    """
    Check that every element in the list is a number
    :param value_list: the list to check
    :param args: whether negative numbers pass; False raises an exception, True lets negatives through
    :return: an exception or the original value
    example:
        :value_list [-2,'-2', 3] :args False :return exception
        :value_list [-2,'-2', 3] :args True  :return [-2,'-2', 3]
    """
    for value in value_list:
        if args:
            if not str(value).lstrip('-').isdigit():
                raise FeatureProcessError('%s negative number=%s f_assert_must_digit Error' % (value_list, args))
        else:
            if not str(value).isdigit():
                raise FeatureProcessError('%s negative number=%s f_assert_must_digit Error' % (value_list, args))
    return value_list


def f_assert_must_basestring(value_list):
    """Check that every element in the list is a string"""
    for value in value_list:
        if not isinstance(value, basestring):
            raise FeatureProcessError('%s f_assert_must_basestring Error' % value_list)
    return value_list


def f_assert_must_digit_or_float(value_list, args=False):
    """
    Check that every element in the list is an integer or a float; args=False raises on negatives, True lets them through
    :param value_list: the list to check
    :param args: whether negative numbers pass; False raises an exception, True lets negatives through
    :return: an exception or the original value
    example:
        :value_list [-2.0,'-2', 3] :args False :return exception
        :value_list [-2.0,'-2', 3] :args True  :return [-2.0,'-2', 3]
    """
    for value in value_list:
        if args:
            if not (str(value).count('.') <= 1 and str(value).replace('.', '').lstrip('-').isdigit()):
                raise FeatureProcessError(
                    '%s negative number=%s f_assert_must_digit_or_float Error' % (value_list, args))
        else:
            if not (str(value).count('.') <= 1 and str(value).replace('.', '').isdigit()):
                raise FeatureProcessError(
                    '%s negative number=%s f_assert_must_digit_or_float Error' % (value_list, args))
    return value_list


def f_assert_must_percent(value_list):
    """
    Check that the values are percentages
    """
    for value in value_list:
        if not (str(value)[-1] == '%' and
                (str(value[:-1]).count('.') <= 1 and str(value[:-1]).replace('.', '').isdigit())):
            raise FeatureProcessError(
                '%s f_assert_must_percent Error' % value_list)
    return value_list


def f_assert_must_between(value_list, args):
    """
    Check that every element in the list is an integer or a float and lies within the range given by args
    :param value_list: the list to check
    :param args: the range, as a two-element list
    :return: an exception or the original value
    example:
        :value_list [2, 2, 3] :args [1,3]
        :value_list ['-2', '-3', 3] :args ['-5',3]
    """
    assert len(args) == 2
    for value in value_list:
        if not (str(value).count('.') <= 1 and str(value).replace('.', '').lstrip('-').isdigit()
                and float(args[0]) <= float(value) <= float(args[1])):
            raise FeatureProcessError('%s f_assert_must_between %s Error' % (value_list, args))
    return value_list


def f_assert_seq0_gte_seq1(value_list):
    """Check that the first element of the list is greater than or equal to the second"""
    if not value_list[0] >= value_list[1]:
        raise FeatureProcessError('%s f_assert_seq0_gte_seq1 Error' % value_list)
    return value_list


if __name__ == '__main__':
    print f_assert_must_percent(['7.0%'])
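# --- Illustrative usage note (not part of the original module) ---
# Each validator returns its input on success and raises FeatureProcessError
# otherwise, so calls can be chained:
#   f_assert_must_digit(f_assert_not_null(['1', '2']))  # -> ['1', '2']
#   f_assert_must_digit(['-1'])                         # raises: negatives rejected by default
#   f_assert_must_digit(['-1'], args=True)              # -> ['-1']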
2.671875
3
main.py
Potapov-AA/CaesarCipherWithKeyword
0
12837
import time
from os import system, walk
from config import CONFIG
from encry import ENCRY
from decry import DECRY


# Configuration setup function
def conf_setting():
    system('CLS')
    print("Enter key elements: ")
    # Choose the alphabet
    alphabet = input("Select the used alphabet [EN]GLISH | [RU]SSIAN: ")
    # Enter the numeric key
    numberKey = input("Enter a numeric key: ")
    # Enter the keyword
    stringKey = input("Enter your keyword: ")
    return CONFIG(alphabet, numberKey, stringKey)


def en_message():
    print("Encryption")


def de_message():
    print("Decryption")


def select_file():
    # Build a list of all .txt files
    filelist = []
    for root, dirs, files in walk("."):
        for file in files:
            if file.endswith(".txt"):
                # Add the file to the list
                filelist.append(file)
    s = ''
    while True:
        system('CLS')
        print("List of txt files: ")
        for i in filelist:
            print(i)
        file = input("Select a file: ")
        try:
            f = open(file, 'r', encoding='utf-8')
            s = f.read()
            f.close()
            break
        except Exception:
            print("Error: file not found")
    return s


# Print the menu
def print_menu(cryptMode, CONF, text):
    file_text = text
    while cryptMode != 'EXIT':
        system('CLS')
        # Choose an action
        cryptMode = input("[E]ncryption |[D]ecryption |[Select] file |[S]etting configure |[Show] configuration |[Show text] |[Exit]: ").upper()
        # If the command does not exist
        if cryptMode not in ['E', 'D', 'S', 'EXIT', 'SHOW', 'SELECT', 'SHOW TEXT']:
            print("Error: command not found!")
            time.sleep(2)
        # If configuration setup was chosen
        if cryptMode == 'S':
            CONF = conf_setting()
        # If encryption or decryption was chosen
        if cryptMode in ['E', 'D']:
            # Check that a file was selected and the configuration was set up
            if CONF is not object and file_text != '':
                try:
                    if cryptMode == 'E':
                        print("Encryption in progress, please wait...")
                        en_text = ENCRY(CONF.alphaList, CONF.new_alphaList, file_text.upper()).new_text()
                        print(file_text)
                        print(en_text)
                        try:
                            f = open("en_text.txt", 'w', encoding='utf-8')
                            f.write(en_text)
                            f.close()
                            print("Successfully. Encrypted file written! (en_text.txt)")
                            input("Please enter something to continue ...")
                        except Exception:
                            print("Error: file was not created!")
                            input("Please enter something to continue ...")
                    if cryptMode == 'D':
                        print("Decryption in progress, please wait...")
                        de_text = DECRY(CONF.alphaList, CONF.new_alphaList, file_text.upper()).new_text()
                        print(file_text)
                        print(de_text)
                        try:
                            f = open("de_text.txt", 'w', encoding='utf-8')
                            f.write(de_text)
                            f.close()
                            print("Successfully. Decrypted file written! (de_text.txt)")
                            input("Please enter something to continue ...")
                        except Exception:
                            print("Error: file was not created!")
                            input("Please enter something to continue ...")
                except Exception as e:
                    print(e)
                    time.sleep(2)
            else:
                if CONF is object:
                    print("Customize the configuration!")
                    time.sleep(2)
                if file_text == '':
                    print("Choose a file!")
                    time.sleep(2)
                print("Wait...")
                time.sleep(2)
        # If file selection was chosen
        if cryptMode == 'SELECT':
            file_text = select_file()
        # If showing the configuration was chosen
        if cryptMode == 'SHOW':
            if CONF is not object:
                CONF.print_conf()
                input("Please enter something to continue ...")
            else:
                print("Customize the configuration!")
                time.sleep(2)
        # If showing the text was chosen
        if cryptMode == 'SHOW TEXT':
            if file_text != '':
                print(file_text)
                input("Please enter something to continue ...")
            else:
                print("Please choose a file!")
                time.sleep(2)


if __name__ == '__main__':
    CONF = object
    text = ''
    cryptMode = ''
    print_menu(cryptMode, CONF, text)
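# --- Illustrative sketch (not part of the original project) ---
# config.py is not shown here; a keyword Caesar cipher typically derives its
# substitution alphabet as below. `keyword_alphabet` is a hypothetical helper
# written for illustration only.
def keyword_alphabet(alphabet, keyword, shift):
    # Keep the first occurrence of each keyword letter, then append the
    # remaining letters of the alphabet in order.
    seen = []
    for ch in keyword.upper() + alphabet:
        if ch in alphabet and ch not in seen:
            seen.append(ch)
    # Rotate the result by the numeric key.
    shift %= len(seen)
    return seen[shift:] + seen[:shift]

# keyword_alphabet("ABCDEFGHIJKLMNOPQRSTUVWXYZ", "CIPHER", 0)
# -> ['C', 'I', 'P', 'H', 'E', 'R', 'A', 'B', 'D', 'F', ...]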
3.21875
3
frappe/email/doctype/email_queue_recipient/email_queue_recipient.py
oryxsolutions/frappe
0
12838
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and contributors
# License: MIT. See LICENSE

import frappe
from frappe.model.document import Document


class EmailQueueRecipient(Document):
    DOCTYPE = "Email Queue Recipient"

    def is_mail_to_be_sent(self):
        return self.status == "Not Sent"

    def is_mail_sent(self):
        return self.status == "Sent"

    def update_db(self, commit=False, **kwargs):
        frappe.db.set_value(self.DOCTYPE, self.name, kwargs)
        if commit:
            frappe.db.commit()
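# --- Illustrative usage note (not part of the original module) ---
# Hypothetical call sequence for a sender working through the queue:
#   recipient = frappe.get_doc("Email Queue Recipient", name)
#   if recipient.is_mail_to_be_sent():      # status == "Not Sent"
#       ...                                 # hand the message to SMTP
#       recipient.update_db(status="Sent", commit=True)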
2.28125
2
tests/groups/family/test_pseudo_dojo.py
mbercx/aiida-pseudo
0
12839
# -*- coding: utf-8 -*-
# pylint: disable=unused-argument,pointless-statement
"""Tests for the `PseudoDojoFamily` class."""
import pytest

from aiida_pseudo.data.pseudo import UpfData, Psp8Data, PsmlData, JthXmlData
from aiida_pseudo.groups.family import PseudoDojoConfiguration, PseudoDojoFamily


def test_type_string(clear_db):
    """Verify the `_type_string` class attribute is correctly set to the corresponding entry point name."""
    assert PseudoDojoFamily._type_string == 'pseudo.family.pseudo_dojo'  # pylint: disable=protected-access


def test_pseudo_types():
    """Test the `PseudoDojoFamily.pseudo_types` method."""
    assert PseudoDojoFamily.pseudo_types == (UpfData, PsmlData, Psp8Data, JthXmlData)


def test_default_configuration():
    """Test the `PseudoDojoFamily.default_configuration` class attribute."""
    assert isinstance(PseudoDojoFamily.default_configuration, PseudoDojoConfiguration)


def test_valid_configurations():
    """Test the `PseudoDojoFamily.valid_configurations` class attribute."""
    valid_configurations = PseudoDojoFamily.valid_configurations
    assert isinstance(valid_configurations, tuple)
    for entry in valid_configurations:
        assert isinstance(entry, PseudoDojoConfiguration)


def test_get_valid_labels():
    """Test the `PseudoDojoFamily.get_valid_labels` class method."""
    valid_labels = PseudoDojoFamily.get_valid_labels()
    assert isinstance(valid_labels, tuple)
    for entry in valid_labels:
        assert isinstance(entry, str)


def test_format_configuration_label():
    """Test the `PseudoDojoFamily.format_configuration_label` class method."""
    configuration = PseudoDojoConfiguration('0.4', 'PBE', 'SR', 'standard', 'psp8')
    assert PseudoDojoFamily.format_configuration_label(configuration) == 'PseudoDojo/0.4/PBE/SR/standard/psp8'


def test_constructor():
    """Test that the `PseudoDojoFamily` constructor validates the label."""
    with pytest.raises(ValueError, match=r'the label `.*` is not a valid PseudoDojo configuration label'):
        PseudoDojoFamily()

    with pytest.raises(ValueError, match=r'the label `.*` is not a valid PseudoDojo configuration label'):
        PseudoDojoFamily(label='nc-sr-04_pbe_standard_psp8')

    label = PseudoDojoFamily.format_configuration_label(PseudoDojoFamily.default_configuration)
    family = PseudoDojoFamily(label=label)
    assert isinstance(family, PseudoDojoFamily)


@pytest.mark.usefixtures('clear_db')
def test_create_from_folder(filepath_pseudos):
    """Test the `PseudoDojoFamily.create_from_folder` class method."""
    family = PseudoDojoFamily.create_from_folder(
        filepath_pseudos('upf'), 'PseudoDojo/0.4/PBE/SR/standard/psp8', pseudo_type=UpfData
    )
    assert isinstance(family, PseudoDojoFamily)


@pytest.mark.usefixtures('clear_db')
def test_create_from_folder_duplicate(filepath_pseudos):
    """Test that `PseudoDojoFamily.create_from_folder` raises for duplicate label."""
    label = 'PseudoDojo/0.4/PBE/SR/standard/psp8'
    PseudoDojoFamily(label=label).store()

    with pytest.raises(ValueError, match=r'the PseudoDojoFamily `.*` already exists'):
        PseudoDojoFamily.create_from_folder(filepath_pseudos('upf'), label)
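# --- Illustrative usage note (not part of the original module) ---
# Run with:  pytest tests/groups/family/test_pseudo_dojo.py
# The `clear_db` and `filepath_pseudos` fixtures are assumed to be provided
# by the package's conftest.py (not shown here).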
2.09375
2
converter.py
Poudingue/Max2Mitsuba
4
12840
<filename>converter.py
import sys
import os

if sys.version_info[0] != 3:
    # str() is required here: concatenating an int to a string raises a TypeError.
    print("Running in python " + str(sys.version_info[0]) + ", should be python 3.")
    print("Please install python 3.7 from the official site python.org")
    print("Exiting now.")
    exit()

import shutil
import argparse
import fbx2tree
import builder_fromfbx
import time

# config is useful to keep info for the different modules
import config

parser = argparse.ArgumentParser()
parser.add_argument("file", help="file")
parser.add_argument("-v", "--verbose", help="Print more stuff", action="store_true")
parser.add_argument("-d", "--debug", help="Create intermediate xml files for debug", action="store_true")
parser.add_argument("--closest", help="Try to stick as close as possible to the original materials in 3dsmax, even at the expense of realism", action="store_true")
parser.add_argument("--realist", help="Try to make materials as realistic as possible, even at the expense of fidelity to the original scene", action="store_true")
args = parser.parse_args()

if args.closest and args.realist:
    print("Incompatible options: --closest and --realist. Choose one, or neither for a balanced result")
    exit(0)

fullname = args.file
if fullname.split(".")[-1].lower() != "fbx":
    print("The file is not an fbx file")
    exit(0)

config.curr_place = os.path.dirname(os.path.abspath(__file__))
config.filename = ".".join(fullname.split(".")[:-1]).split("\\")[-1]  # Remove extension, remove path.
config.filepath = "\\".join(fullname.split("\\")[:-1]) + "\\"  # Keep only the path.
config.verbose = args.verbose
config.debug = args.debug
config.closest = args.closest
config.realist = args.realist

fbxtree = fbx2tree.transform()
builder_fromfbx.build(fbxtree)

print("Conversion finished!")
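# --- Illustrative usage note (not part of the original project) ---
# Typical invocation, given an FBX export from 3ds Max:
#   python converter.py scene.fbx --verbose --realist
# (--closest and --realist are mutually exclusive, as checked above.)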
2.5
2
run.py
snandasena/disaster-response-pipeline
0
12841
import sys
import json

import plotly
from flask import Flask
from flask import render_template, request
from plotly.graph_objects import Heatmap, Bar
from sklearn.externals import joblib
from sqlalchemy import create_engine

sys.path.append("common")
from common.nlp_common_utils import *

if len(sys.argv) == 1:
    sys.argv.append('./data/DisasterResponse.db')
    sys.argv.append('./models/classifier.pkl')


# this is required by joblib and pickle to unpickle the model
def tokenize(text):
    """
    Use the shared utility function to tokenize text into a cleaned token list.

    INPUT:
        text - raw message
    OUTPUT:
        clean_tokens - cleaned tokenized list
    """
    return tokenize_text(text)


# create a flask app
app = Flask(__name__, template_folder='app/templates')

database_file_location, model_location = sys.argv[1:]

# load data
engine = create_engine('sqlite:///{}'.format(database_file_location))
df = pd.read_sql_table('DisasterResponse', engine)

# category df
df_categories = df.iloc[:, 4:]

# load model
model = joblib.load(model_location)


def generate_graph_with_template(data, title, yaxis_title, xaxi_title):
    """
    Common layout used to create a Plotly graph.

    INPUT:
        data - the graph's JSON-serializable data, i.e. a list
        title - the title of the chart
        yaxis_title - Y axis title
        xaxi_title - X axis title
    OUTPUT:
        layout for a particular graph.
    """
    return {
        'data': [data],
        'layout': {
            'title': title,
            'yaxis': {
                'title': yaxis_title
            },
            'xaxis': {
                'title': xaxi_title
            }
        }
    }


def generate_message_genres_bar_chart():
    """
    Create a graph using the data extracted for `genre`.
    """
    # extract data needed for visuals
    genre_counts = df.groupby('genre').count()['message']
    genre_names = list(genre_counts.index)

    data = Bar(x=genre_names, y=genre_counts)
    title = 'Distribution of Message Genres'
    y_title = 'Count'
    x_title = 'Genre'
    return generate_graph_with_template(data, title, y_title, x_title)


def generate_message_categories_distribution_bar_chart():
    """
    Create a graph for the distribution of the messages.
    """
    data = Bar(x=df_categories.columns,
               y=list(df_categories.sum().sort_values(ascending=False)))
    title = 'Distribution of Message Categories'
    y_title = 'Count'
    x_title = 'Category'
    return generate_graph_with_template(data, title, y_title, x_title)


def generate_two_cat_relation_heat_map():
    """
    A correlation matrix for categories.
    """
    data = Heatmap(
        z=df_categories.corr(),
        y=df_categories.columns,
        x=df_categories.columns)
    title = 'Correlation Distribution of Categories'
    y_title = 'Category'
    x_title = 'Category'
    return generate_graph_with_template(data, title, y_title, x_title)


def generate_graphs():
    # create visuals
    graphs = [generate_message_genres_bar_chart(),
              generate_message_categories_distribution_bar_chart(),
              generate_two_cat_relation_heat_map()]
    return graphs


# index webpage displays cool visuals and receives user input text for model
@app.route('/')
@app.route('/index')
def index():
    graphs = generate_graphs()
    # encode plotly graphs in JSON
    ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
    graph_json = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
    # render web page with plotly graphs
    return render_template('master.html', ids=ids, graphJSON=graph_json)


# web page that handles user query and displays model results
@app.route('/go')
def go():
    # save user input in query
    query = request.args.get('query', '')

    # use model to predict classification for query
    classification_labels = model.predict([query])[0]
    classification_results = dict(zip(df.columns[4:], classification_labels))

    # This will render the go.html. Please see that file.
    return render_template(
        'go.html',
        query=query,
        classification_result=classification_results
    )


def main():
    app.run(host='0.0.0.0', port=3001, debug=True)


if __name__ == '__main__':
    main()
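# --- Illustrative usage note (not part of the original module) ---
# The module falls back to ./data/DisasterResponse.db and
# ./models/classifier.pkl when no arguments are given, so both of these
# are equivalent:
#   python run.py
#   python run.py ./data/DisasterResponse.db ./models/classifier.pkl
# then browse to http://localhost:3001/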
2.84375
3
process.py
s-xie/processing
0
12842
<reponame>s-xie/processing
import sys
from unidecode import unidecode
from nltk.tokenize import sent_tokenize
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('fin')
parser.add_argument('fout')
args = parser.parse_args()

# Collect unique, stripped input lines.
sentences = set()
with open(args.fin, 'r') as f:
    count = 0
    for line in f:
        count += 1
        sentences.add(line.strip())
        if count % 100000 == 0:
            print(count)

with open(args.fout, 'w') as f:
    count = 0
    group = ''
    for s in sentences:
        count += 1
        if s != '':
            group += s + '\n'
        # Re-tokenize accumulated lines into sentences in batches of 20.
        if count % 20 == 0:
            try:
                p = sent_tokenize(unidecode(group))
                f.write('\n'.join(p))
                group = ''
            except Exception:
                print("nltk error")
        if count % 10000 == 0:
            print(count)
    # Flush the final partial batch, which the original loop silently dropped.
    if group:
        try:
            f.write('\n'.join(sent_tokenize(unidecode(group))))
        except Exception:
            print("nltk error")
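# --- Illustrative usage note (not part of the original script) ---
#   python process.py raw_lines.txt sentences.txt
# Deduplicates the input lines, then re-splits them into sentences in
# batches of 20. sent_tokenize needs the NLTK 'punkt' model:
#   python -m nltk.downloader punkt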
2.515625
3
Aula 14 - Estrutura de repetição while/desafio058-jogo-da-adivinhação.py
josue-rosa/Python---Curso-em-Video
3
12843
<reponame>josue-rosa/Python---Curso-em-Video<filename>Aula 14 - Estrutura de repetição while/desafio058-jogo-da-adivinhação.py
"""
Improve the game from CHALLENGE 028, where the computer "thinks" of a
number between 0 and 10. This time the player keeps guessing until they
get it right, and at the end the program shows how many guesses were
needed to win.
"""

"""
# First attempt
from random import randint
tentativas = 1
computador = randint(0, 10)
jogador = int(input('Enter a number so we can play '))
while jogador != computador:
    jogador = int(input('Wrong. Try again. '))
    tentativas += 1
print(f'You got it. I was thinking of {computador} too.')
print(f'Total number of attempts: {tentativas}.')
"""

# Teacher's corrected version
from random import randint

computador = randint(0, 10)
print('I am thinking of a number between 0 and 10')
acertou = False
palpites = 0
while not acertou:
    jogador = int(input('What is your guess? '))
    palpites += 1
    if jogador == computador:
        acertou = True
    else:
        if jogador < computador:
            print('Higher. Try one more time.')
        elif jogador > computador:
            print('Lower. Try one more time.')
print(f'You got it in {palpites} guesses. Congratulations!')
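# --- Illustrative note (not part of the original exercise) ---
# An optimal player bisects the remaining range; for 0..10 that takes at
# most ceil(log2(11)) = 4 guesses, e.g. 5 -> 8 -> 9 -> 10.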
3.953125
4
plugins/googlefight.py
serpis/pynik
4
12844
# coding: utf-8

import re
import utility
from commands import Command


def google_pages(string):
    url = 'http://www.google.se/search?q=' + utility.escape(string) + '&ie=UTF-8&oe=UTF-8'
    response = utility.read_url(url)
    data = response["data"]

    search = re.search(r'swrnum=(\d+)">', data)
    if search:
        result = search.group(1)
        if result:
            return int(result, 10)
        else:
            return None
    else:
        return None


def google_divisor(int1, int2):
    # Pick the larger of the two counts. (The original assigned the smaller
    # value to `biggest`, which inverted the unit selection.)
    if int1 < int2:
        biggest = int2
    else:
        biggest = int1

    if biggest > 1000000:
        divisor = 1000000.0
        unit = 'm'
    elif biggest > 1000:
        divisor = 1000.0
        unit = 'k'
    else:
        divisor = 1
        unit = ''

    return (divisor, unit)


class Googlefight(Command):
    def __init__(self):
        pass

    def trig_googlefight(self, bot, source, target, trigger, argument):
        args = argument.split('|', 2)

        if len(args) == 2 and len(args[0]) > 0 and len(args[1]) > 0:
            result1 = google_pages(args[0])
            result2 = google_pages(args[1])

            if result1 and result2:
                divisor, unit = google_divisor(result1, result2)
                result1 = result1 / divisor
                result2 = result2 / divisor

                if result1 == result2:
                    # Use the shared unit here; the original divided by 1000 a second time.
                    return "It's a tie! " + str(result1) + unit + " hits!"
                elif result1 > result2:
                    return args[0] + ' is the winner! (' + str(result1) + unit + ' to ' + str(result2) + unit + ')'
                else:
                    return args[1] + ' is the winner! (' + str(result2) + unit + ' to ' + str(result1) + unit + ')'
            else:
                return "Couldn't search."
        else:
            return "Usage: .googlefight arg1|arg2"
2.890625
3
was/lib/tuning/actions/ThreadPool.py
rocksun/ucmd
2
12845
# wsadmin (Jython) tuning script: AdminConfig is a global provided by the
# WebSphere wsadmin shell, which runs Python 2 syntax.
min_size = 512
max_size = 512

def app_server_tuning(server_config):
    """Set the WebContainer thread pool of the given server to the fixed size above."""
    server_name = AdminConfig.showAttribute(server_config, "name")
    threadpool_list = AdminConfig.list('ThreadPool', server_config).split("\n")
    for tp in threadpool_list:
        if tp.count('WebContainer') == 1:
            print "Modify Server '%s' WebContainer Pool Min=%d, Max=%d" % (server_name, min_size, max_size)
            AdminConfig.modify(tp, [["minimumSize", min_size], ["maximumSize", max_size]])
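# --- Illustrative usage note (not part of the original script) ---
# Hypothetical wsadmin session; AdminConfig is supplied by wsadmin itself:
#   server = AdminConfig.getid('/Cell:myCell/Node:myNode/Server:server1/')
#   app_server_tuning(server)
#   AdminConfig.save()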
2.671875
3
app/api/v2/resources/saleorders.py
calebrotich10/store-manager-api-v2
0
12846
<filename>app/api/v2/resources/saleorders.py """This module contains objects for saleorders endpoints""" from flask import Flask, jsonify, request, abort, make_response from flask_restful import Resource from flask_jwt_extended import get_jwt_identity, jwt_required from . import common_functions from ..models import products, saleorders from ..utils import verify from .. import database class SaleOrder(Resource): """Class contains CRUD definitions for saleorders """ def post(self): """POST /saleorder endpoint""" user_id = verify.verify_tokens()[1] data = request.get_json() common_functions.no_json_in_request(data) try: items = data['items'] except KeyError: return make_response(jsonify({ "message":"list of items missing" }), 403) if not isinstance(items, (list, )): abort(make_response(jsonify( message="The value should be a list of dictionaries" ), 400)) totalAmount = 0 saleorder = saleorders.SaleOrder(amount=totalAmount, made_by=user_id) saleorder.save() query = """SELECT saleorder_id from saleorders WHERE amount = 0 """ saleorder_id = database.select_from_db(query)[0]['saleorder_id'] items_sold = [] for item in items: try: product = item['product'] except: return make_response(jsonify({ "message":"Kindly specify the product you want to buy" }), 403) try: quantity = item['quantity'] except: return make_response(jsonify({ "message":"Kindly specify the quantity of the product you want" }), 403) if not isinstance(product, int): rollback_saleorder = saleorders.SaleOrder(saleorder_id=saleorder_id) rollback_saleorder.rollback_saleorder() abort(make_response(jsonify( message="Please select the a product you want to purchase" ), 400)) if not isinstance(quantity, int): rollback_saleorder = saleorders.SaleOrder(saleorder_id=saleorder_id) rollback_saleorder.rollback_saleorder() abort(make_response(jsonify( message="Please have a number for the quantity value" ), 400)) if quantity < 1: rollback_saleorder = saleorders.SaleOrder(saleorder_id=saleorder_id) rollback_saleorder.rollback_saleorder() abort(make_response(jsonify( message="Please have a quantity value over 0" ), 400)) query = """SELECT * FROM products WHERE product_id = '{}'""".format(product) product_exists = database.select_from_db(query) if product_exists: product_name = product_exists[0]['product_name'] product_price = product_exists[0]['product_price'] inventory = product_exists[0]['inventory'] if inventory == 0: rollback_saleorder = saleorders.SaleOrder(saleorder_id=saleorder_id) rollback_saleorder.rollback_saleorder() return abort(make_response(jsonify( message="Please eliminate {} from your sale. It is currently out of stock".format(product_name) ), 400)) if quantity > inventory: rollback_saleorder = saleorders.SaleOrder(saleorder_id=saleorder_id) rollback_saleorder.rollback_saleorder() return abort(make_response(jsonify( message="Our current stock cannot serve an order of {}. 
You can currently order a maximum of {} for the product '{}'".format(quantity, inventory, product_name) ), 400)) product_amount = product_price * quantity current_item = { "product": product_exists[0]['product_name'], "quantity": quantity, "price": product_exists[0]['product_price'], "product_amount": product_amount } items_sold.append(current_item) totalAmount += product_amount sale_item = saleorders.SaleItems(saleorder_id=saleorder_id, product=product, quantity=quantity) sale_item.save() updated_inventory = inventory - quantity product_to_update = products.Products(product_id=product ,inventory=updated_inventory) product_to_update.deduct_inventory() if not product_exists: rollback_saleorder = saleorders.SaleOrder(saleorder_id=saleorder_id) rollback_saleorder.rollback_saleorder() return abort(make_response(jsonify({ "message": "Product with id {} is not available in the store".format(product) }), 404)) update_amount_query = """UPDATE saleorders SET amount = {} WHERE saleorder_id = {}""".format(totalAmount, saleorder_id) database.insert_to_db(update_amount_query) return make_response(jsonify({ "message": "Checkout complete", "items_sold": items_sold, "total_amount": totalAmount }), 201) def get(self): """GET /saleorder endpoint""" verify.verify_tokens() saleorder = saleorders.SaleOrder() get_saleorder = saleorder.get() if not get_saleorder: return make_response(jsonify({ 'message': "No sale orders created yet" }), 404) response = jsonify({ 'message': "Successfully fetched all the sale orders", 'sale_orders': get_saleorder }) response.status_code = 200 return response class SpecificSaleOrder(Resource): """Class contains CRUD definitions for saleorders """ def get(self, saleorder_id): """GET /saleorder/<int:saleorder_id>""" verify.verify_tokens() query = """SELECT * FROM saleorders WHERE saleorder_id = '{}'""".format(saleorder_id) sale_order = database.select_from_db(query) if not sale_order: return make_response(jsonify({ "message": "Sale Order with id {} not found".format(saleorder_id) } ), 404) return make_response(jsonify({ "message": "Sale order fetched successfully", "saleorder": sale_order } ), 200)
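# --- Illustrative request sketch (not part of the original module) ---
# POST /saleorder expects a JSON body shaped like:
#   {"items": [{"product": 1, "quantity": 2}, {"product": 3, "quantity": 1}]}
# and on success returns 201 with "items_sold" and the computed "total_amount".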
2.8125
3
nautobot/circuits/__init__.py
psmware-ltd/nautobot
384
12847
<reponame>psmware-ltd/nautobot
default_app_config = "nautobot.circuits.apps.CircuitsConfig"
0.96875
1
autogalaxy/profiles/mass_profiles/stellar_mass_profiles.py
Jammy2211/PyAutoModel
4
12848
import copy import numpy as np from scipy.special import wofz from scipy.integrate import quad from typing import List, Tuple import autoarray as aa from autogalaxy.profiles.mass_profiles import MassProfile from autogalaxy.profiles.mass_profiles.mass_profiles import ( MassProfileMGE, MassProfileCSE, ) from autogalaxy.profiles.mass_profiles.mass_profiles import psi_from class StellarProfile: pass class EllGaussian(MassProfile, StellarProfile): def __init__( self, centre: Tuple[float, float] = (0.0, 0.0), elliptical_comps: Tuple[float, float] = (0.0, 0.0), intensity: float = 0.1, sigma: float = 0.01, mass_to_light_ratio: float = 1.0, ): """ The elliptical Gaussian light profile. Parameters ---------- centre The (y,x) arc-second coordinates of the profile centre. elliptical_comps The first and second ellipticity components of the elliptical coordinate system, (see the module `autogalaxy -> convert.py` for the convention). intensity Overall intensity normalisation of the light profile (units are dimensionless and derived from the data the light profile's image is compared too, which is expected to be electrons per second). sigma The sigma value of the Gaussian. """ super(EllGaussian, self).__init__( centre=centre, elliptical_comps=elliptical_comps ) super(MassProfile, self).__init__( centre=centre, elliptical_comps=elliptical_comps ) self.mass_to_light_ratio = mass_to_light_ratio self.intensity = intensity self.sigma = sigma def deflections_yx_2d_from(self, grid: aa.type.Grid2DLike): """ Calculate the deflection angles at a given set of arc-second gridded coordinates. Parameters ---------- grid The grid of (y,x) arc-second coordinates the deflection angles are computed on. """ return self.deflections_2d_via_analytic_from(grid=grid) @aa.grid_dec.grid_2d_to_structure @aa.grid_dec.transform @aa.grid_dec.relocate_to_radial_minimum def deflections_2d_via_analytic_from(self, grid: aa.type.Grid2DLike): """ Calculate the deflection angles at a given set of arc-second gridded coordinates. Parameters ---------- grid The grid of (y,x) arc-second coordinates the deflection angles are computed on. """ deflections = ( self.mass_to_light_ratio * self.intensity * self.sigma * np.sqrt((2 * np.pi) / (1.0 - self.axis_ratio ** 2.0)) * self.zeta_from(grid=grid) ) return self.rotate_grid_from_reference_frame( np.multiply( 1.0, np.vstack((-1.0 * np.imag(deflections), np.real(deflections))).T ) ) @aa.grid_dec.grid_2d_to_structure @aa.grid_dec.transform @aa.grid_dec.relocate_to_radial_minimum def deflections_2d_via_integral_from(self, grid: aa.type.Grid2DLike): """ Calculate the deflection angles at a given set of arc-second gridded coordinates. Parameters ---------- grid The grid of (y,x) arc-second coordinates the deflection angles are computed on. Note: sigma is divided by sqrt(q) here. 
""" def calculate_deflection_component(npow, index): deflection_grid = self.axis_ratio * grid[:, index] for i in range(grid.shape[0]): deflection_grid[i] *= ( self.intensity * self.mass_to_light_ratio * quad( self.deflection_func, a=0.0, b=1.0, args=( grid[i, 0], grid[i, 1], npow, self.axis_ratio, self.sigma / np.sqrt(self.axis_ratio), ), )[0] ) return deflection_grid deflection_y = calculate_deflection_component(1.0, 0) deflection_x = calculate_deflection_component(0.0, 1) return self.rotate_grid_from_reference_frame( np.multiply(1.0, np.vstack((deflection_y, deflection_x)).T) ) @staticmethod def deflection_func(u, y, x, npow, axis_ratio, sigma): eta_u = np.sqrt(axis_ratio) * np.sqrt( (u * ((x ** 2) + (y ** 2 / (1 - (1 - axis_ratio ** 2) * u)))) ) return np.exp(-0.5 * np.square(np.divide(eta_u, sigma))) / ( (1 - (1 - axis_ratio ** 2) * u) ** (npow + 0.5) ) @aa.grid_dec.grid_2d_to_structure @aa.grid_dec.transform @aa.grid_dec.relocate_to_radial_minimum def convergence_2d_from(self, grid: aa.type.Grid2DLike): """Calculate the projected convergence at a given set of arc-second gridded coordinates. Parameters ---------- grid The grid of (y,x) arc-second coordinates the convergence is computed on. """ return self.convergence_func(self.grid_to_eccentric_radii(grid)) def convergence_func(self, grid_radius: float) -> float: return self.mass_to_light_ratio * self.image_2d_via_radii_from(grid_radius) @aa.grid_dec.grid_2d_to_structure def potential_2d_from(self, grid: aa.type.Grid2DLike): return np.zeros(shape=grid.shape[0]) def image_2d_via_radii_from(self, grid_radii: np.ndarray): """Calculate the intensity of the Gaussian light profile on a grid of radial coordinates. Parameters ---------- grid_radii The radial distance from the centre of the profile. for each coordinate on the grid. Note: sigma is divided by sqrt(q) here. 
""" return np.multiply( self.intensity, np.exp( -0.5 * np.square( np.divide(grid_radii, self.sigma / np.sqrt(self.axis_ratio)) ) ), ) @property def axis_ratio(self): axis_ratio = super().axis_ratio return axis_ratio if axis_ratio < 0.9999 else 0.9999 def zeta_from(self, grid: aa.type.Grid2DLike): q2 = self.axis_ratio ** 2.0 ind_pos_y = grid[:, 0] >= 0 shape_grid = np.shape(grid) output_grid = np.zeros((shape_grid[0]), dtype=np.complex128) scale_factor = self.axis_ratio / (self.sigma * np.sqrt(2.0 * (1.0 - q2))) xs_0 = grid[:, 1][ind_pos_y] * scale_factor ys_0 = grid[:, 0][ind_pos_y] * scale_factor xs_1 = grid[:, 1][~ind_pos_y] * scale_factor ys_1 = -grid[:, 0][~ind_pos_y] * scale_factor output_grid[ind_pos_y] = -1j * ( wofz(xs_0 + 1j * ys_0) - np.exp(-(xs_0 ** 2.0) * (1.0 - q2) - ys_0 * ys_0 * (1.0 / q2 - 1.0)) * wofz(self.axis_ratio * xs_0 + 1j * ys_0 / self.axis_ratio) ) output_grid[~ind_pos_y] = np.conj( -1j * ( wofz(xs_1 + 1j * ys_1) - np.exp(-(xs_1 ** 2.0) * (1.0 - q2) - ys_1 * ys_1 * (1.0 / q2 - 1.0)) * wofz(self.axis_ratio * xs_1 + 1j * ys_1 / self.axis_ratio) ) ) return output_grid def with_new_normalization(self, normalization): mass_profile = copy.copy(self) mass_profile.mass_to_light_ratio = normalization return mass_profile # noinspection PyAbstractClass class AbstractEllSersic(MassProfile, MassProfileMGE, MassProfileCSE, StellarProfile): def __init__( self, centre: Tuple[float, float] = (0.0, 0.0), elliptical_comps: Tuple[float, float] = (0.0, 0.0), intensity: float = 0.1, effective_radius: float = 0.6, sersic_index: float = 0.6, mass_to_light_ratio: float = 1.0, ): """ The Sersic mass profile, the mass profiles of the light profiles that are used to fit and subtract the lens \ model_galaxy's light. Parameters ---------- centre The (y,x) arc-second coordinates of the profile centre. elliptical_comps The first and second ellipticity components of the elliptical coordinate system, (see the module `autogalaxy -> convert.py` for the convention). intensity Overall flux intensity normalisation in the light profiles (electrons per second). effective_radius The radius containing half the light of this profile. sersic_index Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated). mass_to_light_ratio The mass-to-light ratio of the light profiles """ super(AbstractEllSersic, self).__init__( centre=centre, elliptical_comps=elliptical_comps ) super(MassProfile, self).__init__( centre=centre, elliptical_comps=elliptical_comps ) super(MassProfileMGE, self).__init__() super(MassProfileCSE, self).__init__() self.mass_to_light_ratio = mass_to_light_ratio self.intensity = intensity self.effective_radius = effective_radius self.sersic_index = sersic_index def deflections_yx_2d_from(self, grid: aa.type.Grid2DLike): return self.deflections_2d_via_cse_from(grid=grid) @aa.grid_dec.grid_2d_to_structure @aa.grid_dec.transform @aa.grid_dec.relocate_to_radial_minimum def deflections_2d_via_mge_from(self, grid: aa.type.Grid2DLike): """ Calculate the projected 2D deflection angles from a grid of (y,x) arc second coordinates, by computing and summing the convergence of each individual cse used to decompose the mass profile. The cored steep elliptical (cse) decomposition of a the elliptical NFW mass profile (e.g. `decompose_convergence_via_cse`) is using equation (12) of Oguri 2021 (https://arxiv.org/abs/2106.11464). Parameters ---------- grid The grid of (y,x) arc-second coordinates the convergence is computed on. 
""" return self._deflections_2d_via_mge_from( grid=grid, sigmas_factor=np.sqrt(self.axis_ratio) ) @aa.grid_dec.grid_2d_to_structure @aa.grid_dec.transform @aa.grid_dec.relocate_to_radial_minimum def deflections_2d_via_cse_from(self, grid: aa.type.Grid2DLike): """ Calculate the projected 2D deflection angles from a grid of (y,x) arc second coordinates, by computing and summing the convergence of each individual cse used to decompose the mass profile. The cored steep elliptical (cse) decomposition of a the elliptical NFW mass profile (e.g. `decompose_convergence_via_cse`) is using equation (12) of Oguri 2021 (https://arxiv.org/abs/2106.11464). Parameters ---------- grid The grid of (y,x) arc-second coordinates the convergence is computed on. """ return self._deflections_2d_via_cse_from(grid=grid) @aa.grid_dec.grid_2d_to_structure @aa.grid_dec.transform @aa.grid_dec.relocate_to_radial_minimum def convergence_2d_from(self, grid: aa.type.Grid2DLike): """Calculate the projected convergence at a given set of arc-second gridded coordinates. Parameters ---------- grid The grid of (y,x) arc-second coordinates the convergence is computed on. """ return self.convergence_func(self.grid_to_eccentric_radii(grid)) @aa.grid_dec.grid_2d_to_structure @aa.grid_dec.transform @aa.grid_dec.relocate_to_radial_minimum def convergence_2d_via_mge_from(self, grid: aa.type.Grid2DLike): """ Calculate the projected convergence at a given set of arc-second gridded coordinates. Parameters ---------- grid The grid of (y,x) arc-second coordinates the convergence is computed on. """ eccentric_radii = self.grid_to_eccentric_radii(grid=grid) return self._convergence_2d_via_mge_from(grid_radii=eccentric_radii) @aa.grid_dec.grid_2d_to_structure @aa.grid_dec.transform @aa.grid_dec.relocate_to_radial_minimum def convergence_2d_via_cse_from(self, grid: aa.type.Grid2DLike): """ Calculate the projected 2D convergence from a grid of (y,x) arc second coordinates, by computing and summing the convergence of each individual cse used to decompose the mass profile. The cored steep elliptical (cse) decomposition of a the elliptical NFW mass profile (e.g. `decompose_convergence_via_cse`) is using equation (12) of Oguri 2021 (https://arxiv.org/abs/2106.11464). Parameters ---------- grid The grid of (y,x) arc-second coordinates the convergence is computed on. """ elliptical_radii = self.grid_to_elliptical_radii(grid=grid) return self._convergence_2d_via_cse_from(grid_radii=elliptical_radii) def convergence_func(self, grid_radius: float) -> float: return self.mass_to_light_ratio * self.image_2d_via_radii_from(grid_radius) @aa.grid_dec.grid_2d_to_structure def potential_2d_from(self, grid: aa.type.Grid2DLike): return np.zeros(shape=grid.shape[0]) def image_2d_via_radii_from(self, radius: np.ndarray): """ Returns the intensity of the profile at a given radius. Parameters ---------- radius The distance from the centre of the profile. 
""" return self.intensity * np.exp( -self.sersic_constant * (((radius / self.effective_radius) ** (1.0 / self.sersic_index)) - 1) ) def decompose_convergence_via_mge(self) -> Tuple[List, List]: radii_min = self.effective_radius / 100.0 radii_max = self.effective_radius * 20.0 def sersic_2d(r): return ( self.mass_to_light_ratio * self.intensity * np.exp( -self.sersic_constant * (((r / self.effective_radius) ** (1.0 / self.sersic_index)) - 1.0) ) ) return self._decompose_convergence_via_mge( func=sersic_2d, radii_min=radii_min, radii_max=radii_max ) def decompose_convergence_via_cse(self,) -> Tuple[List, List]: """ Decompose the convergence of the Sersic profile into cored steep elliptical (cse) profiles. This decomposition uses the standard 2d profile of a Sersic mass profile. Parameters ---------- func The function representing the profile that is decomposed into CSEs. radii_min: The minimum radius to fit radii_max: The maximum radius to fit total_cses The number of CSEs used to approximate the input func. sample_points: int (should be larger than 'total_cses') The number of data points to fit Returns ------- Tuple[List, List] A list of amplitudes and core radii of every cored steep elliptical (cse) the mass profile is decomposed into. """ upper_dex, lower_dex, total_cses, sample_points = cse_settings_from( effective_radius=self.effective_radius, sersic_index=self.sersic_index, sersic_constant=self.sersic_constant, mass_to_light_gradient=0.0, ) scaled_effective_radius = self.effective_radius / np.sqrt(self.axis_ratio) radii_min = scaled_effective_radius / 10.0 ** lower_dex radii_max = scaled_effective_radius * 10.0 ** upper_dex def sersic_2d(r): return ( self.mass_to_light_ratio * self.intensity * np.exp( -self.sersic_constant * ( ((r / scaled_effective_radius) ** (1.0 / self.sersic_index)) - 1.0 ) ) ) return self._decompose_convergence_via_cse_from( func=sersic_2d, radii_min=radii_min, radii_max=radii_max, total_cses=total_cses, sample_points=sample_points, ) @property def sersic_constant(self): """A parameter derived from Sersic index which ensures that effective radius contains 50% of the profile's total integrated light. """ return ( (2 * self.sersic_index) - (1.0 / 3.0) + (4.0 / (405.0 * self.sersic_index)) + (46.0 / (25515.0 * self.sersic_index ** 2)) + (131.0 / (1148175.0 * self.sersic_index ** 3)) - (2194697.0 / (30690717750.0 * self.sersic_index ** 4)) ) @property def ellipticity_rescale(self): return 1.0 - ((1.0 - self.axis_ratio) / 2.0) @property def elliptical_effective_radius(self): """ The effective_radius of a Sersic light profile is defined as the circular effective radius. This is the \ radius within which a circular aperture contains half the profiles's total integrated light. For elliptical \ systems, this won't robustly capture the light profile's elliptical shape. The elliptical effective radius instead describes the major-axis radius of the ellipse containing \ half the light, and may be more appropriate for highly flattened systems like disk galaxies. 
""" return self.effective_radius / np.sqrt(self.axis_ratio) def with_new_normalization(self, normalization): mass_profile = copy.copy(self) mass_profile.mass_to_light_ratio = normalization return mass_profile class EllSersic(AbstractEllSersic, MassProfileMGE, MassProfileCSE): @aa.grid_dec.grid_2d_to_structure @aa.grid_dec.transform @aa.grid_dec.relocate_to_radial_minimum def deflections_2d_via_integral_from(self, grid: aa.type.Grid2DLike): """ Calculate the deflection angles at a given set of arc-second gridded coordinates. Parameters ---------- grid The grid of (y,x) arc-second coordinates the deflection angles are computed on. """ def calculate_deflection_component(npow, index): sersic_constant = self.sersic_constant deflection_grid = self.axis_ratio * grid[:, index] for i in range(grid.shape[0]): deflection_grid[i] *= ( self.intensity * self.mass_to_light_ratio * quad( self.deflection_func, a=0.0, b=1.0, args=( grid[i, 0], grid[i, 1], npow, self.axis_ratio, self.sersic_index, self.effective_radius, sersic_constant, ), )[0] ) return deflection_grid deflection_y = calculate_deflection_component(1.0, 0) deflection_x = calculate_deflection_component(0.0, 1) return self.rotate_grid_from_reference_frame( np.multiply(1.0, np.vstack((deflection_y, deflection_x)).T) ) @staticmethod def deflection_func( u, y, x, npow, axis_ratio, sersic_index, effective_radius, sersic_constant ): eta_u = np.sqrt(axis_ratio) * np.sqrt( (u * ((x ** 2) + (y ** 2 / (1 - (1 - axis_ratio ** 2) * u)))) ) return np.exp( -sersic_constant * (((eta_u / effective_radius) ** (1.0 / sersic_index)) - 1) ) / ((1 - (1 - axis_ratio ** 2) * u) ** (npow + 0.5)) class SphSersic(EllSersic): def __init__( self, centre: Tuple[float, float] = (0.0, 0.0), intensity: float = 0.1, effective_radius: float = 0.6, sersic_index: float = 0.6, mass_to_light_ratio: float = 1.0, ): """ The Sersic mass profile, the mass profiles of the light profiles that are used to fit and subtract the lens model_galaxy's light. Parameters ---------- centre The (y,x) arc-second coordinates of the profile centre intensity Overall flux intensity normalisation in the light profiles (electrons per second) effective_radius The circular radius containing half the light of this profile. sersic_index Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated). mass_to_light_ratio The mass-to-light ratio of the light profile. """ super().__init__( centre=centre, elliptical_comps=(0.0, 0.0), intensity=intensity, effective_radius=effective_radius, sersic_index=sersic_index, mass_to_light_ratio=mass_to_light_ratio, ) class EllExponential(EllSersic): def __init__( self, centre: Tuple[float, float] = (0.0, 0.0), elliptical_comps: Tuple[float, float] = (0.0, 0.0), intensity: float = 0.1, effective_radius: float = 0.6, mass_to_light_ratio: float = 1.0, ): """ The EllExponential mass profile, the mass profiles of the light profiles that are used to fit and subtract the lens model_galaxy's light. Parameters ---------- centre The (y,x) arc-second coordinates of the profile centre. elliptical_comps The first and second ellipticity components of the elliptical coordinate system, (see the module `autogalaxy -> convert.py` for the convention). intensity Overall flux intensity normalisation in the light profiles (electrons per second). effective_radius The circular radius containing half the light of this profile. 
mass_to_light_ratio The mass-to-light ratio of the light profiles """ super().__init__( centre=centre, elliptical_comps=elliptical_comps, intensity=intensity, effective_radius=effective_radius, sersic_index=1.0, mass_to_light_ratio=mass_to_light_ratio, ) class SphExponential(EllExponential): def __init__( self, centre: Tuple[float, float] = (0.0, 0.0), intensity: float = 0.1, effective_radius: float = 0.6, mass_to_light_ratio: float = 1.0, ): """ The Exponential mass profile, the mass profiles of the light profiles that are used to fit and subtract the lens model_galaxy's light. Parameters ---------- centre The (y,x) arc-second coordinates of the profile centre. intensity Overall flux intensity normalisation in the light profiles (electrons per second). effective_radius The circular radius containing half the light of this profile. mass_to_light_ratio The mass-to-light ratio of the light profiles. """ super().__init__( centre=centre, elliptical_comps=(0.0, 0.0), intensity=intensity, effective_radius=effective_radius, mass_to_light_ratio=mass_to_light_ratio, ) class EllDevVaucouleurs(EllSersic): def __init__( self, centre: Tuple[float, float] = (0.0, 0.0), elliptical_comps: Tuple[float, float] = (0.0, 0.0), intensity: float = 0.1, effective_radius: float = 0.6, mass_to_light_ratio: float = 1.0, ): """ The EllDevVaucouleurs mass profile, the mass profiles of the light profiles that are used to fit and subtract the lens model_galaxy's light. Parameters ---------- centre The (y,x) arc-second coordinates of the profile centre. elliptical_comps The first and second ellipticity components of the elliptical coordinate system, (see the module `autogalaxy -> convert.py` for the convention). intensity Overall flux intensity normalisation in the light profiles (electrons per second). effective_radius The radius containing half the light of this profile. mass_to_light_ratio The mass-to-light ratio of the light profile. """ super().__init__( centre=centre, elliptical_comps=elliptical_comps, intensity=intensity, effective_radius=effective_radius, sersic_index=4.0, mass_to_light_ratio=mass_to_light_ratio, ) class SphDevVaucouleurs(EllDevVaucouleurs): def __init__( self, centre: Tuple[float, float] = (0.0, 0.0), intensity: float = 0.1, effective_radius: float = 0.6, mass_to_light_ratio: float = 1.0, ): """ The DevVaucouleurs mass profile, the mass profiles of the light profiles that are used to fit and subtract the lens model_galaxy's light. Parameters ---------- centre The (y,x) arc-second coordinates of the profile centre. intensity Overall flux intensity normalisation in the light profiles (electrons per second). effective_radius The circular radius containing half the light of this profile. mass_to_light_ratio The mass-to-light ratio of the light profiles. """ super().__init__( centre=centre, elliptical_comps=(0.0, 0.0), intensity=intensity, effective_radius=effective_radius, mass_to_light_ratio=mass_to_light_ratio, ) class EllSersicRadialGradient(AbstractEllSersic): def __init__( self, centre: Tuple[float, float] = (0.0, 0.0), elliptical_comps: Tuple[float, float] = (0.0, 0.0), intensity: float = 0.1, effective_radius: float = 0.6, sersic_index: float = 0.6, mass_to_light_ratio: float = 1.0, mass_to_light_gradient: float = 0.0, ): """ Setup a Sersic mass and light profiles. Parameters ---------- centre The (y,x) arc-second coordinates of the profile centre. 
elliptical_comps The first and second ellipticity components of the elliptical coordinate system, (see the module `autogalaxy -> convert.py` for the convention). intensity Overall flux intensity normalisation in the light profiles (electrons per second). effective_radius The circular radius containing half the light of this profile. sersic_index Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated). mass_to_light_ratio The mass-to-light ratio of the light profile. mass_to_light_gradient The mass-to-light radial gradient. """ super().__init__( centre=centre, elliptical_comps=elliptical_comps, intensity=intensity, effective_radius=effective_radius, sersic_index=sersic_index, mass_to_light_ratio=mass_to_light_ratio, ) self.mass_to_light_gradient = mass_to_light_gradient @aa.grid_dec.grid_2d_to_structure @aa.grid_dec.transform @aa.grid_dec.relocate_to_radial_minimum def deflections_2d_via_integral_from(self, grid: aa.type.Grid2DLike): """ Calculate the deflection angles at a given set of arc-second gridded coordinates. Parameters ---------- grid The grid of (y,x) arc-second coordinates the deflection angles are computed on. """ def calculate_deflection_component(npow, index): sersic_constant = self.sersic_constant deflection_grid = self.axis_ratio * grid[:, index] for i in range(grid.shape[0]): deflection_grid[i] *= ( self.intensity * self.mass_to_light_ratio * quad( self.deflection_func, a=0.0, b=1.0, args=( grid[i, 0], grid[i, 1], npow, self.axis_ratio, self.sersic_index, self.effective_radius, self.mass_to_light_gradient, sersic_constant, ), )[0] ) return deflection_grid deflection_y = calculate_deflection_component(1.0, 0) deflection_x = calculate_deflection_component(0.0, 1) return self.rotate_grid_from_reference_frame( np.multiply(1.0, np.vstack((deflection_y, deflection_x)).T) ) @staticmethod def deflection_func( u, y, x, npow, axis_ratio, sersic_index, effective_radius, mass_to_light_gradient, sersic_constant, ): eta_u = np.sqrt(axis_ratio) * np.sqrt( (u * ((x ** 2) + (y ** 2 / (1 - (1 - axis_ratio ** 2) * u)))) ) return ( (((axis_ratio * eta_u) / effective_radius) ** -mass_to_light_gradient) * np.exp( -sersic_constant * (((eta_u / effective_radius) ** (1.0 / sersic_index)) - 1) ) / ((1 - (1 - axis_ratio ** 2) * u) ** (npow + 0.5)) ) @aa.grid_dec.grid_2d_to_structure @aa.grid_dec.transform @aa.grid_dec.relocate_to_radial_minimum def convergence_2d_from(self, grid: aa.type.Grid2DLike): """Calculate the projected convergence at a given set of arc-second gridded coordinates. Parameters ---------- grid The grid of (y,x) arc-second coordinates the convergence is computed on. 
""" return self.convergence_func(self.grid_to_eccentric_radii(grid)) def convergence_func(self, grid_radius: float) -> float: return ( self.mass_to_light_ratio * ( ((self.axis_ratio * grid_radius) / self.effective_radius) ** -self.mass_to_light_gradient ) * self.image_2d_via_radii_from(grid_radius) ) def decompose_convergence_via_mge(self): radii_min = self.effective_radius / 100.0 radii_max = self.effective_radius * 20.0 def sersic_radial_gradient_2D(r): return ( self.mass_to_light_ratio * self.intensity * ( ((self.axis_ratio * r) / self.effective_radius) ** -self.mass_to_light_gradient ) * np.exp( -self.sersic_constant * (((r / self.effective_radius) ** (1.0 / self.sersic_index)) - 1.0) ) ) return self._decompose_convergence_via_mge( func=sersic_radial_gradient_2D, radii_min=radii_min, radii_max=radii_max ) def decompose_convergence_via_cse(self) -> Tuple[List, List]: """ Decompose the convergence of the Sersic profile into singular isothermal elliptical (sie) profiles. This decomposition uses the standard 2d profile of a Sersic mass profile. Parameters ---------- func The function representing the profile that is decomposed into CSEs. radii_min: The minimum radius to fit radii_max: The maximum radius to fit total_sies The number of SIEs used to approximate the input func. sample_points: int (should be larger than 'total_sies') The number of data points to fit Returns ------- Tuple[List, List] A list of amplitudes and core radii of every singular isothernal ellipsoids (sie) the mass profile is decomposed into. """ upper_dex, lower_dex, total_cses, sample_points = cse_settings_from( effective_radius=self.effective_radius, sersic_index=self.sersic_index, sersic_constant=self.sersic_constant, mass_to_light_gradient=self.mass_to_light_gradient, ) scaled_effective_radius = self.effective_radius / np.sqrt(self.axis_ratio) radii_min = scaled_effective_radius / 10.0 ** lower_dex radii_max = scaled_effective_radius * 10.0 ** upper_dex def sersic_radial_gradient_2D(r): return ( self.mass_to_light_ratio * self.intensity * ( ((self.axis_ratio * r) / scaled_effective_radius) ** -self.mass_to_light_gradient ) * np.exp( -self.sersic_constant * ( ((r / scaled_effective_radius) ** (1.0 / self.sersic_index)) - 1.0 ) ) ) return self._decompose_convergence_via_cse_from( func=sersic_radial_gradient_2D, radii_min=radii_min, radii_max=radii_max, total_cses=total_cses, sample_points=sample_points, ) class SphSersicRadialGradient(EllSersicRadialGradient): def __init__( self, centre: Tuple[float, float] = (0.0, 0.0), intensity: float = 0.1, effective_radius: float = 0.6, sersic_index: float = 0.6, mass_to_light_ratio: float = 1.0, mass_to_light_gradient: float = 0.0, ): """ Setup a Sersic mass and light profiles. Parameters ---------- centre The (y,x) arc-second coordinates of the profile centre. intensity Overall flux intensity normalisation in the light profiles (electrons per second). effective_radius The circular radius containing half the light of this profile. sersic_index Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated). mass_to_light_ratio The mass-to-light ratio of the light profile. mass_to_light_gradient The mass-to-light radial gradient. 
""" super().__init__( centre=centre, elliptical_comps=(0.0, 0.0), intensity=intensity, effective_radius=effective_radius, sersic_index=sersic_index, mass_to_light_ratio=mass_to_light_ratio, mass_to_light_gradient=mass_to_light_gradient, ) class EllSersicCore(EllSersic): def __init__( self, centre: Tuple[float, float] = (0.0, 0.0), elliptical_comps: Tuple[float, float] = (0.0, 0.0), effective_radius: float = 0.6, sersic_index: float = 4.0, radius_break: float = 0.01, intensity_break: float = 0.05, gamma: float = 0.25, alpha: float = 3.0, mass_to_light_ratio: float = 1.0, ): """ The elliptical cored-Sersic light profile. Parameters ---------- centre The (y,x) arc-second coordinates of the profile centre. elliptical_comps The first and second ellipticity components of the elliptical coordinate system, (see the module `autogalaxy -> convert.py` for the convention). intensity Overall intensity normalisation of the light profile (units are dimensionless and derived from the data the light profile's image is compared too, which is expected to be electrons per second). effective_radius The circular radius containing half the light of this profile. sersic_index Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated). radius_break The break radius separating the inner power-law (with logarithmic slope gamma) and outer Sersic function. intensity_break The intensity at the break radius. gamma The logarithmic power-law slope of the inner core profiles alpha : Controls the sharpness of the transition between the inner core / outer Sersic profiles. """ super().__init__( centre=centre, elliptical_comps=elliptical_comps, intensity=intensity_break, effective_radius=effective_radius, sersic_index=sersic_index, mass_to_light_ratio=mass_to_light_ratio, ) self.radius_break = radius_break self.intensity_break = intensity_break self.alpha = alpha self.gamma = gamma def deflections_yx_2d_from(self, grid: aa.type.Grid2DLike): return self.deflections_2d_via_mge_from(grid=grid) def image_2d_via_radii_from(self, grid_radii: np.ndarray): """ Calculate the intensity of the cored-Sersic light profile on a grid of radial coordinates. Parameters ---------- grid_radii The radial distance from the centre of the profile. for each coordinate on the grid. 
""" return np.multiply( np.multiply( self.intensity_prime, np.power( np.add( 1, np.power(np.divide(self.radius_break, grid_radii), self.alpha), ), (self.gamma / self.alpha), ), ), np.exp( np.multiply( -self.sersic_constant, ( np.power( np.divide( np.add( np.power(grid_radii, self.alpha), (self.radius_break ** self.alpha), ), (self.effective_radius ** self.alpha), ), (1.0 / (self.alpha * self.sersic_index)), ) ), ) ), ) def decompose_convergence_via_mge(self): radii_min = self.effective_radius / 50.0 radii_max = self.effective_radius * 20.0 def core_sersic_2D(r): return ( self.mass_to_light_ratio * self.intensity_prime * (1.0 + (self.radius_break / r) ** self.alpha) ** (self.gamma / self.alpha) * np.exp( -self.sersic_constant * ( (r ** self.alpha + self.radius_break ** self.alpha) / self.effective_radius ** self.alpha ) ** (1.0 / (self.sersic_index * self.alpha)) ) ) return self._decompose_convergence_via_mge( func=core_sersic_2D, radii_min=radii_min, radii_max=radii_max ) @property def intensity_prime(self): """Overall intensity normalisation in the rescaled Core-Sersic light profiles (electrons per second)""" return ( self.intensity_break * (2.0 ** (-self.gamma / self.alpha)) * np.exp( self.sersic_constant * ( ((2.0 ** (1.0 / self.alpha)) * self.radius_break) / self.effective_radius ) ** (1.0 / self.sersic_index) ) ) class SphSersicCore(EllSersicCore): def __init__( self, centre: Tuple[float, float] = (0.0, 0.0), effective_radius: float = 0.6, sersic_index: float = 4.0, radius_break: float = 0.01, intensity_break: float = 0.05, gamma: float = 0.25, alpha: float = 3.0, ): """ The elliptical cored-Sersic light profile. Parameters ---------- centre The (y,x) arc-second coordinates of the profile centre. intensity Overall intensity normalisation of the light profile (units are dimensionless and derived from the data the light profile's image is compared too, which is expected to be electrons per second). effective_radius The circular radius containing half the light of this profile. sersic_index Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated). radius_break The break radius separating the inner power-law (with logarithmic slope gamma) and outer Sersic function. intensity_break The intensity at the break radius. gamma The logarithmic power-law slope of the inner core profiles alpha : Controls the sharpness of the transition between the inner core / outer Sersic profiles. """ super().__init__( centre=centre, elliptical_comps=(0.0, 0.0), effective_radius=effective_radius, sersic_index=sersic_index, radius_break=radius_break, intensity_break=intensity_break, gamma=gamma, alpha=alpha, ) self.radius_break = radius_break self.intensity_break = intensity_break self.alpha = alpha self.gamma = gamma class EllChameleon(MassProfile, StellarProfile): def __init__( self, centre: Tuple[float, float] = (0.0, 0.0), elliptical_comps: Tuple[float, float] = (0.0, 0.0), intensity: float = 0.1, core_radius_0: float = 0.01, core_radius_1: float = 0.02, mass_to_light_ratio: float = 1.0, ): """ The elliptical Chamelon mass profile. Parameters ---------- centre The (y,x) arc-second coordinates of the profile centre. elliptical_comps The first and second ellipticity components of the elliptical coordinate system, (see the module `autogalaxy -> convert.py` for the convention). 
intensity Overall intensity normalisation of the light profile (units are dimensionless and derived from the data the light profile's image is compared too, which is expected to be electrons per second). core_radius_0 : the core size of the first elliptical cored Isothermal profile. core_radius_1 : core_radius_0 + core_radius_1 is the core size of the second elliptical cored Isothermal profile. We use core_radius_1 here is to avoid negative values. Profile form: mass_to_light_ratio * intensity *\ (1.0 / Sqrt(x^2 + (y/q)^2 + core_radius_0^2) - 1.0 / Sqrt(x^2 + (y/q)^2 + (core_radius_0 + core_radius_1)**2.0)) """ super(EllChameleon, self).__init__( centre=centre, elliptical_comps=elliptical_comps ) super(MassProfile, self).__init__( centre=centre, elliptical_comps=elliptical_comps ) self.mass_to_light_ratio = mass_to_light_ratio self.intensity = intensity self.core_radius_0 = core_radius_0 self.core_radius_1 = core_radius_1 def deflections_yx_2d_from(self, grid: aa.type.Grid2DLike): return self.deflections_2d_via_analytic_from(grid=grid) @aa.grid_dec.grid_2d_to_structure @aa.grid_dec.transform @aa.grid_dec.relocate_to_radial_minimum def deflections_2d_via_analytic_from(self, grid: aa.type.Grid2DLike): """ Calculate the deflection angles at a given set of arc-second gridded coordinates. Following Eq. (15) and (16), but the parameters are slightly different. Parameters ---------- grid The grid of (y,x) arc-second coordinates the deflection angles are computed on. """ factor = ( 2.0 * self.mass_to_light_ratio * self.intensity / (1 + self.axis_ratio) * self.axis_ratio / np.sqrt(1.0 - self.axis_ratio ** 2.0) ) core_radius_0 = np.sqrt( (4.0 * self.core_radius_0 ** 2.0) / (1.0 + self.axis_ratio) ** 2 ) core_radius_1 = np.sqrt( (4.0 * self.core_radius_1 ** 2.0) / (1.0 + self.axis_ratio) ** 2 ) psi0 = psi_from( grid=grid, axis_ratio=self.axis_ratio, core_radius=core_radius_0 ) psi1 = psi_from( grid=grid, axis_ratio=self.axis_ratio, core_radius=core_radius_1 ) deflection_y0 = np.arctanh( np.divide( np.multiply(np.sqrt(1.0 - self.axis_ratio ** 2.0), grid[:, 0]), np.add(psi0, self.axis_ratio ** 2.0 * core_radius_0), ) ) deflection_x0 = np.arctan( np.divide( np.multiply(np.sqrt(1.0 - self.axis_ratio ** 2.0), grid[:, 1]), np.add(psi0, core_radius_0), ) ) deflection_y1 = np.arctanh( np.divide( np.multiply(np.sqrt(1.0 - self.axis_ratio ** 2.0), grid[:, 0]), np.add(psi1, self.axis_ratio ** 2.0 * core_radius_1), ) ) deflection_x1 = np.arctan( np.divide( np.multiply(np.sqrt(1.0 - self.axis_ratio ** 2.0), grid[:, 1]), np.add(psi1, core_radius_1), ) ) deflection_y = np.subtract(deflection_y0, deflection_y1) deflection_x = np.subtract(deflection_x0, deflection_x1) return self.rotate_grid_from_reference_frame( np.multiply(factor, np.vstack((deflection_y, deflection_x)).T) ) @aa.grid_dec.grid_2d_to_structure @aa.grid_dec.transform @aa.grid_dec.relocate_to_radial_minimum def convergence_2d_from(self, grid: aa.type.Grid2DLike): """Calculate the projected convergence at a given set of arc-second gridded coordinates. Parameters ---------- grid The grid of (y,x) arc-second coordinates the convergence is computed on. 
""" return self.convergence_func(self.grid_to_elliptical_radii(grid)) def convergence_func(self, grid_radius: float) -> float: return self.mass_to_light_ratio * self.image_2d_via_radii_from(grid_radius) @aa.grid_dec.grid_2d_to_structure def potential_2d_from(self, grid: aa.type.Grid2DLike): return np.zeros(shape=grid.shape[0]) def image_2d_via_radii_from(self, grid_radii: np.ndarray): """Calculate the intensity of the Chamelon light profile on a grid of radial coordinates. Parameters ---------- grid_radii The radial distance from the centre of the profile. for each coordinate on the grid. """ axis_ratio_factor = (1.0 + self.axis_ratio) ** 2.0 return np.multiply( self.intensity / (1 + self.axis_ratio), np.add( np.divide( 1.0, np.sqrt( np.add( np.square(grid_radii), (4.0 * self.core_radius_0 ** 2.0) / axis_ratio_factor, ) ), ), -np.divide( 1.0, np.sqrt( np.add( np.square(grid_radii), (4.0 * self.core_radius_1 ** 2.0) / axis_ratio_factor, ) ), ), ), ) @property def axis_ratio(self): axis_ratio = super().axis_ratio return axis_ratio if axis_ratio < 0.99999 else 0.99999 def with_new_normalization(self, normalization): mass_profile = copy.copy(self) mass_profile.mass_to_light_ratio = normalization return mass_profile class SphChameleon(EllChameleon): def __init__( self, centre: Tuple[float, float] = (0.0, 0.0), intensity: float = 0.1, core_radius_0: float = 0.01, core_radius_1: float = 0.02, mass_to_light_ratio: float = 1.0, ): """ The spherica; Chameleon mass profile. Profile form: mass_to_light_ratio * intensity *\ (1.0 / Sqrt(x^2 + (y/q)^2 + core_radius_0^2) - 1.0 / Sqrt(x^2 + (y/q)^2 + (core_radius_0 + core_radius_1)**2.0)) Parameters ---------- centre The (y,x) arc-second coordinates of the profile centre. elliptical_comps The first and second ellipticity components of the elliptical coordinate system, (see the module `autogalaxy -> convert.py` for the convention). intensity Overall intensity normalisation of the light profile (units are dimensionless and derived from the data the light profile's image is compared too, which is expected to be electrons per second). core_radius_0 : the core size of the first elliptical cored Isothermal profile. core_radius_1 : core_radius_0 + core_radius_1 is the core size of the second elliptical cored Isothermal profile. We use core_radius_1 here is to avoid negative values. 
""" super().__init__( centre=centre, elliptical_comps=(0.0, 0.0), intensity=intensity, core_radius_0=core_radius_0, core_radius_1=core_radius_1, mass_to_light_ratio=mass_to_light_ratio, ) def cse_settings_from( effective_radius, sersic_index, sersic_constant, mass_to_light_gradient ): if mass_to_light_gradient > 0.5: if effective_radius > 0.2: lower_dex = 6.0 upper_dex = np.min( [np.log10((18.0 / sersic_constant) ** sersic_index), 1.1] ) if sersic_index <= 1.2: total_cses = 50 sample_points = 80 elif sersic_index > 3.8: total_cses = 40 sample_points = 50 lower_dex = 6.5 else: total_cses = 30 sample_points = 50 else: if sersic_index <= 1.2: upper_dex = 1.0 total_cses = 50 sample_points = 80 lower_dex = 4.5 elif sersic_index > 3.8: total_cses = 40 sample_points = 50 lower_dex = 6.0 upper_dex = 1.5 else: upper_dex = 1.1 lower_dex = 6.0 total_cses = 30 sample_points = 50 else: upper_dex = np.min( [ np.log10((23.0 / sersic_constant) ** sersic_index), 0.85 - np.log10(effective_radius), ] ) if (sersic_index <= 0.9) and (sersic_index > 0.8): total_cses = 50 sample_points = 80 upper_dex = np.log10((18.0 / sersic_constant) ** sersic_index) lower_dex = 4.3 + np.log10(effective_radius) elif sersic_index <= 0.8: total_cses = 50 sample_points = 80 upper_dex = np.log10((16.0 / sersic_constant) ** sersic_index) lower_dex = 4.0 + np.log10(effective_radius) elif sersic_index > 3.8: total_cses = 40 sample_points = 50 lower_dex = 4.5 + np.log10(effective_radius) else: lower_dex = 3.5 + np.log10(effective_radius) total_cses = 30 sample_points = 50 return upper_dex, lower_dex, total_cses, sample_points
2.625
3
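Editor's note: the deflection-angle methods above reduce, for each image-plane point, to a 1D quadrature of deflection_func over u in [0, 1]. The following standalone sketch evaluates that quadrature with illustrative parameter values; the Sersic constant uses the common b_n ~ 2n - 1/3 approximation, which is an assumption here (the library computes its own value).

import numpy as np
from scipy.integrate import quad

def deflection_func(u, y, x, npow, axis_ratio, sersic_index, effective_radius, sersic_constant):
    # Elliptical radius of the point (y, x) along the integration variable u.
    eta_u = np.sqrt(axis_ratio) * np.sqrt(
        u * (x ** 2 + y ** 2 / (1 - (1 - axis_ratio ** 2) * u))
    )
    return np.exp(
        -sersic_constant * ((eta_u / effective_radius) ** (1.0 / sersic_index) - 1)
    ) / ((1 - (1 - axis_ratio ** 2) * u) ** (npow + 0.5))

# Illustrative values: n=1 (exponential), axis ratio q=0.8, R_e=0.6".
sersic_index = 1.0
sersic_constant = 2.0 * sersic_index - 1.0 / 3.0  # b_n approximation (assumed)
integral, _ = quad(
    deflection_func, 0.0, 1.0,
    args=(0.5, 0.5, 1.0, 0.8, sersic_index, 0.6, sersic_constant),
)
print("quadrature over u:", integral)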
installer/core/terraform/resources/variable.py
Diffblue-benchmarks/pacbot
1,165
12849
from core.terraform.resources import BaseTerraformVariable


class TerraformVariable(BaseTerraformVariable):
    """
    Base resource class for Terraform tfvar variable

    Attributes:
        variable_dict_input (dict/none): Var dict values
        available_args (dict): Instance configurations
        variable_type (str): Define the variable i.e. terraform list var or terraform dict var etc
    """
    variable_dict_input = None
    variable_type = None
    available_args = {
        'variable_name': {'required': True},
        'variable_type': {'required': False},
        'default_value': {'required': False}
    }
2.640625
3
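Editor's note: the available_args dict above is a declarative spec of which constructor arguments are required. A hypothetical, standalone illustration of that pattern (validate_args is not part of the repo; BaseTerraformVariable presumably does something similar):

def validate_args(spec, **kwargs):
    # Reject calls missing a required argument or passing an unknown one.
    missing = [k for k, v in spec.items() if v.get('required') and k not in kwargs]
    unknown = [k for k in kwargs if k not in spec]
    if missing or unknown:
        raise ValueError("missing: %s, unknown: %s" % (missing, unknown))
    return kwargs

spec = {
    'variable_name': {'required': True},
    'variable_type': {'required': False},
    'default_value': {'required': False},
}
print(validate_args(spec, variable_name="region", default_value="us-east-1"))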
bfs.py
mpHarm88/Algorithms-and-Data-Structures-In-Python
0
12850
class Node(object):
    def __init__(self, name):
        self.name = name
        self.adjacencyList = []
        self.visited = False
        self.predecessor = None


class BreadthFirstSearch(object):
    def bfs(self, startNode):
        queue = []
        queue.append(startNode)
        startNode.visited = True

        # BFS uses a queue; DFS uses a stack (though DFS is usually implemented with recursion).
        while queue:
            actualNode = queue.pop(0)
            print("%s " % actualNode.name)

            for n in actualNode.adjacencyList:
                if not n.visited:
                    n.visited = True
                    queue.append(n)


node1 = Node("A")
node2 = Node("B")
node3 = Node("C")
node4 = Node("D")
node5 = Node("E")

node1.adjacencyList.append(node2)
node1.adjacencyList.append(node3)
node2.adjacencyList.append(node4)
node4.adjacencyList.append(node5)

bfs = BreadthFirstSearch()
bfs.bfs(node1)
4.09375
4
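Editor's note: list.pop(0) in the traversal above is O(n) per dequeue; collections.deque gives O(1) popleft with otherwise identical behaviour. A sketch of the same traversal (reusing the Node class from the snippet above; not part of the original file):

from collections import deque

def bfs(start_node):
    # Same visit order as BreadthFirstSearch.bfs, but deque.popleft() is O(1).
    queue = deque([start_node])
    start_node.visited = True
    order = []
    while queue:
        node = queue.popleft()
        order.append(node.name)
        for neighbour in node.adjacencyList:
            if not neighbour.visited:
                neighbour.visited = True
                queue.append(neighbour)
    return order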
supervisor/dbus/network/connection.py
peddamat/home-assistant-supervisor-test
1
12851
"""Connection object for Network Manager.""" from ipaddress import ip_address, ip_interface from typing import Optional from ...const import ATTR_ADDRESS, ATTR_PREFIX from ...utils.gdbus import DBus from ..const import ( DBUS_ATTR_ADDRESS_DATA, DBUS_ATTR_CONNECTION, DBUS_ATTR_GATEWAY, DBUS_ATTR_ID, DBUS_ATTR_IP4CONFIG, DBUS_ATTR_IP6CONFIG, DBUS_ATTR_NAMESERVER_DATA, DBUS_ATTR_NAMESERVERS, DBUS_ATTR_STATE, DBUS_ATTR_TYPE, DBUS_ATTR_UUID, DBUS_NAME_CONNECTION_ACTIVE, DBUS_NAME_IP4CONFIG, DBUS_NAME_IP6CONFIG, DBUS_NAME_NM, DBUS_OBJECT_BASE, ) from ..interface import DBusInterfaceProxy from .configuration import IpConfiguration class NetworkConnection(DBusInterfaceProxy): """NetworkConnection object for Network Manager.""" def __init__(self, object_path: str) -> None: """Initialize NetworkConnection object.""" self.object_path = object_path self.properties = {} self._ipv4: Optional[IpConfiguration] = None self._ipv6: Optional[IpConfiguration] = None @property def id(self) -> str: """Return the id of the connection.""" return self.properties[DBUS_ATTR_ID] @property def type(self) -> str: """Return the type of the connection.""" return self.properties[DBUS_ATTR_TYPE] @property def uuid(self) -> str: """Return the uuid of the connection.""" return self.properties[DBUS_ATTR_UUID] @property def state(self) -> int: """Return the state of the connection.""" return self.properties[DBUS_ATTR_STATE] @property def setting_object(self) -> int: """Return the connection object path.""" return self.properties[DBUS_ATTR_CONNECTION] @property def ipv4(self) -> Optional[IpConfiguration]: """Return a ip4 configuration object for the connection.""" return self._ipv4 @property def ipv6(self) -> Optional[IpConfiguration]: """Return a ip6 configuration object for the connection.""" return self._ipv6 async def connect(self) -> None: """Get connection information.""" self.dbus = await DBus.connect(DBUS_NAME_NM, self.object_path) self.properties = await self.dbus.get_properties(DBUS_NAME_CONNECTION_ACTIVE) # IPv4 if self.properties[DBUS_ATTR_IP4CONFIG] != DBUS_OBJECT_BASE: ip4 = await DBus.connect(DBUS_NAME_NM, self.properties[DBUS_ATTR_IP4CONFIG]) ip4_data = await ip4.get_properties(DBUS_NAME_IP4CONFIG) self._ipv4 = IpConfiguration( ip_address(ip4_data[DBUS_ATTR_GATEWAY]) if ip4_data.get(DBUS_ATTR_GATEWAY) else None, [ ip_address(nameserver[ATTR_ADDRESS]) for nameserver in ip4_data.get(DBUS_ATTR_NAMESERVER_DATA, []) ], [ ip_interface(f"{address[ATTR_ADDRESS]}/{address[ATTR_PREFIX]}") for address in ip4_data.get(DBUS_ATTR_ADDRESS_DATA, []) ], ) # IPv6 if self.properties[DBUS_ATTR_IP6CONFIG] != DBUS_OBJECT_BASE: ip6 = await DBus.connect(DBUS_NAME_NM, self.properties[DBUS_ATTR_IP6CONFIG]) ip6_data = await ip6.get_properties(DBUS_NAME_IP6CONFIG) self._ipv6 = IpConfiguration( ip_address(ip6_data[DBUS_ATTR_GATEWAY]) if ip6_data.get(DBUS_ATTR_GATEWAY) else None, [ ip_address(bytes(nameserver)) for nameserver in ip6_data.get(DBUS_ATTR_NAMESERVERS) ], [ ip_interface(f"{address[ATTR_ADDRESS]}/{address[ATTR_PREFIX]}") for address in ip6_data.get(DBUS_ATTR_ADDRESS_DATA, []) ], )
2.609375
3
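Editor's note: the connect() coroutine above turns NetworkManager's AddressData/NameserverData payloads into stdlib ipaddress objects. A runnable sketch of just that parsing step, with made-up sample values and plain string keys standing in for the DBUS_ATTR_* constants, so no D-Bus is needed:

from ipaddress import ip_address, ip_interface

# Sample payload shaped like an NM IP4Config properties dict (values invented).
ip4_data = {
    "Gateway": "192.168.1.1",
    "NameserverData": [{"address": "1.1.1.1"}],
    "AddressData": [{"address": "192.168.1.20", "prefix": 24}],
}

gateway = ip_address(ip4_data["Gateway"]) if ip4_data.get("Gateway") else None
nameservers = [ip_address(ns["address"]) for ns in ip4_data.get("NameserverData", [])]
addresses = [
    ip_interface("{}/{}".format(a["address"], a["prefix"]))
    for a in ip4_data.get("AddressData", [])
]
print(gateway, nameservers, addresses)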
quark/databricks.py
mistsys/quark
2
12852
from __future__ import print_function, absolute_import

from .beats import Beat
from StringIO import StringIO
import sys
import os
import json
import urllib
import webbrowser

try:
    import pycurl
except:
    print("Need pycurl dependency to use qubole as the deployment platform. Run pip install pycurl in your virtualenv and try this again.")
    sys.exit(1)


class Databricks:
    def __init__(self, config, options):
        self.config = config
        self.options = options
        projectsDir = self.config.get(self.options.env, "projects_dir")
        schemasDir = os.path.join(projectsDir, "schemas")
        schemasFile = os.path.join(schemasDir, "beats.schema.json")
        if os.path.exists(schemasFile):
            self.beats = Beat(file(schemasFile).read())

    def _q_config(self, item):
        return self.config.get(self.options.env, "databricks-{}".format(item))

    def _do_request(self, method, path, base_url=None, **data):
        # Only using pycurl because that was the example that was around; will
        # port to requests someday. pycurl is supposed to be faster, so oh well.
        c = pycurl.Curl()
        # auth_token = self._q_config("auth_token")
        username = self._q_config("username")
        password = self._q_config("password")
        if base_url == None:
            base_url = self.config.get(self.options.env, "master")
        url = base_url + "/" + path
        buffer = StringIO()
        c.setopt(c.WRITEDATA, buffer)
        print("Using", url, file=sys.stderr)
        c.setopt(pycurl.URL, url)
        c.setopt(pycurl.HTTPHEADER, ['Accept:application/json'])
        # c.setopt(pycurl.HTTPHEADER, ["X-AUTH-TOKEN: " + auth_token, "Content-Type:application/json", "Accept: application/json, text/plain"])

        ## Note: Only POST and GET have been tested...
        ## It's not very obvious with pycurl how to do this properly with PUT and DELETE.
        ## Review this if these methods ever need to be added:
        ## http://www.programcreek.com/python/example/2132/pycurl.HTTPHEADER
        if method.lower() == "post":
            c.setopt(pycurl.POST, 1)
            post_data = urllib.urlencode(data)
            print(post_data)
            c.setopt(pycurl.POSTFIELDS, post_data)
        elif method.lower() == "get":
            c.setopt(pycurl.HTTPGET, 1)
        elif method.lower() == "delete":
            c.setopt(pycurl.DELETE, 1)
        elif method.lower() == "put":
            # c.setopt(pycurl.UPLOAD, 1)
            post_data = urllib.urlencode(data)
            c.setopt(pycurl.CUSTOMREQUEST, "PUT")
            c.setopt(pycurl.POSTFIELDS, post_data)
        elif method.lower() == "head":
            c.setopt(pycurl.NOBODY, 1)
        else:
            print("Unknown method ", method)
            sys.exit(1)
        if username != None and password != None:
            c.setopt(pycurl.USERPWD, '%s:%s' % (username, password))
        c.perform()
        c.close()
        body = buffer.getvalue()
        return body

    def _get_cluster_id(self):
        cluster_id = self._q_config("cluster_id")
        assert cluster_id is not None
        return cluster_id

    def invoke_task(self, name, *args):
        if args == (None,):
            getattr(self, name)()
        else:
            getattr(self, name)(*args)

    def deploy(self, asset_path, *args):
        # Use multipart upload to libraries/upload
        print("TBD")

    def logs(self, job_id):
        print("TBD")

    def status(self, job_id):
        print("TBD")

    def notebook(self):
        print("TBD")

    def _get_clusters(self):
        resp_body = self._do_request("GET", "clusters/list")
        j = json.loads(resp_body)
        return j

    def describecluster(self, name):
        clusters = self._get_clusters()
        for cluster in clusters:
            if cluster['name'] == name:
                print(cluster)

    def lsclusters(self):
        clusters = self._get_clusters()
        if len(clusters) == 0:
            print("No clusters created")
        for cluster in clusters:
            print(cluster)

    def mkcluster(self, name, memory_gb=6, use_spot=True):
        resp_body = self._do_request(
            "POST", "clusters/create", name=name, memoryGB=memory_gb, useSpot=use_spot
        )
        print(resp_body)

    def lslibraries(self):
        resp_body = self._do_request("GET", "libraries/list")
        j = json.loads(resp_body)
        print(j)

    def describelibraries(self):
        resp_body = self._do_request("GET", "libraries/status")
        j = json.loads(resp_body)
        print(j)

    def rmlibrary(self, library_id):
        resp_body = self._do_request("DELETE", "clusters/create", libraryId=library_id)
        print(resp_body)

    def attachlibrary(self, library_id, cluster_id):
        print("TBD")

    def schedule(self, asset_path, schedule_id, schedule_iso8601):
        print("TBD")
2.21875
2
appimagebuilder/builder/deploy/apt/venv.py
mssalvatore/appimage-builder
0
12853
# Copyright 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.

import fnmatch
import hashlib
import logging
import os
import subprocess
from pathlib import Path
from urllib import request

from appimagebuilder.common import shell
from .package import Package

DEPENDS_ON = ["dpkg-deb", "apt-get", "apt-key", "fakeroot", "apt-cache"]


class Venv:
    def __init__(
        self,
        base_path: str,
        sources: [str],
        keys: [str],
        architectures: [],
        user_options: {} = None,
    ):
        self.logger = logging.getLogger("apt")
        self._deps = shell.resolve_commands_paths(DEPENDS_ON)

        self.sources = sources
        self.keys = keys
        self.architectures = architectures
        self.user_options = user_options

        self._generate_paths(base_path)
        self._write_apt_conf(user_options, architectures)
        self._write_sources_list(sources)
        self._write_keys(keys)
        self._write_dpkg_arch(architectures)

    def _generate_paths(self, base_path):
        self._base_path = Path(base_path).absolute()
        self._apt_conf_path = self._base_path / "apt.conf"
        self._apt_conf_parts_path = self._base_path / "apt.conf.d"
        self._apt_sources_list_path = self._base_path / "sources.list"
        self._apt_sources_list_parts_path = self._base_path / "sources.list.d"
        self._apt_preferences_parts_path = self._base_path / "preferences.d"
        self._apt_key_parts_path = self._base_path / "keys"
        self._dpkg_path = self._base_path / "dpkg"
        self._dpkg_status_path = self._dpkg_path / "status"
        self._apt_archives_path = self._base_path / "archives"

        self._base_path.mkdir(parents=True, exist_ok=True)
        self._apt_conf_parts_path.mkdir(parents=True, exist_ok=True)
        self._apt_preferences_parts_path.mkdir(parents=True, exist_ok=True)
        self._apt_key_parts_path.mkdir(parents=True, exist_ok=True)
        self._dpkg_path.mkdir(parents=True, exist_ok=True)
        self._dpkg_status_path.touch(exist_ok=True)

    def _write_apt_conf(self, user_options, architectures: [str]):
        options = {
            "Dir": self._base_path,
            "Dir::State": self._base_path,
            "Dir::Cache": self._base_path,
            "Dir::Etc::Main": self._apt_conf_path,
            "Dir::Etc::Parts": self._apt_conf_parts_path,
            "Dir::Etc::SourceList": self._apt_sources_list_path,
            "Dir::Etc::SourceListParts": self._apt_sources_list_parts_path,
            "Dir::Etc::PreferencesParts": self._apt_preferences_parts_path,
            "Dir::Etc::TrustedParts": self._apt_key_parts_path,
            "Dir::State::status": self._dpkg_status_path,
            "Dir::Ignore-Files-Silently": False,
            "APT::Install-Recommends": False,
            "APT::Install-Suggests": False,
            "APT::Immediate-Configure": False,
            "APT::Architecture": architectures[0],
            "APT::Architectures": architectures,
            "Acquire::Languages": "none",
        }

        if user_options:
            options.update(user_options)

        # write apt.conf
        with open(self._apt_conf_path, "w") as f:
            for k, v in options.items():
                if isinstance(v, str):
                    f.write('%s "%s";\n' % (k, v))
                    continue

                if isinstance(v, list):
                    f.write("%s {" % k)
                    for sv in v:
                        f.write('"%s"; ' % sv)
                    f.write("}\n")
                    continue

                f.write("%s %s;\n" % (k, v))

    def _write_sources_list(self, sources):
        with open(self._apt_sources_list_path, "w") as f:
            for line in sources:
                f.write("%s\n" % line)

    def _write_keys(self, keys: [str]):
        for key_url in keys:
            key_url_hash = hashlib.md5(key_url.encode()).hexdigest()
            key_path = os.path.join(self._apt_key_parts_path, "%s.asc" % key_url_hash)
            if not os.path.exists(key_path):
                self.logger.info("Download key file: %s" % key_url)
                request.urlretrieve(key_url, key_path)

    def _get_environment(self):
        env = os.environ.copy()
        env["APT_CONFIG"] = self._apt_conf_path
        env["DEBIAN_FRONTEND"] = "noninteractive"
        return env

    def set_installed_packages(self, packages):
        with open(self._dpkg_status_path, "w") as f:
            for package in packages:
                f.write(
                    "Package: %s\n"
                    "Status: install ok installed\n"
                    "Version: %s\n"
                    "Architecture: %s\n"
                    "\n" % (package.name, package.version, package.arch)
                )

    def _run_apt_cache_show(self, package_names: [str]):
        if not package_names:
            return None

        command = "{apt-cache} show %s" % " ".join(package_names)
        command = command.format(**self._deps)
        self.logger.debug(command)
        _proc = subprocess.run(
            command, stdout=subprocess.PIPE, shell=True, env=self._get_environment()
        )
        shell.assert_successful_result(_proc)
        return _proc

    def update(self) -> None:
        command = "apt-get update"
        self.logger.info(command)
        _proc = subprocess.run(command, shell=True, env=self._get_environment())
        shell.assert_successful_result(_proc)

    def search_names(self, patterns: [str]):
        output = self._run_apt_cache_pkgnames()
        packages = output.stdout.decode("utf-8").splitlines()

        filtered_packages = []
        for pattern in patterns:
            filtered_packages.extend(fnmatch.filter(packages, pattern))

        return filtered_packages

    def _run_apt_cache_pkgnames(self):
        command = "{apt-cache} pkgnames".format(**self._deps)
        self.logger.debug(command)
        proc = subprocess.run(
            command, stdout=subprocess.PIPE, shell=True, env=self._get_environment()
        )
        shell.assert_successful_result(proc)
        return proc

    def resolve_packages(self, packages: [Package]) -> [Package]:
        packages_str = [str(package) for package in packages]
        output = self._run_apt_get_install_download_only(packages_str)
        stdout_str = output.stderr.decode("utf-8")

        installed_packages = []
        for line in stdout_str.splitlines():
            if line.startswith("Dequeuing") and line.endswith(".deb"):
                file_path = Path(line.split(" ")[1])
                installed_packages.append(Package.from_file_path(file_path))

        return installed_packages

    def _run_apt_get_install_download_only(self, packages: [str]):
        command = (
            "{apt-get} install -y --no-install-recommends --download-only -o Debug::pkgAcquire=1 "
            "{packages}".format(**self._deps, packages=" ".join(packages))
        )
        self.logger.debug(command)
        command = subprocess.run(
            command,
            stderr=subprocess.PIPE,
            shell=True,
            env=self._get_environment(),
        )
        shell.assert_successful_result(command)
        return command

    def resolve_archive_paths(self, packages: [Package]):
        paths = [
            self._apt_archives_path / pkg.get_expected_file_name() for pkg in packages
        ]
        return paths

    def extract_package(self, package, target):
        path = self._apt_archives_path / package.get_expected_file_name()
        command = "{dpkg-deb} -x {archive} {directory}".format(
            **self._deps, archive=path, directory=target
        )
        self.logger.debug(command)
        output = subprocess.run(command, shell=True, env=self._get_environment())
        shell.assert_successful_result(output)

    def _write_dpkg_arch(self, architectures: [str]):
        with open(self._dpkg_path / "arch", "w") as f:
            for arch in architectures:
                f.write("%s\n" % arch)

    def search_packages(self, names):
        packages = []
        pkg_name = None
        pkg_version = None
        pkg_arch = None
        output = self._run_apt_cache_show(names)
        for line in output.stdout.decode("utf-8").splitlines():
            if line.startswith("Package:"):
                pkg_name = line.split(" ", maxsplit=2)[1]
            if line.startswith("Architecture"):
                pkg_arch = line.split(" ", maxsplit=2)[1]
            if line.startswith("Version:"):
                pkg_version = line.split(" ", maxsplit=2)[1]

            # empty lines indicate the end of a package description block
            if not line and pkg_name:
                packages.append(Package(pkg_name, pkg_version, pkg_arch))
                pkg_name = None
                pkg_arch = None
                pkg_version = None

        # include the final block if the output does not end with an empty line
        if pkg_name:
            packages.append(Package(pkg_name, pkg_version, pkg_arch))

        return packages
2
2
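Editor's note: search_packages above parses blank-line-delimited stanzas from `apt-cache show` output. A standalone sketch of that parsing pattern run against a canned sample instead of a subprocess (sample values invented; tuples stand in for the repo's Package class):

sample = """Package: zlib1g
Version: 1:1.2.11
Architecture: amd64

Package: libpng16-16
Version: 1.6.37
Architecture: amd64
"""

def parse_packages(text):
    packages, fields = [], {}
    for line in text.splitlines():
        if not line:  # a blank line ends a package stanza
            if fields:
                packages.append(
                    (fields.get("Package"), fields.get("Version"), fields.get("Architecture"))
                )
                fields = {}
            continue
        key, _, value = line.partition(":")
        fields[key] = value.strip()
    if fields:  # trailing stanza without a final blank line
        packages.append(
            (fields.get("Package"), fields.get("Version"), fields.get("Architecture"))
        )
    return packages

print(parse_packages(sample))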
tests/test_assertion_method.py
katakumpo/nicepy
0
12854
# -*- coding: utf-8 *-* import logging from unittest import TestCase from nicepy import assert_equal_struct, multi_assert_equal_struct, pretty_repr, permuteflat log = logging.getLogger(__name__) class Foo(object): def __init__(self, **kwargs): for k, v in kwargs.iteritems(): self[k] = v def __setitem__(self, name, value): # helper to add attributes per self[attr] = value -> self.attr == value setattr(self, name, value) def __repr__(self): return pretty_repr(self, ignore_own_repr=True) class TestAssertEqualStruct(TestCase): def run_assert(self, args, expected_msg=None): log.debug('args: %s' % str(args)) msg = None try: assert_equal_struct(*args) except AssertionError as e: msg = e.message log.debug('msg: %s' % msg) self.assertEqual(msg, expected_msg) def check(self, actual_classes=(list,), expected_classes=(list,), expected_obj=None, expected_kwargs={}, working_obj=None, working_kwargs={}, failing_obj=None, failing_kwargs={}, failure_msg=None, namepaths=None, expected_namepaths=None): for actual_cls, expected_cls in permuteflat(actual_classes, expected_classes): expected_obj = expected_obj or expected_cls(**expected_kwargs) working_obj = working_obj or actual_cls(**working_kwargs) self.run_assert((working_obj, expected_obj, namepaths, expected_namepaths)) failing_obj = failing_obj or actual_cls(**failing_kwargs) self.run_assert((failing_obj, expected_obj, namepaths, expected_namepaths), failure_msg) def test_directly(self): """ *assert_equal_struct* can compare similar flat structures directly. """ self.check(actual_classes=(dict, Foo), expected_classes=(dict, Foo), expected_kwargs=dict(x=1), working_kwargs=dict(x=1, y=2), failing_kwargs=dict(x=3, y=2), failure_msg='actual values != expected values:\n\tx: 3 != 1') self.check(expected_obj=[1], working_obj=[1, 2], failing_obj=[3, 2], failure_msg='actual values != expected values:\n\t0: 3 != 1') def test_with_namepaths(self): """ With namepaths *assert_equal_struct* can compare similar structures and structures with lists of values in full depth. This ignores all additional paths at the expected object. """ self.check(actual_classes=(dict, Foo), expected_classes=(dict, Foo), expected_kwargs=dict(x=1, y=4), namepaths=['x'], working_kwargs=dict(x=1, y=2), failing_kwargs=dict(x=3, y=2), failure_msg='actual values != expected values:\n\tx: 3 != 1') self.check(actual_classes=(dict, Foo), expected_obj=[1, 4], namepaths=['x'], working_kwargs=dict(x=1, y=2), failing_kwargs=dict(x=3, y=2), failure_msg='actual values != expected values:\n\tx: 3 != 1') self.check(expected_obj=[1, 4], namepaths=['0'], working_obj=[1, 2], failing_obj=[3, 2], failure_msg='actual values != expected values:\n\t0: 3 != 1') def test_with_namepaths_and_expected_namepaths(self): """ Like just with namepaths, the values are sometimes at other paths at the expected object and will be compared using expected_namepaths in same order as namepaths. 
""" self.check(actual_classes=(dict, Foo), expected_classes=(dict, Foo), expected_kwargs=dict(a=1, b=4), namepaths=['x'], expected_namepaths=['a'], working_kwargs=dict(x=1, y=2), failing_kwargs=dict(x=3, y=2), failure_msg='actual values != expected values:\n\tx != a: 3 != 1') self.check(actual_classes=(dict, Foo), expected_obj=[4, 1], namepaths=['x'], expected_namepaths=['1'], working_kwargs=dict(x=1, y=2), failing_kwargs=dict(x=3, y=2), failure_msg='actual values != expected values:\n\tx != 1: 3 != 1') self.check(expected_obj=[4, 1], namepaths=['0'], expected_namepaths=['1'], working_obj=[1, 2], failing_obj=[3, 2], failure_msg='actual values != expected values:\n\t0 != 1: 3 != 1') class TestMultiAssertEqualStruct(TestCase): def run_assert(self, args, expected_msg=None): log.debug('args: %s' % str(args)) msg = None try: multi_assert_equal_struct(*args) except AssertionError as e: msg = e.message log.debug('msg: %s' % msg) self.assertEqual(msg, expected_msg) def check(self, actual_classes=(list,), expected_classes=(list,), expected_objs=None, expected_kwargs_list=[], working_objs=None, working_kwargs_list=[], failing_objs=None, failing_kwargs_list=[], failure_msg=None, namepaths=None, expected_namepaths=None): for actual_cls1, actual_cls2, expected_cls1, expected_cls2 in \ permuteflat(*([actual_classes] * 2 + [expected_classes] * 2)): if not expected_objs: expected_objs = (expected_cls1(**expected_kwargs_list[0]), expected_cls2(**expected_kwargs_list[1])) if not working_objs: working_objs = (actual_cls1(**working_kwargs_list[0]), actual_cls2(**working_kwargs_list[1])) self.run_assert((working_objs, expected_objs, namepaths, expected_namepaths)) if not failing_objs: failing_objs = (actual_cls1(**failing_kwargs_list[0]), actual_cls2(**failing_kwargs_list[1])) self.run_assert((failing_objs, expected_objs, namepaths, expected_namepaths), failure_msg) def test_directly(self): """ *multi_assert_equal_struct* can compare multiple similar flat structures directly. """ self.check(actual_classes=(dict, Foo), expected_classes=(dict, Foo), expected_kwargs_list=[dict(x=1), dict(x=2, y=3)], working_kwargs_list=[dict(x=1, y=0), dict(x=2, y=3)], failing_kwargs_list=[dict(x=4, y=0), dict(x=2, y=5)], failure_msg='Multi-assert failed:\n' \ 'Index 0: actual values != expected values:\n\tx: 4 != 1\n'\ 'Index 1: actual values != expected values:\n\ty: 5 != 3') self.check(expected_objs=[[1], [2, 3]], working_objs=[[1, 0], [2, 3]], failing_objs=[[4, 0], [2, 5]], failure_msg='Multi-assert failed:\n' \ 'Index 0: actual values != expected values:\n\t0: 4 != 1\n'\ 'Index 1: actual values != expected values:\n\t1: 5 != 3') def test_with_namepaths(self): """ With namepaths *multi_assert_equal_struct* can compare multiple similar structures and structures with lists of values in full depth. This ignores all additional paths at the expected objects. 
""" self.check(actual_classes=(dict, Foo), expected_classes=(dict, Foo), expected_kwargs_list=[dict(x=1), dict(x=2, y=3)], working_kwargs_list=[dict(x=1, y=0), dict(x=2)], failing_kwargs_list=[dict(x=4, y=0), dict(x=5)], namepaths=['x'], failure_msg='Multi-assert failed:\n' \ 'Index 0: actual values != expected values:\n\tx: 4 != 1\n'\ 'Index 1: actual values != expected values:\n\tx: 5 != 2') self.check(actual_classes=(dict, Foo), expected_objs=[[1], [2, 0]], working_kwargs_list=[dict(x=1, y=5), dict(x=2)], failing_kwargs_list=[dict(x=3, y=5), dict(x=4)], namepaths=['x'], failure_msg='Multi-assert failed:\n' \ 'Index 0: actual values != expected values:\n\tx: 3 != 1\n'\ 'Index 1: actual values != expected values:\n\tx: 4 != 2') self.check(expected_objs=[[1], [2, 3]], working_objs=[[1, 0], [2, 0]], failing_objs=[[4, 0], [5, 0]], namepaths=['0'], failure_msg='Multi-assert failed:\n' \ 'Index 0: actual values != expected values:\n\t0: 4 != 1\n'\ 'Index 1: actual values != expected values:\n\t0: 5 != 2') def test_with_namepaths_and_expected_namepaths(self): """ Like just with namepaths, the values are sometimes at other paths at the expected object and will be compared using expected_namepaths in same order as namepaths. """ self.check(actual_classes=(dict, Foo), expected_classes=(dict, Foo), expected_kwargs_list=[dict(y=1), dict(y=2, x=3)], working_kwargs_list=[dict(x=1, y=0), dict(x=2)], failing_kwargs_list=[dict(x=4, y=0), dict(x=5)], namepaths=['x'], expected_namepaths=['y'], failure_msg='Multi-assert failed:\n' \ 'Index 0: actual values != expected values:\n\tx != y: 4 != 1\n'\ 'Index 1: actual values != expected values:\n\tx != y: 5 != 2') self.check(actual_classes=(dict, Foo), expected_objs=[[0, 1], [0, 2]], working_kwargs_list=[dict(x=1, y=5), dict(x=2)], failing_kwargs_list=[dict(x=3, y=5), dict(x=4)], namepaths=['x'], expected_namepaths=['1'], failure_msg='Multi-assert failed:\n' \ 'Index 0: actual values != expected values:\n\tx != 1: 3 != 1\n'\ 'Index 1: actual values != expected values:\n\tx != 1: 4 != 2') self.check(expected_objs=[[1, 2], [3, 4]], working_objs=[[2, 1], [4, 3]], failing_objs=[[2, 5], [6, 3]], namepaths=['0', '1'], expected_namepaths=['1', '0'], failure_msg='Multi-assert failed:\n' \ 'Index 0: actual values != expected values:\n\t1 != 0: 5 != 1\n'\ 'Index 1: actual values != expected values:\n\t0 != 1: 6 != 4')
2.65625
3
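Editor's note: the tests above exercise nicepy's assert_equal_struct, which diffs selected paths between an actual and an expected structure. The following is not nicepy's implementation, only a minimal standalone sketch of the comparison contract the failure messages imply:

def get_path(obj, path):
    # Resolve a namepath against a list (index), dict (key) or object (attribute).
    if isinstance(obj, list):
        return obj[int(path)]
    if isinstance(obj, dict):
        return obj[path]
    return getattr(obj, path)

def assert_equal_struct_sketch(actual, expected, namepaths, expected_namepaths=None):
    expected_namepaths = expected_namepaths or namepaths
    errors = []
    for a_path, e_path in zip(namepaths, expected_namepaths):
        a, e = get_path(actual, a_path), get_path(expected, e_path)
        if a != e:
            label = a_path if a_path == e_path else "%s != %s" % (a_path, e_path)
            errors.append("\t%s: %s != %s" % (label, a, e))
    if errors:
        raise AssertionError("actual values != expected values:\n" + "\n".join(errors))

assert_equal_struct_sketch({"x": 1, "y": 2}, {"x": 1, "y": 4}, ["x"])  # passes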
usaspending_api/awards/migrations/0074_auto_20170320_1607.py
toolness/usaspending-api
1
12855
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-03-20 16:07
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('awards', '0073_auto_20170320_1455'),
    ]

    operations = [
        migrations.AlterField(
            model_name='award',
            name='fain',
            field=models.CharField(blank=True, db_index=True, help_text='An identification code assigned to each financial assistance award for tracking purposes. The FAIN is tied to that award (and all future modifications to that award) throughout the award’s life. Each FAIN is assigned by an agency. Within an agency, FAIN are unique: each new award must be issued a new FAIN. FAIN stands for Federal Award Identification Number, though the digits are letters, not numbers.', max_length=30, null=True),
        ),
        migrations.AlterField(
            model_name='award',
            name='period_of_performance_current_end_date',
            field=models.DateField(db_index=True, help_text='The current, not original, period of performance end date', null=True, verbose_name='End Date'),
        ),
        migrations.AlterField(
            model_name='award',
            name='period_of_performance_start_date',
            field=models.DateField(db_index=True, help_text='The start date for the period of performance', null=True, verbose_name='Start Date'),
        ),
        migrations.AlterField(
            model_name='award',
            name='piid',
            field=models.CharField(blank=True, db_index=True, help_text='Procurement Instrument Identifier - A unique identifier assigned to a federal contract, purchase order, basic ordering agreement, basic agreement, and blanket purchase agreement. It is used to track the contract, and any modifications or transactions related to it. After October 2017, it is between 13 and 17 digits, both letters and numbers.', max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='award',
            name='potential_total_value_of_award',
            field=models.DecimalField(blank=True, db_index=True, decimal_places=2, help_text='The sum of the potential_value_of_award from associated transactions', max_digits=20, null=True, verbose_name='Potential Total Value of Award'),
        ),
        migrations.AlterField(
            model_name='award',
            name='total_obligation',
            field=models.DecimalField(db_index=True, decimal_places=2, help_text='The amount of money the government is obligated to pay for the award', max_digits=15, null=True, verbose_name='Total Obligated'),
        ),
        migrations.AlterField(
            model_name='award',
            name='total_outlay',
            field=models.DecimalField(db_index=True, decimal_places=2, help_text='The total amount of money paid out for this award', max_digits=15, null=True),
        ),
        migrations.AlterField(
            model_name='award',
            name='type',
            field=models.CharField(choices=[('U', 'Unknown Type'), ('02', 'Block Grant'), ('03', 'Formula Grant'), ('04', 'Project Grant'), ('05', 'Cooperative Agreement'), ('06', 'Direct Payment for Specified Use'), ('07', 'Direct Loan'), ('08', 'Guaranteed/Insured Loan'), ('09', 'Insurance'), ('10', 'Direct Payment unrestricted'), ('11', 'Other'), ('A', 'BPA Call'), ('B', 'Purchase Order'), ('C', 'Delivery Order'), ('D', 'Definitive Contract')], db_index=True, default='U', help_text='The mechanism used to distribute funding. The federal government can distribute funding in several forms. These award types include contracts, grants, loans, and direct payments.', max_length=5, null=True, verbose_name='Award Type'),
        ),
        migrations.AlterField(
            model_name='award',
            name='uri',
            field=models.CharField(blank=True, db_index=True, help_text='The uri of the award', max_length=70, null=True),
        ),
        migrations.AlterField(
            model_name='transaction',
            name='federal_action_obligation',
            field=models.DecimalField(blank=True, db_index=True, decimal_places=2, help_text='The obligation of the federal government for this transaction', max_digits=20, null=True),
        ),
    ]
1.671875
2
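Editor's note: each AlterField above records a change to a field on the Award model; Django generates such operations by diffing model state. A hypothetical excerpt of the model definition the `fain` operation corresponds to (illustrative only; the real Award model in usaspending_api has many more fields and a longer help_text, and this snippet needs a configured Django project to run):

from django.db import models

class Award(models.Model):
    # Mirrors the field options in the AlterField operation above.
    fain = models.CharField(
        max_length=30,
        blank=True,
        null=True,
        db_index=True,
        help_text="Federal Award Identification Number",
    )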
COVIDSafepassage/passsystem/apps.py
VICS-CORE/safepassage_server
0
12856
from django.apps import AppConfig


class PasssystemConfig(AppConfig):
    name = 'passsystem'
1.023438
1
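Editor's note: an AppConfig like the one above takes effect once the app is listed in the project's settings. An assumed settings.py fragment (the project's actual settings are not part of this record):

# settings.py (assumed): reference the config by dotted path, or just the app name.
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "passsystem.apps.PasssystemConfig",  # equivalent to simply "passsystem"
]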
src/intervals/once.py
Eagerod/tasker
0
12857
from base_interval import BaseInterval


class OnceInterval(BaseInterval):
    @staticmethod
    def next_interval(start_date):
        return start_date

    @staticmethod
    def approximate_period():
        return 0
2.421875
2
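Editor's note: OnceInterval implies a small BaseInterval contract: next_interval maps a start date to the next occurrence, approximate_period returns the interval length in seconds. A hypothetical sibling implementation under that assumed contract, with BaseInterval stubbed so the sketch runs standalone:

from datetime import datetime, timedelta

class BaseInterval(object):  # stub standing in for the repo's base_interval module
    pass

class WeeklyInterval(BaseInterval):
    @staticmethod
    def next_interval(start_date):
        # One week after the given start date.
        return start_date + timedelta(weeks=1)

    @staticmethod
    def approximate_period():
        # Period length in seconds, matching OnceInterval's integer return.
        return int(timedelta(weeks=1).total_seconds())

print(WeeklyInterval.next_interval(datetime(2024, 1, 1)))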
env/Lib/site-packages/azure/mgmt/storage/storagemanagement.py
Ammar12/simplebanking
0
12858
<reponame>Ammar12/simplebanking<filename>env/Lib/site-packages/azure/mgmt/storage/storagemanagement.py # # Copyright (c) Microsoft and contributors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # limitations under the License. # # Warning: This code was generated by a tool. # # Changes to this file may cause incorrect behavior and will be lost if the # code is regenerated. from datetime import datetime import json from requests import Session, Request import time import uuid try: from urllib import quote, unquote except: from urllib.parse import quote, unquote from azure.common import AzureHttpError from azure.mgmt.common import AzureOperationResponse, OperationStatusResponse, OperationStatus, Service from azure.mgmt.common.arm import ResourceBase, ResourceBaseExtended class StorageAccountCreateResponse(AzureOperationResponse): """ The Create storage account operation response. """ def __init__(self, **kwargs): super(StorageAccountCreateResponse, self).__init__(**kwargs) self._storage_account = kwargs.get('storage_account') self._operation_status_link = kwargs.get('operation_status_link') self._retry_after = kwargs.get('retry_after') self._status = kwargs.get('status') @property def operation_status_link(self): """ Gets the URL where the status of the create operation can be checked. """ return self._operation_status_link @operation_status_link.setter def operation_status_link(self, value): self._operation_status_link = value @property def retry_after(self): """ Gets the delay that the client should use when checking for the status of the operation. This delay is specified in seconds as an integer; min 5 seconds, max 900 seconds (15 minutes). The storage resource provider will return 25 seconds initially. """ return self._retry_after @retry_after.setter def retry_after(self, value): self._retry_after = value @property def status(self): """ Gets the status of the create request. """ return self._status @status.setter def status(self, value): self._status = value @property def storage_account(self): """ Gets the storage account with the created properties populated. """ return self._storage_account @storage_account.setter def storage_account(self, value): self._storage_account = value class CheckNameAvailabilityResponse(AzureOperationResponse): """ The CheckNameAvailability operation response. """ def __init__(self, **kwargs): super(CheckNameAvailabilityResponse, self).__init__(**kwargs) self._name_available = kwargs.get('name_available') self._reason = kwargs.get('reason') self._message = kwargs.get('message') @property def message(self): """ Gets an error message explaining the Reason value in more detail. """ return self._message @message.setter def message(self, value): self._message = value @property def name_available(self): """ Gets a boolean value that indicates whether the name is available for you to use. If true, the name is available. If false, the name has already been taken or invalid and cannot be used. 
""" return self._name_available @name_available.setter def name_available(self, value): self._name_available = value @property def reason(self): """ Gets the reason that a storage account name could not be used. The Reason element is only returned if NameAvailable is false. """ return self._reason @reason.setter def reason(self, value): self._reason = value class StorageAccountCreateParameters(object): """ The parameters to provide for the account. """ def __init__(self, **kwargs): self._account_type = kwargs.get('account_type') self._location = kwargs.get('location') self._tags = kwargs.get('tags') @property def account_type(self): """ Gets or sets the account type. """ return self._account_type @account_type.setter def account_type(self, value): self._account_type = value @property def location(self): """ Gets or sets the location of the resource. This will be one of the supported and registered Azure Geo Regions (e.g. West US, East US, Southeast Asia, etc.). The geo region of a resource cannot be changed once it is created. """ return self._location @location.setter def location(self, value): self._location = value @property def tags(self): """ Gets or sets a list of key value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. """ return self._tags @tags.setter def tags(self, value): self._tags = value class StorageAccountGetPropertiesResponse(AzureOperationResponse): """ The Get storage account operation response. """ def __init__(self, **kwargs): super(StorageAccountGetPropertiesResponse, self).__init__(**kwargs) self._storage_account = kwargs.get('storage_account') @property def storage_account(self): """ Gets the returned storage account. """ return self._storage_account @storage_account.setter def storage_account(self, value): self._storage_account = value class StorageAccountListKeysResponse(AzureOperationResponse): """ The ListKeys operation response. """ def __init__(self, **kwargs): super(StorageAccountListKeysResponse, self).__init__(**kwargs) self._storage_account_keys = kwargs.get('storage_account_keys') @property def storage_account_keys(self): """ Gets the access keys for the storage account. """ return self._storage_account_keys @storage_account_keys.setter def storage_account_keys(self, value): self._storage_account_keys = value class StorageAccountListResponse(AzureOperationResponse): """ The list storage accounts operation response. """ def __init__(self, **kwargs): super(StorageAccountListResponse, self).__init__(**kwargs) self._storage_accounts = kwargs.get('storage_accounts') self._next_link = kwargs.get('next_link') @property def next_link(self): """ Gets the link to the next set of results. Currently this will always be empty as the API does not support pagination. """ return self._next_link @next_link.setter def next_link(self, value): self._next_link = value @property def storage_accounts(self): """ Gets the list of storage accounts and their properties. """ return self._storage_accounts @storage_accounts.setter def storage_accounts(self, value): self._storage_accounts = value class StorageAccountUpdateResponse(AzureOperationResponse): """ The Update storage account operation response. 
""" def __init__(self, **kwargs): super(StorageAccountUpdateResponse, self).__init__(**kwargs) self._storage_account = kwargs.get('storage_account') @property def storage_account(self): """ Gets the storage account with the updated properties populated. """ return self._storage_account @storage_account.setter def storage_account(self, value): self._storage_account = value class StorageAccountUpdateParameters(object): """ The parameters to update on the account. """ def __init__(self, **kwargs): self._account_type = kwargs.get('account_type') self._custom_domain = kwargs.get('custom_domain') self._tags = kwargs.get('tags') @property def account_type(self): """ Gets or sets the account type. Note that StandardZRS and PremiumLRS accounts cannot be changed to other account types, and other account types cannot be changed to StandardZRS or PremiumLRS. """ return self._account_type @account_type.setter def account_type(self, value): self._account_type = value @property def custom_domain(self): """ User domain assigned to the storage account. Name is the CNAME source. Only one custom domain is supported per storage account at this time. To clear the existing custom domain, use an empty string for the custom domain name property. """ return self._custom_domain @custom_domain.setter def custom_domain(self, value): self._custom_domain = value @property def tags(self): """ Gets or sets a list of key value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. This is a full replace so all the existing tags will be replaced on Update. """ return self._tags @tags.setter def tags(self, value): self._tags = value class StorageAccountRegenerateKeyResponse(AzureOperationResponse): """ The RegenerateKey operation response. """ def __init__(self, **kwargs): super(StorageAccountRegenerateKeyResponse, self).__init__(**kwargs) self._storage_account_keys = kwargs.get('storage_account_keys') @property def storage_account_keys(self): """ Gets the access keys associated with the storage account, one of which mayhave been regenerated by this operation. """ return self._storage_account_keys @storage_account_keys.setter def storage_account_keys(self, value): self._storage_account_keys = value class KeyName(object): """ The key names. """ key1 = "key1" key2 = "key2" class StorageAccount(ResourceBaseExtended): """ The storage account. """ def __init__(self, **kwargs): super(StorageAccount, self).__init__(**kwargs) self._provisioning_state = kwargs.get('provisioning_state') self._account_type = kwargs.get('account_type') self._primary_endpoints = kwargs.get('primary_endpoints') self._primary_location = kwargs.get('primary_location') self._status_of_primary = kwargs.get('status_of_primary') self._last_geo_failover_time = kwargs.get('last_geo_failover_time') self._secondary_endpoints = kwargs.get('secondary_endpoints') self._secondary_location = kwargs.get('secondary_location') self._status_of_secondary = kwargs.get('status_of_secondary') self._creation_time = kwargs.get('creation_time') self._custom_domain = kwargs.get('custom_domain') @property def account_type(self): """ Gets the type of the storage account. 
""" return self._account_type @account_type.setter def account_type(self, value): self._account_type = value @property def creation_time(self): """ Gets the creation date and time of the storage account in UTC. """ return self._creation_time @creation_time.setter def creation_time(self, value): self._creation_time = value @property def custom_domain(self): """ Gets the user assigned custom domain assigned to this storage account. """ return self._custom_domain @custom_domain.setter def custom_domain(self, value): self._custom_domain = value @property def last_geo_failover_time(self): """ Gets the timestamp of the most recent instance of a failover to the secondary location. Only the most recent timestamp is retained. This element is not returned if there has never been a failover instance. Only available if the accountType is StandardGRS or StandardRAGRS. """ return self._last_geo_failover_time @last_geo_failover_time.setter def last_geo_failover_time(self, value): self._last_geo_failover_time = value @property def primary_endpoints(self): """ Gets the URLs that are used to perform a retrieval of a public blob, queue or table object.Note that StandardZRS and PremiumLRS accounts only return the blob endpoint. """ return self._primary_endpoints @primary_endpoints.setter def primary_endpoints(self, value): self._primary_endpoints = value @property def primary_location(self): """ Gets the location of the primary for the storage account. """ return self._primary_location @primary_location.setter def primary_location(self, value): self._primary_location = value @property def provisioning_state(self): """ Gets the status of the storage account at the time the operation was called. """ return self._provisioning_state @provisioning_state.setter def provisioning_state(self, value): self._provisioning_state = value @property def secondary_endpoints(self): """ Gets the URLs that are used to perform a retrieval of a public blob, queue or table object from the secondary location of the storage account. Only available if the accountType is StandardRAGRS. """ return self._secondary_endpoints @secondary_endpoints.setter def secondary_endpoints(self, value): self._secondary_endpoints = value @property def secondary_location(self): """ Gets the location of the geo replicated secondary for the storage account. Only available if the accountType is StandardGRS or StandardRAGRS. """ return self._secondary_location @secondary_location.setter def secondary_location(self, value): self._secondary_location = value @property def status_of_primary(self): """ Gets the status indicating whether the primary location of the storage account is available or unavailable. """ return self._status_of_primary @status_of_primary.setter def status_of_primary(self, value): self._status_of_primary = value @property def status_of_secondary(self): """ Gets the status indicating whether the secondary location of the storage account is available or unavailable. Only available if the accountType is StandardGRS or StandardRAGRS. 
""" return self._status_of_secondary @status_of_secondary.setter def status_of_secondary(self, value): self._status_of_secondary = value class ProvisioningState(object): creating = "Creating" resolving_dns = "ResolvingDNS" succeeded = "Succeeded" class AccountType(object): standard_lrs = "Standard_LRS" standard_zrs = "Standard_ZRS" standard_grs = "Standard_GRS" standard_ragrs = "Standard_RAGRS" premium_lrs = "Premium_LRS" class Endpoints(object): """ The URIs that are used to perform a retrieval of a public blob, queue or table object. """ def __init__(self, **kwargs): self._blob = kwargs.get('blob') self._queue = kwargs.get('queue') self._table = kwargs.get('table') @property def blob(self): """ Gets the blob endpoint. """ return self._blob @blob.setter def blob(self, value): self._blob = value @property def queue(self): """ Gets the queue endpoint. """ return self._queue @queue.setter def queue(self, value): self._queue = value @property def table(self): """ Gets the table endpoint. """ return self._table @table.setter def table(self, value): self._table = value class AccountStatus(object): available = "Available" unavailable = "Unavailable" class CustomDomain(object): """ The custom domain assigned to this storage account. This can be set via Update. """ def __init__(self, **kwargs): self._name = kwargs.get('name') self._use_sub_domain = kwargs.get('use_sub_domain') @property def name(self): """ Gets or sets the custom domain name. Name is the CNAME source. """ return self._name @name.setter def name(self, value): self._name = value @property def use_sub_domain(self): """ Indicates whether indirect CName validation is enabled. Default value is false. This should only be set on updates """ return self._use_sub_domain @use_sub_domain.setter def use_sub_domain(self, value): self._use_sub_domain = value class Reason(object): account_name_invalid = "AccountNameInvalid" already_exists = "AlreadyExists" class StorageAccountKeys(object): """ The access keys for the storage account. """ def __init__(self, **kwargs): self._key1 = kwargs.get('key1') self._key2 = kwargs.get('key2') @property def key1(self): """ Gets the value of key 1. """ return self._key1 @key1.setter def key1(self, value): self._key1 = value @property def key2(self): """ Gets the value of key 2. """ return self._key2 @key2.setter def key2(self, value): self._key2 = value class StorageManagementClient(Service): """ The Storage Management Client. """ @property def api_version(self): """ Gets the API version. """ return self._api_version @property def long_running_operation_initial_timeout(self): """ Gets or sets the initial timeout for Long Running Operations. """ return self._long_running_operation_initial_timeout @long_running_operation_initial_timeout.setter def long_running_operation_initial_timeout(self, value): self._long_running_operation_initial_timeout = value @property def long_running_operation_retry_timeout(self): """ Gets or sets the retry timeout for Long Running Operations. """ return self._long_running_operation_retry_timeout @long_running_operation_retry_timeout.setter def long_running_operation_retry_timeout(self, value): self._long_running_operation_retry_timeout = value @property def storage_accounts(self): """ Operations for managing storage accounts. 
""" return self._storage_accounts def __init__(self, credentials, **kwargs): super(StorageManagementClient, self).__init__(credentials, **kwargs) if getattr(self, '_base_uri', None) is None: self._base_uri = 'https://management.azure.com/' if getattr(self, '_api_version', None) is None: self._api_version = '2015-05-01-preview' if getattr(self, '_long_running_operation_initial_timeout', None) is None: self._long_running_operation_initial_timeout = -1 if getattr(self, '_long_running_operation_retry_timeout', None) is None: self._long_running_operation_retry_timeout = -1 self._storage_accounts = StorageAccountOperations(self) def parse_account_type(self, value): """ Parse enum values for type AccountType. Args: value (string): The value to parse. Returns: AccountType: The enum value. """ if 'Standard_LRS'.lower() == value.lower(): return AccountType.StandardLRS if 'Standard_ZRS'.lower() == value.lower(): return AccountType.StandardZRS if 'Standard_GRS'.lower() == value.lower(): return AccountType.StandardGRS if 'Standard_RAGRS'.lower() == value.lower(): return AccountType.StandardRAGRS if 'Premium_LRS'.lower() == value.lower(): return AccountType.PremiumLRS raise IndexError('value is outside the valid range.') def account_type_to_string(self, value): """ Convert an enum of type AccountType to a string. Args: value (AccountType): The value to convert to a string. Returns: string: The enum value as a string. """ if value == AccountType.StandardLRS: return 'Standard_LRS' if value == AccountType.StandardZRS: return 'Standard_ZRS' if value == AccountType.StandardGRS: return 'Standard_GRS' if value == AccountType.StandardRAGRS: return 'Standard_RAGRS' if value == AccountType.PremiumLRS: return 'Premium_LRS' raise IndexError('value is outside the valid range.') def parse_key_name(self, value): """ Parse enum values for type KeyName. Args: value (string): The value to parse. Returns: KeyName: The enum value. """ if 'key1'.lower() == value.lower(): return KeyName.Key1 if 'key2'.lower() == value.lower(): return KeyName.Key2 raise IndexError('value is outside the valid range.') def key_name_to_string(self, value): """ Convert an enum of type KeyName to a string. Args: value (KeyName): The value to convert to a string. Returns: string: The enum value as a string. """ if value == KeyName.Key1: return 'key1' if value == KeyName.Key2: return 'key2' raise IndexError('value is outside the valid range.') def get_create_operation_status(self, operation_status_link): """ The Get Create Operation Status operation returns the status of the specified create operation. After calling the asynchronous Begin Create operation, you can call Get Create Operation Status to determine whether the operation has succeeded, failed, or is still in progress. Args: operation_status_link (string): The URL where the status of the long-running create operation can be checked. Returns: StorageAccountCreateResponse: The Create storage account operation response. 
""" # Validate if operation_status_link is None: raise ValueError('operation_status_link cannot be None.') # Tracing # Construct URL url = '' url = url + operation_status_link url = url.replace(' ', '%20') # Create HTTP transport objects http_request = Request() http_request.url = url http_request.method = 'GET' # Set Headers http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4()) # Send Request response = self.send_request(http_request) body = response.content status_code = response.status_code if status_code != 200 and status_code != 202 and status_code != 500: error = AzureHttpError(body, response.status_code) raise error # Create Result result = None # Deserialize Response if status_code == 200 or status_code == 202 or status_code == 500: response_content = body result = StorageAccountCreateResponse() response_doc = None if response_content: response_doc = json.loads(response_content.decode()) if response_doc is not None: storage_account_instance = StorageAccount(tags={}) result.storage_account = storage_account_instance id_value = response_doc.get('id', None) if id_value is not None: id_instance = id_value storage_account_instance.id = id_instance name_value = response_doc.get('name', None) if name_value is not None: name_instance = name_value storage_account_instance.name = name_instance type_value = response_doc.get('type', None) if type_value is not None: type_instance = type_value storage_account_instance.type = type_instance location_value = response_doc.get('location', None) if location_value is not None: location_instance = location_value storage_account_instance.location = location_instance tags_sequence_element = response_doc.get('tags', None) if tags_sequence_element is not None: for property in tags_sequence_element: tags_key = property tags_value = tags_sequence_element[property] storage_account_instance.tags[tags_key] = tags_value properties_value = response_doc.get('properties', None) if properties_value is not None: provisioning_state_value = properties_value.get('provisioningState', None) if provisioning_state_value is not None: provisioning_state_instance = provisioning_state_value storage_account_instance.provisioning_state = provisioning_state_instance account_type_value = properties_value.get('accountType', None) if account_type_value is not None: account_type_instance = account_type_value storage_account_instance.account_type = account_type_instance primary_endpoints_value = properties_value.get('primaryEndpoints', None) if primary_endpoints_value is not None: primary_endpoints_instance = Endpoints() storage_account_instance.primary_endpoints = primary_endpoints_instance blob_value = primary_endpoints_value.get('blob', None) if blob_value is not None: blob_instance = blob_value primary_endpoints_instance.blob = blob_instance queue_value = primary_endpoints_value.get('queue', None) if queue_value is not None: queue_instance = queue_value primary_endpoints_instance.queue = queue_instance table_value = primary_endpoints_value.get('table', None) if table_value is not None: table_instance = table_value primary_endpoints_instance.table = table_instance primary_location_value = properties_value.get('primaryLocation', None) if primary_location_value is not None: primary_location_instance = primary_location_value storage_account_instance.primary_location = primary_location_instance status_of_primary_value = properties_value.get('statusOfPrimary', None) if status_of_primary_value is not None: status_of_primary_instance = status_of_primary_value 
storage_account_instance.status_of_primary = status_of_primary_instance last_geo_failover_time_value = properties_value.get('lastGeoFailoverTime', None) if last_geo_failover_time_value is not None: last_geo_failover_time_instance = last_geo_failover_time_value storage_account_instance.last_geo_failover_time = last_geo_failover_time_instance secondary_location_value = properties_value.get('secondaryLocation', None) if secondary_location_value is not None: secondary_location_instance = secondary_location_value storage_account_instance.secondary_location = secondary_location_instance status_of_secondary_value = properties_value.get('statusOfSecondary', None) if status_of_secondary_value is not None: status_of_secondary_instance = status_of_secondary_value storage_account_instance.status_of_secondary = status_of_secondary_instance creation_time_value = properties_value.get('creationTime', None) if creation_time_value is not None: creation_time_instance = creation_time_value storage_account_instance.creation_time = creation_time_instance custom_domain_value = properties_value.get('customDomain', None) if custom_domain_value is not None: custom_domain_instance = CustomDomain() storage_account_instance.custom_domain = custom_domain_instance name_value2 = custom_domain_value.get('name', None) if name_value2 is not None: name_instance2 = name_value2 custom_domain_instance.name = name_instance2 use_sub_domain_value = custom_domain_value.get('useSubDomain', None) if use_sub_domain_value is not None: use_sub_domain_instance = use_sub_domain_value custom_domain_instance.use_sub_domain = use_sub_domain_instance secondary_endpoints_value = properties_value.get('secondaryEndpoints', None) if secondary_endpoints_value is not None: secondary_endpoints_instance = Endpoints() storage_account_instance.secondary_endpoints = secondary_endpoints_instance blob_value2 = secondary_endpoints_value.get('blob', None) if blob_value2 is not None: blob_instance2 = blob_value2 secondary_endpoints_instance.blob = blob_instance2 queue_value2 = secondary_endpoints_value.get('queue', None) if queue_value2 is not None: queue_instance2 = queue_value2 secondary_endpoints_instance.queue = queue_instance2 table_value2 = secondary_endpoints_value.get('table', None) if table_value2 is not None: table_instance2 = table_value2 secondary_endpoints_instance.table = table_instance2 result.status_code = status_code result.retry_after = int(response.headers.get('retryafter', '0')) result.request_id = response.headers.get('x-ms-request-id') if status_code == 409: result.status = OperationStatus.Failed if status_code == 500: result.status = OperationStatus.InProgress if status_code == 202: result.status = OperationStatus.InProgress if status_code == 200: result.status = OperationStatus.Succeeded return result class StorageAccountOperations(object): """ Operations for managing storage accounts. __NOTE__: An instance of this class is automatically created for an instance of the [StorageManagementClient] """ def __init__(self, client): self._client = client @property def client(self): """ Gets a reference to the Microsoft.Azure.Management.Storage.StorageManagementClient. """ return self._client def begin_create(self, resource_group_name, account_name, parameters): """ Asynchronously creates a new storage account with the specified parameters. Existing accounts cannot be updated with this API and should instead use the Update Storage Account API. 
If an account is already created and subsequent PUT request is issued with exact same set of properties, then HTTP 200 would be returned. Args: resource_group_name (string): The name of the resource group within the user’s subscription. account_name (string): The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only. parameters (StorageAccountCreateParameters): The parameters to provide for the created account. Returns: StorageAccountCreateResponse: The Create storage account operation response. """ # Validate if resource_group_name is None: raise ValueError('resource_group_name cannot be None.') if account_name is None: raise ValueError('account_name cannot be None.') if len(account_name) < 3: raise IndexError('account_name is outside the valid range.') if len(account_name) > 24: raise IndexError('account_name is outside the valid range.') for account_name_char in account_name: if account_name_char.islower() == False and account_name_char.isdigit() == False: raise IndexError('account_name is outside the valid range.') if parameters is None: raise ValueError('parameters cannot be None.') if parameters.account_type is None: raise ValueError('parameters.account_type cannot be None.') if parameters.location is None: raise ValueError('parameters.location cannot be None.') # Tracing # Construct URL url = '' url = url + '/subscriptions/' if self.client.credentials.subscription_id is not None: url = url + quote(self.client.credentials.subscription_id) url = url + '/resourceGroups/' url = url + quote(resource_group_name) url = url + '/providers/Microsoft.Storage/storageAccounts/' url = url + quote(account_name) query_parameters = [] query_parameters.append('api-version=2015-05-01-preview') if len(query_parameters) > 0: url = url + '?' + '&'.join(query_parameters) base_url = self.client.base_uri # Trim '/' character from the end of baseUrl and beginning of url. 
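        # For reference, the assembled request URL has the shape (values
        # illustrative):
        #   https://management.azure.com/subscriptions/{subscription-id}/
        #   resourceGroups/{resource-group}/providers/Microsoft.Storage/
        #   storageAccounts/{account-name}?api-version=2015-05-01-preview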
if base_url[len(base_url) - 1] == '/': base_url = base_url[0 : len(base_url) - 1] if url[0] == '/': url = url[1 : ] url = base_url + '/' + url url = url.replace(' ', '%20') # Create HTTP transport objects http_request = Request() http_request.url = url http_request.method = 'PUT' # Set Headers http_request.headers['Content-Type'] = 'application/json' http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4()) # Serialize Request request_content = None request_doc = None storage_account_create_parameters_json_value = {} request_doc = storage_account_create_parameters_json_value storage_account_create_parameters_json_value['location'] = parameters.location if parameters.tags is not None: tags_dictionary = {} for tags_key in parameters.tags: tags_value = parameters.tags[tags_key] tags_dictionary[tags_key] = tags_value storage_account_create_parameters_json_value['tags'] = tags_dictionary properties_value = {} storage_account_create_parameters_json_value['properties'] = properties_value properties_value['accountType'] = str(parameters.account_type) if parameters.account_type is not None else 'StandardLRS' request_content = json.dumps(request_doc) http_request.data = request_content http_request.headers['Content-Length'] = len(request_content) # Send Request response = self.client.send_request(http_request) body = response.content status_code = response.status_code if status_code != 200 and status_code != 202: error = AzureHttpError(body, response.status_code) raise error # Create Result result = None # Deserialize Response if status_code == 200 or status_code == 202: response_content = body result = StorageAccountCreateResponse() response_doc = None if response_content: response_doc = json.loads(response_content.decode()) if response_doc is not None: storage_account_instance = StorageAccount(tags={}) result.storage_account = storage_account_instance id_value = response_doc.get('id', None) if id_value is not None: id_instance = id_value storage_account_instance.id = id_instance name_value = response_doc.get('name', None) if name_value is not None: name_instance = name_value storage_account_instance.name = name_instance type_value = response_doc.get('type', None) if type_value is not None: type_instance = type_value storage_account_instance.type = type_instance location_value = response_doc.get('location', None) if location_value is not None: location_instance = location_value storage_account_instance.location = location_instance tags_sequence_element = response_doc.get('tags', None) if tags_sequence_element is not None: for property in tags_sequence_element: tags_key2 = property tags_value2 = tags_sequence_element[property] storage_account_instance.tags[tags_key2] = tags_value2 properties_value2 = response_doc.get('properties', None) if properties_value2 is not None: provisioning_state_value = properties_value2.get('provisioningState', None) if provisioning_state_value is not None: provisioning_state_instance = provisioning_state_value storage_account_instance.provisioning_state = provisioning_state_instance account_type_value = properties_value2.get('accountType', None) if account_type_value is not None: account_type_instance = account_type_value storage_account_instance.account_type = account_type_instance primary_endpoints_value = properties_value2.get('primaryEndpoints', None) if primary_endpoints_value is not None: primary_endpoints_instance = Endpoints() storage_account_instance.primary_endpoints = primary_endpoints_instance blob_value = primary_endpoints_value.get('blob', None) if 
blob_value is not None: blob_instance = blob_value primary_endpoints_instance.blob = blob_instance queue_value = primary_endpoints_value.get('queue', None) if queue_value is not None: queue_instance = queue_value primary_endpoints_instance.queue = queue_instance table_value = primary_endpoints_value.get('table', None) if table_value is not None: table_instance = table_value primary_endpoints_instance.table = table_instance primary_location_value = properties_value2.get('primaryLocation', None) if primary_location_value is not None: primary_location_instance = primary_location_value storage_account_instance.primary_location = primary_location_instance status_of_primary_value = properties_value2.get('statusOfPrimary', None) if status_of_primary_value is not None: status_of_primary_instance = status_of_primary_value storage_account_instance.status_of_primary = status_of_primary_instance last_geo_failover_time_value = properties_value2.get('lastGeoFailoverTime', None) if last_geo_failover_time_value is not None: last_geo_failover_time_instance = last_geo_failover_time_value storage_account_instance.last_geo_failover_time = last_geo_failover_time_instance secondary_location_value = properties_value2.get('secondaryLocation', None) if secondary_location_value is not None: secondary_location_instance = secondary_location_value storage_account_instance.secondary_location = secondary_location_instance status_of_secondary_value = properties_value2.get('statusOfSecondary', None) if status_of_secondary_value is not None: status_of_secondary_instance = status_of_secondary_value storage_account_instance.status_of_secondary = status_of_secondary_instance creation_time_value = properties_value2.get('creationTime', None) if creation_time_value is not None: creation_time_instance = creation_time_value storage_account_instance.creation_time = creation_time_instance custom_domain_value = properties_value2.get('customDomain', None) if custom_domain_value is not None: custom_domain_instance = CustomDomain() storage_account_instance.custom_domain = custom_domain_instance name_value2 = custom_domain_value.get('name', None) if name_value2 is not None: name_instance2 = name_value2 custom_domain_instance.name = name_instance2 use_sub_domain_value = custom_domain_value.get('useSubDomain', None) if use_sub_domain_value is not None: use_sub_domain_instance = use_sub_domain_value custom_domain_instance.use_sub_domain = use_sub_domain_instance secondary_endpoints_value = properties_value2.get('secondaryEndpoints', None) if secondary_endpoints_value is not None: secondary_endpoints_instance = Endpoints() storage_account_instance.secondary_endpoints = secondary_endpoints_instance blob_value2 = secondary_endpoints_value.get('blob', None) if blob_value2 is not None: blob_instance2 = blob_value2 secondary_endpoints_instance.blob = blob_instance2 queue_value2 = secondary_endpoints_value.get('queue', None) if queue_value2 is not None: queue_instance2 = queue_value2 secondary_endpoints_instance.queue = queue_instance2 table_value2 = secondary_endpoints_value.get('table', None) if table_value2 is not None: table_instance2 = table_value2 secondary_endpoints_instance.table = table_instance2 result.status_code = status_code result.operation_status_link = response.headers.get('location') result.retry_after = int(response.headers.get('retryafter', '0')) result.request_id = response.headers.get('x-ms-request-id') if status_code == 409 or status_code == 400: result.status = OperationStatus.Failed if status_code == 200: result.status = 
OperationStatus.Succeeded return result def check_name_availability(self, account_name): """ Checks that account name is valid and is not in use. Args: account_name (string): The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only. Returns: CheckNameAvailabilityResponse: The CheckNameAvailability operation response. """ # Validate if account_name is None: raise ValueError('account_name cannot be None.') # Tracing # Construct URL url = '' url = url + '/subscriptions/' if self.client.credentials.subscription_id is not None: url = url + quote(self.client.credentials.subscription_id) url = url + '/providers/Microsoft.Storage/checkNameAvailability' query_parameters = [] query_parameters.append('api-version=2015-05-01-preview') if len(query_parameters) > 0: url = url + '?' + '&'.join(query_parameters) base_url = self.client.base_uri # Trim '/' character from the end of baseUrl and beginning of url. if base_url[len(base_url) - 1] == '/': base_url = base_url[0 : len(base_url) - 1] if url[0] == '/': url = url[1 : ] url = base_url + '/' + url url = url.replace(' ', '%20') # Create HTTP transport objects http_request = Request() http_request.url = url http_request.method = 'POST' # Set Headers http_request.headers['Content-Type'] = 'application/json' http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4()) # Serialize Request request_content = None request_doc = None storage_account_check_name_availability_parameters_value = {} request_doc = storage_account_check_name_availability_parameters_value storage_account_check_name_availability_parameters_value['name'] = account_name storage_account_check_name_availability_parameters_value['type'] = 'Microsoft.Storage/storageAccounts' request_content = json.dumps(request_doc) http_request.data = request_content http_request.headers['Content-Length'] = len(request_content) # Send Request response = self.client.send_request(http_request) body = response.content status_code = response.status_code if status_code != 200: error = AzureHttpError(body, response.status_code) raise error # Create Result result = None # Deserialize Response if status_code == 200: response_content = body result = CheckNameAvailabilityResponse() response_doc = None if response_content: response_doc = json.loads(response_content.decode()) if response_doc is not None: name_available_value = response_doc.get('nameAvailable', None) if name_available_value is not None: name_available_instance = name_available_value result.name_available = name_available_instance reason_value = response_doc.get('reason', None) if reason_value is not None: reason_instance = reason_value result.reason = reason_instance message_value = response_doc.get('message', None) if message_value is not None: message_instance = message_value result.message = message_instance result.status_code = status_code result.request_id = response.headers.get('x-ms-request-id') return result def create(self, resource_group_name, account_name, parameters): """ Asynchronously creates a new storage account with the specified parameters. Existing accounts cannot be updated with this API and should instead use the Update Storage Account API. If an account is already created and subsequent create request is issued with exact same set of properties, the request succeeds.The max number of storage accounts that can be created per subscription is limited to 20. 
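
        A minimal end-to-end sketch (resource names are illustrative):

            params = StorageAccountCreateParameters(
                location='West US',
                account_type=AccountType.standard_lrs)
            result = client.storage_accounts.create(
                'my-resource-group', 'mystorageaccount', params)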
        Args:
            resource_group_name (string): The name of the resource group
                within the user’s subscription.

            account_name (string): The name of the storage account within
                the specified resource group. Storage account names must be
                between 3 and 24 characters in length and use numbers and
                lower-case letters only.

            parameters (StorageAccountCreateParameters): The parameters to
                provide for the created account.

        Returns:
            StorageAccountCreateResponse: The Create storage account
            operation response.
        """
        client2 = self.client

        response = client2.storage_accounts.begin_create(
            resource_group_name, account_name, parameters)
        if response.status == OperationStatus.succeeded:
            return response

        result = client2.get_create_operation_status(
            response.operation_status_link)
        delay_in_seconds = response.retry_after
        if delay_in_seconds == 0:
            delay_in_seconds = 25
        if client2.long_running_operation_initial_timeout >= 0:
            delay_in_seconds = client2.long_running_operation_initial_timeout
        # Poll until the long-running create leaves the InProgress state.
        while result.status == OperationStatus.in_progress:
            time.sleep(delay_in_seconds)
            result = client2.get_create_operation_status(
                response.operation_status_link)
            delay_in_seconds = result.retry_after
            if delay_in_seconds == 0:
                delay_in_seconds = 25
            if client2.long_running_operation_retry_timeout >= 0:
                delay_in_seconds = client2.long_running_operation_retry_timeout

        return result

    def delete(self, resource_group_name, account_name):
        """
        Deletes a storage account in Microsoft Azure.

        Args:
            resource_group_name (string): The name of the resource group
                within the user’s subscription.

            account_name (string): The name of the storage account within
                the specified resource group. Storage account names must be
                between 3 and 24 characters in length and use numbers and
                lower-case letters only.

        Returns:
            AzureOperationResponse: A standard service response including
            an HTTP status code and request ID.
        """
        # Validate
        if resource_group_name is None:
            raise ValueError('resource_group_name cannot be None.')
        if account_name is None:
            raise ValueError('account_name cannot be None.')
        if len(account_name) < 3:
            raise IndexError('account_name is outside the valid range.')
        if len(account_name) > 24:
            raise IndexError('account_name is outside the valid range.')
        for account_name_char in account_name:
            if not account_name_char.islower() and not account_name_char.isdigit():
                raise IndexError('account_name is outside the valid range.')

        # Tracing

        # Construct URL
        url = ''
        url = url + '/subscriptions/'
        if self.client.credentials.subscription_id is not None:
            url = url + quote(self.client.credentials.subscription_id)
        url = url + '/resourceGroups/'
        url = url + quote(resource_group_name)
        url = url + '/providers/Microsoft.Storage/storageAccounts/'
        url = url + quote(account_name)
        query_parameters = []
        query_parameters.append('api-version=2015-05-01-preview')
        if len(query_parameters) > 0:
            url = url + '?' + '&'.join(query_parameters)
        base_url = self.client.base_uri
        # Trim '/' character from the end of baseUrl and beginning of url.
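        # (The trim below ensures base_url and url are joined by exactly
        # one '/' character.)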
if base_url[len(base_url) - 1] == '/': base_url = base_url[0 : len(base_url) - 1] if url[0] == '/': url = url[1 : ] url = base_url + '/' + url url = url.replace(' ', '%20') # Create HTTP transport objects http_request = Request() http_request.url = url http_request.method = 'DELETE' # Set Headers http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4()) # Send Request response = self.client.send_request(http_request) body = response.content status_code = response.status_code if status_code != 200 and status_code != 204: error = AzureHttpError(body, response.status_code) raise error # Create Result result = None # Deserialize Response result = AzureOperationResponse() result.status_code = status_code result.request_id = response.headers.get('x-ms-request-id') return result def get_properties(self, resource_group_name, account_name): """ Returns the properties for the specified storage account including but not limited to name, account type, location, and account status. The ListKeys operation should be used to retrieve storage keys. Args: resource_group_name (string): The name of the resource group within the user’s subscription. account_name (string): The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only. Returns: StorageAccountGetPropertiesResponse: The Get storage account operation response. """ # Validate if resource_group_name is None: raise ValueError('resource_group_name cannot be None.') if account_name is None: raise ValueError('account_name cannot be None.') if len(account_name) < 3: raise IndexError('account_name is outside the valid range.') if len(account_name) > 24: raise IndexError('account_name is outside the valid range.') for account_name_char in account_name: if account_name_char.islower() == False and account_name_char.isdigit() == False: raise IndexError('account_name is outside the valid range.') # Tracing # Construct URL url = '' url = url + '/subscriptions/' if self.client.credentials.subscription_id is not None: url = url + quote(self.client.credentials.subscription_id) url = url + '/resourceGroups/' url = url + quote(resource_group_name) url = url + '/providers/Microsoft.Storage/storageAccounts/' url = url + quote(account_name) query_parameters = [] query_parameters.append('api-version=2015-05-01-preview') if len(query_parameters) > 0: url = url + '?' + '&'.join(query_parameters) base_url = self.client.base_uri # Trim '/' character from the end of baseUrl and beginning of url. 
if base_url[len(base_url) - 1] == '/': base_url = base_url[0 : len(base_url) - 1] if url[0] == '/': url = url[1 : ] url = base_url + '/' + url url = url.replace(' ', '%20') # Create HTTP transport objects http_request = Request() http_request.url = url http_request.method = 'GET' # Set Headers http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4()) # Send Request response = self.client.send_request(http_request) body = response.content status_code = response.status_code if status_code != 200: error = AzureHttpError(body, response.status_code) raise error # Create Result result = None # Deserialize Response if status_code == 200: response_content = body result = StorageAccountGetPropertiesResponse() response_doc = None if response_content: response_doc = json.loads(response_content.decode()) if response_doc is not None: storage_account_instance = StorageAccount(tags={}) result.storage_account = storage_account_instance id_value = response_doc.get('id', None) if id_value is not None: id_instance = id_value storage_account_instance.id = id_instance name_value = response_doc.get('name', None) if name_value is not None: name_instance = name_value storage_account_instance.name = name_instance type_value = response_doc.get('type', None) if type_value is not None: type_instance = type_value storage_account_instance.type = type_instance location_value = response_doc.get('location', None) if location_value is not None: location_instance = location_value storage_account_instance.location = location_instance tags_sequence_element = response_doc.get('tags', None) if tags_sequence_element is not None: for property in tags_sequence_element: tags_key = property tags_value = tags_sequence_element[property] storage_account_instance.tags[tags_key] = tags_value properties_value = response_doc.get('properties', None) if properties_value is not None: provisioning_state_value = properties_value.get('provisioningState', None) if provisioning_state_value is not None: provisioning_state_instance = provisioning_state_value storage_account_instance.provisioning_state = provisioning_state_instance account_type_value = properties_value.get('accountType', None) if account_type_value is not None: account_type_instance = account_type_value storage_account_instance.account_type = account_type_instance primary_endpoints_value = properties_value.get('primaryEndpoints', None) if primary_endpoints_value is not None: primary_endpoints_instance = Endpoints() storage_account_instance.primary_endpoints = primary_endpoints_instance blob_value = primary_endpoints_value.get('blob', None) if blob_value is not None: blob_instance = blob_value primary_endpoints_instance.blob = blob_instance queue_value = primary_endpoints_value.get('queue', None) if queue_value is not None: queue_instance = queue_value primary_endpoints_instance.queue = queue_instance table_value = primary_endpoints_value.get('table', None) if table_value is not None: table_instance = table_value primary_endpoints_instance.table = table_instance primary_location_value = properties_value.get('primaryLocation', None) if primary_location_value is not None: primary_location_instance = primary_location_value storage_account_instance.primary_location = primary_location_instance status_of_primary_value = properties_value.get('statusOfPrimary', None) if status_of_primary_value is not None: status_of_primary_instance = status_of_primary_value storage_account_instance.status_of_primary = status_of_primary_instance last_geo_failover_time_value = 
properties_value.get('lastGeoFailoverTime', None) if last_geo_failover_time_value is not None: last_geo_failover_time_instance = last_geo_failover_time_value storage_account_instance.last_geo_failover_time = last_geo_failover_time_instance secondary_location_value = properties_value.get('secondaryLocation', None) if secondary_location_value is not None: secondary_location_instance = secondary_location_value storage_account_instance.secondary_location = secondary_location_instance status_of_secondary_value = properties_value.get('statusOfSecondary', None) if status_of_secondary_value is not None: status_of_secondary_instance = status_of_secondary_value storage_account_instance.status_of_secondary = status_of_secondary_instance creation_time_value = properties_value.get('creationTime', None) if creation_time_value is not None: creation_time_instance = creation_time_value storage_account_instance.creation_time = creation_time_instance custom_domain_value = properties_value.get('customDomain', None) if custom_domain_value is not None: custom_domain_instance = CustomDomain() storage_account_instance.custom_domain = custom_domain_instance name_value2 = custom_domain_value.get('name', None) if name_value2 is not None: name_instance2 = name_value2 custom_domain_instance.name = name_instance2 use_sub_domain_value = custom_domain_value.get('useSubDomain', None) if use_sub_domain_value is not None: use_sub_domain_instance = use_sub_domain_value custom_domain_instance.use_sub_domain = use_sub_domain_instance secondary_endpoints_value = properties_value.get('secondaryEndpoints', None) if secondary_endpoints_value is not None: secondary_endpoints_instance = Endpoints() storage_account_instance.secondary_endpoints = secondary_endpoints_instance blob_value2 = secondary_endpoints_value.get('blob', None) if blob_value2 is not None: blob_instance2 = blob_value2 secondary_endpoints_instance.blob = blob_instance2 queue_value2 = secondary_endpoints_value.get('queue', None) if queue_value2 is not None: queue_instance2 = queue_value2 secondary_endpoints_instance.queue = queue_instance2 table_value2 = secondary_endpoints_value.get('table', None) if table_value2 is not None: table_instance2 = table_value2 secondary_endpoints_instance.table = table_instance2 result.status_code = status_code result.request_id = response.headers.get('x-ms-request-id') return result def list(self): """ Lists all the storage accounts available under the subscription. Note that storage keys are not returned; use the ListKeys operation for this. Returns: StorageAccountListResponse: The list storage accounts operation response. """ # Validate # Tracing # Construct URL url = '' url = url + '/subscriptions/' if self.client.credentials.subscription_id is not None: url = url + quote(self.client.credentials.subscription_id) url = url + '/providers/Microsoft.Storage/storageAccounts' query_parameters = [] query_parameters.append('api-version=2015-05-01-preview') if len(query_parameters) > 0: url = url + '?' + '&'.join(query_parameters) base_url = self.client.base_uri # Trim '/' character from the end of baseUrl and beginning of url. 
if base_url[len(base_url) - 1] == '/': base_url = base_url[0 : len(base_url) - 1] if url[0] == '/': url = url[1 : ] url = base_url + '/' + url url = url.replace(' ', '%20') # Create HTTP transport objects http_request = Request() http_request.url = url http_request.method = 'GET' # Set Headers http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4()) # Send Request response = self.client.send_request(http_request) body = response.content status_code = response.status_code if status_code != 200: error = AzureHttpError(body, response.status_code) raise error # Create Result result = None # Deserialize Response if status_code == 200: response_content = body result = StorageAccountListResponse(storage_accounts=[]) response_doc = None if response_content: response_doc = json.loads(response_content.decode()) if response_doc is not None: value_array = response_doc.get('value', None) if value_array is not None: for value_value in value_array: storage_account_json_instance = StorageAccount(tags={}) result.storage_accounts.append(storage_account_json_instance) id_value = value_value.get('id', None) if id_value is not None: id_instance = id_value storage_account_json_instance.id = id_instance name_value = value_value.get('name', None) if name_value is not None: name_instance = name_value storage_account_json_instance.name = name_instance type_value = value_value.get('type', None) if type_value is not None: type_instance = type_value storage_account_json_instance.type = type_instance location_value = value_value.get('location', None) if location_value is not None: location_instance = location_value storage_account_json_instance.location = location_instance tags_sequence_element = value_value.get('tags', None) if tags_sequence_element is not None: for property in tags_sequence_element: tags_key = property tags_value = tags_sequence_element[property] storage_account_json_instance.tags[tags_key] = tags_value properties_value = value_value.get('properties', None) if properties_value is not None: provisioning_state_value = properties_value.get('provisioningState', None) if provisioning_state_value is not None: provisioning_state_instance = provisioning_state_value storage_account_json_instance.provisioning_state = provisioning_state_instance account_type_value = properties_value.get('accountType', None) if account_type_value is not None: account_type_instance = account_type_value storage_account_json_instance.account_type = account_type_instance primary_endpoints_value = properties_value.get('primaryEndpoints', None) if primary_endpoints_value is not None: primary_endpoints_instance = Endpoints() storage_account_json_instance.primary_endpoints = primary_endpoints_instance blob_value = primary_endpoints_value.get('blob', None) if blob_value is not None: blob_instance = blob_value primary_endpoints_instance.blob = blob_instance queue_value = primary_endpoints_value.get('queue', None) if queue_value is not None: queue_instance = queue_value primary_endpoints_instance.queue = queue_instance table_value = primary_endpoints_value.get('table', None) if table_value is not None: table_instance = table_value primary_endpoints_instance.table = table_instance primary_location_value = properties_value.get('primaryLocation', None) if primary_location_value is not None: primary_location_instance = primary_location_value storage_account_json_instance.primary_location = primary_location_instance status_of_primary_value = properties_value.get('statusOfPrimary', None) if status_of_primary_value is not None: 
status_of_primary_instance = status_of_primary_value storage_account_json_instance.status_of_primary = status_of_primary_instance last_geo_failover_time_value = properties_value.get('lastGeoFailoverTime', None) if last_geo_failover_time_value is not None: last_geo_failover_time_instance = last_geo_failover_time_value storage_account_json_instance.last_geo_failover_time = last_geo_failover_time_instance secondary_location_value = properties_value.get('secondaryLocation', None) if secondary_location_value is not None: secondary_location_instance = secondary_location_value storage_account_json_instance.secondary_location = secondary_location_instance status_of_secondary_value = properties_value.get('statusOfSecondary', None) if status_of_secondary_value is not None: status_of_secondary_instance = status_of_secondary_value storage_account_json_instance.status_of_secondary = status_of_secondary_instance creation_time_value = properties_value.get('creationTime', None) if creation_time_value is not None: creation_time_instance = creation_time_value storage_account_json_instance.creation_time = creation_time_instance custom_domain_value = properties_value.get('customDomain', None) if custom_domain_value is not None: custom_domain_instance = CustomDomain() storage_account_json_instance.custom_domain = custom_domain_instance name_value2 = custom_domain_value.get('name', None) if name_value2 is not None: name_instance2 = name_value2 custom_domain_instance.name = name_instance2 use_sub_domain_value = custom_domain_value.get('useSubDomain', None) if use_sub_domain_value is not None: use_sub_domain_instance = use_sub_domain_value custom_domain_instance.use_sub_domain = use_sub_domain_instance secondary_endpoints_value = properties_value.get('secondaryEndpoints', None) if secondary_endpoints_value is not None: secondary_endpoints_instance = Endpoints() storage_account_json_instance.secondary_endpoints = secondary_endpoints_instance blob_value2 = secondary_endpoints_value.get('blob', None) if blob_value2 is not None: blob_instance2 = blob_value2 secondary_endpoints_instance.blob = blob_instance2 queue_value2 = secondary_endpoints_value.get('queue', None) if queue_value2 is not None: queue_instance2 = queue_value2 secondary_endpoints_instance.queue = queue_instance2 table_value2 = secondary_endpoints_value.get('table', None) if table_value2 is not None: table_instance2 = table_value2 secondary_endpoints_instance.table = table_instance2 next_link_value = response_doc.get('nextLink', None) if next_link_value is not None: next_link_instance = next_link_value result.next_link = next_link_instance result.status_code = status_code result.request_id = response.headers.get('x-ms-request-id') return result def list_by_resource_group(self, resource_group_name): """ Lists all the storage accounts available under the given resource group. Note that storage keys are not returned; use the ListKeys operation for this. Args: resource_group_name (string): The name of the resource group within the user’s subscription. Returns: StorageAccountListResponse: The list storage accounts operation response. 
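
        An illustrative call (resource group name is an example):

            listing = client.storage_accounts.list_by_resource_group(
                'my-resource-group')
            for account in listing.storage_accounts:
                print(account.name, account.primary_location)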
""" # Validate if resource_group_name is None: raise ValueError('resource_group_name cannot be None.') # Tracing # Construct URL url = '' url = url + '/subscriptions/' if self.client.credentials.subscription_id is not None: url = url + quote(self.client.credentials.subscription_id) url = url + '/resourceGroups/' url = url + quote(resource_group_name) url = url + '/providers/Microsoft.Storage/storageAccounts' query_parameters = [] query_parameters.append('api-version=2015-05-01-preview') if len(query_parameters) > 0: url = url + '?' + '&'.join(query_parameters) base_url = self.client.base_uri # Trim '/' character from the end of baseUrl and beginning of url. if base_url[len(base_url) - 1] == '/': base_url = base_url[0 : len(base_url) - 1] if url[0] == '/': url = url[1 : ] url = base_url + '/' + url url = url.replace(' ', '%20') # Create HTTP transport objects http_request = Request() http_request.url = url http_request.method = 'GET' # Set Headers http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4()) # Send Request response = self.client.send_request(http_request) body = response.content status_code = response.status_code if status_code != 200: error = AzureHttpError(body, response.status_code) raise error # Create Result result = None # Deserialize Response if status_code == 200: response_content = body result = StorageAccountListResponse(storage_accounts=[]) response_doc = None if response_content: response_doc = json.loads(response_content.decode()) if response_doc is not None: value_array = response_doc.get('value', None) if value_array is not None: for value_value in value_array: storage_account_json_instance = StorageAccount(tags={}) result.storage_accounts.append(storage_account_json_instance) id_value = value_value.get('id', None) if id_value is not None: id_instance = id_value storage_account_json_instance.id = id_instance name_value = value_value.get('name', None) if name_value is not None: name_instance = name_value storage_account_json_instance.name = name_instance type_value = value_value.get('type', None) if type_value is not None: type_instance = type_value storage_account_json_instance.type = type_instance location_value = value_value.get('location', None) if location_value is not None: location_instance = location_value storage_account_json_instance.location = location_instance tags_sequence_element = value_value.get('tags', None) if tags_sequence_element is not None: for property in tags_sequence_element: tags_key = property tags_value = tags_sequence_element[property] storage_account_json_instance.tags[tags_key] = tags_value properties_value = value_value.get('properties', None) if properties_value is not None: provisioning_state_value = properties_value.get('provisioningState', None) if provisioning_state_value is not None: provisioning_state_instance = provisioning_state_value storage_account_json_instance.provisioning_state = provisioning_state_instance account_type_value = properties_value.get('accountType', None) if account_type_value is not None: account_type_instance = account_type_value storage_account_json_instance.account_type = account_type_instance primary_endpoints_value = properties_value.get('primaryEndpoints', None) if primary_endpoints_value is not None: primary_endpoints_instance = Endpoints() storage_account_json_instance.primary_endpoints = primary_endpoints_instance blob_value = primary_endpoints_value.get('blob', None) if blob_value is not None: blob_instance = blob_value primary_endpoints_instance.blob = blob_instance queue_value = 
primary_endpoints_value.get('queue', None) if queue_value is not None: queue_instance = queue_value primary_endpoints_instance.queue = queue_instance table_value = primary_endpoints_value.get('table', None) if table_value is not None: table_instance = table_value primary_endpoints_instance.table = table_instance primary_location_value = properties_value.get('primaryLocation', None) if primary_location_value is not None: primary_location_instance = primary_location_value storage_account_json_instance.primary_location = primary_location_instance status_of_primary_value = properties_value.get('statusOfPrimary', None) if status_of_primary_value is not None: status_of_primary_instance = status_of_primary_value storage_account_json_instance.status_of_primary = status_of_primary_instance last_geo_failover_time_value = properties_value.get('lastGeoFailoverTime', None) if last_geo_failover_time_value is not None: last_geo_failover_time_instance = last_geo_failover_time_value storage_account_json_instance.last_geo_failover_time = last_geo_failover_time_instance secondary_location_value = properties_value.get('secondaryLocation', None) if secondary_location_value is not None: secondary_location_instance = secondary_location_value storage_account_json_instance.secondary_location = secondary_location_instance status_of_secondary_value = properties_value.get('statusOfSecondary', None) if status_of_secondary_value is not None: status_of_secondary_instance = status_of_secondary_value storage_account_json_instance.status_of_secondary = status_of_secondary_instance creation_time_value = properties_value.get('creationTime', None) if creation_time_value is not None: creation_time_instance = creation_time_value storage_account_json_instance.creation_time = creation_time_instance custom_domain_value = properties_value.get('customDomain', None) if custom_domain_value is not None: custom_domain_instance = CustomDomain() storage_account_json_instance.custom_domain = custom_domain_instance name_value2 = custom_domain_value.get('name', None) if name_value2 is not None: name_instance2 = name_value2 custom_domain_instance.name = name_instance2 use_sub_domain_value = custom_domain_value.get('useSubDomain', None) if use_sub_domain_value is not None: use_sub_domain_instance = use_sub_domain_value custom_domain_instance.use_sub_domain = use_sub_domain_instance secondary_endpoints_value = properties_value.get('secondaryEndpoints', None) if secondary_endpoints_value is not None: secondary_endpoints_instance = Endpoints() storage_account_json_instance.secondary_endpoints = secondary_endpoints_instance blob_value2 = secondary_endpoints_value.get('blob', None) if blob_value2 is not None: blob_instance2 = blob_value2 secondary_endpoints_instance.blob = blob_instance2 queue_value2 = secondary_endpoints_value.get('queue', None) if queue_value2 is not None: queue_instance2 = queue_value2 secondary_endpoints_instance.queue = queue_instance2 table_value2 = secondary_endpoints_value.get('table', None) if table_value2 is not None: table_instance2 = table_value2 secondary_endpoints_instance.table = table_instance2 next_link_value = response_doc.get('nextLink', None) if next_link_value is not None: next_link_instance = next_link_value result.next_link = next_link_instance result.status_code = status_code result.request_id = response.headers.get('x-ms-request-id') return result def list_keys(self, resource_group_name, account_name): """ Lists the access keys for the specified storage account. 
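
        An illustrative call (names are examples):

            keys_response = client.storage_accounts.list_keys(
                'my-resource-group', 'mystorageaccount')
            primary_key = keys_response.storage_account_keys.key1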
Args: resource_group_name (string): The name of the resource group. account_name (string): The name of the storage account. Returns: StorageAccountListKeysResponse: The ListKeys operation response. """ # Validate if resource_group_name is None: raise ValueError('resource_group_name cannot be None.') if account_name is None: raise ValueError('account_name cannot be None.') if len(account_name) < 3: raise IndexError('account_name is outside the valid range.') if len(account_name) > 24: raise IndexError('account_name is outside the valid range.') for account_name_char in account_name: if account_name_char.islower() == False and account_name_char.isdigit() == False: raise IndexError('account_name is outside the valid range.') # Tracing # Construct URL url = '' url = url + '/subscriptions/' if self.client.credentials.subscription_id is not None: url = url + quote(self.client.credentials.subscription_id) url = url + '/resourceGroups/' url = url + quote(resource_group_name) url = url + '/providers/Microsoft.Storage/storageAccounts/' url = url + quote(account_name) url = url + '/listKeys' query_parameters = [] query_parameters.append('api-version=2015-05-01-preview') if len(query_parameters) > 0: url = url + '?' + '&'.join(query_parameters) base_url = self.client.base_uri # Trim '/' character from the end of baseUrl and beginning of url. if base_url[len(base_url) - 1] == '/': base_url = base_url[0 : len(base_url) - 1] if url[0] == '/': url = url[1 : ] url = base_url + '/' + url url = url.replace(' ', '%20') # Create HTTP transport objects http_request = Request() http_request.url = url http_request.method = 'POST' # Set Headers http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4()) # Send Request response = self.client.send_request(http_request) body = response.content status_code = response.status_code if status_code != 200: error = AzureHttpError(body, response.status_code) raise error # Create Result result = None # Deserialize Response if status_code == 200: response_content = body result = StorageAccountListKeysResponse() response_doc = None if response_content: response_doc = json.loads(response_content.decode()) if response_doc is not None: storage_account_keys_instance = StorageAccountKeys() result.storage_account_keys = storage_account_keys_instance key1_value = response_doc.get('key1', None) if key1_value is not None: key1_instance = key1_value storage_account_keys_instance.key1 = key1_instance key2_value = response_doc.get('key2', None) if key2_value is not None: key2_instance = key2_value storage_account_keys_instance.key2 = key2_instance result.status_code = status_code result.request_id = response.headers.get('x-ms-request-id') return result def regenerate_key(self, resource_group_name, account_name, regenerate_key): """ Regenerates the access keys for the specified storage account. Args: resource_group_name (string): The name of the resource group within the user’s subscription. account_name (string): The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only. regenerate_key (KeyName): Specifies name of the key which should be regenerated. Returns: StorageAccountRegenerateKeyResponse: The RegenerateKey operation response. 
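
        An illustrative call (names are examples):

            regen = client.storage_accounts.regenerate_key(
                'my-resource-group', 'mystorageaccount', KeyName.key2)
            new_key2 = regen.storage_account_keys.key2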
""" # Validate if resource_group_name is None: raise ValueError('resource_group_name cannot be None.') if account_name is None: raise ValueError('account_name cannot be None.') if len(account_name) < 3: raise IndexError('account_name is outside the valid range.') if len(account_name) > 24: raise IndexError('account_name is outside the valid range.') for account_name_char in account_name: if account_name_char.islower() == False and account_name_char.isdigit() == False: raise IndexError('account_name is outside the valid range.') if regenerate_key is None: raise ValueError('regenerate_key cannot be None.') # Tracing # Construct URL url = '' url = url + '/subscriptions/' if self.client.credentials.subscription_id is not None: url = url + quote(self.client.credentials.subscription_id) url = url + '/resourceGroups/' url = url + quote(resource_group_name) url = url + '/providers/Microsoft.Storage/storageAccounts/' url = url + quote(account_name) url = url + '/regenerateKey' query_parameters = [] query_parameters.append('api-version=2015-05-01-preview') if len(query_parameters) > 0: url = url + '?' + '&'.join(query_parameters) base_url = self.client.base_uri # Trim '/' character from the end of baseUrl and beginning of url. if base_url[len(base_url) - 1] == '/': base_url = base_url[0 : len(base_url) - 1] if url[0] == '/': url = url[1 : ] url = base_url + '/' + url url = url.replace(' ', '%20') # Create HTTP transport objects http_request = Request() http_request.url = url http_request.method = 'POST' # Set Headers http_request.headers['Content-Type'] = 'application/json' http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4()) # Serialize Request request_content = None request_doc = None storage_account_regenerate_key_parameters_value = {} request_doc = storage_account_regenerate_key_parameters_value storage_account_regenerate_key_parameters_value['keyName'] = str(regenerate_key) if regenerate_key is not None else 'Key1' request_content = json.dumps(request_doc) http_request.data = request_content http_request.headers['Content-Length'] = len(request_content) # Send Request response = self.client.send_request(http_request) body = response.content status_code = response.status_code if status_code != 200: error = AzureHttpError(body, response.status_code) raise error # Create Result result = None # Deserialize Response if status_code == 200: response_content = body result = StorageAccountRegenerateKeyResponse() response_doc = None if response_content: response_doc = json.loads(response_content.decode()) if response_doc is not None: storage_account_keys_instance = StorageAccountKeys() result.storage_account_keys = storage_account_keys_instance key1_value = response_doc.get('key1', None) if key1_value is not None: key1_instance = key1_value storage_account_keys_instance.key1 = key1_instance key2_value = response_doc.get('key2', None) if key2_value is not None: key2_instance = key2_value storage_account_keys_instance.key2 = key2_instance result.status_code = status_code result.request_id = response.headers.get('x-ms-request-id') return result def update(self, resource_group_name, account_name, parameters): """ Updates the account type or tags for a storage account. It can also be used to add a custom domain (note that custom domains cannot be added via the Create operation). Only one custom domain is supported per storage account. This API can only be used to update one of tags, accountType, or customDomain per call. 
To update multiple of these properties, call the API multiple times with one change per call. This call does not change the storage keys for the account. If you want to change storage account keys, use the RegenerateKey operation. The location and name of the storage account cannot be changed after creation. Args: resource_group_name (string): The name of the resource group within the user’s subscription. account_name (string): The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only. parameters (StorageAccountUpdateParameters): The parameters to update on the account. Note that only one property can be changed at a time using this API. Returns: StorageAccountUpdateResponse: The Update storage account operation response. """ # Validate if resource_group_name is None: raise ValueError('resource_group_name cannot be None.') if account_name is None: raise ValueError('account_name cannot be None.') if len(account_name) < 3: raise IndexError('account_name is outside the valid range.') if len(account_name) > 24: raise IndexError('account_name is outside the valid range.') for account_name_char in account_name: if account_name_char.islower() == False and account_name_char.isdigit() == False: raise IndexError('account_name is outside the valid range.') if parameters is None: raise ValueError('parameters cannot be None.') if parameters.custom_domain is not None: if parameters.custom_domain.name is None: raise ValueError('parameters.custom_domain.name cannot be None.') # Tracing # Construct URL url = '' url = url + '/subscriptions/' if self.client.credentials.subscription_id is not None: url = url + quote(self.client.credentials.subscription_id) url = url + '/resourceGroups/' url = url + quote(resource_group_name) url = url + '/providers/Microsoft.Storage/storageAccounts/' url = url + quote(account_name) query_parameters = [] query_parameters.append('api-version=2015-05-01-preview') if len(query_parameters) > 0: url = url + '?' + '&'.join(query_parameters) base_url = self.client.base_uri # Trim '/' character from the end of baseUrl and beginning of url. 
if base_url[len(base_url) - 1] == '/': base_url = base_url[0 : len(base_url) - 1] if url[0] == '/': url = url[1 : ] url = base_url + '/' + url url = url.replace(' ', '%20') # Create HTTP transport objects http_request = Request() http_request.url = url http_request.method = 'PATCH' # Set Headers http_request.headers['Content-Type'] = 'application/json' http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4()) # Serialize Request request_content = None request_doc = None storage_account_update_parameters_json_value = {} request_doc = storage_account_update_parameters_json_value if parameters.tags is not None: tags_dictionary = {} for tags_key in parameters.tags: tags_value = parameters.tags[tags_key] tags_dictionary[tags_key] = tags_value storage_account_update_parameters_json_value['tags'] = tags_dictionary properties_value = {} storage_account_update_parameters_json_value['properties'] = properties_value if parameters.account_type is not None: properties_value['accountType'] = str(parameters.account_type) if parameters.account_type is not None else 'StandardLRS' if parameters.custom_domain is not None: custom_domain_value = {} properties_value['customDomain'] = custom_domain_value custom_domain_value['name'] = parameters.custom_domain.name if parameters.custom_domain.use_sub_domain is not None: custom_domain_value['useSubDomain'] = parameters.custom_domain.use_sub_domain request_content = json.dumps(request_doc) http_request.data = request_content http_request.headers['Content-Length'] = len(request_content) # Send Request response = self.client.send_request(http_request) body = response.content status_code = response.status_code if status_code != 200: error = AzureHttpError(body, response.status_code) raise error # Create Result result = None # Deserialize Response if status_code == 200: response_content = body result = StorageAccountUpdateResponse() response_doc = None if response_content: response_doc = json.loads(response_content.decode()) if response_doc is not None: storage_account_instance = StorageAccount(tags={}) result.storage_account = storage_account_instance id_value = response_doc.get('id', None) if id_value is not None: id_instance = id_value storage_account_instance.id = id_instance name_value = response_doc.get('name', None) if name_value is not None: name_instance = name_value storage_account_instance.name = name_instance type_value = response_doc.get('type', None) if type_value is not None: type_instance = type_value storage_account_instance.type = type_instance location_value = response_doc.get('location', None) if location_value is not None: location_instance = location_value storage_account_instance.location = location_instance tags_sequence_element = response_doc.get('tags', None) if tags_sequence_element is not None: for property in tags_sequence_element: tags_key2 = property tags_value2 = tags_sequence_element[property] storage_account_instance.tags[tags_key2] = tags_value2 properties_value2 = response_doc.get('properties', None) if properties_value2 is not None: provisioning_state_value = properties_value2.get('provisioningState', None) if provisioning_state_value is not None: provisioning_state_instance = provisioning_state_value storage_account_instance.provisioning_state = provisioning_state_instance account_type_value = properties_value2.get('accountType', None) if account_type_value is not None: account_type_instance = account_type_value storage_account_instance.account_type = account_type_instance primary_endpoints_value = 
properties_value2.get('primaryEndpoints', None) if primary_endpoints_value is not None: primary_endpoints_instance = Endpoints() storage_account_instance.primary_endpoints = primary_endpoints_instance blob_value = primary_endpoints_value.get('blob', None) if blob_value is not None: blob_instance = blob_value primary_endpoints_instance.blob = blob_instance queue_value = primary_endpoints_value.get('queue', None) if queue_value is not None: queue_instance = queue_value primary_endpoints_instance.queue = queue_instance table_value = primary_endpoints_value.get('table', None) if table_value is not None: table_instance = table_value primary_endpoints_instance.table = table_instance primary_location_value = properties_value2.get('primaryLocation', None) if primary_location_value is not None: primary_location_instance = primary_location_value storage_account_instance.primary_location = primary_location_instance status_of_primary_value = properties_value2.get('statusOfPrimary', None) if status_of_primary_value is not None: status_of_primary_instance = status_of_primary_value storage_account_instance.status_of_primary = status_of_primary_instance last_geo_failover_time_value = properties_value2.get('lastGeoFailoverTime', None) if last_geo_failover_time_value is not None: last_geo_failover_time_instance = last_geo_failover_time_value storage_account_instance.last_geo_failover_time = last_geo_failover_time_instance secondary_location_value = properties_value2.get('secondaryLocation', None) if secondary_location_value is not None: secondary_location_instance = secondary_location_value storage_account_instance.secondary_location = secondary_location_instance status_of_secondary_value = properties_value2.get('statusOfSecondary', None) if status_of_secondary_value is not None: status_of_secondary_instance = status_of_secondary_value storage_account_instance.status_of_secondary = status_of_secondary_instance creation_time_value = properties_value2.get('creationTime', None) if creation_time_value is not None: creation_time_instance = creation_time_value storage_account_instance.creation_time = creation_time_instance custom_domain_value2 = properties_value2.get('customDomain', None) if custom_domain_value2 is not None: custom_domain_instance = CustomDomain() storage_account_instance.custom_domain = custom_domain_instance name_value2 = custom_domain_value2.get('name', None) if name_value2 is not None: name_instance2 = name_value2 custom_domain_instance.name = name_instance2 use_sub_domain_value = custom_domain_value2.get('useSubDomain', None) if use_sub_domain_value is not None: use_sub_domain_instance = use_sub_domain_value custom_domain_instance.use_sub_domain = use_sub_domain_instance secondary_endpoints_value = properties_value2.get('secondaryEndpoints', None) if secondary_endpoints_value is not None: secondary_endpoints_instance = Endpoints() storage_account_instance.secondary_endpoints = secondary_endpoints_instance blob_value2 = secondary_endpoints_value.get('blob', None) if blob_value2 is not None: blob_instance2 = blob_value2 secondary_endpoints_instance.blob = blob_instance2 queue_value2 = secondary_endpoints_value.get('queue', None) if queue_value2 is not None: queue_instance2 = queue_value2 secondary_endpoints_instance.queue = queue_instance2 table_value2 = secondary_endpoints_value.get('table', None) if table_value2 is not None: table_instance2 = table_value2 secondary_endpoints_instance.table = table_instance2 result.status_code = status_code result.request_id = 
response.headers.get('x-ms-request-id')

        return result
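# --- Hedged usage sketch (not part of the original module). Assuming these
# operations are exposed as an attribute (here called `storage_accounts`) of
# some service client object -- the accessor name and credential handling
# below are illustrative, not confirmed by this file:
#
#   ops = client.storage_accounts                                # hypothetical accessor
#   keys = ops.list_keys('my-resource-group', 'mystorageacct')
#   print(keys.storage_account_keys.key1, keys.storage_account_keys.key2)
#
#   # Regenerate key1, then re-read it from the response:
#   regenerated = ops.regenerate_key('my-resource-group', 'mystorageacct', 'key1')
#   print(regenerated.storage_account_keys.key1)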
1.882813
2
src/nucleotide/component/linux/gcc/atom/rtl.py
dmilos/nucleotide
1
12859
<filename>src/nucleotide/component/linux/gcc/atom/rtl.py
#!/usr/bin/env python2

# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import platform

import nucleotide
import nucleotide.component
import nucleotide.component.function


def _linux_RTL_LINKFLAGS(P_data):
    I_flag = ''
    # if 'dynamic' == P_data['type']:
    #     I_flag += 'D'
    if 'static' == P_data['type']:
        I_flag += '-static'
    return [I_flag]


atom_linux_RTL = {
    'platform': {
        'host': 'Linux',
        'guest': 'Linux'
    },
    'cc': {
        'vendor': 'FSF',
        'name': 'gcc',
        'version': 'X'
    },
    'config': {
        'LINKFLAGS': _linux_RTL_LINKFLAGS
    },
    'name': 'RTL',
    'class': ['RTL', 'linux:RTL']
}


class RTL:

    def __init__(self):
        pass

    @staticmethod
    def extend(P_option):
        nucleotide.component.function.extend(P_option, 'A:linux:RTL', atom_linux_RTL)
        atom_linux_RTL['platform']['host'] = 'X'
        nucleotide.component.function.extend(P_option, 'x:linux:RTL', atom_linux_RTL)
        atom_linux_RTL['platform']['guest'] = 'X'
        nucleotide.component.function.extend(P_option, 'y:linux:RTL', atom_linux_RTL)

    @staticmethod
    def check():
        pass
2.15625
2
notebooks/denerator_tests/actions/config.py
Collen-Roller/Rasa-Denerator
11
12860
<filename>notebooks/denerator_tests/actions/config.py
import os

policy_model_dir = os.environ.get("POLICY_MODEL_DIR", "models/dialogue/")
rasa_nlu_config = os.environ.get("RASA_NLU_CONFIG", "nlu_config.yml")
account_sid = os.environ.get("ACCOUNT_SID", "")
auth_token = os.environ.get("AUTH_TOKEN", "")
twilio_number = os.environ.get("TWILIO_NUMBER", "")
platform_api = os.environ.get("RASA_API_ENDPOINT_URL", "")
self_port = int(os.environ.get("SELF_PORT", "5001"))
core_model_dir = os.environ.get("CORE_MODEL_DIR", "models/dialogue/")
remote_core_endpoint = os.environ.get("RASA_REMOTE_CORE_ENDPOINT_URL", "")
rasa_core_token = os.environ.get("RASA_CORE_TOKEN", "")
mailchimp_api_key = os.environ.get("MAILCHIMP_API_KEY", "")
mailchimp_list = os.environ.get("MAILCHIMP_LIST", "")
gdrive_credentials = os.environ.get("GDRIVE_CREDENTIALS", "")
access_token = os.environ.get("TELEGRAM_TOKEN", "")
verify = os.environ.get("TELEGRAM_VERIFY", "rasas_bot")
webhook_url = os.environ.get("WEBHOOK_URL", "https://website-demo.rasa.com/webhook")
rasa_platform_token = os.environ.get("RASA_PLATFORM_TOKEN", "")
rasa_nlg_endpoint = os.environ.get("RASA_NLG_ENDPOINT_URL", "")
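# --- Hedged usage sketch (not part of the original file). Because every
# setting falls back to os.environ.get, values can be overridden from the
# shell before the module is imported, or programmatically for tests
# (the import name `config` assumes this module is on the path):
#
#   import importlib
#   import os
#
#   import config
#
#   os.environ["SELF_PORT"] = "6000"
#   importlib.reload(config)      # re-evaluates the module-level lookups
#   assert config.self_port == 6000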
1.851563
2
python/moderation_text_token_demo.py
huaweicloud/huaweicloud-sdk-moderation
8
12861
<filename>python/moderation_text_token_demo.py
# -*- coding:utf-8 -*-
from moderation_sdk.gettoken import get_token
from moderation_sdk.moderation_text import moderation_text
from moderation_sdk.utils import init_global_env

if __name__ == '__main__':
    # The service is currently supported in North China-Beijing (cn-north-4),
    # China East-Shanghai1 (cn-east-3), CN-Hong Kong (ap-southeast-1) and
    # AP-Singapore (ap-southeast-3).
    init_global_env('cn-north-4')

    #
    # Access the text moderation service, posting data with a token.
    #
    user_name = '******'
    password = '******'
    account_name = '******'  # usually the same as user_name
    token = get_token(user_name, password, account_name)

    # Call the interface with the text to be checked.
    result = moderation_text(token, '<PASSWORD>请+110亚砷酸钾六位qq,fuck666666666666666', 'content',
                             ['ad', 'politics', 'porn', 'abuse', 'contraband', 'flood'])
    print(result)
2.484375
2
tests/system/action/test_general.py
FinnStutzenstein/openslides-backend
0
12862
from .base import BaseActionTestCase


class GeneralActionWSGITester(BaseActionTestCase):
    """
    Tests the action WSGI application in general.
    """

    def test_request_wrong_method(self) -> None:
        response = self.client.get("/")
        self.assert_status_code(response, 405)

    def test_request_wrong_media_type(self) -> None:
        response = self.client.post("/")
        self.assert_status_code(response, 400)
        self.assertIn("Wrong media type.", response.json["message"])

    def test_request_missing_body(self) -> None:
        response = self.client.post("/", content_type="application/json")
        self.assert_status_code(response, 400)
        self.assertIn("Failed to decode JSON object", response.json["message"])

    def test_request_fuzzy_body(self) -> None:
        response = self.client.post(
            "/",
            json={"fuzzy_key_Eeng7pha3a": "fuzzy_value_eez3Ko6quu"},
        )
        self.assert_status_code(response, 400)
        self.assertIn("data must be array", response.json["message"])

    def test_request_fuzzy_body_2(self) -> None:
        response = self.client.post(
            "/",
            json=[{"fuzzy_key_Voh8in7aec": "fuzzy_value_phae3iew4W"}],
        )
        self.assert_status_code(response, 400)
        self.assertIn(
            "data[0] must contain ['action', 'data'] properties",
            response.json["message"],
        )

    def test_request_no_existing_action(self) -> None:
        response = self.request("fuzzy_action_hamzaeNg4a", {})
        self.assert_status_code(response, 400)
        self.assertIn(
            "Action fuzzy_action_hamzaeNg4a does not exist.",
            response.json["message"],
        )

    def test_health_route(self) -> None:
        response = self.client.get("/health")
        self.assert_status_code(response, 200)
        self.assertIn("healthinfo", response.json)
        actions = response.json["healthinfo"]["actions"]
        some_example_actions = (
            "topic.create",
            "motion.delete",
            "user.update_temporary",
        )
        for action in some_example_actions:
            self.assertIn(action, actions.keys())
2.3125
2
src/thead/cls/amsart.py
jakub-oprsal/thead
0
12863
from .common import * HEADER = r'''\usepackage{tikz} \definecolor{purple}{cmyk}{0.55,1,0,0.15} \definecolor{darkblue}{cmyk}{1,0.58,0,0.21} \usepackage[colorlinks, linkcolor=black, urlcolor=darkblue, citecolor=purple]{hyperref} \urlstyle{same} \newtheorem{theorem}{Theorem}[section] \newtheorem{lemma}[theorem]{Lemma} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{conjecture}[theorem]{Conjecture} \newtheorem{claim}[theorem]{Claim} \theoremstyle{definition} \newtheorem{definition}[theorem]{Definition} \newtheorem{example}[theorem]{Example} \newtheorem{remark}[theorem]{Remark} ''' def render_pdfmeta(authors, title): author_list = authors_list(authors, short=True) return f'''\\hypersetup{{% pdftitle = {{{title}}}, pdfauthor = {{{author_list}}}}}\n''' def render_author(author): out = render_command('author', author['name']) if 'affiliation' in author: out += render_command('address', ", ".join(value for _, value in author['affiliation'].items())) if 'email' in author: out += render_command('email', author['email']) return out def render_funding(funds): funding_note = '\n'.join(grant['note'] for grant in funds if 'note' in grant) return render_command('thanks', funding_note) def render_acks(acks): return f'\\subsection*{{Acknowledgements}}\n\n{acks.strip()}\n' def header(data, cname=None, classoptions=[], **kwargs): if cname is None: cname = 'amsart' if 'noheader' in classoptions: classoptions.remove('noheader') include_header = False else: include_header = True headers = [ render_command( 'documentclass', cname, ','.join(classoptions)), render_encs] if include_header: headers.append(HEADER) if 'include' in kwargs: headers += [include(file) for file in kwargs['include']] shorttitle = data['shorttitle'] if 'shorttitle' in data else '' headers += [ render_pdfmeta(data['authors'], data['title']), begin_document, render_command('title', data['title'], shorttitle), '\n'.join(map(render_author, data['authors']))] if 'funding' in data: headers.append(render_funding(data['funding'])) if 'abstract' in data: headers.append(render_abstract(data['abstract'])) if 'keywords' in data: headers.append(render_keywords(data['keywords'])) headers += [maketitle, ''] return '\n'.join(headers) def footer(data, bib): footers = [''] if 'acknowledgements' in data: # and not anonymous: footers.append(render_acks(data['acknowledgements'])) if bib: footers.append(render_bib('alphaurl', bib)) footers.append(end_document) return '\n'.join(footers)
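# --- Hedged usage sketch (not part of the original module). Assuming a
# metadata dict shaped like the keys this module reads ('title', 'authors',
# and optionally 'shorttitle', 'funding', 'abstract', 'keywords'), the LaTeX
# preamble and footer could be rendered like this (values illustrative):
#
#   data = {
#       'title': 'On Widgets',
#       'authors': [{'name': 'Ada Lovelace', 'email': 'ada@example.org'}],
#       'abstract': 'We study widgets.',
#   }
#   print(header(data, classoptions=['11pt']))
#   print(footer(data, bib='refs.bib'))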
2.3125
2
Workflow/packages/__init__.py
MATS64664-2021-Group-2/Hydride-Connect-Group-2
0
12864
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 15 11:31:06 2021

@author: a77510jm
"""
1.007813
1
in_progress/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Aligned_Rotator.py
fedelopezar/nrpytutorial
1
12865
#!/usr/bin/env python # coding: utf-8 # <a id='top'></a> # # # # $\texttt{GiRaFFEfood}$: Initial data for $\texttt{GiRaFFE}$ # # ## Aligned Rotator # # $$\label{top}$$ # # This module provides another initial data option for $\texttt{GiRaFFE}$. This is a flat-spacetime test with initial data $$A_{\phi} = \frac{\mu \varpi}{r^3},$$ where $\mu = B_p R_{\rm NS} / 2$, $R_{\rm NS}$ is the neutron star radius, and $\varpi = \sqrt{x^2+y^2}$ is the cylindrical radius. We let $A_r = A_\theta = 0$. # # Additionally, the drift velocity $v^i = \Omega \textbf{e}_z \times \textbf{r} = [ijk] \Omega \textbf{e}^j_z x^k$, where $[ijk]$ is the Levi-Civita permutation symbol and $\textbf{e}^i_z = (0,0,1)$. # <a id='preliminaries'></a> # # ### Steps 0-1: Preliminaries # $$\label{preliminaries}$$ # # \[Back to [top](#top)\] # # Here, we will import the NRPy+ core modules and set the reference metric to Cartesian, set commonly used NRPy+ parameters, and set C parameters that will be set from outside the code eventually generated from these expressions. We will also set up a parameter to determine what initial data is set up, although it won't do much yet. # Step 0: Import the NRPy+ core modules and set the reference metric to Cartesian import NRPy_param_funcs as par import indexedexp as ixp import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends import reference_metric as rfm par.set_parval_from_str("reference_metric::CoordSystem","Cartesian") rfm.reference_metric() # Step 1a: Set commonly used parameters. thismodule = __name__ B_p_aligned_rotator,R_NS_aligned_rotator = par.Cparameters("REAL",thismodule, # B_p_aligned_rotator = the intensity of the magnetic field and # R_NS_aligned_rotator= "Neutron star" radius ["B_p_aligned_rotator","R_NS_aligned_rotator"], [1e-5, 1.0]) # The angular velocity of the "neutron star" Omega_aligned_rotator = par.Cparameters("REAL",thismodule,"Omega_aligned_rotator",1e3) # <a id='step2'></a> # # ### Step 2: Set the vectors A in Spherical coordinates # $$\label{step2}$$ # # \[Back to [top](#top)\] # # We will first build the fundamental vector $A_i$ in spherical coordinates (see [Table 3](https://arxiv.org/pdf/1704.00599.pdf)). Note that we use reference_metric.py to set $r$ and $\theta$ in terms of Cartesian coordinates; this will save us a step later when we convert to Cartesian coordinates. So, we set # \begin{align} # A_{\phi} &= \frac{\mu \varpi}{r^3}, \\ # \end{align} # with $\mu = B_p R_{\rm NS} / 2$, $R_{\rm NS}$ is the neutron star radius, and $\varpi = \sqrt{x^2+y^2}$ def GiRaFFEfood_NRPy_Aligned_Rotator(): r = rfm.xxSph[0] varpi = sp.sqrt(rfm.xx_to_Cart[0]**2 + rfm.xx_to_Cart[1]**2) mu = B_p_aligned_rotator * R_NS_aligned_rotator**3 / 2 ASphD = ixp.zerorank1() ASphD[2] = mu * varpi**2 / (r**3) # The other components were already declared to be 0. # <a id='step3'></a> # # ### Step 3: Use the Jacobian matrix to transform the vectors to Cartesian coordinates. # $$\label{step3}$$ # # \[Back to [top](#top)\] # # Now, we will use the coordinate transformation definitions provided by reference_metric.py to build the Jacobian # $$ # \frac{\partial x_{\rm Sph}^j}{\partial x_{\rm Cart}^i}, # $$ # where $x_{\rm Sph}^j \in \{r,\theta,\phi\}$ and $x_{\rm Cart}^i \in \{x,y,z\}$. We would normally compute its inverse, but since none of the quantities we need to transform have upper indices, it is not necessary. 
Then, since $A_i$ has one lower index, it will need to be multiplied by the Jacobian:
#
# $$
# A_i^{\rm Cart} = A_j^{\rm Sph} \frac{\partial x_{\rm Sph}^j}{\partial x_{\rm Cart}^i},
# $$

    # Step 3: Use the Jacobian matrix to transform the vectors to Cartesian coordinates.
    drrefmetric__dx_0UDmatrix = sp.Matrix([[sp.diff(rfm.xxSph[0], rfm.xx[0]), sp.diff(rfm.xxSph[0], rfm.xx[1]), sp.diff(rfm.xxSph[0], rfm.xx[2])],
                                           [sp.diff(rfm.xxSph[1], rfm.xx[0]), sp.diff(rfm.xxSph[1], rfm.xx[1]), sp.diff(rfm.xxSph[1], rfm.xx[2])],
                                           [sp.diff(rfm.xxSph[2], rfm.xx[0]), sp.diff(rfm.xxSph[2], rfm.xx[1]), sp.diff(rfm.xxSph[2], rfm.xx[2])]])
    # dx__drrefmetric_0UDmatrix = drrefmetric__dx_0UDmatrix.inv()  # We don't actually need this in this case.

    global AD
    AD = ixp.zerorank1(DIM=3)
    for i in range(3):
        for j in range(3):
            # Accumulate the contraction over j: A_i^Cart = sum_j J^j_i A_j^Sph.
            # (+= is required here; a plain assignment would keep only the last term.)
            AD[i] += drrefmetric__dx_0UDmatrix[(j, i)]*ASphD[j]

# <a id='step4'></a>
#
# ### Step 4: Calculate $v^i$
# $$\label{step4}$$
#
# \[Back to [top](#top)\]
#
# Here, we will calculate the drift velocity $v^i = \Omega \textbf{e}_z \times \textbf{r} = [ijk] \Omega \textbf{e}^j_z x^k$, where $[ijk]$ is the Levi-Civita permutation symbol and $\textbf{e}^i_z = (0,0,1)$. Conveniently, in flat space, the drift velocity reduces to the Valencia velocity because $\alpha = 1$ and $\beta^i = 0$.

    # Step 4: Calculate v^i
    LeviCivitaSymbolDDD = ixp.LeviCivitaSymbol_dim3_rank3()

    import Min_Max_and_Piecewise_Expressions as noif
    unit_zU = ixp.zerorank1()
    unit_zU[2] = sp.sympify(1)

    global ValenciavU
    ValenciavU = ixp.zerorank1()
    for i in range(3):
        for j in range(3):
            for k in range(3):
                ValenciavU[i] += noif.coord_leq_bound(r, R_NS_aligned_rotator)*LeviCivitaSymbolDDD[i][j][k] * Omega_aligned_rotator * unit_zU[j] * rfm.xx[k]

# ### NRPy+ Module Code Validation
#
# \[Back to [top](#top)\]
#
# Here, as a code validation check, we verify agreement in the SymPy expressions for the $\texttt{GiRaFFE}$ Aligned Rotator initial data equations we intend to use between
# 1. this tutorial and
# 2. the NRPy+ [GiRaFFEfood_NRPy_Aligned_Rotator.py](../edit/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Aligned_Rotator.py) module.
#
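# --- Hedged usage sketch (not part of the original module). Assuming the
# NRPy+ dependencies imported above are available, the module is exercised by
# calling the setup function and reading the globals it populates (the import
# name below is illustrative):
#
#   import GiRaFFEfood_NRPy_Aligned_Rotator as gfar
#
#   gfar.GiRaFFEfood_NRPy_Aligned_Rotator()
#   print(gfar.AD[2])          # Cartesian A_z as a SymPy expression
#   print(gfar.ValenciavU[0])  # Valencia 3-velocity component v^x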
2.6875
3
deploys/call_httpx.py
vic9527/ViClassifier
1
12866
""" 比requests更强大python库,让你的爬虫效率提高一倍 https://mp.weixin.qq.com/s/jqGx-4t4ytDDnXxDkzbPqw HTTPX 基础教程 https://zhuanlan.zhihu.com/p/103824900 """ def interface(url, data): import httpx head = {"Content-Type": "application/json; charset=UTF-8"} return httpx.request('POST', url, json=data, headers=head) if __name__ == '__main__': post_url = "http://127.0.0.1:8888" post_data = {"image": 112, "name": 1} response = interface(post_url, post_data) print('status_code: ', response.status_code) # 打印状态码 # print('url: ', response.url) # 打印请求url # print('headers: ', response.headers) # 打印头信息 # print('cookies: ', response.cookies) # 打印cookie信息 print('text: ', response.text) # 以文本形式打印网页源码 # print('content: ', response.content) #以字节流形式打印
3.8125
4
src/rgt/THOR/THOR.py
mguo123/pan_omics
0
12867
#!/usr/bin/env python # -*- coding: utf-8 -*- """ THOR detects differential peaks in multiple ChIP-seq profiles associated with two distinct biological conditions. Copyright (C) 2014-2016 <NAME> (<EMAIL>) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. @author: <NAME> """ # Python from __future__ import print_function import sys # Internal from .dpc_help import get_peaks, _fit_mean_var_distr, initialize, merge_output, handle_input from .tracker import Tracker from .postprocessing import _output_BED, _output_narrowPeak from ..THOR.neg_bin_rep_hmm import NegBinRepHMM, get_init_parameters, _get_pvalue_distr from ..THOR.RegionGiver import RegionGiver from ..THOR.postprocessing import filter_by_pvalue_strand_lag from .. import __version__ # External TEST = False #enable to test THOR locally def _write_info(tracker, report, **data): """Write information to tracker""" tracker.write(text=data['func_para'][0], header="Parameters for both estimated quadr. function y=max(|a|*x^2 + x + |c|, 0) (a)") tracker.write(text=data['func_para'][1], header="Parameters for both estimated quadr. function y=max(|a|*x^2 + x + |c|, 0) (c)") #tracker.write(text=data['init_mu'], header="Inital parameter estimate for HMM's Neg. Bin. Emission distribution (mu)") #tracker.write(text=data['init_alpha'], header="Inital parameter estimate for HMM's Neg. Bin. Emission distribution (alpha)") #tracker.write(text=data['m'].mu, header="Final HMM's Neg. Bin. Emission distribution (mu)") #tracker.write(text=data['m'].alpha, header="Final HMM's Neg. Bin. 
Emission distribution (alpha)") #tracker.write(text=data['m']._get_transmat(), header="Transmission matrix") if report: tracker.make_html() def train_HMM(region_giver, options, bamfiles, genome, chrom_sizes, dims, inputs, tracker): """Train HMM""" while True: train_regions = region_giver.get_training_regionset() exp_data = initialize(name=options.name, dims=dims, genome_path=genome, regions=train_regions, stepsize=options.stepsize, binsize=options.binsize, bamfiles=bamfiles, exts=options.exts, inputs=inputs, exts_inputs=options.exts_inputs, debug=options.debug, verbose=options.verbose, no_gc_content=options.no_gc_content, factors_inputs=options.factors_inputs, chrom_sizes=chrom_sizes, tracker=tracker, norm_regions=options.norm_regions, scaling_factors_ip=options.scaling_factors_ip, save_wig=options.save_wig, housekeeping_genes=options.housekeeping_genes, test=TEST, report=options.report, chrom_sizes_dict=region_giver.get_chrom_dict(), end=True, counter=0, output_bw=False, save_input=options.save_input, m_threshold=options.m_threshold, a_threshold=options.a_threshold, rmdup=options.rmdup) if exp_data.count_positive_signal() > len(train_regions.sequences[0]) * 0.00001: tracker.write(text=" ".join(map(lambda x: str(x), exp_data.exts)), header="Extension size (rep1, rep2, input1, input2)") tracker.write(text=map(lambda x: str(x), exp_data.scaling_factors_ip), header="Scaling factors") break func, func_para = _fit_mean_var_distr(exp_data.overall_coverage, options.name, options.debug, verbose=options.verbose, outputdir=options.outputdir, report=options.report, poisson=options.poisson) exp_data.compute_putative_region_index() print('Compute HMM\'s training set', file=sys.stderr) training_set, s0, s1, s2 = exp_data.get_training_set(TEST, exp_data, options.name, options.foldchange, options.threshold, options.size_ts, 3) init_alpha, init_mu = get_init_parameters(s0, s1, s2) m = NegBinRepHMM(alpha=init_alpha, mu=init_mu, dim_cond_1=dims[0], dim_cond_2=dims[1], func=func) training_set_obs = exp_data.get_observation(training_set) print('Train HMM', file=sys.stderr) m.fit([training_set_obs], options.hmm_free_para) distr = _get_pvalue_distr(m.mu, m.alpha, tracker) return m, exp_data, func_para, init_mu, init_alpha, distr def run_HMM(region_giver, options, bamfiles, genome, chrom_sizes, dims, inputs, tracker, exp_data, m, distr): """Run trained HMM chromosome-wise on genomic signal and call differential peaks""" output, pvalues, ratios, no_bw_files = [], [], [], [] print("Compute HMM's posterior probabilities and Viterbi path to call differential peaks", file=sys.stderr) for i, r in enumerate(region_giver): end = True if i == len(region_giver) - 1 else False print("- taking into account %s" % r.sequences[0].chrom, file=sys.stderr) exp_data = initialize(name=options.name, dims=dims, genome_path=genome, regions=r, stepsize=options.stepsize, binsize=options.binsize, bamfiles=bamfiles, exts=exp_data.exts, inputs=inputs, exts_inputs=exp_data.exts_inputs, debug=options.debug, verbose=False, no_gc_content=options.no_gc_content, factors_inputs=exp_data.factors_inputs, chrom_sizes=chrom_sizes, tracker=tracker, norm_regions=options.norm_regions, scaling_factors_ip=exp_data.scaling_factors_ip, save_wig=options.save_wig, housekeeping_genes=options.housekeeping_genes, test=TEST, report=False, chrom_sizes_dict=region_giver.get_chrom_dict(), gc_content_cov=exp_data.gc_content_cov, avg_gc_content=exp_data.avg_gc_content, gc_hist=exp_data.gc_hist, end=end, counter=i, m_threshold=options.m_threshold, 
a_threshold=options.a_threshold, rmdup=options.rmdup) if exp_data.no_data: continue no_bw_files.append(i) exp_data.compute_putative_region_index() if exp_data.indices_of_interest is None: continue states = m.predict(exp_data.get_observation(exp_data.indices_of_interest)) inst_ratios, inst_pvalues, inst_output = get_peaks(name=options.name, states=states, DCS=exp_data, distr=distr, merge=options.merge, exts=exp_data.exts, pcutoff=options.pcutoff, debug=options.debug, p=options.par, no_correction=options.no_correction, merge_bin=options.merge_bin, deadzones=options.deadzones) # if not inst_output: output += inst_output pvalues += inst_pvalues ratios += inst_ratios res_output, res_pvalues, res_filter_pass = filter_by_pvalue_strand_lag(ratios, options.pcutoff, pvalues, output, options.no_correction, options.name, options.singlestrand) _output_BED(options.name, res_output, res_pvalues, res_filter_pass) _output_narrowPeak(options.name, res_output, res_pvalues, res_filter_pass) merge_output(bamfiles, dims, options, no_bw_files, chrom_sizes) def main(): options, bamfiles, genome, chrom_sizes, dims, inputs = handle_input() tracker = Tracker(options.name + '-setup.info', bamfiles, genome, chrom_sizes, dims, inputs, options, __version__) region_giver = RegionGiver(chrom_sizes, options.regions) m, exp_data, func_para, init_mu, init_alpha, distr = train_HMM(region_giver, options, bamfiles, genome, chrom_sizes, dims, inputs, tracker) run_HMM(region_giver, options, bamfiles, genome, chrom_sizes, dims, inputs, tracker, exp_data, m, distr) _write_info(tracker, options.report, func_para=func_para, init_mu=init_mu, init_alpha=init_alpha, m=m)
2.09375
2
web/api/classroom.py
bbougon/crm-pilates
0
12868
from http import HTTPStatus from typing import Tuple from uuid import UUID from fastapi import status, APIRouter, Response, Depends, HTTPException from command.command_handler import Status from domain.classroom.classroom_creation_command_handler import ClassroomCreated from domain.classroom.classroom_type import ClassroomSubject from domain.commands import ClassroomCreationCommand, ClassroomPatchCommand from domain.exceptions import DomainException, AggregateNotFoundException from infrastructure.command_bus_provider import CommandBusProvider from web.presentation.domain.detailed_classroom import DetailedClassroom from web.presentation.service.classroom_service import get_detailed_classroom from web.schema.classroom_response import ClassroomReadResponse, ClassroomCreatedResponse from web.schema.classroom_schemas import ClassroomCreation, ClassroomPatch router = APIRouter() @router.post("/classrooms", response_model=ClassroomCreatedResponse, status_code=status.HTTP_201_CREATED, responses={ 201: { "description": "Create a classroom", "headers": { "location": { "description": "The absolute path URL location of the newly created classroom", "schema": {"type": "URL"}, } } }, 404: { "description": "See body message details" }, 409: { "description": "See body message details" } } ) def create_classroom(classroom_creation: ClassroomCreation, response: Response, command_bus_provider: CommandBusProvider = Depends(CommandBusProvider)): try: command = ClassroomCreationCommand(classroom_creation.name, classroom_creation.position, classroom_creation.duration, ClassroomSubject[classroom_creation.subject], classroom_creation.start_date, classroom_creation.stop_date, list(map(lambda attendee: attendee.id, classroom_creation.attendees))) from command.response import Response result: Tuple[Response, Status] = command_bus_provider.command_bus.send(command) event: ClassroomCreated = result[0].event response.headers["location"] = f"/classrooms/{event.root_id}" return { "name": event.name, "id": event.root_id, "position": event.position, "subject": event.subject.value, "schedule": { "start": event.schedule.start, "stop": event.schedule.stop }, "duration": ClassroomReadResponse.to_duration(event.duration), "attendees": list(map(lambda attendee: {"id": attendee["id"]}, event.attendees)) } except AggregateNotFoundException as e: raise HTTPException(status_code=HTTPStatus.NOT_FOUND, detail=f"One of the attendees with id '{e.unknown_id}' has not been found") except DomainException as e: raise HTTPException(status_code=HTTPStatus.CONFLICT, detail=e.message) @router.get("/classrooms/{id}", response_model=ClassroomReadResponse, responses={ 404: { "description": "Classroom has not been found" } } ) def get_classroom(id: UUID): try: detailed_classroom: DetailedClassroom = get_detailed_classroom(id) return { "name": detailed_classroom.name, "id": detailed_classroom.id, "position": detailed_classroom.position, "subject": detailed_classroom.subject.value, "schedule": { "start": detailed_classroom.start, "stop": detailed_classroom.stop }, "duration": { "duration": detailed_classroom.duration.duration, "time_unit": detailed_classroom.duration.time_unit }, "attendees": detailed_classroom.attendees } except AggregateNotFoundException: raise HTTPException(status_code=HTTPStatus.NOT_FOUND, detail=f"Classroom with id '{str(id)}' not found") @router.patch("/classrooms/{id}", status_code=status.HTTP_204_NO_CONTENT, description="Add attendees to a classroom. 
This resource works as a patch, " "you must provide all classroom attendees (i.e: you had Clara already added to the classroom," " if you want John to join, you must provide both Clara and John " "otherwise Clara will be removed", responses={ 404: { "description": "See body message details" }, 409: { "description": "See body message details" } } ) def update_classroom(id: UUID, classroom_patch: ClassroomPatch, command_bus_provider: CommandBusProvider = Depends(CommandBusProvider)): try: command_bus_provider.command_bus.send( ClassroomPatchCommand(id, list(map(lambda client: client.id, classroom_patch.attendees)))) except AggregateNotFoundException as e: raise HTTPException(status_code=HTTPStatus.NOT_FOUND, detail=f"One of the attendees with id '{e.unknown_id}' has not been found") except DomainException as e: raise HTTPException(status_code=HTTPStatus.CONFLICT, detail=e.message)
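# --- Hedged usage sketch (not part of the original file). Assuming the router
# above is mounted at the application root, a classroom could be created over
# HTTP roughly like this. The payload keys mirror the attributes read in
# create_classroom (name, position, duration, subject, start_date, attendees),
# but their exact wire format is defined in web.schema.classroom_schemas,
# which is not shown here, so all values below are illustrative only:
#
#   import httpx  # or any HTTP client
#
#   payload = {
#       "name": "Yoga", "position": 6,
#       "duration": {"duration": 1, "time_unit": "HOUR"},   # shape assumed
#       "subject": "MAT",                                   # ClassroomSubject member assumed
#       "start_date": "2021-09-01T10:00:00", "attendees": [],
#   }
#   r = httpx.post("http://localhost:8000/classrooms", json=payload)
#   print(r.status_code, r.headers.get("location"))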
2.265625
2
community_codebook/eda.py
etstieber/ledatascifi-2022
0
12869
<reponame>etstieber/ledatascifi-2022
###############################################################
#
# This function is... INSUFFICIENT. It was developed as an
# illustration of EDA lessons in the 2021 class. It's quick and
# works well.
#
# Want a higher grade version of me? Then try pandas-profiling:
# https://github.com/pandas-profiling/pandas-profiling
#
###############################################################


def insufficient_but_starting_eda(df, cat_vars_list=None):
    '''
    Parameters
    ----------
    df : DATAFRAME
    cat_vars_list : LIST, optional
        A list of strings containing variable names in the dataframe for
        variables where you want to see the number of unique values and the
        10 most common values. Likely used for categorical values.

    Returns
    -------
    None. It simply prints.

    Description
    -------
    This function will print a MINIMUM amount of info about a new dataframe.

    You should ****look**** at all this output below and consider the data
    exploration and cleaning questions from
    https://ledatascifi.github.io/ledatascifi-2021/content/03/02e_eda_golden.html#member

    Also LOOK at more of the data manually.

    Then write up anything notable you observe.

    TIP: put this function in your codebook to reuse easily.

    PROTIP: Improve this function (better outputs, better formatting).

    FEATURE REQUEST: optionally print the nunique and top 10 values under
    the describe matrix

    FEATURE REQUEST: optionally print more stats (percentiles)
    '''
    print(df.head(), '\n---')
    print(df.tail(), '\n---')
    print(df.columns, '\n---')
    print("The shape is: ", df.shape, '\n---')
    print("Info:", df.info(), '\n---')  # memory usage, name, dtype, and # of non-null obs (--> # of missing obs) per variable
    print(df.describe(), '\n---')  # summary stats, and you can customize the list!
    if cat_vars_list is not None:
        for var in cat_vars_list:
            print(var, "has", df[var].nunique(), "values and its top 10 most common are:")
            print(df[var].value_counts().head(10), '\n---')
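# --- Hedged usage sketch (not part of the original file). A minimal call on a
# toy dataframe (values illustrative):
#
#   import pandas as pd
#
#   df = pd.DataFrame({"industry": ["tech", "tech", "retail"], "sales": [5, 7, 3]})
#   insufficient_but_starting_eda(df, cat_vars_list=["industry"])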
3.234375
3
angr/codenode.py
mariusmue/angr
2
12870
import logging l = logging.getLogger("angr.codenode") class CodeNode(object): __slots__ = ['addr', 'size', '_graph', 'thumb'] def __init__(self, addr, size, graph=None, thumb=False): self.addr = addr self.size = size self.thumb = thumb self._graph = graph def __len__(self): return self.size def __eq__(self, other): if type(other) is Block: # pylint: disable=unidiomatic-typecheck raise TypeError("You do not want to be comparing a CodeNode to a Block") return type(self) is type(other) and \ self.addr == other.addr and \ self.size == other.size and \ self.is_hook == other.is_hook and \ self.thumb == other.thumb def __ne__(self, other): return not self == other def __cmp__(self, other): raise TypeError("Comparison with a code node") def __hash__(self): return hash((self.addr, self.size)) def successors(self): if self._graph is None: raise ValueError("Cannot calculate successors for graphless node") return list(self._graph.successors(self)) def predecessors(self): if self._graph is None: raise ValueError("Cannot calculate predecessors for graphless node") return list(self._graph.predecessors(self)) def __getstate__(self): return (self.addr, self.size) def __setstate__(self, dat): self.__init__(*dat) is_hook = None class BlockNode(CodeNode): __slots__ = ['bytestr'] is_hook = False def __init__(self, addr, size, bytestr=None, **kwargs): super(BlockNode, self).__init__(addr, size, **kwargs) self.bytestr = bytestr def __repr__(self): return '<BlockNode at %#x (size %d)>' % (self.addr, self.size) def __getstate__(self): return (self.addr, self.size, self.bytestr, self.thumb) def __setstate__(self, dat): self.__init__(*dat[:-1], thumb=dat[-1]) class HookNode(CodeNode): __slots__ = ['sim_procedure'] is_hook = True def __init__(self, addr, size, sim_procedure, **kwargs): super(HookNode, self).__init__(addr, size, **kwargs) self.sim_procedure = sim_procedure def __repr__(self): return '<HookNode %r at %#x (size %s)>' % (self.sim_procedure, self.addr, self.size) def __hash__(self): return hash((self.addr, self.size, self.sim_procedure)) def __eq__(self, other): return super(HookNode, self).__eq__(other) and \ self.sim_procedure == other.sim_procedure def __getstate__(self): return (self.addr, self.size, self.sim_procedure) def __setstate__(self, dat): self.__init__(*dat) from .block import Block
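# --- Hedged usage sketch (not part of the original module). A minimal
# illustration of the equality and hashing semantics defined above (addresses
# and sizes are made up):
#
#   a = BlockNode(0x400000, 4, bytestr=b"\x90\x90\x90\x90")
#   b = BlockNode(0x400000, 4)
#   assert a == b                # equality ignores bytestr
#   assert hash(a) == hash(b)    # hash uses only (addr, size)
#   assert len(a) == 4           # __len__ returns the node size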
2.9375
3
第12章/program/Requester/Launcher.py
kingname/SourceCodeOfBook
274
12871
import os

scrapy_project_path = '/Users/kingname/book/chapter_12/DeploySpider'
os.chdir(scrapy_project_path)  # switch the working directory to the Scrapy project root before running the command
os.system('scrapyd-deploy')

import json
import time
import requests

start_url = 'http://45.76.110.210:6800/schedule.json'
start_data = {'project': 'DeploySpider',
              'spider': 'Example'}

end_url = 'http://172.16.31.10:6800/cancel.json'
end_data = {'project': 'DeploySpider'}

result = requests.post(start_url, data=start_data, auth=('kingname', 'genius')).text
result = requests.post(end_url, data=end_data, auth=('kingname', 'genius')).text

# result_dict = json.loads(result)
# job_id = result_dict['jobid']
# print(f'Started spider, jobid: {job_id}')
#
# time.sleep(5)
# end_data['job'] = job_id
# result = requests.post(end_url, data=end_data).text
# print(result)
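# --- Hedged usage sketch (not part of the original file). scrapyd also
# exposes a listjobs.json endpoint; a sketch of polling it for this project's
# running spiders (host and credentials as above):
#
#   status = requests.get('http://45.76.110.210:6800/listjobs.json',
#                         params={'project': 'DeploySpider'},
#                         auth=('kingname', 'genius')).json()
#   print(status.get('running', []))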
2.4375
2
unittest_example/mathfunc.py
RobinCPC/experiment_code
0
12872
""" Simple math operating functions for unit test """ def add(a, b): """ Adding to parameters and return result :param a: :param b: :return: """ return a + b def minus(a, b): """ subtraction :param a: :param b: :return: """ return a - b def multi(a, b): """ multiple :param a: :param b: :return: """ return a * b def divide(a, b): """ division :param a: :param b: :return: """ return a // b
3.625
4
test/test_parameters.py
HubukiNinten/imgaug
1
12873
from __future__ import print_function, division, absolute_import import itertools import sys # unittest only added in 3.4 self.subTest() if sys.version_info[0] < 3 or sys.version_info[1] < 4: import unittest2 as unittest else: import unittest # unittest.mock is not available in 2.7 (though unittest2 might contain it?) try: import unittest.mock as mock except ImportError: import mock import matplotlib matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis import numpy as np import six.moves as sm import skimage import skimage.data import skimage.morphology import scipy import scipy.special import imgaug as ia import imgaug.random as iarandom from imgaug import parameters as iap from imgaug.testutils import reseed def _eps(arr): if ia.is_np_array(arr) and arr.dtype.kind == "f": return np.finfo(arr.dtype).eps return 1e-4 class Test_handle_continuous_param(unittest.TestCase): def test_value_range_is_none(self): result = iap.handle_continuous_param( 1, "[test1]", value_range=None, tuple_to_uniform=True, list_to_choice=True) self.assertTrue(isinstance(result, iap.Deterministic)) def test_value_range_is_tuple_of_nones(self): result = iap.handle_continuous_param( 1, "[test1b]", value_range=(None, None), tuple_to_uniform=True, list_to_choice=True) self.assertTrue(isinstance(result, iap.Deterministic)) def test_param_is_stochastic_parameter(self): result = iap.handle_continuous_param( iap.Deterministic(1), "[test2]", value_range=None, tuple_to_uniform=True, list_to_choice=True) self.assertTrue(isinstance(result, iap.Deterministic)) def test_value_range_is_tuple_of_integers(self): result = iap.handle_continuous_param( 1, "[test3]", value_range=(0, 10), tuple_to_uniform=True, list_to_choice=True) self.assertTrue(isinstance(result, iap.Deterministic)) def test_param_is_outside_of_value_range(self): with self.assertRaises(Exception) as context: _ = iap.handle_continuous_param( 1, "[test4]", value_range=(2, 12), tuple_to_uniform=True, list_to_choice=True) self.assertTrue("[test4]" in str(context.exception)) def test_param_is_inside_value_range_and_no_lower_bound(self): # value within value range (without lower bound) result = iap.handle_continuous_param( 1, "[test5]", value_range=(None, 12), tuple_to_uniform=True, list_to_choice=True) self.assertTrue(isinstance(result, iap.Deterministic)) def test_param_is_outside_of_value_range_and_no_lower_bound(self): # value outside of value range (without lower bound) with self.assertRaises(Exception) as context: _ = iap.handle_continuous_param( 1, "[test6]", value_range=(None, 0), tuple_to_uniform=True, list_to_choice=True) self.assertTrue("[test6]" in str(context.exception)) def test_param_is_inside_value_range_and_no_upper_bound(self): # value within value range (without upper bound) result = iap.handle_continuous_param( 1, "[test7]", value_range=(-1, None), tuple_to_uniform=True, list_to_choice=True) self.assertTrue(isinstance(result, iap.Deterministic)) def test_param_is_outside_of_value_range_and_no_upper_bound(self): # value outside of value range (without upper bound) with self.assertRaises(Exception) as context: _ = iap.handle_continuous_param( 1, "[test8]", value_range=(2, None), tuple_to_uniform=True, list_to_choice=True) self.assertTrue("[test8]" in str(context.exception)) def test_tuple_as_value_but_no_tuples_allowed(self): # tuple as value, but no tuples allowed with self.assertRaises(Exception) as context: _ = iap.handle_continuous_param( (1, 2), "[test9]", value_range=None, tuple_to_uniform=False, list_to_choice=True) 
self.assertTrue("[test9]" in str(context.exception)) def test_tuple_as_value_and_tuples_allowed(self): # tuple as value and tuple allowed result = iap.handle_continuous_param( (1, 2), "[test10]", value_range=None, tuple_to_uniform=True, list_to_choice=True) self.assertTrue(isinstance(result, iap.Uniform)) def test_tuple_as_value_and_tuples_allowed_and_inside_value_range(self): # tuple as value and tuple allowed and tuple within value range result = iap.handle_continuous_param( (1, 2), "[test11]", value_range=(0, 10), tuple_to_uniform=True, list_to_choice=True) self.assertTrue(isinstance(result, iap.Uniform)) def test_tuple_value_and_allowed_and_partially_outside_value_range(self): # tuple as value and tuple allowed and tuple partially outside of # value range with self.assertRaises(Exception) as context: _ = iap.handle_continuous_param( (1, 2), "[test12]", value_range=(1.5, 13), tuple_to_uniform=True, list_to_choice=True) self.assertTrue("[test12]" in str(context.exception)) def test_tuple_value_and_allowed_and_fully_outside_value_range(self): # tuple as value and tuple allowed and tuple fully outside of value # range with self.assertRaises(Exception) as context: _ = iap.handle_continuous_param( (1, 2), "[test13]", value_range=(3, 13), tuple_to_uniform=True, list_to_choice=True) self.assertTrue("[test13]" in str(context.exception)) def test_list_as_value_but_no_lists_allowed(self): # list as value, but no list allowed with self.assertRaises(Exception) as context: _ = iap.handle_continuous_param( [1, 2, 3], "[test14]", value_range=None, tuple_to_uniform=True, list_to_choice=False) self.assertTrue("[test14]" in str(context.exception)) def test_list_as_value_and_lists_allowed(self): # list as value and list allowed result = iap.handle_continuous_param( [1, 2, 3], "[test15]", value_range=None, tuple_to_uniform=True, list_to_choice=True) self.assertTrue(isinstance(result, iap.Choice)) def test_list_value_and_allowed_and_partially_outside_value_range(self): # list as value and list allowed and list partially outside of value # range with self.assertRaises(Exception) as context: _ = iap.handle_continuous_param( [1, 2], "[test16]", value_range=(1.5, 13), tuple_to_uniform=True, list_to_choice=True) self.assertTrue("[test16]" in str(context.exception)) def test_list_value_and_allowed_and_fully_outside_of_value_range(self): # list as value and list allowed and list fully outside of value range with self.assertRaises(Exception) as context: _ = iap.handle_continuous_param( [1, 2], "[test17]", value_range=(3, 13), tuple_to_uniform=True, list_to_choice=True) self.assertTrue("[test17]" in str(context.exception)) def test_value_inside_value_range_and_value_range_given_as_callable(self): # single value within value range given as callable def _value_range(x): return -1 < x < 1 result = iap.handle_continuous_param( 1, "[test18]", value_range=_value_range, tuple_to_uniform=True, list_to_choice=True) self.assertTrue(isinstance(result, iap.Deterministic)) def test_bad_datatype_as_value_range(self): # bad datatype for value range with self.assertRaises(Exception) as context: _ = iap.handle_continuous_param( 1, "[test19]", value_range=False, tuple_to_uniform=True, list_to_choice=True) self.assertTrue( "Unexpected input for value_range" in str(context.exception)) class Test_handle_discrete_param(unittest.TestCase): def test_float_value_inside_value_range_but_no_floats_allowed(self): # float value without value range when no float value is allowed with self.assertRaises(Exception) as context: _ = 
iap.handle_discrete_param( 1.5, "[test0]", value_range=None, tuple_to_uniform=True, list_to_choice=True, allow_floats=False) self.assertTrue("[test0]" in str(context.exception)) def test_value_range_is_none(self): # value without value range result = iap.handle_discrete_param( 1, "[test1]", value_range=None, tuple_to_uniform=True, list_to_choice=True, allow_floats=True) self.assertTrue(isinstance(result, iap.Deterministic)) def test_value_range_is_tuple_of_nones(self): # value without value range as (None, None) result = iap.handle_discrete_param( 1, "[test1b]", value_range=(None, None), tuple_to_uniform=True, list_to_choice=True, allow_floats=True) self.assertTrue(isinstance(result, iap.Deterministic)) def test_value_is_stochastic_parameter(self): # stochastic parameter result = iap.handle_discrete_param( iap.Deterministic(1), "[test2]", value_range=None, tuple_to_uniform=True, list_to_choice=True, allow_floats=True) self.assertTrue(isinstance(result, iap.Deterministic)) def test_value_inside_value_range(self): # value within value range result = iap.handle_discrete_param( 1, "[test3]", value_range=(0, 10), tuple_to_uniform=True, list_to_choice=True, allow_floats=True) self.assertTrue(isinstance(result, iap.Deterministic)) def test_value_outside_value_range(self): # value outside of value range with self.assertRaises(Exception) as context: _ = iap.handle_discrete_param( 1, "[test4]", value_range=(2, 12), tuple_to_uniform=True, list_to_choice=True, allow_floats=True) self.assertTrue("[test4]" in str(context.exception)) def test_value_inside_value_range_no_lower_bound(self): # value within value range (without lower bound) result = iap.handle_discrete_param( 1, "[test5]", value_range=(None, 12), tuple_to_uniform=True, list_to_choice=True, allow_floats=True) self.assertTrue(isinstance(result, iap.Deterministic)) def test_value_outside_value_range_no_lower_bound(self): # value outside of value range (without lower bound) with self.assertRaises(Exception) as context: _ = iap.handle_discrete_param( 1, "[test6]", value_range=(None, 0), tuple_to_uniform=True, list_to_choice=True, allow_floats=True) self.assertTrue("[test6]" in str(context.exception)) def test_value_inside_value_range_no_upper_bound(self): # value within value range (without upper bound) result = iap.handle_discrete_param( 1, "[test7]", value_range=(-1, None), tuple_to_uniform=True, list_to_choice=True, allow_floats=True) self.assertTrue(isinstance(result, iap.Deterministic)) def test_value_outside_value_range_no_upper_bound(self): # value outside of value range (without upper bound) with self.assertRaises(Exception) as context: _ = iap.handle_discrete_param( 1, "[test8]", value_range=(2, None), tuple_to_uniform=True, list_to_choice=True, allow_floats=True) self.assertTrue("[test8]" in str(context.exception)) def test_value_is_tuple_but_no_tuples_allowed(self): # tuple as value, but no tuples allowed with self.assertRaises(Exception) as context: _ = iap.handle_discrete_param( (1, 2), "[test9]", value_range=None, tuple_to_uniform=False, list_to_choice=True, allow_floats=True) self.assertTrue("[test9]" in str(context.exception)) def test_value_is_tuple_and_tuples_allowed(self): # tuple as value and tuple allowed result = iap.handle_discrete_param( (1, 2), "[test10]", value_range=None, tuple_to_uniform=True, list_to_choice=True, allow_floats=True) self.assertTrue(isinstance(result, iap.DiscreteUniform)) def test_value_tuple_and_allowed_and_inside_value_range(self): # tuple as value and tuple allowed and tuple within value range 
result = iap.handle_discrete_param( (1, 2), "[test11]", value_range=(0, 10), tuple_to_uniform=True, list_to_choice=True, allow_floats=True) self.assertTrue(isinstance(result, iap.DiscreteUniform)) def test_value_tuple_and_allowed_and_inside_vr_allow_floats_false(self): # tuple as value and tuple allowed and tuple within value range with # allow_floats=False result = iap.handle_discrete_param( (1, 2), "[test11b]", value_range=(0, 10), tuple_to_uniform=True, list_to_choice=True, allow_floats=False) self.assertTrue(isinstance(result, iap.DiscreteUniform)) def test_value_tuple_and_allowed_and_partially_outside_value_range(self): # tuple as value and tuple allowed and tuple partially outside of # value range with self.assertRaises(Exception) as context: _ = iap.handle_discrete_param( (1, 3), "[test12]", value_range=(2, 13), tuple_to_uniform=True, list_to_choice=True, allow_floats=True) self.assertTrue("[test12]" in str(context.exception)) def test_value_tuple_and_allowed_and_fully_outside_value_range(self): # tuple as value and tuple allowed and tuple fully outside of value # range with self.assertRaises(Exception) as context: _ = iap.handle_discrete_param( (1, 2), "[test13]", value_range=(3, 13), tuple_to_uniform=True, list_to_choice=True, allow_floats=True) self.assertTrue("[test13]" in str(context.exception)) def test_value_list_but_not_allowed(self): # list as value, but no list allowed with self.assertRaises(Exception) as context: _ = iap.handle_discrete_param( [1, 2, 3], "[test14]", value_range=None, tuple_to_uniform=True, list_to_choice=False, allow_floats=True) self.assertTrue("[test14]" in str(context.exception)) def test_value_list_and_allowed(self): # list as value and list allowed result = iap.handle_discrete_param( [1, 2, 3], "[test15]", value_range=None, tuple_to_uniform=True, list_to_choice=True, allow_floats=True) self.assertTrue(isinstance(result, iap.Choice)) def test_value_list_and_allowed_and_partially_outside_value_range(self): # list as value and list allowed and list partially outside of value range with self.assertRaises(Exception) as context: _ = iap.handle_discrete_param( [1, 3], "[test16]", value_range=(2, 13), tuple_to_uniform=True, list_to_choice=True, allow_floats=True) self.assertTrue("[test16]" in str(context.exception)) def test_value_list_and_allowed_and_fully_outside_value_range(self): # list as value and list allowed and list fully outside of value range with self.assertRaises(Exception) as context: _ = iap.handle_discrete_param( [1, 2], "[test17]", value_range=(3, 13), tuple_to_uniform=True, list_to_choice=True, allow_floats=True) self.assertTrue("[test17]" in str(context.exception)) def test_value_inside_value_range_given_as_callable(self): # single value within value range given as callable def _value_range(x): return -1 < x < 1 result = iap.handle_discrete_param( 1, "[test18]", value_range=_value_range, tuple_to_uniform=True, list_to_choice=True) self.assertTrue(isinstance(result, iap.Deterministic)) def test_bad_datatype_as_value_range(self): # bad datatype for value range with self.assertRaises(Exception) as context: _ = iap.handle_discrete_param( 1, "[test19]", value_range=False, tuple_to_uniform=True, list_to_choice=True) self.assertTrue( "Unexpected input for value_range" in str(context.exception)) class Test_handle_categorical_string_param(unittest.TestCase): def test_arg_is_all(self): valid_values = ["class1", "class2"] param = iap.handle_categorical_string_param( ia.ALL, "foo", valid_values) assert isinstance(param, iap.Choice) assert param.a == 
valid_values def test_arg_is_valid_str(self): valid_values = ["class1", "class2"] param = iap.handle_categorical_string_param( "class1", "foo", valid_values) assert isinstance(param, iap.Deterministic) assert param.value == "class1" def test_arg_is_invalid_str(self): valid_values = ["class1", "class2"] with self.assertRaises(AssertionError) as ctx: _param = iap.handle_categorical_string_param( "class3", "foo", valid_values) expected = ( "Expected parameter 'foo' to be one of: class1, class2. " "Got: class3.") assert expected == str(ctx.exception) def test_arg_is_valid_list(self): valid_values = ["class1", "class2", "class3"] param = iap.handle_categorical_string_param( ["class1", "class3"], "foo", valid_values) assert isinstance(param, iap.Choice) assert param.a == ["class1", "class3"] def test_arg_is_list_with_invalid_types(self): valid_values = ["class1", "class2", "class3"] with self.assertRaises(AssertionError) as ctx: _param = iap.handle_categorical_string_param( ["class1", False], "foo", valid_values) expected = ( "Expected list provided for parameter 'foo' to only contain " "strings, got types: str, bool." ) assert expected in str(ctx.exception) def test_arg_is_invalid_list(self): valid_values = ["class1", "class2", "class3"] with self.assertRaises(AssertionError) as ctx: _param = iap.handle_categorical_string_param( ["class1", "class4"], "foo", valid_values) expected = ( "Expected list provided for parameter 'foo' to only contain " "the following allowed strings: class1, class2, class3. " "Got strings: class1, class4." ) assert expected in str(ctx.exception) def test_arg_is_stochastic_param(self): param = iap.Deterministic("class1") param_out = iap.handle_categorical_string_param( param, "foo", ["class1"]) assert param_out is param def test_arg_is_invalid_datatype(self): with self.assertRaises(Exception) as ctx: _ = iap.handle_categorical_string_param( False, "foo", ["class1"]) expected = "Expected parameter 'foo' to be imgaug.ALL" assert expected in str(ctx.exception) class Test_handle_probability_param(unittest.TestCase): def test_bool_like_values(self): for val in [True, False, 0, 1, 0.0, 1.0]: with self.subTest(param=val): p = iap.handle_probability_param(val, "[test1]") assert isinstance(p, iap.Deterministic) assert p.value == int(val) def test_float_probabilities(self): for val in [0.0001, 0.001, 0.01, 0.1, 0.9, 0.99, 0.999, 0.9999]: with self.subTest(param=val): p = iap.handle_probability_param(val, "[test2]") assert isinstance(p, iap.Binomial) assert isinstance(p.p, iap.Deterministic) assert val-1e-8 < p.p.value < val+1e-8 def test_probability_is_stochastic_parameter(self): det = iap.Deterministic(1) p = iap.handle_probability_param(det, "[test3]") assert p == det def test_probability_has_bad_datatype(self): with self.assertRaises(Exception) as context: _p = iap.handle_probability_param("test", "[test4]") self.assertTrue("Expected " in str(context.exception)) def test_probability_is_negative(self): with self.assertRaises(AssertionError): _p = iap.handle_probability_param(-0.01, "[test5]") def test_probability_is_above_100_percent(self): with self.assertRaises(AssertionError): _p = iap.handle_probability_param(1.01, "[test6]") class Test_force_np_float_dtype(unittest.TestCase): def test_common_dtypes(self): dtypes = [ ("float16", "float16"), ("float32", "float32"), ("float64", "float64"), ("uint8", "float64"), ("int32", "float64") ] for dtype_in, expected in dtypes: with self.subTest(dtype_in=dtype_in): arr = np.zeros((1,), dtype=dtype_in) observed = 
iap.force_np_float_dtype(arr).dtype assert observed.name == expected class Test_both_np_float_if_one_is_float(unittest.TestCase): def test_float16_float32(self): a1 = np.zeros((1,), dtype=np.float16) b1 = np.zeros((1,), dtype=np.float32) a2, b2 = iap.both_np_float_if_one_is_float(a1, b1) assert a2.dtype.name == "float16" assert b2.dtype.name == "float32" def test_float16_int32(self): a1 = np.zeros((1,), dtype=np.float16) b1 = np.zeros((1,), dtype=np.int32) a2, b2 = iap.both_np_float_if_one_is_float(a1, b1) assert a2.dtype.name == "float16" assert b2.dtype.name == "float64" def test_int32_float16(self): a1 = np.zeros((1,), dtype=np.int32) b1 = np.zeros((1,), dtype=np.float16) a2, b2 = iap.both_np_float_if_one_is_float(a1, b1) assert a2.dtype.name == "float64" assert b2.dtype.name == "float16" def test_int32_uint8(self): a1 = np.zeros((1,), dtype=np.int32) b1 = np.zeros((1,), dtype=np.uint8) a2, b2 = iap.both_np_float_if_one_is_float(a1, b1) assert a2.dtype.name == "float64" assert b2.dtype.name == "float64" class Test_draw_distributions_grid(unittest.TestCase): def setUp(self): reseed() def test_basic_functionality(self): params = [mock.Mock(), mock.Mock()] params[0].draw_distribution_graph.return_value = \ np.zeros((1, 1, 3), dtype=np.uint8) params[1].draw_distribution_graph.return_value = \ np.zeros((1, 1, 3), dtype=np.uint8) draw_grid_mock = mock.Mock() draw_grid_mock.return_value = np.zeros((4, 3, 2), dtype=np.uint8) with mock.patch('imgaug.imgaug.draw_grid', draw_grid_mock): grid_observed = iap.draw_distributions_grid( params, rows=2, cols=3, graph_sizes=(20, 21), sample_sizes=[(1, 2), (3, 4)], titles=["A", "B"]) assert grid_observed.shape == (4, 3, 2) assert params[0].draw_distribution_graph.call_count == 1 assert params[1].draw_distribution_graph.call_count == 1 assert params[0].draw_distribution_graph.call_args[1]["size"] == (1, 2) assert params[0].draw_distribution_graph.call_args[1]["title"] == "A" assert params[1].draw_distribution_graph.call_args[1]["size"] == (3, 4) assert params[1].draw_distribution_graph.call_args[1]["title"] == "B" assert draw_grid_mock.call_count == 1 assert draw_grid_mock.call_args[0][0][0].shape == (20, 21, 3) assert draw_grid_mock.call_args[0][0][1].shape == (20, 21, 3) assert draw_grid_mock.call_args[1]["rows"] == 2 assert draw_grid_mock.call_args[1]["cols"] == 3 class Test_draw_distributions_graph(unittest.TestCase): def test_basic_functionality(self): # this test is very rough as we get a not-very-well-defined image out # of the function param = iap.Uniform(0.0, 1.0) graph_img = param.draw_distribution_graph(title=None, size=(10000,), bins=100) # at least 10% of the image should be white-ish (background) nb_white = np.sum(graph_img[..., :] > [200, 200, 200]) nb_all = np.prod(graph_img.shape) graph_img_title = param.draw_distribution_graph(title="test", size=(10000,), bins=100) assert graph_img.ndim == 3 assert graph_img.shape[2] == 3 assert nb_white > 0.1 * nb_all assert graph_img_title.ndim == 3 assert graph_img_title.shape[2] == 3 assert not np.array_equal(graph_img_title, graph_img) class TestStochasticParameter(unittest.TestCase): def setUp(self): reseed() def test_copy(self): other_param = iap.Uniform(1.0, 10.0) param = iap.Discretize(other_param) other_param.a = [1.0] param_copy = param.copy() param.other_param.a[0] += 1 assert isinstance(param_copy, iap.Discretize) assert isinstance(param_copy.other_param, iap.Uniform) assert param_copy.other_param.a[0] == param.other_param.a[0] def test_deepcopy(self): other_param = iap.Uniform(1.0, 10.0) 
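# ----------------------------------------------------------------------
# Illustrative sketch (not from the original suite): the copy()/deepcopy()
# tests of TestStochasticParameter rely on standard Python copy semantics,
# i.e. copy() shares nested parameters while deepcopy() clones them.
# Minimal standalone demonstration, assuming only the iap API that the
# surrounding tests already exercise; the helper name is hypothetical.
def _demo_copy_vs_deepcopy():
    other = iap.Uniform(1.0, 10.0)
    shallow = iap.Discretize(other).copy()
    deep = iap.Discretize(other).deepcopy()
    other.a = [2.0]  # mutate the nested parameter in place
    assert shallow.other_param.a == [2.0]  # shallow copy shares `other`
    assert deep.other_param.a != [2.0]     # deep copy holds its own clone
# ----------------------------------------------------------------------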
param = iap.Discretize(other_param)
        other_param.a = [1.0]
        param_copy = param.deepcopy()
        param.other_param.a[0] += 1
        assert isinstance(param_copy, iap.Discretize)
        assert isinstance(param_copy.other_param, iap.Uniform)
        assert param_copy.other_param.a[0] != param.other_param.a[0]


class TestStochasticParameterOperators(unittest.TestCase):
    def setUp(self):
        reseed()

    def test_multiply_stochastic_params(self):
        param1 = iap.Normal(0, 1)
        param2 = iap.Uniform(-1.0, 1.0)
        param3 = param1 * param2
        assert isinstance(param3, iap.Multiply)
        assert param3.other_param == param1
        assert param3.val == param2

    def test_multiply_stochastic_param_with_integer(self):
        param1 = iap.Normal(0, 1)
        param3 = param1 * 2
        assert isinstance(param3, iap.Multiply)
        assert param3.other_param == param1
        assert isinstance(param3.val, iap.Deterministic)
        assert param3.val.value == 2

    def test_multiply_integer_with_stochastic_param(self):
        param1 = iap.Normal(0, 1)
        param3 = 2 * param1
        assert isinstance(param3, iap.Multiply)
        assert isinstance(param3.other_param, iap.Deterministic)
        assert param3.other_param.value == 2
        assert param3.val == param1

    def test_multiply_string_with_stochastic_param_should_fail(self):
        param1 = iap.Normal(0, 1)
        with self.assertRaises(Exception) as context:
            _ = "test" * param1
        self.assertTrue("Invalid datatypes" in str(context.exception))

    def test_multiply_stochastic_param_with_string_should_fail(self):
        param1 = iap.Normal(0, 1)
        with self.assertRaises(Exception) as context:
            _ = param1 * "test"
        self.assertTrue("Invalid datatypes" in str(context.exception))

    def test_divide_stochastic_params(self):
        # Divide (__truediv__)
        param1 = iap.Normal(0, 1)
        param2 = iap.Uniform(-1.0, 1.0)
        param3 = param1 / param2
        assert isinstance(param3, iap.Divide)
        assert param3.other_param == param1
        assert param3.val == param2

    def test_divide_stochastic_param_by_integer(self):
        param1 = iap.Normal(0, 1)
        param3 = param1 / 2
        assert isinstance(param3, iap.Divide)
        assert param3.other_param == param1
        assert isinstance(param3.val, iap.Deterministic)
        assert param3.val.value == 2

    def test_divide_integer_by_stochastic_param(self):
        param1 = iap.Normal(0, 1)
        param3 = 2 / param1
        assert isinstance(param3, iap.Divide)
        assert isinstance(param3.other_param, iap.Deterministic)
        assert param3.other_param.value == 2
        assert param3.val == param1

    def test_divide_string_by_stochastic_param_should_fail(self):
        param1 = iap.Normal(0, 1)
        with self.assertRaises(Exception) as context:
            _ = "test" / param1
        self.assertTrue("Invalid datatypes" in str(context.exception))

    def test_divide_stochastic_param_by_string_should_fail(self):
        param1 = iap.Normal(0, 1)
        with self.assertRaises(Exception) as context:
            _ = param1 / "test"
        self.assertTrue("Invalid datatypes" in str(context.exception))

    def test_div_stochastic_params(self):
        # Divide (__div__)
        param1 = iap.Normal(0, 1)
        param2 = iap.Uniform(-1.0, 1.0)
        param3 = param1.__div__(param2)
        assert isinstance(param3, iap.Divide)
        assert param3.other_param == param1
        assert param3.val == param2

    def test_div_stochastic_param_by_integer(self):
        param1 = iap.Normal(0, 1)
        param3 = param1.__div__(2)
        assert isinstance(param3, iap.Divide)
        assert param3.other_param == param1
        assert isinstance(param3.val, iap.Deterministic)
        assert param3.val.value == 2

    def test_div_stochastic_param_by_string_should_fail(self):
        param1 = iap.Normal(0, 1)
        with self.assertRaises(Exception) as context:
            _ = param1.__div__("test")
        self.assertTrue("Invalid datatypes" in str(context.exception))

    def test_rdiv_stochastic_param_by_integer(self):
        # Divide (__rdiv__)
        param1 =
iap.Normal(0, 1) param3 = param1.__rdiv__(2) assert isinstance(param3, iap.Divide) assert isinstance(param3.other_param, iap.Deterministic) assert param3.other_param.value == 2 assert param3.val == param1 def test_rdiv_stochastic_param_by_string_should_fail(self): param1 = iap.Normal(0, 1) with self.assertRaises(Exception) as context: _ = param1.__rdiv__("test") self.assertTrue("Invalid datatypes" in str(context.exception)) def test_floordiv_stochastic_params(self): # Divide (__floordiv__) param1_int = iap.DiscreteUniform(0, 10) param2_int = iap.Choice([1, 2]) param3 = param1_int // param2_int assert isinstance(param3, iap.Discretize) assert isinstance(param3.other_param, iap.Divide) assert param3.other_param.other_param == param1_int assert param3.other_param.val == param2_int def test_floordiv_symbol_stochastic_param_by_integer(self): param1_int = iap.DiscreteUniform(0, 10) param3 = param1_int // 2 assert isinstance(param3, iap.Discretize) assert isinstance(param3.other_param, iap.Divide) assert param3.other_param.other_param == param1_int assert isinstance(param3.other_param.val, iap.Deterministic) assert param3.other_param.val.value == 2 def test_floordiv_symbol_integer_by_stochastic_param(self): param1_int = iap.DiscreteUniform(0, 10) param3 = 2 // param1_int assert isinstance(param3, iap.Discretize) assert isinstance(param3.other_param, iap.Divide) assert isinstance(param3.other_param.other_param, iap.Deterministic) assert param3.other_param.other_param.value == 2 assert param3.other_param.val == param1_int def test_floordiv_symbol_string_by_stochastic_should_fail(self): param1_int = iap.DiscreteUniform(0, 10) with self.assertRaises(Exception) as context: _ = "test" // param1_int self.assertTrue("Invalid datatypes" in str(context.exception)) def test_floordiv_symbol_stochastic_param_by_string_should_fail(self): param1_int = iap.DiscreteUniform(0, 10) with self.assertRaises(Exception) as context: _ = param1_int // "test" self.assertTrue("Invalid datatypes" in str(context.exception)) def test_add_stochastic_params(self): param1 = iap.Normal(0, 1) param2 = iap.Uniform(-1.0, 1.0) param3 = param1 + param2 assert isinstance(param3, iap.Add) assert param3.other_param == param1 assert param3.val == param2 def test_add_integer_to_stochastic_param(self): param1 = iap.Normal(0, 1) param3 = param1 + 2 assert isinstance(param3, iap.Add) assert param3.other_param == param1 assert isinstance(param3.val, iap.Deterministic) assert param3.val.value == 2 def test_add_stochastic_param_to_integer(self): param1 = iap.Normal(0, 1) param3 = 2 + param1 assert isinstance(param3, iap.Add) assert isinstance(param3.other_param, iap.Deterministic) assert param3.other_param.value == 2 assert param3.val == param1 def test_add_stochastic_param_to_string(self): param1 = iap.Normal(0, 1) with self.assertRaises(Exception) as context: _ = "test" + param1 self.assertTrue("Invalid datatypes" in str(context.exception)) def test_add_string_to_stochastic_param(self): param1 = iap.Normal(0, 1) with self.assertRaises(Exception) as context: _ = param1 + "test" self.assertTrue("Invalid datatypes" in str(context.exception)) def test_subtract_stochastic_params(self): param1 = iap.Normal(0, 1) param2 = iap.Uniform(-1.0, 1.0) param3 = param1 - param2 assert isinstance(param3, iap.Subtract) assert param3.other_param == param1 assert param3.val == param2 def test_subtract_integer_from_stochastic_param(self): param1 = iap.Normal(0, 1) param3 = param1 - 2 assert isinstance(param3, iap.Subtract) assert param3.other_param == param1 assert 
isinstance(param3.val, iap.Deterministic) assert param3.val.value == 2 def test_subtract_stochastic_param_from_integer(self): param1 = iap.Normal(0, 1) param3 = 2 - param1 assert isinstance(param3, iap.Subtract) assert isinstance(param3.other_param, iap.Deterministic) assert param3.other_param.value == 2 assert param3.val == param1 def test_subtract_stochastic_param_from_string_should_fail(self): param1 = iap.Normal(0, 1) with self.assertRaises(Exception) as context: _ = "test" - param1 self.assertTrue("Invalid datatypes" in str(context.exception)) def test_subtract_string_from_stochastic_param_should_fail(self): param1 = iap.Normal(0, 1) with self.assertRaises(Exception) as context: _ = param1 - "test" self.assertTrue("Invalid datatypes" in str(context.exception)) def test_exponentiate_stochastic_params(self): param1 = iap.Normal(0, 1) param2 = iap.Uniform(-1.0, 1.0) param3 = param1 ** param2 assert isinstance(param3, iap.Power) assert param3.other_param == param1 assert param3.val == param2 def test_exponentiate_stochastic_param_by_integer(self): param1 = iap.Normal(0, 1) param3 = param1 ** 2 assert isinstance(param3, iap.Power) assert param3.other_param == param1 assert isinstance(param3.val, iap.Deterministic) assert param3.val.value == 2 def test_exponentiate_integer_by_stochastic_param(self): param1 = iap.Normal(0, 1) param3 = 2 ** param1 assert isinstance(param3, iap.Power) assert isinstance(param3.other_param, iap.Deterministic) assert param3.other_param.value == 2 assert param3.val == param1 def test_exponentiate_string_by_stochastic_param(self): param1 = iap.Normal(0, 1) with self.assertRaises(Exception) as context: _ = "test" ** param1 self.assertTrue("Invalid datatypes" in str(context.exception)) def test_exponentiate_stochastic_param_by_string(self): param1 = iap.Normal(0, 1) with self.assertRaises(Exception) as context: _ = param1 ** "test" self.assertTrue("Invalid datatypes" in str(context.exception)) class TestBinomial(unittest.TestCase): def setUp(self): reseed() def test___init___p_is_zero(self): param = iap.Binomial(0) assert ( param.__str__() == param.__repr__() == "Binomial(Deterministic(int 0))" ) def test___init___p_is_one(self): param = iap.Binomial(1.0) assert ( param.__str__() == param.__repr__() == "Binomial(Deterministic(float 1.00000000))" ) def test_p_is_zero(self): param = iap.Binomial(0) sample = param.draw_sample() samples = param.draw_samples((10, 5)) assert sample.shape == tuple() assert samples.shape == (10, 5) assert sample == 0 assert np.all(samples == 0) def test_p_is_one(self): param = iap.Binomial(1.0) sample = param.draw_sample() samples = param.draw_samples((10, 5)) assert sample.shape == tuple() assert samples.shape == (10, 5) assert sample == 1 assert np.all(samples == 1) def test_p_is_50_percent(self): param = iap.Binomial(0.5) sample = param.draw_sample() samples = param.draw_samples((10000,)) unique, counts = np.unique(samples, return_counts=True) assert sample.shape == tuple() assert samples.shape == (10000,) assert sample in [0, 1] assert len(unique) == 2 for val, count in zip(unique, counts): if val == 0: assert 5000 - 500 < count < 5000 + 500 elif val == 1: assert 5000 - 500 < count < 5000 + 500 else: assert False def test_p_is_list(self): param = iap.Binomial(iap.Choice([0.25, 0.75])) for _ in sm.xrange(10): samples = param.draw_samples((1000,)) p = np.sum(samples) / samples.size assert ( (0.25 - 0.05 < p < 0.25 + 0.05) or (0.75 - 0.05 < p < 0.75 + 0.05) ) def test_p_is_tuple(self): param = iap.Binomial((0.0, 1.0)) last_p = 0.5 diffs = 
[] for _ in sm.xrange(30): samples = param.draw_samples((1000,)) p = np.sum(samples).astype(np.float32) / samples.size diffs.append(abs(p - last_p)) last_p = p nb_p_changed = sum([diff > 0.05 for diff in diffs]) assert nb_p_changed > 15 def test_samples_same_values_for_same_seeds(self): param = iap.Binomial(0.5) samples1 = param.draw_samples((10, 5), random_state=iarandom.RNG(1234)) samples2 = param.draw_samples((10, 5), random_state=iarandom.RNG(1234)) assert np.array_equal(samples1, samples2) class TestChoice(unittest.TestCase): def setUp(self): reseed() def test___init__(self): param = iap.Choice([0, 1, 2]) assert ( param.__str__() == param.__repr__() == "Choice(a=[0, 1, 2], replace=True, p=None)" ) def test_value_is_list(self): param = iap.Choice([0, 1, 2]) sample = param.draw_sample() samples = param.draw_samples((10, 5)) assert sample.shape == tuple() assert samples.shape == (10, 5) assert sample in [0, 1, 2] assert np.all( np.logical_or( np.logical_or(samples == 0, samples == 1), samples == 2 ) ) def test_sampled_values_match_expected_counts(self): param = iap.Choice([0, 1, 2]) samples = param.draw_samples((10000,)) expected = 10000/3 expected_tolerance = expected * 0.05 for v in [0, 1, 2]: count = np.sum(samples == v) assert ( expected - expected_tolerance < count < expected + expected_tolerance ) def test_value_is_list_containing_negative_number(self): param = iap.Choice([-1, 1]) sample = param.draw_sample() samples = param.draw_samples((10, 5)) assert sample.shape == tuple() assert samples.shape == (10, 5) assert sample in [-1, 1] assert np.all(np.logical_or(samples == -1, samples == 1)) def test_value_is_list_of_floats(self): param = iap.Choice([-1.2, 1.7]) sample = param.draw_sample() samples = param.draw_samples((10, 5)) assert sample.shape == tuple() assert samples.shape == (10, 5) assert ( ( -1.2 - _eps(sample) < sample < -1.2 + _eps(sample) ) or ( 1.7 - _eps(sample) < sample < 1.7 + _eps(sample) ) ) assert np.all( np.logical_or( np.logical_and( -1.2 - _eps(sample) < samples, samples < -1.2 + _eps(sample) ), np.logical_and( 1.7 - _eps(sample) < samples, samples < 1.7 + _eps(sample) ) ) ) def test_value_is_list_of_strings(self): param = iap.Choice(["first", "second", "third"]) sample = param.draw_sample() samples = param.draw_samples((10, 5)) assert sample.shape == tuple() assert samples.shape == (10, 5) assert sample in ["first", "second", "third"] assert np.all( np.logical_or( np.logical_or( samples == "first", samples == "second" ), samples == "third" ) ) def test_sample_without_replacing(self): param = iap.Choice([1+i for i in sm.xrange(100)], replace=False) samples = param.draw_samples((50,)) seen = [0 for _ in sm.xrange(100)] for sample in samples: seen[sample-1] += 1 assert all([count in [0, 1] for count in seen]) def test_non_uniform_probabilities_over_elements(self): param = iap.Choice([0, 1], p=[0.25, 0.75]) samples = param.draw_samples((10000,)) unique, counts = np.unique(samples, return_counts=True) assert len(unique) == 2 for val, count in zip(unique, counts): if val == 0: assert 2500 - 500 < count < 2500 + 500 elif val == 1: assert 7500 - 500 < count < 7500 + 500 else: assert False def test_list_contains_stochastic_parameter(self): param = iap.Choice([iap.Choice([0, 1]), 2]) samples = param.draw_samples((10000,)) unique, counts = np.unique(samples, return_counts=True) assert len(unique) == 3 for val, count in zip(unique, counts): if val in [0, 1]: assert 2500 - 500 < count < 2500 + 500 elif val == 2: assert 5000 - 500 < count < 5000 + 500 else: assert False def 
test_samples_same_values_for_same_seeds(self):
        param = iap.Choice([-1, 0, 1, 2, 3])
        samples1 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        assert np.array_equal(samples1, samples2)

    def test_value_is_bad_datatype(self):
        with self.assertRaises(Exception) as context:
            _ = iap.Choice(123)
        self.assertTrue(
            "Expected a to be an iterable" in str(context.exception))

    def test_p_is_bad_datatype(self):
        with self.assertRaises(Exception) as context:
            _ = iap.Choice([1, 2], p=123)
        self.assertTrue("Expected p to be" in str(context.exception))

    def test_value_and_p_have_unequal_lengths(self):
        with self.assertRaises(Exception) as context:
            _ = iap.Choice([1, 2], p=[1])
        self.assertTrue("Expected lengths of" in str(context.exception))


class TestDiscreteUniform(unittest.TestCase):
    def setUp(self):
        reseed()

    def test___init__(self):
        param = iap.DiscreteUniform(0, 2)
        assert (
            param.__str__()
            == param.__repr__()
            == "DiscreteUniform(Deterministic(int 0), Deterministic(int 2))"
        )

    def test_bounds_are_ints(self):
        param = iap.DiscreteUniform(0, 2)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample in [0, 1, 2]
        assert np.all(
            np.logical_or(
                np.logical_or(samples == 0, samples == 1),
                samples == 2
            )
        )

    def test_samples_match_expected_counts(self):
        param = iap.DiscreteUniform(0, 2)
        samples = param.draw_samples((10000,))
        expected = 10000/3
        expected_tolerance = expected * 0.05
        for v in [0, 1, 2]:
            count = np.sum(samples == v)
            assert (
                expected - expected_tolerance
                < count <
                expected + expected_tolerance
            )

    def test_lower_bound_is_negative(self):
        param = iap.DiscreteUniform(-1, 1)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample in [-1, 0, 1]
        assert np.all(
            np.logical_or(
                np.logical_or(samples == -1, samples == 0),
                samples == 1
            )
        )

    def test_bounds_are_floats(self):
        param = iap.DiscreteUniform(-1.2, 1.2)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample in [-1, 0, 1]
        assert np.all(
            np.logical_or(
                np.logical_or(
                    samples == -1, samples == 0
                ),
                samples == 1
            )
        )

    def test_lower_and_upper_bound_have_wrong_order(self):
        param = iap.DiscreteUniform(1, -1)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample in [-1, 0, 1]
        assert np.all(
            np.logical_or(
                np.logical_or(
                    samples == -1, samples == 0
                ),
                samples == 1
            )
        )

    def test_lower_and_upper_bound_are_the_same(self):
        param = iap.DiscreteUniform(1, 1)
        sample = param.draw_sample()
        samples = param.draw_samples((100,))
        assert sample == 1
        assert np.all(samples == 1)

    def test_samples_same_values_for_same_seeds(self):
        param = iap.DiscreteUniform(-1, 1)
        samples1 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        assert np.array_equal(samples1, samples2)


class TestPoisson(unittest.TestCase):
    def setUp(self):
        reseed()

    def test___init__(self):
        param = iap.Poisson(1)
        assert (
            param.__str__()
            == param.__repr__()
            == "Poisson(Deterministic(int 1))"
        )

    def test_draw_sample(self):
        param = iap.Poisson(1)
        sample = param.draw_sample()
        assert sample.shape == tuple()
        assert 0 <= sample

    def test_via_comparison_to_np_poisson(self):
        param = iap.Poisson(1)
        samples = param.draw_samples((100,
1000)) samples_direct = iarandom.RNG(1234).poisson( lam=1, size=(100, 1000)) assert samples.shape == (100, 1000) for i in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]: count_direct = int(np.sum(samples_direct == i)) count = np.sum(samples == i) tolerance = max(count_direct * 0.1, 250) assert count_direct - tolerance < count < count_direct + tolerance def test_samples_same_values_for_same_seeds(self): param = iap.Poisson(1) samples1 = param.draw_samples((10, 5), random_state=iarandom.RNG(1234)) samples2 = param.draw_samples((10, 5), random_state=iarandom.RNG(1234)) assert np.array_equal(samples1, samples2) class TestNormal(unittest.TestCase): def setUp(self): reseed() def test___init__(self): param = iap.Normal(0, 1) assert ( param.__str__() == param.__repr__() == "Normal(loc=Deterministic(int 0), scale=Deterministic(int 1))" ) def test_draw_sample(self): param = iap.Normal(0, 1) sample = param.draw_sample() assert sample.shape == tuple() def test_via_comparison_to_np_normal(self): param = iap.Normal(0, 1) samples = param.draw_samples((100, 1000)) samples_direct = iarandom.RNG(1234).normal(loc=0, scale=1, size=(100, 1000)) samples = np.clip(samples, -1, 1) samples_direct = np.clip(samples_direct, -1, 1) nb_bins = 10 hist, _ = np.histogram(samples, bins=nb_bins, range=(-1.0, 1.0), density=False) hist_direct, _ = np.histogram(samples_direct, bins=nb_bins, range=(-1.0, 1.0), density=False) tolerance = 0.05 for nb_samples, nb_samples_direct in zip(hist, hist_direct): density = nb_samples / samples.size density_direct = nb_samples_direct / samples_direct.size assert ( density_direct - tolerance < density < density_direct + tolerance ) def test_loc_is_stochastic_parameter(self): param = iap.Normal(iap.Choice([-100, 100]), 1) seen = [0, 0] for _ in sm.xrange(1000): samples = param.draw_samples((100,)) exp = np.mean(samples) if -100 - 10 < exp < -100 + 10: seen[0] += 1 elif 100 - 10 < exp < 100 + 10: seen[1] += 1 else: assert False assert 500 - 100 < seen[0] < 500 + 100 assert 500 - 100 < seen[1] < 500 + 100 def test_scale(self): param1 = iap.Normal(0, 1) param2 = iap.Normal(0, 100) samples1 = param1.draw_samples((1000,)) samples2 = param2.draw_samples((1000,)) assert np.std(samples1) < np.std(samples2) assert 100 - 10 < np.std(samples2) < 100 + 10 def test_samples_same_values_for_same_seeds(self): param = iap.Normal(0, 1) samples1 = param.draw_samples((10, 5), random_state=iarandom.RNG(1234)) samples2 = param.draw_samples((10, 5), random_state=iarandom.RNG(1234)) assert np.allclose(samples1, samples2) class TestTruncatedNormal(unittest.TestCase): def setUp(self): reseed() def test___init__(self): param = iap.TruncatedNormal(0, 1) expected = ( "TruncatedNormal(" "loc=Deterministic(int 0), " "scale=Deterministic(int 1), " "low=Deterministic(float -inf), " "high=Deterministic(float inf)" ")" ) assert ( param.__str__() == param.__repr__() == expected ) def test___init___custom_range(self): param = iap.TruncatedNormal(0, 1, low=-100, high=50.0) expected = ( "TruncatedNormal(" "loc=Deterministic(int 0), " "scale=Deterministic(int 1), " "low=Deterministic(int -100), " "high=Deterministic(float 50.00000000)" ")" ) assert ( param.__str__() == param.__repr__() == expected ) def test_scale_is_zero(self): param = iap.TruncatedNormal(0.5, 0, low=-10, high=10) samples = param.draw_samples((100,)) assert np.allclose(samples, 0.5) def test_scale(self): param1 = iap.TruncatedNormal(0.0, 0.1, low=-100, high=100) param2 = iap.TruncatedNormal(0.0, 5.0, low=-100, high=100) samples1 = param1.draw_samples((1000,)) samples2 = 
param2.draw_samples((1000,)) assert np.std(samples1) < np.std(samples2) assert np.isclose(np.std(samples1), 0.1, rtol=0, atol=0.20) assert np.isclose(np.std(samples2), 5.0, rtol=0, atol=0.40) def test_loc_is_stochastic_parameter(self): param = iap.TruncatedNormal(iap.Choice([-100, 100]), 0.01, low=-1000, high=1000) seen = [0, 0] for _ in sm.xrange(200): samples = param.draw_samples((5,)) observed = np.mean(samples) dist1 = np.abs(-100 - observed) dist2 = np.abs(100 - observed) if dist1 < 1: seen[0] += 1 elif dist2 < 1: seen[1] += 1 else: assert False assert np.isclose(seen[0], 100, rtol=0, atol=20) assert np.isclose(seen[1], 100, rtol=0, atol=20) def test_samples_are_within_bounds(self): param = iap.TruncatedNormal(0, 10.0, low=-5, high=7.5) samples = param.draw_samples((1000,)) # are all within bounds assert np.all(samples >= -5.0 - 1e-4) assert np.all(samples <= 7.5 + 1e-4) # at least some samples close to bounds assert np.any(samples <= -4.5) assert np.any(samples >= 7.0) # at least some samples close to loc assert np.any(np.abs(samples) < 0.5) def test_samples_same_values_for_same_seeds(self): param = iap.TruncatedNormal(0, 1) samples1 = param.draw_samples((10, 5), random_state=1234) samples2 = param.draw_samples((10, 5), random_state=1234) assert np.allclose(samples1, samples2) def test_samples_different_values_for_different_seeds(self): param = iap.TruncatedNormal(0, 1) samples1 = param.draw_samples((10, 5), random_state=1234) samples2 = param.draw_samples((10, 5), random_state=2345) assert not np.allclose(samples1, samples2) class TestLaplace(unittest.TestCase): def setUp(self): reseed() def test___init__(self): param = iap.Laplace(0, 1) assert ( param.__str__() == param.__repr__() == "Laplace(loc=Deterministic(int 0), scale=Deterministic(int 1))" ) def test_draw_sample(self): param = iap.Laplace(0, 1) sample = param.draw_sample() assert sample.shape == tuple() def test_via_comparison_to_np_laplace(self): param = iap.Laplace(0, 1) samples = param.draw_samples((100, 1000)) samples_direct = iarandom.RNG(1234).laplace(loc=0, scale=1, size=(100, 1000)) assert samples.shape == (100, 1000) samples = np.clip(samples, -1, 1) samples_direct = np.clip(samples_direct, -1, 1) nb_bins = 10 hist, _ = np.histogram(samples, bins=nb_bins, range=(-1.0, 1.0), density=False) hist_direct, _ = np.histogram(samples_direct, bins=nb_bins, range=(-1.0, 1.0), density=False) tolerance = 0.05 for nb_samples, nb_samples_direct in zip(hist, hist_direct): density = nb_samples / samples.size density_direct = nb_samples_direct / samples_direct.size assert ( density_direct - tolerance < density < density_direct + tolerance ) def test_loc_is_stochastic_parameter(self): param = iap.Laplace(iap.Choice([-100, 100]), 1) seen = [0, 0] for _ in sm.xrange(1000): samples = param.draw_samples((100,)) exp = np.mean(samples) if -100 - 10 < exp < -100 + 10: seen[0] += 1 elif 100 - 10 < exp < 100 + 10: seen[1] += 1 else: assert False assert 500 - 100 < seen[0] < 500 + 100 assert 500 - 100 < seen[1] < 500 + 100 def test_scale(self): param1 = iap.Laplace(0, 1) param2 = iap.Laplace(0, 100) samples1 = param1.draw_samples((1000,)) samples2 = param2.draw_samples((1000,)) assert np.var(samples1) < np.var(samples2) def test_scale_is_zero(self): param1 = iap.Laplace(1, 0) samples = param1.draw_samples((100,)) assert np.all(np.logical_and( samples > 1 - _eps(samples), samples < 1 + _eps(samples) )) def test_samples_same_values_for_same_seeds(self): param = iap.Laplace(0, 1) samples1 = param.draw_samples((10, 5), 
random_state=iarandom.RNG(1234)) samples2 = param.draw_samples((10, 5), random_state=iarandom.RNG(1234)) assert np.allclose(samples1, samples2) class TestChiSquare(unittest.TestCase): def setUp(self): reseed() def test___init__(self): param = iap.ChiSquare(1) assert ( param.__str__() == param.__repr__() == "ChiSquare(df=Deterministic(int 1))" ) def test_draw_sample(self): param = iap.ChiSquare(1) sample = param.draw_sample() assert sample.shape == tuple() assert 0 <= sample def test_via_comparison_to_np_chisquare(self): param = iap.ChiSquare(1) samples = param.draw_samples((100, 1000)) samples_direct = iarandom.RNG(1234).chisquare(df=1, size=(100, 1000)) assert samples.shape == (100, 1000) assert np.all(0 <= samples) samples = np.clip(samples, 0, 3) samples_direct = np.clip(samples_direct, 0, 3) nb_bins = 10 hist, _ = np.histogram(samples, bins=nb_bins, range=(0, 3.0), density=False) hist_direct, _ = np.histogram(samples_direct, bins=nb_bins, range=(0, 3.0), density=False) tolerance = 0.05 for nb_samples, nb_samples_direct in zip(hist, hist_direct): density = nb_samples / samples.size density_direct = nb_samples_direct / samples_direct.size assert ( density_direct - tolerance < density < density_direct + tolerance ) def test_df_is_stochastic_parameter(self): param = iap.ChiSquare(iap.Choice([1, 10])) seen = [0, 0] for _ in sm.xrange(1000): samples = param.draw_samples((100,)) exp = np.mean(samples) if 1 - 1.0 < exp < 1 + 1.0: seen[0] += 1 elif 10 - 4.0 < exp < 10 + 4.0: seen[1] += 1 else: assert False assert 500 - 100 < seen[0] < 500 + 100 assert 500 - 100 < seen[1] < 500 + 100 def test_larger_df_leads_to_more_variance(self): param1 = iap.ChiSquare(1) param2 = iap.ChiSquare(10) samples1 = param1.draw_samples((1000,)) samples2 = param2.draw_samples((1000,)) assert np.var(samples1) < np.var(samples2) assert 2*1 - 1.0 < np.var(samples1) < 2*1 + 1.0 assert 2*10 - 5.0 < np.var(samples2) < 2*10 + 5.0 def test_samples_same_values_for_same_seeds(self): param = iap.ChiSquare(1) samples1 = param.draw_samples((10, 5), random_state=iarandom.RNG(1234)) samples2 = param.draw_samples((10, 5), random_state=iarandom.RNG(1234)) assert np.allclose(samples1, samples2) class TestWeibull(unittest.TestCase): def setUp(self): reseed() def test___init__(self): param = iap.Weibull(1) assert ( param.__str__() == param.__repr__() == "Weibull(a=Deterministic(int 1))" ) def test_draw_sample(self): param = iap.Weibull(1) sample = param.draw_sample() assert sample.shape == tuple() assert 0 <= sample def test_via_comparison_to_np_weibull(self): param = iap.Weibull(1) samples = param.draw_samples((100, 1000)) samples_direct = iarandom.RNG(1234).weibull(a=1, size=(100, 1000)) assert samples.shape == (100, 1000) assert np.all(0 <= samples) samples = np.clip(samples, 0, 2) samples_direct = np.clip(samples_direct, 0, 2) nb_bins = 10 hist, _ = np.histogram(samples, bins=nb_bins, range=(0, 2.0), density=False) hist_direct, _ = np.histogram(samples_direct, bins=nb_bins, range=(0, 2.0), density=False) tolerance = 0.05 for nb_samples, nb_samples_direct in zip(hist, hist_direct): density = nb_samples / samples.size density_direct = nb_samples_direct / samples_direct.size assert ( density_direct - tolerance < density < density_direct + tolerance ) def test_argument_is_stochastic_parameter(self): param = iap.Weibull(iap.Choice([1, 0.5])) expected_first = scipy.special.gamma(1 + 1/1) expected_second = scipy.special.gamma(1 + 1/0.5) seen = [0, 0] for _ in sm.xrange(100): samples = param.draw_samples((50000,)) observed = np.mean(samples) 
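# ----------------------------------------------------------------------
# Illustrative note (not from the original suite): for a Weibull
# distribution with shape a and scale 1, the mean is gamma(1 + 1/a);
# that identity is what expected_first and expected_second in the
# surrounding test encode (gamma(2) = 1.0 for a=1, gamma(3) = 2.0 for
# a=0.5). A minimal standalone check under the same API; the helper
# name is hypothetical.
def _demo_weibull_mean(a=0.5, n=100000):
    samples = iap.Weibull(a).draw_samples((n,))
    expected = scipy.special.gamma(1 + 1.0/a)
    # the sample mean should land close to the analytic mean
    assert abs(np.mean(samples) - expected) < 0.1 * expected
# ----------------------------------------------------------------------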
matches_first = ( expected_first - 0.2 * expected_first < observed < expected_first + 0.2 * expected_first ) matches_second = ( expected_second - 0.2 * expected_second < observed < expected_second + 0.2 * expected_second ) if matches_first: seen[0] += 1 elif matches_second: seen[1] += 1 else: assert False assert 50 - 25 < seen[0] < 50 + 25 assert 50 - 25 < seen[1] < 50 + 25 def test_different_strengths(self): param1 = iap.Weibull(1) param2 = iap.Weibull(0.5) samples1 = param1.draw_samples((10000,)) samples2 = param2.draw_samples((10000,)) expected_first = ( scipy.special.gamma(1 + 2/1) - (scipy.special.gamma(1 + 1/1))**2 ) expected_second = ( scipy.special.gamma(1 + 2/0.5) - (scipy.special.gamma(1 + 1/0.5))**2 ) assert np.var(samples1) < np.var(samples2) assert ( expected_first - 0.2 * expected_first < np.var(samples1) < expected_first + 0.2 * expected_first ) assert ( expected_second - 0.2 * expected_second < np.var(samples2) < expected_second + 0.2 * expected_second ) def test_samples_same_values_for_same_seeds(self): param = iap.Weibull(1) samples1 = param.draw_samples((10, 5), random_state=iarandom.RNG(1234)) samples2 = param.draw_samples((10, 5), random_state=iarandom.RNG(1234)) assert np.allclose(samples1, samples2) class TestUniform(unittest.TestCase): def setUp(self): reseed() def test___init__(self): param = iap.Uniform(0, 1.0) assert ( param.__str__() == param.__repr__() == "Uniform(Deterministic(int 0), Deterministic(float 1.00000000))" ) def test_draw_sample(self): param = iap.Uniform(0, 1.0) sample = param.draw_sample() assert sample.shape == tuple() assert 0 - _eps(sample) < sample < 1.0 + _eps(sample) def test_draw_samples(self): param = iap.Uniform(0, 1.0) samples = param.draw_samples((10, 5)) assert samples.shape == (10, 5) assert np.all( np.logical_and( 0 - _eps(samples) < samples, samples < 1.0 + _eps(samples) ) ) def test_via_density_histogram(self): param = iap.Uniform(0, 1.0) samples = param.draw_samples((10000,)) nb_bins = 10 hist, _ = np.histogram(samples, bins=nb_bins, range=(0.0, 1.0), density=False) density_expected = 1.0/nb_bins density_tolerance = 0.05 for nb_samples in hist: density = nb_samples / samples.size assert ( density_expected - density_tolerance < density < density_expected + density_tolerance ) def test_negative_value(self): param = iap.Uniform(-1.0, 1.0) sample = param.draw_sample() samples = param.draw_samples((10, 5)) assert sample.shape == tuple() assert samples.shape == (10, 5) assert -1.0 - _eps(sample) < sample < 1.0 + _eps(sample) assert np.all( np.logical_and( -1.0 - _eps(samples) < samples, samples < 1.0 + _eps(samples) ) ) def test_wrong_argument_order(self): param = iap.Uniform(1.0, -1.0) sample = param.draw_sample() samples = param.draw_samples((10, 5)) assert sample.shape == tuple() assert samples.shape == (10, 5) assert -1.0 - _eps(sample) < sample < 1.0 + _eps(sample) assert np.all( np.logical_and( -1.0 - _eps(samples) < samples, samples < 1.0 + _eps(samples) ) ) def test_arguments_are_integers(self): param = iap.Uniform(-1, 1) sample = param.draw_sample() samples = param.draw_samples((10, 5)) assert sample.shape == tuple() assert samples.shape == (10, 5) assert -1.0 - _eps(sample) < sample < 1.0 + _eps(sample) assert np.all( np.logical_and( -1.0 - _eps(samples) < samples, samples < 1.0 + _eps(samples) ) ) def test_arguments_are_identical(self): param = iap.Uniform(1, 1) sample = param.draw_sample() samples = param.draw_samples((10, 5)) assert sample.shape == tuple() assert samples.shape == (10, 5) assert 1.0 - _eps(sample) < sample 
< 1.0 + _eps(sample) assert np.all( np.logical_and( 1.0 - _eps(samples) < samples, samples < 1.0 + _eps(samples) ) ) def test_samples_same_values_for_same_seeds(self): param = iap.Uniform(-1.0, 1.0) samples1 = param.draw_samples((10, 5), random_state=iarandom.RNG(1234)) samples2 = param.draw_samples((10, 5), random_state=iarandom.RNG(1234)) assert np.allclose(samples1, samples2) class TestBeta(unittest.TestCase): @classmethod def _mean(cls, alpha, beta): return alpha / (alpha + beta) @classmethod def _var(cls, alpha, beta): return (alpha * beta) / ((alpha + beta)**2 * (alpha + beta + 1)) def setUp(self): reseed() def test___init__(self): param = iap.Beta(0.5, 0.5) assert ( param.__str__() == param.__repr__() == "Beta(" "Deterministic(float 0.50000000), " "Deterministic(float 0.50000000)" ")" ) def test_draw_sample(self): param = iap.Beta(0.5, 0.5) sample = param.draw_sample() assert sample.shape == tuple() assert 0 - _eps(sample) < sample < 1.0 + _eps(sample) def test_draw_samples(self): param = iap.Beta(0.5, 0.5) samples = param.draw_samples((100, 1000)) assert samples.shape == (100, 1000) assert np.all( np.logical_and( 0 - _eps(samples) <= samples, samples <= 1.0 + _eps(samples) ) ) def test_via_comparison_to_np_beta(self): param = iap.Beta(0.5, 0.5) samples = param.draw_samples((100, 1000)) samples_direct = iarandom.RNG(1234).beta( a=0.5, b=0.5, size=(100, 1000)) nb_bins = 10 hist, _ = np.histogram(samples, bins=nb_bins, range=(0, 1.0), density=False) hist_direct, _ = np.histogram(samples_direct, bins=nb_bins, range=(0, 1.0), density=False) tolerance = 0.05 for nb_samples, nb_samples_direct in zip(hist, hist_direct): density = nb_samples / samples.size density_direct = nb_samples_direct / samples_direct.size assert ( density_direct - tolerance < density < density_direct + tolerance ) def test_argument_is_stochastic_parameter(self): param = iap.Beta(iap.Choice([0.5, 2]), 0.5) expected_first = self._mean(0.5, 0.5) expected_second = self._mean(2, 0.5) seen = [0, 0] for _ in sm.xrange(100): samples = param.draw_samples((10000,)) observed = np.mean(samples) if expected_first - 0.05 < observed < expected_first + 0.05: seen[0] += 1 elif expected_second - 0.05 < observed < expected_second + 0.05: seen[1] += 1 else: assert False assert 50 - 25 < seen[0] < 50 + 25 assert 50 - 25 < seen[1] < 50 + 25 def test_compare_curves_of_different_arguments(self): param1 = iap.Beta(2, 2) param2 = iap.Beta(0.5, 0.5) samples1 = param1.draw_samples((10000,)) samples2 = param2.draw_samples((10000,)) expected_first = self._var(2, 2) expected_second = self._var(0.5, 0.5) assert np.var(samples1) < np.var(samples2) assert ( expected_first - 0.1 * expected_first < np.var(samples1) < expected_first + 0.1 * expected_first ) assert ( expected_second - 0.1 * expected_second < np.var(samples2) < expected_second + 0.1 * expected_second ) def test_samples_same_values_for_same_seeds(self): param = iap.Beta(0.5, 0.5) samples1 = param.draw_samples((10, 5), random_state=iarandom.RNG(1234)) samples2 = param.draw_samples((10, 5), random_state=iarandom.RNG(1234)) assert np.allclose(samples1, samples2) class TestDeterministic(unittest.TestCase): def setUp(self): reseed() def test___init__(self): pairs = [ (0, "Deterministic(int 0)"), (1.0, "Deterministic(float 1.00000000)"), ("test", "Deterministic(test)") ] for value, expected in pairs: with self.subTest(value=value): param = iap.Deterministic(value) assert ( param.__str__() == param.__repr__() == expected ) def test_samples_same_values_for_same_seeds(self): values = [ -100, -54, 
-1, 0, 1, 54, 100, -100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0 ] for value in values: with self.subTest(value=value): param = iap.Deterministic(value) rs1 = iarandom.RNG(123456) rs2 = iarandom.RNG(123456) samples1 = param.draw_samples(20, random_state=rs1) samples2 = param.draw_samples(20, random_state=rs2) assert np.array_equal(samples1, samples2) def test_draw_sample_int(self): values = [-100, -54, -1, 0, 1, 54, 100] for value in values: with self.subTest(value=value): param = iap.Deterministic(value) sample1 = param.draw_sample() sample2 = param.draw_sample() assert sample1.shape == tuple() assert sample1 == sample2 def test_draw_sample_float(self): values = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0] for value in values: with self.subTest(value=value): param = iap.Deterministic(value) sample1 = param.draw_sample() sample2 = param.draw_sample() assert sample1.shape == tuple() assert np.isclose( sample1, sample2, rtol=0, atol=_eps(sample1)) def test_draw_samples_int(self): values = [-100, -54, -1, 0, 1, 54, 100] shapes = [10, 10, (5, 3), (5, 3), (4, 5, 3), (4, 5, 3)] for value, shape in itertools.product(values, shapes): with self.subTest(value=value, shape=shape): param = iap.Deterministic(value) samples = param.draw_samples(shape) shape_expected = ( shape if isinstance(shape, tuple) else tuple([shape])) assert samples.shape == shape_expected assert np.all(samples == value) def test_draw_samples_float(self): values = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0] shapes = [10, 10, (5, 3), (5, 3), (4, 5, 3), (4, 5, 3)] for value, shape in itertools.product(values, shapes): with self.subTest(value=value, shape=shape): param = iap.Deterministic(value) samples = param.draw_samples(shape) shape_expected = ( shape if isinstance(shape, tuple) else tuple([shape])) assert samples.shape == shape_expected assert np.allclose(samples, value, rtol=0, atol=_eps(samples)) def test_argument_is_stochastic_parameter(self): seen = [0, 0] for _ in sm.xrange(200): param = iap.Deterministic(iap.Choice([0, 1])) seen[param.value] += 1 assert 100 - 50 < seen[0] < 100 + 50 assert 100 - 50 < seen[1] < 100 + 50 def test_argument_has_invalid_type(self): with self.assertRaises(Exception) as context: _ = iap.Deterministic([1, 2, 3]) self.assertTrue( "Expected StochasticParameter object or number or string" in str(context.exception)) class TestFromLowerResolution(unittest.TestCase): def setUp(self): reseed() def test___init___size_percent(self): param = iap.FromLowerResolution(other_param=iap.Deterministic(0), size_percent=1, method="nearest") assert ( param.__str__() == param.__repr__() == "FromLowerResolution(" "size_percent=Deterministic(int 1), " "method=Deterministic(nearest), " "other_param=Deterministic(int 0)" ")" ) def test___init___size_px(self): param = iap.FromLowerResolution(other_param=iap.Deterministic(0), size_px=1, method="nearest") assert ( param.__str__() == param.__repr__() == "FromLowerResolution(" "size_px=Deterministic(int 1), " "method=Deterministic(nearest), " "other_param=Deterministic(int 0)" ")" ) def test_binomial_hwc(self): param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8) samples = param.draw_samples((8, 8, 1)) uq = np.unique(samples) assert samples.shape == (8, 8, 1) assert len(uq) == 2 assert 0 in uq assert 1 in uq def test_binomial_nhwc(self): param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8) samples_nhwc = param.draw_samples((1, 8, 8, 1)) uq = np.unique(samples_nhwc) assert samples_nhwc.shape == (1, 8, 8, 1) assert len(uq) == 2 
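# ----------------------------------------------------------------------
# Illustrative sketch (not from the original suite): FromLowerResolution
# draws from other_param on a coarse grid (controlled via size_px or
# size_percent) and upscales the result to the requested shape, which is
# why the binomial samples in the surrounding tests form contiguous
# blobs rather than per-pixel noise. Minimal usage under the same API;
# the helper name is hypothetical.
def _demo_coarse_binomial_mask():
    param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=4)
    # roughly a 4px-wide grid of 0/1 cells, upscaled to 64x64x1
    return param.draw_samples((64, 64, 1))
# ----------------------------------------------------------------------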
assert 0 in uq assert 1 in uq def test_draw_samples_with_too_many_dimensions(self): # (N, H, W, C, something) causing error param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8) with self.assertRaises(Exception) as context: _ = param.draw_samples((1, 8, 8, 1, 1)) self.assertTrue( "FromLowerResolution can only generate samples of shape" in str(context.exception) ) def test_binomial_hw3(self): # C=3 param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8) samples = param.draw_samples((8, 8, 3)) uq = np.unique(samples) assert samples.shape == (8, 8, 3) assert len(uq) == 2 assert 0 in uq assert 1 in uq def test_different_size_px_arguments(self): # different sizes in px param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2) param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=16) seen_components = [0, 0] seen_pixels = [0, 0] for _ in sm.xrange(100): samples1 = param1.draw_samples((16, 16, 1)) samples2 = param2.draw_samples((16, 16, 1)) _, num1 = skimage.morphology.label(samples1, connectivity=1, background=0, return_num=True) _, num2 = skimage.morphology.label(samples2, connectivity=1, background=0, return_num=True) seen_components[0] += num1 seen_components[1] += num2 seen_pixels[0] += np.sum(samples1 == 1) seen_pixels[1] += np.sum(samples2 == 1) assert seen_components[0] < seen_components[1] assert ( seen_pixels[0] / seen_components[0] > seen_pixels[1] / seen_components[1] ) def test_different_size_px_arguments_with_tuple(self): # different sizes in px, one given as tuple (a, b) param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2) param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=(2, 16)) seen_components = [0, 0] seen_pixels = [0, 0] for _ in sm.xrange(400): samples1 = param1.draw_samples((16, 16, 1)) samples2 = param2.draw_samples((16, 16, 1)) _, num1 = skimage.morphology.label(samples1, connectivity=1, background=0, return_num=True) _, num2 = skimage.morphology.label(samples2, connectivity=1, background=0, return_num=True) seen_components[0] += num1 seen_components[1] += num2 seen_pixels[0] += np.sum(samples1 == 1) seen_pixels[1] += np.sum(samples2 == 1) assert seen_components[0] < seen_components[1] assert ( seen_pixels[0] / seen_components[0] > seen_pixels[1] / seen_components[1] ) def test_different_size_px_argument_with_stochastic_parameters(self): # different sizes in px, given as StochasticParameter param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=iap.Deterministic(1)) param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=iap.Choice([8, 16])) seen_components = [0, 0] seen_pixels = [0, 0] for _ in sm.xrange(100): samples1 = param1.draw_samples((16, 16, 1)) samples2 = param2.draw_samples((16, 16, 1)) _, num1 = skimage.morphology.label(samples1, connectivity=1, background=0, return_num=True) _, num2 = skimage.morphology.label(samples2, connectivity=1, background=0, return_num=True) seen_components[0] += num1 seen_components[1] += num2 seen_pixels[0] += np.sum(samples1 == 1) seen_pixels[1] += np.sum(samples2 == 1) assert seen_components[0] < seen_components[1] assert ( seen_pixels[0] / seen_components[0] > seen_pixels[1] / seen_components[1] ) def test_size_px_has_invalid_datatype(self): # bad datatype for size_px with self.assertRaises(Exception) as context: _ = iap.FromLowerResolution(iap.Binomial(0.5), size_px=False) self.assertTrue("Expected " in str(context.exception)) def test_min_size(self): # min_size param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2) param2 = iap.FromLowerResolution(iap.Binomial(0.5), 
size_px=1, min_size=16) seen_components = [0, 0] seen_pixels = [0, 0] for _ in sm.xrange(100): samples1 = param1.draw_samples((16, 16, 1)) samples2 = param2.draw_samples((16, 16, 1)) _, num1 = skimage.morphology.label(samples1, connectivity=1, background=0, return_num=True) _, num2 = skimage.morphology.label(samples2, connectivity=1, background=0, return_num=True) seen_components[0] += num1 seen_components[1] += num2 seen_pixels[0] += np.sum(samples1 == 1) seen_pixels[1] += np.sum(samples2 == 1) assert seen_components[0] < seen_components[1] assert ( seen_pixels[0] / seen_components[0] > seen_pixels[1] / seen_components[1] ) def test_size_percent(self): # different sizes in percent param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=0.01) param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=0.8) seen_components = [0, 0] seen_pixels = [0, 0] for _ in sm.xrange(100): samples1 = param1.draw_samples((16, 16, 1)) samples2 = param2.draw_samples((16, 16, 1)) _, num1 = skimage.morphology.label(samples1, connectivity=1, background=0, return_num=True) _, num2 = skimage.morphology.label(samples2, connectivity=1, background=0, return_num=True) seen_components[0] += num1 seen_components[1] += num2 seen_pixels[0] += np.sum(samples1 == 1) seen_pixels[1] += np.sum(samples2 == 1) assert seen_components[0] < seen_components[1] assert ( seen_pixels[0] / seen_components[0] > seen_pixels[1] / seen_components[1] ) def test_size_percent_as_stochastic_parameters(self): # different sizes in percent, given as StochasticParameter param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=iap.Deterministic(0.01)) param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=iap.Choice([0.4, 0.8])) seen_components = [0, 0] seen_pixels = [0, 0] for _ in sm.xrange(100): samples1 = param1.draw_samples((16, 16, 1)) samples2 = param2.draw_samples((16, 16, 1)) _, num1 = skimage.morphology.label(samples1, connectivity=1, background=0, return_num=True) _, num2 = skimage.morphology.label(samples2, connectivity=1, background=0, return_num=True) seen_components[0] += num1 seen_components[1] += num2 seen_pixels[0] += np.sum(samples1 == 1) seen_pixels[1] += np.sum(samples2 == 1) assert seen_components[0] < seen_components[1] assert ( seen_pixels[0] / seen_components[0] > seen_pixels[1] / seen_components[1] ) def test_size_percent_has_invalid_datatype(self): # bad datatype for size_percent with self.assertRaises(Exception) as context: _ = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=False) self.assertTrue("Expected " in str(context.exception)) def test_method(self): # method given as StochasticParameter param = iap.FromLowerResolution( iap.Binomial(0.5), size_px=4, method=iap.Choice(["nearest", "linear"])) seen = [0, 0] for _ in sm.xrange(200): samples = param.draw_samples((16, 16, 1)) nb_in_between = np.sum( np.logical_and(0.05 < samples, samples < 0.95)) if nb_in_between == 0: seen[0] += 1 else: seen[1] += 1 assert 100 - 50 < seen[0] < 100 + 50 assert 100 - 50 < seen[1] < 100 + 50 def test_method_has_invalid_datatype(self): # bad datatype for method with self.assertRaises(Exception) as context: _ = iap.FromLowerResolution(iap.Binomial(0.5), size_px=4, method=False) self.assertTrue("Expected " in str(context.exception)) def test_samples_same_values_for_same_seeds(self): # multiple calls with same random_state param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2) samples1 = param.draw_samples((10, 5, 1), random_state=iarandom.RNG(1234)) samples2 = param.draw_samples((10, 5, 1), 
random_state=iarandom.RNG(1234))
        assert np.allclose(samples1, samples2)


class TestClip(unittest.TestCase):
    def setUp(self):
        reseed()

    def test___init__(self):
        param = iap.Clip(iap.Deterministic(0), -1, 1)
        assert (
            param.__str__()
            == param.__repr__()
            == "Clip(Deterministic(int 0), -1.000000, 1.000000)"
        )

    def test_value_within_bounds(self):
        param = iap.Clip(iap.Deterministic(0), -1, 1)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample == 0
        assert np.all(samples == 0)

    def test_value_exactly_at_upper_bound(self):
        param = iap.Clip(iap.Deterministic(1), -1, 1)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample == 1
        assert np.all(samples == 1)

    def test_value_exactly_at_lower_bound(self):
        param = iap.Clip(iap.Deterministic(-1), -1, 1)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample == -1
        assert np.all(samples == -1)

    def test_value_is_within_bounds_and_float(self):
        param = iap.Clip(iap.Deterministic(0.5), -1, 1)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert 0.5 - _eps(sample) < sample < 0.5 + _eps(sample)
        assert np.all(
            np.logical_and(
                0.5 - _eps(sample) <= samples,
                samples <= 0.5 + _eps(sample)
            )
        )

    def test_value_is_above_upper_bound(self):
        param = iap.Clip(iap.Deterministic(2), -1, 1)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample == 1
        assert np.all(samples == 1)

    def test_value_is_below_lower_bound(self):
        param = iap.Clip(iap.Deterministic(-2), -1, 1)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample == -1
        assert np.all(samples == -1)

    def test_value_is_sometimes_within_bounds_sometimes_beyond(self):
        param = iap.Clip(iap.Choice([0, 2]), -1, 1)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample in [0, 1]
        assert np.all(np.logical_or(samples == 0, samples == 1))

    def test_samples_same_values_for_same_seeds(self):
        param = iap.Clip(iap.Choice([0, 2]), -1, 1)
        samples1 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        assert np.array_equal(samples1, samples2)

    def test_lower_bound_is_none(self):
        param = iap.Clip(iap.Deterministic(0), None, 1)
        sample = param.draw_sample()
        assert sample == 0
        assert (
            param.__str__()
            == param.__repr__()
            == "Clip(Deterministic(int 0), None, 1.000000)"
        )

    def test_upper_bound_is_none(self):
        param = iap.Clip(iap.Deterministic(0), 0, None)
        sample = param.draw_sample()
        assert sample == 0
        assert (
            param.__str__()
            == param.__repr__()
            == "Clip(Deterministic(int 0), 0.000000, None)"
        )

    def test_both_bounds_are_none(self):
        param = iap.Clip(iap.Deterministic(0), None, None)
        sample = param.draw_sample()
        assert sample == 0
        assert (
            param.__str__()
            == param.__repr__()
            == "Clip(Deterministic(int 0), None, None)"
        )


class TestDiscretize(unittest.TestCase):
    def setUp(self):
        reseed()

    def test___init__(self):
        param = iap.Discretize(iap.Deterministic(0))
        assert (
            param.__str__()
            == param.__repr__()
            == "Discretize(Deterministic(int 0))"
        )

    def
test_applied_to_deterministic(self): values = [-100.2, -54.3, -1.0, -1, -0.7, -0.00043, 0, 0.00043, 0.7, 1.0, 1, 54.3, 100.2] for value in values: with self.subTest(value=value): param = iap.Discretize(iap.Deterministic(value)) value_expected = np.round( np.float64([value]) ).astype(np.int32)[0] sample = param.draw_sample() samples = param.draw_samples((10, 5)) assert sample.shape == tuple() assert samples.shape == (10, 5) assert sample == value_expected assert np.all(samples == value_expected) # TODO why are these tests applied to DiscreteUniform instead of Uniform? def test_applied_to_discrete_uniform(self): param_orig = iap.DiscreteUniform(0, 1) param = iap.Discretize(param_orig) sample = param.draw_sample() samples = param.draw_samples((10, 5)) assert sample.shape == tuple() assert samples.shape == (10, 5) assert sample in [0, 1] assert np.all(np.logical_or(samples == 0, samples == 1)) def test_applied_to_discrete_uniform_with_wider_range(self): param_orig = iap.DiscreteUniform(0, 2) param = iap.Discretize(param_orig) samples1 = param_orig.draw_samples((10000,)) samples2 = param.draw_samples((10000,)) assert np.all(np.abs(samples1 - samples2) < 0.2*(10000/3)) def test_samples_same_values_for_same_seeds(self): param_orig = iap.DiscreteUniform(0, 2) param = iap.Discretize(param_orig) samples1 = param.draw_samples((10, 5), random_state=iarandom.RNG(1234)) samples2 = param.draw_samples((10, 5), random_state=iarandom.RNG(1234)) assert np.array_equal(samples1, samples2) class TestMultiply(unittest.TestCase): def setUp(self): reseed() def test___init__(self): param = iap.Multiply(iap.Deterministic(0), 1, elementwise=False) assert ( param.__str__() == param.__repr__() == "Multiply(Deterministic(int 0), Deterministic(int 1), False)" ) def test_multiply_example_integer_values(self): values_int = [-100, -54, -1, 0, 1, 54, 100] for v1, v2 in itertools.product(values_int, values_int): with self.subTest(left=v1, right=v2): p = iap.Multiply(iap.Deterministic(v1), v2) samples = p.draw_samples((2, 3)) assert p.draw_sample() == v1 * v2 assert samples.dtype.kind == "i" assert np.array_equal( samples, np.zeros((2, 3), dtype=np.int64) + v1 * v2 ) def test_multiply_example_integer_values_both_deterministic(self): values_int = [-100, -54, -1, 0, 1, 54, 100] for v1, v2 in itertools.product(values_int, values_int): with self.subTest(left=v1, right=v2): p = iap.Multiply(iap.Deterministic(v1), iap.Deterministic(v2)) samples = p.draw_samples((2, 3)) assert p.draw_sample() == v1 * v2 assert samples.dtype.name == "int32" assert np.array_equal( samples, np.zeros((2, 3), dtype=np.int32) + v1 * v2 ) def test_multiply_example_float_values(self): values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0] for v1, v2 in itertools.product(values_float, values_float): with self.subTest(left=v1, right=v2): p = iap.Multiply(iap.Deterministic(v1), v2) sample = p.draw_sample() samples = p.draw_samples((2, 3)) assert np.isclose(sample, v1 * v2, atol=1e-3, rtol=0) assert samples.dtype.kind == "f" assert np.allclose( samples, np.zeros((2, 3), dtype=np.float32) + v1 * v2 ) def test_multiply_example_float_values_both_deterministic(self): values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0] for v1, v2 in itertools.product(values_float, values_float): with self.subTest(left=v1, right=v2): p = iap.Multiply(iap.Deterministic(v1), iap.Deterministic(v2)) sample = p.draw_sample() samples = p.draw_samples((2, 3)) assert np.isclose(sample, v1 * v2, atol=1e-3, rtol=0) assert samples.dtype.kind == "f" assert 
np.allclose( samples, np.zeros((2, 3), dtype=np.float32) + v1 * v2 ) def test_multiply_by_stochastic_parameter(self): param = iap.Multiply(iap.Deterministic(1.0), (1.0, 2.0), elementwise=False) samples = param.draw_samples((10, 20)) samples_sorted = np.sort(samples.flatten()) assert samples.shape == (10, 20) assert np.all(samples > 1.0 * 1.0 - _eps(samples)) assert np.all(samples < 1.0 * 2.0 + _eps(samples)) assert ( samples_sorted[0] - _eps(samples_sorted[0]) < samples_sorted[-1] < samples_sorted[0] + _eps(samples_sorted[0]) ) def test_multiply_by_stochastic_parameter_elementwise(self): param = iap.Multiply(iap.Deterministic(1.0), (1.0, 2.0), elementwise=True) samples = param.draw_samples((10, 20)) samples_sorted = np.sort(samples.flatten()) assert samples.shape == (10, 20) assert np.all(samples > 1.0 * 1.0 - _eps(samples)) assert np.all(samples < 1.0 * 2.0 + _eps(samples)) assert not ( samples_sorted[0] - _eps(samples_sorted[0]) < samples_sorted[-1] < samples_sorted[0] + _eps(samples_sorted[0]) ) def test_multiply_stochastic_parameter_by_fixed_value(self): param = iap.Multiply(iap.Uniform(1.0, 2.0), 1.0, elementwise=False) samples = param.draw_samples((10, 20)) samples_sorted = np.sort(samples.flatten()) assert samples.shape == (10, 20) assert np.all(samples > 1.0 * 1.0 - _eps(samples)) assert np.all(samples < 2.0 * 1.0 + _eps(samples)) assert not ( samples_sorted[0] - _eps(samples_sorted[0]) < samples_sorted[-1] < samples_sorted[0] + _eps(samples_sorted[0]) ) def test_multiply_stochastic_parameter_by_fixed_value_elementwise(self): param = iap.Multiply(iap.Uniform(1.0, 2.0), 1.0, elementwise=True) samples = param.draw_samples((10, 20)) samples_sorted = np.sort(samples.flatten()) assert samples.shape == (10, 20) assert np.all(samples > 1.0 * 1.0 - _eps(samples)) assert np.all(samples < 2.0 * 1.0 + _eps(samples)) assert not ( samples_sorted[0] - _eps(samples_sorted[0]) < samples_sorted[-1] < samples_sorted[0] + _eps(samples_sorted[0]) ) class TestDivide(unittest.TestCase): def setUp(self): reseed() def test___init__(self): param = iap.Divide(iap.Deterministic(0), 1, elementwise=False) assert ( param.__str__() == param.__repr__() == "Divide(Deterministic(int 0), Deterministic(int 1), False)" ) def test_divide_integers(self): values_int = [-100, -54, -1, 0, 1, 54, 100] for v1, v2 in itertools.product(values_int, values_int): if v2 == 0: v2 = 1 with self.subTest(left=v1, right=v2): p = iap.Divide(iap.Deterministic(v1), v2) sample = p.draw_sample() samples = p.draw_samples((2, 3)) assert sample == (v1 / v2) assert samples.dtype.kind == "f" assert np.array_equal( samples, np.zeros((2, 3), dtype=np.float64) + (v1 / v2) ) def test_divide_integers_both_deterministic(self): values_int = [-100, -54, -1, 0, 1, 54, 100] for v1, v2 in itertools.product(values_int, values_int): if v2 == 0: v2 = 1 with self.subTest(left=v1, right=v2): p = iap.Divide(iap.Deterministic(v1), iap.Deterministic(v2)) sample = p.draw_sample() samples = p.draw_samples((2, 3)) assert sample == (v1 / v2) assert samples.dtype.kind == "f" assert np.array_equal( samples, np.zeros((2, 3), dtype=np.float64) + (v1 / v2) ) def test_divide_floats(self): values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0] for v1, v2 in itertools.product(values_float, values_float): if v2 == 0: v2 = 1 with self.subTest(left=v1, right=v2): p = iap.Divide(iap.Deterministic(v1), v2) sample = p.draw_sample() samples = p.draw_samples((2, 3)) assert ( (v1 / v2) - _eps(sample) <= sample <= (v1 / v2) + _eps(sample) ) assert samples.dtype.kind == 
"f" assert np.allclose( samples, np.zeros((2, 3), dtype=np.float64) + (v1 / v2) ) def test_divide_floats_both_deterministic(self): values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0] for v1, v2 in itertools.product(values_float, values_float): if v2 == 0: v2 = 1 with self.subTest(left=v1, right=v2): p = iap.Divide(iap.Deterministic(v1), iap.Deterministic(v2)) sample = p.draw_sample() samples = p.draw_samples((2, 3)) assert ( (v1 / v2) - _eps(sample) <= sample <= (v1 / v2) + _eps(sample) ) assert samples.dtype.kind == "f" assert np.allclose( samples, np.zeros((2, 3), dtype=np.float64) + (v1 / v2) ) def test_divide_by_stochastic_parameter(self): param = iap.Divide(iap.Deterministic(1.0), (1.0, 2.0), elementwise=False) samples = param.draw_samples((10, 20)) samples_sorted = np.sort(samples.flatten()) assert samples.shape == (10, 20) assert np.all(samples > (1.0 / 2.0) - _eps(samples)) assert np.all(samples < (1.0 / 1.0) + _eps(samples)) assert ( samples_sorted[0] - _eps(samples) < samples_sorted[-1] < samples_sorted[0] + _eps(samples) ) def test_divide_by_stochastic_parameter_elementwise(self): param = iap.Divide(iap.Deterministic(1.0), (1.0, 2.0), elementwise=True) samples = param.draw_samples((10, 20)) samples_sorted = np.sort(samples.flatten()) assert samples.shape == (10, 20) assert np.all(samples > (1.0 / 2.0) - _eps(samples)) assert np.all(samples < (1.0 / 1.0) + _eps(samples)) assert not ( samples_sorted[0] - _eps(samples) < samples_sorted[-1] < samples_sorted[0] + _eps(samples) ) def test_divide_stochastic_parameter_by_float(self): param = iap.Divide(iap.Uniform(1.0, 2.0), 1.0, elementwise=False) samples = param.draw_samples((10, 20)) samples_sorted = np.sort(samples.flatten()) assert samples.shape == (10, 20) assert np.all(samples > (1.0 / 1.0) - _eps(samples)) assert np.all(samples < (2.0 / 1.0) + _eps(samples)) assert not ( samples_sorted[0] - _eps(samples) < samples_sorted[-1] < samples_sorted[0] + _eps(samples) ) def test_divide_stochastic_parameter_by_float_elementwise(self): param = iap.Divide(iap.Uniform(1.0, 2.0), 1.0, elementwise=True) samples = param.draw_samples((10, 20)) samples_sorted = np.sort(samples.flatten()) assert samples.shape == (10, 20) assert np.all(samples > (1.0 / 1.0) - _eps(samples)) assert np.all(samples < (2.0 / 1.0) + _eps(samples)) assert not ( samples_sorted[0] - _eps(samples_sorted) < samples_sorted[-1] < samples_sorted[-1] < samples_sorted[0] + _eps(samples_sorted) ) def test_divide_by_stochastic_parameter_that_can_by_zero(self): # test division by zero automatically being converted to division by 1 param = iap.Divide(2, iap.Choice([0, 2]), elementwise=True) samples = param.draw_samples((10, 20)) samples_unique = np.sort(np.unique(samples.flatten())) assert samples_unique[0] == 1 and samples_unique[1] == 2 def test_divide_by_zero(self): param = iap.Divide(iap.Deterministic(1), 0, elementwise=False) sample = param.draw_sample() assert sample == 1 class TestAdd(unittest.TestCase): def setUp(self): reseed() def test___init__(self): param = iap.Add(iap.Deterministic(0), 1, elementwise=False) assert ( param.__str__() == param.__repr__() == "Add(Deterministic(int 0), Deterministic(int 1), False)" ) def test_add_integers(self): values_int = [-100, -54, -1, 0, 1, 54, 100] for v1, v2 in itertools.product(values_int, values_int): with self.subTest(left=v1, right=v2): p = iap.Add(iap.Deterministic(v1), v2) sample = p.draw_sample() samples = p.draw_samples((2, 3)) assert sample == v1 + v2 assert samples.dtype.kind == "i" assert np.array_equal( 
samples, np.zeros((2, 3), dtype=np.int32) + v1 + v2 ) def test_add_integers_both_deterministic(self): values_int = [-100, -54, -1, 0, 1, 54, 100] for v1, v2 in itertools.product(values_int, values_int): with self.subTest(left=v1, right=v2): p = iap.Add(iap.Deterministic(v1), iap.Deterministic(v2)) sample = p.draw_sample() samples = p.draw_samples((2, 3)) assert sample == v1 + v2 assert samples.dtype.kind == "i" assert np.array_equal( samples, np.zeros((2, 3), dtype=np.int32) + v1 + v2 ) def test_add_floats(self): values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0] for v1, v2 in itertools.product(values_float, values_float): with self.subTest(left=v1, right=v2): p = iap.Add(iap.Deterministic(v1), v2) sample = p.draw_sample() samples = p.draw_samples((2, 3)) assert np.isclose(sample, v1 + v2, atol=1e-3, rtol=0) assert samples.dtype.kind == "f" assert np.allclose( samples, np.zeros((2, 3), dtype=np.float32) + v1 + v2 ) def test_add_floats_both_deterministic(self): values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0] for v1, v2 in itertools.product(values_float, values_float): with self.subTest(left=v1, right=v2): p = iap.Add(iap.Deterministic(v1), iap.Deterministic(v2)) sample = p.draw_sample() samples = p.draw_samples((2, 3)) assert np.isclose(sample, v1 + v2, atol=1e-3, rtol=0) assert samples.dtype.kind == "f" assert np.allclose( samples, np.zeros((2, 3), dtype=np.float32) + v1 + v2 ) def test_add_stochastic_parameter(self): param = iap.Add(iap.Deterministic(1.0), (1.0, 2.0), elementwise=False) samples = param.draw_samples((10, 20)) samples_sorted = np.sort(samples.flatten()) assert samples.shape == (10, 20) assert np.all(samples >= 1.0 + 1.0 - _eps(samples)) assert np.all(samples <= 1.0 + 2.0 + _eps(samples)) assert ( samples_sorted[0] - _eps(samples_sorted[0]) < samples_sorted[-1] < samples_sorted[0] + _eps(samples_sorted[0]) ) def test_add_stochastic_parameter_elementwise(self): param = iap.Add(iap.Deterministic(1.0), (1.0, 2.0), elementwise=True) samples = param.draw_samples((10, 20)) samples_sorted = np.sort(samples.flatten()) assert samples.shape == (10, 20) assert np.all(samples >= 1.0 + 1.0 - _eps(samples)) assert np.all(samples <= 1.0 + 2.0 + _eps(samples)) assert not ( samples_sorted[0] - _eps(samples_sorted[0]) < samples_sorted[-1] < samples_sorted[0] + _eps(samples_sorted[0]) ) def test_add_to_stochastic_parameter(self): param = iap.Add(iap.Uniform(1.0, 2.0), 1.0, elementwise=False) samples = param.draw_samples((10, 20)) samples_sorted = np.sort(samples.flatten()) assert samples.shape == (10, 20) assert np.all(samples >= 1.0 + 1.0 - _eps(samples)) assert np.all(samples <= 2.0 + 1.0 + _eps(samples)) assert not ( samples_sorted[0] - _eps(samples_sorted[0]) < samples_sorted[-1] < samples_sorted[0] + _eps(samples_sorted[0]) ) def test_add_to_stochastic_parameter_elementwise(self): param = iap.Add(iap.Uniform(1.0, 2.0), 1.0, elementwise=True) samples = param.draw_samples((10, 20)) samples_sorted = np.sort(samples.flatten()) assert samples.shape == (10, 20) assert np.all(samples >= 1.0 + 1.0 - _eps(samples)) assert np.all(samples <= 2.0 + 1.0 + _eps(samples)) assert not ( samples_sorted[0] - _eps(samples_sorted[0]) < samples_sorted[-1] < samples_sorted[0] + _eps(samples_sorted[0]) ) class TestSubtract(unittest.TestCase): def setUp(self): reseed() def test___init__(self): param = iap.Subtract(iap.Deterministic(0), 1, elementwise=False) assert ( param.__str__() == param.__repr__() == "Subtract(Deterministic(int 0), Deterministic(int 1), False)" ) def 
test_subtract_integers(self): values_int = [-100, -54, -1, 0, 1, 54, 100] for v1, v2 in itertools.product(values_int, values_int): with self.subTest(left=v1, right=v2): p = iap.Subtract(iap.Deterministic(v1), v2) sample = p.draw_sample() samples = p.draw_samples((2, 3)) assert sample == v1 - v2 assert samples.dtype.kind == "i" assert np.array_equal( samples, np.zeros((2, 3), dtype=np.int64) + v1 - v2 ) def test_subtract_integers_both_deterministic(self): values_int = [-100, -54, -1, 0, 1, 54, 100] for v1, v2 in itertools.product(values_int, values_int): with self.subTest(left=v1, right=v2): p = iap.Subtract(iap.Deterministic(v1), iap.Deterministic(v2)) sample = p.draw_sample() samples = p.draw_samples((2, 3)) assert sample == v1 - v2 assert samples.dtype.kind == "i" assert np.array_equal( samples, np.zeros((2, 3), dtype=np.int64) + v1 - v2 ) def test_subtract_floats(self): values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0] for v1, v2 in itertools.product(values_float, values_float): with self.subTest(left=v1, right=v2): p = iap.Subtract(iap.Deterministic(v1), v2) sample = p.draw_sample() samples = p.draw_samples((2, 3)) assert v1 - v2 - _eps(sample) < sample < v1 - v2 + _eps(sample) assert samples.dtype.kind == "f" assert np.allclose( samples, np.zeros((2, 3), dtype=np.float64) + v1 - v2 ) def test_subtract_floats_both_deterministic(self): values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0] for v1, v2 in itertools.product(values_float, values_float): with self.subTest(left=v1, right=v2): p = iap.Subtract(iap.Deterministic(v1), iap.Deterministic(v2)) sample = p.draw_sample() samples = p.draw_samples((2, 3)) assert v1 - v2 - _eps(sample) < sample < v1 - v2 + _eps(sample) assert samples.dtype.kind == "f" assert np.allclose( samples, np.zeros((2, 3), dtype=np.float64) + v1 - v2 ) def test_subtract_stochastic_parameter(self): param = iap.Subtract(iap.Deterministic(1.0), (1.0, 2.0), elementwise=False) samples = param.draw_samples((10, 20)) samples_sorted = np.sort(samples.flatten()) assert samples.shape == (10, 20) assert np.all(samples > 1.0 - 2.0 - _eps(samples)) assert np.all(samples < 1.0 - 1.0 + _eps(samples)) assert ( samples_sorted[0] - _eps(samples_sorted[0]) < samples_sorted[-1] < samples_sorted[0] + _eps(samples_sorted[0]) ) def test_subtract_stochastic_parameter_elementwise(self): param = iap.Subtract(iap.Deterministic(1.0), (1.0, 2.0), elementwise=True) samples = param.draw_samples((10, 20)) samples_sorted = np.sort(samples.flatten()) assert samples.shape == (10, 20) assert np.all(samples > 1.0 - 2.0 - _eps(samples)) assert np.all(samples < 1.0 - 1.0 + _eps(samples)) assert not ( samples_sorted[0] - _eps(samples_sorted[0]) < samples_sorted[-1] < samples_sorted[0] + _eps(samples_sorted[0]) ) def test_subtract_from_stochastic_parameter(self): param = iap.Subtract(iap.Uniform(1.0, 2.0), 1.0, elementwise=False) samples = param.draw_samples((10, 20)) samples_sorted = np.sort(samples.flatten()) assert samples.shape == (10, 20) assert np.all(samples > 1.0 - 1.0 - _eps(samples)) assert np.all(samples < 2.0 - 1.0 + _eps(samples)) assert not ( samples_sorted[0] - _eps(samples_sorted[0]) < samples_sorted[-1] < samples_sorted[0] + _eps(samples_sorted[0]) ) def test_subtract_from_stochastic_parameter_elementwise(self): param = iap.Subtract(iap.Uniform(1.0, 2.0), 1.0, elementwise=True) samples = param.draw_samples((10, 20)) samples_sorted = np.sort(samples.flatten()) assert samples.shape == (10, 20) assert np.all(samples > 1.0 - 1.0 - _eps(samples)) assert 
np.all(samples < 2.0 - 1.0 + _eps(samples)) assert not ( samples_sorted[0] - _eps(samples_sorted[0]) < samples_sorted[-1] < samples_sorted[0] + _eps(samples_sorted[0]) ) class TestPower(unittest.TestCase): def setUp(self): reseed() def test___init__(self): param = iap.Power(iap.Deterministic(0), 1, elementwise=False) assert ( param.__str__() == param.__repr__() == "Power(Deterministic(int 0), Deterministic(int 1), False)" ) def test_pairs(self): values = [ -100, -54, -1, 0, 1, 54, 100, -100.0, -54.0, -1.0, 0.0, 1.0, 54.0, 100.0 ] exponents = [-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2] for base, exponent in itertools.product(values, exponents): if base < 0 and ia.is_single_float(exponent): continue if base == 0 and exponent < 0: continue with self.subTest(base=base, exponent=exponent): p = iap.Power(iap.Deterministic(base), exponent) sample = p.draw_sample() samples = p.draw_samples((2, 3)) assert ( base ** exponent - _eps(sample) < sample < base ** exponent + _eps(sample) ) assert samples.dtype.kind == "f" assert np.allclose( samples, np.zeros((2, 3), dtype=np.float64) + base ** exponent ) def test_pairs_both_deterministic(self): values = [ -100, -54, -1, 0, 1, 54, 100, -100.0, -54.0, -1.0, 0.0, 1.0, 54.0, 100.0 ] exponents = [-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2] for base, exponent in itertools.product(values, exponents): if base < 0 and ia.is_single_float(exponent): continue if base == 0 and exponent < 0: continue with self.subTest(base=base, exponent=exponent): p = iap.Power(iap.Deterministic(base), iap.Deterministic(exponent)) sample = p.draw_sample() samples = p.draw_samples((2, 3)) assert ( base ** exponent - _eps(sample) < sample < base ** exponent + _eps(sample) ) assert samples.dtype.kind == "f" assert np.allclose( samples, np.zeros((2, 3), dtype=np.float64) + base ** exponent ) def test_exponent_is_stochastic_parameter(self): param = iap.Power(iap.Deterministic(1.5), (1.0, 2.0), elementwise=False) samples = param.draw_samples((10, 20)) samples_sorted = np.sort(samples.flatten()) assert samples.shape == (10, 20) assert np.all(samples > 1.5 ** 1.0 - 2 * _eps(samples)) assert np.all(samples < 1.5 ** 2.0 + 2 * _eps(samples)) assert ( samples_sorted[0] - _eps(samples_sorted[0]) < samples_sorted[-1] < samples_sorted[0] + _eps(samples_sorted[0]) ) def test_exponent_is_stochastic_parameter_elementwise(self): param = iap.Power(iap.Deterministic(1.5), (1.0, 2.0), elementwise=True) samples = param.draw_samples((10, 20)) samples_sorted = np.sort(samples.flatten()) assert samples.shape == (10, 20) assert np.all(samples > 1.5 ** 1.0 - 2 * _eps(samples)) assert np.all(samples < 1.5 ** 2.0 + 2 * _eps(samples)) assert not ( samples_sorted[0] - _eps(samples_sorted[0]) < samples_sorted[-1] < samples_sorted[0] + _eps(samples_sorted[0]) ) def test_value_is_uniform(self): param = iap.Power(iap.Uniform(1.0, 2.0), 1.0, elementwise=False) samples = param.draw_samples((10, 20)) samples_sorted = np.sort(samples.flatten()) assert samples.shape == (10, 20) assert np.all(samples > 1.0 ** 1.0 - 2 * _eps(samples)) assert np.all(samples < 2.0 ** 1.0 + 2 * _eps(samples)) assert not ( samples_sorted[0] - _eps(samples_sorted[0]) < samples_sorted[-1] < samples_sorted[0] + _eps(samples_sorted[0]) ) def test_value_is_uniform_elementwise(self): param = iap.Power(iap.Uniform(1.0, 2.0), 1.0, elementwise=True) samples = param.draw_samples((10, 20)) samples_sorted = np.sort(samples.flatten()) assert samples.shape == (10, 20) assert np.all(samples > 1.0 ** 1.0 - 2 * _eps(samples)) assert np.all(samples < 2.0 ** 1.0 + 2 * 
_eps(samples)) assert not ( samples_sorted[0] - _eps(samples_sorted[0]) < samples_sorted[-1] < samples_sorted[0] + _eps(samples_sorted[0]) ) class TestAbsolute(unittest.TestCase): def setUp(self): reseed() def test___init__(self): param = iap.Absolute(iap.Deterministic(0)) assert ( param.__str__() == param.__repr__() == "Absolute(Deterministic(int 0))" ) def test_fixed_values(self): simple_values = [-1.5, -1, -1.0, -0.1, 0, 0.0, 0.1, 1, 1.0, 1.5] for value in simple_values: with self.subTest(value=value): param = iap.Absolute(iap.Deterministic(value)) sample = param.draw_sample() samples = param.draw_samples((10, 5)) assert sample.shape == tuple() assert samples.shape == (10, 5) if ia.is_single_float(value): assert ( abs(value) - _eps(sample) < sample < abs(value) + _eps(sample) ) assert np.all(abs(value) - _eps(samples) < samples) assert np.all(samples < abs(value) + _eps(samples)) else: assert sample == abs(value) assert np.all(samples == abs(value)) def test_value_is_stochastic_parameter(self): param = iap.Absolute(iap.Choice([-3, -1, 1, 3])) sample = param.draw_sample() samples = param.draw_samples((10, 10)) samples_uq = np.sort(np.unique(samples)) assert sample.shape == tuple() assert sample in [3, 1] assert samples.shape == (10, 10) assert len(samples_uq) == 2 assert samples_uq[0] == 1 and samples_uq[1] == 3 class TestRandomSign(unittest.TestCase): def setUp(self): reseed() def test___init__(self): param = iap.RandomSign(iap.Deterministic(0), 0.5) assert ( param.__str__() == param.__repr__() == "RandomSign(Deterministic(int 0), 0.50)" ) def test_value_is_deterministic(self): param = iap.RandomSign(iap.Deterministic(1)) samples = param.draw_samples((1000,)) n_positive = np.sum(samples == 1) n_negative = np.sum(samples == -1) assert samples.shape == (1000,) assert n_positive + n_negative == 1000 assert 350 < n_positive < 750 def test_value_is_deterministic_many_samples(self): param = iap.RandomSign(iap.Deterministic(1)) seen = [0, 0] for _ in sm.xrange(1000): sample = param.draw_sample() assert sample.shape == tuple() if sample == 1: seen[1] += 1 else: seen[0] += 1 n_negative, n_positive = seen assert n_positive + n_negative == 1000 assert 350 < n_positive < 750 def test_value_is_stochastic_parameter(self): param = iap.RandomSign(iap.Choice([1, 2])) samples = param.draw_samples((4000,)) seen = [0, 0, 0, 0] seen[0] = np.sum(samples == -2) seen[1] = np.sum(samples == -1) seen[2] = np.sum(samples == 1) seen[3] = np.sum(samples == 2) assert np.sum(seen) == 4000 assert all([700 < v < 1300 for v in seen]) def test_samples_same_values_for_same_seeds(self): param = iap.RandomSign(iap.Choice([1, 2])) samples1 = param.draw_samples((100, 10), random_state=iarandom.RNG(1234)) samples2 = param.draw_samples((100, 10), random_state=iarandom.RNG(1234)) assert samples1.shape == (100, 10) assert samples2.shape == (100, 10) assert np.array_equal(samples1, samples2) assert np.sum(samples1 == -2) > 50 assert np.sum(samples1 == -1) > 50 assert np.sum(samples1 == 1) > 50 assert np.sum(samples1 == 2) > 50 class TestForceSign(unittest.TestCase): def setUp(self): reseed() def test___init__(self): param = iap.ForceSign(iap.Deterministic(0), True, "invert", 1) assert ( param.__str__() == param.__repr__() == "ForceSign(Deterministic(int 0), True, invert, 1)" ) def test_single_sample_positive(self): param = iap.ForceSign(iap.Deterministic(1), positive=True, mode="invert") sample = param.draw_sample() assert sample.shape == tuple() assert sample == 1 def test_single_sample_negative(self): param = 
iap.ForceSign(iap.Deterministic(1), positive=False, mode="invert") sample = param.draw_sample() assert sample.shape == tuple() assert sample == -1 def test_many_samples_positive(self): param = iap.ForceSign(iap.Deterministic(1), positive=True, mode="invert") samples = param.draw_samples(100) assert samples.shape == (100,) assert np.all(samples == 1) def test_many_samples_negative(self): param = iap.ForceSign(iap.Deterministic(1), positive=False, mode="invert") samples = param.draw_samples(100) assert samples.shape == (100,) assert np.all(samples == -1) def test_many_samples_negative_value_to_positive(self): param = iap.ForceSign(iap.Deterministic(-1), positive=True, mode="invert") samples = param.draw_samples(100) assert samples.shape == (100,) assert np.all(samples == 1) def test_many_samples_negative_value_to_negative(self): param = iap.ForceSign(iap.Deterministic(-1), positive=False, mode="invert") samples = param.draw_samples(100) assert samples.shape == (100,) assert np.all(samples == -1) def test_many_samples_stochastic_value_to_positive(self): param = iap.ForceSign(iap.Choice([-2, 1]), positive=True, mode="invert") samples = param.draw_samples(1000) n_twos = np.sum(samples == 2) n_ones = np.sum(samples == 1) assert samples.shape == (1000,) assert n_twos + n_ones == 1000 assert 200 < n_twos < 700 assert 200 < n_ones < 700 def test_many_samples_stochastic_value_to_positive_reroll(self): param = iap.ForceSign(iap.Choice([-2, 1]), positive=True, mode="reroll") samples = param.draw_samples(1000) n_twos = np.sum(samples == 2) n_ones = np.sum(samples == 1) assert samples.shape == (1000,) assert n_twos + n_ones == 1000 assert n_twos > 0 assert n_ones > 0 def test_many_samples_stochastic_value_to_positive_reroll_max_count(self): param = iap.ForceSign(iap.Choice([-2, 1]), positive=True, mode="reroll", reroll_count_max=100) samples = param.draw_samples(100) n_twos = np.sum(samples == 2) n_ones = np.sum(samples == 1) assert samples.shape == (100,) assert n_twos + n_ones == 100 assert n_twos < 5 def test_samples_same_values_for_same_seeds(self): param = iap.ForceSign(iap.Choice([-2, 1]), positive=True, mode="invert") samples1 = param.draw_samples((100, 10), random_state=iarandom.RNG(1234)) samples2 = param.draw_samples((100, 10), random_state=iarandom.RNG(1234)) assert samples1.shape == (100, 10) assert samples2.shape == (100, 10) assert np.array_equal(samples1, samples2) class TestPositive(unittest.TestCase): def setUp(self): reseed() def test_many_samples_reroll(self): param = iap.Positive(iap.Deterministic(-1), mode="reroll", reroll_count_max=1) samples = param.draw_samples((100,)) assert samples.shape == (100,) assert np.all(samples == 1) class TestNegative(unittest.TestCase): def setUp(self): reseed() def test_many_samples_reroll(self): param = iap.Negative(iap.Deterministic(1), mode="reroll", reroll_count_max=1) samples = param.draw_samples((100,)) assert samples.shape == (100,) assert np.all(samples == -1) class TestIterativeNoiseAggregator(unittest.TestCase): def setUp(self): reseed() def test___init__(self): param = iap.IterativeNoiseAggregator(iap.Deterministic(0), iterations=(1, 3), aggregation_method="max") assert ( param.__str__() == param.__repr__() == ( "IterativeNoiseAggregator(" "Deterministic(int 0), " "DiscreteUniform(Deterministic(int 1), " "Deterministic(int 3)" "), " "Deterministic(max)" ")" ) ) def test_value_is_deterministic_max_1_iter(self): param = iap.IterativeNoiseAggregator(iap.Deterministic(1), iterations=1, aggregation_method="max") sample = param.draw_sample() 
samples = param.draw_samples((2, 4)) assert sample.shape == tuple() assert samples.shape == (2, 4) assert sample == 1 assert np.all(samples == 1) def test_value_is_stochastic_avg_200_iter(self): param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]), iterations=200, aggregation_method="avg") sample = param.draw_sample() samples = param.draw_samples((2, 4)) assert sample.shape == tuple() assert samples.shape == (2, 4) assert 25 - 10 < sample < 25 + 10 assert np.all(np.logical_and(25 - 10 < samples, samples < 25 + 10)) def test_value_is_stochastic_max_100_iter(self): param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]), iterations=100, aggregation_method="max") sample = param.draw_sample() samples = param.draw_samples((2, 4)) assert sample.shape == tuple() assert samples.shape == (2, 4) assert sample == 50 assert np.all(samples == 50) def test_value_is_stochastic_min_100_iter(self): param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]), iterations=100, aggregation_method="min") sample = param.draw_sample() samples = param.draw_samples((2, 4)) assert sample.shape == tuple() assert samples.shape == (2, 4) assert sample == 0 assert np.all(samples == 0) def test_value_is_stochastic_avg_or_max_100_iter_evaluate_counts(self): seen = [0, 0, 0, 0] for _ in sm.xrange(100): param = iap.IterativeNoiseAggregator( iap.Choice([0, 50]), iterations=100, aggregation_method=["avg", "max"]) samples = param.draw_samples((1, 1)) diff_0 = abs(0 - samples[0, 0]) diff_25 = abs(25 - samples[0, 0]) diff_50 = abs(50 - samples[0, 0]) if diff_25 < 10.0: seen[0] += 1 elif diff_50 < _eps(samples): seen[1] += 1 elif diff_0 < _eps(samples): seen[2] += 1 else: seen[3] += 1 assert seen[2] <= 2 # around 0.0 assert seen[3] <= 2 # 0.0+eps <= x < 15.0 or 35.0 < x < 50.0 or >50.0 assert 50 - 20 < seen[0] < 50 + 20 assert 50 - 20 < seen[1] < 50 + 20 def test_value_is_stochastic_avg_tuple_as_iter_evaluate_histograms(self): # iterations as tuple param = iap.IterativeNoiseAggregator( iap.Uniform(-1.0, 1.0), iterations=(1, 100), aggregation_method="avg") diffs = [] for _ in sm.xrange(100): samples = param.draw_samples((1, 1)) diff = abs(samples[0, 0] - 0.0) diffs.append(diff) nb_bins = 3 hist, _ = np.histogram(diffs, bins=nb_bins, range=(-1.0, 1.0), density=False) assert hist[1] > hist[0] assert hist[1] > hist[2] def test_value_is_stochastic_max_list_as_iter_evaluate_counts(self): # iterations as list seen = [0, 0] for _ in sm.xrange(400): param = iap.IterativeNoiseAggregator( iap.Choice([0, 50]), iterations=[1, 100], aggregation_method=["max"]) samples = param.draw_samples((1, 1)) diff_0 = abs(0 - samples[0, 0]) diff_50 = abs(50 - samples[0, 0]) if diff_50 < _eps(samples): seen[0] += 1 elif diff_0 < _eps(samples): seen[1] += 1 else: assert False assert 300 - 50 < seen[0] < 300 + 50 assert 100 - 50 < seen[1] < 100 + 50 def test_value_is_stochastic_all_100_iter(self): # test ia.ALL as aggregation_method # note that each method individually and list of methods are already # tested, so no in depth test is needed here param = iap.IterativeNoiseAggregator( iap.Choice([0, 50]), iterations=100, aggregation_method=ia.ALL) assert isinstance(param.aggregation_method, iap.Choice) assert len(param.aggregation_method.a) == 3 assert [v in param.aggregation_method.a for v in ["min", "avg", "max"]] def test_value_is_stochastic_max_2_iter(self): param = iap.IterativeNoiseAggregator( iap.Choice([0, 50]), iterations=2, aggregation_method="max") samples = param.draw_samples((2, 1000)) nb_0 = np.sum(samples == 0) nb_50 = np.sum(samples == 50) 
assert nb_0 + nb_50 == 2 * 1000 assert 0.25 - 0.05 < nb_0 / (2 * 1000) < 0.25 + 0.05 def test_samples_same_values_for_same_seeds(self): param = iap.IterativeNoiseAggregator( iap.Choice([0, 50]), iterations=5, aggregation_method="avg") samples1 = param.draw_samples((100, 10), random_state=iarandom.RNG(1234)) samples2 = param.draw_samples((100, 10), random_state=iarandom.RNG(1234)) assert samples1.shape == (100, 10) assert samples2.shape == (100, 10) assert np.allclose(samples1, samples2) def test_stochastic_param_as_aggregation_method(self): param = iap.IterativeNoiseAggregator( iap.Choice([0, 50]), iterations=5, aggregation_method=iap.Deterministic("max")) assert isinstance(param.aggregation_method, iap.Deterministic) assert param.aggregation_method.value == "max" def test_bad_datatype_for_aggregation_method(self): with self.assertRaises(Exception) as context: _ = iap.IterativeNoiseAggregator( iap.Choice([0, 50]), iterations=5, aggregation_method=False) self.assertTrue( "Expected aggregation_method to be" in str(context.exception)) def test_bad_datatype_for_iterations(self): with self.assertRaises(Exception) as context: _ = iap.IterativeNoiseAggregator( iap.Choice([0, 50]), iterations=False, aggregation_method="max") self.assertTrue("Expected iterations to be" in str(context.exception)) class TestSigmoid(unittest.TestCase): def setUp(self): reseed() def test___init__(self): param = iap.Sigmoid( iap.Deterministic(0), threshold=(-10, 10), activated=True, mul=1, add=0) assert ( param.__str__() == param.__repr__() == ( "Sigmoid(" "Deterministic(int 0), " "Uniform(" "Deterministic(int -10), " "Deterministic(int 10)" "), " "Deterministic(int 1), " "1, " "0)" ) ) def test_activated_is_true(self): param = iap.Sigmoid( iap.Deterministic(5), add=0, mul=1, threshold=0.5, activated=True) expected = 1 / (1 + np.exp(-(5 * 1 + 0 - 0.5))) sample = param.draw_sample() samples = param.draw_samples((5, 10)) assert sample.shape == tuple() assert samples.shape == (5, 10) assert expected - _eps(sample) < sample < expected + _eps(sample) assert np.all( np.logical_and( expected - _eps(samples) < samples, samples < expected + _eps(samples) ) ) def test_activated_is_false(self): param = iap.Sigmoid( iap.Deterministic(5), add=0, mul=1, threshold=0.5, activated=False) expected = 5 sample = param.draw_sample() samples = param.draw_samples((5, 10)) assert sample.shape == tuple() assert samples.shape == (5, 10) assert expected - _eps(sample) < sample < expected + _eps(sample) assert np.all( np.logical_and( expected - _eps(sample) < samples, samples < expected + _eps(sample) ) ) def test_activated_is_probabilistic(self): param = iap.Sigmoid( iap.Deterministic(5), add=0, mul=1, threshold=0.5, activated=0.5) expected_first = 5 expected_second = 1 / (1 + np.exp(-(5 * 1 + 0 - 0.5))) seen = [0, 0] for _ in sm.xrange(1000): sample = param.draw_sample() diff_first = abs(sample - expected_first) diff_second = abs(sample - expected_second) if diff_first < _eps(sample): seen[0] += 1 elif diff_second < _eps(sample): seen[1] += 1 else: assert False assert 500 - 150 < seen[0] < 500 + 150 assert 500 - 150 < seen[1] < 500 + 150 def test_value_is_stochastic_param(self): param = iap.Sigmoid( iap.Choice([1, 10]), add=0, mul=1, threshold=0.5, activated=True) expected_first = 1 / (1 + np.exp(-(1 * 1 + 0 - 0.5))) expected_second = 1 / (1 + np.exp(-(10 * 1 + 0 - 0.5))) seen = [0, 0] for _ in sm.xrange(1000): sample = param.draw_sample() diff_first = abs(sample - expected_first) diff_second = abs(sample - expected_second) if diff_first < 
_eps(sample): seen[0] += 1 elif diff_second < _eps(sample): seen[1] += 1 else: assert False assert 500 - 150 < seen[0] < 500 + 150 assert 500 - 150 < seen[1] < 500 + 150 def test_mul_add_threshold_with_various_fixed_values(self): muls = [0.1, 1, 10.3] adds = [-5.7, -0.0734, 0, 0.0734, 5.7] vals = [-1, -0.7, 0, 0.7, 1] threshs = [-5.7, -0.0734, 0, 0.0734, 5.7] for mul, add, val, thresh in itertools.product(muls, adds, vals, threshs): with self.subTest(mul=mul, add=add, val=val, threshold=thresh): param = iap.Sigmoid( iap.Deterministic(val), add=add, mul=mul, threshold=thresh) sample = param.draw_sample() samples = param.draw_samples((2, 3)) dt = sample.dtype val_ = np.array([val], dtype=dt) mul_ = np.array([mul], dtype=dt) add_ = np.array([add], dtype=dt) thresh_ = np.array([thresh], dtype=dt) expected = ( 1 / ( 1 + np.exp( -(val_ * mul_ + add_ - thresh_) ) ) ) assert sample.shape == tuple() assert samples.shape == (2, 3) assert ( expected - 5*_eps(sample) < sample < expected + 5*_eps(sample) ) assert np.all( np.logical_and( expected - 5*_eps(sample) < samples, samples < expected + 5*_eps(sample) ) ) def test_samples_same_values_for_same_seeds(self): param = iap.Sigmoid( iap.Choice([1, 10]), add=0, mul=1, threshold=0.5, activated=True) samples1 = param.draw_samples((100, 10), random_state=iarandom.RNG(1234)) samples2 = param.draw_samples((100, 10), random_state=iarandom.RNG(1234)) assert samples1.shape == (100, 10) assert samples2.shape == (100, 10) assert np.array_equal(samples1, samples2)
2.515625
3
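The parameter tests in the record above compare floats against an `_eps` tolerance helper that is referenced but never defined in this excerpt. A minimal compatible sketch is shown below; this is an assumption, and the real imgaug test module may define it differently:

import numpy as np

def _eps(arr):
    # Machine epsilon for float arrays, a small fixed tolerance otherwise
    # (assumed behavior, inferred from how the assertions above use it).
    if isinstance(arr, np.ndarray) and arr.dtype.kind == "f":
        return np.finfo(arr.dtype).eps
    return 1e-4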
ml_snek/datasets/jsnek_dataset.py
joram/ml-snek
0
12874
""" jsnek_saved_games_dataset that returns flat (vectorized) data """ from .jsnek_base_dataset import JSnekBaseDataset from .. import utils class JSnekDataset(JSnekBaseDataset): """Represents a board state in the following way: board_state: `torch.Tensor` Board state in torch.Tensor format. Board state can either be C x H x W or (C*H*W) if board_state_as_vector = True direction: `torch.Tensor` Direction taken in one-hot format """ def __init__( self, board_state_as_vector=False, direction_as_index=False, max_frames=-1 ): super().__init__(max_frames=max_frames) self.board_state_as_vector = board_state_as_vector self.direction_as_index = direction_as_index def __getitem__(self, index): """ Parameters ---------- index : int Index of datum Returns ------- board_state: `torch.Tensor` Board state in torch.Tensor format. Board state can either be C x H x W or (C*H*W) if board_state_as_vector = True direction: `torch.Tensor` Direction taken in one-hot format or Index if direction_as_index = True """ frame, winner_id, direction = super().__getitem__(index) board_state = utils.frame_to_image(frame, winner_id) if self.board_state_as_vector: board_state = board_state.view([board_state.numel()]) if self.direction_as_index: direction = utils.direction_to_index(direction) else: direction = utils.direction_to_onehot(direction) return board_state, direction
3.015625
3
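A minimal usage sketch for the dataset above, assuming the package is importable as `ml_snek` (as the repo path suggests) and that the base class finds saved games in its default location:

import torch
from ml_snek.datasets.jsnek_dataset import JSnekDataset  # import path assumed from the repo layout

dataset = JSnekDataset(board_state_as_vector=True, direction_as_index=True)
loader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)

board_state, direction = next(iter(loader))
# board_state: (32, C*H*W) float tensor; direction: (32,) class indices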
src/posts/migrations/0007_recipe_preface.py
eduardkh/matkonim2
0
12875
<reponame>eduardkh/matkonim2
# Generated by Django 3.2.7 on 2021-09-15 15:40

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('posts', '0006_auto_20210914_0910'),
    ]

    operations = [
        migrations.AddField(
            model_name='recipe',
            name='preface',
            field=models.TextField(blank=True, null=True),
        ),
    ]
1.640625
2
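For context, this auto-generated migration corresponds to adding the following field on the `Recipe` model in `posts/models.py` and running `python manage.py makemigrations` (a sketch; the rest of the model is not shown in the record):

from django.db import models

class Recipe(models.Model):
    # ... existing fields elided ...
    preface = models.TextField(blank=True, null=True)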
app/database/db.py
flych3r/spotify-tracker
2
12876
import os

import databases
import sqlalchemy

DB_CONNECTOR = os.getenv('APP_DB_CONNECTOR')
DB_USERNAME = os.getenv('APP_DB_USERNAME')
DB_PASSWORD = os.getenv('APP_DB_PASSWORD')
DB_HOST = os.getenv('APP_DB_HOST')
DB_PORT = os.getenv('APP_DB_PORT')
DB_DATABASE = os.getenv('APP_DB_DATABASE')

DB_URL = f'{DB_CONNECTOR}://{DB_USERNAME}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DB_DATABASE}'

db: databases.Database = databases.Database(DB_URL)
metadata: sqlalchemy.MetaData = sqlalchemy.MetaData()
2.25
2
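A short connection-lifecycle sketch for the `db` object above, assuming the `APP_DB_*` environment variables point at a reachable database and that the module is importable as `app.database.db`:

import asyncio
from app.database.db import db  # import path assumed from the repo layout

async def main():
    await db.connect()                                  # open the connection pool
    row = await db.fetch_one(query="SELECT 1 AS ok")    # sanity-check query
    print(row["ok"])
    await db.disconnect()                               # release the pool

asyncio.run(main())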
examples/stl10/main_info.py
hehaodele/align_uniform
0
12877
import os
import time
import argparse

import torchvision
import torch
import torch.nn as nn

from util import AverageMeter, TwoAugUnsupervisedDataset
from encoder import SmallAlexNet
from align_uniform import align_loss, uniform_loss

import json


def parse_option():
    parser = argparse.ArgumentParser('STL-10 Representation Learning with Alignment and Uniformity Losses')

    parser.add_argument('--align_w', type=float, default=1, help='Alignment loss weight')
    parser.add_argument('--unif_w', type=float, default=1, help='Uniformity loss weight')
    parser.add_argument('--align_alpha', type=float, default=2, help='alpha in alignment loss')
    parser.add_argument('--unif_t', type=float, default=2, help='t in uniformity loss')

    parser.add_argument('--batch_size', type=int, default=768, help='Batch size')
    parser.add_argument('--epochs', type=int, default=200, help='Number of training epochs')
    parser.add_argument('--lr', type=float, default=None,
                        help='Learning rate. Default is linear scaling 0.12 per 256 batch size')
    parser.add_argument('--lr_decay_rate', type=float, default=0.1, help='Learning rate decay rate')
    parser.add_argument('--lr_decay_epochs', default=[155, 170, 185], nargs='*', type=int,
                        help='When to decay learning rate')
    parser.add_argument('--momentum', type=float, default=0.9, help='SGD momentum')
    parser.add_argument('--weight_decay', type=float, default=1e-4, help='L2 weight decay')
    parser.add_argument('--feat_dim', type=int, default=128, help='Feature dimensionality')

    parser.add_argument('--num_workers', type=int, default=20, help='Number of data loader workers to use')
    parser.add_argument('--log_interval', type=int, default=40, help='Number of iterations between logs')
    parser.add_argument('--gpus', default=[0], nargs='*', type=int,
                        help='List of GPU indices to use, e.g., --gpus 0 1 2 3')

    parser.add_argument('--data_folder', type=str, default='./data', help='Path to data')
    parser.add_argument('--result_folder', type=str, default='./results', help='Base directory to save model')
    parser.add_argument('--suffix', type=str, default='info', help='Name Suffix')

    opt = parser.parse_args()

    opt.data_folder = '/afs/csail.mit.edu/u/h/hehaodele/radar/Hao/datasets'
    opt.result_folder = '/afs/csail.mit.edu/u/h/hehaodele/radar/Hao/projects/align_uniform/results'

    if opt.lr is None:
        opt.lr = 0.12 * (opt.batch_size / 256)

    print(json.dumps(vars(opt), indent=2, default=lambda o: o.__dict__))

    opt.gpus = list(map(lambda x: torch.device('cuda', x), opt.gpus))

    exp_name = f"align{opt.align_w:g}alpha{opt.align_alpha:g}_unif{opt.unif_w:g}t{opt.unif_t:g}"
    if len(opt.suffix) > 0:
        exp_name += f'_{opt.suffix}'

    opt.save_folder = os.path.join(
        opt.result_folder,
        exp_name,
    )
    os.makedirs(opt.save_folder, exist_ok=True)

    return opt


def get_data_loader(opt):
    from util import RandomResizedCropWithBox, TwoAugUnsupervisedDatasetWithBox
    transform_crop = RandomResizedCropWithBox(64, scale=(0.08, 1))
    transform_others = torchvision.transforms.Compose([
        torchvision.transforms.RandomHorizontalFlip(),
        torchvision.transforms.ColorJitter(0.4, 0.4, 0.4, 0.4),
        torchvision.transforms.RandomGrayscale(p=0.2),
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(
            (0.44087801806139126, 0.42790631331699347, 0.3867879370752931),
            (0.26826768628079806, 0.2610450402318512, 0.26866836876860795),
        ),
    ])
    dataset = TwoAugUnsupervisedDatasetWithBox(
        torchvision.datasets.STL10(opt.data_folder, 'train+unlabeled', download=True),
        transform_crop, transform_others)
    return torch.utils.data.DataLoader(dataset, batch_size=opt.batch_size,
                                       num_workers=opt.num_workers, shuffle=True, pin_memory=True)


def get_rate(x):
    return sum(x) / len(x) * 100


def main():
    opt = parse_option()

    print(f'Optimize: {opt.align_w:g} * loss_align(alpha={opt.align_alpha:g}) + {opt.unif_w:g} * loss_uniform(t={opt.unif_t:g})')

    torch.cuda.set_device(opt.gpus[0])
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = True

    encoder = nn.DataParallel(SmallAlexNet(feat_dim=opt.feat_dim).to(opt.gpus[0]), opt.gpus)

    optim = torch.optim.SGD(encoder.parameters(), lr=opt.lr,
                            momentum=opt.momentum, weight_decay=opt.weight_decay)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optim, gamma=opt.lr_decay_rate,
                                                     milestones=opt.lr_decay_epochs)

    loader = get_data_loader(opt)

    align_meter = AverageMeter('align_loss')
    unif_meter = AverageMeter('uniform_loss')
    loss_meter = AverageMeter('total_loss')
    it_time_meter = AverageMeter('iter_time')
    info_rate_meter = AverageMeter('info_rate')
    noni_rate_meter = AverageMeter('noni_rate')
    for epoch in range(opt.epochs):
        align_meter.reset()
        unif_meter.reset()
        loss_meter.reset()
        it_time_meter.reset()
        t0 = time.time()
        for ii, (im_x, info_x, im_y, info_y) in enumerate(loader):
            optim.zero_grad()
            x, y = encoder(torch.cat([im_x.to(opt.gpus[0]), im_y.to(opt.gpus[0])])).chunk(2)
            align_loss_val = align_loss(x, y, alpha=opt.align_alpha)
            unif_loss_val = (uniform_loss(x, t=opt.unif_t) + uniform_loss(y, t=opt.unif_t)) / 2
            loss = align_loss_val * opt.align_w + unif_loss_val * opt.unif_w

            info_x, info_y = info_x.to(opt.gpus[0]), info_y.to(opt.gpus[0])
            info_x_idx, noni_x_idx = info_x > 0.5, info_x < 0.2
            info_y_idx, noni_y_idx = info_y > 0.5, info_y < 0.2
            info_pair_idx = info_x_idx & info_y_idx
            if info_pair_idx.any():
                align_loss_info = align_loss(x[info_pair_idx], y[info_pair_idx], alpha=opt.align_alpha)
            else:
                align_loss_info = 0

            uniform_loss_noninfo = 0
            if noni_x_idx.any():
                uniform_loss_noninfo += uniform_loss(x[noni_x_idx], t=opt.unif_t)
            if noni_y_idx.any():
                uniform_loss_noninfo += uniform_loss(y[noni_y_idx], t=opt.unif_t)
            uniform_loss_noninfo /= 2

            loss_info = align_loss_info * opt.align_w + uniform_loss_noninfo * opt.unif_w
            loss = loss + loss_info

            align_meter.update(align_loss_val, x.shape[0])
            unif_meter.update(unif_loss_val)
            loss_meter.update(loss, x.shape[0])
            info_rate_meter.update((get_rate(info_x_idx) + get_rate(info_y_idx)) / 2)
            noni_rate_meter.update((get_rate(noni_x_idx) + get_rate(noni_y_idx)) / 2)
            loss.backward()
            optim.step()
            it_time_meter.update(time.time() - t0)
            if ii % opt.log_interval == 0:
                print(f"Epoch {epoch}/{opt.epochs}\tIt {ii}/{len(loader)}\t" +
                      f"{align_meter}\t{unif_meter}\t{loss_meter}\t{it_time_meter}\t{info_rate_meter}\t{noni_rate_meter}")
            t0 = time.time()
        scheduler.step()

        if epoch % 40 == 0:
            ckpt_file = os.path.join(opt.save_folder, f'encoder-ep{epoch}.pth')
            torch.save(encoder.module.state_dict(), ckpt_file)

    ckpt_file = os.path.join(opt.save_folder, 'encoder.pth')
    torch.save(encoder.module.state_dict(), ckpt_file)
    print(f'Saved to {ckpt_file}')


if __name__ == '__main__':
    main()
2.375
2
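The script above imports `align_loss` and `uniform_loss` without defining them. For reference, the alignment/uniformity objectives of Wang & Isola (2020) are commonly written as below; this is a sketch of the published form, not necessarily the exact code in this repository:

import torch

def align_loss(x, y, alpha=2):
    # Mean distance between positive-pair embeddings, raised to alpha.
    return (x - y).norm(p=2, dim=1).pow(alpha).mean()

def uniform_loss(x, t=2):
    # Log of the average Gaussian potential over all embedding pairs.
    return torch.pdist(x, p=2).pow(2).mul(-t).exp().mean().log()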
app/__init__.py
nic-mon/IAIOLab
0
12878
<gh_stars>0
from flask import Flask

"""
1. Creating a Flask application instance; the name argument is passed to the
Flask application constructor. It's used to determine the root path.
"""
app = Flask(__name__)
app.config.from_object('config')

from app import views, models
2.75
3
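The bottom-of-file import of `views` and `models` is the usual Flask circular-import workaround: `app` must exist before those modules import it back. A matching `app/views.py` might look like the sketch below (hypothetical; the real module is not part of this record):

from app import app

@app.route('/')
def index():
    # Imported after `app` is created, so the decorator can register routes.
    return 'Hello, world!'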
ptf/tests/linerate/qos_metrics.py
dariusgrassi/upf-epc
0
12879
# SPDX-License-Identifier: Apache-2.0
# Copyright(c) 2021 Open Networking Foundation

import time
from ipaddress import IPv4Address
from pprint import pprint

from trex_test import TrexTest
from grpc_test import *

from trex_stl_lib.api import (
    STLVM,
    STLPktBuilder,
    STLStream,
    STLTXCont,
)

import ptf.testutils as testutils

UPF_DEST_MAC = "0c:c4:7a:19:6d:ca"

# Port setup
TREX_SENDER_PORT = 0
TREX_RECEIVER_PORT = 1
BESS_SENDER_PORT = 2
BESS_RECEIVER_PORT = 3

# Test specs
DURATION = 10
RATE = 100_000  # 100 Kpps
UE_COUNT = 10_000  # 10k UEs
GTPU_PORT = 2152
PKT_SIZE = 64


class PerFlowQosMetricsTest(TrexTest, GrpcTest):
    """
    Generates 1 Mpps downlink traffic for 10k dest UE IP addresses. Uses
    BESS-UPF QoS metrics to verify baseline packet loss, latency, and
    jitter results.
    """

    @autocleanup
    def runTest(self):
        n3TEID = 0

        startIP = IPv4Address('172.16.17.32')
        endIP = startIP + UE_COUNT - 1
        accessIP = IPv4Address('10.128.13.29')
        enbIP = IPv4Address('10.27.19.99')  # arbitrary ip for non-existent eNodeB for gtpu encap

        # program UPF for downlink traffic by installing PDRs and FARs
        print("Installing PDRs and FARs...")
        for i in range(UE_COUNT):
            # install N6 DL PDR to match UE dst IP
            pdrDown = self.createPDR(
                srcIface = CORE,
                dstIP = int(startIP + i),
                srcIfaceMask = 0xFF,
                dstIPMask = 0xFFFFFFFF,
                precedence = 255,
                fseID = n3TEID + i + 1,  # start from 1
                ctrID = 0,
                farID = i,
                qerIDList = [N6, 1],
                needDecap = 0,
            )
            self.addPDR(pdrDown)

            # install N6 DL FAR for encap
            farDown = self.createFAR(
                farID = i,
                fseID = n3TEID + i + 1,  # start from 1
                applyAction = ACTION_FORWARD,
                dstIntf = DST_ACCESS,
                tunnelType = 0x1,
                tunnelIP4Src = int(accessIP),
                tunnelIP4Dst = int(enbIP),  # only one eNB to send to downlink
                tunnelTEID = 0,
                tunnelPort = GTPU_PORT,
            )
            self.addFAR(farDown)

            # install N6 DL/UL application QER
            qer = self.createQER(
                gate = GATE_UNMETER,
                qerID = N6,
                fseID = n3TEID + i + 1,  # start from 1
                qfi = 9,
                ulGbr = 0,
                ulMbr = 0,
                dlGbr = 0,
                dlMbr = 0,
                burstDurationMs = 10,
            )
            self.addApplicationQER(qer)

        # set up trex to send traffic thru UPF
        print("Setting up TRex client...")
        vm = STLVM()
        vm.var(
            name="dst",
            min_value=str(startIP),
            max_value=str(endIP),
            size=4,
            op="random",
        )
        vm.write(fv_name="dst", pkt_offset="IP.dst")
        vm.fix_chksum()

        pkt = testutils.simple_udp_packet(
            pktlen=PKT_SIZE,
            eth_dst=UPF_DEST_MAC,
            with_udp_chksum=False,
        )
        stream = STLStream(
            packet=STLPktBuilder(pkt=pkt, vm=vm),
            mode=STLTXCont(pps=RATE),
        )
        self.trex_client.add_streams(stream, ports=[BESS_SENDER_PORT])

        print("Running traffic...")
        s_time = time.time()
        self.trex_client.start(
            ports=[BESS_SENDER_PORT], mult="1", duration=DURATION
        )

        # FIXME: pull QoS metrics at end instead of while traffic running
        time.sleep(DURATION - 5)
        if self.trex_client.is_traffic_active():
            stats = self.getSessionStats(q=[90, 99, 99.9], quiet=True)

            preQos = stats["preQos"]
            postDlQos = stats["postDlQos"]
            postUlQos = stats["postUlQos"]

        self.trex_client.wait_on_traffic(ports=[BESS_SENDER_PORT])
        print(f"Duration was {time.time() - s_time}")

        trex_stats = self.trex_client.get_stats()
        sent_packets = trex_stats['total']['opackets']
        recv_packets = trex_stats['total']['ipackets']

        # 0% packet loss
        self.assertEqual(
            sent_packets,
            recv_packets,
            f"Didn't receive all packets; sent {sent_packets}, received {recv_packets}",
        )

        for fseid in postDlQos:
            lat = fseid['latency']['percentileValuesNs']
            jitter = fseid['jitter']['percentileValuesNs']

            # 99th %ile latency < 100 us
            self.assertLessEqual(
                int(lat[1]) / 1000,
                100,
                f"99th %ile latency was higher than 100 us! Was {int(lat[1]) / 1000} us"
            )

            # 99.9th %ile latency < 200 us
            self.assertLessEqual(
                int(lat[2]) / 1000,
                200,
                f"99.9th %ile latency was higher than 200 us! Was {int(lat[2]) / 1000} us"
            )

            # 99th% jitter < 100 us
            self.assertLessEqual(
                int(jitter[1]) / 1000,
                100,
                f"99th %ile jitter was higher than 100 us! Was {int(jitter[1]) / 1000} us"
            )

        return
2.046875
2
archives_app/documents_serializers.py
DITGO/2021.1-PC-GO1-Archives
1
12880
<reponame>DITGO/2021.1-PC-GO1-Archives
from rest_framework import serializers
from archives_app.documents_models import (FrequencyRelation, BoxArchiving,
                                           AdministrativeProcess, OriginBox,
                                           FrequencySheet, DocumentTypes)


class FrequencySupport(serializers.ModelSerializer):

    def get_document_type(self, obj):
        if obj.document_type_id is not None:
            return obj.document_type_id.document_name
        return None


class BoxArchivingSerializer(serializers.ModelSerializer):

    def get_shelf_number(self, obj):
        if obj.shelf_id is not None:
            return obj.shelf_id.number
        return None

    def get_rack_number(self, obj):
        if obj.rack_id is not None:
            return obj.rack_id.number
        return None

    def get_abbreviation_name(self, obj):
        if obj.abbreviation_id is not None:
            return obj.abbreviation_id.name
        return ""

    def get_sender_unity(self, obj):
        if obj.sender_unity is not None:
            return obj.sender_unity.unity_name
        return ""

    def get_doc_types(self, obj):
        if obj.document_types is not None:
            doc_types = []
            for obj in obj.document_types.all():
                doc_types.append(obj.document_type_id.document_name)
            return doc_types
        return ""

    def get_temporalities(self, obj):
        if obj.document_types is not None:
            doc_types = []
            for obj in obj.document_types.all():
                doc_types.append(obj.temporality_date)
            return doc_types
        return None

    shelf_number = serializers.SerializerMethodField('get_shelf_number')
    rack_number = serializers.SerializerMethodField('get_rack_number')
    abbreviation_name = serializers.SerializerMethodField('get_abbreviation_name')
    sender_unity_name = serializers.SerializerMethodField('get_sender_unity')
    document_type_name = serializers.SerializerMethodField('get_doc_types')
    temporality_date = serializers.SerializerMethodField('get_temporalities')

    class Meta:
        model = BoxArchiving
        fields = (
            "id",
            "process_number",
            "sender_unity",
            "notes",
            "received_date",
            "document_url",
            "cover_sheet",
            "filer_user",
            "abbreviation_name",
            "shelf_number",
            "rack_number",
            "origin_box_id",
            "abbreviation_id",
            "shelf_id",
            "rack_id",
            "document_types",
            "sender_unity_name",
            "document_type_name",
            "temporality_date"
        )


class FrequencyRelationSerializer(FrequencySupport):

    def get_sender_unity(self, obj):
        if obj.sender_unity is not None:
            return obj.sender_unity.unity_name
        return ""

    document_type_name = serializers.SerializerMethodField(
        'get_document_type'
    )
    sender_unity_name = serializers.SerializerMethodField('get_sender_unity')

    class Meta:
        model = FrequencyRelation
        fields = (
            "id",
            "process_number",
            "notes",
            "document_date",
            "received_date",
            "temporality_date",
            "reference_period",
            "filer_user",
            "sender_unity",
            "document_type_id",
            "document_type_name",
            "sender_unity_name"
        )


class AdministrativeProcessSerializer(serializers.ModelSerializer):

    def get_document_subject(self, obj):
        if obj.subject_id is not None:
            return obj.subject_id.subject_name
        return None

    def get_sender_unity(self, obj):
        if obj.sender_unity is not None:
            return obj.sender_unity.unity_name
        return ""

    def get_sender_user(self, obj):
        if obj.sender_user is not None:
            return obj.sender_user.name
        return ""

    sender_unity_name = serializers.SerializerMethodField('get_sender_unity')
    sender_user_name = serializers.SerializerMethodField('get_sender_user')
    document_subject_name = serializers.SerializerMethodField(
        'get_document_subject'
    )

    class Meta:
        model = AdministrativeProcess
        fields = ("id",
                  "process_number",
                  "notes",
                  "filer_user",
                  "notice_date",
                  "interested",
                  "cpf_cnpj",
                  "reference_month_year",
                  "sender_user",
                  "sender_user_name",
                  "archiving_date",
                  "is_filed",
                  "is_eliminated",
                  "temporality_date",
                  "send_date",
                  "administrative_process_number",
                  "sender_unity",
                  "subject_id",
                  "dest_unity_id",
                  "unity_id",
                  "document_subject_name",
                  "sender_unity_name"
                  )


class OriginBoxSerializer(serializers.ModelSerializer):

    class Meta:
        model = OriginBox
        fields = '__all__'


class DocumentTypesSerializer(serializers.ModelSerializer):

    class Meta:
        model = DocumentTypes
        fields = '__all__'


class FrequencySheetSerializer(FrequencySupport):

    def get_person_name(self, obj):
        if obj.person_id is not None:
            return obj.person_id.name
        return ""

    document_type_name = serializers.SerializerMethodField(
        'get_document_type'
    )
    person_name = serializers.SerializerMethodField('get_person_name')

    class Meta:
        model = FrequencySheet
        fields = ("id",
                  "person_id",
                  "person_name",
                  "cpf",
                  "role",
                  "category",
                  "workplace",
                  "municipal_area",
                  "reference_period",
                  "notes",
                  "process_number",
                  "document_type_id",
                  "temporality_date",
                  "document_type_name"
                  )
2.015625
2
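Usage sketch for the serializers above: `SerializerMethodField('get_x')` wires each computed field to the named method on the serializer, so serializing a queryset pulls in the related display names automatically (function name below is an assumption for illustration):

from archives_app.documents_serializers import BoxArchivingSerializer
from archives_app.documents_models import BoxArchiving

def boxes_as_json():
    # many=True serializes the whole queryset, invoking each get_* method per object.
    queryset = BoxArchiving.objects.all()
    return BoxArchivingSerializer(queryset, many=True).data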
src/fparser/common/tests/test_base_classes.py
sturmianseq/fparser
33
12881
# -*- coding: utf-8 -*-
##############################################################################
# Copyright (c) 2017 Science and Technology Facilities Council
#
# All rights reserved.
#
# Modifications made as part of the fparser project are distributed
# under the following license:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##############################################################################
# Modified M.Hambley, UK Met Office
##############################################################################
'''
Test battery associated with fparser.common.base_classes package.
'''
import re

import pytest

import fparser.common.base_classes
import fparser.common.readfortran
import fparser.common.sourceinfo
import fparser.common.utils
from fparser import api


def test_statement_logging(log, monkeypatch):
    '''
    Tests the Statement class' logging methods.
    '''
    class DummyParser(object):
        '''
        Null parser harness.
        '''
        def __init__(self, reader):
            self.reader = reader

    reader = fparser.common.readfortran.FortranStringReader("dummy = 1")
    parser = DummyParser(reader)

    monkeypatch.setattr(fparser.common.base_classes.Statement,
                        'process_item', lambda x: None, raising=False)
    unit_under_test = fparser.common.base_classes.Statement(parser, None)

    unit_under_test.error('Scary biscuits')
    assert(log.messages == {'critical': [], 'debug': [],
                            'error': ['Scary biscuits'],
                            'info': [], 'warning': []})

    log.reset()
    unit_under_test.warning('Trepidacious Cetations')
    assert(log.messages == {'critical': [], 'debug': [],
                            'error': [], 'info': [],
                            'warning': ['Trepidacious Cetations']})

    log.reset()
    unit_under_test.info('Hilarious Ontologies')
    assert(log.messages == {'critical': [], 'debug': [],
                            'error': [],
                            'info': ['Hilarious Ontologies'],
                            'warning': []})


def test_log_comment_mix(log):
    '''
    Tests that unexpected Fortran 90 comment in fixed format source is
    logged.
    '''
    class EndDummy(fparser.common.base_classes.EndStatement):
        '''
        Dummy EndStatement.
        '''
        match = re.compile(r'\s*end(\s*thing\s*\w*|)\s*\Z', re.I).match

    class BeginHarness(fparser.common.base_classes.BeginStatement):
        '''
        Dummy BeginStatement.
        '''
        end_stmt_cls = EndDummy
        classes = []
        match = re.compile(r'\s*thing\s+(\w*)\s*\Z', re.I).match

        def get_classes(self):
            '''
            Returns an empty list of contained statements.
            '''
            return []

    code = ' x=1 ! Cheese'
    parent = fparser.common.readfortran.FortranStringReader(
        code, ignore_comments=False)
    parent.set_format(fparser.common.sourceinfo.FortranFormat(False, True))
    item = fparser.common.readfortran.Line(code, (1, 1), None, None, parent)
    with pytest.raises(fparser.common.utils.AnalyzeError):
        __ = BeginHarness(parent, item)
    expected = ' 1: x=1 ! Cheese <== ' \
               + 'no parse pattern found for "x=1 ! cheese" ' \
               + "in 'BeginHarness' block, " \
               + 'trying to remove inline comment (not in Fortran 77).'
    result = log.messages['warning'][0].split('\n')[1]
    assert result == expected


def test_log_unexpected(log):
    '''
    Tests that an unexpected thing between begin and end statements logs an
    event.
    '''
    class EndThing(fparser.common.base_classes.EndStatement):
        '''
        Dummy EndStatement class.
        '''
        isvalid = True
        match = re.compile(r'\s*end(\s+thing(\s+\w+)?)?\s*$', re.I).match

    class BeginThing(fparser.common.base_classes.BeginStatement):
        '''
        Dummy BeginStatement class.
        '''
        end_stmt_cls = EndThing
        classes = []
        match = re.compile(r'\s*thing\s+(\w+)?\s*$', re.I).match

        def get_classes(self):
            '''
            Returns an empty list of contained classes.
            '''
            return []

    code = [' jumper', ' end thing']
    parent = fparser.common.readfortran.FortranStringReader('\n'.join(code))
    parent.set_format(fparser.common.sourceinfo.FortranFormat(False, True))
    item = fparser.common.readfortran.Line(code[0], (1, 1), None, None, parent)
    with pytest.raises(fparser.common.utils.AnalyzeError):
        __ = BeginThing(parent, item)
    expected = ' 1: jumper <== no parse pattern found for "jumper" ' \
               "in 'BeginThing' block."
    result = log.messages['warning'][0].split('\n')[1]
    assert result == expected


def test_space_after_enddo():
    '''Make sure that there is no space after an 'END DO' without name,
    but there is a space if there is a name after 'END DO'. '''

    # Unnamed loop:
    source_str = '''\
    subroutine foo
    integer i, r
    do i = 1,100
    r = r + 1
    end do
    end subroutine foo
    '''
    tree = api.parse(source_str, isfree=True, isstrict=False)
    assert "END DO " not in tree.tofortran()

    # Named loop:
    source_str = '''\
    subroutine foo
    integer i, r
    loop1: do i = 1,100
    r = r + 1
    end do loop1
    end subroutine foo
    '''
    tree = api.parse(source_str, isfree=True, isstrict=False)
    assert "END DO loop1" in tree.tofortran()
1.109375
1
pyvecorg/__main__.py
torsava/pyvec.org
3
12882
<filename>pyvecorg/__main__.py
from elsa import cli

from pyvecorg import app

cli(app, base_url='http://pyvec.org')
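# A minimal sketch of how this entry point is typically driven from a shell
# (hedged: the subcommand names follow elsa's documented CLI -- serve/freeze --
# and are assumptions, not taken from this repository):
#
#   python -m pyvecorg serve    # run the Flask app locally
#   python -m pyvecorg freeze   # render the site into static files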
1.289063
1
ppython/input_handler.py
paberr/ppython
1
12883
import curtsies.events as ev
import sys

DELIMITERS = ' .'
WHITESPACE = ' '


def print_console(txt, npadding=0, newline=False, flush=True):
    """
    Prints txt without newline, cursor positioned at the end.
    :param txt: The text to print
    :param npadding: The txt will be padded with this many trailing spaces
    :param newline: If True, a newline character will be appended
    :param flush: If True, stdout is flushed after writing
    :return:
    """
    sys.stdout.write('\r{0}{1}'.format(txt, WHITESPACE * npadding))
    if newline:
        sys.stdout.write('\n')
    if flush:
        sys.stdout.flush()


def move_next_line():
    sys.stdout.write('\n')
    sys.stdout.flush()


def find_next_in_list(lst, what, start=0, reverse=False):
    """
    Finds the next occurrence of what in lst starting at start.
    :param lst: The list to search
    :param what: The item to find, should be an iterable
    :param start: The starting position in the list
    :param reverse: Set this to True in order to traverse the list towards 0
    :return: False if no occurrence found, index otherwise
    """
    if start < 0 or start >= len(lst):
        return False
    end = -1 if reverse else len(lst)
    step = -1 if reverse else 1
    for i in range(start, end, step):
        if lst[i] in what:
            return i
    return False


class InputHandler:
    def __init__(self, history):
        self._input = []
        self._position = 0
        self._handlers = {}
        self._highlight = None
        self._max_length = 0
        self._complete = None
        self._history = history
        self._prefix = ''

    def process_input(self, c):
        """
        Processes the input captured by curtsies.
        :param c: the input, either a curtsies keystroke or an event
        :return: False if program should stop, the current line otherwise
        """
        if isinstance(c, ev.Event):
            return self._process_event(c)
        else:
            return self._process_char(c)

    def register_handler(self, key, handler):
        if key not in self._handlers:
            self._handlers[key] = []
        self._handlers[key].append(handler)

    def set_highlighter(self, highlight):
        self._highlight = highlight

    def set_completer(self, complete):
        self._complete = complete

    def set_prefix(self, prefix):
        self._prefix = prefix

    def _process_char(self, c):
        """
        Processes keystrokes internally, may call handlers as well.
        :param c: The curtsies keystroke
        :return: The current line
        """
        if len(c) == 1:
            self._insert(c)
        elif c == '<LEFT>':
            self._left()
        elif c == '<RIGHT>':
            self._right()
        elif c == '<UP>':
            self._hist_up()
        elif c == '<DOWN>':
            self._hist_down()
        elif c == '<SPACE>':
            self._insert(' ')
        elif c == '<TAB>':
            if not self._tab_completion():
                self._insert(' ')
        elif c == '<BACKSPACE>':
            self._back()
        elif c == '<Ctrl-w>':
            self._delete_last_word()
        elif c == '<DELETE>':
            self._delete()
        elif c == '<HOME>' or c == '<Ctrl-a>':
            self._home()
        elif c == '<END>' or c == '<Ctrl-e>':
            self._end()
        elif c == '<Ctrl-u>':
            self._delete_before()
        elif c == '<Ctrl-k>':
            self._delete_after()
        elif c == '<Esc+f>':
            self._move_word_forwards()
        elif c == '<Esc+b>':
            self._move_word_backwards()
        elif c == '<Ctrl-r>':
            pass  # history search mode
        elif c == '<ESC>':
            pass  # history search mode
        elif c == '<Ctrl-j>':
            old_line = self._newline()
            if c in self._handlers:
                for handler in self._handlers[c]:
                    handler(old_line)
        elif c == '<Ctrl-c>' or c == '<Ctrl-d>':
            return False

        # new lines are handled differently
        if c in self._handlers and c != '<Ctrl-j>':
            # call handlers if necessary
            for handler in self._handlers[c]:
                handler(self._curline())

        return self._curline()

    def _process_event(self, e):
        """
        Processes events internally.
        :param e: The event
        :return: False in case of SigInt, the input otherwise
        """
        if isinstance(e, ev.SigIntEvent):
            return False
        elif isinstance(e, ev.PasteEvent):
            for c in e.events:
                self.process_input(c)
            return self._curline()

    def _line_changed(self):
        self._history.edit(self._curline())

    def _hist_up(self):
        """
        Moves up in the history object.
        :return:
        """
        self._input = list(self._history.move_up())
        self._position = len(self._input)
        self.draw()

    def _hist_down(self):
        """
        Moves down in the history object.
        :return:
        """
        self._input = list(self._history.move_down())
        self._position = len(self._input)
        self.draw()

    def _curline(self):
        """
        Returns the current line.
        :return: current line
        """
        return ''.join(self._input)

    def _insert(self, c):
        """
        Inserts a character at current position, moves cursor forward and redraws.
        :param c: character
        :return:
        """
        if len(c) > 1:
            # only insert single characters
            for cc in c:
                self._insert(cc)
            return
        self._input.insert(self._position, c)
        self._position += 1
        self._line_changed()
        self.draw()

    def _left(self):
        """
        Moves cursor back and redraws.
        :return:
        """
        if self._position > 0:
            self._position -= 1
        self.draw()

    def _home(self):
        """
        Moves cursor home and redraws.
        :return:
        """
        self._position = 0
        self.draw()

    def _right(self):
        """
        Moves cursor forward and redraws.
        :return:
        """
        if self._position < len(self._input):
            self._position += 1
        self.draw()

    def _end(self):
        """
        Moves cursor to end and redraws.
        :return:
        """
        self._position = len(self._input)
        self.draw()

    def _move_word_forwards(self):
        """
        Moves cursor forward towards the next delimiter.
        :return:
        """
        next_del = find_next_in_list(self._input, DELIMITERS, start=self._position+1)
        if next_del is False:
            self._end()
        else:
            self._position = next_del
            self.draw()

    def _move_word_backwards(self):
        """
        Moves cursor back towards the previous delimiter.
        :return:
        """
        next_del = find_next_in_list(self._input, DELIMITERS, start=self._position-2, reverse=True)
        if next_del is False:
            self._home()
        else:
            self._position = next_del + 1
            self.draw()

    def _delete_last_word(self):
        """
        Deletes until last delimiter.
        :return:
        """
        next_del = find_next_in_list(self._input, DELIMITERS, start=self._position - 2, reverse=True)
        if next_del is False:
            next_del = 0
        else:
            next_del += 1
        del self._input[next_del:self._position]
        self._position = next_del
        self._line_changed()
        self.draw()

    def _back(self):
        """
        Removes element in front of cursor, moves cursor back and redraws.
        :return:
        """
        if self._position > 0:
            del self._input[self._position - 1]
            self._position -= 1
            self._line_changed()
        self.draw()

    def _delete(self):
        """
        Removes element behind cursor and redraws.
        :return:
        """
        if self._position < len(self._input):
            del self._input[self._position]
            self._line_changed()
        self.draw()

    def _delete_before(self):
        """
        Deletes everything in front of the cursor.
        :return:
        """
        self._input = self._input[self._position:]
        self._position = 0
        self._line_changed()
        self.draw()

    def _delete_after(self):
        """
        Deletes everything after the cursor.
        :return:
        """
        self._input = self._input[:self._position]
        self._line_changed()
        self.draw()

    def _newline(self):
        """
        Creates a new line and returns the old one.
        :return: old line
        """
        self._history.commit()
        old_line = self._curline()
        self._position = 0
        self._max_length = 0
        self._input = []
        move_next_line()
        return old_line

    def draw(self):
        """
        Draws input with cursor at right position.
        :return:
        """
        whole_line = self._curline()
        cursor_line = whole_line[:self._position]

        # add prefix
        whole_line = self._prefix + whole_line
        cursor_line = self._prefix + cursor_line

        self._max_length = max(len(whole_line), self._max_length)

        # highlight texts
        if self._highlight is not None:
            whole_line_h = self._highlight(whole_line).strip()
            cursor_line_h = self._highlight(cursor_line).strip()
        else:
            whole_line_h = whole_line
            cursor_line_h = cursor_line

        # first print whole line
        npadding = max(0, self._max_length - len(whole_line))
        print_console(whole_line_h, npadding=npadding, flush=False)
        # then print for cursor position
        print_console(cursor_line_h)

    def _tab_completion(self):
        """
        Calls completion function. If possible insert completion.
        :return: True if completion was successful
        """
        if self._complete is not None:
            # try completing
            completion = self._complete(self._curline()[:self._position])
            if completion is not False:
                # if successful, insert the completion
                for c in completion:
                    self._insert(c)
                return True
        return False
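# A minimal sketch of a driver loop for InputHandler (hedged: DummyHistory is
# a hypothetical stand-in written for illustration -- it only implements the
# edit/move_up/move_down/commit methods this module actually calls; the
# curtsies.Input usage follows curtsies' documented generator API):
#
# from curtsies import Input
#
# class DummyHistory:
#     def __init__(self):
#         self._line = ''
#     def edit(self, line):
#         self._line = line
#     def move_up(self):
#         return self._line
#     def move_down(self):
#         return self._line
#     def commit(self):
#         pass
#
# handler = InputHandler(DummyHistory())
# handler.set_prefix('>>> ')
# with Input() as input_generator:
#     for key in input_generator:
#         if handler.process_input(key) is False:
#             break  # Ctrl-C / Ctrl-D / SigInt ends the loop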
3.515625
4
pycon_graphql/events/tests/test_models.py
CarlosMart626/graphql-workshop-pycon.co2019
1
12884
<filename>pycon_graphql/events/tests/test_models.py
from django.core.exceptions import ValidationError
from django.utils import timezone
from django.test import TestCase

from events.models import Event, Invitee
from users.tests.factories import UserFactory
from users.models import get_sentinel_user


class EventModelTestCase(TestCase):

    def setUp(self):
        self.main_event = Event.objects.create(
            title="Pycon 2019 - GraphQL Workshop",
            description="Descripción del evento",
            invitee_capacity=100,
            event_day=timezone.now().date(),
            initial_hour="13:00",
            end_hour="15:00",
            place_name="Universidad Javeriana",
            latitude='4.62844',
            longitude='-74.06508',
            zoom=19,
        )
        self.platform_users = UserFactory.create_batch(10)
        for user in self.platform_users:
            self.main_event.enroll_user(user)

    def test_event_model(self):
        self.assertEqual(str(self.main_event), "Pycon 2019 - GraphQL Workshop")
        self.assertEqual(self.main_event.invitees_count(), 10)
        self.assertEqual(Invitee.objects.filter(event=self.main_event).count(), 10)

    def test_error_already_enrolled_user(self):
        user = self.platform_users[0]
        with self.assertRaises(ValidationError):
            self.main_event.enroll_user(user)

    def test_delete_enrolled_user(self):
        new_user = UserFactory()
        invitee = self.main_event.enroll_user(new_user)
        new_user.delete()
        invitee.refresh_from_db(fields=("user",))
        self.assertEqual(invitee.user, get_sentinel_user())
2.171875
2
cryptos.py
pogoetic/tricero
0
12885
<filename>cryptos.py
cryptolist = ['ETH','BTC','XRP','EOS','ADA','NEO','STEEM',
              'BTS','ZEC','XMR','XVG','XEM','OMG','MIOTA','XTZ','SC',
              'CVC','BAT','XLM','ZRX','VEN']
1.914063
2
python/test/utils/test_sliced_data_iterator.py
kodavatimahendra/nnabla
0
12886
<filename>python/test/utils/test_sliced_data_iterator.py
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import pytest
import numpy as np

from nnabla.utils.data_source_loader import load_image
from nnabla.utils.data_iterator import data_iterator_simple
from .test_data_iterator import check_data_iterator_result


@pytest.mark.parametrize("num_of_slices", [2, 3, 5])
@pytest.mark.parametrize("size", [50])
@pytest.mark.parametrize("batch_size", [1, 5, 11])
@pytest.mark.parametrize("shuffle", [False, True])
def test_sliced_data_iterator(test_data_csv_png_10, num_of_slices, size, batch_size, shuffle):

    def test_load_func(position):
        return np.full((1), position, dtype=np.float32)

    di = data_iterator_simple(test_load_func, size, batch_size, shuffle=shuffle)

    from math import gcd

    def lcm(a, b):
        # fractions.gcd was deprecated in Python 3.5 and removed in 3.9;
        # math.gcd is the supported replacement, and floor division keeps
        # the result an integer.
        return abs(a * b) // gcd(a, b) if a and b else 0

    max_epoch = lcm(batch_size, size) // size

    all_data = []
    for slice_pos in range(num_of_slices):
        sliced_di = di.slice(num_of_slices=num_of_slices, slice_pos=slice_pos)
        sliced_data = {}
        while True:
            current_epoch = sliced_di.epoch
            if current_epoch > max_epoch + 1:
                break
            data = sliced_di.next()
            if current_epoch not in sliced_data:
                sliced_data[current_epoch] = []
            for dat in data:
                for d in dat:
                    sliced_data[current_epoch].append(d)
        all_data.append(sliced_data)

    epochs = {}
    for slice_pos, sliced_data in enumerate(all_data):
        for epoch in sorted(sliced_data.keys()):
            if epoch not in epochs:
                epochs[epoch] = []
            epochs[epoch].append(set(sliced_data[epoch]))

    for epoch in sorted(epochs.keys()):
        x0 = epochs[epoch][0]
        acceptable_size = batch_size
        amount = size // num_of_slices
        if acceptable_size < amount:
            acceptable_size = amount
        for dup in [x0 & x for x in epochs[epoch][1:]]:
            assert len(dup) < amount
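# Worked example of the epoch bound above (plain arithmetic, shown for
# illustration): with batch_size=11 and size=50, lcm(11, 50) = 550, so
# max_epoch = 550 // 50 = 11 -- after 11 epochs the batch grid realigns with
# the dataset boundary, which is why iteration stops shortly past that point.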
2.390625
2
python/leetcode/646.py
ParkinWu/leetcode
0
12887
# You are given n pairs of numbers. In every pair, the first number is always
# smaller than the second number.
#
# Now we define a "follow" relation: a pair (c, d) can follow a pair (a, b)
# if and only if b < c. A chain of pairs can be constructed in this fashion.
#
# Given a set of pairs, find the length of the longest chain that can be
# formed. You do not need to use all of the given pairs, and you may pick
# pairs in any order.
#
# Example:
#
# Input: [[1,2], [2,3], [3,4]]
# Output: 2
# Explanation: the longest chain is [1,2] -> [3,4]
# Note:
#
# The number of given pairs is in the range [1, 1000].
#
# Source: LeetCode (力扣)
# Link: https://leetcode-cn.com/problems/maximum-length-of-pair-chain
# Copyright belongs to LeetCode. For commercial reprints please contact the
# official channel for authorization; for non-commercial reprints please
# cite the source.

from typing import List


class Solution:
    def findLongestChain(self, pairs: List[List[int]]) -> int:
        pairs.sort(key=lambda p: p[1])
        tmp = pairs[0]
        ans = 1
        for p in pairs:
            if p[0] > tmp[1]:
                tmp = p
                ans += 1
        return ans


if __name__ == '__main__':
    s = Solution()
    assert s.findLongestChain([[1, 2], [2, 3], [3, 4]]) == 2
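# Worked trace of the greedy strategy above (pure arithmetic, for
# illustration): sorting [[1,2], [7,8], [4,5]] by second element gives
# [[1,2], [4,5], [7,8]]; the chain grows at [1,2] (ans=1), at [4,5] since
# 4 > 2 (ans=2), and at [7,8] since 7 > 5 (ans=3).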
3.6875
4
FactorTestMain.py
WeiYouyi/FactorTest
0
12888
from FactorTest.FactorTestPara import *
from FactorTest.FactorTestBox import *


class FactorTest():
    def __init__(self):
        self.startdate=20000101
        self.enddate=21000101
        self.factorlist=[]
        self.FactorDataBase={'v':pd.DataFrame(columns=['time','code'])}
        self.filterStockDF='FULL'
        self.retData = getRetData()   # standardized to time/code columns; time is int, code is str
        self.ICList={}
        self.portfolioList={}
        self.ICAns={}
        self.portfolioAns={}
        self.portfolioGroup = pd.DataFrame(columns=['time', 'code'])
        self.annualTurnover = {}
        self.year_performance={}
        self.WR={}
        self.PL={}
        pd.options.mode.use_inf_as_na = True   # treat inf as NaN
        self.dataProcess=dataProcess(self.FactorDataBase)

    def getFactor(self,Factor):   # note: frequency is standardized to monthly; SQL-style data
        if('month' in Factor):
            Factor.rename(columns={'month':'time'},inplace=True)
        if('date' in Factor):
            Factor.rename(columns={'date':'time'},inplace=True)
        factorList=Factor.columns
        if(len(factorList)<=2):
            print('error')
            return Factor
        else:
            factorList=getfactorname(Factor,['code','time'])
        for factorname in factorList:
            if(factorname in self.factorlist):   # if duplicated, drop the old info first and reload
                rest=pd.Series(self.factorlist)
                self.factorlist=rest[rest!=factorname].tolist()
                del self.FactorDataBase['v'][factorname]
            self.FactorDataBase['v']=self.FactorDataBase['v'].merge(Factor[['time','code',factorname]],on=['time','code'],how='outer')
            self.factorlist=self.factorlist+[factorname]

    def calcIC(self,factorlist='',startMonth='',endMonth=''):
        if(factorlist==''):
            factorlist=self.factorlist
        if(type(factorlist)==str):
            factorlist=[factorlist]
        if(startMonth==''):
            startMonth=int(str(self.startdate)[:6])
        if(endMonth==''):
            endMonth=int(str(self.enddate)[:6])
        RetData=self.retData
        RetData=RetData[RetData.time>=startMonth]
        RetData=RetData[RetData.time<=endMonth]
        if(type(self.filterStockDF)==pd.DataFrame):
            RetData=setStockPool(RetData,self.filterStockDF)
        for facname in factorlist:
            Mer=self.FactorDataBase['v'][['time','code',facname]].merge(RetData,on=['time','code'],how='outer').dropna()
            self.ICList[facname],self.ICAns[facname]=calcIC(Mer,facname)
            if(len(factorlist)==1):
                print(facname+':')
                print(self.ICAns[facname])
        if(len(factorlist)>1):
            print(self.ICDF)

    def calcLongShort(self,factorlist='',startMonth='',endMonth='',t=5,asc=''):
        if(factorlist==''):
            factorlist=self.factorlist
        if(type(factorlist)==str):
            factorlist=[factorlist]
        if(startMonth==''):
            startMonth=int(str(self.startdate)[:6])
        if(endMonth==''):
            endMonth=int(str(self.enddate)[:6])
        RetData=self.retData
        RetData=RetData[RetData.time>=startMonth]
        RetData=RetData[RetData.time<=endMonth]
        if(type(self.filterStockDF)==pd.DataFrame):
            RetData=setStockPool(RetData,self.filterStockDF)
        for facname in factorlist:
            Mer=self.FactorDataBase['v'][['time','code',facname]].merge(RetData,on=['time','code'],how='outer').dropna()
            if(asc!=''):
                ascloc=asc
            else:
                ascloc=False
                if(facname in self.ICAns):
                    if(self.ICAns[facname]['IC:']<0):
                        ascloc=True
            Mer = Mer.groupby('time').apply(lambda x: isinGroupT(x, facname, asc=ascloc, t=t)).reset_index(drop=True)
            ls_ret = calcGroupRet(Mer,facname,RetData)
            ls_ret['多空组合'] = ls_ret[1] - ls_ret[t]   # group 1 minus group t ('多空组合' = long-short portfolio)
            if (facname in self.portfolioGroup.columns):   # if duplicated, drop the old info first and reload
                self.portfolioGroup = self.portfolioGroup.drop(columns=facname)
            self.portfolioGroup = self.portfolioGroup.merge(Mer[['time','code',facname]], on=['time', 'code'], how='outer')
            self.portfolioList[facname]=ls_ret
            self.portfolioAns[facname]=evaluatePortfolioRet(ls_ret[1]-ls_ret[t])
            self.annualTurnover[facname] = calcAnnualTurnover(self.portfolioGroup, facname)
            if(len(factorlist)==1):
                print(facname+':')
                ls_ret1=ls_ret.reset_index().copy()
                ls_ret1['time']=ls_ret1['time'].apply(lambda x:str(x))
                ls_ret1.set_index('time').apply(lambda x:x+1).cumprod().plot()
                print(self.portfolioAns[facname])
                plt.show()
        if(len(factorlist)>1):
            print(self.portfolioDF)

    def doubleSorting(self,factor_list,method='cond',startMonth=200001,endMonth=210001,t1=5,t2=5,asc=''):
        '''
        Parameters
        ----------
        factor_list : list
            Must be a list ['fac1','fac2'], meaning a double sort of fac2
            conditional on fac1. The double sort of fac2 given fac1 is named
            'fac2|fac1'.
        method : str 'cond' or 'idp'
            'cond' is a conditional double sort, 'idp' an independent one.
        t1, t2 : int
            t1 and t2 are the numbers of groups for fac1 and fac2; default 5.

        Returns
        -------
        The first return value is the t1*t2 matrix of annualized returns.
        The second return value is the t1*t2 matrix of information ratios.
        portfolioList and portfolioGroup are updated accordingly.
        '''
        data = self.FactorDataBase['v'][['time','code']+factor_list].copy()
        data = data[data.time>=startMonth]
        data = data[data.time<=endMonth]
        RetData=self.retData
        RetData=RetData[RetData.time>=startMonth]
        RetData=RetData[RetData.time<=endMonth]
        if(asc!=''):
            ascloc=asc
        else:
            ascloc=False
        if method=='cond':
            data = data.merge(RetData, on=['time','code'], how='outer').dropna()
            data = data.groupby('time').apply(isinGroupT, factor_list[0], asc=ascloc, t=t1).reset_index(drop=True)
            data = data.groupby(['time',factor_list[0]]).apply(isinGroupT, factor_list[1], asc=ascloc, t=t2).reset_index(drop=True)
            facname=('%s|%s'%(factor_list[1], factor_list[0]))
            data[facname] = data[factor_list[0]].apply(lambda x: str(x))+data[factor_list[1]].apply(lambda x: str(x))   # conditional group labels
            ls_ret = calcGroupRet(data,facname,RetData)   # conditional group returns
            fac2_ls_ret = calcGroupRet(data,factor_list[1],RetData)
            ls_ret['多空组合'] = fac2_ls_ret[1] - fac2_ls_ret[t2]
            self.portfolioList[facname]=ls_ret
            self.portfolioGroup = self.portfolioGroup.merge(data[['time','code',facname]], on=['time', 'code'], how='outer')

        def ARIR(Rev_seq,t=12):
            # '年化收益率' = annualized return, '信息比率' = information ratio
            ret_mean=e**(np.log(Rev_seq+1).mean()*12)-1
            ret_sharpe=Rev_seq.mean()*t/Rev_seq.std()/t**0.5
            return pd.DataFrame({'年化收益率':ret_mean,
                                 '信息比率':ret_sharpe})
        tmp = ARIR(ls_ret.drop('多空组合',axis=1))
        tmp_AnlRet,tmp_IR = tmp['年化收益率'].values.reshape((t2,t1)),tmp['信息比率'].values.reshape((t2,t1))
        tmp_AnlRet,tmp_IR = pd.DataFrame(tmp_AnlRet,columns=[factor_list[1]+'_'+str(i) for i in range(1,t2+1)],index=[factor_list[0]+'_'+str(i) for i in range(1,t1+1)]),pd.DataFrame(tmp_IR,columns=[factor_list[1]+'_'+str(i) for i in range(1,t2+1)],index=[factor_list[0]+'_'+str(i) for i in range(1,t1+1)])
        return tmp_AnlRet,tmp_IR

    # standard test pipeline
    def autotest(self,factorlist='',startMonth='',endMonth='',t=5,asc=''):
        self.calcIC(factorlist,startMonth,endMonth)
        self.calcLongShort(factorlist,startMonth,endMonth,t,asc)

    # compute portfolios of the top K stocks ranked by factor value
    def calcTopK(self,factorlist='',startMonth='',endMonth='',k=30,asc='',base=''):
        if(factorlist==''):
            factorlist=self.factorlist
        if(type(factorlist)==str):
            factorlist=[factorlist]
        if(startMonth==''):
            startMonth=int(str(self.startdate)[:6])
        if(endMonth==''):
            endMonth=int(str(self.enddate)[:6])
        RetData=self.retData
        RetData=RetData[RetData.time>=startMonth]
        RetData=RetData[RetData.time<=endMonth]
        if(type(self.filterStockDF)==pd.DataFrame):
            RetData=setStockPool(RetData,self.filterStockDF)
        if ((base != '') & (base in self.portfolioGroup.columns)):
            factorDB = self.portfolioGroup[self.portfolioGroup[base] == 1][['time', 'code']].merge(self.FactorDataBase['v'],on=['time', 'code'],how='outer').dropna()
        elif (base == ''):
            factorDB = self.FactorDataBase['v']
        else:
            print('error')
            return factorlist
        for facname in factorlist:
            Mer=factorDB[['time','code',facname]].merge(RetData,on=['time','code'],how='outer').dropna()
            if(asc!=''):
                ascloc=asc
            else:
                ascloc=False
                if(facname in self.ICAns):
                    if(self.ICAns[facname]['IC:']<0):
                        ascloc=True
            Mer = Mer.groupby('time').apply(lambda x: isinTopK(x, facname, ascloc, k=k)).reset_index(drop=True)
            topk_list = calcGroupRet(Mer,facname,RetData)
            if (facname in self.portfolioGroup.columns):   # if duplicated, drop the old info first and reload
                self.portfolioGroup = self.portfolioGroup.drop(columns=facname)
            # portfolioGroup == 1 means the stock's factor value ranks within the top k under the asc ordering; == 2 means within the bottom k
            self.portfolioGroup = self.portfolioGroup.merge(Mer[['time','code',facname]], on=['time', 'code'], how='outer').fillna(0)
            self.portfolioList[facname]=topk_list
            self.portfolioAns[facname]=evaluatePortfolioRet(topk_list[1]-topk_list[0])
            self.annualTurnover[facname] = calcAnnualTurnover(self.portfolioGroup, facname)
            if(len(factorlist)==1):
                print(facname+':')
                topk_list['ls']=topk_list[1]-topk_list[0]
                calc_plot(topk_list.apply(lambda x:x+1).cumprod())
                print(self.portfolioAns[facname])
                plt.show()
        if(len(factorlist)>1):
            print(self.portfolioDF)

    def calcFutureRet(self,factorlist='',startMonth='',endMonth='',L=36,t=5,asc=''):
        '''
        Parameters
        ----------
        factorlist : TYPE, optional
            Factors to test, 'factor1' or ['factor1','factor2']; may be left empty.
        startMonth : int
            Start month, e.g. 201001; may be left empty.
        endMonth : int
            End month, e.g. 202201; may be left empty.
        L : number of months to look ahead; default 36.
        t : int
            Number of groups; default 5.
        asc : T or F
            Direction; default True for ascending (small to large), False for descending.

        Returns
        -------
        Returns the mean forward return over the next 1 to 36 months for each
        month, stored in Test.FutureRet.
        '''
        if(factorlist==''):
            factorlist=self.factorlist
        if(type(factorlist)==str):
            factorlist=[factorlist]
        if(startMonth==''):
            startMonth=int(str(self.startdate)[:6])
        if(endMonth==''):
            endMonth=int(str(self.enddate)[:6])
        RetData=self.retData.pivot(index='time',columns='code',values='ret')
        self.FutureRet=pd.DataFrame(columns=factorlist)
        RetData=RetData.apply(lambda x:np.log(x+1))
        for i in tqdm(range(1,L+1)):
            Ret_loc=RetData.rolling(window=i).sum().apply(lambda x:np.e**x-1).shift(-1*i+1).dropna(how='all').stack().reset_index()
            Ret_loc.columns=['time','code','ret']
            Ret_loc=Ret_loc[Ret_loc.time>=startMonth]
            Ret_loc=Ret_loc[Ret_loc.time<=endMonth]
            if(type(self.filterStockDF)==pd.DataFrame):
                Ret_loc=setStockPool(RetData,self.filterStockDF)
            for facname in factorlist:
                if(asc!=''):
                    ascloc=asc
                else:
                    ascloc=False
                    if(facname in self.ICAns):
                        if(self.ICAns[facname]['IC:']<0):
                            ascloc=True
                Mer=self.FactorDataBase['v'][['time','code',facname]].merge(Ret_loc,on=['time','code'],how='outer').dropna()
                Mer=Mer.groupby('time').apply(isinGroupT,facname,asc=ascloc,t=t).reset_index(drop=True)
                ls_ret=calcGroupRet(Mer,facname,Ret_loc).reset_index()
                self.FutureRet.loc[i,facname]=(ls_ret[1]-ls_ret[t]).mean()   # group 1 minus group t
        self.FutureRet.plot()

    # compute win rate and profit/loss ratio
    def displayWinRate(self,factorlist=''):
        if(factorlist==''):
            factorlist=self.portfolioList.keys()
        for facname in factorlist:
            Mer=self.portfolioGroup[['time','code',facname]].merge(self.retData,on=['time','code'],how='outer').dropna()
            L=Mer.groupby(['time']).apply(calcGroupWR,facname,self.retData)
            self.WR[facname]=L.mean()['WR']
            self.PL[facname]=L.mean()['PL']
        print(pd.concat([pd.Series(self.WR,name='WR'),pd.Series(self.PL,name='PL')],axis=1))

    # display yearly performance
    def displayYearPerformance(self,factorlist='',t=5):
        '''
        Print by year: the performance of groups 1 and t, and of group 1 minus
        group t -- return, information ratio, monthly win rate and max
        drawdown (via FB.evaluatePortfolioRet).
        '''
        if(factorlist==''):
            factorlist=self.portfolioList.keys()
        if(type(factorlist)==str):
            factorlist=[factorlist]
        for facname in factorlist:
            portfolio=self.portfolioList[facname].reset_index()
            portfolio['time']=portfolio['time'].apply(lambda x:str(x)[:4])
            portfolioyear=portfolio.groupby('time')
            ans=pd.DataFrame()
            for year in portfolio.time.sort_values().unique():
                portfolio_loc=portfolioyear.get_group(year).set_index('time')
                ans1=evaluatePortfolioRet(portfolio_loc['多空组合'])
                ans1.loc[1]=(portfolio_loc[1]+1).prod()-1
                ans1.loc[t]=(portfolio_loc[t]+1).prod()-1
                ans1.name=year
                ans=ans.append(ans1)
            self.year_performance[facname]=ans

    # compute correlation matrices: 1. factor values  2. IC series
    def calcCorrMatrix(self,CorType=stats.spearmanr):
        '''
        self.factorCorr holds the factor correlations, ICCorr the correlations
        between IC series. Defaults to stats.spearmanr; stats.pearsonr may be
        used instead.

        Parameters
        ----------
        CorType : TYPE, optional
            DESCRIPTION. The default is stats.spearmanr.

        Returns
        -------
        None.
        '''
        self.factorCorr=pd.DataFrame([],index=self.factorlist,columns=self.factorlist)
        self.ICCorr=pd.DataFrame([],index=self.factorlist,columns=self.factorlist)
        for i in range(len(self.factorlist)):
            for j in range(len(self.factorlist)):
                if(i<j):
                    fac=self.FactorDataBase['v'][['time','code',self.factorlist[i],self.factorlist[j]]].dropna()
                    fac=fac.groupby('time').apply(lambda x:CorType(x[self.factorlist[i]],x[self.factorlist[j]])[0])
                    self.factorCorr.loc[self.factorlist[i],self.factorlist[j]]=fac.mean()
                    if(self.factorlist[i] in self.ICList and self.factorlist[j] in self.ICList):
                        A=pd.DataFrame(self.ICList[self.factorlist[i]],columns=[1])
                        A[2]=self.ICList[self.factorlist[j]]
                        A=A.dropna()
                        self.ICCorr.loc[self.factorlist[i],self.factorlist[j]]=CorType(A[1],A[2])[0]
        print('Factor correlations:')
        print(self.factorCorr)
        print('IC correlations:')
        print(self.ICCorr.dropna(how='all').dropna(how='all',axis=1))

    # correlation with the Barra factors
    def calcCorrBarra(self,factorlist=''):
        if(factorlist==''):
            factorlist=self.factorlist
        if(type(factorlist)==str):
            factorlist=[factorlist]
        factor_tmp,Barra_list=addXBarra(self.FactorDataBase['v'][['time','code']+factorlist])
        Corr_Mat=pd.DataFrame(index=factorlist,columns=Barra_list)
        for fac in factorlist:
            for barra in Barra_list:
                corr_loc=factor_tmp[['time',fac,barra]].dropna()
                Corr_Mat.loc[fac,barra]=corr_loc.groupby('time').apply(lambda x:stats.spearmanr(x[fac],x[barra])[0]).mean()
        self.Corr_Mat=Corr_Mat
        print('Correlation with Barra')
        print(self.Corr_Mat.T)

    # obtain pure factors, stored with the suffix '_pure'
    def calcPureFactor(self,factorlist=''):
        if(factorlist==''):
            factorlist=self.factorlist
        if(type(factorlist)==str):
            factorlist=[factorlist]
        for fac in factorlist:
            factorDF=calcNeuBarra(self.FactorDataBase['v'], fac)
            self.getFactor(factorDF[['time','code',fac+'_pure']].dropna())

    @property
    def ICDF(self):
        return pd.DataFrame(self.ICAns).T

    @property
    def portfolioDF(self):
        return pd.DataFrame(self.portfolioAns).T


class IndTest(FactorTest):
    def __init__(self):
        self.startdate=20000101
        self.enddate=21000101
        self.factorlist=[]
        self.FactorDataBase={'v':pd.DataFrame(columns=['time','code'])}
        self.filterStockDF='FULL'
        self.retData = getIndRetData()   # standardized to time/code columns; time is int, code is str
        self.ICList={}
        self.portfolioList={}
        self.ICAns={}
        self.portfolioAns={}
        self.portfolioGroup = pd.DataFrame(columns=['time', 'code'])
        self.annualTurnover = {}
        self.year_performance={}
        self.indStatus=pd.read_csv(filepathtestdata+'sw1.csv').set_index('申万代码')   # '申万代码' = SW (Shenwan) industry code
        pd.options.mode.use_inf_as_na = True   # treat inf as NaN
        self.dataProcess=dataProcess(self.FactorDataBase)

    # convert stock-level data to industry-level data
    @staticmethod
    def convertStocktoInd(Factor,func=lambda x:x.mean()):
        if('month' in Factor):
            Factor.rename(columns={'month':'time'},inplace=True)
        if('date' in Factor):
            Factor.rename(columns={'date':'time'},inplace=True)
        factorList=Factor.columns
        if(len(factorList)<=2):
            print('error')
            return Factor
        else:
            factorList=getfactorname(Factor,['code','time'])
        DF=pd.DataFrame(columns=['time','code'])
        indStatus=pd.read_csv(filepathtestdata+'sw1.csv').set_index('申万代码')
        for facname in factorList:
            DataLoc=Factor[['time','code',facname]]
            DataLoc=DataLoc.pipe(getSWIndustry,freq='month')
            DataLoc=DataLoc.groupby(['time','SWind']).mean().reset_index()
            A=DataLoc.groupby('SWind')
            for ind in DataLoc['SWind'].unique():
                DataLoc.loc[A.get_group(ind)['SWind'].index,'code']=indStatus.loc[ind,'代码']   # '代码' = code column of the SW industry table
            DF=DF.merge(DataLoc[['time','code',facname]],on=['time','code'],how='outer')
        return DF
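# A minimal sketch of a typical run, using only methods defined above
# (hedged: the factor DataFrame my_factor -- integer time in YYYYMM form, a
# string code column and one value column -- is a hypothetical input built to
# match what getFactor() expects; it does not ship with the repository):
#
# test = FactorTest()
# test.getFactor(my_factor)        # register a factor named after its column
# test.autotest('my_factor')       # IC stats plus the 5-group long-short test
# test.displayYearPerformance()    # per-year breakdown of the results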
2.515625
3
src/qm/terachem/terachem.py
hkimaf/unixmd
0
12889
<reponame>hkimaf/unixmd
from __future__ import division
from qm.qm_calculator import QM_calculator
from misc import call_name
import os

class TeraChem(QM_calculator):
    """ Class for common parts of TeraChem

        :param string basis_set: Basis set information
        :param string functional: Exchange-correlation functional information
        :param string precision: Precision in the calculations
        :param string root_path: Path for TeraChem root directory
        :param integer ngpus: Number of GPUs
        :param integer,list gpu_id: ID of used GPUs
        :param string version: Version of TeraChem
    """
    def __init__(self, functional, basis_set, root_path, ngpus, \
        gpu_id, precision, version):
        # Save name of QM calculator and its method
        super().__init__()

        # Initialize TeraChem common variables
        self.functional = functional
        self.basis_set = basis_set

        self.root_path = root_path
        if (not os.path.isdir(self.root_path)):
            error_message = "Root directory for TeraChem binary not found!"
            error_vars = f"root_path = {self.root_path}"
            raise FileNotFoundError (f"( {self.qm_method}.{call_name()} ) {error_message} ( {error_vars} )")
        self.qm_path = os.path.join(self.root_path, "bin")

        # Set the environmental variables for TeraChem
        lib_dir = os.path.join(self.root_path, "lib")
        os.environ["TeraChem"] = self.root_path
        os.environ["LD_LIBRARY_PATH"] += os.pathsep + os.path.join(lib_dir)

        self.ngpus = ngpus
        self.gpu_id = gpu_id
        if (self.gpu_id is None):
            error_message = "GPU ID must be set in running script!"
            error_vars = f"gpu_id = {self.gpu_id}"
            raise ValueError (f"( {self.qm_method}.{call_name()} ) {error_message} ( {error_vars} )")

        if (isinstance(self.gpu_id, list)):
            if (len(self.gpu_id) != self.ngpus):
                error_message = "Number of elements for GPU ID must be equal to number of GPUs!"
                error_vars = f"len(gpu_id) = {len(self.gpu_id)}, ngpus = {self.ngpus}"
                raise ValueError (f"( {self.qm_method}.{call_name()} ) {error_message} ( {error_vars} )")
        else:
            error_message = "Type of GPU ID must be list consisting of integer!"
            error_vars = f"gpu_id = {self.gpu_id}"
            raise TypeError (f"( {self.qm_method}.{call_name()} ) {error_message} ( {error_vars} )")

        self.precision = precision
        self.version = version

        if (isinstance(self.version, str)):
            if (self.version != "1.93"):
                error_message = "Other versions not implemented!"
                error_vars = f"version = {self.version}"
                raise ValueError (f"( {self.qm_method}.{call_name()} ) {error_message} ( {error_vars} )")
        else:
            error_message = "Type of version must be string!"
            error_vars = f"version = {self.version}"
            raise TypeError (f"( {self.qm_method}.{call_name()} ) {error_message} ( {error_vars} )")
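# A minimal sketch of constructing this class (hedged: the functional, basis
# set and root directory below are illustrative placeholders, not values from
# this repository; "1.93" is the only version the check above accepts, and
# gpu_id must be a list whose length equals ngpus):
#
# qm = TeraChem(functional="b3lyp", basis_set="6-31g*",
#               root_path="/opt/TeraChem", ngpus=2, gpu_id=[0, 1],
#               precision="dynamic", version="1.93")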
2.296875
2
tests/unit/modules/test_reg_win.py
l2ol33rt/salt
0
12890
# -*- coding: utf-8 -*-
'''
    :synopsis: Unit Tests for Windows Registry Module 'module.reg'
    :platform: Windows
    :maturity: develop
    :codeauthor: <NAME> <https://github.com/damon-atkins>
    versionadded:: 2016.11.0
'''
# Import Python future libs
from __future__ import absolute_import
from __future__ import unicode_literals
# Import Python Libs
import sys
import time
# Import Salt Testing Libs
from tests.support.unit import TestCase, skipIf
from tests.support.helpers import destructiveTest
# Import Salt Libs
import salt.modules.reg as win_mod_reg
from salt.ext import six
try:
    from salt.ext.six.moves import winreg as _winreg  # pylint: disable=import-error,no-name-in-module
    NO_WINDOWS_MODULES = False
except ImportError:
    NO_WINDOWS_MODULES = True

PY2 = sys.version_info[0] == 2
# The following is used to make sure we are not
# testing already existing data
# Note strftime returns a str, so we need to make it unicode
TIMEINT = int(time.time())

if PY2:
    TIME_INT_UNICODE = six.text_type(TIMEINT)
    TIMESTR = time.strftime('%X %x %Z').decode('utf-8')
else:
    TIMESTR = time.strftime('%X %x %Z')
    TIME_INT_UNICODE = str(TIMEINT)  # pylint: disable=R0204

# we do not need to prefix this with u, as we are
# using from __future__ import unicode_literals
UNICODETEST_WITH_SIGNS = 'Testing Unicode \N{COPYRIGHT SIGN},\N{TRADE MARK SIGN},\N{REGISTERED SIGN} '+TIMESTR
UNICODETEST_WITHOUT_SIGNS = 'Testing Unicode'+TIMESTR
UNICODE_TEST_KEY = 'UnicodeKey \N{TRADE MARK SIGN} '+TIME_INT_UNICODE
UNICODE_TEST_KEY_DEL = 'Delete Me \N{TRADE MARK SIGN} '+TIME_INT_UNICODE


@skipIf(NO_WINDOWS_MODULES, 'requires Windows OS to test Windows registry')
class RegWinTestCase(TestCase):
    '''
    Test cases for salt.modules.reg
    '''

    @skipIf(not sys.platform.startswith("win"), "requires Windows OS")
    def test_read_reg_plain(self):
        '''
        Test - Read a registry value from a subkey using Python 2 Strings or
        Python 3 Bytes
        '''
        if not PY2:
            self.skipTest('Test is only valid for Python 2')
        subkey = b'Software\\Microsoft\\Windows NT\\CurrentVersion'
        vname = b'PathName'
        handle = _winreg.OpenKey(
            _winreg.HKEY_LOCAL_MACHINE,
            subkey,
            0,
            _winreg.KEY_ALL_ACCESS
        )
        (current_vdata, dummy_current_vtype) = _winreg.QueryValueEx(handle, vname)
        _winreg.CloseKey(handle)
        test_vdata = win_mod_reg.read_value(b'HKEY_LOCAL_MACHINE', subkey, vname)[b'vdata']
        self.assertEqual(test_vdata, current_vdata)

    @skipIf(not sys.platform.startswith("win"), "requires Windows OS")
    def test_read_reg_unicode(self):
        '''
        Test - Read a registry value from a subkey using Python 2 Unicode
        or Python 3 Str i.e. Unicode
        '''
        subkey = 'Software\\Microsoft\\Windows NT\\CurrentVersion'
        vname = 'PathName'
        handle = _winreg.OpenKey(
            _winreg.HKEY_LOCAL_MACHINE,
            subkey,
            0,
            _winreg.KEY_ALL_ACCESS
        )
        (current_vdata, dummy_current_vtype) = _winreg.QueryValueEx(handle, vname)
        _winreg.CloseKey(handle)
        test_vdata = win_mod_reg.read_value(
            'HKEY_LOCAL_MACHINE',
            subkey,
            vname)['vdata']
        self.assertEqual(test_vdata, current_vdata)

    @skipIf(not sys.platform.startswith("win"), "requires Windows OS")
    def test_list_keys_fail(self):
        '''
        Test - List the keys under a subkey which does not exist.
        '''
        subkey = 'ThisIsJunkItDoesNotExistIhope'
        test_list = win_mod_reg.list_keys('HKEY_LOCAL_MACHINE', subkey)
        # returns a tuple with first item false, and second item a reason
        test = isinstance(test_list, tuple) and (not test_list[0])
        self.assertTrue(test)

    @skipIf(not sys.platform.startswith("win"), "requires Windows OS")
    def test_list_keys(self):
        '''
        Test - List the keys under a subkey
        '''
        subkey = 'Software\\Microsoft\\Windows NT\\CurrentVersion'
        test_list = win_mod_reg.list_keys('HKEY_LOCAL_MACHINE', subkey)
        test = len(test_list) > 5  # There should be a lot more than 5 items
        self.assertTrue(test)

    @skipIf(not sys.platform.startswith("win"), "requires Windows OS")
    def test_list_values_fail(self):
        '''
        Test - List the values under a subkey which does not exist.
        '''
        subkey = 'ThisIsJunkItDoesNotExistIhope'
        test_list = win_mod_reg.list_values('HKEY_LOCAL_MACHINE', subkey)
        # returns a tuple with first item false, and second item a reason
        test = isinstance(test_list, tuple) and (not test_list[0])
        self.assertTrue(test)

    @skipIf(not sys.platform.startswith("win"), "requires Windows OS")
    def test_list_values(self):
        '''
        Test - List the values under a subkey.
        '''
        subkey = r'Software\Microsoft\Windows NT\CurrentVersion'
        test_list = win_mod_reg.list_values('HKEY_LOCAL_MACHINE', subkey)
        test = len(test_list) > 5  # There should be a lot more than 5 items
        self.assertTrue(test)

    # Not considering this destructive as its writing to a private space
    @skipIf(not sys.platform.startswith("win"), "requires Windows OS")
    def test_set_value_unicode(self):
        '''
        Test - set a registry plain text subkey name to a unicode string value
        '''
        vname = 'TestUnicodeString'
        subkey = 'Software\\SaltStackTest'
        test1_success = False
        test2_success = False
        test1_success = win_mod_reg.set_value(
            'HKEY_LOCAL_MACHINE',
            subkey,
            vname,
            UNICODETEST_WITH_SIGNS
        )
        # Now use _winreg direct to see if it worked as expected
        if test1_success:
            handle = _winreg.OpenKey(
                _winreg.HKEY_LOCAL_MACHINE,
                subkey,
                0,
                _winreg.KEY_ALL_ACCESS
            )
            (current_vdata, dummy_current_vtype) = _winreg.QueryValueEx(handle, vname)
            _winreg.CloseKey(handle)
            test2_success = (current_vdata == UNICODETEST_WITH_SIGNS)
        self.assertTrue(test1_success and test2_success)

    @skipIf(not sys.platform.startswith("win"), "requires Windows OS")
    def test_set_value_unicode_key(self):
        '''
        Test - set a registry Unicode subkey name with unicode characters
        within to an integer
        '''
        test_success = win_mod_reg.set_value(
            'HKEY_LOCAL_MACHINE',
            'Software\\SaltStackTest',
            UNICODE_TEST_KEY,
            TIMEINT,
            'REG_DWORD'
        )
        self.assertTrue(test_success)

    @skipIf(not sys.platform.startswith("win"), "requires Windows OS")
    def test_del_value(self):
        '''
        Test - Create a registry value directly and delete it with salt
        '''
        subkey = 'Software\\SaltStackTest'
        vname = UNICODE_TEST_KEY_DEL
        vdata = 'I will be deleted'
        if PY2:
            handle = _winreg.CreateKeyEx(
                _winreg.HKEY_LOCAL_MACHINE,
                subkey.encode('mbcs'),
                0,
                _winreg.KEY_ALL_ACCESS
            )
            _winreg.SetValueEx(
                handle,
                vname.encode('mbcs'),
                0,
                _winreg.REG_SZ,
                vdata.encode('mbcs')
            )
        else:
            handle = _winreg.CreateKeyEx(
                _winreg.HKEY_LOCAL_MACHINE,
                subkey,
                0,
                _winreg.KEY_ALL_ACCESS
            )
            _winreg.SetValueEx(handle, vname, 0, _winreg.REG_SZ, vdata)
        _winreg.CloseKey(handle)
        # time.sleep(15)  # delays for 15 seconds
        test_success = win_mod_reg.delete_value(
            'HKEY_LOCAL_MACHINE',
            subkey,
            vname
        )
        self.assertTrue(test_success)

    @skipIf(not sys.platform.startswith("win"), "requires Windows OS")
    def test_del_key_recursive_user(self):
        '''
        Test - Create a key/value pair directly and delete it recursively
        with salt
        '''
        subkey = 'Software\\SaltStackTest'
        vname = UNICODE_TEST_KEY_DEL
        vdata = 'I will be deleted recursive'
        if PY2:
            handle = _winreg.CreateKeyEx(
                _winreg.HKEY_CURRENT_USER,
                subkey.encode('mbcs'),
                0,
                _winreg.KEY_ALL_ACCESS
            )
            _winreg.SetValueEx(
                handle,
                vname.encode('mbcs'),
                0,
                _winreg.REG_SZ,
                vdata.encode('mbcs')
            )
        else:
            handle = _winreg.CreateKeyEx(
                _winreg.HKEY_CURRENT_USER,
                subkey,
                0,
                _winreg.KEY_ALL_ACCESS
            )
            _winreg.SetValueEx(handle, vname, 0, _winreg.REG_SZ, vdata)
        _winreg.CloseKey(handle)
        # time.sleep(15)  # delays for 15 seconds so you can run regedit & watch it happen
        test_success = win_mod_reg.delete_key_recursive('HKEY_CURRENT_USER', subkey)
        self.assertTrue(test_success)

    @skipIf(not sys.platform.startswith("win"), "requires Windows OS")
    @destructiveTest
    def test_del_key_recursive_machine(self):
        '''
        This is a DESTRUCTIVE TEST: it creates a new registry entry and then
        destroys the registry entry recursively. However, it is completed in
        its own space within the registry. We mark this as destructiveTest as
        it has the potential to destroy a machine if the salt reg code has a
        large error in it.
        '''
        subkey = 'Software\\SaltStackTest'
        vname = UNICODE_TEST_KEY_DEL
        vdata = 'I will be deleted recursive'
        if PY2:
            handle = _winreg.CreateKeyEx(
                _winreg.HKEY_LOCAL_MACHINE,
                subkey.encode('mbcs'),
                0,
                _winreg.KEY_ALL_ACCESS
            )
            _winreg.SetValueEx(
                handle,
                vname.encode('mbcs'),
                0,
                _winreg.REG_SZ,
                vdata.encode('mbcs')
            )
        else:
            handle = _winreg.CreateKeyEx(
                _winreg.HKEY_LOCAL_MACHINE,
                subkey,
                0,
                _winreg.KEY_ALL_ACCESS
            )
            _winreg.SetValueEx(handle, vname, 0, _winreg.REG_SZ, vdata)
        _winreg.CloseKey(handle)
        # time.sleep(15)  # delays for 15 seconds so you can run regedit and watch it happen
        test_success = win_mod_reg.delete_key_recursive('HKEY_LOCAL_MACHINE', subkey)
        self.assertTrue(test_success)

    # pylint: disable=W0511
    # TODO: Test other hives, other than HKEY_LOCAL_MACHINE and HKEY_CURRENT_USER
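# A minimal sketch of the read path these tests exercise (hedged: the output
# handling is illustrative; read_value() returns a dict with a 'vdata' entry,
# exactly as used in the tests above):
#
# path_name = win_mod_reg.read_value(
#     'HKEY_LOCAL_MACHINE',
#     'Software\\Microsoft\\Windows NT\\CurrentVersion',
#     'PathName')['vdata']
# print(path_name)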
2.234375
2
glacier/glacierexception.py
JeffAlyanak/amazon-glacier-cmd-interface
166
12891
import traceback import re import sys import logging """ ********** Note by wvmarle: This file contains the complete code from chained_exception.py plus the error handling code from GlacierWrapper.py, allowing it to be used in other modules like glaciercorecalls as well. ********** """ class GlacierException(Exception): """ An extension of the built-in Exception class, this handles an additional cause keyword argument, adding it as cause attribute to the exception message. It logs the error message (amount of information depends on the log level) and passes it on to a higher level to handle. Furthermore it allows for the upstream handler to call for a complete stack trace or just a simple error and cause message. TODO: describe usage. """ ERRORCODE = {'InternalError': 127, # Library internal error. 'UndefinedErrorCode': 126, # Undefined code. 'NoResults': 125, # Operation yielded no results. 'GlacierConnectionError': 1, # Can not connect to Glacier. 'SdbConnectionError': 2, # Can not connect to SimpleDB. 'CommandError': 3, # Command line is invalid. 'VaultNameError': 4, # Invalid vault name. 'DescriptionError': 5, # Invalid archive description. 'IdError': 6, # Invalid upload/archive/job ID given. 'RegionError': 7, # Invalid region given. 'FileError': 8, # Error related to reading/writing a file. 'ResumeError': 9, # Problem resuming a multipart upload. 'NotReady': 10, # Requested download is not ready yet. 'BookkeepingError': 11, # Bookkeeping not available. 'SdbCommunicationError': 12, # Problem reading/writing SimpleDB data. 'ResourceNotFoundException': 13, # Glacier can not find the requested resource. 'InvalidParameterValueException': 14, # Parameter not accepted. 'DownloadError': 15, # Downloading an archive failed. 'SNSConnectionError': 126, # Can not connect to SNS 'SNSConfigurationError': 127, # Problem with configuration file 'SNSParameterError':128, # Problem with arguments passed to SNS } def __init__(self, message, code=None, cause=None): """ Constructor. Logs the error. :param message: the error message. :type message: str :param code: the error code. :type code: str :param cause: explanation on what caused the error. :type cause: str """ self.logger = logging.getLogger(self.__class__.__name__) self.exitcode = self.ERRORCODE[code] if code in self.ERRORCODE else 254 self.code = code if cause: self.logger.error('ERROR: %s'% cause) self.cause = cause if isinstance(cause, tuple) else (cause,) self.stack = traceback.format_stack()[:-2] else: self.logger.error('An error occurred, exiting.') self.cause = () # Just wrap up a cause-less exception. # Get the stack trace for this exception. self.stack = ( traceback.format_stack()[:-2] + traceback.format_tb(sys.exc_info()[2])) # ^^^ let's hope the information is still there; caller must take # care of this. self.message = message self.logger.info(self.fetch(message=True)) self.logger.debug(self.fetch(stack=True)) if self.exitcode == 254: self.logger.debug('Unknown error code: %s.'% code) # Works as a generator to help get the stack trace and the cause # written out. def causeTree(self, indentation=' ', alreadyMentionedTree=[], stack=False, message=False): """ Returns a complete stack tree, an error message, or both. Returns a warning if neither stack or message are True. """ if stack: yield "Traceback (most recent call last):\n" ellipsed = 0 for i, line in enumerate(self.stack): if (ellipsed is not False and i < len(alreadyMentionedTree) and line == alreadyMentionedTree[i]): ellipsed += 1 else: if ellipsed: yield " ... 
(%d frame%s repeated)\n" % ( ellipsed, "" if ellipsed == 1 else "s") ellipsed = False # marker for "given out" yield line if message: exc = self if self.message is None else self.message for line in traceback.format_exception_only(exc.__class__, exc): yield line if self.cause: yield ("Caused by: %d exception%s\n" % (len(self.cause), "" if len(self.cause) == 1 else "s")) for causePart in self.cause: if hasattr(causePart,"causeTree"): for line in causePart.causeTree(indentation, self.stack): yield re.sub(r'([^\n]*\n)', indentation + r'\1', line) else: for line in traceback.format_exception_only(causePart.__class__, causePart): yield re.sub(r'([^\n]*\n)', indentation + r'\1', line) if not message and not stack: yield ('No output. Specify message=True and/or stack=True \ to get output when calling this function.\n') def write(self, stream=None, indentation=' ', message=False, stack=False): """ Writes the error details to sys.stderr or a stream. """ stream = sys.stderr if stream is None else stream for line in self.causeTree(indentation, message=message, stack=stack): stream.write(line) def fetch(self, indentation=' ', message=False, stack=False): """ Fetches the error details and returns them as string. """ out = '' for line in self.causeTree(indentation, message=message, stack=stack): out += line return out class InputException(GlacierException): """ Exception that is raised when there is someting wrong with the user input. """ VaultNameError = 1 VaultDescriptionError = 2 def __init__(self, message, code=None, cause=None): """ Handles the exception. :param message: the error message. :type message: str :param code: the error code. :type code: :param cause: explanation on what caused the error. :type cause: str """ GlacierException.__init__(self, message, code=code, cause=cause) class ConnectionException(GlacierException): """ Exception that is raised when there is something wrong with the connection. """ GlacierConnectionError = 1 SdbConnectionError = 2 def __init__(self, message, code=None, cause=None): """ Handles the exception. :param message: the error message. :type message: str :param code: the error code. :type code: :param cause: explanation on what caused the error. :type cause: str """ GlacierException.__init__(self, message, code=code, cause=cause) class CommunicationException(GlacierException): """ Exception that is raised when there is something wrong in the communication with an external library like boto. """ def __init__(self, message, code=None, cause=None): """ Handles the exception. :param message: the error message. :type message: str :param code: the error code. :type code: :param cause: explanation on what caused the error. :type cause: str """ GlacierException.__init__(self, message, code=code, cause=cause) class ResponseException(GlacierException): """ Exception that is raised when there is an http response error. """ def __init__(self, message, code=None, cause=None): GlacierException.__init__(self, message, code=code, cause=cause) if __name__ == '__main__': class ChildrenException(Exception): def __init__(self, message): Exception.__init__(self, message) class ParentException(GlacierException): def __init__(self, message, cause=None): if cause: GlacierException.__init__(self, message, cause=cause) else: GlacierException.__init__(self, message) try: try: raise ChildrenException("parent") except ChildrenException, e: raise ParentException("children", cause=e) except ParentException, e: e.write(indentation='|| ')
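# A minimal sketch of raising and reporting one of these exceptions (hedged:
# illustrative only; 'CommandError' is one of the codes in ERRORCODE above,
# and write()/fetch() are the reporting helpers defined in this module):
#
# try:
#     raise GlacierException('vault name rejected', code='CommandError')
# except GlacierException as e:
#     e.write(indentation='  ', message=True)   # print cause tree to stderr
#     sys.exit(e.exitcode)                      # 3 for CommandError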
2.234375
2
modes/import_corpus.py
freingruber/JavaScript-Raider
91
12892
<reponame>freingruber/JavaScript-Raider<filename>modes/import_corpus.py<gh_stars>10-100 # Copyright 2022 @ReneFreingruber # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This mode can be started by passing the "--import_corpus_mode" flag to the fuzzer # or by starting the fuzzer the first time (when no OUTPUT directory exists yet). # # The script imports new testcases into the current corpus. # Please note that the progress of the script is not linear (especially when creating an initial corpus). # The script will start slow (because it will find a lot of testcases with new behavior and this requires # standardization, minimization & state creation. # These operations are slow because they require to restart the JS engine multiple times, # and therefore it will take a longer time. After some time, the import-mode will be faster because it finds less files # with new coverage. At the end, the mode will again be slow (or maybe very slow) because it's processing the # bigger testcases (testcases are sorted based on file size and handled from small files to big files). # State creation for big input files is extremely slow. # It's maybe better to skip these big testcases and continue because later testcases can maybe further be # minimized (which would then be again fast). => I created my initial corpus with a different script, # skipping the big testcases is therefore not implemented here yet (and must manually be done). # TODO: In my original code I also removed v8 native functions because they quickly lead to crashes # But I couldn't find the code anymore. I guess this should be implemented in this file somewhere at the end? # This affect at least the functions: # %ProfileCreateSnapshotDataBlob # %LiveEditPatchScript # %IsWasmCode # %IsAsmWasmCode # %ConstructConsString # %HaveSameMap # %IsJSReceiver # %HasSmiElements # %HasObjectElements # %HasDoubleElements # %HasDictionaryElements # %HasHoleyElements # %HasSloppyArgumentsElements # %HaveSameMap # %HasFastProperties # %HasPackedElements # # More information can be found in my master thesis page 115. 
import utils import os import config as cfg import native_code.speed_optimized_functions as speed_optimized_functions from native_code.executor import Execution_Status import sys import random import string import re code_prefix = "function my_opt_func() {\n" code_suffix1 = """ } %OptimizeFunctionOnNextCall(my_opt_func); my_opt_func(); """ code_suffix2 = """ } %PrepareFunctionForOptimization(my_opt_func); %OptimizeFunctionOnNextCall(my_opt_func); my_opt_func(); """ code_suffix3 = """ } my_opt_func(); %PrepareFunctionForOptimization(my_opt_func); %OptimizeFunctionOnNextCall(my_opt_func); my_opt_func(); """ # These are just used for debugging debugging_number_exceptions = 0 debugging_number_success = 0 debugging_number_new_coverage = 0 def import_corpus_mode(input_dir_to_import): global code_prefix, code_suffix1, code_suffix2, code_suffix3 utils.msg("[i] Going to import another corpus to the current corpus...") utils.msg("[i] Corpus dir which will be imported is: %s" % input_dir_to_import) files_to_handle = [] already_seen_file_hashes = set() utils.msg("[i] Going to read all files in directory... (this can take some time)") for filename_to_import in os.listdir(input_dir_to_import): if filename_to_import.endswith(".js"): input_file_to_import = os.path.join(input_dir_to_import, filename_to_import) # Just get file size with open(input_file_to_import, 'r') as fobj: content = fobj.read().rstrip() sample_hash = utils.calc_hash(content) if sample_hash not in already_seen_file_hashes: # new file files_to_handle.append((input_file_to_import, len(content))) already_seen_file_hashes.add(sample_hash) utils.msg("[i] Finished reading files. Going to sort files based on file size...") # Sort based on filesize => start with small files => this ensures that the minimizer is faster files_to_handle.sort(key=lambda x: x[1]) utils.msg("[i] Finished sorting, going to start importing...") # Now start to import file by file cfg.my_status_screen.set_current_operation("Importing") total_number_files_to_import = len(files_to_handle) number_files_already_imported = 0 for entry in files_to_handle: (input_file_to_import, filesize) = entry number_files_already_imported += 1 utils.msg("[i] Importing file (%d/%d): %s" % (number_files_already_imported, total_number_files_to_import, input_file_to_import)) with open(input_file_to_import, 'r') as fobj: content = fobj.read().rstrip() if len(content) > 200000: # 200 KB continue # big files are too slow and are bad for mutation, so skip them if '\x00' in content: continue # ignore files with null bytes for the moment because the Python to C conversation does not support this # Check normal execution: check_if_testcase_triggers_new_behavior(content) # Check adapted execution (e.g. with removed testsuite functions) samples = preprocess_testcase(content) for sample in samples: check_if_testcase_triggers_new_behavior(sample) # Now check if it triggers more coverage if the code gets compiled: check_if_testcase_triggers_new_behavior(code_prefix + sample + code_suffix1) check_if_testcase_triggers_new_behavior(code_prefix + sample + code_suffix2) check_if_testcase_triggers_new_behavior(code_prefix + sample + code_suffix3) if cfg.deterministic_preprocessing_enabled: # And now start to preprocess all imported files! This can take a VERY long runtime # => I would not recommend running this because it can easily take several weeks of runtime. 
# It maybe makes sense for the first small testcases cfg.deterministically_preprocess_queue_func() return total_number_files_to_import def check_if_testcase_triggers_new_behavior(content): if len(content) > 10000: # 10 KB # big files are too slow and are bad for mutation, so skip them # Side note: I'm checking here for 10KB and in the above function for 200KB # because this function is maybe invoked with sub-functionality from the main script # which can be a lot smaller return previous_stats_new_behavior = cfg.my_status_screen.get_stats_new_behavior() # Restart the engine so that every testcase starts in a new v8 process # (=> this slows down the process but having a good input corpus is important) # If you want to be faster, you can maybe skip the engine restart here cfg.exec_engine.restart_engine() cfg.perform_execution_func(content, state=None) current_stats_new_behavior = cfg.my_status_screen.get_stats_new_behavior() if current_stats_new_behavior == previous_stats_new_behavior: # File didn't result in new coverage and was therefore not imported (importing would be done by perform_execution() )! # Just to get sure that there was not a flawed execution, I try it again here cfg.perform_execution_func(content, state=None) # This is a debug version of the above one. # The above one does all the required calculations (standardization, minimization, state creation) # which is very slow. But If I just want to quickly check how many files I can import, # then I'm using this debugging versions (which skips all these steps) # This version does also not restart the exec engine. # To use it, just replace the call with this function def check_if_testcase_triggers_new_behavior_debugging(content): global debugging_number_exceptions, debugging_number_success, debugging_number_new_coverage if len(content) > 10000: # 10 KB return result = cfg.exec_engine.execute_safe(content) if result.status == Execution_Status.SUCCESS: debugging_number_success += 1 if result.num_new_edges > 0: debugging_number_new_coverage += 1 # Dump the new coverage statistics number_triggered_edges, total_number_possible_edges = cfg.exec_engine.get_number_triggered_edges() if total_number_possible_edges == 0: total_number_possible_edges = 1 # avoid division by zero triggered_edges_in_percent = (100 * number_triggered_edges) / float(total_number_possible_edges) utils.msg("[i] Found new coverage! (%d success, %d exceptions, %d new coverage); New Coverage: %.4f %%" % (debugging_number_success, debugging_number_exceptions, debugging_number_new_coverage, triggered_edges_in_percent)) elif result.status == Execution_Status.EXCEPTION_THROWN: debugging_number_exceptions += 1 # TODO: This is pretty old code and needs a lot of refactoring/improvement ... # TODO: Also better implement these whole "\t" and " " and "\ņ" checking... 
# One testcase file can contain multiple testcases # That's why this function returns a list of samples def preprocess_testcase(code): ret = [] tmp = "" for line in code.split("\n"): line_check = line.strip() if line_check.startswith("import ") \ or line_check.startswith("import(") \ or line_check.startswith("export ") \ or line_check.startswith("loaded++") \ or line_check.startswith("await import"): continue # remove import and export statements tmp += line + "\n" code = tmp # All the following function replacements where manually found # The replacements can be found by starting this script and # dumping all testcases which trigger an exception # Then the testcases can manually be analyzed to understand # why they lead to an exception. By doing this, the following # functions were identified which are not defined as default # JavaScript functions (in v8). # Identification of these functions took a long time and corpus # coverage can still greatly be improved by identifying more such # functions. However, this is a time consuming task. # Example: Replace wscript.echo() function calls with console.log() pattern = re.compile("wscript.echo", re.IGNORECASE) code = pattern.sub("console.log", code) pattern = re.compile("CollectGarbage", re.IGNORECASE) code = pattern.sub("gc", code) code = code.replace("writeLine", "console.log") code = code.replace("WScript.SetTimeout", "setTimeout") code = code.replace("helpers.writeln", "console.log") code = code.replace("$ERROR", "console.log") code = code.replace("helpers.printObject", "console.log") code = code.replace("WScript.Arguments", "[]") code = code.replace("assert.unreachable()", "") code = code.replace("assertUnreachable()", "") code = code.replace("$DONOTEVALUATE()", "") code = code.replace("assertStmt", "eval") code = code.replace("inSection", "Number") code = code.replace("numberOfDFGCompiles", "Number") code = code.replace("optimizeNextInvocation", "%OptimizeFunctionOnNextCall") code = code.replace("printBugNumber", "console.log") code = code.replace("printStatus", "console.log") code = code.replace("saveStack()", "0") code = code.replace("gcPreserveCode()", "gc()") code = code.replace("platformSupportsSamplingProfiler()", "true") # Example: # var OProxy = $262.createRealm().global.Proxy; # => # var OProxy = Proxy; code = code.replace("$262.createRealm().global.", "") # Quit() is detected as a crash because v8 is closed, therefore I remove it # However, there can be functions like test_or_quit() where it could incorrectly remove quit() # Therefore I check for a space or a tab before. This is not a perfect solution, but filters # out some crashes # TODO: I now implemented better JavaScript parsing and should use the fuzzer functionality to replace it.. code = code.replace(" quit()", "") code = code.replace("\tquit()", "") code = code.replace("\nquit()", "\n") code = code.replace(" quit(0)", "") code = code.replace("\tquit(0)", "") code = code.replace("\nquit(0)", "\n") code = code.replace("trueish", "true") # it seems like SpiderMonkey accepts "trueish" as argument to asserEq oder reportCompare functions... 
    code = remove_function_call(code, "this.WScript.LoadScriptFile")
    code = remove_function_call(code, "wscript.loadscriptfile")
    code = code.replace("WScript.LoadScript(", "eval(")
    code = code.replace("evalcx(", "eval(")    # from SpiderMonkey; however, it can have a 2nd argument for the context, so this modification is not 100% correct
    code = remove_function_call(code, "WScript.LoadModuleFile")
    code = remove_function_call(code, "WScript.LoadModule")
    code = remove_function_call(code, "WScript.Attach")
    code = remove_function_call(code, "WScript.Detach")
    code = remove_function_call(code, "saveStack")    # I already removed "saveStack()", but this removes saveStack calls where an argument is passed
    code = remove_function_call(code, "WScript.FalseFile")
    code = remove_function_call(code, "assert.fail")
    code = remove_function_call(code, "assert.isUndefined")
    code = remove_function_call(code, "description")
    code = remove_function_call(code, "assertOptimized")
    code = remove_function_call(code, "assertDoesNotThrow")
    code = remove_function_call(code, "assertUnoptimized")
    code = remove_function_call(code, "assertPropertiesEqual")
    code = remove_function_call(code, "$DONE")
    code = code.replace("$DONE", "1")
    code = remove_function_call(code, "assertParts")
    code = remove_function_call(code, "verifyProperty")
    code = remove_function_call(code, "verifyWritable")
    code = remove_function_call(code, "verifyNotWritable")
    code = remove_function_call(code, "verifyEnumerable")
    code = remove_function_call(code, "verifyNotEnumerable")
    code = remove_function_call(code, "verifyConfigurable")
    code = remove_function_call(code, "verifyNotConfigurable")
    code = remove_function_call(code, "assertThrowsInstanceOf")
    code = remove_function_call(code, "testOption")
    code = remove_function_call(code, "assert.calls")
    code = remove_function_call(code, "generateBinaryTests")
    # TODO: does this detect too many functions which end with "crash"?
    # It can also be code like: =>crash("foo");
    code = remove_function_call(code, "crash")

    # This is a special function in SpiderMonkey which supports fuzzing (?)
    code = remove_function_call(code, "offThreadCompileScript")
    # Maybe I should change startgc to the gc() function? But then I need to remove the startgc() argument.
    code = remove_function_call(code, "startgc")
    # Some other garbage-collection-related stuff in SpiderMonkey:
    code = remove_function_call(code, "gczeal")
    code = remove_function_call(code, "gcslice")
    code = remove_function_call(code, "schedulezone")
    code = remove_function_call(code, "schedulegc")
    code = remove_function_call(code, "unsetgczeal")
    code = remove_function_call(code, "gcstate")

    # The following is for checks like:
    # if (this.WScript && this.WScript.LoadScriptFile) {
    # which should become:
    # if (False && False) {
    code = code.replace("WScript.LoadScriptFile", "False")
    code = code.replace("WScript.LoadScript", "False")
    code = code.replace("WScript.LoadModuleFile", "False")
    code = code.replace("WScript.LoadModule", "False")
    code = code.replace("this.WScript", "False")
    code = code.replace("this.False", "False")
    code = code.replace("WScript", "False")

    code = code.replace("$MAX_ITERATIONS", "5")

    code = remove_function_call(code, "utils.load")
    if " load" not in code and "\tload" not in code:
        # Little hack: I want to remove load() calls at the start of a file which load other JS files.
        # But if load is defined as a function, e.g. in code like:
        # function load(a) {
        # I don't want to remove it.
        code = remove_function_call(code, "load")

    code = remove_function_call(code, "assert.isnotundefined")
    code = remove_function_call(code, "assert.isdefined")
    code = remove_function_call(code, "assert.throws")
    code = remove_function_call(code, "assert_throws")
    code = remove_function_call(code, "assertThrows")
    code = remove_function_call(code, "shouldThrow")
    code = remove_function_call(code, "assertNull")
    code = remove_function_call(code, "shouldBeEqualToString")
    code = remove_function_call(code, "assertThrowsEquals")
    code = remove_function_call(code, "new BenchmarkSuite")    # This is not a function, but it works
    code = remove_function_call(code, "assertNoEntry")
    code = remove_function_call(code, "assertEntry")
    code = remove_function_call(code, " timeout")
    code = remove_function_call(code, "\ttimeout")
    code = remove_function_call(code, "\ntimeout")
    code = remove_function_call(code, "testFailed")
    code = remove_function_call(code, "finishJSTest")
    code = remove_function_call(code, "assertIteratorDone")
    code = remove_function_call(code, "assertIteratorNext")
    code = remove_function_call(code, "assertThrowsValue")
    code = remove_function_call(code, "Assertion")
    code = remove_function_call(code, "assertStackLengthEq")
    code = remove_function_call(code, "noInline")
    code = remove_function_call(code, "enableGeckoProfiling")
    code = remove_function_call(code, "enableSingleStepProfiling")
    code = remove_function_call(code, "disableSingleStepProfiling")
    code = remove_function_call(code, "enableGeckoProfilingWithSlowAssertions")
    code = remove_function_call(code, "assertThrownErrorContains")
    code = remove_function_call(code, "assertDecl")    # can maybe be fixed better
    code = remove_function_call(code, "assertExpr")
    code = remove_function_call(code, "assert.compareIterator")
    code = remove_function_call(code, "$DETACHBUFFER")
    code = remove_function_call(code, "checkSpeciesAccessorDescriptor")
    code = remove_function_call(code, "assertPropertyExists")
    code = remove_function_call(code, "assertPropertyDoesNotExist")
    code = remove_function_call(code, "assert_equal_to_array")

    code = replace_assert_function(code, "assert.sameValue", "==")
    code = replace_assert_function(code, "reportCompare", "==")
    code = replace_assert_function(code, "assert.areNotEqual", "!=")
    code = replace_assert_function(code, "assert.areEqual", "==")
    code = replace_assert_function(code, "assert.equals", "==")
    code = replace_assert_function(code, "assert.strictEqual", "===")
    code = replace_assert_function(code, "assert_equals", "==")
    code = replace_assert_function(code, "assertMatches", "==")
    code = replace_assert_function(code, "assertSame", "==")
    code = replace_assert_function(code, "assertEqualsDelta", "==")
    code = replace_assert_function(code, "assertNotEquals", "!=")
    code = replace_assert_function(code, "assert.notSameValue", "!=")
    code = replace_assert_function(code, "assertEq", "==")
    code = replace_assert_function(code, "verifyEqualTo", "==")
    code = replace_assert_function(code, "assert.compareArray", "==")
    code = replace_assert_function(code, "compareArray", "==")
    code = replace_assert_function(code, "assertDeepEq", "==")
    code = replace_assert_function(code, "assertArrayEquals", "==")
    code = replace_assert_function(code, "assertArray", "==")
    code = replace_assert_function(code, "assertEqArray", "==")

    # These must not be patched if only v8 is checked; they don't lead to a crash.
    # Only the static asserts lead to a crash.
    # code = replace_assert_function(code, "%StrictEqual", "===")
    # code = replace_assert_function(code, "%StrictNotEqual", "!==")
    # code = replace_assert_function(code, "%Equal", "==")
    # %GreaterThanOrEqual
    # %LessThan
    # %GreaterThan
    # %LessThanOrEqual

    # TODO: patching "assertIteratorResult" is more complicated..
    # TODO: more complicated: verifySymbolSplitResult
    # TODO WebKit:
    # assert.var fhgjeduyko=array[i];
    # => var fhgjeduyko=array[i];

    code = replace_assert_function(code, "assertInstanceof", "instanceof")
    code = replace_assert_function(code, "assertEquals", "==")
    code = replace_assert_function(code, "assertNotSame", "!=")    # assertNotSame(Atomics.wake, Atomics.notify);

    # The remove_assert_function() calls are for assert functions which take just 1 argument.
    code = remove_assert_function(code, "assert.isTrue")
    code = remove_assert_function(code, "assert.isFalse")
    code = remove_assert_function(code, "assert.assertFalse")
    code = remove_assert_function(code, "assertFalse")
    code = remove_assert_function(code, "assertTrue")
    code = remove_assert_function(code, "assert_true")
    code = remove_assert_function(code, "%TurbofanStaticAssert")
    code = remove_assert_function(code, "assert.shouldBeTrue")
    code = remove_assert_function(code, "assert.shouldBeFalse")
    code = remove_assert_function(code, "assert.shouldBe")
    code = remove_assert_function(code, "assert.assertNotNull")
    code = remove_assert_function(code, "shouldBeTrue")
    code = remove_assert_function(code, "shouldBeFalse")
    code = remove_assert_function(code, "shouldBe")
    code = remove_assert_function(code, "assertNotNull")
    code = remove_assert_function(code, "testJSON")
    code = remove_assert_function(code, "assertNativeFunction")
    code = remove_assert_function(code, "assert_malformed")
    code = remove_assert_function(code, "assertIteratorResult")
    code = remove_assert_function(code, "assert.doesNotThrow")
    code = remove_assert_function(code, "assert")    # This should be one of the last replacements!

    # This is a last-resort hack: in some cases assert.throws is not correctly detected because it's
    # inside a string which is later evaluated. That means the logic to detect the end of the function
    # call does not work correctly, so it's not removed above. Here I just replace it with a call to
    # Number to ensure that it does not crash.
    code = code.replace("assert.throws", "Number")

    if "testRunner.run" in code:
        # TODO: I also need to add function definitions from the start.
        # E.g.: WebKit testcase tc50725.js, or tc1061.js from ChakraCore.
        start_testcases = ["body: function () {", "body() {"]
        while True:
            finished = True
            for start_testcase in start_testcases:
                if start_testcase in code:
                    finished = False
            if finished:
                break
            for start_testcase in start_testcases:
                if start_testcase not in code:
                    continue
                idx = code.index(start_testcase)
                rest = code[idx + len(start_testcase):]
                idx_end = speed_optimized_functions.get_index_of_next_symbol_not_within_string(rest, "}")
                testcase_code = rest[:idx_end]
                code = rest[idx_end + 1:]
                ret.append(testcase_code)
    elif "oomTest" in code:
        code = "function oomTest(func_name) { func_name(); }\n" + code
        ret.append(code)
    elif "runtest" in code:
        code = "function runtest(func_name) { func_name(); }\n" + code
        ret.append(code)
    else:
        # Just add it
        ret.append(code)
    return ret


def remove_function_call(code, function_call_str):
    if function_call_str[-1] != "(":
        function_call_str = function_call_str + "("
    function_call_str = function_call_str.lower()
    while True:
        code_lowered = code.lower()
        if function_call_str not in code_lowered:
            return code
        idx = code_lowered.index(function_call_str)
        if idx != 0:
            previous_char = code[idx - 1]
            if previous_char != "\n" and previous_char != " " and previous_char != "\t":
                return code
        before = code[:idx]
        tmp = code[idx + len(function_call_str):]
        idx_end = speed_optimized_functions.get_index_of_next_symbol_not_within_string(tmp, ")")
        if idx_end == -1:
            # No closing ")" was found; leave the code unmodified.
            return code
        try:
            after = tmp[idx_end + 1:]
        except IndexError:
            # The ")" symbol was the last symbol in the string
            after = ""
        code = before + after


def replace_assert_function(code, assert_function_str, comparison_str):
    if assert_function_str[-1] != "(":
        assert_function_str = assert_function_str + "("
    original_code = code
    original_code_len = len(original_code)
    while True:
        if len(code) > original_code_len:
            # This means the last iteration contained a bug.
            # E.g.: if I replaced something like reportCompare(1,2) but the
            # actual JavaScript code didn't contain a second argument =>
            # reportCompare(1)
            # Then this code can be incorrect and start to create bigger samples.
            # I catch this here and just return the unmodified code.
            # Another option is that a regex string was not correctly detected.
            return original_code
        if assert_function_str not in code:
            return code
        # Examples:
        # assert.sameValue(typeof f, 'function');
        # assert.sameValue(f(), 'function declaration');
        idx = code.index(assert_function_str)
        before = code[:idx]
        rest = code[idx + len(assert_function_str):]
        idx_end = speed_optimized_functions.get_index_of_next_symbol_not_within_string(rest, ",")
        part1 = rest[:idx_end]
        rest = rest[idx_end + 1:]
        idx_end = speed_optimized_functions.get_index_of_next_symbol_not_within_string(rest, ")")
        if idx_end == -1:
            # Return the unmodified code; this most likely means a regex string was not correctly
            # detected and, inside the regex string, a symbol from another string was used...
            return code
        idx_command = speed_optimized_functions.get_index_of_next_symbol_not_within_string(rest, ",")
        if idx_command == -1:
            idx_command = idx_end
        elif idx_command > idx_end:
            idx_command = idx_end
        if idx_end == 0:
            return code    # some buggy case
        part2 = rest[:idx_command]
        rest = rest[idx_end + 1:]
        if len(rest) == 0:
            # can happen with some funny unicode testcases
            return original_code
        if rest[0] == ";":
            rest = rest[1:]    # remove the ";"
            code = before + part1.strip() + " " + comparison_str + " " + part2.strip() + ";" + rest
        else:
            code = before + part1.strip() + " " + comparison_str + " " + part2.strip() + " " + rest


def remove_assert_function(code, assert_function_str):
    if assert_function_str[-1] != "(":
        assert_function_str = assert_function_str + "("
    while True:
        if assert_function_str not in code:
            return code
        # Example:
        # assert.isTrue(/error in callback/.test(frames[0]), `Invalid first frame "${frames[0]}" for ${builtin.name}`);
        idx = code.index(assert_function_str)
        before = code[:idx]
        rest = code[idx + len(assert_function_str):]
        idx_end = speed_optimized_functions.get_index_of_next_symbol_not_within_string(rest, ")")
        idx_command = speed_optimized_functions.get_index_of_next_symbol_not_within_string(rest, ",")
        if idx_end == -1:
            # No closing ")" was found; leave the code unmodified.
            return code
        if idx_command == -1:
            idx_command = idx_end
        elif idx_command > idx_end:
            idx_command = idx_end
        assert_statement = rest[:idx_command]
        rest = rest[idx_end + 1:]
        # I add a "var <varname> =" statement here because function expressions can not be standalone.
        # E.g.:
        # assert.doesNotThrow(function() { Object.defineProperty(obj, key, { value: 'something', enumerable: true }); }, "Object.defineProperty uses ToPropertyKey. Property is added to the object");
        # would result in:
        # function() { ... }
        # This would throw an exception, but
        # var xyz = function() { ... }
        # doesn't.
        random_variable_name = ''.join(random.sample(string.ascii_lowercase, 10))
        if rest[0] == ";":
            rest = rest[1:]    # remove the ";"
            code = before + "var " + random_variable_name + "=" + assert_statement.strip() + ";" + rest
        else:
            code = before + "var " + random_variable_name + "=" + assert_statement.strip() + " " + rest
1.992188
2
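To make the assert-rewriting idea above concrete, here is a minimal sketch of the transformation that replace_assert_function() performs. This is a simplified stand-in, not the module's code: it ignores nested commas and string literals, which is exactly what speed_optimized_functions.get_index_of_next_symbol_not_within_string() handles in the real implementation, and the sample input is a made-up illustration.

# Simplified illustration of the assert-rewriting pass: turn
# assertFoo(a, b) into "a <op> b" so removing the test harness does
# not change which expressions get evaluated. Not robust -- it only
# handles the easy case with no nested commas or strings.
def naive_replace_assert(code, assert_name, comparison):
    marker = assert_name + "("
    while marker in code:
        idx = code.index(marker)
        rest = code[idx + len(marker):]
        arg1, _, tail = rest.partition(",")   # first argument
        arg2, _, tail = tail.partition(")")   # second argument; tail keeps the trailing ";"
        code = code[:idx] + arg1.strip() + " " + comparison + " " + arg2.strip() + tail
    return code

print(naive_replace_assert("assert.sameValue(typeof f, 'function');",
                           "assert.sameValue", "=="))
# -> typeof f == 'function';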
crazyflie_demo/scripts/mapping/mapper.py
wydmynd/crazyflie_tom
0
12893
<filename>crazyflie_demo/scripts/mapping/mapper.py
#!/usr/bin/env python

""" Simple occupancy-grid-based mapping without localization.

Subscribed topics:
/scan

Published topics:
/map
/map_metadata

Author: <NAME>
Version: 2/13/14
"""
import rospy
from nav_msgs.msg import OccupancyGrid, MapMetaData
from geometry_msgs.msg import Pose, Point, Quaternion
from sensor_msgs.msg import LaserScan

import numpy as np


class Map(object):
    """
    The Map class stores an occupancy grid as a two dimensional numpy array.

    Public instance variables:
        width      -- Number of columns in the occupancy grid.
        height     -- Number of rows in the occupancy grid.
        resolution -- Width of each grid square in meters.
        origin_x   -- Position of the grid cell (0,0) in
        origin_y   --    the map coordinate system.
        grid       -- numpy array with height rows and width columns.

    Note that x increases with increasing column number and y increases
    with increasing row number.
    """

    def __init__(self, origin_x=-2.5, origin_y=-2.5, resolution=.1,
                 width=50, height=50):
        """ Construct an empty occupancy grid.

        Arguments:
            origin_x, origin_y -- The position of grid cell (0,0) in the
                                  map coordinate frame.
            resolution         -- Width and height of the grid cells in meters.
            width, height      -- The grid will have height rows and width
                                  columns of cells. width is the size of
                                  the x-dimension and height is the size
                                  of the y-dimension.

        The default arguments put (0,0) in the center of the grid.
        """
        self.origin_x = origin_x
        self.origin_y = origin_y
        self.resolution = resolution
        self.width = width
        self.height = height
        self.grid = np.zeros((height, width))

    def to_message(self):
        """ Return a nav_msgs/OccupancyGrid representation of this map. """
        grid_msg = OccupancyGrid()

        # Set up the header.
        grid_msg.header.stamp = rospy.Time.now()
        grid_msg.header.frame_id = "map"

        # .info is a nav_msgs/MapMetaData message.
        grid_msg.info.resolution = self.resolution
        grid_msg.info.width = self.width
        grid_msg.info.height = self.height

        # Rotated maps are not supported... the quaternion represents no
        # rotation.
        grid_msg.info.origin = Pose(Point(self.origin_x, self.origin_y, 0),
                                    Quaternion(0, 0, 0, 1))

        # Flatten the numpy array into a list of integers from 0-100.
        # This assumes that the grid entries are probabilities in the
        # range 0-1. This code will need to be modified if the grid
        # entries are given a different interpretation (like log-odds).
        flat_grid = self.grid.reshape((self.grid.size,)) * 100
        grid_msg.data = list(np.round(flat_grid))
        return grid_msg

    def set_cell(self, x, y, val):
        """ Set the value of a cell in the grid.

        Arguments:
            x, y -- This is a point in the map coordinate frame.
            val  -- This is the value that should be assigned to the
                    grid cell that contains (x,y).

        This would probably be a helpful method! Feel free to throw out
        points that land outside of the grid.
        """
        pass


class Mapper(object):
    """
    The Mapper class creates a map from laser scan data.
    """

    def __init__(self):
        """ Start the mapper. """
        rospy.init_node('mapper')
        self._map = Map()

        # Setting the queue_size to 1 will prevent the subscriber from
        # buffering scan messages. This is important because the
        # callback is likely to be too slow to keep up with the scan
        # messages. If we buffer those messages we will fall behind
        # and end up processing really old scans. Better to just drop
        # old scans and always work with the most recent available.
        rospy.Subscriber('scan', LaserScan, self.scan_callback, queue_size=1)

        # Latched publishers are used for slow-changing topics like
        # maps. Data will sit on the topic until someone reads it.
        self._map_pub = rospy.Publisher('map', OccupancyGrid, latch=True)
        self._map_data_pub = rospy.Publisher('map_metadata', MapMetaData, latch=True)

        rospy.spin()

    def scan_callback(self, scan):
        """ Update the map on every scan callback. """
        # Fill some cells in the map just so we can see that something is
        # being published.
        self._map.grid[0, 0] = 1.0
        self._map.grid[0, 1] = .9
        self._map.grid[0, 2] = .7
        self._map.grid[1, 0] = .5
        self._map.grid[2, 0] = .3

        # Now that the map is updated, publish it!
        rospy.loginfo("Scan is processed, publishing updated map.")
        self.publish_map()

    def publish_map(self):
        """ Publish the map. """
        grid_msg = self._map.to_message()
        self._map_data_pub.publish(grid_msg.info)
        self._map_pub.publish(grid_msg)


if __name__ == '__main__':
    try:
        m = Mapper()
    except rospy.ROSInterruptException:
        pass
2.8125
3
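The set_cell() method above is deliberately left as a stub ("feel free to throw out points that land outside of the grid"). A possible body, offered here as a sketch rather than the author's solution, converts map-frame coordinates to row/column indices and silently drops out-of-bounds points:

# One way to fill in Map.set_cell() -- a sketch, not the original code.
def set_cell(self, x, y, val):
    # Convert map-frame coordinates to grid indices.
    col = int((x - self.origin_x) / self.resolution)
    row = int((y - self.origin_y) / self.resolution)
    # Throw out points that land outside of the grid.
    if 0 <= row < self.height and 0 <= col < self.width:
        self.grid[row, col] = val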
flags.py
oaxiom/glbase3
8
12894
"""
flags.py

. should be renamed helpers...
. This file is scheduled for deletion
"""

"""
valid accessory tags:

"any_tag": {"code": "code_insert_as_string"}  # execute arbitrary code to construct this key.
"dialect": csv.excel_tab                      # dialect of the file; default = csv. Set this to use tsv, or a sniffer.
"skip_lines": number                          # number of lines to skip at the head of the file.
"skiptill": <str>                             # skip until the first instance of <str> is seen.
"""

# lists of format-specifiers.
2.03125
2
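For illustration, a format specifier combining the accessory tags described above might look like the sketch below. The key names, column indices, and the "column" variable used inside the "code" string are hypothetical, made up purely to show the shape of such a dict; the real glbase format dicts may differ.

# Hypothetical format specifier using the accessory tags documented above.
import csv

example_format = {
    "name": 0,                              # plain key: take column 0 as-is
    "score": {"code": "float(column[1])"},  # "code" tag: execute arbitrary code to build this key
    "dialect": csv.excel_tab,               # parse the file as TSV
    "skip_lines": 1,                        # skip a one-line header
    # alternatively: "skiptill": "##data",  # skip until this marker first appears
}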
packages/jet_bridge/jet_bridge/app.py
goncalomi/jet-bridge
2
12895
<gh_stars>1-10
import os

import tornado.ioloop
import tornado.web

from jet_bridge.handlers.temporary_redirect import TemporaryRedirectHandler
from jet_bridge_base import settings as base_settings
from jet_bridge_base.views.api import ApiView
from jet_bridge_base.views.image_resize import ImageResizeView
from jet_bridge_base.views.file_upload import FileUploadView
from jet_bridge_base.views.message import MessageView
from jet_bridge_base.views.model import ModelViewSet
from jet_bridge_base.views.model_description import ModelDescriptionView
from jet_bridge_base.views.register import RegisterView
from jet_bridge_base.views.reload import ReloadView
from jet_bridge_base.views.sql import SqlView

from jet_bridge import settings, media
from jet_bridge.handlers.view import view_handler
from jet_bridge.handlers.not_found import NotFoundHandler
from jet_bridge.router import Router


def make_app():
    router = Router()
    router.register('/api/models/(?P<model>[^/]+)/', view_handler(ModelViewSet))

    urls = [
        (r'/', TemporaryRedirectHandler, {'url': "/api/"}),
        (r'/register/', view_handler(RegisterView)),
        (r'/api/', view_handler(ApiView)),
        (r'/api/register/', view_handler(RegisterView)),
        (r'/api/model_descriptions/', view_handler(ModelDescriptionView)),
        (r'/api/sql/', view_handler(SqlView)),
        (r'/api/messages/', view_handler(MessageView)),
        (r'/api/file_upload/', view_handler(FileUploadView)),
        (r'/api/image_resize/', view_handler(ImageResizeView)),
        (r'/api/reload/', view_handler(ReloadView)),
    ]

    urls += router.urls

    # Only serve media from disk when file storage is configured.
    if settings.MEDIA_STORAGE == media.MEDIA_STORAGE_FILE:
        urls.append((r'/media/(.*)', tornado.web.StaticFileHandler, {'path': settings.MEDIA_ROOT}))

    return tornado.web.Application(
        handlers=urls,
        debug=settings.DEBUG,
        default_handler_class=NotFoundHandler,
        template_path=os.path.join(base_settings.BASE_DIR, 'templates'),
        autoreload=settings.DEBUG
    )
1.960938
2
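For context, an application built by make_app() would be served with the usual Tornado event loop. The snippet below is a minimal sketch, not the package's actual entry point; the port value is a placeholder, and the real jet_bridge launcher likely reads it from settings.

# Minimal sketch of serving the app (hypothetical port; the real
# entry point for this package may configure things differently).
if __name__ == '__main__':
    app = make_app()
    app.listen(8888)  # placeholder port
    tornado.ioloop.IOLoop.current().start()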
openslides_backend/services/media/adapter.py
FinnStutzenstein/openslides-backend
0
12896
<reponame>FinnStutzenstein/openslides-backend
import requests

from ...shared.exceptions import MediaServiceException
from ...shared.interfaces.logging import LoggingModule
from .interface import MediaService


class MediaServiceAdapter(MediaService):
    """
    Adapter to connect to media service.
    """

    def __init__(self, media_url: str, logging: LoggingModule) -> None:
        self.logger = logging.getLogger(__name__)
        self.media_url = media_url + "/"

    def _upload(self, file: str, id: int, mimetype: str, subpath: str) -> None:
        url = self.media_url + subpath + "/"
        payload = {"file": file, "id": id, "mimetype": mimetype}
        self.logger.debug("Starting upload of file")
        try:
            response = requests.post(url, json=payload)
        except requests.exceptions.ConnectionError:
            msg = "Connect to mediaservice failed."
            self.logger.debug("Upload of file: " + msg)
            raise MediaServiceException(msg)
        if response.status_code != 200:
            msg = f"Mediaservice Error: {str(response.content)}"
            self.logger.debug("Upload of file: " + msg)
            raise MediaServiceException(msg)
        self.logger.debug("File successfully uploaded to the media service")

    def upload_mediafile(self, file: str, id: int, mimetype: str) -> None:
        subpath = "upload_mediafile"
        self._upload(file, id, mimetype, subpath)

    def upload_resource(self, file: str, id: int, mimetype: str) -> None:
        subpath = "upload_resource"
        self._upload(file, id, mimetype, subpath)
2.515625
3
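A usage sketch for the adapter follows. Everything here is assumed: the URL is a placeholder, the file body is assumed to be base64-encoded (the adapter itself only forwards whatever string it is given), and Python's standard logging module stands in for the project's LoggingModule, which works only because the adapter merely calls getLogger() on it.

# Hedged usage sketch (placeholder URL, stand-in logging module).
import base64
import logging

adapter = MediaServiceAdapter("http://localhost:9006", logging)  # hypothetical URL
with open("example.pdf", "rb") as f:
    encoded = base64.b64encode(f.read()).decode("ascii")  # assumed encoding
adapter.upload_mediafile(encoded, id=1, mimetype="application/pdf")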
Lib/async/test/test_echoupper.py
pyparallel/pyparallel
652
12897
<reponame>pyparallel/pyparallel
import async
from async.services import EchoUpperData

server = async.server('10.211.55.3', 20007)
async.register(transport=server, protocol=EchoUpperData)
async.run()
1.804688
2
alerter/src/alerter/alert_code/node/evm_alert_code.py
SimplyVC/panic
41
12898
from ..alert_code import AlertCode


class EVMNodeAlertCode(AlertCode):
    NoChangeInBlockHeight = 'evm_node_alert_1'
    BlockHeightUpdatedAlert = 'evm_node_alert_2'
    BlockHeightDifferenceIncreasedAboveThresholdAlert = 'evm_node_alert_3'
    BlockHeightDifferenceDecreasedBelowThresholdAlert = 'evm_node_alert_4'
    InvalidUrlAlert = 'evm_node_alert_5'
    ValidUrlAlert = 'evm_node_alert_6'
    NodeWentDownAtAlert = 'evm_node_alert_7'
    NodeBackUpAgainAlert = 'evm_node_alert_8'
    NodeStillDownAlert = 'evm_node_alert_9'
1.648438
2
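Downstream alerting code can key off these codes by simple membership tests. The helper below is a hypothetical sketch, not part of the package; it works whether the codes are plain string attributes or enum members, since it only compares the values defined above.

# Hypothetical helper grouping the downtime-related codes defined above.
def is_downtime_alert(code) -> bool:
    return code in (
        EVMNodeAlertCode.NodeWentDownAtAlert,
        EVMNodeAlertCode.NodeBackUpAgainAlert,
        EVMNodeAlertCode.NodeStillDownAlert,
    )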
blaze/compute/tests/test_pmap.py
jdmcbr/blaze
1
12899
from blaze import compute, resource, symbol, discover
from blaze.utils import example

flag = [False]


def mymap(func, *args):
    flag[0] = True
    return map(func, *args)


def test_map_called_on_resource_star():
    r = resource(example('accounts_*.csv'))
    s = symbol('s', discover(r))
    flag[0] = False
    a = compute(s.count(), r)
    b = compute(s.count(), r, map=mymap)
    assert a == b
    assert flag[0]
2.390625
2
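The test above verifies that compute() honors a caller-supplied map function when processing a glob of chunked resources. The same hook is what makes parallel execution possible: a sketch follows, assuming the expression's workload is picklable so it can cross process boundaries.

# Sketch: reuse the map-injection hook to spread chunk processing
# across worker processes (assumes a picklable workload).
import multiprocessing

def parallel_count(expr, data):
    pool = multiprocessing.Pool()
    try:
        return compute(expr, data, map=pool.map)  # pool.map replaces the builtin map
    finally:
        pool.close()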