# -*- coding: utf-8 -*-
##########################################################################
#
# Copyright (c) 2022 Baidu.com, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##########################################################################
"""Losses"""

from .cross_entropy_loss import CrossEntropyLoss
from .mixed_loss import MixedLoss
from .binary_cross_entropy_loss import BinaryCrossEntropyLoss

"""
Aim of this program:
Get announcements and news from https://ogrisl.erciyes.edu.tr
and send the latest announcement by mail to a given e-mail address.
"""
# date: 3 March 2022
# author: BurakEselik

import news
import mail
from news import BASE_URL
from time import sleep
import json
import datetime
from plyer import notification
import sys


def getDifferenceNumber() -> int:
    with open("old_news.json", "r", encoding="utf-8") as old_news, open("new_news.json", "r", encoding="utf-8") as new_news:
        old = json.load(old_news)
        new = json.load(new_news)
        for i in range(1, 11):
            if old["1"]["date"] == new[str(i)]["date"] and old["1"]["title"] == new[str(i)]["title"]:
                return i
        else:
            with open("log.txt", "a", encoding="utf-8") as log:
                log.write("\nLast announcements deleted or updated")
            notification.notify(
                title="ERU NEWS",
                message=" Last announcements deleted or updated ",
                timeout=90)
            return 0


def setContent(df_number) -> dict:
    content_dict = dict()
    with open("new_news.json", "r", encoding="utf-8") as new_news:
        new = json.load(new_news)
        for i in range(1, df_number):
            content_dict[str(i)] = new[str(i)]
    return content_dict


def sendMail(content_dict: dict) -> None:
    for i in range(1, len(content_dict) + 1):
        new_mail = mail.SendMail()
        title = content_dict[str(i)]["title"]
        link = BASE_URL[:-1] + content_dict[str(i)]["link"]
        new_mail.setMessage(title=title, link=link)
        new_mail.send()


def main() -> None:
    new_news = news.News()
    new_news.run()  # run() must actually be called, not just referenced
    df_number = getDifferenceNumber()
    if df_number > 1:
        content_dict = setContent(df_number=df_number)
        sendMail(content_dict)
    else:
        current_time = datetime.datetime.now()
        with open("log.txt", "a", encoding="utf-8") as log:
            log.write(f"\nThere is no new announcement: {current_time}")
        del current_time


if __name__ == "__main__":
    while 1:
        try:
            main()
            with open("settings.json", "r", encoding="utf-8") as settings:
                setting = json.load(settings)
            timer = setting["repeat_timer"]
            sleep(timer)
        except Exception as e:
            with open("log.txt", "a", encoding="utf-8") as log:
                log.write(f"\nProgram closed because of this error: {e}")
            notification.notify(
                title="ERU NEWS STOPPED",
                message=" Eru news checker program just closed! ",
                timeout=100)
            sys.exit()
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import absolute_import, division, print_function import oneflow as flow BLOCK_COUNTS = [3, 4, 6, 3] BLOCK_FILTERS = [256, 512, 1024, 2048] BLOCK_FILTERS_INNER = [64, 128, 256, 512] def _conv2d( name, input, filters, kernel_size, strides=1, padding="SAME", data_format="NCHW", dilations=1, trainable=True, weight_initializer=flow.variance_scaling_initializer(data_format="NCHW"), ): weight = flow.get_variable( name + "-weight", shape=(filters, input.shape[1], kernel_size, kernel_size), dtype=input.dtype, initializer=weight_initializer, trainable=trainable, ) return flow.nn.conv2d( input, weight, strides, padding, None, data_format, dilations, name=name ) def _batch_norm(inputs, name=None, trainable=True): return flow.layers.batch_normalization( inputs=inputs, axis=1, momentum=0.997, epsilon=1.001e-5, center=True, scale=True, trainable=trainable, name=name, ) def conv2d_affine(input, name, filters, kernel_size, strides, activation=None): # input data_format must be NCHW, cannot check now padding = "SAME" if strides > 1 or kernel_size > 1 else "VALID" output = _conv2d(name, input, filters, kernel_size, strides, padding) output = _batch_norm(output, name + "_bn") if activation == "Relu": output = flow.math.relu(output) return output def bottleneck_transformation(input, block_name, filters, filters_inner, strides): a = conv2d_affine( input, block_name + "_branch2a", filters_inner, 1, 1, activation="Relu", ) b = conv2d_affine( a, block_name + "_branch2b", filters_inner, 3, strides, activation="Relu", ) c = conv2d_affine(b, block_name + "_branch2c", filters, 1, 1) return c def residual_block(input, block_name, filters, filters_inner, strides_init): if strides_init != 1 or block_name == "res2_0": shortcut = conv2d_affine( input, block_name + "_branch1", filters, 1, strides_init ) else: shortcut = input bottleneck = bottleneck_transformation( input, block_name, filters, filters_inner, strides_init ) return flow.math.relu(bottleneck + shortcut) def residual_stage(input, stage_name, counts, filters, filters_inner, stride_init=2): output = input for i in range(counts): block_name = "%s_%d" % (stage_name, i) output = residual_block( output, block_name, filters, filters_inner, stride_init if i == 0 else 1, ) return output def resnet_conv_x_body(input, on_stage_end=lambda x: x): output = input for i, (counts, filters, filters_inner) in enumerate( zip(BLOCK_COUNTS, BLOCK_FILTERS, BLOCK_FILTERS_INNER) ): stage_name = "res%d" % (i + 2) output = residual_stage( output, stage_name, counts, filters, filters_inner, 1 if i == 0 else 2, ) on_stage_end(output) return output def resnet_stem(input): conv1 = _conv2d("conv1", input, 64, 7, 2) conv1_bn = flow.math.relu(_batch_norm(conv1, "conv1_bn")) pool1 = flow.nn.max_pool2d( conv1_bn, ksize=3, strides=2, padding="VALID", data_format="NCHW", name="pool1", ) return pool1 def resnet50(images, trainable=True): with flow.scope.namespace("Resnet"): stem = resnet_stem(images) body = 
resnet_conv_x_body(stem, lambda x: x) pool5 = flow.nn.avg_pool2d( body, ksize=7, strides=1, padding="VALID", data_format="NCHW", name="pool5", ) fc1001 = flow.layers.dense( flow.reshape(pool5, (pool5.shape[0], -1)), units=1001, use_bias=True, kernel_initializer=flow.xavier_uniform_initializer(), bias_initializer=flow.zeros_initializer(), trainable=trainable, name="fc1001", ) return fc1001
'''
Module containing the lint command group, and all its subcommands
'''
import logging

import click

from jira_offline.cli.params import CliParams, filter_option, global_options
from jira_offline.jira import jira
from jira_offline.linters import fix_versions as lint_fix_versions
from jira_offline.linters import issues_missing_epic as lint_issues_missing_epic
from jira_offline.utils.cli import print_list


logger = logging.getLogger('jira')


@click.group(name='lint')
@click.option('--fix', is_flag=True, help='Attempt to fix the errors automatically')
@click.pass_context
@global_options
@filter_option
def cli_lint(ctx: click.core.Context, fix: bool=False):
    'Report on common mistakes in Jira issues'
    ctx.obj.lint = CliParams.LintParams(fix=fix)

    # load issues here for all subcommands in the group
    jira.load_issues()

    if jira.df.empty:
        click.echo('No issues in the cache', err=True)
        raise click.Abort


@cli_lint.command(name='fix-versions')
@click.option('--value', help='Value set in fix_versions. Used with --fix.')
@click.pass_context
@global_options
@filter_option
def cli_lint_fix_versions(ctx: click.core.Context, value: str=None):
    '''
    Lint on missing fix_versions field
    '''
    if ctx.obj.lint.fix and not value:
        raise click.BadParameter('You must pass --value with --fix', ctx)

    if value:
        if not ctx.obj.lint.fix:
            logger.warning('Passing --value without --fix has no effect')

    # query issues missing the fix_versions field
    df = lint_fix_versions(fix=False)

    initial_missing_count = len(df)

    if ctx.obj.lint.fix:
        df = lint_fix_versions(fix=ctx.obj.lint.fix, value=value)
        click.echo(f'Updated fix_versions on {initial_missing_count - len(df)} issues')
    else:
        click.echo(f'There are {len(df)} issues missing the fix_versions field')

    if ctx.obj.verbose:
        print_list(df)


@cli_lint.command(name='issues-missing-epic')
@click.option('--epic-link', help='Epic to set on issues with no epic. Used with --fix.')
@click.pass_context
@global_options
@filter_option
def cli_lint_issues_missing_epic(ctx: click.core.Context, epic_link: str=None):
    '''
    Lint issues without an epic set
    '''
    if ctx.obj.lint.fix and not epic_link:
        raise click.BadParameter('You must pass --epic_link with --fix', ctx)

    if epic_link:
        if not ctx.obj.lint.fix:
            logger.warning('Passing --epic-link without --fix has no effect')

    # query issues missing the epic field
    df = lint_issues_missing_epic(fix=False)

    initial_missing_count = len(df)

    if ctx.obj.lint.fix:
        df = lint_issues_missing_epic(fix=ctx.obj.lint.fix, epic_link=epic_link)
        click.echo(f'Set epic to {epic_link} on {initial_missing_count - len(df)} issues')
    else:
        click.echo(f'There are {len(df)} issues missing an epic')

    if ctx.obj.verbose:
        print_list(df)
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: skip-file
from msrest.serialization import Model


class ServiceRunner(Model):
    """A container for a managed identity to execute DevTest lab services.

    :param identity: The identity of the resource.
    :type identity: :class:`IdentityProperties
     <azure.mgmt.devtestlabs.models.IdentityProperties>`
    :param id: The identifier of the resource.
    :type id: str
    :param name: The name of the resource.
    :type name: str
    :param type: The type of the resource.
    :type type: str
    :param location: The location of the resource.
    :type location: str
    :param tags: The tags of the resource.
    :type tags: dict
    """

    _attribute_map = {
        'identity': {'key': 'identity', 'type': 'IdentityProperties'},
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, identity=None, id=None, name=None, type=None, location=None, tags=None):
        self.identity = identity
        self.id = id
        self.name = name
        self.type = type
        self.location = location
        self.tags = tags
from datetime import datetime
import unittest

from stuff.core import Stuff, Coordinates
from stuff.maps import Charter


class StuffCharterTestCase(unittest.TestCase):
    def setUp(self):
        self.stuffs = [
            Stuff(
                url="https://newyork.craigslist.org/brk/zip/d/free-couch/1234556.html",
                title="free couch",
                time=datetime(2019, 9, 13, 13, 31),
                price=0,
                neighborhood="Clinton Hill",
                city="newyork",
                coordinates=Coordinates(longitude='-73.947000', latitude='40.546700'),
                image_urls=['https://images.craigslist.org/someother.jpg']
            ),
            Stuff(
                url="https://newyork.craigslist.org/brk/zip/d/free-boxes-and-packing-supplies/6978063787.html",
                title="FREE BOXES and PACKING SUPPLIES",
                time=datetime(2019, 9, 13, 16, 31),
                price=0,
                city="newyork",
                neighborhood="Clinton Hill",
                coordinates=Coordinates(longitude='-73.957000', latitude='40.646700'),
                image_urls=['https://images.craigslist.org/00L0L_5e2M7zY0JYR_600x450.jpg']
            )
        ]

    def test_stuff_charter(self):
        charter = Charter(
            stuffs=self.stuffs,
            city="newyork",
        )
        charter.create_map()
        self.assertEqual(len(charter.map.to_dict()["children"]), 3)
        self.assertEqual(charter.map.to_dict()["name"], "Map")

    @unittest.skip("This test has no fake/mocked parts, don't run wiley niley")
    def test_stuff_charter_address(self):
        charter = Charter(
            stuffs=self.stuffs,
            city="newyork",
            address="420 Clinton Ave"
        )
        charter.create_map()
        self.assertEqual(len(charter.map.to_dict()["children"]), 4)
        self.assertEqual(charter.map.to_dict()["name"], "Map")

    def test_stuff_charter_save_map(self):
        charter = Charter(
            stuffs=self.stuffs,
            city="newyork",
        )
        charter.create_map()
        map_path = charter.save_map()
        with open(map_path, "r") as fid:
            self.assertEqual(fid.read()[0:15], "<!DOCTYPE html>")
        self.assertEqual(len(charter.map.to_dict()["children"]), 3)
        self.assertEqual(charter.map.to_dict()["name"], "Map")
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Kubernetes codegen tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

# Export this package's modules as members:
from .StorageClass import *
from .StorageClassList import *
from .VolumeAttachment import *
from .VolumeAttachmentList import *
import abc

from dockstream.containers.target_preparation_container import TargetPreparationContainer
from dockstream.utils.enums.target_preparation_enum import TargetPreparationEnum
from dockstream.loggers.target_preparation_logger import TargetPreparationLogger
from dockstream.loggers.blank_logger import BlankLogger
from dockstream.utils.enums.logging_enums import LoggingConfigEnum


class TargetPreparator(metaclass=abc.ABCMeta):
    """Virtual base class implementing the interface for all specific target preparators
    and the general preparation of the docking target."""

    def __init__(self, conf: TargetPreparationContainer, run_number=0):
        self._TE = TargetPreparationEnum()
        self._TL = LoggingConfigEnum()
        self._logger = TargetPreparationLogger()
        self._logger_blank = BlankLogger()
        self._config = conf
        self._target = None

        # store the specific parameters for this very run for easy access later; the others are ignored
        self._run_parameters = self._config[self._TE.TARGETPREP][self._TE.RUNS][run_number]

    def get_target(self):
        return self._target

    def specify_cavity(self):
        raise NotImplementedError("This method needs to be overwritten by child classes.")

    def write_target(self, path):
        raise NotImplementedError("This method needs to be overwritten by child classes.")
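
# Illustrative sketch (not part of the original dockstream module): a hypothetical concrete
# subclass showing how the TargetPreparator interface above is meant to be filled in.
# The class name, the "input"/"cavity" config keys and the plain-text write-out are
# assumptions made only for demonstration; real preparators will differ.
class ExampleTargetPreparator(TargetPreparator):
    """Toy preparator that stores the raw target and a fixed cavity definition."""

    def __init__(self, conf: TargetPreparationContainer, run_number=0):
        super().__init__(conf, run_number=run_number)
        self._target = self._run_parameters.get("input")    # assumed config key
        self._cavity = None

    def specify_cavity(self):
        # a real implementation would derive the binding-site definition here
        self._cavity = self._run_parameters.get("cavity")   # assumed config key

    def write_target(self, path):
        with open(path, "w") as out_file:
            out_file.write(str(self._target))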
from mlx.hw_i2c_hal import HwI2cHalMlx90640 import mlx.pympt as pympt from mlx.pympt.core import * import serial.tools.list_ports import sys import time import struct from math import ceil USB_VID = 1001 USB_PID = 32 class Mlx90640Commands: CMD_ResetHardware = bytes([0]) CMD_GetHardwareID = bytes([1]) CMD_GetSoftwareID = bytes([3]) CMD_ReadEEPROM = bytes([10]) CMD_I2C_Master_SW = bytes([31]) CMD_SetVdd = bytes([150]) CMD_StopDAQ = bytes([152]) CMD_ReadDAQ = bytes([153]) CMD_MeasureVddIdd = bytes([156]) CMD_StartDAQ_90640 = bytes([171]) CMD_I2C_Master_90640 = bytes([174]) CMD_StartDAQ_90641 = bytes([179]) class HwUsbEvb90640(HwI2cHalMlx90640): __command_response_pairs = { # name: (command to send, expected response) byte count and crc not included "init_SW_I2C": ([0x1E, 2, 0, 0, 0, 6, 0, 0, 0, 8, 0, 0, 0, 5, 0, 0, 0], [0x1E]), "begin_conversion": ([0xAE, 0x33, 0x80, 0x00, 0x00, 0x20, 0x00, 0x00], [0xAE, 0x00]), } def __init__(self, comport=None): self.support_buffer = True self.frames_buffer = [] self.m_lDaqFrameIdx = 0 self.frame_length_bytes = None self.comport = comport if comport is None or comport == 'auto': comports = HwUsbEvb90640.list_serial_ports(USB_PID, USB_VID) if len(comports) < 1: raise ValueError("no EVB90640 found; please connect to USB port") if len(comports) > 1: print("WARN: found more than one EVB90640 {}, using the first one".format(comports)) self.comport = comports[0] print("comport = {}".format (self.comport)) self.channel = pympt.UsbSerialChannel() self.channel.connect(self.comport) @staticmethod def list_serial_ports(pid=None, vid=None): """ Lists serial port names which startswith serial_number like in argument. :raises EnvironmentError: On unsupported or unknown platforms :returns: A list of the serial ports available on the system """ ports = [] if sys.platform.startswith('win') or sys.platform.startswith('linux') or sys.platform.startswith('cygwin'): for sp in serial.tools.list_ports.comports(): if pid is None or vid is None: ports.append(str(sp.device)) else: if sp.vid == vid and sp.pid == pid: ports.append(str(sp.device)) return ports elif sys.platform.startswith('darwin'): import glob ports = glob.glob('/dev/tty.*') else: raise EnvironmentError('Unsupported platform') result = [] for port in ports: try: s = serial.Serial(port) s.close() result.append(port) except (OSError, serial.SerialException): pass return result def __send_buffered_command(self, name): """this function should be removed in the future""" resp = self.channel.send_command(bytes(HwUsbEvb90640.__command_response_pairs[name][0])) if list(bytes(resp)) != HwUsbEvb90640.__command_response_pairs[name][1]: raise ValueError("Did not get expected response from EVB, {} != {}" .format(HwUsbEvb90640.__command_response_pairs[name][1], list(bytes(resp)))) def connect(self): """ Do the necessary initialisation before measurements are taken procedure: poll until HW id received (timeout??) 
init SW I2C set vdd I2C: set refresh rate start conversion set evb refresh rate """ timed_out = True for i in range(5): try: if len(self.get_hardware_id()) != 0: timed_out = False break except pympt.MptException: pass time.sleep(1) if timed_out: raise NotConnectedException("The command timed out while attempting to get HW id") # TODO move out of function self.set_vdd(3.3) self.__send_buffered_command("init_SW_I2C") self.__send_buffered_command("begin_conversion") self.channel.send_command(bytes([0xAE, 0x33, 0x24, 0x00, 0x80, 0x06])) self.channel.send_command(bytes([0xAE, 0x33, 0x80, 0x00, 0x22, 0x00])) self.get_sensor_type(0x33) if self.sensor_type == 0: self.frame_length_bytes = 32 * 26 * 2 if self.sensor_type == 1: self.frame_length_bytes = 16 * 16 * 2 def read_frame(self, i2c_addr): if self.frames_buffer is not None and len(self.frames_buffer) > 0: return self.frames_buffer.pop(0) self.frames_buffer = self.read_frames() if self.frames_buffer is None: return None return self.frames_buffer.pop(0) def start_data_acquisition(self, i2c_addr, frame_rate_hz): wait_us = ceil(1000000 / frame_rate_hz) if self.sensor_type == 0: cmd = Mlx90640Commands.CMD_StartDAQ_90640 + struct.pack("<BL", i2c_addr, wait_us) result = self.channel.send_command(cmd) if result != bytes([0xAB, 0x00]): raise Exception("Error during execution of command on the EVB") else: cmd = Mlx90640Commands.CMD_StartDAQ_90641 + struct.pack("<BL", i2c_addr, wait_us) result = self.channel.send_command(cmd) if result != bytes([0xB3, 0x00]): raise Exception("Error during execution of command on the EVB") def get_hardware_id(self): return self.channel.send_command(Mlx90640Commands.CMD_GetHardwareID) def set_vdd(self, vdd): """Set Vdd of the sensor""" cmd = Mlx90640Commands.CMD_SetVdd + bytes([0]) cmd = cmd + struct.pack("<f", float(vdd)) self.channel.send_command(cmd) def read_frames(self): """ Sends a read request to the EVB. If the EVB has buffered any frames (the EVB will buffer up to 4 frames) a list of them is returned. If no frames have been buffered None will be returned. a frame is an array with size 32 * 26 containing signed 16 bit integers. 
:return: list of frames (each frame is a list of 32 * 26 signed 16 bit ints) or None :raises: ValueError - data received from EVB is not at the expected length, or no data is received """ data = self.channel.send_command(Mlx90640Commands.CMD_ReadDAQ + bytes([0])) if data[1] != 0: raise ValueError("EVB90640: EVB Frame buffer full") frames = None received_data_len = len(data) - 2 if received_data_len >= self.frame_length_bytes: # one or more frames have been received if received_data_len % self.frame_length_bytes != 0: raise ValueError("Invalid data length from EVB") frames = [] for i in range(received_data_len // self.frame_length_bytes): self.m_lDaqFrameIdx += 1 frame_data_start = 2 + i * self.frame_length_bytes frame_data_end = 2 + (i + 1) * self.frame_length_bytes frame_data = data[frame_data_start: frame_data_end] frame = list(struct.unpack(">" + str(self.frame_length_bytes // 2) + "h", frame_data)) frames.append(frame) return frames def i2c_read(self, i2c_addr, addr, count=1): """ Read consecutive data from the device :param int i2c_addr: the I2C slave address of the MLX9064x :param int addr: the start address of the data :param int count: the number of consecutive bytes to be read :return: tuple(result_data[], acknowledge_errors) """ cmd = Mlx90640Commands.CMD_I2C_Master_90640 + \ struct.pack(">BH", i2c_addr, addr) + struct.pack("<H", count) result = self.channel.send_command(cmd) return result[2:], result[1] def i2c_write(self, i2c_addr, addr, data): """ Write consecutive data to the device :param int i2c_addr: the I2C slave address of the MLX9064x :param int addr: the start address of the data :param bytes data: data to send :return: acknowledge_errors """ cmd = Mlx90640Commands.CMD_I2C_Master_90640 + \ struct.pack(">BH", i2c_addr, addr) + data + struct.pack("H", 0) result = self.channel.send_command(cmd) return result[1] def clear_error(self, i2c_addr, frame_rate_hz): self.channel.send_command(Mlx90640Commands.CMD_StopDAQ) self.set_vdd(3.3) wait_us = ceil(1000000 / frame_rate_hz) cmd = Mlx90640Commands.CMD_StartDAQ_90640 + struct.pack("<BL", i2c_addr, wait_us) self.channel.send_command(cmd) def measure_vdd(self): """ Measure Vdd of the sensor @:return vdd """ data = self.channel.send_command(Mlx90640Commands.CMD_MeasureVddIdd) vdd = struct.unpack('<f', data[1:5])[0] return vdd
'''
lambdata - a collection of data science helper functions
'''

import pandas as pd
import numpy as np

# # sample code
# ONES = pd.DataFrame(np.ones(10))
# ZEROS = pd.DataFrame(np.zeros(50))

# helper functions
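
# Illustrative placeholder (not from the original lambdata package): one example of the
# kind of helper this module is meant to collect - a per-column null count report.
# The function name is made up for demonstration only.
def null_report(df):
    """Return a pandas Series with the number of nulls in each column of df."""
    return df.isnull().sum()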
from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware

from {{cookiecutter.package_dir}}.api import api_router
from {{cookiecutter.package_dir}}.config import settings

app = FastAPI(title=settings.PROJECT_NAME, openapi_url="/openapi.json")

# Set all CORS enabled origins
if settings.BACKEND_CORS_ORIGINS:
    app.add_middleware(
        CORSMiddleware,
        allow_origins=[str(origin) for origin in settings.BACKEND_CORS_ORIGINS],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

app.include_router(api_router)
# This script performs various tests on Result objects

import numpy as np
import atomica as at
import matplotlib.pyplot as plt
import os
import sciris as sc
import pytest

testdir = at.parent_dir()
tmpdir = testdir / "temp"


def test_export():
    P = at.demo("tb", do_run=False)
    P.run_sim(parset="default", result_name="parset1", store_results=True)
    P.run_sim(parset="default", result_name="parset2", store_results=True)
    instructions = at.ProgramInstructions(start_year=2018)
    P.run_sim(parset="default", progset="default", progset_instructions=instructions, result_name="progset1", store_results=True)
    P.run_sim(parset="default", progset="default", progset_instructions=instructions, result_name="progset2", store_results=True)

    # Test export single
    at.export_results(P.results["parset1"], tmpdir / "export_parset.xlsx")
    at.export_results(P.results["progset1"], tmpdir / "export_progset.xlsx")

    # Test export multi
    at.export_results(P.results, tmpdir / "export_multi.xlsx")

    output_ordering = ("pop", "output", "result")
    cascade_ordering = ("pop", "stage", "result")
    program_ordering = ("quantity", "program", "result")
    at.export_results(P.results, tmpdir / "export_multi_reordered.xlsx", output_ordering=output_ordering, cascade_ordering=cascade_ordering, program_ordering=program_ordering)

    # Test raw exports
    P.results["parset1"].export_raw(tmpdir / "export_raw_parset.xlsx")
    P.results["progset1"].export_raw(tmpdir / "export_raw_progset.xlsx")


if __name__ == "__main__":
    test_export()
from typing import List from fastapi import FastAPI, Query from pydantic import BaseModel from starlette.responses import JSONResponse from joblib import load import pandas as pd from enum import Enum # Setup a variable to introduce and describe the projectDescription = """ The **Beer Type Prediction Project** uses a trained Machine Learning Model to accurately predict a type of beer based on the following set of review criteria: * **Brewery Name** (_The name of the brewery producing the beer_). * **Aroma Review Score** (_The review score given to the beer regarding the aroma of the beer (on a scale from 0 to 5)_). * **Appearance Review Score** (_The review score given to the beer regarding the appearance of the beer (on a scale from 0 to 5)_). * **Palate Review Score** (_The review score given to the beer regarding the palate of the beer (on a scale from 0 to 5)_). * **Taste Review Score** (_The review score given to the beer regarding the taste of the beer (on a scale from 0 to 5)_). * **Beer ABV** (_The alcohol by volume measurement of the beer_). ### API Endpoints This API provides the following list of endpoints that provide API status reports and interactions with the Machine Learning model: **Status** * **/health** - _Check the Operational status of the API_. **Predictions** * **/beer/type** - _Predict the type of beer based on a single set of inputs_. * **/beers/type** - _Predict the type of beer for one or more sets of inputs_. Please refer to the **Parameters** and **Responses** section of each endpoint for details of the inputs and response(s) of the API ### Using the API * Expand the endpoint you would like to interact with * Click the "Try it Out" button (found on the right hand side in the **Parameters** section of each endpoint) * Fill in all of the Required API Parameters with the **Parameters** section of each endpoint * Click the "Execute" button (found at the bottom of the **Parameters** section of each endpoint) ### API Results * The API response will be found in the Response Body of the **Reponses** section of each endpoint ### GitHub Repo Link The source for this project can be found on GitHub at: [https://github.com/seanbwilliams/beer_type_prediction](https://github.com/seanbwilliams/beer_type_prediction) """ # Create a list containing API section tags and associated descriptions tags_metadata = [ { "name": "status", "description": "Provides API status operations." 
}, { "name": "predictions", "description": "Provides interactions with the Machine Learning model" } ] # Create a Class to enumerate review ratings class ReviewRating(str, Enum): zero = "0" zerofive = "0.5" one = "1" onefive = "1.5" two = "2" twofive = "2.5" three = "3" threefive = "3.5" four = "4" fourfive = "4.5" five = "5" # Create a Class to Model a single beer type response class SingleResponse(BaseModel): beer_type: str # Create a Class to Model a multiple beer type responses class BeerModel(BaseModel): brewery_name: str review_aroma: float review_appearance: float review_palate: float review_taste: float beer_abv: float beer_type: float # Create a Class to Model a multiple beer type responses class MultiResponse(BaseModel): beers: List[BeerModel] # Initialise the API with some description attributes app = FastAPI(title="Beer Type Prediction", description=projectDescription, version="0.0.1", docs_url="/", redoc_url=None, openapi_tags=tags_metadata) # Load Preprocessing Pipeline preproc = load('../models/preproc_beer_type_prediction.joblib') # Load Target Encoder targetEncoder = load('../models/target_encoder.joblib') # Load Pre-trained XGBoost Classifier clf_xgb = load('../models/xgb_beer_type_prediction.joblib') # Function to convert datatype of passed in objects def convert(input): converters = [int, float] for converter in converters: try: return converter(input) except (TypeError, ValueError): pass return input # Function to format features def format_features(brewery_name: str, review_aroma: float, review_appearance: float, review_palate: float, review_taste: float, beer_abv: float, single_multi: str = 'SINGLE'): if single_multi == 'SINGLE': return { 'brewery_name': [brewery_name], 'review_aroma': [review_aroma], 'review_appearance': [review_appearance], 'review_palate': [review_palate], 'review_taste': [review_taste], 'beer_abv': [beer_abv] } else: return { 'brewery_name': brewery_name, 'review_aroma': review_aroma, 'review_appearance': review_appearance, 'review_palate': review_palate, 'review_taste': review_taste, 'beer_abv': beer_abv } # Define the Health endpoint @app.get('/health', status_code=200, tags=["status"], summary="Check the Operational status of the API") def healthcheck(): return 'The Beer Type Prediction Project is ready to go' # Define the Single Beer Review endpoint @app.get("/beer/type/", tags=["predictions"], response_model=SingleResponse, summary="Predict the type of beer based on a single set of inputs") def predict(brewery_name: str, review_aroma: ReviewRating, review_appearance: ReviewRating, review_palate: ReviewRating, review_taste: ReviewRating, beer_abv: float): """ This API provides a beer type prediction for a single beer review. The API accepts 6 individual parameters for each of the review criteria listed below. 
brewery_name, review_aroma, review_appearance, review_palate, review_taste, beer_abv """ # Format features features = format_features(brewery_name, review_aroma, review_appearance, review_palate, review_taste, beer_abv) # Create a Pandas Dataframe of Observations df_rawobs = pd.DataFrame(features) # Preprocess dataframe data procobs = preproc.transform(df_rawobs) # Make Predictions on data pred = clf_xgb.predict(procobs) # Get the Label corresponding to the prediction predenc = targetEncoder.inverse_transform(pred) # Create a response from the predictions responseDict = {"beer_type": beer_type for beer_type in predenc} # Return the response return JSONResponse(responseDict) # Define the Multiple Beer Reviews endpoint @app.get("/beers/type/", tags=["predictions"], response_model=MultiResponse, summary="Predict the type of beer for one or more sets of inputs") def predict(beers: List[str] = Query(..., description="A comma separated string of values representing a single beer review. This endpoint accepts multiple beer reviews")): """ This API provides beer type predictions for one or more beer reviews. Each review is provided to the API in the form of a comma seperated string of values that take the following form: {brewery_name},{review_aroma},{review_appearance},{review_palate},{review_taste},{beer_abv} e.g. Caldera Brewing Company,2.5,2,1.5,5,5.8 Click on the "Add string item" button (found in the **Parameters** section of the endpoint) to supply one or more beer reviews to the API or the "-" (minus) button to remove a beer review """ # Create a list to store passed in observations featuresList = [] # Iterate over passed in parameters, split, convert to appropriate datatypes and append to List for beer in beers: # Convert the CSV String to a List with correct datatypes beerList = [convert(x) for x in beer.split(",")] # Create a dictionary of features features = format_features(beerList[0], beerList[1], beerList[2], beerList[3], beerList[4], beerList[5], 'MULTI') # Append the dictionary to the features list featuresList.append(features) # Create a Pandas Dataframe of Observations df_rawobs = pd.DataFrame(featuresList) # Preprocess dataframe data procobs = preproc.transform(df_rawobs) # Make Predictions on data preds = clf_xgb.predict(procobs) # Get the Labels corresponding to the predictions predsenc = targetEncoder.inverse_transform(preds) # Create a response from the predictions for idx, predenc in enumerate(predsenc): featuresList[idx].update({"beer_type": predenc}) # Return the response return JSONResponse({"beers": featuresList})
# -*- coding: utf-8 -*-
"""
    zju_news_alerts.wrapper
    ~~~~~~~~~~~~~~~~~~~~~

    Here lies all the decorators.

    :author: qwezarty
    :date: 11:41 am Aug 8 2019
    :email: qwezarty@gmail.com
"""
import os
import smtplib
from email.message import EmailMessage


def mail_errors(func):
    def mail_errors_wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as error:
            msg = EmailMessage()
            # setting "Subject" twice would add a duplicate header, so only one value is kept
            msg["Subject"] = "error occurred in zna"
            msg["From"] = "zju.news.alerts@outlook.com"
            msg["To"] = "qwezarty@gmail.com"
            # msg["Content-type"] = "text/html"
            msg.set_content(str(error))
            with smtplib.SMTP('smtp.office365.com', 587) as s:
                s.starttls()
                s.login("zju.news.alerts@outlook.com", os.environ["ZJU_PASS"])
                s.send_message(msg)
    return mail_errors_wrapper
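
# Illustrative usage sketch (not part of the original module): wrapping a job function so
# that any uncaught exception is e-mailed by mail_errors instead of killing the process
# silently. The function below is a made-up example.
@mail_errors
def fetch_latest_news():
    raise RuntimeError("network unreachable")  # this error text would be mailed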
from hazelcast.serialization.bits import *
from hazelcast.protocol.client_message import ClientMessage
from hazelcast.util import ImmutableLazyDataList
from hazelcast.protocol.codec.multi_map_message_type import *
from hazelcast.six.moves import range

REQUEST_TYPE = MULTIMAP_REMOVE
RESPONSE_TYPE = 106
RETRYABLE = False


def calculate_size(name, key, thread_id):
    """ Calculates the request payload size"""
    data_size = 0
    data_size += calculate_size_str(name)
    data_size += calculate_size_data(key)
    data_size += LONG_SIZE_IN_BYTES
    return data_size


def encode_request(name, key, thread_id):
    """ Encode request into client_message"""
    client_message = ClientMessage(payload_size=calculate_size(name, key, thread_id))
    client_message.set_message_type(REQUEST_TYPE)
    client_message.set_retryable(RETRYABLE)
    client_message.append_str(name)
    client_message.append_data(key)
    client_message.append_long(thread_id)
    client_message.update_frame_length()
    return client_message


def decode_response(client_message, to_object=None):
    """ Decode response from client message"""
    parameters = dict(response=None)
    response_size = client_message.read_int()
    response = []
    for _ in range(0, response_size):
        response_item = client_message.read_data()
        response.append(response_item)
    parameters['response'] = ImmutableLazyDataList(response, to_object)
    return parameters
#!/usr/bin/env python3 import os from slack_sdk import WebClient from slack_sdk.errors import SlackApiError import sys import requests import json import time import datetime if __name__ == "__main__": # state_name = str(input("Enter the state name: ")) # district_name = str(input("Enter the district name: ")) # no_of_days = int(input("Enter the number of days to get appointments: ")) client = WebClient(token=os.environ["SLACK_BOT_TOKEN"]) state_name = "Maharashtra" district_name = "Pune" pincode_list = [ 411038, 411001, 411002, 411006, 411026, 411011, 412409, ] # 411026, 412409 no_of_days = 7 start_date = datetime.datetime.today().strftime("%d-%m-%Y") date_list = [] for i in range(no_of_days): date_list.append( (datetime.datetime.today() + datetime.timedelta(days=i + 1)).strftime( "%d-%m-%Y" ) ) get_states = requests.get("https://cdn-api.co-vin.in/api/v2/admin/location/states") states = get_states.json() state_id = -1 for state_dict in states["states"]: if state_dict["state_name"] == state_name: state_id = state_dict["state_id"] break if state_id == -1: print("Did not find state! Exiting! Check the spelling!") sys.exit() print("State id of {} is {}".format(state_name, state_id)) get_districts = requests.get( "https://cdn-api.co-vin.in/api/v2/admin/location/districts/" + str(state_id) ) districts = get_districts.json() district_id = -1 for district_dict in districts["districts"]: if district_dict["district_name"] == district_name: district_id = district_dict["district_id"] if district_id == -1: print("Did not find district! Exiting! Check the spelling!") sys.exit() print("District id of {} is {}".format(district_name, district_id)) appoint_payload = {"district_id": str(district_id), "date": start_date} # calender_url = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByDistrict?district_id={}&date={}".format(district_id, start_date) # print(calender_url) while 1: time_now = datetime.datetime.now() time_now = time_now.strftime("%H:%M:%S") print("Current Time: ", time_now) get_appointments = requests.get( "https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByDistrict", params=appoint_payload, ) centers_list = get_appointments.json()["centers"] # temp_centers = [] # for center in centers_list: # if center["pincode"] in pincode_list: # temp_centers.append(center["name"]) # temp_centers.sort() # print(temp_centers) available_centers = [] for center in centers_list: if ( center["sessions"][0]["min_age_limit"] < 45 and center["pincode"] in pincode_list ): available_centers.append(center) # print("available_centers: ", available_centers) message_list = [] message_str = "" for center in available_centers: for session in center["sessions"]: if session["available_capacity"] > 0: message_str = ( "Center Name: " + center["name"] + "\nPincode: " + str(center["pincode"]) + "\nAvailable Capacity: " + str(session["available_capacity"]) + "\nDate: " + session["date"] + "\nVaccine: " + session["vaccine"] ) message_list.append(message_str) for message in message_list: try: response = client.chat_postMessage(channel="#cowin", text=message) except SlackApiError as e: print(f"Got an error: {e.response['error']}") # print(message_list) time.sleep(10)
from simglucose.patient.t1dpatient import Action from simglucose.analysis.risk import risk_index import pandas as pd from datetime import timedelta import logging from collections import namedtuple from simglucose.simulation.rendering import Viewer try: from rllab.envs.base import Step except ImportError: _Step = namedtuple("Step", ["observation", "reward", "done", "info"]) def Step(observation, reward, done, **kwargs): """ Convenience method creating a namedtuple with the results of the environment.step method. Put extra diagnostic info in the kwargs """ return _Step(observation, reward, done, kwargs) Observation = namedtuple('Observation', ['CGM']) logger = logging.getLogger(__name__) def risk_diff(BG_last_hour): if len(BG_last_hour) < 2: return 0 else: _, _, risk_current = risk_index([BG_last_hour[-1]], 1) _, _, risk_prev = risk_index([BG_last_hour[-2]], 1) return risk_prev - risk_current class T1DSimEnv(object): def __init__(self, patient, sensor, pump, scenario): self.patient = patient self.sensor = sensor self.pump = pump self.scenario = scenario self._reset() @property def time(self): return self.scenario.start_time + timedelta(minutes=self.patient.t) def mini_step(self, action): # current action patient_action = self.scenario.get_action(self.time) basal = self.pump.basal(action.basal) bolus = self.pump.bolus(action.bolus) insulin = basal + bolus CHO = patient_action.meal patient_mdl_act = Action(insulin=insulin, CHO=CHO) # State update self.patient.step(patient_mdl_act) # next observation BG = self.patient.observation.Gsub CGM = self.sensor.measure(self.patient) return CHO, insulin, BG, CGM def step(self, action, reward_fun=risk_diff): ''' action is a namedtuple with keys: basal, bolus ''' CHO = 0.0 insulin = 0.0 BG = 0.0 CGM = 0.0 for _ in range(int(self.sample_time)): # Compute moving average as the sample measurements tmp_CHO, tmp_insulin, tmp_BG, tmp_CGM = self.mini_step(action) CHO += tmp_CHO / self.sample_time insulin += tmp_insulin / self.sample_time BG += tmp_BG / self.sample_time CGM += tmp_CGM / self.sample_time # Compute risk index horizon = 1 LBGI, HBGI, risk = risk_index([BG], horizon) # Record current action self.CHO_hist.append(CHO) self.insulin_hist.append(insulin) # Record next observation self.time_hist.append(self.time) self.BG_hist.append(BG) self.CGM_hist.append(CGM) self.risk_hist.append(risk) self.LBGI_hist.append(LBGI) self.HBGI_hist.append(HBGI) # Compute reward, and decide whether game is over window_size = int(60 / self.sample_time) BG_last_hour = self.CGM_hist[-window_size:] reward = reward_fun(BG_last_hour) done = BG < 10 or BG > 1000 obs = Observation(CGM=CGM) return Step( observation=obs, reward=reward, done=done, sample_time=self.sample_time, patient_name=self.patient.name, meal=CHO, patient_state=self.patient.state) def _reset(self): self.sample_time = self.sensor.sample_time self.viewer = None BG = self.patient.observation.Gsub horizon = 1 LBGI, HBGI, risk = risk_index([BG], horizon) CGM = self.sensor.measure(self.patient) self.time_hist = [self.scenario.start_time] self.BG_hist = [BG] self.CGM_hist = [CGM] self.risk_hist = [risk] self.LBGI_hist = [LBGI] self.HBGI_hist = [HBGI] self.CHO_hist = [] self.insulin_hist = [] def reset(self): self.patient.reset() self.sensor.reset() self.pump.reset() self.scenario.reset() self._reset() CGM = self.sensor.measure(self.patient) obs = Observation(CGM=CGM) return Step( observation=obs, reward=0, done=False, sample_time=self.sample_time, patient_name=self.patient.name, meal=0, patient_state=self.patient.state) 
def render(self, close=False): if close: if self.viewer is not None: self.viewer.close() self.viewer = None return if self.viewer is None: self.viewer = Viewer(self.scenario.start_time, self.patient.name) self.viewer.render(self.show_history()) def show_history(self): df = pd.DataFrame() df['Time'] = pd.Series(self.time_hist) df['BG'] = pd.Series(self.BG_hist) df['CGM'] = pd.Series(self.CGM_hist) df['CHO'] = pd.Series(self.CHO_hist) df['insulin'] = pd.Series(self.insulin_hist) df['LBGI'] = pd.Series(self.LBGI_hist) df['HBGI'] = pd.Series(self.HBGI_hist) df['Risk'] = pd.Series(self.risk_hist) df = df.set_index('Time') return df
#!/usr/bin/env python3

import sys

genomes = []
for line in open(sys.argv[1], "r"):  # use sys library to read in a single file entry
    genomes.append(line.strip())  # assumes a single column of unique identifiers added to search list

bigdict = {}
for line in open("assembly_summary_genbank.txt", "r"):  # parse the GenBank text file
    if line[0] != "#":  # skip the header lines
        line = line.strip()  # remove trailing \n character
        data = line.split("\t")  # create a list split on \t
        # select the appropriate index values that have the uniq identifier and the target output
        # bigdict[data[15]] = data[19]
        bigdict[data[5]] = data[19]

outfile1 = open("matched.FTP-calls.txt", "w")  # assign an outfile with correct matches
outfile2 = open("unmatched-ref-ids.tmp", "w")  # assign an outfile with failed matches

for i in genomes:  # iterate through the search list
    # using the try-except formatting to search within the dictionary. if no matching key
    # we assume that the ID is not correctly in the text file
    # record ID in the failed matches outfile
    try:
        outfile1.write("wget " + str(bigdict[i]) + "/*_genomic.fna.gz" + "\n")
    except KeyError:
        outfile2.write(str(i) + "\n")
import unittest

from SimulaQron.cqc.pythonLib.cqc import CQCConnection, qubit, CQCUnsuppError


class TestRestrictedTopology(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        nodes = ["Alice", "Bob", "Charlie"]
        cls.edges = [("Alice", "Bob"), ("Bob", "Charlie")]
        cls.non_edges = [(node, node) for node in nodes] + [("Alice", "Charlie")]
        print("Testing send and EPR in a restricted topology")

    def test_send(self):
        for sender_name, receiver_name in self.edges:
            with CQCConnection(sender_name) as sender:
                with CQCConnection(receiver_name) as receiver:
                    q = qubit(sender)
                    sender.sendQubit(q=q, name=receiver_name, remote_appID=receiver._appID)
                    q = receiver.recvQubit()
                    m = q.measure()
                    self.assertEqual(m, 0)

        for sender_name, receiver_name in self.non_edges:
            with CQCConnection(sender_name) as sender:
                with CQCConnection(receiver_name) as receiver:
                    q = qubit(sender)
                    with self.assertRaises(CQCUnsuppError):
                        sender.sendQubit(q=q, name=receiver_name, remote_appID=receiver._appID)
                    m = q.measure()
                    self.assertEqual(m, 0)

    def test_EPR(self):
        for sender_name, receiver_name in self.edges:
            with CQCConnection(sender_name) as sender:
                with CQCConnection(receiver_name) as receiver:
                    qs = sender.createEPR(name=receiver_name, remote_appID=receiver._appID)
                    qr = receiver.recvEPR()
                    ms = qs.measure()
                    mr = qr.measure()
                    self.assertEqual((ms + mr) % 2, 0)

        for sender_name, receiver_name in self.non_edges:
            with CQCConnection(sender_name) as sender:
                with CQCConnection(receiver_name) as receiver:
                    with self.assertRaises(CQCUnsuppError):
                        sender.createEPR(name=receiver_name, remote_appID=receiver._appID)


if __name__ == "__main__":
    unittest.main()
from insertion_sort.insertion_sort import insertion_sort

# test 0 one two and many


def test_zero():
    z = []
    actual = insertion_sort(z)
    expected = "Not a valid input"
    assert actual == expected


def test_one():
    z = [1]
    actual = insertion_sort(z)
    expected = [1]
    assert actual == expected


def test_many():
    z = [8, 4, 23, 42, 16, 15]
    actual = insertion_sort(z)
    expected = [4, 8, 15, 16, 23, 42]
    assert actual == expected


def test_many1():
    z = [20, 18, 12, 8, 5, -2]
    actual = insertion_sort(z)
    expected = [-2, 5, 8, 12, 18, 20]
    assert actual == expected


def test_many2():
    z = [5, 12, 7, 5, 5, 7]
    actual = insertion_sort(z)
    expected = [5, 5, 5, 7, 7, 12]
    assert actual == expected


def test_many3():
    z = [2, 3, 5, 7, 13, 11]
    actual = insertion_sort(z)
    expected = [2, 3, 5, 7, 11, 13]
    assert actual == expected
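
# The tests above assume an insertion_sort() that returns the string "Not a valid input"
# for an empty list and an ascending-sorted list otherwise. A minimal reference sketch
# consistent with that contract is shown below; the real insertion_sort.insertion_sort
# module may differ in details such as in-place vs. copied sorting.
def reference_insertion_sort(values):
    if not values:
        return "Not a valid input"
    for i in range(1, len(values)):
        key = values[i]
        j = i - 1
        # shift every element larger than key one slot to the right
        while j >= 0 and values[j] > key:
            values[j + 1] = values[j]
            j -= 1
        values[j + 1] = key
    return values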
""" The interface for select statements """ def select(statement): pass
"""YOLO_v3 Model Defined in Keras.""" from functools import wraps import tensorflow as tf from keras import backend as K from keras.layers import Conv2D, Add, ZeroPadding2D, UpSampling2D, Concatenate from keras.layers.advanced_activations import LeakyReLU from keras.layers.normalization import BatchNormalization from keras.models import Model from keras.regularizers import l2 from yolo3.utils import compose @wraps(Conv2D) def DarknetConv2D(*args, **kwargs): """Wrapper to set Darknet parameters for Convolution2D.""" darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)} # the author made some mistake here kk = None try: kk = kwargs['strides'] except: kk = None #end darknet_conv_kwargs['padding'] = 'VALID' if kk==(2,2) else 'SAME' darknet_conv_kwargs.update(kwargs) return Conv2D(*args, **darknet_conv_kwargs) def DarknetConv2D_BN_Leaky(*args, **kwargs): """Darknet Convolution2D followed by BatchNormalization and LeakyReLU.""" no_bias_kwargs = {'use_bias': False} no_bias_kwargs.update(kwargs) return compose( DarknetConv2D(*args, **no_bias_kwargs), BatchNormalization(), LeakyReLU(alpha=0.1)) def resblock_body(x, num_filters, num_blocks): '''A series of resblocks starting with a downsampling Convolution2D''' # Darknet uses left and top padding instead of 'same' mode x = ZeroPadding2D(((1,0),(1,0)))(x) x = DarknetConv2D_BN_Leaky(num_filters, (3,3), strides=(2,2))(x) for i in range(num_blocks): y = compose( DarknetConv2D_BN_Leaky(num_filters//2, (1,1)), DarknetConv2D_BN_Leaky(num_filters, (3,3)))(x) x = Add()([x,y]) return x def darknet_body(x): '''Darknent body having 52 Convolution2D layers''' x = DarknetConv2D_BN_Leaky(32, (3,3))(x) x = resblock_body(x, 64, 1) x = resblock_body(x, 128, 2) x = resblock_body(x, 256, 8) x = resblock_body(x, 512, 8) x = resblock_body(x, 1024, 4) return x def make_last_layers(x, num_filters, out_filters): '''6 Conv2D_BN_Leaky layers followed by a Conv2D_linear layer''' x = compose( DarknetConv2D_BN_Leaky(num_filters, (1,1)), DarknetConv2D_BN_Leaky(num_filters*2, (3,3)), DarknetConv2D_BN_Leaky(num_filters, (1,1)), DarknetConv2D_BN_Leaky(num_filters*2, (3,3)), DarknetConv2D_BN_Leaky(num_filters, (1,1)))(x) y = compose( DarknetConv2D_BN_Leaky(num_filters*2, (3,3)), DarknetConv2D(out_filters, (1,1)))(x) return x, y def yolo_body(inputs, num_anchors, num_classes): """Create YOLO_V3 model CNN body in Keras.""" darknet = Model(inputs, darknet_body(inputs)) x, y1 = make_last_layers(darknet.output, 512, num_anchors*(num_classes+5)) x = compose( DarknetConv2D_BN_Leaky(256, (1,1)), UpSampling2D(2))(x) x = Concatenate()([x,darknet.layers[148].output]) x, y2 = make_last_layers(x, 256, num_anchors*(num_classes+5)) x = compose( DarknetConv2D_BN_Leaky(128, (1,1)), UpSampling2D(2))(x) x = Concatenate()([x,darknet.layers[89].output]) x, y3 = make_last_layers(x, 128, num_anchors*(num_classes+5)) return Model(inputs, [y1,y2,y3]) def yolo_head(feats, anchors, num_classes, input_shape): """Convert final layer features to bounding box parameters.""" num_anchors = len(anchors) # Reshape to batch, height, width, num_anchors, box_params. 
anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2]) grid_shape = K.shape(feats)[1:3] # height, width grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]), [1, grid_shape[1], 1, 1]) grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]), [grid_shape[0], 1, 1, 1]) grid = K.concatenate([grid_x, grid_y]) grid = K.cast(grid, K.dtype(feats)) feats = K.reshape( feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5]) box_xy = K.sigmoid(feats[..., :2]) box_wh = K.exp(feats[..., 2:4]) box_confidence = K.sigmoid(feats[..., 4:5]) box_class_probs = K.sigmoid(feats[..., 5:]) # Adjust preditions to each spatial grid point and anchor size. box_xy = (box_xy + grid) / K.cast(grid_shape[::-1], K.dtype(feats)) box_wh = box_wh * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats)) return box_xy, box_wh, box_confidence, box_class_probs def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape): '''Get corrected boxes''' box_yx = box_xy[..., ::-1] box_hw = box_wh[..., ::-1] input_shape = K.cast(input_shape, K.dtype(box_yx)) image_shape = K.cast(image_shape, K.dtype(box_yx)) new_shape = K.round(image_shape * K.min(input_shape/image_shape)) offset = (input_shape-new_shape)/2./input_shape scale = input_shape/new_shape box_yx = (box_yx - offset) * scale box_hw *= scale box_mins = box_yx - (box_hw / 2.) box_maxes = box_yx + (box_hw / 2.) boxes = K.concatenate([ box_mins[..., 0:1], # y_min box_mins[..., 1:2], # x_min box_maxes[..., 0:1], # y_max box_maxes[..., 1:2] # x_max ]) # Scale boxes back to original image shape. boxes *= K.concatenate([image_shape, image_shape]) return boxes def yolo_boxes_and_scores(feats, anchors, num_classes, input_shape, image_shape): '''Process Conv layer output''' box_xy, box_wh, box_confidence, box_class_probs = yolo_head(feats, anchors, num_classes, input_shape) boxes = yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape) boxes = K.reshape(boxes, [-1, 4]) box_scores = box_confidence * box_class_probs box_scores = K.reshape(box_scores, [-1, num_classes]) return boxes, box_scores def yolo_eval(yolo_outputs, anchors, num_classes, image_shape, max_boxes=20, score_threshold=.6, iou_threshold=.5): """Evaluate YOLO model on given input and return filtered boxes.""" input_shape = K.shape(yolo_outputs[0])[1:3] * 32 for i in range(0,3): _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[i], anchors[6-3*i:9-3*i], num_classes, input_shape, image_shape) if i==0: boxes, box_scores = _boxes, _box_scores else: boxes = K.concatenate([boxes,_boxes], axis=0) box_scores = K.concatenate([box_scores,_box_scores], axis=0) mask = box_scores >= score_threshold max_boxes_tensor = K.constant(max_boxes, dtype='int32') for i in range(num_classes): # TODO: use keras backend instead of tf. class_boxes = tf.boolean_mask(boxes, mask[:, i]) class_box_scores = tf.boolean_mask(box_scores[:, i], mask[:, i]) nms_index = tf.image.non_max_suppression( class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold) class_boxes = K.gather(class_boxes, nms_index) class_box_scores = K.gather(class_box_scores, nms_index) classes = K.ones_like(class_box_scores, 'int32') * i if i==0: boxes_, scores_, classes_ = class_boxes, class_box_scores, classes else: boxes_ = K.concatenate([boxes_,class_boxes], axis=0) scores_ = K.concatenate([scores_,class_box_scores], axis=0) classes_ = K.concatenate([classes_,classes], axis=0) return boxes_, scores_, classes_
from heatmiserV3.config import Config
from heatmiserV3.devices import Device, Master
import unittest


class TestDevices(unittest.TestCase):
    def test_request_all(self):
        tm1 = Device("tm1", "Boat Timer", 1)
        master = Master(0x81)
        msg = master.build_command(tm1, False, Config.ALL_FIELDS_NAME)
        byte_msg = bytes(msg)
        expected = b'\x01\x0a\x81\x00\x00\x00\xff\xff\x2c\x09'  # includes 2 crc bytes
        print()
        print("generated=", str(byte_msg), "\t\t", " ".join("{:02x}".format(x) for x in byte_msg))
        print("expected =", str(expected), "\t\t", " ".join("{:02x}".format(x) for x in expected))
        assert(byte_msg == expected)

    def test_full_response(self):
        response = b'3\x00\x11\x05\xfd\xeb\xfd\xff\xff\xff'
# _experimental_design/__init__.py

__module_name__ = "__init__.py"
__author__ = ", ".join(["Michael E. Vinyard"])
__email__ = ", ".join(["vinyard@g.harvard.edu",])

from ._funcs._identify_overlapping_fragments import _OverlappingFragments as OverlappingFragments
from ._funcs._get_chromosome_sequence import _get_chromosome_sequence
from ._funcs._plot_CDS import _plot_cds as plot_CDS
from ._funcs._GTF_Module import _GTF as GTF
from abc import ABC, abstractmethod from functools import lru_cache from vispy.app import Canvas from vispy.gloo import gl from vispy.visuals.transforms import STTransform class VispyBaseLayer(ABC): """Base object for individual layer views Meant to be subclassed. Parameters ---------- layer : napari.layers.Layer Layer model. node : vispy.scene.VisualNode Central node with which to interact with the visual. Attributes ---------- layer : napari.layers.Layer Layer model. node : vispy.scene.VisualNode Central node with which to interact with the visual. scale : sequence of float Scale factors for the layer visual in the scenecanvas. translate : sequence of float Translation values for the layer visual in the scenecanvas. scale_factor : float Conversion factor from canvas coordinates to image coordinates, which depends on the current zoom level. MAX_TEXTURE_SIZE_2D : int Max texture size allowed by the vispy canvas during 2D rendering. MAX_TEXTURE_SIZE_3D : int Max texture size allowed by the vispy canvas during 2D rendering. Extended Summary ---------- _master_transform : vispy.visuals.transforms.STTransform Transform positioning the layer visual inside the scenecanvas. """ def __init__(self, layer, node): super().__init__() self.layer = layer self.node = node MAX_TEXTURE_SIZE_2D, MAX_TEXTURE_SIZE_3D = get_max_texture_sizes() self.MAX_TEXTURE_SIZE_2D = MAX_TEXTURE_SIZE_2D self.MAX_TEXTURE_SIZE_3D = MAX_TEXTURE_SIZE_3D self._position = (0,) * self.layer.dims.ndisplay self.camera = None self.layer.events.refresh.connect(lambda e: self.node.update()) self.layer.events.set_data.connect(self._on_data_change) self.layer.events.visible.connect(self._on_visible_change) self.layer.events.opacity.connect(self._on_opacity_change) self.layer.events.blending.connect(self._on_blending_change) self.layer.events.scale.connect(self._on_scale_change) self.layer.events.translate.connect(self._on_translate_change) @property def _master_transform(self): """vispy.visuals.transforms.STTransform: Central node's firstmost transform. """ # whenever a new parent is set, the transform is reset # to a NullTransform so we reset it here if not isinstance(self.node.transform, STTransform): self.node.transform = STTransform() return self.node.transform @property def order(self): """int: Order in which the visual is drawn in the scenegraph. Lower values are closer to the viewer. """ return self.node.order @order.setter def order(self, order): self.node.order = order @property def scale(self): """sequence of float: Scale factors.""" return self._master_transform.scale @scale.setter def scale(self, scale): self._master_transform.scale = scale @property def translate(self): """sequence of float: Translation values.""" return self._master_transform.translate @translate.setter def translate(self, translate): self._master_transform.translate = translate @property def scale_factor(self): """float: Conversion factor from canvas coordinates to image coordinates, which depends on the current zoom level. 
""" transform = self.node.canvas.scene.node_transform(self.node) scale_factor = transform.map([1, 1])[0] - transform.map([0, 0])[0] return scale_factor @abstractmethod def _on_data_change(self, event=None): raise NotImplementedError() def _on_visible_change(self, event=None): self.node.visible = self.layer.visible def _on_opacity_change(self, event=None): self.node.opacity = self.layer.opacity def _on_blending_change(self, event=None): self.node.set_gl_state(self.layer.blending) self.node.update() def _on_scale_change(self, event=None): scale = self.layer._transforms.simplified.set_slice( self.layer.dims.displayed ).scale # convert NumPy axis ordering to VisPy axis ordering self.scale = scale[::-1] if self.layer.is_pyramid: self.layer.top_left = self.find_top_left() self.layer.position = self._transform_position(self._position) def _on_translate_change(self, event=None): translate = self.layer._transforms.simplified.set_slice( self.layer.dims.displayed ).translate # convert NumPy axis ordering to VisPy axis ordering self.translate = translate[::-1] self.layer.position = self._transform_position(self._position) def _transform_position(self, position): """Transform cursor position from canvas space (x, y) into image space. Parameters ------- position : 2-tuple Cursor position in canvase (x, y). Returns ------- coords : tuple Coordinates of cursor in image space for displayed dimensions only """ if self.node.canvas is not None: transform = self.node.canvas.scene.node_transform(self.node) # Map and offset position so that pixel center is at 0 mapped_position = ( transform.map(list(position))[: len(self.layer.dims.displayed)] - 0.5 ) coords = tuple(mapped_position[::-1]) else: coords = (0,) * len(self.layer.dims.displayed) return coords def _reset_base(self): self._on_visible_change() self._on_opacity_change() self._on_blending_change() self._on_scale_change() self._on_translate_change() def on_mouse_move(self, event): """Called whenever mouse moves over canvas.""" if event.pos is None: return self._position = list(event.pos) self.layer.position = self._transform_position(self._position) self.layer.on_mouse_move(event) def on_mouse_press(self, event): """Called whenever mouse pressed in canvas. """ if event.pos is None: return self._position = list(event.pos) self.layer.position = self._transform_position(self._position) self.layer.on_mouse_press(event) def on_mouse_release(self, event): """Called whenever mouse released in canvas. """ if event.pos is None: return self._position = list(event.pos) self.layer.position = self._transform_position(self._position) self.layer.on_mouse_release(event) def on_draw(self, event): """Called whenever the canvas is drawn. """ self.layer.scale_factor = self.scale_factor @lru_cache() def get_max_texture_sizes(): """Get maximum texture sizes for 2D and 3D rendering. Returns ------- MAX_TEXTURE_SIZE_2D : int or None Max texture size allowed by the vispy canvas during 2D rendering. MAX_TEXTURE_SIZE_3D : int or None Max texture size allowed by the vispy canvas during 2D rendering. """ # A canvas must be created to access gl values c = Canvas(show=False) try: MAX_TEXTURE_SIZE_2D = gl.glGetParameter(gl.GL_MAX_TEXTURE_SIZE) finally: c.close() if MAX_TEXTURE_SIZE_2D == (): MAX_TEXTURE_SIZE_2D = None # vispy doesn't expose GL_MAX_3D_TEXTURE_SIZE so hard coding # MAX_TEXTURE_SIZE_3D = gl.glGetParameter(gl.GL_MAX_3D_TEXTURE_SIZE) # if MAX_TEXTURE_SIZE_3D == (): # MAX_TEXTURE_SIZE_3D = None MAX_TEXTURE_SIZE_3D = 2048 return MAX_TEXTURE_SIZE_2D, MAX_TEXTURE_SIZE_3D
#!/usr/bin/env python3 # This scripts attempts to generate massive design of experiment runscripts. # and save it into a "runMassive.sh" and "doe.log". #------------------------------------------------------------------------------- import os, sys import os.path import re import itertools import glob PUBLIC = ['nangate45', 'sky130hd', 'sky130hs', 'asap7'] # The number of generated config files into designs/{platform}/{design}/chunks/chuck{number} directory. NumFilesPerChunk = 50000 ## Orignal SDC file name OriginalSDC = 'constraint_doe.sdc' ################################## # define input parameters ################################## # for generated .sh file name ShellName = 'runMassive' # for metrics collect script (with '.sh') file name MetricsShellName = '%s_metrics_collect.sh'%(ShellName) ################## # Design ################## ## Define platform-design. User should remove ',' for the last item in the list. (string) PLATFORM_DESIGN = [ \ 'sky130hd-gcd' \ ] ## Target Clock Period (float) CLK_PERIOD = [] ## SDC uncertainty and IO delay. ## TODO: Currently, it only support when 'set uncertainty' and 'set io_delay' ## are defined in the constraint.sdc file. UNCERTAINTY = [] IO_DELAY = [] ################## # Synthesis ################## ## Clock period for Yosys (for synthesis) ## The unit should follow each design (ns, ps) (float) ABC_CLOCK_PERIOD = [] ## Hierarchical Synthsis. 0 = hierarchical, 1 = flatten, empty = flatten (default) (int) FLATTEN = [] ################## # Floorplan ################## ## Utilization. e.g, 45 -> 45% of core util. (int) #CORE_UTIL = [20, 40, 55] CORE_UTIL = [] ## Aspect ratio. It REQUIRES 'CORE_UTIL' values (float) ASPECT_RATIO = [] ## Core-to-die gap distance (um). It REQUIRES 'CORE_UTIL' values (int) CORE_DIE_MARGIN = [] ## Pin Distance #PINS_DISTANCE = [2] PINS_DISTANCE = [] ################## # Placement ################## ## Global Placement Padding for std cells (int) GP_PAD = [] ## Detailed Placement Padding for std cells (int) DP_PAD = [] ## Global Placement target bin density (select only one option) (.2 float) ## option 1) PLACE_DENSITY uses the values in the list as it is. ## option 2) PLACE_DENSITY_LB_ADDON adds the values in the list to the lower boundary of the PLACE_DENSITY ## For eaxmple, PLACE_DENSITY_LB_ADDON = [0, 0.02, 0.04] means PLACE_DENSITY = [LB, LB+0.02, LB+0.04] ## LB of the place density == (total instance area + padding) / total die area PLACE_DENSITY = [] PLACE_DENSITY_LB_ADDON = [] ################## # CTS ################## ## CTS clustering size and diameter (um) (int) CTS_CLUSTER_SIZE = [] CTS_CLUSTER_DIAMETER = [] ################## # Global Routing ################## ## Set global routing layer capacity adjustment ## e.g.) 0.2 -> 20% usage for global routing ## Set for all layers. ## Each layer's layer adjustment will be overwritten with below per-layer values. (float) LAYER_ADJUST = [] LAYER_ADJUST_M1 = [] LAYER_ADJUST_M2 = [] LAYER_ADJUST_M3 = [] LAYER_ADJUST_M4 = [] LAYER_ADJUST_M5 = [] LAYER_ADJUST_M6 = [] LAYER_ADJUST_M7 = [] LAYER_ADJUST_M8 = [] LAYER_ADJUST_M9 = [] ## Set global routing random seed. (int) GR_SEED = [] ## Set allow global routing overflow. 0 = no, 1 = yes, empty = no (default) (int) # TODO: currently it does not work. Let this as 0 as it is. GR_OVERFLOW = [0] ################## # Detailed Routing ################## ## Set global routing random seed. 
(int) DR_SEED = [] SweepingAttributes = { "PLATFORM_DESIGN": PLATFORM_DESIGN, "CP": CLK_PERIOD, "ABC_CP": ABC_CLOCK_PERIOD, "FLATTEN": FLATTEN, "UNCERTAINTY": UNCERTAINTY, "IO_DELAY": IO_DELAY, "UTIL": CORE_UTIL, "AR": ASPECT_RATIO, "GAP": CORE_DIE_MARGIN, "PINS_DISTANCE": PINS_DISTANCE, "GP_PAD": GP_PAD, "DP_PAD": DP_PAD, "PD": PLACE_DENSITY, "PD_LB_ADD": PLACE_DENSITY_LB_ADDON, "CTS_CLUSTER_SIZE": CTS_CLUSTER_SIZE, "CTS_CLUSTER_DIAMETER": CTS_CLUSTER_DIAMETER, "LAYER_ADJUST": LAYER_ADJUST, "M1": LAYER_ADJUST_M1, "M2": LAYER_ADJUST_M2, "M3": LAYER_ADJUST_M3, "M4": LAYER_ADJUST_M4, "M5": LAYER_ADJUST_M5, "M6": LAYER_ADJUST_M6, "M7": LAYER_ADJUST_M7, "M8": LAYER_ADJUST_M8, "M9": LAYER_ADJUST_M9, "GR_SEED": GR_SEED, "GR_OVERFLOW": GR_OVERFLOW, "DR_SEED": DR_SEED } def assignEmptyAttrs(dicts): knobs = {} for k, v in dicts.items(): if len(v) == 0: knobs.setdefault(k, ['empty']) else: knobs.setdefault(k,v) return knobs def writeDoeLog(dicts, ProductDicts): fo = open('./doe.log', 'w') numRuns = 1 for k, v in dicts.items(): if len(v)>0: print('%s has %s number of values'%(k,len(v))) fo.write('%s has %s number of values\n'%(k,len(v))) numRuns = numRuns * len(v) fo.write('\nTotal Number of Runs = %s\n\n'%numRuns) print('\nTotal Number of Runs = %s\n\n'%numRuns) knobValuesList = [] knobNamesList = [] for CurAttrs in ProductAttrs: knobValues = [] knobNames = [] for k, v in CurAttrs.items(): if v=='empty': continue else: knobNames.append(str(k)) knobValues.append(str(v)) knobValuesList.append(knobValues) knobNamesList.append(knobNames) fo.write(str(knobNamesList[0])+'\n') for knobSet in knobValuesList: fo.write(str(knobSet)+'\n') fo.close() def productDict(dicts): return (dict(zip(dicts, x)) for x in itertools.product(*dicts.values())) def adjustFastRoute(filedata, adjSet, GrOverflow): if adjSet[0]!='empty': filedata = re.sub("(set_global_routing_layer_adjustment .* )[0-9\.]+", "\g<1>{:.2f}".format(float(adjSet[0])), filedata) sep_la_cmds = "" for i, sep_la in enumerate(adjSet): if i==0 or sep_la=='empty': continue ## TODO: Currently, only supports for SKY130HD and SKY130HS. ## TODO: user should manually change the layer name to match techLEF. 
layer_name = 'met%s'%i sep_la_cmds += "set_global_routing_layer_adjustment " + layer_name + " {:.2f}\n".format(float(sep_la)) filedata = re.sub("set_global_routing_layer_adjustment.*\n", "\g<0>"+sep_la_cmds, filedata) if int(GrOverflow) == 1: filedata = re.sub("(global_route.*(\n\s+.*)*)", "\g<1> \\\n -allow_overflow", filedata) return(filedata) #def setPlaceDensity(DESIGN, Util, GpPad): # if DESIGN == "ibex": # LB = (Util/100) + (GpPad * (0.4*(Util/100)-0.01))+0.01 # elif DESIGN == "aes": # LB = (Util/100) + (GpPad * (0.5*(Util/100)-0.005))+0.02 # else: # LB = (Util/100) + (GpPad * (0.4*(Util/100)-0.01))+0.01 # return LB def writeConfigs(CurAttrs, CurChunkNum): CurPlatform, CurDesign = CurAttrs.get('PLATFORM_DESIGN').split('-') CurClkPeriod = CurAttrs.get('CP') CurAbcClkPeriod = CurAttrs.get('ABC_CP') CurFlatten = CurAttrs.get('FLATTEN') CurUncertainty = CurAttrs.get('UNCERTAINTY') CurIoDelay = CurAttrs.get('IO_DELAY') CurCoreUtil = CurAttrs.get('UTIL') CurAspectRatio = CurAttrs.get('AR') CurCoreDieMargin = CurAttrs.get('GAP') CurPinsDistance = CurAttrs.get('PINS_DISTANCE') CurGpPad = CurAttrs.get('GP_PAD') CurDpPad = CurAttrs.get('DP_PAD') CurPlaceDensity = CurAttrs.get('PD') CurPlaceDensityLbAddon = CurAttrs.get('PD_LB_ADD') CurCtsClusterSize = CurAttrs.get('CTS_CLUSTER_SIZE') CurCtsClusterDiameter = CurAttrs.get('CTS_CLUSTER_DIAMETER') CurLayerAdjust = CurAttrs.get('LAYER_ADJUST') CurLayerAdjustM1 = CurAttrs.get('M1') CurLayerAdjustM2 = CurAttrs.get('M2') CurLayerAdjustM3 = CurAttrs.get('M3') CurLayerAdjustM4 = CurAttrs.get('M4') CurLayerAdjustM5 = CurAttrs.get('M5') CurLayerAdjustM6 = CurAttrs.get('M6') CurLayerAdjustM7 = CurAttrs.get('M7') CurLayerAdjustM8 = CurAttrs.get('M8') CurLayerAdjustM9 = CurAttrs.get('M9') CurGrSeed = CurAttrs.get('GR_SEED') CurGrOverflow = CurAttrs.get('GR_OVERFLOW') CurDrSeed = CurAttrs.get('DR_SEED') if not os.path.isdir('./designs/%s/%s/chunks'%(CurPlatform,CurDesign)): os.mkdir('./designs/%s/%s/chunks'%(CurPlatform,CurDesign)) CurDesignDir = './designs/%s/%s'%(CurPlatform,CurDesign) CurChunkDir = './designs/%s/%s/chunks/chunk%s'%(CurPlatform,CurDesign,CurChunkNum) if not os.path.isdir(CurChunkDir): os.mkdir(CurChunkDir) if MakeArg=='clean': fileList = glob.glob('%s/*-DoE-*'%(CurChunkDir)) if fileList is not None: for file in fileList: os.remove(file) return #print(CurPlatform, CurDesign) #print(CurClkPeriod, CurAbcClkPeriod, CurFlatten, CurCoreUtil) #print(CurAspectRatio, CurCoreDieMargin, CurGpPad, CurDpPad) #print(CurCtsClusterSize, CurCtsClusterDiameter, CurLayerAdjust) #print(CurLayerAdjustM1, CurLayerAdjustM2, CurLayerAdjustM3) #print(CurLayerAdjustM4, CurLayerAdjustM5, CurLayerAdjustM6) #print(CurLayerAdjustM7, CurLayerAdjustM8, CurLayerAdjustM9) #print(CurGrOverflow) #print(CurAttrs.items()) variantName = '' for k, v in CurAttrs.items(): if v!='empty' and k!='PLATFORM_DESIGN': variantName = variantName + '-' + str(k) + '_' + str(v) variantName = variantName[1:] #fileName = 'config-%s-%s-'%(CurPlatform, CurDesign)+variantName + '.mk' fileName = 'config-DoE-'+variantName + '.mk' fo = open('%s/%s'%(CurChunkDir,fileName), 'w') fo.write('include $(realpath $(dir $(DESIGN_CONFIG))../../)/config.mk\n') fo.write('\n') fo.write('FLOW_VARIANT = %s\n'%(variantName)) fo.write('\n') if CurClkPeriod != 'empty' or CurUncertainty != 'empty' or CurIoDelay != 'empty': fOrigSdc = open('%s/%s'%(CurDesignDir,OriginalSDC),'r') filedata = fOrigSdc.read() fOrigSdc.close() if CurClkPeriod != 'empty': filedata = re.sub("-period [0-9\.]+", "-period " + str(CurClkPeriod), 
filedata) #filedata = re.sub("-waveform [{}\s0-9\.]+$}", "\n", filedata) filedata = re.sub("-waveform [{}\s0-9\.]+[\s|\n]", "", filedata) if CurUncertainty != 'empty': filedata = re.sub("set uncertainty [0-9\.]+", "set uncertainty " + str(CurUncertainty), filedata) if CurIoDelay != 'empty': filedata = re.sub("set io_delay [0-9\.]+", "set io_delay " + str(CurIoDelay), filedata) #fOutSdc = open('./designs/%s/%s/constraint-%s-%s-'%(CurPlatform,CurDesign,CurPlatform,CurDesign)+variantName+'.sdc','w') fOutSdc = open('%s/constraint-DoE-'%(CurChunkDir)+variantName+'.sdc','w') fOutSdc.write(filedata) fOutSdc.close() fo.write('export SDC_FILE = $(dir $(DESIGN_CONFIG))/constraint-DoE-%s.sdc\n'%variantName) if CurAbcClkPeriod != 'empty': fo.write('export ABC_CLOCK_PERIOD_IN_PS = %s\n'%CurAbcClkPeriod) if CurFlatten != 'empty': if CurFlatten == 0: fo.write('export SYNTH_ARGS = \n') if CurCoreUtil != 'empty': fo.write('export CORE_UTILIZATION = %s\n'%CurCoreUtil) if CurPlaceDensity != 'empty': fo.write('export PLACE_DENSITY = %.2f\n'%CurPlaceDensity) if CurPlaceDensityLbAddon != 'empty': fo.write('export PLACE_DENSITY_LB_ADDON = %.2f\n'%CurPlaceDensityLbAddon) if CurAspectRatio != 'empty': fo.write('export CORE_ASPECT_RATIO = %s\n'%CurAspectRatio) if CurCoreDieMargin != 'empty': fo.write('export CORE_MARGIN = %s\n'%CurCoreDieMargin) if CurPinsDistance != 'empty': fo.write('export PLACE_PINS_ARGS = -min_distance %s\n'%CurPinsDistance) if CurGpPad != 'empty': fo.write('export CELL_PAD_IN_SITES_GLOBAL_PLACEMENT = %s\n'%CurGpPad) if CurDpPad != 'empty': fo.write('export CELL_PAD_IN_SITES_DETAIL_PLACEMENT = %s\n'%CurDpPad) if CurCtsClusterSize != 'empty': fo.write('export CTS_CLUSTER_SIZE = %s\n'%CurCtsClusterSize) if CurCtsClusterDiameter != 'empty': fo.write('export CTS_CLUSTER_DIAMETER = %s\n'%CurCtsClusterDiameter) if CurDrSeed != 'empty': fo.write('export OR_K = 1.0\n') fo.write('export OR_SEED = %s\n'%CurDrSeed) if CurLayerAdjust != 'empty' or \ CurLayerAdjustM1 != 'empty' or \ CurLayerAdjustM2 != 'empty' or \ CurLayerAdjustM3 != 'empty' or \ CurLayerAdjustM4 != 'empty' or \ CurLayerAdjustM5 != 'empty' or \ CurLayerAdjustM6 != 'empty' or \ CurLayerAdjustM7 != 'empty' or \ CurLayerAdjustM8 != 'empty' or \ CurLayerAdjustM9 != 'empty' or \ CurGrSeed != 'empty': fo.write('export FASTROUTE_TCL = $(dir $(DESIGN_CONFIG))/fastroute-DoE-%s.tcl'%variantName) if CurPlatform in PUBLIC: PLATFORM_DIR = './platforms/%s'%CurPlatform else: PLATFORM_DIR = '../../%s'%CurPlatform fFrIn = open('%s/fastroute.tcl'%PLATFORM_DIR,'r') filedata = fFrIn.read() fFrIn.close() CurLayerAdjustSet = [CurLayerAdjust, \ CurLayerAdjustM1, \ CurLayerAdjustM2, \ CurLayerAdjustM3, \ CurLayerAdjustM4, \ CurLayerAdjustM5, \ CurLayerAdjustM6, \ CurLayerAdjustM7, \ CurLayerAdjustM8, \ CurLayerAdjustM9 ] filedata = adjustFastRoute(filedata, CurLayerAdjustSet, CurGrOverflow) FrName = 'fastroute-DoE-'+variantName+'.tcl' fOutFr = open('%s/%s'%(CurChunkDir,FrName),'w') fOutFr.write(filedata) if CurGrSeed != 'empty': fOutFr.write('set_global_routing_random -seed %s'%CurGrSeed) fOutFr.close() fo.close() frun = open('./%s.sh'%ShellName, 'a') RunName = 'DESIGN_CONFIG=%s/%s make\n'%(CurChunkDir,fileName) frun.write(RunName) frun.close() with open('./metrics/%s'%MetricsShellName, 'a') as fcollect: CollectName = 'python util/genMetrics.py -x -p %s -d %s -v %s -o metrics/metrics_%s/%s.json\n'%(CurPlatform, CurDesign, variantName, ShellName, variantName) fcollect.write(CollectName) MakeArg = sys.argv[1] if not os.path.isdir('./metrics'): 
    os.mkdir('./metrics')
if not os.path.isdir('./metrics/metrics_%s'%ShellName):
    os.mkdir('./metrics/metrics_%s'%ShellName)

knobs = assignEmptyAttrs(SweepingAttributes)
ProductAttrs = list(productDict(knobs))
writeDoeLog(SweepingAttributes, ProductAttrs)

if os.path.isfile('./%s.sh'%ShellName):
    os.remove('./%s.sh'%ShellName)
if os.path.isfile('./metrics/%s'%MetricsShellName):
    os.remove('./metrics/%s'%MetricsShellName)

# Write one config per knob combination, grouping NumFilesPerChunk configs
# into each chunk directory before moving on to the next chunk.
CurChunkNum = 0
for i, CurAttrs in enumerate(ProductAttrs, 1):
    writeConfigs(CurAttrs, CurChunkNum)
    if i % NumFilesPerChunk == 0:
        CurChunkNum = CurChunkNum + 1
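# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the generator): the script
# enumerates the cartesian product of every non-empty knob list.  With the
# hypothetical settings below (and a single PLATFORM_DESIGN entry) it would
# emit 3 * 2 * 2 = 12 config-DoE-*.mk files plus matching SDC/fastroute files:
#
#   CORE_UTIL              = [20, 40, 55]
#   GP_PAD                 = [0, 2]
#   PLACE_DENSITY_LB_ADDON = [0.00, 0.04]
#
# The expansion itself is just itertools.product over the dict values, as in
# productDict() above:
#
#   >>> import itertools
#   >>> knobs = {'UTIL': [20, 40], 'GP_PAD': [0, 2]}
#   >>> list(dict(zip(knobs, v)) for v in itertools.product(*knobs.values()))
#   [{'UTIL': 20, 'GP_PAD': 0}, {'UTIL': 20, 'GP_PAD': 2},
#    {'UTIL': 40, 'GP_PAD': 0}, {'UTIL': 40, 'GP_PAD': 2}]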
from app import app
from flask import request, redirect, url_for, session, flash, render_template


@app.route('/save_to_local_storage')
def save_to_local_storage():
    """Render a page that stores the given token and user id in localStorage
    before redirecting to the requested location."""
    access_token = request.args.get('access_token', '')
    user_id = request.args.get('user_id', '')
    redirect_location = request.args.get('redirect', '')
    return render_template('save_to_local_storage.html',
                           access_token=access_token,
                           user_id=user_id,
                           redirect=redirect_location)
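# ---------------------------------------------------------------------------
# Illustrative check (assumes the standard Flask test client; not part of the
# app).  The endpoint simply echoes the query parameters into the
# save_to_local_storage.html template, so a minimal smoke test might look like:
#
#   def test_save_to_local_storage():
#       client = app.test_client()
#       resp = client.get(
#           '/save_to_local_storage'
#           '?access_token=abc123&user_id=42&redirect=/dashboard')
#       assert resp.status_code == 200
#       assert b'abc123' in resp.data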
"""Manages movement of packets through the faucet pipeline.""" # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation. # Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer. # Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd. # Copyright (C) 2015--2018 The Contributors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import faucet.faucet_metadata as faucet_md from faucet import valve_of from faucet.valve_manager_base import ValveManagerBase class ValvePipeline(ValveManagerBase): """Responsible for maintaing the integrity of the Faucet pipeline for a single valve. Controls what packets a module sees in its tables and how it can pass packets through the pipeline. Responsible for installing flows in the vlan, egress and classification tables""" def __init__(self, dp): self.dp = dp self.vlan_table = dp.tables['vlan'] self.classification_table = dp.classification_table() self.output_table = dp.output_table() self.egress_table = None if dp.egress_pipeline: self.egress_table = dp.tables['egress'] self.filter_priority = self._FILTER_PRIORITY self.select_priority = self._HIGH_PRIORITY def _accept_to_table(self, table, actions): inst = [table.goto_this()] if actions is not None: inst.append(valve_of.apply_actions(actions)) return inst def accept_to_vlan(self, actions=None): """Get instructions to forward packet through the pipeline to vlan table. args: actions: (optional) list of actions to apply to packet. returns: list of instructions """ return self._accept_to_table(self.vlan_table, actions) def accept_to_classification(self, actions=None): """Get instructions to forward packet through the pipeline to classification table. args: actions: (optional) list of actions to apply to packet. returns: list of instructions """ return self._accept_to_table(self.classification_table, actions) def accept_to_l2_forwarding(self, actions=None): """Get instructions to forward packet through the pipeline to l2 forwarding. args: actions: (optional) list of actions to apply to packet. returns: list of instructions """ return self._accept_to_table(self.output_table, actions) def output(self, port, vlan, hairpin=False, loop_protect_field=None): """Get instructions list to output a packet through the regular pipeline. 
args: port: Port object of port to output packet to vlan: Vlan object of vlan to output packet on hairpin: if True, hairpinning is required returns: list of Instructions """ instructions = [] if self.egress_table: metadata, metadata_mask = faucet_md.get_egress_metadata( port.number, vlan.vid) instructions.extend(valve_of.metadata_goto_table( metadata, metadata_mask, self.egress_table)) else: instructions.append(valve_of.apply_actions(vlan.output_port( port, hairpin=hairpin, output_table=self.output_table, loop_protect_field=loop_protect_field))) return instructions def initialise_tables(self): """Install rules to initialise the classification_table""" ofmsgs = [] # drop broadcast sources if self.dp.drop_broadcast_source_address: ofmsgs.extend(self.filter_packets( {'eth_src': valve_of.mac.BROADCAST_STR} )) ofmsgs.extend(self.filter_packets( {'eth_type': valve_of.ECTP_ETH_TYPE}, priority_offset=10)) # antispoof for FAUCET's MAC address # TODO: antispoof for controller IPs on this VLAN, too. if self.dp.drop_spoofed_faucet_mac: for vlan in list(self.dp.vlans.values()): ofmsgs.extend(self.filter_packets( {'eth_src': vlan.faucet_mac})) return ofmsgs def _add_egress_table_rule(self, port, vlan, pop_vlan=True): metadata, metadata_mask = faucet_md.get_egress_metadata( port.number, vlan.vid) actions = copy.copy(port.mirror_actions()) if pop_vlan: actions.append(valve_of.pop_vlan()) actions.append(valve_of.output_port(port.number)) inst = [valve_of.apply_actions(actions)] return self.egress_table.flowmod( self.egress_table.match( vlan=vlan, metadata=metadata, metadata_mask=metadata_mask ), priority=self.dp.high_priority, inst=inst ) def add_port(self, port): ofmsgs = [] if self.egress_table is None: return ofmsgs for vlan in port.tagged_vlans: ofmsgs.append(self._add_egress_table_rule( port, vlan, pop_vlan=False)) if port.native_vlan is not None: ofmsgs.append(self._add_egress_table_rule( port, port.native_vlan)) return ofmsgs def del_port(self, port): ofmsgs = [] if self.egress_table: mask = faucet_md.PORT_METADATA_MASK ofmsgs.append(self.egress_table.flowdel(self.egress_table.match( metadata=port.number & mask, metadata_mask=mask ))) return ofmsgs def filter_packets(self, match_dict, priority_offset=0): """get a list of flow modification messages to filter packets from the pipeline. args: match_dict: a dictionary specifying the match fields priority_offset: used to prevent overlapping entries """ return [self.classification_table.flowdrop( self.classification_table.match(**match_dict), priority=self.filter_priority + priority_offset)] def select_packets(self, target_table, match_dict, actions=None, priority_offset=0): """retrieve rules to redirect packets matching match_dict to table""" inst = [target_table.goto_this()] if actions is not None: inst.append(valve_of.apply_actions(actions)) return [self.classification_table.flowmod( self.classification_table.match(**match_dict), priority=self.select_priority + priority_offset, inst=inst)] def remove_filter(self, match_dict, strict=True, priority_offset=0): """retrieve flow mods to remove a filter from the classification table """ priority = None if strict: priority = self.filter_priority + priority_offset return [self.classification_table.flowdel( self.classification_table.match(**match_dict), priority=priority, strict=strict)]
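# ---------------------------------------------------------------------------
# Usage sketch (illustration only): another valve manager asks the pipeline
# for classification-table rules rather than writing them itself.  `pipeline`
# and `acl_table` below are assumed to come from an already-configured Valve;
# 0x88cc (LLDP) is just an example ethertype, not faucet's actual wiring.
#
#   ofmsgs = []
#   # steer LLDP frames into the manager's own table
#   ofmsgs.extend(pipeline.select_packets(acl_table, {'eth_type': 0x88cc}))
#   # drop frames spoofing a known source MAC, above the default filters
#   ofmsgs.extend(pipeline.filter_packets(
#       {'eth_src': '0e:00:00:00:00:01'}, priority_offset=1))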
# revlog.py - storage back-end for mercurial # # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. """Storage back-end for Mercurial. This provides efficient delta storage with O(1) retrieve and append and O(changes) merge between branches. """ from __future__ import absolute_import import collections import errno import hashlib import os import struct import zlib # import stuff from node for others to import from revlog from .node import ( bin, hex, nullid, nullrev, ) from .i18n import _ from . import ( ancestor, error, mdiff, parsers, templatefilters, util, ) _pack = struct.pack _unpack = struct.unpack _compress = zlib.compress _decompress = zlib.decompress # revlog header flags REVLOGV0 = 0 REVLOGNG = 1 REVLOGNGINLINEDATA = (1 << 16) REVLOGGENERALDELTA = (1 << 17) REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA REVLOG_DEFAULT_FORMAT = REVLOGNG REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA # revlog index flags REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified REVIDX_DEFAULT_FLAGS = 0 REVIDX_KNOWN_FLAGS = REVIDX_ISCENSORED # max size of revlog with inline data _maxinline = 131072 _chunksize = 1048576 RevlogError = error.RevlogError LookupError = error.LookupError CensoredNodeError = error.CensoredNodeError def getoffset(q): return int(q >> 16) def gettype(q): return int(q & 0xFFFF) def offset_type(offset, type): return long(long(offset) << 16 | type) _nullhash = hashlib.sha1(nullid) def hash(text, p1, p2): """generate a hash from the given text and its parent hashes This hash combines both the current file contents and its history in a manner that makes it easy to distinguish nodes with the same content in the revision graph. 
""" # As of now, if one of the parent node is null, p2 is null if p2 == nullid: # deep copy of a hash is faster than creating one s = _nullhash.copy() s.update(p1) else: # none of the parent nodes are nullid l = [p1, p2] l.sort() s = hashlib.sha1(l[0]) s.update(l[1]) s.update(text) return s.digest() def decompress(bin): """ decompress the given input """ if not bin: return bin t = bin[0] if t == '\0': return bin if t == 'x': try: return _decompress(bin) except zlib.error as e: raise RevlogError(_("revlog decompress error: %s") % str(e)) if t == 'u': return util.buffer(bin, 1) raise RevlogError(_("unknown compression type %r") % t) # index v0: # 4 bytes: offset # 4 bytes: compressed length # 4 bytes: base rev # 4 bytes: link rev # 20 bytes: parent 1 nodeid # 20 bytes: parent 2 nodeid # 20 bytes: nodeid indexformatv0 = ">4l20s20s20s" class revlogoldio(object): def __init__(self): self.size = struct.calcsize(indexformatv0) def parseindex(self, data, inline): s = self.size index = [] nodemap = {nullid: nullrev} n = off = 0 l = len(data) while off + s <= l: cur = data[off:off + s] off += s e = _unpack(indexformatv0, cur) # transform to revlogv1 format e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3], nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6]) index.append(e2) nodemap[e[6]] = n n += 1 # add the magic null revision at -1 index.append((0, 0, 0, -1, -1, -1, -1, nullid)) return index, nodemap, None def packentry(self, entry, node, version, rev): if gettype(entry[0]): raise RevlogError(_("index entry flags need RevlogNG")) e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4], node(entry[5]), node(entry[6]), entry[7]) return _pack(indexformatv0, *e2) # index ng: # 6 bytes: offset # 2 bytes: flags # 4 bytes: compressed length # 4 bytes: uncompressed length # 4 bytes: base rev # 4 bytes: link rev # 4 bytes: parent 1 rev # 4 bytes: parent 2 rev # 32 bytes: nodeid indexformatng = ">Qiiiiii20s12x" versionformat = ">I" # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte # signed integer) _maxentrysize = 0x7fffffff class revlogio(object): def __init__(self): self.size = struct.calcsize(indexformatng) def parseindex(self, data, inline): # call the C implementation to parse the index data index, cache = parsers.parse_index2(data, inline) return index, getattr(index, 'nodemap', None), cache def packentry(self, entry, node, version, rev): p = _pack(indexformatng, *entry) if rev == 0: p = _pack(versionformat, version) + p[4:] return p class revlog(object): """ the underlying revision storage object A revlog consists of two parts, an index and the revision data. The index is a file with a fixed record size containing information on each revision, including its nodeid (hash), the nodeids of its parents, the position and offset of its data within the data file, and the revision it's based on. Finally, each entry contains a linkrev entry that can serve as a pointer to external data. The revision data itself is a linear collection of data chunks. Each chunk represents a revision and is usually represented as a delta against the previous chunk. To bound lookup time, runs of deltas are limited to about 2 times the length of the original version data. This makes retrieval of a version proportional to its size, or O(1) relative to the number of revisions. Both pieces of the revlog are written to in an append-only fashion, which means we never need to rewrite a file to insert or remove data, and can use some simple techniques to avoid the need for locking while reading. 
If checkambig, indexfile is opened with checkambig=True at writing, to avoid file stat ambiguity. """ def __init__(self, opener, indexfile, checkambig=False): """ create a revlog object opener is a function that abstracts the file opening operation and can be used to implement COW semantics or the like. """ self.indexfile = indexfile self.datafile = indexfile[:-2] + ".d" self.opener = opener # When True, indexfile is opened with checkambig=True at writing, to # avoid file stat ambiguity. self._checkambig = checkambig # 3-tuple of (node, rev, text) for a raw revision. self._cache = None # Maps rev to chain base rev. self._chainbasecache = util.lrucachedict(100) # 2-tuple of (offset, data) of raw data from the revlog at an offset. self._chunkcache = (0, '') # How much data to read and cache into the raw revlog data cache. self._chunkcachesize = 65536 self._maxchainlen = None self._aggressivemergedeltas = False self.index = [] # Mapping of partial identifiers to full nodes. self._pcache = {} # Mapping of revision integer to full node. self._nodecache = {nullid: nullrev} self._nodepos = None v = REVLOG_DEFAULT_VERSION opts = getattr(opener, 'options', None) if opts is not None: if 'revlogv1' in opts: if 'generaldelta' in opts: v |= REVLOGGENERALDELTA else: v = 0 if 'chunkcachesize' in opts: self._chunkcachesize = opts['chunkcachesize'] if 'maxchainlen' in opts: self._maxchainlen = opts['maxchainlen'] if 'aggressivemergedeltas' in opts: self._aggressivemergedeltas = opts['aggressivemergedeltas'] self._lazydeltabase = bool(opts.get('lazydeltabase', False)) if self._chunkcachesize <= 0: raise RevlogError(_('revlog chunk cache size %r is not greater ' 'than 0') % self._chunkcachesize) elif self._chunkcachesize & (self._chunkcachesize - 1): raise RevlogError(_('revlog chunk cache size %r is not a power ' 'of 2') % self._chunkcachesize) indexdata = '' self._initempty = True try: f = self.opener(self.indexfile) indexdata = f.read() f.close() if len(indexdata) > 0: v = struct.unpack(versionformat, indexdata[:4])[0] self._initempty = False except IOError as inst: if inst.errno != errno.ENOENT: raise self.version = v self._inline = v & REVLOGNGINLINEDATA self._generaldelta = v & REVLOGGENERALDELTA flags = v & ~0xFFFF fmt = v & 0xFFFF if fmt == REVLOGV0 and flags: raise RevlogError(_("index %s unknown flags %#04x for format v0") % (self.indexfile, flags >> 16)) elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS: raise RevlogError(_("index %s unknown flags %#04x for revlogng") % (self.indexfile, flags >> 16)) elif fmt > REVLOGNG: raise RevlogError(_("index %s unknown format %d") % (self.indexfile, fmt)) self.storedeltachains = True self._io = revlogio() if self.version == REVLOGV0: self._io = revlogoldio() try: d = self._io.parseindex(indexdata, self._inline) except (ValueError, IndexError): raise RevlogError(_("index %s is corrupted") % (self.indexfile)) self.index, nodemap, self._chunkcache = d if nodemap is not None: self.nodemap = self._nodecache = nodemap if not self._chunkcache: self._chunkclear() # revnum -> (chain-length, sum-delta-length) self._chaininfocache = {} def tip(self): return self.node(len(self.index) - 2) def __contains__(self, rev): return 0 <= rev < len(self) def __len__(self): return len(self.index) - 1 def __iter__(self): return iter(xrange(len(self))) def revs(self, start=0, stop=None): """iterate over all rev in this revlog (from start to stop)""" step = 1 if stop is not None: if start > stop: step = -1 stop += step else: stop = len(self) return xrange(start, stop, step) 
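    # --- Illustration (added comment, not original revlog code) -------------
    # Each self.index entry is the 8-tuple packed/unpacked by indexformatng:
    #
    #   (offset_type(offset, flags),  # start offset << 16 | flags -> start()/flags()
    #    compressed_length,           # -> length()
    #    uncompressed_length,         # -> rawsize()
    #    base_rev,                    # delta base -> chainbase()/deltaparent()
    #    link_rev,                    # -> linkrev()
    #    p1_rev, p2_rev,              # -> parentrevs()
    #    node_id)                     # 20-byte hash -> node()
    #
    # so e.g. self.index[rev][7] is the nodeid and self.index[rev][5:7] are
    # the parent revisions, as the accessors below rely on.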
@util.propertycache def nodemap(self): self.rev(self.node(0)) return self._nodecache def hasnode(self, node): try: self.rev(node) return True except KeyError: return False def clearcaches(self): self._cache = None self._chainbasecache.clear() self._chunkcache = (0, '') self._pcache = {} try: self._nodecache.clearcaches() except AttributeError: self._nodecache = {nullid: nullrev} self._nodepos = None def rev(self, node): try: return self._nodecache[node] except TypeError: raise except RevlogError: # parsers.c radix tree lookup failed raise LookupError(node, self.indexfile, _('no node')) except KeyError: # pure python cache lookup failed n = self._nodecache i = self.index p = self._nodepos if p is None: p = len(i) - 2 for r in xrange(p, -1, -1): v = i[r][7] n[v] = r if v == node: self._nodepos = r - 1 return r raise LookupError(node, self.indexfile, _('no node')) def node(self, rev): return self.index[rev][7] def linkrev(self, rev): return self.index[rev][4] def parents(self, node): i = self.index d = i[self.rev(node)] return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline def parentrevs(self, rev): return self.index[rev][5:7] def start(self, rev): return int(self.index[rev][0] >> 16) def end(self, rev): return self.start(rev) + self.length(rev) def length(self, rev): return self.index[rev][1] def chainbase(self, rev): base = self._chainbasecache.get(rev) if base is not None: return base index = self.index base = index[rev][3] while base != rev: rev = base base = index[rev][3] self._chainbasecache[rev] = base return base def chainlen(self, rev): return self._chaininfo(rev)[0] def _chaininfo(self, rev): chaininfocache = self._chaininfocache if rev in chaininfocache: return chaininfocache[rev] index = self.index generaldelta = self._generaldelta iterrev = rev e = index[iterrev] clen = 0 compresseddeltalen = 0 while iterrev != e[3]: clen += 1 compresseddeltalen += e[1] if generaldelta: iterrev = e[3] else: iterrev -= 1 if iterrev in chaininfocache: t = chaininfocache[iterrev] clen += t[0] compresseddeltalen += t[1] break e = index[iterrev] else: # Add text length of base since decompressing that also takes # work. For cache hits the length is already included. compresseddeltalen += e[1] r = (clen, compresseddeltalen) chaininfocache[rev] = r return r def _deltachain(self, rev, stoprev=None): """Obtain the delta chain for a revision. ``stoprev`` specifies a revision to stop at. If not specified, we stop at the base of the chain. Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of revs in ascending order and ``stopped`` is a bool indicating whether ``stoprev`` was hit. """ chain = [] # Alias to prevent attribute lookup in tight loop. index = self.index generaldelta = self._generaldelta iterrev = rev e = index[iterrev] while iterrev != e[3] and iterrev != stoprev: chain.append(iterrev) if generaldelta: iterrev = e[3] else: iterrev -= 1 e = index[iterrev] if iterrev == stoprev: stopped = True else: chain.append(iterrev) stopped = False chain.reverse() return chain, stopped def flags(self, rev): return self.index[rev][0] & 0xFFFF def rawsize(self, rev): """return the length of the uncompressed text for a given revision""" l = self.index[rev][2] if l >= 0: return l t = self.revision(self.node(rev)) return len(t) size = rawsize def ancestors(self, revs, stoprev=0, inclusive=False): """Generate the ancestors of 'revs' in reverse topological order. Does not generate revs lower than stoprev. 
See the documentation for ancestor.lazyancestors for more details.""" return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev, inclusive=inclusive) def descendants(self, revs): """Generate the descendants of 'revs' in revision order. Yield a sequence of revision numbers starting with a child of some rev in revs, i.e., each revision is *not* considered a descendant of itself. Results are ordered by revision number (a topological sort).""" first = min(revs) if first == nullrev: for i in self: yield i return seen = set(revs) for i in self.revs(start=first + 1): for x in self.parentrevs(i): if x != nullrev and x in seen: seen.add(i) yield i break def findcommonmissing(self, common=None, heads=None): """Return a tuple of the ancestors of common and the ancestors of heads that are not ancestors of common. In revset terminology, we return the tuple: ::common, (::heads) - (::common) The list is sorted by revision number, meaning it is topologically sorted. 'heads' and 'common' are both lists of node IDs. If heads is not supplied, uses all of the revlog's heads. If common is not supplied, uses nullid.""" if common is None: common = [nullid] if heads is None: heads = self.heads() common = [self.rev(n) for n in common] heads = [self.rev(n) for n in heads] # we want the ancestors, but inclusive class lazyset(object): def __init__(self, lazyvalues): self.addedvalues = set() self.lazyvalues = lazyvalues def __contains__(self, value): return value in self.addedvalues or value in self.lazyvalues def __iter__(self): added = self.addedvalues for r in added: yield r for r in self.lazyvalues: if not r in added: yield r def add(self, value): self.addedvalues.add(value) def update(self, values): self.addedvalues.update(values) has = lazyset(self.ancestors(common)) has.add(nullrev) has.update(common) # take all ancestors from heads that aren't in has missing = set() visit = collections.deque(r for r in heads if r not in has) while visit: r = visit.popleft() if r in missing: continue else: missing.add(r) for p in self.parentrevs(r): if p not in has: visit.append(p) missing = list(missing) missing.sort() return has, [self.node(r) for r in missing] def incrementalmissingrevs(self, common=None): """Return an object that can be used to incrementally compute the revision numbers of the ancestors of arbitrary sets that are not ancestors of common. This is an ancestor.incrementalmissingancestors object. 'common' is a list of revision numbers. If common is not supplied, uses nullrev. """ if common is None: common = [nullrev] return ancestor.incrementalmissingancestors(self.parentrevs, common) def findmissingrevs(self, common=None, heads=None): """Return the revision numbers of the ancestors of heads that are not ancestors of common. More specifically, return a list of revision numbers corresponding to nodes N such that every N satisfies the following constraints: 1. N is an ancestor of some node in 'heads' 2. N is not an ancestor of any node in 'common' The list is sorted by revision number, meaning it is topologically sorted. 'heads' and 'common' are both lists of revision numbers. If heads is not supplied, uses all of the revlog's heads. If common is not supplied, uses nullid.""" if common is None: common = [nullrev] if heads is None: heads = self.headrevs() inc = self.incrementalmissingrevs(common=common) return inc.missingancestors(heads) def findmissing(self, common=None, heads=None): """Return the ancestors of heads that are not ancestors of common. 
More specifically, return a list of nodes N such that every N satisfies the following constraints: 1. N is an ancestor of some node in 'heads' 2. N is not an ancestor of any node in 'common' The list is sorted by revision number, meaning it is topologically sorted. 'heads' and 'common' are both lists of node IDs. If heads is not supplied, uses all of the revlog's heads. If common is not supplied, uses nullid.""" if common is None: common = [nullid] if heads is None: heads = self.heads() common = [self.rev(n) for n in common] heads = [self.rev(n) for n in heads] inc = self.incrementalmissingrevs(common=common) return [self.node(r) for r in inc.missingancestors(heads)] def nodesbetween(self, roots=None, heads=None): """Return a topological path from 'roots' to 'heads'. Return a tuple (nodes, outroots, outheads) where 'nodes' is a topologically sorted list of all nodes N that satisfy both of these constraints: 1. N is a descendant of some node in 'roots' 2. N is an ancestor of some node in 'heads' Every node is considered to be both a descendant and an ancestor of itself, so every reachable node in 'roots' and 'heads' will be included in 'nodes'. 'outroots' is the list of reachable nodes in 'roots', i.e., the subset of 'roots' that is returned in 'nodes'. Likewise, 'outheads' is the subset of 'heads' that is also in 'nodes'. 'roots' and 'heads' are both lists of node IDs. If 'roots' is unspecified, uses nullid as the only root. If 'heads' is unspecified, uses list of all of the revlog's heads.""" nonodes = ([], [], []) if roots is not None: roots = list(roots) if not roots: return nonodes lowestrev = min([self.rev(n) for n in roots]) else: roots = [nullid] # Everybody's a descendant of nullid lowestrev = nullrev if (lowestrev == nullrev) and (heads is None): # We want _all_ the nodes! return ([self.node(r) for r in self], [nullid], list(self.heads())) if heads is None: # All nodes are ancestors, so the latest ancestor is the last # node. highestrev = len(self) - 1 # Set ancestors to None to signal that every node is an ancestor. ancestors = None # Set heads to an empty dictionary for later discovery of heads heads = {} else: heads = list(heads) if not heads: return nonodes ancestors = set() # Turn heads into a dictionary so we can remove 'fake' heads. # Also, later we will be using it to filter out the heads we can't # find from roots. heads = dict.fromkeys(heads, False) # Start at the top and keep marking parents until we're done. nodestotag = set(heads) # Remember where the top was so we can use it as a limit later. highestrev = max([self.rev(n) for n in nodestotag]) while nodestotag: # grab a node to tag n = nodestotag.pop() # Never tag nullid if n == nullid: continue # A node's revision number represents its place in a # topologically sorted list of nodes. r = self.rev(n) if r >= lowestrev: if n not in ancestors: # If we are possibly a descendant of one of the roots # and we haven't already been marked as an ancestor ancestors.add(n) # Mark as ancestor # Add non-nullid parents to list of nodes to tag. nodestotag.update([p for p in self.parents(n) if p != nullid]) elif n in heads: # We've seen it before, is it a fake head? # So it is, real heads should not be the ancestors of # any other heads. heads.pop(n) if not ancestors: return nonodes # Now that we have our set of ancestors, we want to remove any # roots that are not ancestors. # If one of the roots was nullid, everything is included anyway. 
if lowestrev > nullrev: # But, since we weren't, let's recompute the lowest rev to not # include roots that aren't ancestors. # Filter out roots that aren't ancestors of heads roots = [n for n in roots if n in ancestors] # Recompute the lowest revision if roots: lowestrev = min([self.rev(n) for n in roots]) else: # No more roots? Return empty list return nonodes else: # We are descending from nullid, and don't need to care about # any other roots. lowestrev = nullrev roots = [nullid] # Transform our roots list into a set. descendants = set(roots) # Also, keep the original roots so we can filter out roots that aren't # 'real' roots (i.e. are descended from other roots). roots = descendants.copy() # Our topologically sorted list of output nodes. orderedout = [] # Don't start at nullid since we don't want nullid in our output list, # and if nullid shows up in descendants, empty parents will look like # they're descendants. for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1): n = self.node(r) isdescendant = False if lowestrev == nullrev: # Everybody is a descendant of nullid isdescendant = True elif n in descendants: # n is already a descendant isdescendant = True # This check only needs to be done here because all the roots # will start being marked is descendants before the loop. if n in roots: # If n was a root, check if it's a 'real' root. p = tuple(self.parents(n)) # If any of its parents are descendants, it's not a root. if (p[0] in descendants) or (p[1] in descendants): roots.remove(n) else: p = tuple(self.parents(n)) # A node is a descendant if either of its parents are # descendants. (We seeded the dependents list with the roots # up there, remember?) if (p[0] in descendants) or (p[1] in descendants): descendants.add(n) isdescendant = True if isdescendant and ((ancestors is None) or (n in ancestors)): # Only include nodes that are both descendants and ancestors. orderedout.append(n) if (ancestors is not None) and (n in heads): # We're trying to figure out which heads are reachable # from roots. # Mark this head as having been reached heads[n] = True elif ancestors is None: # Otherwise, we're trying to discover the heads. # Assume this is a head because if it isn't, the next step # will eventually remove it. heads[n] = True # But, obviously its parents aren't. 
for p in self.parents(n): heads.pop(p, None) heads = [n for n, flag in heads.iteritems() if flag] roots = list(roots) assert orderedout assert roots assert heads return (orderedout, roots, heads) def headrevs(self): try: return self.index.headrevs() except AttributeError: return self._headrevs() def computephases(self, roots): return self.index.computephasesmapsets(roots) def _headrevs(self): count = len(self) if not count: return [nullrev] # we won't iter over filtered rev so nobody is a head at start ishead = [0] * (count + 1) index = self.index for r in self: ishead[r] = 1 # I may be an head e = index[r] ishead[e[5]] = ishead[e[6]] = 0 # my parent are not return [r for r, val in enumerate(ishead) if val] def heads(self, start=None, stop=None): """return the list of all nodes that have no children if start is specified, only heads that are descendants of start will be returned if stop is specified, it will consider all the revs from stop as if they had no children """ if start is None and stop is None: if not len(self): return [nullid] return [self.node(r) for r in self.headrevs()] if start is None: start = nullid if stop is None: stop = [] stoprevs = set([self.rev(n) for n in stop]) startrev = self.rev(start) reachable = set((startrev,)) heads = set((startrev,)) parentrevs = self.parentrevs for r in self.revs(start=startrev + 1): for p in parentrevs(r): if p in reachable: if r not in stoprevs: reachable.add(r) heads.add(r) if p in heads and p not in stoprevs: heads.remove(p) return [self.node(r) for r in heads] def children(self, node): """find the children of a given node""" c = [] p = self.rev(node) for r in self.revs(start=p + 1): prevs = [pr for pr in self.parentrevs(r) if pr != nullrev] if prevs: for pr in prevs: if pr == p: c.append(self.node(r)) elif p == nullrev: c.append(self.node(r)) return c def descendant(self, start, end): if start == nullrev: return True for i in self.descendants([start]): if i == end: return True elif i > end: break return False def commonancestorsheads(self, a, b): """calculate all the heads of the common ancestors of nodes a and b""" a, b = self.rev(a), self.rev(b) try: ancs = self.index.commonancestorsheads(a, b) except (AttributeError, OverflowError): # C implementation failed ancs = ancestor.commonancestorsheads(self.parentrevs, a, b) return map(self.node, ancs) def isancestor(self, a, b): """return True if node a is an ancestor of node b The implementation of this is trivial but the use of commonancestorsheads is not.""" return a in self.commonancestorsheads(a, b) def ancestor(self, a, b): """calculate the "best" common ancestor of nodes a and b""" a, b = self.rev(a), self.rev(b) try: ancs = self.index.ancestors(a, b) except (AttributeError, OverflowError): ancs = ancestor.ancestors(self.parentrevs, a, b) if ancs: # choose a consistent winner when there's a tie return min(map(self.node, ancs)) return nullid def _match(self, id): if isinstance(id, int): # rev return self.node(id) if len(id) == 20: # possibly a binary node # odds of a binary node being all hex in ASCII are 1 in 10**25 try: node = id self.rev(node) # quick search the index return node except LookupError: pass # may be partial hex id try: # str(rev) rev = int(id) if str(rev) != id: raise ValueError if rev < 0: rev = len(self) + rev if rev < 0 or rev >= len(self): raise ValueError return self.node(rev) except (ValueError, OverflowError): pass if len(id) == 40: try: # a full hex nodeid? 
node = bin(id) self.rev(node) return node except (TypeError, LookupError): pass def _partialmatch(self, id): try: n = self.index.partialmatch(id) if n and self.hasnode(n): return n return None except RevlogError: # parsers.c radix tree lookup gave multiple matches # fast path: for unfiltered changelog, radix tree is accurate if not getattr(self, 'filteredrevs', None): raise LookupError(id, self.indexfile, _('ambiguous identifier')) # fall through to slow path that filters hidden revisions except (AttributeError, ValueError): # we are pure python, or key was too short to search radix tree pass if id in self._pcache: return self._pcache[id] if len(id) < 40: try: # hex(node)[:...] l = len(id) // 2 # grab an even number of digits prefix = bin(id[:l * 2]) nl = [e[7] for e in self.index if e[7].startswith(prefix)] nl = [n for n in nl if hex(n).startswith(id) and self.hasnode(n)] if len(nl) > 0: if len(nl) == 1: self._pcache[id] = nl[0] return nl[0] raise LookupError(id, self.indexfile, _('ambiguous identifier')) return None except TypeError: pass def lookup(self, id): """locate a node based on: - revision number or str(revision number) - nodeid or subset of hex nodeid """ n = self._match(id) if n is not None: return n n = self._partialmatch(id) if n: return n raise LookupError(id, self.indexfile, _('no match found')) def cmp(self, node, text): """compare text with a given file revision returns True if text is different than what is stored. """ p1, p2 = self.parents(node) return hash(text, p1, p2) != node def _addchunk(self, offset, data): """Add a segment to the revlog cache. Accepts an absolute offset and the data that is at that location. """ o, d = self._chunkcache # try to add to existing cache if o + len(d) == offset and len(d) + len(data) < _chunksize: self._chunkcache = o, d + data else: self._chunkcache = offset, data def _loadchunk(self, offset, length, df=None): """Load a segment of raw data from the revlog. Accepts an absolute offset, length to read, and an optional existing file handle to read from. If an existing file handle is passed, it will be seeked and the original seek position will NOT be restored. Returns a str or buffer of raw byte data. """ if df is not None: closehandle = False else: if self._inline: df = self.opener(self.indexfile) else: df = self.opener(self.datafile) closehandle = True # Cache data both forward and backward around the requested # data, in a fixed size window. This helps speed up operations # involving reading the revlog backwards. cachesize = self._chunkcachesize realoffset = offset & ~(cachesize - 1) reallength = (((offset + length + cachesize) & ~(cachesize - 1)) - realoffset) df.seek(realoffset) d = df.read(reallength) if closehandle: df.close() self._addchunk(realoffset, d) if offset != realoffset or reallength != length: return util.buffer(d, offset - realoffset, length) return d def _getchunk(self, offset, length, df=None): """Obtain a segment of raw data from the revlog. Accepts an absolute offset, length of bytes to obtain, and an optional file handle to the already-opened revlog. If the file handle is used, it's original seek position will not be preserved. Requests for data may be returned from a cache. Returns a str or a buffer instance of raw byte data. """ o, d = self._chunkcache l = len(d) # is it in the cache? 
cachestart = offset - o cacheend = cachestart + length if cachestart >= 0 and cacheend <= l: if cachestart == 0 and cacheend == l: return d # avoid a copy return util.buffer(d, cachestart, cacheend - cachestart) return self._loadchunk(offset, length, df=df) def _chunkraw(self, startrev, endrev, df=None): """Obtain a segment of raw data corresponding to a range of revisions. Accepts the start and end revisions and an optional already-open file handle to be used for reading. If the file handle is read, its seek position will not be preserved. Requests for data may be satisfied by a cache. Returns a 2-tuple of (offset, data) for the requested range of revisions. Offset is the integer offset from the beginning of the revlog and data is a str or buffer of the raw byte data. Callers will need to call ``self.start(rev)`` and ``self.length(rev)`` to determine where each revision's data begins and ends. """ start = self.start(startrev) end = self.end(endrev) if self._inline: start += (startrev + 1) * self._io.size end += (endrev + 1) * self._io.size length = end - start return start, self._getchunk(start, length, df=df) def _chunk(self, rev, df=None): """Obtain a single decompressed chunk for a revision. Accepts an integer revision and an optional already-open file handle to be used for reading. If used, the seek position of the file will not be preserved. Returns a str holding uncompressed data for the requested revision. """ return decompress(self._chunkraw(rev, rev, df=df)[1]) def _chunks(self, revs, df=None): """Obtain decompressed chunks for the specified revisions. Accepts an iterable of numeric revisions that are assumed to be in ascending order. Also accepts an optional already-open file handle to be used for reading. If used, the seek position of the file will not be preserved. This function is similar to calling ``self._chunk()`` multiple times, but is faster. Returns a list with decompressed data for each requested revision. """ if not revs: return [] start = self.start length = self.length inline = self._inline iosize = self._io.size buffer = util.buffer l = [] ladd = l.append try: offset, data = self._chunkraw(revs[0], revs[-1], df=df) except OverflowError: # issue4215 - we can't cache a run of chunks greater than # 2G on Windows return [self._chunk(rev, df=df) for rev in revs] for rev in revs: chunkstart = start(rev) if inline: chunkstart += (rev + 1) * iosize chunklength = length(rev) ladd(decompress(buffer(data, chunkstart - offset, chunklength))) return l def _chunkclear(self): """Clear the raw chunk cache.""" self._chunkcache = (0, '') def deltaparent(self, rev): """return deltaparent of the given revision""" base = self.index[rev][3] if base == rev: return nullrev elif self._generaldelta: return base else: return rev - 1 def revdiff(self, rev1, rev2): """return or calculate a delta between two revisions""" if rev1 != nullrev and self.deltaparent(rev2) == rev1: return str(self._chunk(rev2)) return mdiff.textdiff(self.revision(rev1), self.revision(rev2)) def revision(self, nodeorrev, _df=None): """return an uncompressed revision of a given node or revision number. _df is an existing file handle to read from. It is meant to only be used internally. 
""" if isinstance(nodeorrev, int): rev = nodeorrev node = self.node(rev) else: node = nodeorrev rev = None cachedrev = None if node == nullid: return "" if self._cache: if self._cache[0] == node: return self._cache[2] cachedrev = self._cache[1] # look up what we need to read text = None if rev is None: rev = self.rev(node) # check rev flags if self.flags(rev) & ~REVIDX_KNOWN_FLAGS: raise RevlogError(_('incompatible revision flag %x') % (self.flags(rev) & ~REVIDX_KNOWN_FLAGS)) chain, stopped = self._deltachain(rev, stoprev=cachedrev) if stopped: text = self._cache[2] # drop cache to save memory self._cache = None bins = self._chunks(chain, df=_df) if text is None: text = str(bins[0]) bins = bins[1:] text = mdiff.patches(text, bins) text = self._checkhash(text, node, rev) self._cache = (node, rev, text) return text def hash(self, text, p1, p2): """Compute a node hash. Available as a function so that subclasses can replace the hash as needed. """ return hash(text, p1, p2) def _checkhash(self, text, node, rev): p1, p2 = self.parents(node) self.checkhash(text, p1, p2, node, rev) return text def checkhash(self, text, p1, p2, node, rev=None): if node != self.hash(text, p1, p2): revornode = rev if revornode is None: revornode = templatefilters.short(hex(node)) raise RevlogError(_("integrity check failed on %s:%s") % (self.indexfile, revornode)) def checkinlinesize(self, tr, fp=None): """Check if the revlog is too big for inline and convert if so. This should be called after revisions are added to the revlog. If the revlog has grown too large to be an inline revlog, it will convert it to use multiple index and data files. """ if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline: return trinfo = tr.find(self.indexfile) if trinfo is None: raise RevlogError(_("%s not found in the transaction") % self.indexfile) trindex = trinfo[2] if trindex is not None: dataoff = self.start(trindex) else: # revlog was stripped at start of transaction, use all leftover data trindex = len(self) - 1 dataoff = self.end(-2) tr.add(self.datafile, dataoff) if fp: fp.flush() fp.close() df = self.opener(self.datafile, 'w') try: for r in self: df.write(self._chunkraw(r, r)[1]) finally: df.close() fp = self.opener(self.indexfile, 'w', atomictemp=True, checkambig=self._checkambig) self.version &= ~(REVLOGNGINLINEDATA) self._inline = False for i in self: e = self._io.packentry(self.index[i], self.node, self.version, i) fp.write(e) # if we don't call close, the temp file will never replace the # real index fp.close() tr.replace(self.indexfile, trindex * self._io.size) self._chunkclear() def addrevision(self, text, transaction, link, p1, p2, cachedelta=None, node=None): """add a revision to the log text - the revision data to add transaction - the transaction object used for rollback link - the linkrev data to add p1, p2 - the parent nodeids of the revision cachedelta - an optional precomputed delta node - nodeid of revision; typically node is not specified, and it is computed by default as hash(text, p1, p2), however subclasses might use different hashing method (and override checkhash() in such case) """ if link == nullrev: raise RevlogError(_("attempted to add linkrev -1 to %s") % self.indexfile) if len(text) > _maxentrysize: raise RevlogError( _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB") % (self.indexfile, len(text))) node = node or self.hash(text, p1, p2) if node in self.nodemap: return node dfh = None if not self._inline: dfh = self.opener(self.datafile, "a+") ifh = 
self.opener(self.indexfile, "a+", checkambig=self._checkambig) try: return self._addrevision(node, text, transaction, link, p1, p2, REVIDX_DEFAULT_FLAGS, cachedelta, ifh, dfh) finally: if dfh: dfh.close() ifh.close() def compress(self, text): """ generate a possibly-compressed representation of text """ if not text: return ("", text) l = len(text) bin = None if l < 44: pass elif l > 1000000: # zlib makes an internal copy, thus doubling memory usage for # large files, so lets do this in pieces z = zlib.compressobj() p = [] pos = 0 while pos < l: pos2 = pos + 2**20 p.append(z.compress(text[pos:pos2])) pos = pos2 p.append(z.flush()) if sum(map(len, p)) < l: bin = "".join(p) else: bin = _compress(text) if bin is None or len(bin) > l: if text[0] == '\0': return ("", text) return ('u', text) return ("", bin) def _isgooddelta(self, d, textlen): """Returns True if the given delta is good. Good means that it is within the disk span, disk size, and chain length bounds that we know to be performant.""" if d is None: return False # - 'dist' is the distance from the base revision -- bounding it limits # the amount of I/O we need to do. # - 'compresseddeltalen' is the sum of the total size of deltas we need # to apply -- bounding it limits the amount of CPU we consume. dist, l, data, base, chainbase, chainlen, compresseddeltalen = d if (dist > textlen * 4 or l > textlen or compresseddeltalen > textlen * 2 or (self._maxchainlen and chainlen > self._maxchainlen)): return False return True def _addrevision(self, node, text, transaction, link, p1, p2, flags, cachedelta, ifh, dfh, alwayscache=False): """internal function to add revisions to the log see addrevision for argument descriptions. invariants: - text is optional (can be None); if not set, cachedelta must be set. if both are set, they must correspond to each other. """ btext = [text] def buildtext(): if btext[0] is not None: return btext[0] baserev = cachedelta[0] delta = cachedelta[1] # special case deltas which replace entire base; no need to decode # base revision. this neatly avoids censored bases, which throw when # they're decoded. hlen = struct.calcsize(">lll") if delta[:hlen] == mdiff.replacediffheader(self.rawsize(baserev), len(delta) - hlen): btext[0] = delta[hlen:] else: if self._inline: fh = ifh else: fh = dfh basetext = self.revision(self.node(baserev), _df=fh) btext[0] = mdiff.patch(basetext, delta) try: self.checkhash(btext[0], p1, p2, node) if flags & REVIDX_ISCENSORED: raise RevlogError(_('node %s is not censored') % node) except CensoredNodeError: # must pass the censored index flag to add censored revisions if not flags & REVIDX_ISCENSORED: raise return btext[0] def builddelta(rev): # can we use the cached delta? 
if cachedelta and cachedelta[0] == rev: delta = cachedelta[1] else: t = buildtext() if self.iscensored(rev): # deltas based on a censored revision must replace the # full content in one patch, so delta works everywhere header = mdiff.replacediffheader(self.rawsize(rev), len(t)) delta = header + t else: if self._inline: fh = ifh else: fh = dfh ptext = self.revision(self.node(rev), _df=fh) delta = mdiff.textdiff(ptext, t) header, data = self.compress(delta) deltalen = len(header) + len(data) chainbase = self.chainbase(rev) dist = deltalen + offset - self.start(chainbase) if self._generaldelta: base = rev else: base = chainbase chainlen, compresseddeltalen = self._chaininfo(rev) chainlen += 1 compresseddeltalen += deltalen return (dist, deltalen, (header, data), base, chainbase, chainlen, compresseddeltalen) curr = len(self) prev = curr - 1 offset = self.end(prev) delta = None p1r, p2r = self.rev(p1), self.rev(p2) # full versions are inserted when the needed deltas # become comparable to the uncompressed text if text is None: textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]), cachedelta[1]) else: textlen = len(text) # should we try to build a delta? if prev != nullrev and self.storedeltachains: tested = set() # This condition is true most of the time when processing # changegroup data into a generaldelta repo. The only time it # isn't true is if this is the first revision in a delta chain # or if ``format.generaldelta=true`` disabled ``lazydeltabase``. if cachedelta and self._generaldelta and self._lazydeltabase: # Assume what we received from the server is a good choice # build delta will reuse the cache candidatedelta = builddelta(cachedelta[0]) tested.add(cachedelta[0]) if self._isgooddelta(candidatedelta, textlen): delta = candidatedelta if delta is None and self._generaldelta: # exclude already lazy tested base if any parents = [p for p in (p1r, p2r) if p != nullrev and p not in tested] if parents and not self._aggressivemergedeltas: # Pick whichever parent is closer to us (to minimize the # chance of having to build a fulltext). parents = [max(parents)] tested.update(parents) pdeltas = [] for p in parents: pd = builddelta(p) if self._isgooddelta(pd, textlen): pdeltas.append(pd) if pdeltas: delta = min(pdeltas, key=lambda x: x[1]) if delta is None and prev not in tested: # other approach failed try against prev to hopefully save us a # fulltext. candidatedelta = builddelta(prev) if self._isgooddelta(candidatedelta, textlen): delta = candidatedelta if delta is not None: dist, l, data, base, chainbase, chainlen, compresseddeltalen = delta else: text = buildtext() data = self.compress(text) l = len(data[1]) + len(data[0]) base = chainbase = curr e = (offset_type(offset, flags), l, textlen, base, link, p1r, p2r, node) self.index.insert(-1, e) self.nodemap[node] = curr entry = self._io.packentry(e, self.node, self.version, curr) self._writeentry(transaction, ifh, dfh, entry, data, link, offset) if alwayscache and text is None: text = buildtext() if type(text) == str: # only accept immutable objects self._cache = (node, curr, text) self._chainbasecache[curr] = chainbase return node def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset): # Files opened in a+ mode have inconsistent behavior on various # platforms. Windows requires that a file positioning call be made # when the file handle transitions between reads and writes. See # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other # platforms, Python or the platform itself can be buggy. 
Some versions # of Solaris have been observed to not append at the end of the file # if the file was seeked to before the end. See issue4943 for more. # # We work around this issue by inserting a seek() before writing. # Note: This is likely not necessary on Python 3. ifh.seek(0, os.SEEK_END) if dfh: dfh.seek(0, os.SEEK_END) curr = len(self) - 1 if not self._inline: transaction.add(self.datafile, offset) transaction.add(self.indexfile, curr * len(entry)) if data[0]: dfh.write(data[0]) dfh.write(data[1]) ifh.write(entry) else: offset += curr * self._io.size transaction.add(self.indexfile, offset, curr) ifh.write(entry) ifh.write(data[0]) ifh.write(data[1]) self.checkinlinesize(transaction, ifh) def addgroup(self, cg, linkmapper, transaction, addrevisioncb=None): """ add a delta group given a set of deltas, add them to the revision log. the first delta is against its parent, which should be in our log, the rest are against the previous delta. If ``addrevisioncb`` is defined, it will be called with arguments of this revlog and the node that was added. """ # track the base of the current delta log content = [] node = None r = len(self) end = 0 if r: end = self.end(r - 1) ifh = self.opener(self.indexfile, "a+", checkambig=self._checkambig) isize = r * self._io.size if self._inline: transaction.add(self.indexfile, end + isize, r) dfh = None else: transaction.add(self.indexfile, isize, r) transaction.add(self.datafile, end) dfh = self.opener(self.datafile, "a+") def flush(): if dfh: dfh.flush() ifh.flush() try: # loop through our set of deltas chain = None for chunkdata in iter(lambda: cg.deltachunk(chain), {}): node = chunkdata['node'] p1 = chunkdata['p1'] p2 = chunkdata['p2'] cs = chunkdata['cs'] deltabase = chunkdata['deltabase'] delta = chunkdata['delta'] flags = chunkdata['flags'] or REVIDX_DEFAULT_FLAGS content.append(node) link = linkmapper(cs) if node in self.nodemap: # this can happen if two branches make the same change chain = node continue for p in (p1, p2): if p not in self.nodemap: raise LookupError(p, self.indexfile, _('unknown parent')) if deltabase not in self.nodemap: raise LookupError(deltabase, self.indexfile, _('unknown delta base')) baserev = self.rev(deltabase) if baserev != nullrev and self.iscensored(baserev): # if base is censored, delta must be full replacement in a # single patch operation hlen = struct.calcsize(">lll") oldlen = self.rawsize(baserev) newlen = len(delta) - hlen if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen): raise error.CensoredBaseError(self.indexfile, self.node(baserev)) if not flags and self._peek_iscensored(baserev, delta, flush): flags |= REVIDX_ISCENSORED # We assume consumers of addrevisioncb will want to retrieve # the added revision, which will require a call to # revision(). revision() will fast path if there is a cache # hit. So, we tell _addrevision() to always cache in this case. 
chain = self._addrevision(node, None, transaction, link, p1, p2, flags, (baserev, delta), ifh, dfh, alwayscache=bool(addrevisioncb)) if addrevisioncb: addrevisioncb(self, chain) if not dfh and not self._inline: # addrevision switched from inline to conventional # reopen the index ifh.close() dfh = self.opener(self.datafile, "a+") ifh = self.opener(self.indexfile, "a+", checkambig=self._checkambig) finally: if dfh: dfh.close() ifh.close() return content def iscensored(self, rev): """Check if a file revision is censored.""" return False def _peek_iscensored(self, baserev, delta, flush): """Quickly check if a delta produces a censored revision.""" return False def getstrippoint(self, minlink): """find the minimum rev that must be stripped to strip the linkrev Returns a tuple containing the minimum rev and a set of all revs that have linkrevs that will be broken by this strip. """ brokenrevs = set() strippoint = len(self) heads = {} futurelargelinkrevs = set() for head in self.headrevs(): headlinkrev = self.linkrev(head) heads[head] = headlinkrev if headlinkrev >= minlink: futurelargelinkrevs.add(headlinkrev) # This algorithm involves walking down the rev graph, starting at the # heads. Since the revs are topologically sorted according to linkrev, # once all head linkrevs are below the minlink, we know there are # no more revs that could have a linkrev greater than minlink. # So we can stop walking. while futurelargelinkrevs: strippoint -= 1 linkrev = heads.pop(strippoint) if linkrev < minlink: brokenrevs.add(strippoint) else: futurelargelinkrevs.remove(linkrev) for p in self.parentrevs(strippoint): if p != nullrev: plinkrev = self.linkrev(p) heads[p] = plinkrev if plinkrev >= minlink: futurelargelinkrevs.add(plinkrev) return strippoint, brokenrevs def strip(self, minlink, transaction): """truncate the revlog on the first revision with a linkrev >= minlink This function is called when we're stripping revision minlink and its descendants from the repository. We have to remove all revisions with linkrev >= minlink, because the equivalent changelog revisions will be renumbered after the strip. So we truncate the revlog on the first of these revisions, and trust that the caller has saved the revisions that shouldn't be removed and that it'll re-add them after this truncation. """ if len(self) == 0: return rev, _ = self.getstrippoint(minlink) if rev == len(self): return # first truncate the files on disk end = self.start(rev) if not self._inline: transaction.add(self.datafile, end) end = rev * self._io.size else: end += rev * self._io.size transaction.add(self.indexfile, end) # then reset internal state in memory to forget those revisions self._cache = None self._chaininfocache = {} self._chunkclear() for x in xrange(rev, len(self)): del self.nodemap[self.node(x)] del self.index[rev:-1] def checksize(self): expected = 0 if len(self): expected = max(0, self.end(len(self) - 1)) try: f = self.opener(self.datafile) f.seek(0, 2) actual = f.tell() f.close() dd = actual - expected except IOError as inst: if inst.errno != errno.ENOENT: raise dd = 0 try: f = self.opener(self.indexfile) f.seek(0, 2) actual = f.tell() f.close() s = self._io.size i = max(0, actual // s) di = actual - (i * s) if self._inline: databytes = 0 for r in self: databytes += max(0, self.length(r)) dd = 0 di = actual - len(self) * s - databytes except IOError as inst: if inst.errno != errno.ENOENT: raise di = 0 return (dd, di) def files(self): res = [self.indexfile] if not self._inline: res.append(self.datafile) return res
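# ---------------------------------------------------------------------------
# Editor's note: an illustrative, standalone sketch (not part of revlog.py) of
# the piecewise zlib strategy used by compress() above.  For texts larger than
# about 1MB, compressing in 1MB pieces avoids zlib's internal copy doubling
# peak memory; the result is only kept when it is actually smaller than the
# input.  The function name and default piece size below are hypothetical.
import zlib

def compress_in_pieces(data, piece_size=2 ** 20):
    """Compress ``data`` (bytes) one piece at a time; fall back to the raw
    bytes when compression does not actually save space."""
    z = zlib.compressobj()
    pieces = []
    pos = 0
    while pos < len(data):
        # feed the compressor one bounded slice at a time instead of the
        # whole buffer, so only ~piece_size extra bytes are held by zlib
        pieces.append(z.compress(data[pos:pos + piece_size]))
        pos += piece_size
    pieces.append(z.flush())
    compressed = b"".join(pieces)
    return compressed if len(compressed) < len(data) else data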
############################################################################## # # Copyright (c) 2002 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """Locale and LocaleProvider Implementation. """ __docformat__ = 'restructuredtext' import os from datetime import date from zope.interface import implementer from zope.i18n.interfaces.locales import ILocale from zope.i18n.interfaces.locales import ILocaleDisplayNames, ILocaleDates from zope.i18n.interfaces.locales import ILocaleVersion, ILocaleIdentity from zope.i18n.interfaces.locales import ILocaleTimeZone, ILocaleCalendar from zope.i18n.interfaces.locales import ILocaleCurrency, ILocaleNumbers from zope.i18n.interfaces.locales import ILocaleFormat, ILocaleFormatLength from zope.i18n.interfaces.locales import ILocaleOrientation from zope.i18n.interfaces.locales import ILocaleDayContext, ILocaleMonthContext from zope.i18n.format import NumberFormat, DateTimeFormat from zope.i18n.locales.inheritance import \ AttributeInheritance, InheritingDictionary, NoParentException from zope.i18n.locales.provider import LocaleProvider, LoadLocaleError # Setup the locale directory from zope import i18n LOCALEDIR = os.path.join(os.path.dirname(i18n.__file__), "locales", "data") # Global LocaleProvider. We really just need this single one. locales = LocaleProvider(LOCALEDIR) # Define some constants that can be used JANUARY = 1 FEBRUARY = 2 MARCH = 3 APRIL = 4 MAY = 5 JUNE = 6 JULY = 7 AUGUST = 8 SEPTEMBER = 9 OCTOBER = 10 NOVEMBER = 11 DECEMBER = 12 MONDAY = 1 TUESDAY = 2 WEDNESDAY = 3 THURSDAY = 4 FRIDAY = 5 SATURDAY = 6 SUNDAY = 7 dayMapping = {'mon': 1, 'tue': 2, 'wed': 3, 'thu': 4, 'fri': 5, 'sat': 6, 'sun': 7} BC = 1 AD = 2 calendarAliases = {'islamic': ('arabic',), 'islamic-civil': ('civil-arabic',), 'buddhist': ('thai-buddhist', )} @implementer(ILocaleIdentity) class LocaleIdentity(object): """Represents a unique identification of the locale This class does not have to deal with inheritance. Examples:: >>> id = LocaleIdentity('en') >>> id <LocaleIdentity (en, None, None, None)> >>> id = LocaleIdentity('en', 'latin') >>> id <LocaleIdentity (en, latin, None, None)> >>> id = LocaleIdentity('en', 'latin', 'US') >>> id <LocaleIdentity (en, latin, US, None)> >>> id = LocaleIdentity('en', 'latin', 'US', 'POSIX') >>> id <LocaleIdentity (en, latin, US, POSIX)> >>> id = LocaleIdentity('en', None, 'US', 'POSIX') >>> id <LocaleIdentity (en, None, US, POSIX)> """ def __init__(self, language=None, script=None, territory=None, variant=None): """Initialize object.""" self.language = language self.script = script self.territory = territory self.variant = variant def __repr__(self): """See zope.i18n.interfaces.ILocaleIdentity """ return "<LocaleIdentity (%s, %s, %s, %s)>" %( self.language, self.script, self.territory, self.variant) @implementer(ILocaleVersion) class LocaleVersion(object): """Represents a particular version of a locale This class does not have to deal with inheritance. 
Examples:: >>> from datetime import datetime >>> (LocaleVersion('1.0', datetime(2004, 1, 1), 'no notes') == ... LocaleVersion('1.0', datetime(2004, 1, 1), 'no notes again')) True >>> (LocaleVersion('1.0', datetime(2004, 1, 1), 'no notes') < ... LocaleVersion('1.0', datetime(2004, 1, 2), 'no notes again')) True >>> (LocaleVersion('1.0', datetime(2004, 1, 1), 'no notes') < ... LocaleVersion('0.9', datetime(2004, 1, 2), 'no notes again')) True >>> (LocaleVersion('1.0', datetime(2004, 1, 1), 'no notes') > ... LocaleVersion('0.9', datetime(2004, 1, 1), 'no notes again')) True """ def __init__(self, number, generationDate, notes): """Initialize object.""" self.number = number assert isinstance(generationDate, (date, type(None))) self.generationDate = generationDate self.notes = notes def __lt__(self, other): return ((self.generationDate, self.number) < (other.generationDate, other.number)) def __eq__(self, other): return ((self.generationDate, self.number) == (other.generationDate, other.number)) @implementer(ILocaleDisplayNames) class LocaleDisplayNames(AttributeInheritance): """Locale display names with inheritable data. Examples:: >>> from zope.i18n.locales.tests.test_docstrings import \\ ... LocaleInheritanceStub >>> root = LocaleInheritanceStub() >>> root.displayNames = LocaleDisplayNames() >>> root.displayNames.languages = ['en', 'de'] >>> root.displayNames.keys = ['foo', 'bar'] >>> locale = LocaleInheritanceStub(nextLocale=root) >>> locale.displayNames = LocaleDisplayNames() >>> locale.displayNames.keys = ['fu', 'bahr'] Here you can see the inheritance in action:: >>> locale.displayNames.languages ['en', 'de'] >>> locale.displayNames.keys ['fu', 'bahr'] """ @implementer(ILocaleTimeZone) class LocaleTimeZone(object): """Specifies one of the timezones of a specific locale. The attributes of this class are not inherited, since all timezone information is always provided together. Example:: >>> tz = LocaleTimeZone('Europe/Berlin') >>> tz.cities = ['Berlin'] >>> tz.names = {'standard': ('Mitteleuropaeische Zeit', 'MEZ'), ... 'daylight': ('Mitteleuropaeische Sommerzeit', 'MESZ')} >>> tz.type 'Europe/Berlin' >>> tz.cities ['Berlin'] """ def __init__(self, type): """Initialize the object.""" self.type = type self.cities = [] self.names = {} @implementer(ILocaleFormat) class LocaleFormat(object): """Specifies one of the format of a specific format length. The attributes of this class are not inherited, since all format information is always provided together. Note that this information by itself is often not useful, since other calendar data is required to use the specified pattern for formatting and parsing. """ def __init__(self, type=None): """Initialize the object.""" self.type = type self.displayName = u"" self.pattern = u"" @implementer(ILocaleFormatLength) class LocaleFormatLength(AttributeInheritance): """Specifies one of the format lengths of a specific quantity, like numbers, dates, times and datetimes.""" def __init__(self, type=None): """Initialize the object.""" self.type = type self.default = None @implementer(ILocaleMonthContext) class LocaleMonthContext(AttributeInheritance): def __init__(self, type=None): """Initialize the object.""" self.type = type self.default = u"wide" @implementer(ILocaleDayContext) class LocaleDayContext(AttributeInheritance): def __init__(self, type=None): """Initialize the object.""" self.type = type self.default = u"wide" @implementer(ILocaleCalendar) class LocaleCalendar(AttributeInheritance): """Represents locale data for a calendar, like 'gregorian'. 
This object is particular tricky, since the calendar not only inherits from higher-up locales, but also from the specified gregorian calendar available for this locale. This was done, since most other calendars have different year and era data, but everything else remains the same. Example: Even though the 'Locale' object has no 'calendar' attribute for real, it helps us here to make the example simpler. >>> from zope.i18n.locales.tests.test_docstrings import \\ ... LocaleInheritanceStub >>> root = LocaleInheritanceStub() >>> root.calendar = LocaleCalendar('gregorian') >>> locale = LocaleInheritanceStub(nextLocale=root) >>> locale.calendar = LocaleCalendar('gregorian') >>> root.calendar.months = InheritingDictionary( ... {1: (u"January", u"Jan"), 2: (u"February", u"Feb")}) >>> locale.calendar.months = InheritingDictionary( ... {2: (u"Februar", u"Feb"), 3: (u"Maerz", u"Mrz")}) >>> locale.calendar.getMonthNames()[:4] [u'January', u'Februar', u'Maerz', None] >>> locale.calendar.getMonthTypeFromName(u"January") 1 >>> locale.calendar.getMonthTypeFromName(u"Februar") 2 >>> locale.calendar.getMonthAbbreviations()[:4] [u'Jan', u'Feb', u'Mrz', None] >>> locale.calendar.getMonthTypeFromAbbreviation(u"Jan") 1 >>> locale.calendar.getMonthTypeFromAbbreviation(u"Mrz") 3 >>> root.calendar.days = InheritingDictionary( ... {1: (u"Monday", u"Mon"), 2: (u"Tuesday", u"Tue")}) >>> locale.calendar.days = InheritingDictionary( ... {2: (u"Dienstag", u"Die"), 3: (u"Mittwoch", u"Mit")}) >>> locale.calendar.getDayNames()[:4] [u'Monday', u'Dienstag', u'Mittwoch', None] >>> locale.calendar.getDayTypeFromName(u"Monday") 1 >>> locale.calendar.getDayTypeFromName(u"Dienstag") 2 >>> locale.calendar.getDayAbbreviations()[:4] [u'Mon', u'Die', u'Mit', None] >>> locale.calendar.getDayTypeFromAbbreviation(u"Mon") 1 >>> locale.calendar.getDayTypeFromAbbreviation(u"Die") 2 >>> root.calendar.week = {'firstDay': 1} >>> locale.calendar.getFirstWeekDayName() u'Monday' Let's test the direct attribute access as well. >>> root.am = u"AM" >>> root.pm = u"PM" >>> locale.pm = u"nachm." >>> locale.pm u'nachm.' 
>>> locale.am u'AM' Note that ``isWeekend`` is not implemented: >>> locale.calendar.isWeekend(object()) False >>> locale.calendar.isWeekend(None) False >>> locale.calendar.isWeekend('anything') False """ def __init__(self, type): """Initialize the object.""" self.type = type def getMonthNames(self): """See zope.i18n.interfaces.ILocaleCalendar""" return [self.months.get(type, (None, None))[0] for type in range(1, 13)] def getMonthTypeFromName(self, name): """See zope.i18n.interfaces.ILocaleCalendar""" for item in self.months.items(): if item[1][0] == name: return item[0] def getMonthAbbreviations(self): """See zope.i18n.interfaces.ILocaleCalendar""" return [self.months.get(type, (None, None))[1] for type in range(1, 13)] def getMonthTypeFromAbbreviation(self, abbr): """See zope.i18n.interfaces.ILocaleCalendar""" for item in self.months.items(): if item[1][1] == abbr: return item[0] def getDayNames(self): """See zope.i18n.interfaces.ILocaleCalendar""" return [self.days.get(type, (None, None))[0] for type in range(1, 8)] def getDayTypeFromName(self, name): """See zope.i18n.interfaces.ILocaleCalendar""" for item in self.days.items(): if item[1][0] == name: return item[0] def getDayAbbreviations(self): """See zope.i18n.interfaces.ILocaleCalendar""" return [self.days.get(type, (None, None))[1] for type in range(1, 8)] def getDayTypeFromAbbreviation(self, abbr): """See zope.i18n.interfaces.ILocaleCalendar""" for item in self.days.items(): if item[1][1] == abbr: return item[0] def isWeekend(self, datetime): """See zope.i18n.interfaces.ILocaleCalendar""" # TODO: Implement this method return False def getFirstWeekDayName(self): """See zope.i18n.interfaces.ILocaleCalendar""" firstDayNumber = self.week['firstDay'] return self.days[firstDayNumber][0] @implementer(ILocaleDates) class LocaleDates(AttributeInheritance): """Simple ILocaleDates implementation that can inherit data from other locales. Examples:: >>> from zope.i18n.tests.test_formats import LocaleCalendarStub as Stub >>> from datetime import datetime, date, time >>> dates = LocaleDates() >>> cal = LocaleCalendar('gregorian') >>> cal.months = Stub.months >>> cal.days = Stub.days >>> cal.am = Stub.am >>> cal.pm = Stub.pm >>> cal.eras = Stub.eras >>> cal.week = {'firstDay': 1, 'minDays': 1} >>> dates.calendars = {'gregorian': cal} Setting up and accessing date format through a specific length (very common scenario):: >>> fulllength = LocaleFormatLength() >>> format = LocaleFormat() >>> format.pattern = u"EEEE, d. MMMM yyyy" >>> fulllength.formats = {None: format} >>> mediumlength = LocaleFormatLength() >>> format = LocaleFormat() >>> format.pattern = u"dd.MM.yyyy" >>> mediumlength.formats = {None: format} >>> cal.dateFormats = {'full': fulllength, 'medium': mediumlength} >>> cal.defaultDateFormat = 'medium' >>> formatter = dates.getFormatter('date') >>> formatter.format(date(2004, 2, 4)) u'04.02.2004' >>> formatter = dates.getFormatter('date', length='full') >>> formatter.format(date(2004, 2, 4)) u'Mittwoch, 4. 
Februar 2004' Let's also test the time formatter:: >>> fulllength = LocaleFormatLength() >>> format = LocaleFormat() >>> format.pattern = u"H:mm' Uhr 'z" >>> fulllength.formats = {None: format} >>> mediumlength = LocaleFormatLength() >>> format = LocaleFormat() >>> format.pattern = u"HH:mm:ss" >>> mediumlength.formats = {None: format} >>> cal.timeFormats = {'full': fulllength, 'medium': mediumlength} >>> cal.defaultTimeFormat = 'medium' >>> formatter = dates.getFormatter('time') >>> formatter.format(time(12, 15, 00)) u'12:15:00' >>> formatter = dates.getFormatter('time', length='full') >>> formatter.format(time(12, 15, 00)) u'12:15 Uhr +000' The datetime formatter is a bit special, since it is constructed from the other two:: >>> length = LocaleFormatLength() >>> format = LocaleFormat() >>> format.pattern = u"{1} {0}" >>> length.formats = {None: format} >>> cal.dateTimeFormats = {None: length} >>> formatter = dates.getFormatter('dateTime') >>> formatter.format(datetime(2004, 2, 4, 12, 15, 00)) u'04.02.2004 12:15:00' >>> formatter = dates.getFormatter('dateTime', length='full') >>> formatter.format(datetime(2004, 2, 4, 12, 15, 00)) u'Mittwoch, 4. Februar 2004 12:15 Uhr +000' Finally, we'll test some invalid input:: >>> dates.getFormatter('timeDate') Traceback (most recent call last): ValueError: Invalid category: timeDate >>> dates.getFormatter('date', length='superlong') Traceback (most recent call last): ValueError: Invalid format length: superlong >>> dates.getFormatter('date', calendar='irish-catholic') Traceback (most recent call last): ValueError: Invalid calendar: irish-catholic """ def getFormatter(self, category, length=None, name=None, calendar=u"gregorian"): """See zope.i18n.interfaces.locales.ILocaleDates""" if category not in (u"date", u"time", u"dateTime"): raise ValueError('Invalid category: %s' % category) if calendar not in (u"gregorian", u"arabic", u"chinese", u"civil-arabic", u"hebrew", u"japanese", u"thai-buddhist"): raise ValueError('Invalid calendar: %s' % calendar) if length not in (u"short", u"medium", u"long", u"full", None): raise ValueError('Invalid format length: %s' % length) cal = self.calendars[calendar] formats = getattr(cal, category+'Formats') if length is None: length = getattr( cal, 'default'+category[0].upper()+category[1:]+'Format', list(formats.keys())[0]) # 'datetime' is always a bit special; we often do not have a length # specification, but we need it for looking up the date and time # formatters if category == 'dateTime': formatLength = formats.get(length, formats[None]) else: formatLength = formats[length] if name is None: name = formatLength.default format = formatLength.formats[name] pattern = format.pattern if category == 'dateTime': date_pat = self.getFormatter( 'date', length, name, calendar).getPattern() time_pat = self.getFormatter( 'time', length, name, calendar).getPattern() pattern = pattern.replace('{1}', date_pat) pattern = pattern.replace('{0}', time_pat) return DateTimeFormat(pattern, cal) @implementer(ILocaleCurrency) class LocaleCurrency(object): """Simple implementation of ILocaleCurrency without inheritance support, since it is not needed for a single currency.""" def __init__(self, type): """Initialize object.""" self.type = type self.symbol = None self.symbolChoice = False self.displayName = None @implementer(ILocaleNumbers) class LocaleNumbers(AttributeInheritance): """Implementation of ILocaleCurrency including inheritance support. Examples:: >>> numbers = LocaleNumbers() >>> numbers.symbols = { ... 
'decimal': ',', 'group': '.', 'list': ';', 'percentSign': '%', ... 'nativeZeroDigit': '0', 'patternDigit': '#', 'plusSign': '+', ... 'minusSign': '-', 'exponential': 'E', 'perMille': 'o/oo', ... 'infinity': 'oo', 'nan': 'N/A'} Setting up and accessing totally unnamed decimal format (very common scenario):: >>> length = LocaleFormatLength() >>> format = LocaleFormat() >>> format.pattern = u"#,##0.###;-#,##0.###" >>> length.formats = {None: format} >>> numbers.decimalFormats = {None: length} >>> formatter = numbers.getFormatter('decimal') >>> formatter.format(3.4) u'3,4' >>> formatter.format(-3.4567) u'-3,457' >>> formatter.format(3210.4) u'3.210,4' Setting up and accessing scientific formats with named format lengths:: >>> longlength = LocaleFormatLength('long') >>> format = LocaleFormat() >>> format.pattern = u"0.000###E+00" >>> longlength.formats = {None: format} >>> mediumlength = LocaleFormatLength('long') >>> format = LocaleFormat() >>> format.pattern = u"0.00##E+00" >>> mediumlength.formats = {None: format} >>> numbers.scientificFormats = {'long': longlength, ... 'medium': mediumlength} >>> numbers.defaultScientificFormat = 'long' >>> formatter = numbers.getFormatter('scientific') >>> formatter.format(1234.5678) u'1,234568E+03' >>> formatter = numbers.getFormatter('scientific', 'medium') >>> formatter.format(1234.5678) u'1,2346E+03' Setting up and accessing percent formats with named format lengths and format names:: >>> longlength = LocaleFormatLength('long') >>> fooformat = LocaleFormat() >>> fooformat.pattern = u"0.##0%" >>> barformat = LocaleFormat() >>> barformat.pattern = u"0%" >>> longlength.formats = {None: fooformat, 'bar': barformat} >>> numbers.percentFormats = {'long': longlength} >>> numbers.defaultPercentFormat = 'long' >>> formatter = numbers.getFormatter('percent') >>> formatter.format(123.45678) u'123,457%' >>> formatter = numbers.getFormatter('percent', name='bar') >>> formatter.format(123.45678) u'123%' ...using a default name:: >>> numbers.percentFormats['long'].default = 'bar' >>> formatter = numbers.getFormatter('percent') >>> formatter.format(123.45678) u'123%' """ def getFormatter(self, category, length=None, name=None): """See zope.i18n.interfaces.locales.ILocaleNumbers""" assert category in (u"decimal", u"percent", u"scientific", u"currency") assert length in (u"short", u"medium", u"long", u"full", None) formats = getattr(self, category + 'Formats') if length is None: length = getattr( self, 'default' + category[0].upper() + category[1:] + 'Format', list(formats.keys())[0]) formatLength = formats[length] if name is None: name = formatLength.default format = formatLength.formats[name] return NumberFormat(format.pattern, self.symbols) @implementer(ILocaleOrientation) class LocaleOrientation(AttributeInheritance): """Implementation of ILocaleOrientation """ @implementer(ILocale) class Locale(AttributeInheritance): """Implementation of the ILocale interface.""" def __init__(self, id): self.id = id def getLocaleID(self): """ Return the locale id. Example:: >>> lid = LocaleIdentity('en', 'latin', 'US', 'POSIX') >>> locale = Locale(lid) >>> locale.getLocaleID() 'en_latin_US_POSIX' >>> lid = LocaleIdentity('en', 'latin') >>> locale = Locale(lid) >>> locale.getLocaleID() 'en_latin' >>> lid = LocaleIdentity() >>> locale = Locale(lid) >>> locale.getLocaleID() '' """ id = self.id pieces = [x for x in (id.language, id.script, id.territory, id.variant) if x] id_string = '_'.join(pieces) # TODO: What about keys??? Where do I get this info from? 
# Notice that 'pieces' is always empty. pieces = [key + '=' + type for (key, type) in ()] assert not pieces if pieces: # pragma: no cover id_string += '@' + ','.join(pieces) return id_string def getInheritedSelf(self): """See zope.i18n.interfaces.locales.ILocaleInheritance This is the really interesting method that looks up the next (more general) Locale object. This is used in case this locale object does not have the required information. This method works closely with with LocaleProvider. """ language = self.id.language territory = self.id.territory variant = self.id.variant if variant is not None: return locales.getLocale(language, territory, None) elif territory is not None: return locales.getLocale(language, None, None) elif language is not None: return locales.getLocale(None, None, None) else: # Well, this is bad; we are already at the root locale raise NoParentException('Cannot find a more general locale.')
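# ---------------------------------------------------------------------------
# Editor's note: a hedged usage sketch, not shipped with zope.i18n.  It
# assumes the locale data files under LOCALEDIR are present and that the
# loaded locale exposes the ``dates`` and ``numbers`` attributes described by
# ILocale; the locale id 'de'/'DE' is only an example.
if __name__ == '__main__':
    from datetime import date
    try:
        de = locales.getLocale('de', 'DE', None)
        # full-length date formatter for this locale, as in the doctests above
        date_formatter = de.dates.getFormatter('date', length='full')
        print(date_formatter.format(date(2004, 2, 4)))
        # locale-aware decimal formatting
        print(de.numbers.getFormatter('decimal').format(1234.5))
    except LoadLocaleError:
        # locale data missing or unreadable for this id
        pass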
#!/usr/bin/env python # Copyright 2019 Scott Wales # author: Scott Wales <scott.wales@unimelb.edu.au> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import xarray import numpy import dask import pandas from climtas import io def test_to_netcdf_throttled(tmpdir, distributed_client): def helper(path, data): da = xarray.DataArray(data, dims=["t", "x", "y"], name="test") io.to_netcdf_throttled(da, path) out = xarray.open_dataset(str(path)).test xarray.testing.assert_identical(da, out) path = tmpdir / "numpy.nc" data = numpy.zeros([10, 10, 10]) helper(path, data) path = tmpdir / "dask.nc" data = dask.array.zeros([10, 10, 10]) helper(path, data) data = dask.array.random.random([10, 10, 10]) + numpy.random.random([10, 10, 10]) helper(path, data) def test_to_netcdf_throttled_serial(tmpdir): def helper(path, data): da = xarray.DataArray(data, dims=["t", "x", "y"], name="test") io.to_netcdf_throttled(da, path) out = xarray.open_dataset(str(path)).test xarray.testing.assert_identical(da, out) path = tmpdir / "numpy.nc" data = numpy.zeros([10, 10, 10]) helper(path, data) path = tmpdir / "dask.nc" data = dask.array.zeros([10, 10, 10]) helper(path, data) def test_to_netcdf_series(tmpdir): path = tmpdir / "data_{start.year}.nc" data = xarray.DataArray( numpy.zeros([20]), coords=[("time", pandas.date_range("20010101", freq="MS", periods=20))], name="test", ) io.to_netcdf_series(data, path, groupby="time.year") assert (tmpdir / "data_2001.nc").exists() assert (tmpdir / "data_2002.nc").exists() data.coords["group"] = ( "time", [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ) path = tmpdir / "data_{group}.nc" io.to_netcdf_series(data, path, groupby="group") assert (tmpdir / "data_0.nc").exists() assert (tmpdir / "data_1.nc").exists() def test_to_netcdf_throttled_fillvalue(tmpdir, distributed_client): def helper(path, data): da = xarray.DataArray(data, dims=["t", "x", "y"], name="test") da.encoding["_FillValue"] = 1 io.to_netcdf_throttled(da, path) out = xarray.open_dataset(str(path)).test xarray.testing.assert_identical(da, out) assert out.encoding["_FillValue"] == 1 path = tmpdir / "numpy.nc" data = numpy.zeros([10, 10, 10]) helper(path, data) path = tmpdir / "dask.nc" data = dask.array.zeros([10, 10, 10]) helper(path, data) data = dask.array.random.random([10, 10, 10]) + numpy.random.random([10, 10, 10]) helper(path, data)
# -*- coding: utf-8 -*- # Copyright (c) 2015, Indictrans and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe from frappe.model.document import Document class RequestCategory(Document): pass
import random import numpy as np from bo.run import run_bo if __name__ == '__main__': seed = 0 random.seed(seed) np.random.seed(seed) best_robot, best_fitness = run_bo( experiment_name='test_bo', structure_shape=(5, 5), pop_size=3, max_evaluations=6, train_iters=50, num_cores=3, ) print('Best robot:') print(best_robot) print('Best fitness:', best_fitness)
import sys from awsglue.transforms import * from awsglue.utils import getResolvedOptions from pyspark.context import SparkContext from awsglue.context import GlueContext from awsglue.job import Job sc = SparkContext() glueContext = GlueContext(sc) spark = glueContext.spark_session job = Job(glueContext) job.commit() # -------------------------Glue Job housekeeping section ----------------------- import boto3 from botocore.errorfactory import ClientError from urllib.parse import urlparse import pandas as pd import pandera as pa import datetime as dt from dateutil import parser import pyarrow import pyarrow.parquet as pq import time import logging import json import copy # --------------------End of Script imports--------------------------- def setLog(e,check): """add success/fail logs to the file_list. Parameters ---------- e :[str] An exception or the string value of an exception check:[str] DQ check for which logs are being added """ log_msg = f"{check} -> {e}" file_status.append(log_msg) def setTaskVal(source,file,status,error_loc=""): """Append file status to a global df exit_status Parameters ---------- source:[str] Source file for which DQ logs are being added status:[str] Status of File DQ check (success/fail) error_loc:[str] Location of error file, if DQ check failed, blank otherwise. """ ts=dt.datetime.now() logs=copy.deepcopy(file_status) exit_status.loc[len(exit_status.index)] = [ts,layer,source,file,status,error_loc,logs] file_status.clear() return def extractBotoParams(config_path): """Extract bucket name and key from s3 path. Parameters ---------- config_path : str path of application configuration file Returns ------- pair pair with bucket name and path key. """ parsed_url = urlparse(config_path) bucket_name = parsed_url.hostname s3_path = parsed_url.path[1:] return bucket_name,s3_path def getConfig(config_path: str): """Read application configuration file into json Parameters ---------- config_path : str path of application configuration file Returns ------- dict return a json dictionary contain application configuration """ try: logger.info(f'Reading config from {config_path}') bucket_name,s3_path = extractBotoParams(config_path) obj = s3_connect.get_object(Bucket=bucket_name, Key=s3_path) config = obj['Body'].read().decode("utf-8") logger.info("Config file read successfully!") return json.loads(config) except FileNotFoundError as e: logger.exception('CONFIGURATION ERROR: Couldnt find the configuration file : ' + str(e)) raise FileNotFoundError('CONFIGURATION ERROR: Couldnt find the configuration file : ' + str(e)) except ValueError as e: if not config_path: logger.exception('CONFIGURATION ERROR: Null config path : ' + str(e)) raise ValueError('CONFIGURATION ERROR: Null config path : ' + str(e)) else: logger.exception( 'CONFIGURATION ERROR: Invalid json. Error occurred while loading configuration file : ' + str(e)) raise ValueError( 'CONFIGURATION ERROR: Invalid json. Error occurred while loading configuration file : ' + str(e)) except Exception as e: logger.exception('CONFIGURATION ERROR: Error occurred while loading configuration file : ' + str(e)) err_msg = f"{type(e).__name__} : CONFIGURATION ERROR: Error occurred while loading configuration file : {str(e)}" raise Exception(err_msg) from e def move_file(src,dest): """Method to move file between directories. If path is not a file -> return, do nothing. 
Args: src (str): Source path with filename dest (str): Destination path with file name """ try: s3 = boto3.resource('s3') src_bucket,src_key=extractBotoParams(src) dest_bucket,dest_key=extractBotoParams(dest) copy_source = { 'Bucket': src_bucket, 'Key': src_key } bucket = s3.Bucket(dest_bucket) obj = bucket.Object(dest_key) obj.copy(copy_source) s3.Object(src_bucket, src_key).delete() logger.info(f"{src.split('/')[-1]} moved to : {dest}") except Exception as e: msg=f"Failed to move file to {dest} due to -> {e}" logger.error(msg) raise msg #try block in main for will catch this exception and push file status to df then continue with next file. def create_dir(path): """Create a new directory, do nothing if directory exists. Args: path (string): directory path """ try: if directoryExists(path): return else: bucketname,key=extractBotoParams(path) s3_connect.put_object(Bucket=bucketname,Key=key) logger.info(f"Directory created at: {path}") except Exception as e: msg=f"Failed to create directory {path} due to -> {e}" logger.error(msg) raise msg #try block in main for will catch this exception and push file status to df then continue with next file. def directoryExists(path): """Check if directory/file exists. Note: Folder paths must end with '/' Args: path (string): directory path Returns: bool """ bucket,key = extractBotoParams(path) try: s3_connect.head_object(Bucket=bucket, Key=key) except ClientError: return False return True def emptyDir(path): file_list=listDir(path) s3 = boto3.resource('s3') bucket,key=extractBotoParams(path) print("--------------") for file in file_list: s3.Object(bucket, file).delete() msg=f"Partition {curr_dt} cleaned." logger.info(msg) def listDir(path): """List all files present in dir and return list. Args: path (string): directory path Returns: list: List of file keys. """ try: files=[] s3 = boto3.resource('s3') bucket,key=extractBotoParams(path) my_bucket = s3.Bucket(bucket) for object_summary in my_bucket.objects.filter(Prefix=key): file_key=object_summary.key if file_key[-1]!='/': files.append(file_key) return files except Exception as e: msg=f"Failed to list dir {path} due to -> {e}" logger.error(msg) raise e def perform_generic_checks(file_path): """The method performs generic file checks on the file. These include - - Csv extension check - Check is file is empty Args: file_path (str): File path on which dq checks are performed. 
""" try: status=True file=file_path.split("/")[-1] logger.info(f"Starting generic DQ Checks for: {file}") #DQ1: File extension check ext=file_path[-3:] if ext!=config["source_ext"]: msg=f"CSV format check failed for: {file}" logger.warning(msg) setLog(msg,"File extension check") status=False return status else: msg=f"CSV format check passed for: {file}" logger.info(msg) # setLog(msg,"File extension check") #DQ2: File Not Empty check try: df=pd.read_csv(file_path) if df.shape[0] == 0: msg=f"File Not Empty check failed for: {file}" logger.warning(msg) setLog(msg,"File Not Empty") status=False return status else: msg=f"File Not empty check passed for: {file}" logger.info(msg) # setLog(msg,"File Not Empty check") except pd.errors.EmptyDataError as e: msg=f"File Not Empty check failed for: {file}" logger.warning(msg) setLog(msg,"File Not Empty") status=False return status return status except Exception as e: print(type(e)) print(e) msg=f"Error:{e} while performing Generic DQ checks for file {file}" logger.error(msg) setLog(msg,"Generic DQ") return False def clean_schema(lst): """This method cleans the list items so that they can be compared. - Strips space - Remove trailing/leading spaces - convert to lower case Args: lst (list): List to be cleaned Returns: list : Cleaned list """ schema=[] for col in lst: col=col.lower().strip() col=" ".join(col.split()) schema.append(col) return schema def validate_ts(ts): """Check if the pandas column has all timestamp values. Args: ts: Pandas Series (col) Returns: bool """ try: for i in ts: if i == "": continue parser.parse(i) return True except ValueError as e: logger.warning(f"Not a timestamp->{e}") return False def write_to_parquet(df,path,mode): """Write file to parquet. Args: df: dataframe to write path: Path to write to mode: overwrite/append Returns: bool """ try: table_name=path.split('/')[-2] table_name=table_name+".parquet" if mode=="append": if len(listDir(path+"date_partition="+curr_dt+"/"))>0: emptyDir(path+"date_partition="+curr_dt+"/") df['date_partition'] = curr_dt df.to_parquet(path,partition_cols=['date_partition']) msg=f"Parquet data written to -> {path}" logger.info(msg) if mode=="overwrite": df.to_parquet(path+table_name) msg=f"Parquet data written to -> {path}" logger.info(msg) except Exception as e: logger.error(f"Error writing as parquet ->{e}") def perform_business_checks(source,file_path): """The method performs DQ checks based on specific business rules. These include - - File name check - Column check - Schema check (data type) - Special char check Args: source (str): Source system name. file_path (str): File path on which dq checks are performed. """ try: status=True file=file_path.split("/")[-1] logger.info(f"Starting Business rule DQ Checks for: {file}") #ADD -> Check if filename in source: col_map -> if not just return False, file will be moved to error location. 
if file not in list(config["sources"][source]["col_map"].keys()): msg=f"{file} not in config list of files for {source}" logger.warning(msg) setLog(msg,"File Name Check") return False # DQ3: File Column Check df=pd.read_csv(file_path) control_schema=list(config["sources"][source]["col_map"][file].keys()) curr_schema=list(df.columns) if set(clean_schema(control_schema)) != set(clean_schema(curr_schema)): msg=f"Column check failed for: {file}" logger.warning(msg) setLog(msg,"Column Check") status=False return status else: msg=f"Column check passed for: {file}" logger.info(msg) # setLog(msg,"Schema Check") #DQ4: Schema check - data type # DQ3: File Column Check df.columns=clean_schema(df.columns) col_map={} file_name=file_path.split('/')[-1] for k,v in config["sources"][source]["col_map"][file_name].items(): col_map[k]=eval(v) schema = pa.DataFrameSchema(columns=col_map,strict=True) try: schema.validate(df) msg=f"Schema data type check passed for: {file_name}" logger.info(msg) except pa.errors.SchemaError as e: msg=f"Data type schema check failed for: {file_name} due to ->{e}" logger.warning(msg) setLog(msg,"Schema - Data Type Check") status=False return status return status except Exception as e: print(type(e)) print(e) msg=f"Error:{e} while performing Business DQ checks for file {file}" logger.error(msg) setLog(msg,"Business Rule DQ") return False def main(): """_summary_: Driver function for the DQ script. """ try: #Read all sources from config as a list source_list=list(config["sources"].keys()) for source in source_list: # -- this try : -> isRequired? msg=f"------Starting DQ checks for Source System: {source} ------" logger.info(msg) s3_uri=config['s3_base_uri'] root_dir=f"{s3_uri}{config['sources'][source]['root_dir']}" dated_dir=eval(f'''f"{config['dated_folder']}"''') dated_dir=s3_uri+dated_dir create_dir(dated_dir) # except -> this will loop through all files in source and add failed message for all files in source folder # because dated folder creation failed #Move all files to dated folder for file in listDir(root_dir): file_path=f"{s3_uri}/{file}" file=file.split('/')[-1] # #move file to dated folder move_file(file_path,f"{dated_dir}{file}") logger.info(f"All files moved to dated folder for:{source}") for file in listDir(dated_dir): #Add try catch here failure of one file shouldnt stop the process. file_path=f"{s3_uri}/{file}" generic_dq_status=perform_generic_checks(file_path) if generic_dq_status == False: logger.warning(f"One or more generic DQ checks failed for file: {file}") err_path=eval(f'''f"{config['error_folder']}"''') err_path=s3_uri+err_path file_name=file.split('/')[-1] file_name=file_name.split('.') curr_ts=dt.datetime.today().strftime("%Y-%m-%d_%H:%M:%S") file_name[0]=file_name[0]+'_'+curr_ts file_name=".".join(file_name) create_dir(err_path) err_file_path=err_path+file_name move_file(file_path,err_file_path) setTaskVal(source,file_path.split('/')[-1],config["fail_flag"],err_file_path) else: msg=f"All Generic DQ checks passed for {file}" logger.info(msg) business_dq_status=perform_business_checks(source,file_path) if business_dq_status == True: msg=f"Business Rule DQ checks passed for {file}" logger.info(msg) df=pd.read_csv(file_path) table_path=eval(f'''f"{config['landing_dir']}"''') file_name=file_path.split('/')[-1] table_name=file_name.split('.')[0] table_path=s3_uri+table_path+table_name+'/' write_to_parquet(df,table_path,"append") setTaskVal(source,file,config["success_flag"],"")#write external table... 
else: logger.warning(f"One or more business rule DQ checks failed for file: {file}") err_path=eval(f'''f"{config['error_folder']}"''') err_path=s3_uri+err_path file_name=file.split('/')[-1] file_name=file_name.split('.') curr_ts=dt.datetime.today().strftime("%Y-%m-%d_%H:%M:%S") file_name[0]=file_name[0]+'_'+curr_ts file_name=".".join(file_name) create_dir(err_path) err_file_path=err_path+file_name move_file(file_path,err_file_path) setTaskVal(source,file,config["fail_flag"],err_path) except Exception as e: logger.error(e) raise e finally: exit_status["DQ Logs"] = exit_status["DQ Logs"].map(str) write_to_parquet(exit_status,"s3://cte-project/landing/dq/job_result/","overwrite") # --------------------Global declarations--------------------- logger=logging.getLogger("DQ Script") logging.basicConfig(format='%(name)s:%(levelname)s: %(message)s', level=logging.DEBUG) logging.getLogger('boto3').setLevel(logging.CRITICAL) logging.getLogger('botocore').setLevel(logging.CRITICAL) logging.getLogger('urllib3').setLevel(logging.CRITICAL) logging.getLogger('s3transfer').setLevel(logging.CRITICAL) logging.getLogger('aiobotocore').setLevel(logging.CRITICAL) logging.getLogger('charset_normalizer').setLevel(logging.CRITICAL) logging.getLogger('s3fs').setLevel(logging.CRITICAL) logging.getLogger('fsspec').setLevel(logging.CRITICAL) logging.getLogger('asyncio').setLevel(logging.CRITICAL) config_path="s3://cte-project/config/config.json" global err_msgs,config,exit_status,curr_dt,layer,s3_connect s3_connect = boto3.client('s3') config=getConfig(config_path) curr_dt=dt.date.today().strftime("%Y-%m") layer="DQ Script" print(type(config)) #Instantiate logging vars file_status=[] exit_status=pd.DataFrame(columns=config["status_dict_cols"]) main()
import unittest from datetime import datetime from src.backup.dataset_id_creator import DatasetIdCreator from src.commons.exceptions import ParameterValidationException class TestDatasetIdCreator(unittest.TestCase): def test_create_happy_path(self): # given date = datetime(1901, 12, 21) location = 'US' project = 'project123' # when result = DatasetIdCreator.create(date, location, project) # then expected_result = '1901_51_US_project123' self.assertEquals(result, expected_result) def test_create_should_replace_all_pauses_to_emphasis(self): # given date = datetime(1901, 01, 10) location = 'U-S' project = 'project-_123' # when result = DatasetIdCreator.create(date, location, project) # then expected_result = '1901_02_U_S_project__123' self.assertEquals(result, expected_result) def test_create_without_date_should_throw_error(self): # given date = None location = 'US' project = 'project_id' # when then self.assertRaises(ParameterValidationException, DatasetIdCreator.create, date, location, project) def test_create_without_location_should_throw_error(self): # given date = datetime(2016, 5, 15) location = None project = 'project_id' # when then self.assertRaises(ParameterValidationException, DatasetIdCreator.create, date, location, project) def test_create_without_project_should_throw_error(self): # given date = datetime(2016, 5, 15) location = 'US' project = None # when then self.assertRaises(ParameterValidationException, DatasetIdCreator.create, date, location, project)
# coding: utf-8 """ No descripton provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: 2.1.1+01d50e5 Generated by: https://github.com/swagger-api/swagger-codegen.git Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import absolute_import import os import sys import unittest import graylog from graylog.rest import ApiException from graylog.models.input_created import InputCreated class TestInputCreated(unittest.TestCase): """ InputCreated unit test stubs """ def setUp(self): pass def tearDown(self): pass def testInputCreated(self): """ Test InputCreated """ model = graylog.models.input_created.InputCreated() if __name__ == '__main__': unittest.main()
# ----------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # ----------------------------------------------------------------------------- import glob from importlib import import_module import json import os import re from subprocess import CalledProcessError import sys from knack.log import get_logger from knack.util import CLIError from azdev.utilities import ( display, output, heading, subheading, cmd as raw_cmd, py_cmd, pip_cmd, find_file, IS_WINDOWS, ENV_VAR_TEST_LIVE, COMMAND_MODULE_PREFIX, EXTENSION_PREFIX, make_dirs, get_azdev_config_dir, get_path_table, require_virtual_env, get_name_index) from .pytest_runner import get_test_runner from .profile_context import ProfileContext, current_profile from .incremental_strategy import CLIAzureDevOpsContext logger = get_logger(__name__) # pylint: disable=too-many-statements,too-many-locals def run_tests(tests, xml_path=None, discover=False, in_series=False, run_live=False, profile=None, last_failed=False, pytest_args=None, no_exit_first=False, mark=None, git_source=None, git_target=None, git_repo=None, cli_ci=False): require_virtual_env() DEFAULT_RESULT_FILE = 'test_results.xml' DEFAULT_RESULT_PATH = os.path.join(get_azdev_config_dir(), DEFAULT_RESULT_FILE) heading('Run Tests') path_table = get_path_table() test_index = _get_test_index(profile or current_profile(), discover) if not tests: tests = list(path_table['mod'].keys()) + list(path_table['core'].keys()) + list(path_table['ext'].keys()) if tests == ['CLI']: tests = list(path_table['mod'].keys()) + list(path_table['core'].keys()) elif tests == ['EXT']: tests = list(path_table['ext'].keys()) # filter out tests whose modules haven't changed modified_mods = _filter_by_git_diff(tests, test_index, git_source, git_target, git_repo) if modified_mods: display('\nTest on modules: {}\n'.format(', '.join(modified_mods))) if cli_ci is True: ctx = CLIAzureDevOpsContext(git_repo, git_source, git_target) modified_mods = ctx.filter(test_index) # resolve the path at which to dump the XML results xml_path = xml_path or DEFAULT_RESULT_PATH if not xml_path.endswith('.xml'): xml_path = os.path.join(xml_path, DEFAULT_RESULT_FILE) # process environment variables if run_live: logger.warning('RUNNING TESTS LIVE') os.environ[ENV_VAR_TEST_LIVE] = 'True' def _find_test(index, name): name_comps = name.split('.') num_comps = len(name_comps) key_error = KeyError() for i in range(num_comps): check_name = '.'.join(name_comps[(-1 - i):]) try: match = index[check_name] if check_name != name: logger.info("Test found using just '%s'. The rest of the name was ignored.\n", check_name) return match except KeyError as ex: key_error = ex continue raise key_error # lookup test paths from index test_paths = [] for t in modified_mods: try: test_path = os.path.normpath(_find_test(test_index, t)) test_paths.append(test_path) except KeyError: logger.warning("'%s' not found. If newly added, re-run with --discover", t) continue exit_code = 0 # Tests have been collected. Now run them. 
if not test_paths: logger.warning('No tests selected to run.') sys.exit(exit_code) exit_code = 0 with ProfileContext(profile): runner = get_test_runner(parallel=not in_series, log_path=xml_path, last_failed=last_failed, no_exit_first=no_exit_first, mark=mark) exit_code = runner(test_paths=test_paths, pytest_args=pytest_args) sys.exit(0 if not exit_code else 1) def _filter_by_git_diff(tests, test_index, git_source, git_target, git_repo): from azdev.utilities import diff_branches, extract_module_name from azdev.utilities.git_util import summarize_changed_mods if not any([git_source, git_target, git_repo]): return tests if not all([git_target, git_repo]): raise CLIError('usage error: [--src NAME] --tgt NAME --repo PATH') files_changed = diff_branches(git_repo, git_target, git_source) mods_changed = summarize_changed_mods(files_changed) repo_path = str(os.path.abspath(git_repo)).lower() to_remove = [] for key in tests: test_path = test_index.get(key, None) if test_path and test_path.lower().startswith(repo_path): mod_name = extract_module_name(test_path) if next((x for x in mods_changed if mod_name in x), None): # has changed, so do not filter out continue # in not in the repo or has not changed, filter out to_remove.append(key) # remove the unchanged modules tests = [t for t in tests if t not in to_remove] logger.info('Filtered out: %s', to_remove) return tests def _discover_module_tests(mod_name, mod_data): # get the list of test files in each module total_tests = 0 total_files = 0 logger.info('Mod: %s', mod_name) try: contents = os.listdir(mod_data['filepath']) test_files = { x[:-len('.py')]: {} for x in contents if x.startswith('test_') and x.endswith('.py') } total_files = len(test_files) except FileNotFoundError: logger.info(' No test files found.') return None for file_name in test_files: mod_data['files'][file_name] = {} test_file_path = mod_data['base_path'] + '.' + file_name try: module = import_module(test_file_path) except ImportError as ex: logger.info(' %s', ex) continue module_dict = module.__dict__ possible_test_classes = {x: y for x, y in module_dict.items() if not x.startswith('_')} for class_name, class_def in possible_test_classes.items(): try: class_dict = class_def.__dict__ except AttributeError: # skip non-class symbols in files like constants, imported methods, etc. continue if class_dict.get('__module__') == test_file_path: tests = [x for x in class_def.__dict__ if x.startswith('test_')] if tests: mod_data['files'][file_name][class_name] = tests total_tests += len(tests) logger.info(' %s tests found in %s files.', total_tests, total_files) return mod_data # pylint: disable=too-many-statements, too-many-locals def _discover_tests(profile): """ Builds an index of tests so that the user can simply supply the name they wish to test instead of the full path. 
""" profile_split = profile.split('-') profile_namespace = '_'.join([profile_split[-1]] + profile_split[:-1]) heading('Discovering Tests') path_table = get_path_table() core_modules = path_table['core'].items() command_modules = path_table['mod'].items() extensions = path_table['ext'].items() inverse_name_table = get_name_index(invert=True) module_data = {} logger.info('\nCore Modules: %s', ', '.join([name for name, _ in core_modules])) for mod_name, mod_path in core_modules: file_path = mod_path for comp in mod_name.split('-'): file_path = os.path.join(file_path, comp) mod_data = { 'alt_name': 'main' if mod_name == 'azure-cli' else mod_name.replace(COMMAND_MODULE_PREFIX, ''), 'filepath': os.path.join(file_path, 'tests'), 'base_path': '{}.tests'.format(mod_name).replace('-', '.'), 'files': {} } tests = _discover_module_tests(mod_name, mod_data) if tests: module_data[mod_name] = tests logger.info('\nCommand Modules: %s', ', '.join([name for name, _ in command_modules])) for mod_name, mod_path in command_modules: mod_data = { # Modules don't technically have azure-cli-foo moniker anymore, but preserving # for consistency. 'alt_name': '{}{}'.format(COMMAND_MODULE_PREFIX, mod_name), 'filepath': os.path.join( mod_path, 'tests', profile_namespace), 'base_path': 'azure.cli.command_modules.{}.tests.{}'.format(mod_name, profile_namespace), 'files': {} } tests = _discover_module_tests(mod_name, mod_data) if tests: module_data[mod_name] = tests logger.info('\nExtensions: %s', ', '.join([name for name, _ in extensions])) for mod_name, mod_path in extensions: glob_pattern = os.path.normcase(os.path.join('{}*'.format(EXTENSION_PREFIX))) try: file_path = glob.glob(os.path.join(mod_path, glob_pattern))[0] except IndexError: logger.debug("No extension found at: %s", os.path.join(mod_path, glob_pattern)) continue import_name = os.path.basename(file_path) mod_data = { 'alt_name': inverse_name_table[mod_name], 'filepath': os.path.join(file_path, 'tests', profile_namespace), 'base_path': '{}.tests.{}'.format(import_name, profile_namespace), 'files': {} } tests = _discover_module_tests(import_name, mod_data) if tests: module_data[mod_name] = tests test_index = {} conflicted_keys = [] def add_to_index(key, path): from azdev.utilities import extract_module_name key = key or mod_name if key in test_index: if key not in conflicted_keys: conflicted_keys.append(key) mod1 = extract_module_name(path) mod2 = extract_module_name(test_index[key]) if mod1 != mod2: # resolve conflicted keys by prefixing with the module name and a dot (.) logger.warning("'%s' exists in both '%s' and '%s'. Resolve using `%s.%s` or `%s.%s`", key, mod1, mod2, mod1, key, mod2, key) test_index['{}.{}'.format(mod1, key)] = path test_index['{}.{}'.format(mod2, key)] = test_index[key] else: logger.error("'%s' exists twice in the '%s' module. 
" "Please rename one or both and re-run --discover.", key, mod1) else: test_index[key] = path # build the index for mod_name, mod_data in module_data.items(): # don't add empty mods to the index if not mod_data: continue mod_path = mod_data['filepath'] for file_name, file_data in mod_data['files'].items(): file_path = os.path.join(mod_path, file_name) + '.py' for class_name, test_list in file_data.items(): for test_name in test_list: test_path = '{}::{}::{}'.format(file_path, class_name, test_name) add_to_index(test_name, test_path) class_path = '{}::{}'.format(file_path, class_name) add_to_index(class_name, class_path) add_to_index(file_name, file_path) add_to_index(mod_name, mod_path) add_to_index(mod_data['alt_name'], mod_path) # remove the conflicted keys since they would arbitrarily point to a random implementation for key in conflicted_keys: del test_index[key] return test_index def _get_test_index(profile, discover): config_dir = get_azdev_config_dir() test_index_dir = os.path.join(config_dir, 'test_index') make_dirs(test_index_dir) test_index_path = os.path.join(test_index_dir, '{}.json'.format(profile)) test_index = {} if discover: test_index = _discover_tests(profile) with open(test_index_path, 'w') as f: f.write(json.dumps(test_index)) display('\ntest index updated: {}'.format(test_index_path)) elif os.path.isfile(test_index_path): with open(test_index_path, 'r') as f: test_index = json.loads(''.join(f.readlines())) display('\ntest index found: {}'.format(test_index_path)) else: test_index = _discover_tests(profile) with open(test_index_path, 'w') as f: f.write(json.dumps(test_index)) display('\ntest index created: {}'.format(test_index_path)) return test_index
from django.contrib import messages from django.shortcuts import render, get_object_or_404, redirect from django.utils import timezone from .forms import PublisherForm, ReviewForm, SearchForm from .models import Book, Contributor, Publisher, Review from .utils import average_rating def index(request): return render(request, "base.html") def book_search(request): search_text = request.GET.get("search", "") form = SearchForm(request.GET) books = set() if form.is_valid() and form.cleaned_data["search"]: search = form.cleaned_data["search"] if form.cleaned_data["search_in"] == "title": books = Book.objects.filter(title__icontains=search) else: contributors = Contributor.objects.filter(first_names__icontains=search) | \ Contributor.objects.filter(last_names__icontains=search) for contributor in contributors: for book in contributor.book_set.all(): books.add(book) return render(request, "reviews/search-results.html", {"form": form, "search_text": search_text, "books": books}) def book_list(request): books = Book.objects.all() books_with_reviews = [] for book in books: reviews = book.review_set.all() if reviews: book_rating = average_rating([review.rating for review in reviews]) number_of_reviews = len(reviews) else: book_rating = None number_of_reviews = 0 books_with_reviews.append({"book": book, "book_rating": book_rating, "number_of_reviews": number_of_reviews}) context = { "book_list": books_with_reviews } return render(request, "reviews/book_list.html", context) def book_detail(request, pk): book = get_object_or_404(Book, pk=pk) reviews = book.review_set.all() if reviews: book_rating = average_rating([review.rating for review in reviews]) context = { "book": book, "book_rating": book_rating, "reviews": reviews } else: context = { "book": book, "book_rating": None, "reviews": None } return render(request, "reviews/book_detail.html", context) def publisher_edit(request, pk=None): if pk is not None: publisher = get_object_or_404(Publisher, pk=pk) else: publisher = None if request.method == "POST": form = PublisherForm(request.POST, instance=publisher) if form.is_valid(): updated_publisher = form.save() if publisher is None: messages.success(request, "Publisher \"{}\" was created.".format(updated_publisher)) else: messages.success(request, "Publisher \"{}\" was updated.".format(updated_publisher)) return redirect("publisher_detail", updated_publisher.pk) else: form = PublisherForm(instance=publisher) return render(request, "reviews/instance-form.html", {"form": form, "instance": publisher, "model_type": "Publisher"}) def review_edit(request, book_pk, review_pk=None): book = get_object_or_404(Book, pk=book_pk) if review_pk is not None: review = get_object_or_404(Review, book_id=book_pk, pk=review_pk) else: review = None if request.method == "POST": form = ReviewForm(request.POST, instance=review) if form.is_valid(): updated_review = form.save(False) updated_review.book = book if review is None: messages.success(request, "Review for \"{}\" created.".format(book)) else: updated_review.date_edited = timezone.now() messages.success(request, "Review for \"{}\" updated.".format(book)) updated_review.save() return redirect("book_detail", book.pk) else: form = ReviewForm(instance=review) return render(request, "reviews/instance-form.html", {"form": form, "instance": review, "model_type": "Review", "related_instance": book, "related_model_type": "Book" }) def media_example(request): return render(request, "media-example.html")
import json
import unittest
from os.path import join as path_join

import numpy as np
from numpy.testing import assert_almost_equal
from pkg_resources import resource_filename
from pymatgen.io.vasp import Vasprun

from sumo.electronic_structure.optics import calculate_dielectric_properties, kkr


class AbsorptionTestCase(unittest.TestCase):
    def setUp(self):
        diel_path = resource_filename(
            __name__, path_join("..", "data", "Ge", "ge_diel.json")
        )
        with open(diel_path) as f:
            self.ge_diel = json.load(f)

        absorption_path = resource_filename(
            __name__, path_join("..", "data", "Ge", "ge_alpha.json")
        )
        with open(absorption_path) as f:
            self.ge_abs = json.load(f)

    def test_absorption(self):
        energy, alpha = calculate_dielectric_properties(
            self.ge_diel,
            {
                "absorption",
            },
        )["absorption"]
        self.assertIsNone(assert_almost_equal(alpha, self.ge_abs))


class KramersKronigTestCase(unittest.TestCase):
    def setUp(self):
        ge_vasprun_path = resource_filename(
            __name__, path_join("..", "data", "Ge", "vasprun.xml.gz")
        )
        self.ge_vasprun = Vasprun(ge_vasprun_path)
        self.ge_text_file = resource_filename(
            __name__, path_join("..", "data", "Ge", "optics.txt")
        )

    def test_kkr(self):
        energy, eps_real, eps_imag = self.ge_vasprun.dielectric
        de = (energy[10] - energy[0]) / 10

        def symmetrise(a):
            """Convert XX YY ZZ XY YZ XZ array to a symmetrical 3x3 matrix"""
            return [[a[0], a[3], a[5]], [a[3], a[1], a[4]], [a[5], a[4], a[2]]]

        eps_imag_3x3 = [symmetrise(a) for a in eps_imag]
        eps_real_3x3 = np.array([symmetrise(a) for a in eps_real])

        # Check difference between eps_real reported by Vasp and determined
        # by Kramers-Kronig transformation of eps_im.
        #
        # Some discrepancy is normal, check RMS is as expected.
        # This is likely due to the limited precision available in vasprun.
        error = kkr(de, eps_imag_3x3) - eps_real_3x3
        error_fracs = [
            eps / eps_ref
            for eps, eps_ref in zip(error.flatten(), eps_real_3x3.flatten())
            if eps_ref > 1e-2
        ]  # Exclude low-precision cases
        self.assertLess(np.sqrt((np.array(error_fracs) ** 2).mean()), 0.1)
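For reference, the transform that `kkr` is exercised against in the test above is the standard Kramers-Kronig relation between the real and imaginary parts of the dielectric function; it is assumed here that sumo evaluates a discretised principal-value integral of this general form with energy step `de` (this is a statement of the relation, not of sumo's exact numerics):

% Kramers-Kronig relation (principal-value integral) connecting the real and
% imaginary parts of the dielectric function.
\varepsilon_{\mathrm{re}}(\omega) = 1 + \frac{2}{\pi}\,
    \mathcal{P}\!\int_{0}^{\infty}
    \frac{\omega'\,\varepsilon_{\mathrm{im}}(\omega')}{\omega'^{2} - \omega^{2}}\,
    \mathrm{d}\omega'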
from django.apps import AppConfig class RudumbappConfig(AppConfig): name = 'rudumbapp'
import logging
import time
import struct

_logger = logging.getLogger(__name__)

SR = 182  # separator byte
CR = 13   # terminator byte


class Request:
    def __init__(self, cmd, charmber_index, *args):
        """
        args are the optional parameters param_1, param_2, ... (up to 4).
        """
        self.cmd = cmd
        self.charmber_index = charmber_index
        self.params = args

    @property
    def bytes(self):
        """
        Convert the request to bytes.
        """
        fmt = self.convert_fmt(self.cmd, self.charmber_index, self.params)
        # encode self.params to bytes
        params = [param.encode() for param in self.params]
        # interleave SR separators with the params,
        # e.g. [param1, SR, param2, SR, ...]
        for i in range(len(params)):
            params.insert(2*i+1, SR)
        bytes_data = struct.pack(fmt, self.cmd.encode(), SR,
                                 self.charmber_index.encode(), SR,
                                 *params, CR)
        return bytes_data

    @staticmethod
    def convert_fmt(command, charmber_index, params):
        """
        Build the struct format string for the request frame.
        """
        # the separator (SR = 182) and terminator (CR = 13) each occupy one byte
        fmt_header = "{0}s1B{1}s1B".format(len(command), len(charmber_index))
        fmt_body = ""
        for param in params:
            fmt_body = fmt_body + "{0}s1B".format(len(param))
        fmt_tail = "1B"
        fmt = fmt_header + fmt_body + fmt_tail
        return fmt
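The `bytes` property above packs the command, chamber index and parameters into a single frame in which every field is followed by the SR separator (182) and the frame ends with CR (13). A minimal sketch, assuming ASCII field values and inferring the layout from the code above rather than from any device specification, that rebuilds the same frame by plain byte concatenation and can be used to cross-check `Request.bytes`:

# Minimal sketch (frame layout inferred from Request.bytes above): every field
# is followed by SR, and the frame is terminated with CR.
SR, CR = 182, 13


def build_frame(cmd, chamber_index, *params):
    frame = bytearray()
    for field in (cmd, chamber_index) + params:
        frame += field.encode() + bytes([SR])
    frame += bytes([CR])
    return bytes(frame)


# Hypothetical usage with made-up command/index/parameter values:
# assert build_frame('READ', '1', '42') == Request('READ', '1', '42').bytes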
n = int(input()) termo_atual = 1 termo_anterior = 0 cont = 2 if n == 1 or n == 2: seq = 1 else: while cont <= n: seq = termo_atual + termo_anterior termo_anterior = termo_atual termo_atual = seq cont += 1 fatorial = x = 1 for _ in range(n): fatorial *= x x += 1 print(f'{seq} {fatorial}', end='') if seq % 2 == 0: mes_seguinte = termo_atual + termo_anterior - seq print(f' {mes_seguinte}')
from django.db import models class Service(models.Model): name = models.CharField(max_length=40, unique=True) price = models.IntegerField() master = models.ForeignKey("Master", on_delete=models.SET_NULL, null=True, related_name='services') def __str__(self): return self.name class Master(models.Model): photo = models.ImageField() full_name = models.CharField(max_length=30) exp = models.IntegerField(default=0) birth_date = models.DateField() def __str__(self): return self.full_name class Certificate(models.Model): name = models.CharField(max_length=40) date_graduate = models.DateField() date_expired = models.DateField() school = models.CharField(max_length=40) photo = models.ImageField() status = models.CharField(max_length=15, choices=( ('active', 'active'), ('dead', 'dead'), ), default='active') master = models.ForeignKey(Master,on_delete=models.CASCADE)
# Scheduled task and email sending
import smtplib
from email.mime.text import MIMEText


# send email
def send_email():
    # recipient email addresses
    receivers = ['bestwishfang@foxmail.com']
    # sender email address
    msg_from = 'bestwishfang@126.com'
    password = 'continue00'   # mailbox authorization code

    # email content
    # subject
    subject = "Do you know?"
    # body
    # content = "努力"
    content = "好久了,好多都忘记了"

    # wrap the text content with MIMEText
    msg = MIMEText(content, 'plain', 'utf-8')
    msg['Subject'] = subject
    msg['From'] = msg_from
    msg['To'] = ','.join(receivers)

    try:
        smtp = smtplib.SMTP()
        smtp.connect('smtp.126.com')
        smtp.login(msg_from, password)
        smtp.sendmail(msg_from, receivers, msg.as_string())
        print("发送成功")
        smtp.close()
    except Exception as err:
        print(err)
        print("发送失败")


if __name__ == '__main__':
    send_email()
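The script above opens a plain, unencrypted SMTP connection with no explicit port; many providers only accept authenticated submission over SSL on port 465 (or STARTTLS on 587). A hedged variant using `smtplib.SMTP_SSL` — the port number and SSL requirement are assumptions about smtp.126.com, not something stated in the original script:

# Hedged variant: if the plain SMTP connection above is rejected, SSL on port
# 465 is the usual alternative (port and SSL requirement are assumptions about
# the provider).
import smtplib
from email.mime.text import MIMEText


def send_email_ssl(msg_from, password, receivers, msg: MIMEText):
    with smtplib.SMTP_SSL('smtp.126.com', 465) as smtp:
        smtp.login(msg_from, password)
        smtp.sendmail(msg_from, receivers, msg.as_string())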
# Copyright 2021 Dynatrace LLC # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest from dtcli import utils def test_require_extension_name_valid(): utils.require_extension_name_valid("custom:e") utils.require_extension_name_valid("custom:some.test.ext") utils.require_extension_name_valid("custom:some_simple.test.ext-1") utils.require_extension_name_valid("custom:_some_simple_test_extension") utils.require_extension_name_valid("custom:-some-simple.test.ext_1_") def test_require_extension_name_valid_negative(): with pytest.raises(utils.ExtensionBuildError): utils.require_extension_name_valid("some.test.ext") with pytest.raises(utils.ExtensionBuildError): utils.require_extension_name_valid("custom:") with pytest.raises(utils.ExtensionBuildError): utils.require_extension_name_valid("custom:.some.test.ext.") with pytest.raises(utils.ExtensionBuildError): utils.require_extension_name_valid("custom:some.test..ext") with pytest.raises(utils.ExtensionBuildError): utils.require_extension_name_valid("custom:som:e.t/est.e$xt") with pytest.raises(utils.ExtensionBuildError): utils.require_extension_name_valid("custom:SOME.test.ext") with pytest.raises(utils.ExtensionBuildError): utils.require_extension_name_valid("custom:SOME123.test.ext") with pytest.raises(utils.ExtensionBuildError): utils.require_extension_name_valid("custom:\u0194test,ext")
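Taken together, the accepted and rejected names above pin down a grammar: a `custom:` prefix followed by one or more dot-separated segments of lowercase letters, digits, underscores and hyphens, with no empty segments. A regex consistent with these test cases — an inference from the tests only, not necessarily dtcli's actual implementation:

# A regex consistent with the accepted/rejected names in the tests above; the
# real dtcli check may differ (this is inferred from the test cases only).
import re

_EXTENSION_NAME = re.compile(r'^custom:[a-z0-9_\-]+(\.[a-z0-9_\-]+)*$')


def is_valid_extension_name(name: str) -> bool:
    return bool(_EXTENSION_NAME.match(name))


# is_valid_extension_name('custom:some.test.ext')   -> True
# is_valid_extension_name('custom:some.test..ext')  -> False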
#!/usr/bin/env python3 # Copyright (c) 2015-2016 The Fujicoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test PrioritiseTransaction code # from test_framework.test_framework import FujicoinTestFramework from test_framework.util import * from test_framework.mininode import COIN, MAX_BLOCK_SIZE class PrioritiseTransactionTest(FujicoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 1 self.txouts = gen_return_txouts() def setup_network(self): self.nodes = [] self.is_network_split = False self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-printpriority=1"])) self.relayfee = self.nodes[0].getnetworkinfo()['relayfee'] def run_test(self): utxo_count = 90 utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], utxo_count) base_fee = self.relayfee*100 # our transactions are smaller than 100kb txids = [] # Create 3 batches of transactions at 3 different fee rate levels range_size = utxo_count // 3 for i in range(3): txids.append([]) start_range = i * range_size end_range = start_range + range_size txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], (i+1)*base_fee) # Make sure that the size of each group of transactions exceeds # MAX_BLOCK_SIZE -- otherwise the test needs to be revised to create # more transactions. mempool = self.nodes[0].getrawmempool(True) sizes = [0, 0, 0] for i in range(3): for j in txids[i]: assert(j in mempool) sizes[i] += mempool[j]['size'] assert(sizes[i] > MAX_BLOCK_SIZE) # Fail => raise utxo_count # add a fee delta to something in the cheapest bucket and make sure it gets mined # also check that a different entry in the cheapest bucket is NOT mined (lower # the priority to ensure its not mined due to priority) self.nodes[0].prioritisetransaction(txids[0][0], 0, int(3*base_fee*COIN)) self.nodes[0].prioritisetransaction(txids[0][1], -1e15, 0) self.nodes[0].generate(1) mempool = self.nodes[0].getrawmempool() print("Assert that prioritised transaction was mined") assert(txids[0][0] not in mempool) assert(txids[0][1] in mempool) high_fee_tx = None for x in txids[2]: if x not in mempool: high_fee_tx = x # Something high-fee should have been mined! assert(high_fee_tx != None) # Add a prioritisation before a tx is in the mempool (de-prioritising a # high-fee transaction so that it's now low fee). self.nodes[0].prioritisetransaction(high_fee_tx, -1e15, -int(2*base_fee*COIN)) # Add everything back to mempool self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Check to make sure our high fee rate tx is back in the mempool mempool = self.nodes[0].getrawmempool() assert(high_fee_tx in mempool) # Now verify the modified-high feerate transaction isn't mined before # the other high fee transactions. Keep mining until our mempool has # decreased by all the high fee size that we calculated above. while (self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]): self.nodes[0].generate(1) # High fee transaction should not have been mined, but other high fee rate # transactions should have been. mempool = self.nodes[0].getrawmempool() print("Assert that de-prioritised transaction is still in mempool") assert(high_fee_tx in mempool) for x in txids[2]: if (x != high_fee_tx): assert(x not in mempool) # Create a free, low priority transaction. Should be rejected. 
utxo_list = self.nodes[0].listunspent() assert(len(utxo_list) > 0) utxo = utxo_list[0] inputs = [] outputs = {} inputs.append({"txid" : utxo["txid"], "vout" : utxo["vout"]}) outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee raw_tx = self.nodes[0].createrawtransaction(inputs, outputs) tx_hex = self.nodes[0].signrawtransaction(raw_tx)["hex"] txid = self.nodes[0].sendrawtransaction(tx_hex) # A tx that spends an in-mempool tx has 0 priority, so we can use it to # test the effect of using prioritise transaction for mempool acceptance inputs = [] inputs.append({"txid": txid, "vout": 0}) outputs = {} outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee raw_tx2 = self.nodes[0].createrawtransaction(inputs, outputs) tx2_hex = self.nodes[0].signrawtransaction(raw_tx2)["hex"] tx2_id = self.nodes[0].decoderawtransaction(tx2_hex)["txid"] try: self.nodes[0].sendrawtransaction(tx2_hex) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) # insufficient fee assert(tx2_id not in self.nodes[0].getrawmempool()) else: assert(False) # This is a less than 1000-byte transaction, so just set the fee # to be the minimum for a 1000 byte transaction and check that it is # accepted. self.nodes[0].prioritisetransaction(tx2_id, 0, int(self.relayfee*COIN)) print("Assert that prioritised free transaction is accepted to mempool") assert_equal(self.nodes[0].sendrawtransaction(tx2_hex), tx2_id) assert(tx2_id in self.nodes[0].getrawmempool()) if __name__ == '__main__': PrioritiseTransactionTest().main()
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from polycommon.config_manager import ConfigManager


def set_admin(context, config: ConfigManager):
    admin_name = config.get_string("POLYAXON_ADMIN_NAME", is_optional=True)
    admin_mail = config.get_string("POLYAXON_ADMIN_MAIL", is_optional=True)
    if admin_name and admin_mail:
        admins = ((admin_name, admin_mail),)
        context["ADMINS"] = admins
        context["MANAGERS"] = admins
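A small usage sketch for the helper above, with a stand-in config object (the real `ConfigManager` reads the values from the environment or config store instead): Django consumes the resulting `ADMINS`/`MANAGERS` entries as `(name, email)` pairs for error notification.

# Usage sketch with a stand-in config object; not the real ConfigManager.
class _FakeConfig:
    def __init__(self, values):
        self._values = values

    def get_string(self, key, is_optional=False):
        return self._values.get(key)


context = {}
set_admin(context, _FakeConfig({
    "POLYAXON_ADMIN_NAME": "Ops",
    "POLYAXON_ADMIN_MAIL": "ops@example.com",
}))
# context["ADMINS"] == (("Ops", "ops@example.com"),)
# context["MANAGERS"] == context["ADMINS"]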
# Electrum - Lightweight Bitcoin Client # Copyright (c) 2011-2016 Thomas Voegtlin # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import time import queue import os import random import re from collections import defaultdict import threading import socket import json import sys import ipaddress import asyncio from typing import NamedTuple, Optional, Sequence, List, Dict, Tuple import traceback import dns import dns.resolver import aiorpcx from aiorpcx import TaskGroup from aiohttp import ClientResponse from . import util from .util import (PrintError, print_error, log_exceptions, ignore_exceptions, bfh, SilentTaskGroup, make_aiohttp_session, send_exception_to_crash_reporter, is_hash256_str, is_non_negative_integer) from .bitcoin import COIN from . import constants from . import blockchain from . import bitcoin from .blockchain import Blockchain, HEADER_SIZE from .interface import (Interface, serialize_server, deserialize_server, RequestTimedOut, NetworkTimeout) from .version import PROTOCOL_VERSION from .simple_config import SimpleConfig from .i18n import _ NODES_RETRY_INTERVAL = 60 SERVER_RETRY_INTERVAL = 10 def parse_servers(result: Sequence[Tuple[str, str, List[str]]]) -> Dict[str, dict]: """ parse servers list into dict format""" servers = {} for item in result: host = item[1] out = {} version = None pruning_level = '-' if len(item) > 2: for v in item[2]: if re.match(r"[st]\d*", v): protocol, port = v[0], v[1:] if port == '': port = constants.net.DEFAULT_PORTS[protocol] out[protocol] = port elif re.match("v(.?)+", v): version = v[1:] elif re.match(r"p\d*", v): pruning_level = v[1:] if pruning_level == '': pruning_level = '0' if out: out['pruning'] = pruning_level out['version'] = version servers[host] = out return servers def filter_version(servers): def is_recent(version): try: return util.versiontuple(version) >= util.versiontuple(PROTOCOL_VERSION) except Exception as e: return False return {k: v for k, v in servers.items() if is_recent(v.get('version'))} def filter_noonion(servers): return {k: v for k, v in servers.items() if not k.endswith('.onion')} def filter_protocol(hostmap, protocol='s'): '''Filters the hostmap for those implementing protocol. 
The result is a list in serialized form.''' eligible = [] for host, portmap in hostmap.items(): port = portmap.get(protocol) if port: eligible.append(serialize_server(host, port, protocol)) return eligible def pick_random_server(hostmap = None, protocol = 's', exclude_set = set()): if hostmap is None: hostmap = constants.net.DEFAULT_SERVERS eligible = list(set(filter_protocol(hostmap, protocol)) - exclude_set) return random.choice(eligible) if eligible else None class NetworkParameters(NamedTuple): host: str port: str protocol: str proxy: Optional[dict] auto_connect: bool oneserver: bool = False proxy_modes = ['socks4', 'socks5'] def serialize_proxy(p): if not isinstance(p, dict): return None return ':'.join([p.get('mode'), p.get('host'), p.get('port'), p.get('user', ''), p.get('password', '')]) def deserialize_proxy(s: str) -> Optional[dict]: if not isinstance(s, str): return None if s.lower() == 'none': return None proxy = { "mode":"socks5", "host":"localhost" } # FIXME raw IPv6 address fails here args = s.split(':') n = 0 if proxy_modes.count(args[n]) == 1: proxy["mode"] = args[n] n += 1 if len(args) > n: proxy["host"] = args[n] n += 1 if len(args) > n: proxy["port"] = args[n] n += 1 else: proxy["port"] = "8080" if proxy["mode"] == "http" else "1080" if len(args) > n: proxy["user"] = args[n] n += 1 if len(args) > n: proxy["password"] = args[n] return proxy class BestEffortRequestFailed(Exception): pass class TxBroadcastError(Exception): def get_message_for_gui(self): raise NotImplementedError() class TxBroadcastHashMismatch(TxBroadcastError): def get_message_for_gui(self): return "{}\n{}\n\n{}" \ .format(_("The server returned an unexpected transaction ID when broadcasting the transaction."), _("Consider trying to connect to a different server, or updating Electrum."), str(self)) class TxBroadcastServerReturnedError(TxBroadcastError): def get_message_for_gui(self): return "{}\n{}\n\n{}" \ .format(_("The server returned an error when broadcasting the transaction."), _("Consider trying to connect to a different server, or updating Electrum."), str(self)) class TxBroadcastUnknownError(TxBroadcastError): def get_message_for_gui(self): return "{}\n{}" \ .format(_("Unknown error when broadcasting the transaction."), _("Consider trying to connect to a different server, or updating Electrum.")) class UntrustedServerReturnedError(Exception): def __init__(self, *, original_exception): self.original_exception = original_exception def __str__(self): return _("The server returned an error.") def __repr__(self): return f"<UntrustedServerReturnedError original_exception: {repr(self.original_exception)}>" INSTANCE = None class Network(PrintError): """The Network class manages a set of connections to remote electrum servers, each connected socket is handled by an Interface() object. """ verbosity_filter = 'n' def __init__(self, config: SimpleConfig=None): global INSTANCE INSTANCE = self self.asyncio_loop = asyncio.get_event_loop() assert self.asyncio_loop.is_running(), "event loop not running" self._loop_thread = None # type: threading.Thread # set by caller; only used for sanity checks if config is None: config = {} # Do not use mutables as default values! 
self.config = SimpleConfig(config) if isinstance(config, dict) else config # type: SimpleConfig blockchain.read_blockchains(self.config) self.print_error("blockchains", list(map(lambda b: b.forkpoint, blockchain.blockchains.values()))) self._blockchain_preferred_block = self.config.get('blockchain_preferred_block', None) # type: Optional[Dict] self._blockchain = blockchain.get_best_chain() # Server for addresses and transactions self.default_server = self.config.get('server', None) # Sanitize default server if self.default_server: try: deserialize_server(self.default_server) except: self.print_error('Warning: failed to parse server-string; falling back to random.') self.default_server = None if not self.default_server: self.default_server = pick_random_server() self.main_taskgroup = None # type: TaskGroup # locks self.restart_lock = asyncio.Lock() self.bhi_lock = asyncio.Lock() self.callback_lock = threading.Lock() self.recent_servers_lock = threading.RLock() # <- re-entrant self.interfaces_lock = threading.Lock() # for mutating/iterating self.interfaces self.server_peers = {} # returned by interface (servers that the main interface knows about) self.recent_servers = self._read_recent_servers() # note: needs self.recent_servers_lock self.banner = '' self.donation_address = '' self.relay_fee = None # type: Optional[int] # callbacks set by the GUI self.callbacks = defaultdict(list) # note: needs self.callback_lock dir_path = os.path.join(self.config.path, 'certs') util.make_dir(dir_path) # retry times self.server_retry_time = time.time() self.nodes_retry_time = time.time() # the main server we are currently communicating with self.interface = None # type: Interface # set of servers we have an ongoing connection with self.interfaces = {} # type: Dict[str, Interface] self.auto_connect = self.config.get('auto_connect', True) self.connecting = set() self.server_queue = None self.proxy = None # Dump network messages (all interfaces). Set at runtime from the console. 
self.debug = False self._set_status('disconnected') def run_from_another_thread(self, coro): assert self._loop_thread != threading.current_thread(), 'must not be called from network thread' fut = asyncio.run_coroutine_threadsafe(coro, self.asyncio_loop) return fut.result() @staticmethod def get_instance(): return INSTANCE def with_recent_servers_lock(func): def func_wrapper(self, *args, **kwargs): with self.recent_servers_lock: return func(self, *args, **kwargs) return func_wrapper def register_callback(self, callback, events): with self.callback_lock: for event in events: self.callbacks[event].append(callback) def unregister_callback(self, callback): with self.callback_lock: for callbacks in self.callbacks.values(): if callback in callbacks: callbacks.remove(callback) def trigger_callback(self, event, *args): with self.callback_lock: callbacks = self.callbacks[event][:] for callback in callbacks: # FIXME: if callback throws, we will lose the traceback if asyncio.iscoroutinefunction(callback): asyncio.run_coroutine_threadsafe(callback(event, *args), self.asyncio_loop) else: self.asyncio_loop.call_soon_threadsafe(callback, event, *args) def _read_recent_servers(self): if not self.config.path: return [] path = os.path.join(self.config.path, "recent_servers") try: with open(path, "r", encoding='utf-8') as f: data = f.read() return json.loads(data) except: return [] @with_recent_servers_lock def _save_recent_servers(self): if not self.config.path: return path = os.path.join(self.config.path, "recent_servers") s = json.dumps(self.recent_servers, indent=4, sort_keys=True) try: with open(path, "w", encoding='utf-8') as f: f.write(s) except: pass def get_server_height(self): interface = self.interface return interface.tip if interface else 0 async def _server_is_lagging(self): sh = self.get_server_height() if not sh: self.print_error('no height for main interface') return True lh = self.get_local_height() result = (lh - sh) > 1 if result: self.print_error(f'{self.default_server} is lagging ({sh} vs {lh})') return result def _set_status(self, status): self.connection_status = status self.notify('status') def is_connected(self): interface = self.interface return interface is not None and interface.ready.done() def is_connecting(self): return self.connection_status == 'connecting' async def _request_server_info(self, interface): await interface.ready session = interface.session async def get_banner(): self.banner = await session.send_request('server.banner') self.notify('banner') async def get_donation_address(): addr = await session.send_request('server.donation_address') if not bitcoin.is_address(addr): self.print_error(f"invalid donation address from server: {addr}") addr = '' self.donation_address = addr async def get_server_peers(): self.server_peers = parse_servers(await session.send_request('server.peers.subscribe')) self.notify('servers') async def get_relay_fee(): relayfee = await session.send_request('blockchain.relayfee') if relayfee is None: self.relay_fee = None else: relayfee = int(relayfee * COIN) self.relay_fee = max(0, relayfee) async with TaskGroup() as group: await group.spawn(get_banner) await group.spawn(get_donation_address) await group.spawn(get_server_peers) await group.spawn(get_relay_fee) await group.spawn(self._request_fee_estimates(interface)) async def _request_fee_estimates(self, interface): session = interface.session from .simple_config import FEE_ETA_TARGETS self.config.requested_fee_estimates() async with TaskGroup() as group: histogram_task = await 
group.spawn(session.send_request('mempool.get_fee_histogram')) fee_tasks = [] for i in FEE_ETA_TARGETS: fee_tasks.append((i, await group.spawn(session.send_request('blockchain.estimatefee', [i])))) self.config.mempool_fees = histogram = histogram_task.result() self.print_error(f'fee_histogram {histogram}') self.notify('fee_histogram') fee_estimates_eta = {} for nblock_target, task in fee_tasks: fee = int(task.result() * COIN) fee_estimates_eta[nblock_target] = fee if fee < 0: continue self.config.update_fee_estimates(nblock_target, fee) self.print_error(f'fee_estimates {fee_estimates_eta}') self.notify('fee') def get_status_value(self, key): if key == 'status': value = self.connection_status elif key == 'banner': value = self.banner elif key == 'fee': value = self.config.fee_estimates elif key == 'fee_histogram': value = self.config.mempool_fees elif key == 'servers': value = self.get_servers() else: raise Exception('unexpected trigger key {}'.format(key)) return value def notify(self, key): if key in ['status', 'updated']: self.trigger_callback(key) else: self.trigger_callback(key, self.get_status_value(key)) def get_parameters(self) -> NetworkParameters: host, port, protocol = deserialize_server(self.default_server) return NetworkParameters(host=host, port=port, protocol=protocol, proxy=self.proxy, auto_connect=self.auto_connect, oneserver=self.oneserver) def get_donation_address(self): if self.is_connected(): return self.donation_address def get_interfaces(self) -> List[str]: """The list of servers for the connected interfaces.""" with self.interfaces_lock: return list(self.interfaces) @with_recent_servers_lock def get_servers(self): # start with hardcoded servers out = dict(constants.net.DEFAULT_SERVERS) # copy # add recent servers for s in self.recent_servers: try: host, port, protocol = deserialize_server(s) except: continue if host not in out: out[host] = {protocol: port} # add servers received from main interface server_peers = self.server_peers if server_peers: out.update(filter_version(server_peers.copy())) # potentially filter out some if self.config.get('noonion'): out = filter_noonion(out) return out def _start_interface(self, server: str): if server not in self.interfaces and server not in self.connecting: if server == self.default_server: self.print_error(f"connecting to {server} as new interface") self._set_status('connecting') self.connecting.add(server) self.server_queue.put(server) def _start_random_interface(self): with self.interfaces_lock: exclude_set = self.disconnected_servers | set(self.interfaces) | self.connecting server = pick_random_server(self.get_servers(), self.protocol, exclude_set) if server: self._start_interface(server) return server def _set_proxy(self, proxy: Optional[dict]): self.proxy = proxy # Store these somewhere so we can un-monkey-patch if not hasattr(socket, "_getaddrinfo"): socket._getaddrinfo = socket.getaddrinfo if proxy: self.print_error('setting proxy', proxy) # prevent dns leaks, see http://stackoverflow.com/questions/13184205/dns-over-proxy socket.getaddrinfo = lambda *args: [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))] else: if sys.platform == 'win32': # On Windows, socket.getaddrinfo takes a mutex, and might hold it for up to 10 seconds # when dns-resolving. To speed it up drastically, we resolve dns ourselves, outside that lock. 
# see #4421 socket.getaddrinfo = self._fast_getaddrinfo else: socket.getaddrinfo = socket._getaddrinfo self.trigger_callback('proxy_set', self.proxy) @staticmethod def _fast_getaddrinfo(host, *args, **kwargs): def needs_dns_resolving(host2): try: ipaddress.ip_address(host2) return False # already valid IP except ValueError: pass # not an IP if str(host) in ('localhost', 'localhost.',): return False return True try: if needs_dns_resolving(host): answers = dns.resolver.query(host) addr = str(answers[0]) else: addr = host except dns.exception.DNSException as e: # dns failed for some reason, e.g. dns.resolver.NXDOMAIN # this is normal. Simply report back failure: raise socket.gaierror(11001, 'getaddrinfo failed') from e except BaseException as e: # Possibly internal error in dnspython :( see #4483 # Fall back to original socket.getaddrinfo to resolve dns. print_error('dnspython failed to resolve dns with error:', e) addr = host return socket._getaddrinfo(addr, *args, **kwargs) @log_exceptions async def set_parameters(self, net_params: NetworkParameters): proxy = net_params.proxy proxy_str = serialize_proxy(proxy) host, port, protocol = net_params.host, net_params.port, net_params.protocol server_str = serialize_server(host, port, protocol) # sanitize parameters try: deserialize_server(serialize_server(host, port, protocol)) if proxy: proxy_modes.index(proxy['mode']) + 1 int(proxy['port']) except: return self.config.set_key('auto_connect', net_params.auto_connect, False) self.config.set_key('oneserver', net_params.oneserver, False) self.config.set_key('proxy', proxy_str, False) self.config.set_key('server', server_str, True) # abort if changes were not allowed by config if self.config.get('server') != server_str \ or self.config.get('proxy') != proxy_str \ or self.config.get('oneserver') != net_params.oneserver: return async with self.restart_lock: self.auto_connect = net_params.auto_connect if self.proxy != proxy or self.protocol != protocol or self.oneserver != net_params.oneserver: # Restart the network defaulting to the given server await self._stop() self.default_server = server_str await self._start() elif self.default_server != server_str: await self.switch_to_interface(server_str) else: await self.switch_lagging_interface() def _set_oneserver(self, oneserver: bool): self.num_server = 10 if not oneserver else 0 self.oneserver = bool(oneserver) async def _switch_to_random_interface(self): '''Switch to a random connected server other than the current one''' servers = self.get_interfaces() # Those in connected state if self.default_server in servers: servers.remove(self.default_server) if servers: await self.switch_to_interface(random.choice(servers)) async def switch_lagging_interface(self): '''If auto_connect and lagging, switch interface''' if self.auto_connect and await self._server_is_lagging(): # switch to one that has the correct header (not height) best_header = self.blockchain().read_header(self.get_local_height()) with self.interfaces_lock: interfaces = list(self.interfaces.values()) filtered = list(filter(lambda iface: iface.tip_header == best_header, interfaces)) if filtered: chosen_iface = random.choice(filtered) await self.switch_to_interface(chosen_iface.server) async def switch_unwanted_fork_interface(self): """If auto_connect and main interface is not on preferred fork, try to switch to preferred fork. 
""" if not self.auto_connect or not self.interface: return with self.interfaces_lock: interfaces = list(self.interfaces.values()) # try to switch to preferred fork if self._blockchain_preferred_block: pref_height = self._blockchain_preferred_block['height'] pref_hash = self._blockchain_preferred_block['hash'] if self.interface.blockchain.check_hash(pref_height, pref_hash): return # already on preferred fork filtered = list(filter(lambda iface: iface.blockchain.check_hash(pref_height, pref_hash), interfaces)) if filtered: self.print_error("switching to preferred fork") chosen_iface = random.choice(filtered) await self.switch_to_interface(chosen_iface.server) return else: self.print_error("tried to switch to preferred fork but no interfaces are on it") # try to switch to best chain if self.blockchain().parent is None: return # already on best chain filtered = list(filter(lambda iface: iface.blockchain.parent is None, interfaces)) if filtered: self.print_error("switching to best chain") chosen_iface = random.choice(filtered) await self.switch_to_interface(chosen_iface.server) else: # FIXME switch to best available? self.print_error("tried to switch to best chain but no interfaces are on it") async def switch_to_interface(self, server: str): """Switch to server as our main interface. If no connection exists, queue interface to be started. The actual switch will happen when the interface becomes ready. """ self.default_server = server old_interface = self.interface old_server = old_interface.server if old_interface else None # Stop any current interface in order to terminate subscriptions, # and to cancel tasks in interface.group. # However, for headers sub, give preference to this interface # over unknown ones, i.e. start it again right away. if old_server and old_server != server: await self._close_interface(old_interface) if len(self.interfaces) <= self.num_server: self._start_interface(old_server) if server not in self.interfaces: self.interface = None self._start_interface(server) return i = self.interfaces[server] if old_interface != i: self.print_error("switching to", server) blockchain_updated = i.blockchain != self.blockchain() self.interface = i await i.group.spawn(self._request_server_info(i)) self.trigger_callback('default_server_changed') self._set_status('connected') self.trigger_callback('network_updated') if blockchain_updated: self.trigger_callback('blockchain_updated') async def _close_interface(self, interface): if interface: with self.interfaces_lock: if self.interfaces.get(interface.server) == interface: self.interfaces.pop(interface.server) if interface.server == self.default_server: self.interface = None await interface.close() @with_recent_servers_lock def _add_recent_server(self, server): # list is ordered if server in self.recent_servers: self.recent_servers.remove(server) self.recent_servers.insert(0, server) self.recent_servers = self.recent_servers[0:20] self._save_recent_servers() async def connection_down(self, interface: Interface): '''A connection to server either went down, or was never made. 
We distinguish by whether it is in self.interfaces.''' if not interface: return server = interface.server self.disconnected_servers.add(server) if server == self.default_server: self._set_status('disconnected') await self._close_interface(interface) self.trigger_callback('network_updated') def get_network_timeout_seconds(self, request_type=NetworkTimeout.Generic) -> int: if self.oneserver and not self.auto_connect: return request_type.MOST_RELAXED if self.proxy: return request_type.RELAXED return request_type.NORMAL @ignore_exceptions # do not kill main_taskgroup @log_exceptions async def _run_new_interface(self, server): interface = Interface(self, server, self.proxy) timeout = self.get_network_timeout_seconds(NetworkTimeout.Urgent) try: await asyncio.wait_for(interface.ready, timeout) except BaseException as e: #traceback.print_exc() self.print_error(f"couldn't launch iface {server} -- {repr(e)}") await interface.close() return else: with self.interfaces_lock: assert server not in self.interfaces self.interfaces[server] = interface finally: try: self.connecting.remove(server) except KeyError: pass if server == self.default_server: await self.switch_to_interface(server) self._add_recent_server(server) self.trigger_callback('network_updated') async def _init_headers_file(self): b = blockchain.get_best_chain() filename = b.path() length = HEADER_SIZE * len(constants.net.CHECKPOINTS) * 2016 if not os.path.exists(filename) or os.path.getsize(filename) < length: with open(filename, 'wb') as f: if length > 0: f.seek(length-1) f.write(b'\x00') util.ensure_sparse_file(filename) with b.lock: b.update_size() def best_effort_reliable(func): async def make_reliable_wrapper(self, *args, **kwargs): for i in range(10): iface = self.interface # retry until there is a main interface if not iface: await asyncio.sleep(0.1) continue # try again # wait for it to be usable iface_ready = iface.ready iface_disconnected = iface.got_disconnected await asyncio.wait([iface_ready, iface_disconnected], return_when=asyncio.FIRST_COMPLETED) if not iface_ready.done() or iface_ready.cancelled(): await asyncio.sleep(0.1) continue # try again # try actual request success_fut = asyncio.ensure_future(func(self, *args, **kwargs)) await asyncio.wait([success_fut, iface_disconnected], return_when=asyncio.FIRST_COMPLETED) if success_fut.done() and not success_fut.cancelled(): if success_fut.exception(): try: raise success_fut.exception() except RequestTimedOut: await iface.close() await iface_disconnected continue # try again return success_fut.result() # otherwise; try again raise BestEffortRequestFailed('no interface to do request on... 
gave up.') return make_reliable_wrapper def catch_server_exceptions(func): async def wrapper(self, *args, **kwargs): try: return await func(self, *args, **kwargs) except aiorpcx.jsonrpc.CodeMessageError as e: raise UntrustedServerReturnedError(original_exception=e) return wrapper @best_effort_reliable @catch_server_exceptions async def get_merkle_for_transaction(self, tx_hash: str, tx_height: int) -> dict: if not is_hash256_str(tx_hash): raise Exception(f"{repr(tx_hash)} is not a txid") if not is_non_negative_integer(tx_height): raise Exception(f"{repr(tx_height)} is not a block height") return await self.interface.session.send_request('blockchain.transaction.get_merkle', [tx_hash, tx_height]) @best_effort_reliable async def broadcast_transaction(self, tx, *, timeout=None) -> None: if timeout is None: timeout = self.get_network_timeout_seconds(NetworkTimeout.Urgent) try: out = await self.interface.session.send_request('blockchain.transaction.broadcast', [str(tx)], timeout=timeout) # note: both 'out' and exception messages are untrusted input from the server except (RequestTimedOut, asyncio.CancelledError, asyncio.TimeoutError): raise # pass-through except aiorpcx.jsonrpc.CodeMessageError as e: self.print_error(f"broadcast_transaction error: {repr(e)}") raise TxBroadcastServerReturnedError(self.sanitize_tx_broadcast_response(e.message)) from e except BaseException as e: # intentional BaseException for sanity! self.print_error(f"broadcast_transaction error2: {repr(e)}") send_exception_to_crash_reporter(e) raise TxBroadcastUnknownError() from e if out != tx.txid(): self.print_error(f"unexpected txid for broadcast_transaction: {out} != {tx.txid()}") raise TxBroadcastHashMismatch(_("Server returned unexpected transaction ID.")) @staticmethod def sanitize_tx_broadcast_response(server_msg) -> str: # Unfortunately, bitcoind and hence the Electrum protocol doesn't return a useful error code. # So, we use substring matching to grok the error message. # server_msg is untrusted input so it should not be shown to the user. 
see #4968 server_msg = str(server_msg) server_msg = server_msg.replace("\n", r"\n") # https://github.com/bitcoin/bitcoin/blob/cd42553b1178a48a16017eff0b70669c84c3895c/src/policy/policy.cpp # grep "reason =" policy_error_messages = { r"version": _("Transaction uses non-standard version."), r"tx-size": _("The transaction was rejected because it is too large (in bytes)."), r"scriptsig-size": None, r"scriptsig-not-pushonly": None, r"scriptpubkey": None, r"bare-multisig": None, r"dust": _("Transaction could not be broadcast due to dust outputs."), r"multi-op-return": _("The transaction was rejected because it contains multiple OP_RETURN outputs."), } for substring in policy_error_messages: if substring in server_msg: msg = policy_error_messages[substring] return msg if msg else substring # https://github.com/bitcoin/bitcoin/blob/cd42553b1178a48a16017eff0b70669c84c3895c/src/script/script_error.cpp script_error_messages = { r"Script evaluated without error but finished with a false/empty top stack element", r"Script failed an OP_VERIFY operation", r"Script failed an OP_EQUALVERIFY operation", r"Script failed an OP_CHECKMULTISIGVERIFY operation", r"Script failed an OP_CHECKSIGVERIFY operation", r"Script failed an OP_NUMEQUALVERIFY operation", r"Script is too big", r"Push value size limit exceeded", r"Operation limit exceeded", r"Stack size limit exceeded", r"Signature count negative or greater than pubkey count", r"Pubkey count negative or limit exceeded", r"Opcode missing or not understood", r"Attempted to use a disabled opcode", r"Operation not valid with the current stack size", r"Operation not valid with the current altstack size", r"OP_RETURN was encountered", r"Invalid OP_IF construction", r"Negative locktime", r"Locktime requirement not satisfied", r"Signature hash type missing or not understood", r"Non-canonical DER signature", r"Data push larger than necessary", r"Only non-push operators allowed in signatures", r"Non-canonical signature: S value is unnecessarily high", r"Dummy CHECKMULTISIG argument must be zero", r"OP_IF/NOTIF argument must be minimal", r"Signature must be zero for failed CHECK(MULTI)SIG operation", r"NOPx reserved for soft-fork upgrades", r"Witness version reserved for soft-fork upgrades", r"Public key is neither compressed or uncompressed", r"Extra items left on stack after execution", r"Witness program has incorrect length", r"Witness program was passed an empty witness", r"Witness program hash mismatch", r"Witness requires empty scriptSig", r"Witness requires only-redeemscript scriptSig", r"Witness provided for non-witness script", r"Using non-compressed keys in segwit", r"Using OP_CODESEPARATOR in non-witness script", r"Signature is found in scriptCode", } for substring in script_error_messages: if substring in server_msg: return substring # https://github.com/bitcoin/bitcoin/blob/cd42553b1178a48a16017eff0b70669c84c3895c/src/validation.cpp # grep "REJECT_" # should come after script_error.cpp (due to e.g. 
non-mandatory-script-verify-flag) validation_error_messages = { r"coinbase", r"tx-size-small", r"non-final", r"txn-already-in-mempool", r"txn-mempool-conflict", r"txn-already-known", r"non-BIP68-final", r"bad-txns-nonstandard-inputs", r"bad-witness-nonstandard", r"bad-txns-too-many-sigops", r"mempool min fee not met", r"min relay fee not met", r"absurdly-high-fee", r"too-long-mempool-chain", r"bad-txns-spends-conflicting-tx", r"insufficient fee", r"too many potential replacements", r"replacement-adds-unconfirmed", r"mempool full", r"non-mandatory-script-verify-flag", r"mandatory-script-verify-flag-failed", } for substring in validation_error_messages: if substring in server_msg: return substring # https://github.com/bitcoin/bitcoin/blob/cd42553b1178a48a16017eff0b70669c84c3895c/src/rpc/rawtransaction.cpp # grep "RPC_TRANSACTION" # grep "RPC_DESERIALIZATION_ERROR" rawtransaction_error_messages = { r"Missing inputs", r"transaction already in block chain", r"TX decode failed", } for substring in rawtransaction_error_messages: if substring in server_msg: return substring # https://github.com/bitcoin/bitcoin/blob/cd42553b1178a48a16017eff0b70669c84c3895c/src/consensus/tx_verify.cpp # grep "REJECT_" tx_verify_error_messages = { r"bad-txns-vin-empty", r"bad-txns-vout-empty", r"bad-txns-oversize", r"bad-txns-vout-negative", r"bad-txns-vout-toolarge", r"bad-txns-txouttotal-toolarge", r"bad-txns-inputs-duplicate", r"bad-cb-length", r"bad-txns-prevout-null", r"bad-txns-inputs-missingorspent", r"bad-txns-premature-spend-of-coinbase", r"bad-txns-inputvalues-outofrange", r"bad-txns-in-belowout", r"bad-txns-fee-outofrange", } for substring in tx_verify_error_messages: if substring in server_msg: return substring # otherwise: return _("Unknown error") @best_effort_reliable @catch_server_exceptions async def request_chunk(self, height: int, tip=None, *, can_return_early=False): if not is_non_negative_integer(height): raise Exception(f"{repr(height)} is not a block height") return await self.interface.request_chunk(height, tip=tip, can_return_early=can_return_early) @best_effort_reliable @catch_server_exceptions async def get_transaction(self, tx_hash: str, *, timeout=None) -> str: if not is_hash256_str(tx_hash): raise Exception(f"{repr(tx_hash)} is not a txid") return await self.interface.session.send_request('blockchain.transaction.get', [tx_hash], timeout=timeout) @best_effort_reliable @catch_server_exceptions async def get_history_for_scripthash(self, sh: str) -> List[dict]: if not is_hash256_str(sh): raise Exception(f"{repr(sh)} is not a scripthash") return await self.interface.session.send_request('blockchain.scripthash.get_history', [sh]) @best_effort_reliable @catch_server_exceptions async def listunspent_for_scripthash(self, sh: str) -> List[dict]: if not is_hash256_str(sh): raise Exception(f"{repr(sh)} is not a scripthash") return await self.interface.session.send_request('blockchain.scripthash.listunspent', [sh]) @best_effort_reliable @catch_server_exceptions async def get_balance_for_scripthash(self, sh: str) -> dict: if not is_hash256_str(sh): raise Exception(f"{repr(sh)} is not a scripthash") return await self.interface.session.send_request('blockchain.scripthash.get_balance', [sh]) def blockchain(self) -> Blockchain: interface = self.interface if interface and interface.blockchain is not None: self._blockchain = interface.blockchain return self._blockchain def get_blockchains(self): out = {} # blockchain_id -> list(interfaces) with blockchain.blockchains_lock: blockchain_items = 
list(blockchain.blockchains.items()) with self.interfaces_lock: interfaces_values = list(self.interfaces.values()) for chain_id, bc in blockchain_items: r = list(filter(lambda i: i.blockchain==bc, interfaces_values)) if r: out[chain_id] = r return out def _set_preferred_chain(self, chain: Blockchain): height = chain.get_max_forkpoint() header_hash = chain.get_hash(height) self._blockchain_preferred_block = { 'height': height, 'hash': header_hash, } self.config.set_key('blockchain_preferred_block', self._blockchain_preferred_block) async def follow_chain_given_id(self, chain_id: str) -> None: bc = blockchain.blockchains.get(chain_id) if not bc: raise Exception('blockchain {} not found'.format(chain_id)) self._set_preferred_chain(bc) # select server on this chain with self.interfaces_lock: interfaces = list(self.interfaces.values()) interfaces_on_selected_chain = list(filter(lambda iface: iface.blockchain == bc, interfaces)) if len(interfaces_on_selected_chain) == 0: return chosen_iface = random.choice(interfaces_on_selected_chain) # switch to server (and save to config) net_params = self.get_parameters() host, port, protocol = deserialize_server(chosen_iface.server) net_params = net_params._replace(host=host, port=port, protocol=protocol) await self.set_parameters(net_params) async def follow_chain_given_server(self, server_str: str) -> None: # note that server_str should correspond to a connected interface iface = self.interfaces.get(server_str) if iface is None: return self._set_preferred_chain(iface.blockchain) # switch to server (and save to config) net_params = self.get_parameters() host, port, protocol = deserialize_server(server_str) net_params = net_params._replace(host=host, port=port, protocol=protocol) await self.set_parameters(net_params) def get_local_height(self): return self.blockchain().height() def export_checkpoints(self, path): """Run manually to generate blockchain checkpoints. Kept for console use only. 
""" cp = self.blockchain().get_checkpoints() with open(path, 'w', encoding='utf-8') as f: f.write(json.dumps(cp, indent=4)) async def _start(self): assert not self.main_taskgroup self.main_taskgroup = main_taskgroup = SilentTaskGroup() assert not self.interface and not self.interfaces assert not self.connecting and not self.server_queue self.print_error('starting network') self.disconnected_servers = set([]) self.protocol = deserialize_server(self.default_server)[2] self.server_queue = queue.Queue() self._set_proxy(deserialize_proxy(self.config.get('proxy'))) self._set_oneserver(self.config.get('oneserver', False)) self._start_interface(self.default_server) async def main(): try: await self._init_headers_file() # note: if a task finishes with CancelledError, that # will NOT raise, and the group will keep the other tasks running async with main_taskgroup as group: await group.spawn(self._maintain_sessions()) [await group.spawn(job) for job in self._jobs] except Exception as e: traceback.print_exc(file=sys.stderr) raise e asyncio.run_coroutine_threadsafe(main(), self.asyncio_loop) self.trigger_callback('network_updated') def start(self, jobs: List=None): self._jobs = jobs or [] asyncio.run_coroutine_threadsafe(self._start(), self.asyncio_loop) @log_exceptions async def _stop(self, full_shutdown=False): self.print_error("stopping network") try: await asyncio.wait_for(self.main_taskgroup.cancel_remaining(), timeout=2) except (asyncio.TimeoutError, asyncio.CancelledError) as e: self.print_error(f"exc during main_taskgroup cancellation: {repr(e)}") self.main_taskgroup = None # type: TaskGroup self.interface = None # type: Interface self.interfaces = {} # type: Dict[str, Interface] self.connecting.clear() self.server_queue = None if not full_shutdown: self.trigger_callback('network_updated') def stop(self): assert self._loop_thread != threading.current_thread(), 'must not be called from network thread' fut = asyncio.run_coroutine_threadsafe(self._stop(full_shutdown=True), self.asyncio_loop) try: fut.result(timeout=2) except (asyncio.TimeoutError, asyncio.CancelledError): pass async def _ensure_there_is_a_main_interface(self): if self.is_connected(): return now = time.time() # if auto_connect is set, try a different server if self.auto_connect and not self.is_connecting(): await self._switch_to_random_interface() # if auto_connect is not set, or still no main interface, retry current if not self.is_connected() and not self.is_connecting(): if self.default_server in self.disconnected_servers: if now - self.server_retry_time > SERVER_RETRY_INTERVAL: self.disconnected_servers.remove(self.default_server) self.server_retry_time = now else: await self.switch_to_interface(self.default_server) async def _maintain_sessions(self): async def launch_already_queued_up_new_interfaces(): while self.server_queue.qsize() > 0: server = self.server_queue.get() await self.main_taskgroup.spawn(self._run_new_interface(server)) async def maybe_queue_new_interfaces_to_be_launched_later(): now = time.time() for i in range(self.num_server - len(self.interfaces) - len(self.connecting)): self._start_random_interface() if now - self.nodes_retry_time > NODES_RETRY_INTERVAL: self.print_error('network: retrying connections') self.disconnected_servers = set([]) self.nodes_retry_time = now async def maintain_main_interface(): await self._ensure_there_is_a_main_interface() if self.is_connected(): if self.config.is_fee_estimates_update_required(): await self.interface.group.spawn(self._request_fee_estimates, self.interface) while 
True: try: await launch_already_queued_up_new_interfaces() await maybe_queue_new_interfaces_to_be_launched_later() await maintain_main_interface() except asyncio.CancelledError: # suppress spurious cancellations group = self.main_taskgroup if not group or group._closed: raise await asyncio.sleep(0.1) async def _send_http_on_proxy(self, method: str, url: str, params: str = None, body: bytes = None, json: dict = None, headers=None, on_finish=None): async def default_on_finish(resp: ClientResponse): resp.raise_for_status() return await resp.text() if headers is None: headers = {} if on_finish is None: on_finish = default_on_finish async with make_aiohttp_session(self.proxy) as session: if method == 'get': async with session.get(url, params=params, headers=headers) as resp: return await on_finish(resp) elif method == 'post': assert body is not None or json is not None, 'body or json must be supplied if method is post' if body is not None: async with session.post(url, data=body, headers=headers) as resp: return await on_finish(resp) elif json is not None: async with session.post(url, json=json, headers=headers) as resp: return await on_finish(resp) else: assert False @staticmethod def send_http_on_proxy(method, url, **kwargs): network = Network.get_instance() assert network._loop_thread is not threading.currentThread() coro = asyncio.run_coroutine_threadsafe(network._send_http_on_proxy(method, url, **kwargs), network.asyncio_loop) return coro.result(5) # methods used in scripts async def get_peers(self): while not self.is_connected(): await asyncio.sleep(1) session = self.interface.session return parse_servers(await session.send_request('server.peers.subscribe')) async def send_multiple_requests(self, servers: List[str], method: str, params: Sequence): num_connecting = len(self.connecting) for server in servers: self._start_interface(server) # sleep a bit for _ in range(10): if len(self.connecting) < num_connecting: break await asyncio.sleep(1) responses = dict() async def get_response(iface: Interface): try: res = await iface.session.send_request(method, params, timeout=10) except Exception as e: res = e responses[iface.server] = res async with TaskGroup() as group: for server in servers: interface = self.interfaces.get(server) if interface: await group.spawn(get_response(interface)) return responses
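Because all of the request helpers above are coroutines bound to the network event loop, GUI or CLI code running on another thread is expected to go through `run_from_another_thread`. A hedged usage sketch (the wrapper function and its error handling are illustrative, not part of Electrum):

# Hedged usage sketch: fetch a raw transaction from a non-network thread via
# the running Network singleton. The txid argument is a caller-supplied value.
def fetch_raw_transaction(txid: str) -> str:
    network = Network.get_instance()
    if network is None:
        raise RuntimeError('Network is not running')
    return network.run_from_another_thread(network.get_transaction(txid))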
import mock import testtools from shakenfist import net class NetTestCase(testtools.TestCase): def setUp(self): super(NetTestCase, self).setUp() self.ipmanager_persist = mock.patch( 'shakenfist.db.persist_ipmanager') self.mock_ipmanager_persist = self.ipmanager_persist.start() def test_init(self): net.Network(uuid='notauuid', vxlan_id=42, provide_dhcp=True, provide_nat=True, physical_nic='eth0', ipblock='192.168.1.0/24') def test_str(self): n = net.Network(uuid='notauuid', vxlan_id=42, provide_dhcp=True, provide_nat=True, physical_nic='eth0', ipblock='192.168.1.0/24') self.assertEqual('network(notauuid, vxid 42)', str(n))
#._cv_part guppy.etc.KanExtension class LeftKanExtension: # Implementation of algorithms described by Brown and Heyworth (ref.251) # and Heyworth (ref.253). def __init__(self, mod, A, B, R, X, F): # External subsystem dependencies # mod.KnuthBendix # mod.FiniteAutomaton # mod.SolveFSA # mod.Cat # mod.Cat.Function # mod.Cat.Functor # mod.Cat.check_graph # mod.Cat.check_rules self.mod = mod self.Cat = mod.Cat # self.Cat.check_graph(A) self.Cat.check_graph(B) self.Cat.check_rules(R, B) # self.A = A self.B = B self.R = [(tuple(g), tuple(h)) for (g, h) in R] self.X = X self.F = F self.general_procedure() def general_procedure(self): self.initialize_tables() self.make_confluent_system() if 0: self.make_catalogue() else: self.make_automaton() self.make_natural_transformation() def initialize_tables(self): self.obj_to_str_table = {} self.str_to_obj_table = {} self.make_initial_rules() def make_initial_rules(self): # Algorithm 6.1 in (251) Re = [] def add_rule(a, b): aw = self.make_word(a) bw = self.make_word(b) if aw != bw: Re.append( ( aw, bw )) for a in self.A.arrows: srca = self.A.source(a) tgta = self.A.target(a) XA = self.X.fo(srca) Xa = self.X.fa(a) Fa = tuple(self.F.fa(a)) Fsrca = self.F.fo(srca) Ftgta = self.F.fo(tgta) if Fa: t = Fsrca for b in Fa: srcb = self.B.source(b) if srcb != t: raise ValueError, \ 'Arrow [%s] with source %s does not compose with target %s'%(b, srcb, t) t = self.B.target(b) if t != Ftgta: raise ValueError, \ 'Arrow %s with target %s does not compose with %s'%(Fa, t, Ftgta) else: if Fsrca != Ftgta: raise ValueError, \ 'Source %s does not match target %s'%(Fsrca, Ftgta) for x in XA: add_rule(((srca, x),) + Fa , ((tgta, Xa(x)),) ) Rk = [(self.make_word(x), self.make_word(y)) for (x, y) in self.R] self.Re = Re self.Rk = Rk self.Rinit = Re + Rk def make_confluent_system(self): self.rs = self.mod.KnuthBendix(self.Rinit, delim='.') self.Rconf = self.rs.reductions def make_automaton(self): # Make nondeterministic finite automaton def target(e): if len(e) == 1 and isinstance(e[0], tuple): return self.F.fo(e[0][0]) else: return self.B.target(e[-1]) XA = [] for A in self.A.objects: for x in self.X.fo(A): XA.append(((A, x),)) follows = dict([(B, []) for B in self.B.objects]) for b, (srcb, tgtb) in self.B.arrows.items(): follows[srcb].append((b, tgtb)) IR = dict([(self.make_term(u), self.make_term(v)) for u, v in self.Rconf]) pplR = {} for l, r in self.Rconf: t = self.make_term(l) for i in range(1, len(t)): pplR[t[:i]] = 1 s0 = ('s0',) fsa = self.mod.FiniteAutomaton(s0) for xi in XA: if xi not in IR: fsa.add_transition(s0, xi[0], xi) for xi in XA: for b, tgtb in follows[target(xi)]: bterm = (b,) xib = xi + bterm if xib in pplR: fsa.add_transition(xi, b, xib, tgtb) elif (bterm in pplR and xib not in IR): fsa.add_transition(xi, b, bterm, tgtb) elif xib not in IR: fsa.add_transition(xi, b, tgtb) for Bi in self.B.objects: for b, tgtb in follows[Bi]: bterm = (b,) if bterm in pplR: fsa.add_transition(Bi, b, bterm, tgtb) elif bterm not in IR: fsa.add_transition(Bi, b, tgtb) for u in pplR: if u in XA: continue for b, tgtb in follows[target(u)]: bterm = (b,) ub = u + bterm if ub in pplR: fsa.add_transition(u, b, ub, tgtb) elif self.irreducible(ub): # ub not in IR: fsa.add_transition(u, b, tgtb) def get_RS(Bi): finals = {} finals[Bi] = 1 for xi in XA: if self.F.fo(xi[0][0]) == Bi: finals[xi] = 1 for u in pplR: if target(u) == Bi: finals[u] = 1 for c in fsa.get_composites(): for s in c: if s not in finals: break else: finals[c] = 1 dfa = fsa.get_minimized_dfa(finals) regexp = 
self.mod.SolveFSA(dfa) return RegularSet(regexp) KB = self.Cat.Function(get_RS, self.B.objects, None) Kb = self.Cat.Function( lambda a:KanAction(self.B, KB, a, target, self.irreducible, self.reduce), self.B.arrows, KanAction, ) self.KB = KB self.Kb = Kb self.K = self.Cat.Functor(KB, Kb) def make_catalogue(self): # Catalogue the elements of the sets pointed to by extension functor K, # according to algorithm described in 7.1 in (251). # Precondition: # Tables initialized and a confluent system created. # The system is assumed to be finite, otherwise we won't terminate. # Postcondition: # Functor self.K represented as: # # self.K.tabo = self.KB = dict mapping, # source: {each B in self.B.objects} # target: sets represented as lists # self.K.taba = self.Kb = dict, mapping # source: {each a in self.B.arrows} # target: tabulated function, mapping # source: KB[source of a] # target: KB[target of a] def target(e): if len(e) == 1: return self.F.fo(e[0][0]) else: return self.B.target(e[-1]) def add_element(e): if self.irreducible(e): block.append(e) KB[target(e)].append(e) else: pass #print e, self #pdb.set_trace() KB = dict([(B, []) for B in self.B.objects]) block = [] for A in self.A.objects: for x in self.X.fo(A): add_element(((A, x),)) while block: oblock = block block = [] for e in oblock: tgt = target(e) for a in self.B.arrows: if self.B.source(a) == tgt: add_element( e + (a,) ) Kb = {} for a in self.B.arrows: src = KB[self.B.source(a)] tgt = KB[self.B.target(a)] tab = dict([(s, self.reduce(s + (a,))) for s in src]) Kb[a] = self.Cat.Function(tab, src, tgt) KB = self.Cat.Function(KB, self.B.objects, KB.values()) Kb = self.Cat.Function(Kb, self.B.arrows, Kb.values()) self.KB = KB self.Kb = Kb self.K = self.Cat.Functor(KB, Kb) def make_natural_transformation(self): # Precondition: # initial tables should be initialized # self.K.fo should exist # Postcondition: # # self.nat[A] for A in self.A.objects get_nat_memo = {} def get_nat(A): if A in get_nat_memo: return get_nat_memo[A] src = self.X.fo(A) tgt = self.K.fo(self.F.fo(A)) tab = dict([(x, self.reduce( ((A, x),) )) for x in src]) get_nat_memo[A] = self.Cat.Function(tab, src, tgt) return get_nat_memo[A] self.nat = self.Cat.Function(get_nat, self.A.objects, None) def make_word(self, x): ots = self.obj_to_str return '.'.join([ots(e) for e in x if e != '']) def obj_to_str(self, x): otn = self.obj_to_str_table try: return otn[x] except KeyError: assert not (isinstance(x, tuple) and len(x) > 2) n = str(len(otn)) #n = '%d:%s'%(len(otn), x) #n = str(x) otn[x] = n self.str_to_obj_table[n] = x return n def str_to_obj(self, x): return self.str_to_obj_table[x] def irreducible(self, x): tx = self.make_word(x) return tx == self.rs.reduce(tx) def reduce(self, x): w = self.rs.reduce(self.make_word(x)) return self.make_term(w) def make_term(self, word): sto = self.str_to_obj_table return tuple( [sto[s] for s in word.split('.') if s] ) class KanAction: def __init__(self, B, KB, a, targetof, irreducible, reduce): srca = B.source(a) tgta = B.target(a) self.src = KB(srca) self.tgt = KB(tgta) self.a = a self.srca = srca self.targetof = targetof self.irreducible = irreducible self.reduce = reduce def __call__(self, s): if self.targetof(s) != self.srca: raise TypeError, '''\ Target of %r (= %r) does not match source of %r (= %r)'''%( s, self.targetof(s), self.a, self.srca) if not self.irreducible(s): raise TypeError, '''\ Argument %r is reducible to %r; and is thus not in the source set K.fo(%r)'''%( s, self.reduce(s),self.srca) return self.reduce(s + (self.a,)) class 
RegularSet: # Wraps a regular expression; # provides a set protocol for the underlying set of sequences: # o If the RE specifies a finite language, iteration over its strings # [ o set inclusion ] is_simplified = 0 def __init__(self, re): self.re = re def __iter__(self): return iter(self.uniform) def __getitem__(self, x): return self.uniform[x] def __len__(self): return len(self.uniform) def get_xs_covered(self, coverage): N = coverage X = self.re.limited(coverage) xs = X.sequni() return [tuple(x) for x in xs] def get_uniform(self): self.simplify() return self.re.sequni() uniform = property(fget=get_uniform) def simplify(self): if not self.is_simplified: self.re = self.re.simplified() self.is_simplified = 1 class ObjectTester: def __init__(self, category_tester, object, code): self.category_tester = category_tester self.functor = category_tester.functor self.object = object self.code = code def get_all_arrows(self): return self.category_tester.arrows[self.object] def get_intermediate_test_code(self): return self.code def get_python_test_source_code(self): cmap = { 'aseq':'assert e[%r] == e[%r]', 'evalfa':'e[%r] = fa[%r](e[%r])', 'asfo':'assert fo[%r](e[%r])' } return '\n'.join([cmap[c[0]]%c[1:] for c in self.code]) def execode(self, arg): code = self.get_python_test_source_code() e = {'arg':arg} d = {'fa':self.functor.fa, 'fo':self.functor.fo, 'e':e, } exec code in d return e def intercode(self, arg): e = {'arg':arg} fa = self.functor.fa fo = self.functor.fo for c in self.code: a = c[0] if a == 'evalfa': dst, ar, src = c[1:] e[dst] = fa[ar](e[src]) elif a == 'asfo': ob, src = c[1:] if not fo[ob](e[src]): raise ValueError, 'Predicate failed' elif a == 'aseq': na, nb = c[1:] if e[na] != e[nb]: raise ValueError, 'e[%r] != e[%r]'%(na, nb) else: raise ValueError, 'Invalid code: %r'%(a,) def test(self, arg): return self.intercode(arg) class CategoryTester: def __init__(self, mod, functor, arrows, get_arrow_name=None): self.mod = mod self.cat = functor.src self.functor = functor self.arrows = arrows if get_arrow_name is not None: self.get_arrow_name = get_arrow_name def get_arrow_name(self, a): return '.'.join(a) def get_eval_arrows_code(self, object, argname): fa = self.functor.fa name = argname memo = {():name} memolist = [((),name)] codes = [] def eval_arrow(a): if a in memo: return memo[a] a0 = a[:-1] a1 = a[-1] name = self.get_arrow_name(a) na0 = eval_arrow(a0) #codes.append('%s = fa[%r](%s)'%(name, a1, na0)) codes.append(('evalfa', name, a1, na0)) memo[a] = name memolist.append((a, name)) return name for ar in self.arrows[object]: eval_arrow(ar) return codes, memolist def get_object_tester(self, object): code = self.get_test_object_code(object) return ObjectTester(self, object, code) def get_test_inclusion_code(self, object, ml): codes = [] src = self.functor.fo.src for arrow, value in ml: ob = object if arrow: ob = self.cat.graph.target(arrow[-1]) #codes.append('assert fo[%r](%s)'%(ob, value)) if src is None or ob in src: codes.append(('asfo', ob, value)) return codes def get_test_object_code(self, object): argname = 'arg' evalcodes, memolist = self.get_eval_arrows_code(object, argname) relcodes = self.get_test_relations_code(object, memolist) incodes = self.get_test_inclusion_code(object, memolist) return evalcodes+relcodes+incodes def get_test_relations_code(self, object, memolist): codes = [] cat = self.cat fa = self.functor.fa memo = dict(memolist) def teval_arrow(ar): if ar in memo: return memo[ar] a0 = teval_arrow(ar[:-1]) name = self.get_arrow_name(ar) #codes.append('%s = 
fa[%r](%s)'%(name, ar[-1], a0)) codes.append(('evalfa', name, ar[-1], a0)) memo[ar] = name return name # Check that the equality relations really match up # for all arrows in old memolist, i.e. original unique arrows # which is arguably overkill sometimes?.. for a, b in cat.relations: a = tuple(a) b = tuple(b) src = cat.graph.source(a[0]) for (arr, val) in memolist: if arr: tgt = cat.graph.target(arr[-1]) else: tgt = object if src == tgt: ara = arr + a arb = arr + b if ara != arb: va = teval_arrow(ara) vb = teval_arrow(arb) assert va != vb #codes.append('assert %s == %s'%(va, vb)) codes.append(('aseq', va, vb)) return codes def test_object(self, object, value): tester = self.get_object_tester(object) tester.test(value) return tester def test_object_fail(self, object, value): try: self.test_object(object, value) except: pass else: raise Exception, 'Exception excepted' class _GLUECLAMP_: # 'imports' def _get_KnuthBendix(self): return self._parent.KnuthBendix.KnuthBendix def _get_FiniteAutomaton(self): return self._parent.FSA.FiniteAutomaton def _get_SolveFSA(self): return self._parent.RE.SolveFSA def _get_Cat(self): return self._parent.Cat # Main exported interface is the lke method # which provides a context for the LeftKanExtension class. def lke(self, A, B, R, X, F): return LeftKanExtension(self, A, B, R, X, F) # Other functions - examples of applications of Kan extension # in alphabetic order def arrows_map(self, cat, from_objects=0, coverage=1): if from_objects: cat = cat.get_dual() A = self.Cat.Graph(cat.graph.objects, []) B = cat.graph R = cat.relations X = self.Cat.Functor(lambda x: [1], lambda x: lambda y:y) F = self.Cat.Functor(lambda x: x, lambda x: []) ke = self.lke(A, B, R, X, F) memo = {} def get_arrows(object): if object in memo: return memo[object] re = ke.K.fo[object].re.rempretup() if from_objects: re = re.reversed() if str(coverage).startswith('length'): maxlen = int(coverage[6:]) ar = [] xs = re.get_words_memo() for i in range(1, maxlen+1): ar.extend([tuple(x) for x in xs.get_words_of_length(i)]) else: re = re.limited(coverage) xs = re.sequni() ar = [tuple(x) for x in xs] memo[object] = ar return ar return self.Cat.Function( get_arrows, src = ke.K.fo.src, tgt = None ) def category_tester(self, functor, arrows=None, coverage=1): if isinstance(functor, tuple): fo, fa, src = functor if fo is None: fo = lambda x:lambda y:1 functor = self.Cat.Functor(fo, fa, src) if arrows is None: arrows = self.arrows_map(functor.src, from_objects=1, coverage=coverage) return CategoryTester(self, functor, arrows) def coequalizer(self, S0, S1, f0, f1): # Given # # S0, S1 sets (objects that can be iterated over) # f0, f1 functions from S0 to S1 # # Return a coequalizing function, # such that in the following diagram: # # S0 ===== S0 # | | # | f0 | f1 # | | # V V # S1 ===== S1 ==== coequalizing_function.src # | # | coequalizing_function # | # V # coequalizing_function.tgt # both paths from S0 to coequalizing_function.tgt will be equivalent, # and coequalizing_function.tgt is a colimit of all such sets. 
# # The coequalizing_function object is callable with # an argument from S1, and has the following attributes: # .src is identical to S1 # .tgt is a set in iterable form # .asdict() returns a dict representing the mapping objects = [0, 1] arrows = {'a0':(0, 1), 'a1': (0, 1)} A = self.Cat.Graph(objects, arrows) Xo = self.Cat.Function({0:S0, 1:S1}, objects, [S0,S1]) Xa = self.Cat.Function({'a0':f0, 'a1':f1}, arrows, [f0,f1]) X = self.Cat.Functor(Xo, Xa) colimit_object, colimit_functions = self.colimit(A, X) return colimit_functions[1] def colimit(self, A, X): # According to 9.6 in (ref.251) B = self.Cat.Graph([0], {}) R = [] F = self.Cat.Functor(lambda x: 0, lambda x: ()) lka = self.lke(A, B, R, X, F) colimit_object = lka.KB[0] colimit_functions = lka.nat # Reduce elements to a smaller (but isomorphic) form # I.E since elements are all of the form # ((A, X),) # they can be reduced to the form # (A, X) # colimit_object = [x[0] for x in colimit_object] colimit_functions = dict([ (A, self.Cat.Function( dict([(a, k[0]) for (a, k) in cof.items()]), cof.src, colimit_object, ) ) for (A, cof) in colimit_functions.items()]) return colimit_object, colimit_functions def test_arrows(self, functor, object, value): # Application of arrow listing to test sequencing # Discussed in Notes Mar 9 2005 tester = self.category_tester(functor) return tester.test_object(object, value)
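# Hedged illustration (independent of the guppy/Kan-extension machinery above):
# the coequalizer described in the comments of coequalizer()/colimit() can be
# computed directly for finite sets by identifying f0(x) with f1(x) for every
# x in S0, i.e. a union-find over S1.  This only makes the diagram concrete;
# it does not use the Cat/Functor classes of the module.
def _coequalizer_classes(S0, S1, f0, f1):
    parent = dict([(y, y) for y in S1])

    def find(y):
        # follow parent links (with path halving) to the class representative
        while parent[y] != y:
            parent[y] = parent[parent[y]]
            y = parent[y]
        return y

    for x in S0:
        a, b = find(f0(x)), find(f1(x))
        if a != b:
            parent[a] = b          # merge the two classes
    return dict([(y, find(y)) for y in S1])

# Example: with S0 = [0, 1], S1 = [1, 2, 3, 4, 5], f0 = {0: 1, 1: 3} and
# f1 = {0: 2, 1: 4}, the classes identify 1~2 and 3~4, leaving three classes.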
from math import radians from typing import Any, Dict, Optional, Set, Tuple import bpy from mathutils import Matrix from . import migration from .template_mesh_maker import IcypTemplateMeshMaker class ICYP_OT_make_armature(bpy.types.Operator): # type: ignore[misc] # noqa: N801 bl_idname = "icyp.make_basic_armature" bl_label = "Add VRM Humanoid" bl_description = "Create armature along with a simple setup for VRM export" bl_options = {"REGISTER", "UNDO"} # WIP_with_template_mesh: bpy.props.BoolProperty( # type: ignore[valid-type] default=False ) # 身長 at meter tall: bpy.props.FloatProperty( # type: ignore[valid-type] default=1.70, min=0.3, step=0.001, name="Bone tall" # noqa: F722 ) # 頭身 head_ratio: bpy.props.FloatProperty( # type: ignore[valid-type] default=8.0, min=4, step=0.05, description="height per heads" # noqa: F722 ) head_width_ratio: bpy.props.FloatProperty( # type: ignore[valid-type] default=2 / 3, min=0.3, max=1.2, step=0.05, description="height per heads", # noqa: F722 ) # 足-胴比率:0:子供、1:大人 に近くなる(低等身で有効) aging_ratio: bpy.props.FloatProperty( # type: ignore[valid-type] default=0.5, min=0, max=1, step=0.1 ) # 目の奥み eye_depth: bpy.props.FloatProperty( # type: ignore[valid-type] default=-0.03, min=-0.1, max=0, step=0.005 ) # 肩幅 shoulder_in_width: bpy.props.FloatProperty( # type: ignore[valid-type] default=0.05, min=0.01, step=0.005, description="Inner shoulder position", # noqa: F722 ) shoulder_width: bpy.props.FloatProperty( # type: ignore[valid-type] default=0.08, min=0.01, step=0.005, description="shoulder roll position", # noqa: F722 ) # 腕長さ率 arm_length_ratio: bpy.props.FloatProperty( # type: ignore[valid-type] default=1, min=0.5, step=0.01 ) # 手 hand_ratio: bpy.props.FloatProperty( # type: ignore[valid-type] default=1, min=0.5, max=2.0, step=0.05 ) finger_1_2_ratio: bpy.props.FloatProperty( # type: ignore[valid-type] default=0.75, min=0.5, max=1, step=0.005, description="proximal / intermediate", # noqa: F722,F821 ) finger_2_3_ratio: bpy.props.FloatProperty( # type: ignore[valid-type] default=0.75, min=0.5, max=1, step=0.005, description="intermediate / distal", # noqa: F722,F821 ) nail_bone: bpy.props.BoolProperty( # type: ignore[valid-type] default=False, description="may need for finger collider" # noqa: F722 ) # 指先の当たり判定として必要 # 足 leg_length_ratio: bpy.props.FloatProperty( # type: ignore[valid-type] default=0.5, min=0.3, max=0.6, step=0.01, description="upper body/lower body", # noqa: F722 ) leg_width_ratio: bpy.props.FloatProperty( # type: ignore[valid-type] default=1, min=0.01, step=0.005 ) leg_size: bpy.props.FloatProperty( # type: ignore[valid-type] default=0.26, min=0.05, step=0.005 ) custom_property_name: bpy.props.StringProperty( # type: ignore[valid-type] options={"HIDDEN"} # noqa: F821 ) armature_obj = None def execute(self, context: bpy.types.Context) -> Set[str]: if ( context.view_layer.objects.active is not None and context.view_layer.objects.active.mode != "OBJECT" ): bpy.ops.object.mode_set(mode="OBJECT") self.armature_obj, compare_dict = self.make_armature(context) self.setup_as_vrm(self.armature_obj, compare_dict) if self.custom_property_name: self.armature_obj[self.custom_property_name] = True if self.WIP_with_template_mesh: IcypTemplateMeshMaker(self) return {"FINISHED"} def float_prop(self, name: str) -> float: prop = getattr(self, name) if not isinstance(prop, float): raise Exception(f"prop {name} is not float") return prop def head_size(self) -> float: return self.float_prop("tall") / self.float_prop("head_ratio") def hand_size(self) -> float: return 
self.head_size() * 0.75 * self.float_prop("hand_ratio") def make_armature( self, context: bpy.types.Context ) -> Tuple[bpy.types.Object, Dict[str, Any]]: bpy.ops.object.add(type="ARMATURE", enter_editmode=True, location=(0, 0, 0)) armature = context.object bone_dic = {} def bone_add( name: str, head_pos: Tuple[float, float, float], tail_pos: Tuple[float, float, float], parent_bone: Optional[bpy.types.Bone] = None, radius: float = 0.1, roll: float = 0, ) -> bpy.types.Bone: added_bone = armature.data.edit_bones.new(name) added_bone.head = head_pos added_bone.tail = tail_pos added_bone.head_radius = radius added_bone.tail_radius = radius added_bone.envelope_distance = 0.01 added_bone.roll = radians(roll) if parent_bone is not None: added_bone.parent = parent_bone bone_dic.update({name: added_bone}) return added_bone # bone_type = "leg" or "arm" for roll setting def x_mirror_bones_add( base_name: str, right_head_pos: Tuple[float, float, float], right_tail_pos: Tuple[float, float, float], parent_bones: Tuple[bpy.types.Bone, bpy.types.Bone], radius: float = 0.1, bone_type: str = "other", ) -> Tuple[bpy.types.Bone, bpy.types.Bone]: right_roll = 0 left_roll = 0 if bone_type == "arm": right_roll = 180 elif bone_type == "leg": right_roll = 90 left_roll = 90 left_bone = bone_add( base_name + ".L", right_head_pos, right_tail_pos, parent_bones[0], radius=radius, roll=left_roll, ) head_pos = [pos * axis for pos, axis in zip(right_head_pos, (-1, 1, 1))] tail_pos = [pos * axis for pos, axis in zip(right_tail_pos, (-1, 1, 1))] right_bone = bone_add( base_name + ".R", (head_pos[0], head_pos[1], head_pos[2]), (tail_pos[0], tail_pos[1], tail_pos[2]), parent_bones[1], radius=radius, roll=right_roll, ) return left_bone, right_bone def x_add( pos_a: Tuple[float, float, float], add_x: float ) -> Tuple[float, float, float]: pos = [p_a + _add for p_a, _add in zip(pos_a, [add_x, 0, 0])] return (pos[0], pos[1], pos[2]) def y_add( pos_a: Tuple[float, float, float], add_y: float ) -> Tuple[float, float, float]: pos = [p_a + _add for p_a, _add in zip(pos_a, [0, add_y, 0])] return (pos[0], pos[1], pos[2]) def z_add( pos_a: Tuple[float, float, float], add_z: float ) -> Tuple[float, float, float]: pos = [p_a + _add for p_a, _add in zip(pos_a, [0, 0, add_z])] return (pos[0], pos[1], pos[2]) head_size = self.head_size() # down side (前は8頭身の時の股上/股下の股下側割合、後ろは4頭身のときの〃を年齢具合で線形補完)(股上高めにすると破綻する) eight_upside_ratio, four_upside_ratio = ( 1 - self.leg_length_ratio, (2.5 / 4) * (1 - self.aging_ratio) + (1 - self.leg_length_ratio) * self.aging_ratio, ) hip_up_down_ratio = ( eight_upside_ratio * (1 - (8 - self.head_ratio) / 4) + four_upside_ratio * (8 - self.head_ratio) / 4 ) # 体幹 # 股間 body_separate = self.tall * (1 - hip_up_down_ratio) # 首の長さ neck_len = head_size * 2 / 3 # 仙骨(骨盤脊柱基部) hips_tall = body_separate + head_size * 3 / 4 # 胸椎・spineの全長 #首の1/3は顎の後ろに隠れてる backbone_len = self.tall - hips_tall - head_size - neck_len / 2 # FIXME 胸椎と脊椎の割合の確認 //脊椎の基部に位置する主となる屈曲点と、胸郭基部に位置するもうひとつの屈曲点byHumanoid Doc spine_len = backbone_len * 5 / 17 root = bone_add("root", (0, 0, 0), (0, 0, 0.3)) # 仙骨基部 hips = bone_add("hips", (0, 0, body_separate), (0, 0, hips_tall), root, roll=90) # 骨盤基部->胸郭基部 spine = bone_add( "spine", hips.tail, z_add(hips.tail, spine_len), hips, roll=-90 ) # 胸郭基部->首元 chest = bone_add( "chest", spine.tail, z_add(hips.tail, backbone_len), spine, roll=-90 ) neck = bone_add( "neck", (0, 0, self.tall - head_size - neck_len / 2), (0, 0, self.tall - head_size + neck_len / 2), chest, roll=-90, ) # 首の1/2は顎の後ろに隠れてる head = bone_add( "head", (0, 
0, self.tall - head_size + neck_len / 2), (0, 0, self.tall), neck, roll=-90, ) # 目 eye_depth = self.eye_depth eyes = x_mirror_bones_add( "eye", (head_size * self.head_width_ratio / 5, 0, self.tall - head_size / 2), ( head_size * self.head_width_ratio / 5, eye_depth, self.tall - head_size / 2, ), (head, head), ) # 足 leg_width = head_size / 4 * self.leg_width_ratio leg_size = self.leg_size leg_bone_length = (body_separate + head_size * 3 / 8 - self.tall * 0.05) / 2 upside_legs = x_mirror_bones_add( "upper_leg", x_add((0, 0, body_separate + head_size * 3 / 8), leg_width), x_add( z_add((0, 0, body_separate + head_size * 3 / 8), -leg_bone_length), leg_width, ), (hips, hips), radius=leg_width * 0.9, bone_type="leg", ) lower_legs = x_mirror_bones_add( "lower_leg", upside_legs[0].tail, (leg_width, 0, self.tall * 0.05), upside_legs, radius=leg_width * 0.9, bone_type="leg", ) foots = x_mirror_bones_add( "foot", lower_legs[0].tail, (leg_width, -leg_size * (2 / 3), 0), lower_legs, radius=leg_width * 0.9, bone_type="leg", ) toes = x_mirror_bones_add( "toes", foots[0].tail, (leg_width, -leg_size, 0), foots, radius=leg_width * 0.5, bone_type="leg", ) # 肩~指 shoulder_in_pos = self.shoulder_in_width / 2 shoulder_parent = chest shoulders = x_mirror_bones_add( "shoulder", x_add(shoulder_parent.tail, shoulder_in_pos), x_add(shoulder_parent.tail, shoulder_in_pos + self.shoulder_width), (shoulder_parent, shoulder_parent), radius=self.hand_size() * 0.4, bone_type="arm", ) arm_length = ( head_size * (1 * (1 - (self.head_ratio - 6) / 2) + 1.5 * ((self.head_ratio - 6) / 2)) * self.arm_length_ratio ) arms = x_mirror_bones_add( "upper_arm", shoulders[0].tail, x_add(shoulders[0].tail, arm_length), shoulders, radius=self.hand_size() * 0.4, bone_type="arm", ) # グーにするとパーの半分くらいになる、グーのとき手を含む下腕の長さと上腕の長さが概ね一緒、けど手がでかすぎると破綻する forearm_length = max(arm_length - self.hand_size() / 2, arm_length * 0.8) forearms = x_mirror_bones_add( "lower_arm", arms[0].tail, x_add(arms[0].tail, forearm_length), arms, radius=self.hand_size() * 0.4, bone_type="arm", ) hands = x_mirror_bones_add( "hand", forearms[0].tail, x_add(forearms[0].tail, self.hand_size() / 2), forearms, radius=self.hand_size() / 4, bone_type="arm", ) def fingers( finger_name: str, proximal_pos: Tuple[float, float, float], finger_len_sum: float, ) -> Tuple[ Tuple[bpy.types.Bone, bpy.types.Bone], Tuple[bpy.types.Bone, bpy.types.Bone], Tuple[bpy.types.Bone, bpy.types.Bone], ]: finger_normalize = 1 / ( self.finger_1_2_ratio * self.finger_2_3_ratio + self.finger_1_2_ratio + 1 ) proximal_finger_len = finger_len_sum * finger_normalize intermediate_finger_len = ( finger_len_sum * finger_normalize * self.finger_1_2_ratio ) distal_finger_len = ( finger_len_sum * finger_normalize * self.finger_1_2_ratio * self.finger_2_3_ratio ) proximal_bones = x_mirror_bones_add( f"{finger_name}.proximal", proximal_pos, x_add(proximal_pos, proximal_finger_len), hands, self.hand_size() / 18, bone_type="arm", ) intermediate_bones = x_mirror_bones_add( f"{finger_name}.intermediate", proximal_bones[0].tail, x_add(proximal_bones[0].tail, intermediate_finger_len), proximal_bones, self.hand_size() / 18, bone_type="arm", ) distal_bones = x_mirror_bones_add( f"{finger_name}.distal", intermediate_bones[0].tail, x_add(intermediate_bones[0].tail, distal_finger_len), intermediate_bones, self.hand_size() / 18, bone_type="arm", ) if self.nail_bone: x_mirror_bones_add( f"{finger_name}.nail", distal_bones[0].tail, x_add(distal_bones[0].tail, distal_finger_len), distal_bones, self.hand_size() / 20, bone_type="arm", ) 
return proximal_bones, intermediate_bones, distal_bones finger_y_offset = -self.hand_size() / 16 thumbs = fingers( "thumb", y_add(hands[0].head, finger_y_offset * 3), self.hand_size() / 2, ) mats = [thumbs[0][i].matrix.translation for i in [0, 1]] mats = [Matrix.Translation(mat) for mat in mats] for j in range(3): for n, angle in enumerate([-45, 45]): thumbs[j][n].transform(mats[n].inverted(), scale=False, roll=False) thumbs[j][n].transform(Matrix.Rotation(radians(angle), 4, "Z")) thumbs[j][n].transform(mats[n], scale=False, roll=False) thumbs[j][n].roll = [0, radians(180)][n] index_fingers = fingers( "index", y_add(hands[0].tail, finger_y_offset * 3), (self.hand_size() / 2) - (1 / 2.3125) * (self.hand_size() / 2) / 3, ) middle_fingers = fingers( "middle", y_add(hands[0].tail, finger_y_offset), self.hand_size() / 2 ) ring_fingers = fingers( "ring", y_add(hands[0].tail, -finger_y_offset), (self.hand_size() / 2) - (1 / 2.3125) * (self.hand_size() / 2) / 3, ) little_fingers = fingers( "little", y_add(hands[0].tail, -finger_y_offset * 3), ((self.hand_size() / 2) - (1 / 2.3125) * (self.hand_size() / 2) / 3) * ((1 / 2.3125) + (1 / 2.3125) * 0.75), ) body_dict = { "hips": hips.name, "spine": spine.name, "chest": chest.name, "neck": neck.name, "head": head.name, } left_right_body_dict = { f"{left_right}{bone_name}": bones[lr].name for bone_name, bones in { "Eye": eyes, "UpperLeg": upside_legs, "LowerLeg": lower_legs, "Foot": foots, "Toes": toes, "Shoulder": shoulders, "UpperArm": arms, "LowerArm": forearms, "Hand": hands, }.items() for lr, left_right in enumerate(["left", "right"]) } # VRM finger like name key fingers_dict = { f"{left_right}{finger_name}{position}": finger[i][lr].name for finger_name, finger in zip( ["Thumb", "Index", "Middle", "Ring", "Little"], [thumbs, index_fingers, middle_fingers, ring_fingers, little_fingers], ) for i, position in enumerate(["Proximal", "Intermediate", "Distal"]) for lr, left_right in enumerate(["left", "right"]) } # VRM bone name : blender bone name bone_name_all_dict = {} bone_name_all_dict.update(body_dict) bone_name_all_dict.update(left_right_body_dict) bone_name_all_dict.update(fingers_dict) connect_parent_tail_and_child_head_if_same_position(armature.data) context.scene.view_layers.update() bpy.ops.object.mode_set(mode="OBJECT") context.scene.view_layers.update() return armature, bone_name_all_dict def setup_as_vrm( self, armature: bpy.types.Object, compare_dict: Dict[str, str] ) -> None: for vrm_bone_name, bpy_bone_name in compare_dict.items(): props = armature.data.vrm_addon_extension.vrm0.humanoid.human_bones.add() props.bone = vrm_bone_name props.node.value = bpy_bone_name self.make_extension_setting_and_metas(armature) migration.migrate(armature.name, defer=False) @classmethod def make_extension_setting_and_metas(cls, armature: bpy.types.Object) -> None: vrm0 = armature.data.vrm_addon_extension.vrm0 vrm0.first_person.first_person_bone.value = "head" vrm0.first_person.first_person_offset = (0, 0, 0.06) vrm0.first_person.look_at_horizontal_inner.y_range = 8 vrm0.first_person.look_at_horizontal_outer.y_range = 12 vrm0.meta.author = "undefined" vrm0.meta.contact_information = "undefined" vrm0.meta.other_license_url = "undefined" vrm0.meta.other_permission_url = "undefined" vrm0.meta.reference = "undefined" vrm0.meta.title = "undefined" vrm0.meta.version = "undefined" for name in [ "Neutral", "A", "I", "U", "E", "O", "Blink", "Joy", "Angry", "Sorrow", "Fun", "LookUp", "LookDown", "LookLeft", "LookRight", "Blink_L", "Blink_R", ]: # str.lower() is locale 
dependent.
            preset_name = name.encode().lower().decode()
            blend_shape_group = vrm0.blend_shape_master.blend_shape_groups.add()
            blend_shape_group.name = name
            blend_shape_group.preset_name = preset_name


def connect_parent_tail_and_child_head_if_same_position(
    armature: bpy.types.Armature,
) -> None:
    for bone in armature.edit_bones:
        # If the bone has a parent and the bone's head coincides with the parent's tail
        if (
            bone.parent is not None
            and (bone.head - bone.parent.tail).length < 0.000001  # 1μm
        ):
            # enable the connected relationship between the two bones
            bone.use_connect = True
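# Hedged usage sketch: once the add-on is registered in Blender, the operator
# defined above can be invoked from a script through its bl_idname, with the
# operator properties passed as keyword arguments (the values here are arbitrary):
#
#     import bpy
#     bpy.ops.icyp.make_basic_armature(tall=1.6, head_ratio=7.0)
#
# This creates the armature and applies the VRM humanoid bone mapping via
# setup_as_vrm().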
import argparse
import operator

parser = argparse.ArgumentParser("convert dicom files to nii.gz")
parser.add_argument("db", help="location of the csv created by the create_csv_db command")


# TODO
def group_by_correct_volumes(slices_mdatas):
    """Correctly group slices to obtain a 3D volume.

    More specifically, the following things can happen (and have happened to me...):
    * different Acquisition number, but same volume
    * more to come...

    So we need to check the z-location (from the ImagePositionPatient tag), the
    InstanceNumber and the AcquisitionNumber to obtain a proper 3D volume.
    """
    # start from a list of flat dicts
    new_by_inst_nb = sorted(slices_mdatas, key=operator.itemgetter("AcquisitionNumber", "InstanceNumber"))
    # order by the z component of ImagePositionPatient; the exact key layout of the
    # flat metadata dict is assumed here
    new_by_zloc = sorted(slices_mdatas,
                         key=lambda s: (s["AcquisitionNumber"], float(s["ImagePositionPatient"][2])))
    if new_by_zloc != new_by_inst_nb:
        print("discrepancy in slice order between z position and instance number!")
        print("Using zloc to discriminate slices")
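# Hedged sketch of the grouping described in the docstring above.  The metadata
# keys ("AcquisitionNumber", "ImagePositionPatient") and the flat-dict layout are
# assumptions; slices are bucketed per acquisition and each bucket is ordered by
# the z component of ImagePositionPatient.
def group_slices_by_acquisition(slices_mdatas):
    volumes = {}
    for mdata in slices_mdatas:
        volumes.setdefault(mdata["AcquisitionNumber"], []).append(mdata)
    for slices in volumes.values():
        # order each candidate volume along the patient z axis
        slices.sort(key=lambda s: float(s["ImagePositionPatient"][2]))
    return volumes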
import errno import math import os import sys from .. import _core, _subprocess from .._sync import CapacityLimiter, Event from .._threads import to_thread_run_sync try: from os import waitid def sync_wait_reapable(pid): waitid(os.P_PID, pid, os.WEXITED | os.WNOWAIT) except ImportError: # pypy doesn't define os.waitid so we need to pull it out ourselves # using cffi: https://bitbucket.org/pypy/pypy/issues/2922/ import cffi waitid_ffi = cffi.FFI() # Believe it or not, siginfo_t starts with fields in the # same layout on both Linux and Darwin. The Linux structure # is bigger so that's what we use to size `pad`; while # there are a few extra fields in there, most of it is # true padding which would not be written by the syscall. waitid_ffi.cdef( """ typedef struct siginfo_s { int si_signo; int si_errno; int si_code; int si_pid; int si_uid; int si_status; int pad[26]; } siginfo_t; int waitid(int idtype, int id, siginfo_t* result, int options); """ ) waitid = waitid_ffi.dlopen(None).waitid def sync_wait_reapable(pid): P_PID = 1 WEXITED = 0x00000004 if sys.platform == 'darwin': # pragma: no cover # waitid() is not exposed on Python on Darwin but does # work through CFFI; note that we typically won't get # here since Darwin also defines kqueue WNOWAIT = 0x00000020 else: WNOWAIT = 0x01000000 result = waitid_ffi.new("siginfo_t *") while waitid(P_PID, pid, result, WEXITED | WNOWAIT) < 0: got_errno = waitid_ffi.errno if got_errno == errno.EINTR: continue raise OSError(got_errno, os.strerror(got_errno)) # adapted from # https://github.com/python-trio/trio/issues/4#issuecomment-398967572 waitid_limiter = CapacityLimiter(math.inf) async def _waitid_system_task(pid: int, event: Event) -> None: """Spawn a thread that waits for ``pid`` to exit, then wake any tasks that were waiting on it. """ # cancellable=True: if this task is cancelled, then we abandon the # thread to keep running waitpid in the background. Since this is # always run as a system task, this will only happen if the whole # call to trio.run is shutting down. try: await to_thread_run_sync( sync_wait_reapable, pid, cancellable=True, limiter=waitid_limiter, ) except OSError: # If waitid fails, waitpid will fail too, so it still makes # sense to wake up the callers of wait_process_exiting(). The # most likely reason for this error in practice is a child # exiting when wait() is not possible because SIGCHLD is # ignored. pass finally: event.set() async def wait_child_exiting(process: "_subprocess.Process") -> None: # Logic of this function: # - The first time we get called, we create an Event and start # an instance of _waitid_system_task that will set the Event # when waitid() completes. If that Event is set before # we get cancelled, we're good. # - Otherwise, a following call after the cancellation must # reuse the Event created during the first call, lest we # create an arbitrary number of threads waiting on the same # process. if process._wait_for_exit_data is None: process._wait_for_exit_data = event = Event() _core.spawn_system_task(_waitid_system_task, process.pid, event) await process._wait_for_exit_data.wait()
#!/usr/bin/python
# -*- coding: utf-8 -*-

import networkx as nx
import numpy as np


class SolutionNotFound(Exception):
    pass


def solve_it(input_data):
    # parse the input
    lines = input_data.split('\n')

    first_line = lines[0].split()
    node_count = int(first_line[0])
    edge_count = int(first_line[1])

    edges = []
    for i in range(1, edge_count + 1):
        line = lines[i]
        parts = line.split()
        edges.append((int(parts[0]), int(parts[1])))

    # create matrix to keep track of current constraints
    # make networkx graph with edgelist
    edges_nx = []
    for i in range(1, edge_count + 1):
        line = lines[i]
        parts = line.split()
        edges_nx.append(parts[0] + " " + parts[1])
    g = nx.read_edgelist(edges_nx)

    # find how many nodes are in the largest clique - this gives the most optimal solution
    max_colors = max(map(len, nx.find_cliques(g)))

    output = {}

    # This part is for constraint - consider expanding
    def is_safe(node_id, color_id, edges, output):
        """
        Check if node with the given color is valid
        :param node_id: int
        :param color_id: int
        :param edges: list of tuples
        :param output: dict, current state of things
        :return: boolean
        """
        for idx in range(node_count):
            if (node_id, idx) in edges or (idx, node_id) in edges:
                if idx in output:
                    if color_id == output[idx]:
                        return False
        return True

    # make an iteration with the cliques
    for clique in nx.find_cliques(g):
        for node in clique:
            # networkx node labels are strings here, while the edge list holds ints
            node_id = int(node)
            if node_id not in output:
                color_assn = False
                # Node not in output, assign a color from range(max_colors)
                for color in range(max_colors):
                    if not color_assn and is_safe(node_id, color, edges, output):
                        output[node_id] = color
                        color_assn = True
                if not color_assn:
                    raise SolutionNotFound

    # Backtrack solution with while loop
    def color_graph(edges, node_count, max_colors):
        """
        solves a coloring problem given the list of edges and node count
        super slow, but will give the optimal solution
        :param edges: list of tuples
        :param node_count: int
        :param max_colors: int
        :return: dict with node id as key and color id as value
        """
        forbidden_colors = {}
        output = {}
        cur_node = 0
        solution_found = False
        while not solution_found:
            valid_assn = False
            # print("currently at node " + str(cur_node))
            if cur_node < 0:
                print("Exhausted all solutions, unable to solve with max_colors = " + str(max_colors))
                raise SolutionNotFound
            # create empty set for forbidden colors memory
            if cur_node not in forbidden_colors:
                forbidden_colors[cur_node] = set()
            # at each node, try all colors minus forbidden colors:
            valid_colors = set(range(max_colors)) - forbidden_colors[cur_node]
            for color in valid_colors:
                if not valid_assn and is_safe(cur_node, color, edges, output):
                    # print("trying color " + str(color))
                    output[cur_node] = color
                    valid_assn = True
                    # print("assigned color " + str(color) + " to node " + str(cur_node))
            if valid_assn:
                if (cur_node + 1) < node_count:
                    cur_node += 1
                    continue
                else:
                    # print("Solution found")
                    return output
            elif (cur_node - 1) >= 0:
                # print("backtracking")
                # clear downstream backtrack memory
                forbidden_colors[cur_node] = set()
                # add color to forbidden colors dict
                forbidden_colors[cur_node - 1].add(output[cur_node - 1])
                # remove the dict entry for that color
                del output[cur_node - 1]
                # backtrack
                cur_node -= 1
            else:
                print("Exhausted all solutions, unable to solve with max_colors = " + str(max_colors))
                raise SolutionNotFound

    output = color_graph(edges, node_count, max_colors)

    # emit colors in node order (backtracking disturbs the dict insertion order)
    solution, optimal = [output[node] for node in range(node_count)], "1"

    # prepare the solution in the specified output format
    output_data = str(node_count) + ' ' + optimal + '\n'
    output_data += ' '.join(map(str, solution))

    return output_data


if __name__ == '__main__':
    import sys
    if len(sys.argv) > 1:
        file_location = sys.argv[1].strip()
        with open(file_location, 'r') as input_data_file:
            input_data = input_data_file.read()
        print(solve_it(input_data))
    else:
        print('This test requires an input file.  Please select one from the data directory.  (i.e. python solver.py ./data/gc_4_1)')
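# Hedged usage sketch: solve_it() expects "<node_count> <edge_count>" on the
# first line followed by one "<u> <v>" pair per edge, and returns the node
# count, an optimality flag and one color per node, e.g.:
#
#     sample = "4 3\n0 1\n1 2\n2 3"
#     print(solve_it(sample))        # something like "4 1\n0 1 0 1"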
import aiohttp.web  # `import aiohttp` alone does not expose the aiohttp.web submodule
import jinja2
import aiohttp_jinja2
import router
import os

port = 80

os.chdir(os.path.dirname(os.path.realpath(__file__)))

app = aiohttp.web.Application()
templates = aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader("assets/html"))
app.router.add_static("/assets/", path="./assets/", name="assets")


def main():
    router.add_all_routes(app)
    aiohttp.web.run_app(app, port=port)


if __name__ == "__main__":
    main()
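# Hedged sketch of what the local "router" module used above might register;
# the handler, route path and template name are hypothetical, but the
# aiohttp / aiohttp_jinja2 calls are the standard ones:
#
#     import aiohttp_jinja2
#     from aiohttp import web
#
#     @aiohttp_jinja2.template("index.html")
#     async def index(request: web.Request) -> dict:
#         return {"title": "home"}      # context passed to the template
#
#     def add_all_routes(app: web.Application) -> None:
#         app.router.add_get("/", index)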
# File: sentinelone_connector.py # Copyright (c) 2018-2020 Splunk Inc. # # Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt) # import phantom.app as phantom from phantom.base_connector import BaseConnector from phantom.action_result import ActionResult import requests import json from bs4 import BeautifulSoup, UnicodeDammit import sys class RetVal(tuple): def __new__(cls, val1, val2): return tuple.__new__(RetVal, (val1, val2)) class SentineloneConnector(BaseConnector): def __init__(self): # Call the BaseConnectors init first super(SentineloneConnector, self).__init__() self._state = None # Variable to hold a base_url in case the app makes REST calls # Do note that the app json defines the asset config, so please # modify this as you deem fit. self._base_url = None self._api_v = "/web/api/v2.1" self.HEADER = {"Content-Type": "application/json"} def _handle_py_ver_compat_for_input_str(self, input_str): """ This method returns the encoded|original string based on the Python version. :param input_str: Input string to be processed :return: input_str (Processed input string based on following logic 'input_str - Python 3; encoded input_str - Python 2') """ try: if input_str and self._python_version < 3: input_str = UnicodeDammit(input_str).unicode_markup.encode('utf-8') except Exception: self.debug_print("Error occurred while handling python 2to3 compatibility for the input string") return input_str def _get_error_message_from_exception(self, e): """ This function is used to get appropriate error message from the exception. :param e: Exception object :return: error message """ error_msg = "Unknown error occurred. Please check the asset configuration and|or action parameters." error_code = "Error code unavailable" try: if e.args: if len(e.args) > 1: error_code = e.args[0] error_msg = e.args[1] elif len(e.args) == 1: error_code = "Error code unavailable" error_msg = e.args[0] else: error_code = "Error code unavailable" error_msg = "Unknown error occurred. Please check the asset configuration and|or action parameters." except: error_code = "Error code unavailable" error_msg = "Unknown error occurred. Please check the asset configuration and|or action parameters." try: error_msg = self._handle_py_ver_compat_for_input_str(error_msg) except TypeError: error_msg = "Error occurred while connecting to the SentinelOne server. Please check the asset configuration and|or the action parameters." except: error_msg = "Unknown error occurred. Please check the asset configuration and|or action parameters." return "Error Code: {0}. Error Message: {1}".format(error_code, error_msg) def _process_empty_response(self, response, action_result): if response.status_code == 200: return RetVal(phantom.APP_SUCCESS, {}) return RetVal(action_result.set_status(phantom.APP_ERROR, "Empty response and no information in the header"), None) def _process_html_response(self, response, action_result): # An html response, treat it like an error status_code = response.status_code try: soup = BeautifulSoup(response.text, "html.parser") # Remove the script, style, footer and navigation part from the HTML message for element in soup(["script", "style", "footer", "nav"]): element.extract() error_text = soup.text split_lines = error_text.split('\n') split_lines = [x.strip() for x in split_lines if x.strip()] error_text = '\n'.join(split_lines) except: error_text = "Cannot parse error details" message = "Status Code: {0}. 
Data from server:\n{1}\n".format(status_code, error_text) message = message.replace('{', '{{').replace('}', '}}') return RetVal(action_result.set_status(phantom.APP_ERROR, message), None) def _process_json_response(self, r, action_result): # Try a json parse try: resp_json = r.json() except Exception as e: return RetVal(action_result.set_status(phantom.APP_ERROR, "Unable to parse JSON response. Error: {0}".format(str(e))), None) # Please specify the status codes here if 200 <= r.status_code < 399: return RetVal(phantom.APP_SUCCESS, resp_json) # You should process the error returned in the json message = "Error from server. Status Code: {0} Data from server: {1}".format( r.status_code, r.text.replace('{', '{{').replace('}', '}}')) return RetVal(action_result.set_status(phantom.APP_ERROR, message), None) def _process_response(self, r, action_result): # store the r_text in debug data, it will get dumped in the logs if the action fails if hasattr(action_result, 'add_debug_data'): action_result.add_debug_data({'r_status_code': r.status_code}) action_result.add_debug_data({'r_text': r.text}) action_result.add_debug_data({'r_headers': r.headers}) # Process each 'Content-Type' of response separately # Process a json response if 'json' in r.headers.get('Content-Type', ''): return self._process_json_response(r, action_result) # Process an HTML resonse, Do this no matter what the api talks. # There is a high chance of a PROXY in between phantom and the rest of # world, in case of errors, PROXY's return HTML, this function parses # the error and adds it to the action_result. if 'html' in r.headers.get('Content-Type', ''): return self._process_html_response(r, action_result) # it's not content-type that is to be parsed, handle an empty response if not r.text: return self._process_empty_response(r, action_result) # everything else is actually an error at this point message = "Can't process response from server. Status Code: {0} Data from server: {1}".format( r.status_code, r.text.replace('{', '{{').replace('}', '}}')) return RetVal(action_result.set_status(phantom.APP_ERROR, message), None) def _make_rest_call(self, endpoint, action_result, headers=None, params=None, data=None, method='get'): config = self.get_config() resp_json = None try: request_func = getattr(requests, method) except AttributeError: return RetVal(action_result.set_status(phantom.APP_ERROR, "Invalid method: {0}".format(method)), resp_json) # Create a URL to connect to url = self._base_url + self._api_v + str(endpoint) self.save_progress(url) try: r = request_func( url, json=data, headers=headers, verify=config.get('verify_server_cert', True), params=params) except Exception as e: error_message = self._get_error_message_from_exception(e) return RetVal(action_result.set_status(phantom.APP_ERROR, "Error Connecting to server. 
Details: {0}".format(error_message)), resp_json) return self._process_response(r, action_result) def _handle_test_connectivity(self, param): # Add an action result object to self (BaseConnector) to represent the action for this param action_result = self.add_action_result(ActionResult(dict(param))) self.save_progress("Connecting to the SentinelOne server") # make rest call headers = self.HEADER headers["Authorization"] = "APIToken %s" % self.token ret_val, response = self._make_rest_call('/private/threats/summary', action_result, headers=headers) self.save_progress("response: {0}".format(response)) if (phantom.is_fail(ret_val)): # the call to the 3rd party device or service failed, action result should contain all the error details # so just return from here self.save_progress("Test Connectivity Failed. Error: {0}".format(action_result.get_message())) return action_result.get_status() # Return success self.save_progress("Login to SentinelOne server is successful") self.save_progress("Test Connectivity passed") return action_result.set_status(phantom.APP_SUCCESS) def _get_site_ids(self, sites, headers, action_result): sites_tokens = [each.strip() for each in sites.split(",")] sites_tokens = list(filter(None, sites_tokens)) site_ids = [] try: url = self._base_url + self._api_v + "/sites" for site in sites_tokens: param = {"registrationToken": site} ret = requests.get(url, headers=headers, params=param) sites_data = ret.json().get('data', {}).get('sites', []) if sites_data: site_data = sites_data[0] site_id = site_data.get('id') if site_id: site_ids.append(site_id) else: self.debug_print("The site_token:{0} is invalid and is getting ignored".format(site)) except Exception as e: action_result.set_status(phantom.APP_ERROR, "Error occurred while getting site ID : {0}".format(str(e))) return None # if site_ids is empty/none then it return None, provided side_tokens were invalid if not site_ids: action_result.set_status(phantom.APP_ERROR, "Please provide valid site token(s)") return None return site_ids def _handle_block_hash(self, param): self.save_progress("In action handler for: {0}".format(self.get_action_identifier())) action_result = self.add_action_result(ActionResult(dict(param))) hash = self._handle_py_ver_compat_for_input_str(param['hash']) description = self._handle_py_ver_compat_for_input_str(param['description']) os_family = self._handle_py_ver_compat_for_input_str(param['os_family']) sites = self._handle_py_ver_compat_for_input_str(param['sites_tokens']) summary = action_result.update_summary({}) summary['hash'] = hash summary['description'] = UnicodeDammit(description).unicode_markup headers = self.HEADER headers["Authorization"] = "APIToken %s" % self.token headers["Content-Type"] = "application/json" # Fetch siteIds from siteToken site_ids = self._get_site_ids(sites, headers, action_result) if site_ids is None: return action_result.get_status() body = { "filter": { "siteIds": site_ids, "tenant": True }, "data": { "description": description, "value": hash, "source": "sentinelone_connector", "osType": os_family, "type": "black_hash", } } try: ret_val, _ = self._make_rest_call('/restrictions', action_result, headers=headers, method='post', data=body) if (phantom.is_fail(ret_val)): return action_result.get_status() except Exception as e: return action_result.set_status(phantom.APP_ERROR, "Error occurred while getting restrictions: {0}".format(e)) return action_result.set_status(phantom.APP_SUCCESS) def _handle_unblock_hash(self, param): self.save_progress("In action handler for: 
{0}".format(self.get_action_identifier())) action_result = self.add_action_result(ActionResult(dict(param))) hash = self._handle_py_ver_compat_for_input_str(param['hash']) sites = self._handle_py_ver_compat_for_input_str(param['sites_tokens']) summary = action_result.update_summary({}) summary['hash'] = hash headers = self.HEADER headers["Authorization"] = "APIToken %s" % self.token headers["Content-Type"] = "application/json" # Fetch siteIds from siteToken site_ids = self._get_site_ids(sites, headers, action_result) if site_ids is None: return action_result.get_status() restrictions_url = self._base_url + self._api_v + "/restrictions" for site_id in site_ids: ids = [] params = {"type": "black_hash", "siteIds": site_id, "value": hash} try: ret = requests.get(restrictions_url, headers=headers, params=params) except Exception as e: return action_result.set_status(phantom.APP_ERROR, "Error occurred while getting restrictions : {0}".format(e)) restrictions_data = ret.json().get('data', []) if restrictions_data: restriction = restrictions_data[0] restriction_id = restriction.get('id') if restriction_id: ids.append(restriction_id) body = { "data": { "type": "black_hash", "ids": ids } } try: ret_val, _ = self._make_rest_call('/restrictions', action_result, headers=headers, method='delete', data=body) if (phantom.is_fail(ret_val)): return action_result.get_status() except Exception as e: return action_result.set_status(phantom.APP_ERROR, "Error occurred while unblock hash: {0}".format(e)) return action_result.set_status(phantom.APP_SUCCESS) def _handle_list_endpoints(self, param): # Implement the handler here # use self.save_progress(...) to send progress messages back to the platform self.save_progress("In action handler for: {0}".format(self.get_action_identifier())) # Add an action result object to self (BaseConnector) to represent the action for this param action_result = self.add_action_result(ActionResult(dict(param))) endpoint = "/agents" list_pgitems = self._list_pageitems(action_result, endpoint) if list_pgitems is None: return action_result.set_status(phantom.APP_ERROR, "Error while getting the endpoints") for item in list_pgitems: action_result.add_data(item) summary = action_result.update_summary({}) summary['total_endpoints'] = len(list_pgitems) return action_result.set_status(phantom.APP_SUCCESS) def _handle_quarantine_device(self, param): self.save_progress("In action handler for: {0}".format(self.get_action_identifier())) action_result = self.add_action_result(ActionResult(dict(param))) ip_hostname = self._handle_py_ver_compat_for_input_str(param['ip_hostname']) sites = self._handle_py_ver_compat_for_input_str(param['sites_tokens']) try: ret_val = self._get_agent_id(ip_hostname, action_result) except Exception as e: return action_result.set_status(phantom.APP_ERROR, "Error occurred while getting agent id : {0}".format(e)) self.save_progress('Agent query: ' + ret_val) if (ret_val == '0'): return action_result.set_status(phantom.APP_ERROR, "Endpoint not found") elif (ret_val == '99'): return action_result.set_status(phantom.APP_ERROR, "More than one endpoint found") else: summary = action_result.update_summary({}) summary['ip_hostname'] = ip_hostname summary['agent_id'] = ret_val headers = self.HEADER headers["Authorization"] = "APIToken %s" % self.token # Fetch siteIds from siteToken site_ids = self._get_site_ids(sites, headers, action_result) if site_ids is None: return action_result.get_status() body = { "filter": { "siteIds": site_ids, "ids": [ret_val], }, } try: ret_val, _ = 
self._make_rest_call('/agents/actions/disconnect', action_result, headers=headers, method='post', data=body) if (phantom.is_fail(ret_val)): return action_result.get_status() except Exception as e: return action_result.set_status(phantom.APP_ERROR, "Error occurred while running quarantine device : {0}".format(e)) return action_result.set_status(phantom.APP_SUCCESS) def _handle_unquarantine_device(self, param): self.save_progress("In action handler for: {0}".format(self.get_action_identifier())) action_result = self.add_action_result(ActionResult(dict(param))) ip_hostname = self._handle_py_ver_compat_for_input_str(param['ip_hostname']) sites = self._handle_py_ver_compat_for_input_str(param['sites_tokens']) try: ret_val = self._get_agent_id(ip_hostname, action_result) except Exception as e: return action_result.set_status(phantom.APP_ERROR, "Error occurred while getting agent ID : {0}".format(e)) self.save_progress('Agent query: ' + ret_val) if (ret_val == '0'): return action_result.set_status(phantom.APP_ERROR, "Endpoint not found") elif (ret_val == '99'): return action_result.set_status(phantom.APP_ERROR, "More than one endpoint found") else: summary = action_result.update_summary({}) summary['ip_hostname'] = ip_hostname summary['agent_id'] = ret_val headers = self.HEADER headers["Authorization"] = "APIToken %s" % self.token # Fetch siteIds from siteToken site_ids = self._get_site_ids(sites, headers, action_result) if site_ids is None: return action_result.get_status() body = { "filter": { "siteIds": site_ids, "ids": [ret_val], }, } try: ret_val, _ = self._make_rest_call('/agents/actions/connect', action_result, headers=headers, method='post', data=body) if (phantom.is_fail(ret_val)): return action_result.get_status() except Exception as e: return action_result.set_status(phantom.APP_ERROR, "Error occurred while unquarantine device : {0}".format(e)) return action_result.set_status(phantom.APP_SUCCESS) def _handle_scan_endpoint(self, param): self.save_progress("In action handler for: {0}".format(self.get_action_identifier())) action_result = self.add_action_result(ActionResult(dict(param))) ip_hostname = self._handle_py_ver_compat_for_input_str(param['ip_hostname']) sites = self._handle_py_ver_compat_for_input_str(param['sites_tokens']) try: ret_val = self._get_agent_id(ip_hostname, action_result) except Exception as e: return action_result.set_status(phantom.APP_ERROR, "Error occurred while getting agent id : {0}".format(e)) self.save_progress('Agent query: ' + ret_val) if ret_val == '0': return action_result.set_status(phantom.APP_ERROR, "Endpoint not found") elif ret_val == '99': return action_result.set_status(phantom.APP_ERROR, "More than one endpoint found") else: summary = action_result.update_summary({}) summary['ip_hostname'] = ip_hostname summary['agent_id'] = ret_val headers = self.HEADER headers["Authorization"] = "APIToken %s" % self.token # Fetch siteIds from siteToken site_ids = self._get_site_ids(sites, headers, action_result) if site_ids is None: return action_result.get_status() body = { "filter": { "siteIds": site_ids, "ids": [ret_val], }, } try: ret_val, _ = self._make_rest_call('/agents/actions/initiate-scan', action_result, headers=headers, method='post', data=body) if (phantom.is_fail(ret_val)): return action_result.get_status() except Exception as e: return action_result.set_status(phantom.APP_ERROR, "Error occurred while scanning endpoint : {0}".format(e)) return action_result.set_status(phantom.APP_SUCCESS) def _handle_get_endpoint_info(self, param): 
self.save_progress("In action handler for: {0}".format(self.get_action_identifier())) action_result = self.add_action_result(ActionResult(dict(param))) ip_hostname = self._handle_py_ver_compat_for_input_str(param['ip_hostname']) try: ret_val = self._get_agent_id(ip_hostname, action_result) except Exception as e: return action_result.set_status(phantom.APP_ERROR, "Error occurred while getting agent id : {0}".format(e)) self.save_progress('Agent query: ' + ret_val) if ret_val == '0': return action_result.set_status(phantom.APP_ERROR, "Endpoint not found") elif ret_val == '99': return action_result.set_status(phantom.APP_ERROR, "More than one endpoint found") else: summary = action_result.update_summary({}) summary['ip_hostname'] = ip_hostname summary['agent_id'] = ret_val # make rest call # GET /web/api/v1.6/agents/{id} headers = self.HEADER headers["Authorization"] = "APIToken %s" % self.token param = {"ids": ret_val} try: ret_val, response = self._make_rest_call('/agents', action_result, headers=headers, params=param) if (phantom.is_fail(ret_val)): return action_result.get_status() self.save_progress("ret_val: {0}".format(ret_val)) except Exception as ee: return action_result.set_status(phantom.APP_ERROR, "Error occurred while getting endpoint info : {0}".format(ee)) if not response.get('data'): return action_result.set_status(phantom.APP_ERROR, 'Found no details for the given endpoint') else: action_result.add_data(response.get('data')[0]) return action_result.set_status(phantom.APP_SUCCESS) def _handle_mitigate_threat(self, param): self.save_progress("In action handler for: {0}".format(self.get_action_identifier())) action_result = self.add_action_result(ActionResult(dict(param))) threat_id = self._handle_py_ver_compat_for_input_str(param['threat_id']) action = self._handle_py_ver_compat_for_input_str(param['action']) sites = self._handle_py_ver_compat_for_input_str(param['sites_tokens']) summary = action_result.update_summary({}) summary['threat_id'] = threat_id summary['action'] = action headers = self.HEADER headers["Authorization"] = "APIToken %s" % self.token headers["Content-Type"] = "application/json" # Fetch siteIds from siteToken site_ids = self._get_site_ids(sites, headers, action_result) if site_ids is None: return action_result.get_status() body = { "filter": { "siteIds": site_ids, "ids": [threat_id], }, } # POST /web/api/v2.1/threats/mitigate/:action try: ret_val, _ = self._make_rest_call('/threats/mitigate/' + action, action_result, headers=headers, method='post', data=body) if (phantom.is_fail(ret_val)): return action_result.get_status() except Exception as e: return action_result.set_status(phantom.APP_ERROR, "Error occurred while mitigate threat : {0}".format(e)) return action_result.set_status(phantom.APP_SUCCESS) def _list_pageitems(self, action_result, endpoint): limit = 100 cursor = None list_pgitems = list() headers = self.HEADER headers["Authorization"] = "APIToken %s" % self.token while True: params = dict() params['limit'] = limit if cursor is not None: params['cursor'] = cursor ret_val, response = self._make_rest_call(endpoint=endpoint, action_result=action_result, params=params, headers=headers) if phantom.is_fail(ret_val) or response is None: return None if response.get('data'): list_pgitems.extend(response.get('data')) if response.get('pagination').get('nextCursor') is None: break else: cursor = response.get('pagination').get('nextCursor') return list_pgitems def _handle_list_threats(self, param): # List the threats self.save_progress("In action handler for: 
{0}".format(self.get_action_identifier())) action_result = self.add_action_result(ActionResult(dict(param))) endpoint = "/threats" list_pgitems = self._list_pageitems(action_result, endpoint) if list_pgitems is None: return action_result.set_status(phantom.APP_ERROR, "Error while getting the threats") for threat in list_pgitems: action_result.add_data(threat) summary = action_result.update_summary({}) summary['total_threats'] = len(list_pgitems) return action_result.set_status(phantom.APP_SUCCESS) def _get_agent_id(self, search_text, action_result): # First lookup the Agent ID headers = self.HEADER headers["Authorization"] = "APIToken %s" % self.token param = {"query": search_text} ret_val, response = self._make_rest_call('/agents', action_result, headers=headers, params=param) if (phantom.is_fail(ret_val)): return str(-1) endpoints_found = len(response.get('data', [])) self.save_progress("Endpoints found: " + str(endpoints_found)) if endpoints_found == 0: return '0' elif endpoints_found > 1: return '99' else: return response.get('data')[0].get('id', str(-1)) def handle_action(self, param=None, sites=None): ret_val = phantom.APP_SUCCESS # Get the action that we are supposed to execute for this App Run action_id = self.get_action_identifier() self.debug_print("action_id", self.get_action_identifier()) if action_id == 'test_connectivity': ret_val = self._handle_test_connectivity(param) elif action_id == 'list_endpoints': ret_val = self._handle_list_endpoints(param) elif action_id == 'get_endpoint_info': ret_val = self._handle_get_endpoint_info(param) elif action_id == 'block_hash': ret_val = self._handle_block_hash(param) elif action_id == 'quarantine_device': ret_val = self._handle_quarantine_device(param) elif action_id == 'unquarantine_device': ret_val = self._handle_unquarantine_device(param) elif action_id == 'unblock_hash': ret_val = self._handle_unblock_hash(param) elif action_id == 'mitigate_threat': ret_val = self._handle_mitigate_threat(param) elif action_id == 'scan_endpoint': ret_val = self._handle_scan_endpoint(param) elif action_id == 'list_threats': ret_val = self._handle_list_threats(param) return ret_val def initialize(self): # Load the state in initialize, use it to store data # that needs to be accessed across actions self._python_version = int(sys.version_info[0]) self._state = self.load_state() # get the asset config config = self.get_config() # Access values in asset config by the name # Required values can be accessed directly self._base_url = self._handle_py_ver_compat_for_input_str(config['sentinelone_server_url']) # Optional values should use the .get() function self.token = self._handle_py_ver_compat_for_input_str(config.get('access_token')) return phantom.APP_SUCCESS def finalize(self): # Save the state, this data is saved accross actions and app upgrades self.save_state(self._state) return phantom.APP_SUCCESS if __name__ == '__main__': import pudb import argparse pudb.set_trace() argparser = argparse.ArgumentParser() argparser.add_argument('input_test_json', help='Input Test JSON file') argparser.add_argument('-s', '--sites', help='sites', required=True) argparser.add_argument('-u', '--username', help='username', required=False) argparser.add_argument('-p', '--password', help='password', required=False) args = argparser.parse_args() session_id = None sites_tokens = (args.sites).split(',') username = args.username password = args.password if username is not None and password is None: # User specified a username but not a password, so ask import getpass password = 
getpass.getpass("Password: ") if username and password: login_url = BaseConnector._get_phantom_base_url() + "login" try: print("Accessing the Login page") r = requests.get(login_url, verify=False) csrftoken = r.cookies['csrftoken'] data = dict() data['username'] = username data['password'] = password data['csrfmiddlewaretoken'] = csrftoken headers = dict() headers['Cookie'] = 'csrftoken=' + csrftoken headers['Referer'] = login_url print("Logging into Platform to get the session id") r2 = requests.post(login_url, verify=False, data=data, headers=headers) session_id = r2.cookies['sessionid'] except Exception as e: print("Unable to get session id from the platfrom. Error: " + str(e)) exit(1) if len(sys.argv) < 2: print("No test json specified as input") exit(0) with open(sys.argv[1]) as f: in_json = f.read() in_json = json.loads(in_json) print(json.dumps(in_json, indent=4)) connector = SentineloneConnector() connector.print_progress_message = True if session_id is not None: in_json['user_session_token'] = session_id ret_val = connector._handle_action(json.dumps(in_json), None, sites_tokens) print(json.dumps(json.loads(ret_val), indent=4))
from collections import deque import argparse import keras.backend as K import numpy as np import os import pickle from argparse import ArgumentParser from glob import glob from keras import Input, Model from keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint, Callback from keras.layers import Dense, Lambda from keras.optimizers import Adam from natsort import natsorted from constants import c from ml.triplet_loss import deep_speaker_loss from ml.utils import data_to_keras BATCH_SIZE = 900 def get_arguments(parser: ArgumentParser): args = None try: args = parser.parse_args() except Exception: parser.print_help() exit(1) return args def get_script_arguments(): parser = argparse.ArgumentParser() parser.add_argument('--loss_on_softmax', action='store_true') parser.add_argument('--loss_on_embeddings', action='store_true') parser.add_argument('--freeze_embedding_weights', action='store_true') parser.add_argument('--normalize_embeddings', action='store_true') args = get_arguments(parser) return args # - Triplet Loss for embeddings # - Softmax for pre-training def triplet_softmax_model(num_speakers_softmax, batch_size=BATCH_SIZE, emb_trainable=True, normalize_embeddings=False): inp = Input(batch_shape=[batch_size, 39 * 10]) embeddings = Dense(200, activation='sigmoid', name='fc1', trainable=emb_trainable)(inp) if normalize_embeddings: print('Embeddings will be normalized.') embeddings = Lambda(lambda y: K.l2_normalize(y, axis=1), name='normalization')(embeddings) embeddings = Lambda(lambda y: y, name='embeddings')(embeddings) # just a trick to name a layer after if-else. softmax = Dense(num_speakers_softmax, activation='softmax', name='softmax')(embeddings) return Model(inputs=[inp], outputs=[embeddings, softmax]) def compile_triplet_softmax_model(m: Model, loss_on_softmax=True, loss_on_embeddings=False): losses = { 'embeddings': deep_speaker_loss, 'softmax': 'categorical_crossentropy', } loss_weights = { 'embeddings': int(loss_on_embeddings), 'softmax': int(loss_on_softmax), } print(losses) print(loss_weights) m.compile(optimizer=Adam(lr=0.001), loss=losses, loss_weights=loss_weights, metrics=['accuracy']) def fit_model(m, kx_train, ky_train, kx_test, ky_test, batch_size=BATCH_SIZE, max_grad_steps=1000000, initial_epoch=0): # TODO: use this callback checkpoint. # checkpoint = ModelCheckpoint(monitor='val_acc', filepath='checkpoints/model_{epoch:02d}_{val_acc:.3f}.h5', # save_best_only=True) # if the accuracy does not increase by 1.0% over 10 epochs, we stop the training. # early_stopping = EarlyStopping(monitor='val_acc', min_delta=0.01, patience=100, verbose=1, mode='max') # # if the accuracy does not increase over 10 epochs, we reduce the learning rate by half. # reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.5, patience=10, min_lr=0.0001, verbose=1) # anchor and positive = first one. # negative = second one. # order is [anchor, positive, negative]. 
def select_inputs_and_outputs_for_speaker(x, y, speaker_id_): indices = np.random.choice(np.where(y.argmax(axis=1) == speaker_id_)[0], size=batch_size // 3) return x[indices], y[indices] print() print() assert sorted(set(ky_test.argmax(axis=1))) == sorted(set(ky_train.argmax(axis=1))) num_different_speakers = len(set(ky_train.argmax(axis=1))) print('num different speakers =', num_different_speakers) deque_size = 100 train_overall_loss_emb = deque(maxlen=deque_size) test_overall_loss_emb = deque(maxlen=deque_size) train_overall_loss_softmax = deque(maxlen=deque_size) test_overall_loss_softmax = deque(maxlen=deque_size) for epoch in range(initial_epoch, max_grad_steps): two_different_speakers = np.random.choice(range(num_different_speakers), size=2, replace=False) anchor_positive_speaker = two_different_speakers[0] # negative_speaker = two_different_speakers[0] negative_speaker = two_different_speakers[1] assert negative_speaker != anchor_positive_speaker train_inputs_outputs = [ select_inputs_and_outputs_for_speaker(kx_train, ky_train, anchor_positive_speaker), select_inputs_and_outputs_for_speaker(kx_train, ky_train, anchor_positive_speaker), select_inputs_and_outputs_for_speaker(kx_train, ky_train, negative_speaker) ] inputs = np.vstack([v[0] for v in train_inputs_outputs]) outputs = np.vstack([v[1] for v in train_inputs_outputs]) train_loss = m.train_on_batch(inputs, {'embeddings': outputs * 0, 'softmax': outputs}) train_loss = dict(zip(m.metrics_names, train_loss)) train_overall_loss_emb.append(train_loss['embeddings_loss']) train_overall_loss_softmax.append(train_loss['softmax_loss']) test_inputs_outputs = [ select_inputs_and_outputs_for_speaker(kx_test, ky_test, anchor_positive_speaker), select_inputs_and_outputs_for_speaker(kx_test, ky_test, anchor_positive_speaker), select_inputs_and_outputs_for_speaker(kx_test, ky_test, negative_speaker) ] test_inputs = np.vstack([v[0] for v in test_inputs_outputs]) test_outputs = np.vstack([v[1] for v in test_inputs_outputs]) test_loss = m.test_on_batch(test_inputs, {'embeddings': test_outputs * 0, 'softmax': test_outputs}) test_loss = dict(zip(m.metrics_names, test_loss)) test_overall_loss_emb.append(test_loss['embeddings_loss']) test_overall_loss_softmax.append(test_loss['softmax_loss']) if epoch % 10 == 0: format_str = '{0}, train(emb, soft, last {3}) = {1:.5f} {4:.5f}, ' \ 'test(emb, soft, last {3}) = {2:.5f} {4:.5f}.' print(format_str.format(str(epoch).zfill(6), np.mean(train_overall_loss_emb), np.mean(test_overall_loss_emb), deque_size, np.mean(train_overall_loss_softmax), np.mean(train_overall_loss_softmax), )) if epoch % 100 == 0: print('train metrics =', train_loss) print('test metrics =', test_loss) m.save_weights('checkpoints/unified_model_checkpoints_{}.h5'.format(epoch), overwrite=True) print('Last two speakers were {} and {}.'.format(anchor_positive_speaker, negative_speaker)) print('Saving...') def fit_model_softmax(m, kx_train, ky_train, kx_test, ky_test, batch_size=BATCH_SIZE, max_epochs=1000, initial_epoch=0): checkpoint = ModelCheckpoint(filepath='checkpoints/unified_model_checkpoints_{epoch}.h5', period=10) # if the accuracy does not increase by 1.0% over 10 epochs, we stop the training. early_stopping = EarlyStopping(monitor='val_softmax_acc', min_delta=0.01, patience=100, verbose=1, mode='max') # if the accuracy does not increase over 10 epochs, we reduce the learning rate by half. 
reduce_lr = ReduceLROnPlateau(monitor='val_softmax_acc', factor=0.5, patience=10, min_lr=0.0001, verbose=1) max_len_train = len(kx_train) - len(kx_train) % batch_size kx_train = kx_train[0:max_len_train] ky_train = ky_train[0:max_len_train] max_len_test = len(kx_test) - len(kx_test) % batch_size kx_test = kx_test[0:max_len_test] ky_test = ky_test[0:max_len_test] print('The embedding loss here does not make sense. Do not get fooled by it. Triplets are not present here.') print('We train the embedding weights first.') class WarningCallback(Callback): def on_epoch_end(self, epoch, logs=None): print('The embedding loss here does not make sense. Do not get fooled by it. ' 'Triplets are not generated here. We train the embedding weights first.') m.fit(kx_train, {'embeddings': ky_train, 'softmax': ky_train}, batch_size=batch_size, epochs=initial_epoch + max_epochs, initial_epoch=initial_epoch, verbose=1, validation_data=(kx_test, {'embeddings': ky_test, 'softmax': ky_test}), callbacks=[early_stopping, reduce_lr, checkpoint, WarningCallback()]) def start_training(): if not os.path.exists('checkpoints'): os.makedirs('checkpoints') args = get_script_arguments() if not args.loss_on_softmax and not args.loss_on_embeddings: print('Please provide at least --loss_on_softmax or --loss_on_embeddings.') exit(1) data_filename = '/tmp/speaker-change-detection-data.pkl' assert os.path.exists(data_filename), 'Data does not exist.' print('Loading the inputs in memory. It might take a while...') data = pickle.load(open(data_filename, 'rb')) kx_train, ky_train, kx_test, ky_test, categorical_speakers = data_to_keras(data) print(categorical_speakers.speaker_ids) print(len(categorical_speakers.speaker_ids)) # print(len(AudioReader(audio_dir=c.AUDIO.VCTK_CORPUS_PATH, # sample_rate=c.AUDIO.SAMPLE_RATE, # speakers_sub_list=None).metadata)) assert c.AUDIO.SPEAKER_FOR_CLASSIFICATION_TASK == categorical_speakers.speaker_ids assert len(categorical_speakers.speaker_ids) == 80 emb_trainable = True if args.freeze_embedding_weights: print('FrEeZiNg tHe eMbeDdInG wEiGhTs.') emb_trainable = False m = triplet_softmax_model(num_speakers_softmax=len(categorical_speakers.speaker_ids), emb_trainable=emb_trainable, normalize_embeddings=args.normalize_embeddings) checkpoints = natsorted(glob('checkpoints/*.h5')) compile_triplet_softmax_model(m, loss_on_softmax=args.loss_on_softmax, loss_on_embeddings=args.loss_on_embeddings) print(m.summary()) initial_epoch = 0 if len(checkpoints) != 0: checkpoint_file = checkpoints[-1] initial_epoch = int(checkpoint_file.split('/')[-1].split('.')[0].split('_')[-1]) print('Initial epoch is {}.'.format(initial_epoch)) print('Loading checkpoint: {}.'.format(checkpoint_file)) m.load_weights(checkpoint_file) # latest one. if args.loss_on_softmax: print('Softmax pre-training.') fit_model_softmax(m, kx_train, ky_train, kx_test, ky_test, initial_epoch=initial_epoch) else: fit_model(m, kx_train, ky_train, kx_test, ky_test, initial_epoch=initial_epoch) if __name__ == '__main__': start_training()
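# --- Illustrative sketch of the batch-assembly step (assumptions noted in comments) ---
# The custom loop in fit_model() above feeds the network batches ordered
# [anchor, positive, negative]: two thirds of each batch come from one randomly chosen
# speaker and the last third from a different speaker, so a batch-wise triplet loss can
# pair samples up by position. The snippet below reproduces just that assembly on dummy
# NumPy data; the feature shape, speaker count, and batch size of 9 are made up for the
# example, and no Keras model is involved.
import numpy as np


def build_triplet_batch(x, y_onehot, batch_size=9, rng=np.random):
    """Return (inputs, outputs) stacked as [anchor, positive, negative] thirds."""
    num_speakers = y_onehot.shape[1]
    labels = y_onehot.argmax(axis=1)
    anchor_spk, negative_spk = rng.choice(num_speakers, size=2, replace=False)

    def pick(speaker_id, n):
        idx = rng.choice(np.where(labels == speaker_id)[0], size=n)
        return x[idx], y_onehot[idx]

    third = batch_size // 3
    parts = [pick(anchor_spk, third),    # anchors
             pick(anchor_spk, third),    # positives (same speaker, other samples)
             pick(negative_spk, third)]  # negatives (different speaker)
    inputs = np.vstack([p[0] for p in parts])
    outputs = np.vstack([p[1] for p in parts])
    return inputs, outputs


# Dummy demo: 4 speakers, 40 samples of 390-dim features (39 MFCCs x 10 frames).
x_demo = np.random.rand(40, 390).astype("float32")
y_demo = np.eye(4)[np.random.randint(0, 4, size=40)]
batch_x, batch_y = build_triplet_batch(x_demo, y_demo)
print(batch_x.shape, batch_y.shape)  # (9, 390) (9, 4)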
# Copyright 2017 Or Ozeri # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from paramodai.cfg import CFG import struct class ExecutableParsingError(Exception): pass class Section(object): def __init__(self, data, start_addr): self.data = data self.start_addr = start_addr self.end_addr = start_addr + len(data) def __contains__(self, addr): return self.start_addr <= addr < self.end_addr def __getitem__(self, addr): return struct.unpack("<I", self.get_data(addr, 4))[0] def get_data(self, addr, count): offset = addr-self.start_addr return self.data[offset:offset+count] class CodeSection(Section): def __init__(self, data, start_addr, parser, executable): Section.__init__(self, data, start_addr) self.parser = parser self.executable = executable self.instructions = {} self.prev_instr_addr = {} def get_instr(self, addr): instr = self.instructions.get(addr, None) if instr is None: prev_instr = self.prev_instr_addr.pop(addr, None) data = self.get_data(addr, 16) instr = self.parser(addr, data, prev_instr, self.executable) self.instructions[addr] = instr self.prev_instr_addr[addr+instr.length] = instr return instr class Executable(object): def __init__(self, filename): self.filename = filename self.symbols = {} self.sections = [] self.code_section = None self.parser = self._parse_header() self._parse_sections() self.symbol_addr = {} for addr, sym_name in self.symbols.iteritems(): self.symbol_addr[sym_name] = addr def _parse_header(self): raise NotImplementedError() def _parse_sections(self): raise NotImplementedError() def get_cfg(self, addr): return CFG.get(addr, self) def add_code_section(self, data, addr): self.code_section = CodeSection(data, addr, self.parser, self) self.sections.append(self.code_section) def add_data_section(self, data, addr): self.sections.append(Section(data, addr)) def get_instr(self, addr): return self.code_section.get_instr(addr) def __getitem__(self, addr): for section in self.sections: if addr in section: return section[addr] def __contains__(self, addr): for section in self.sections: if addr in section: return True return False @staticmethod def parse(filename): from paramodai.pe import PEExecutable from paramodai.elf import ELFExecutable containers = [PEExecutable, ELFExecutable] executable = None for container in containers: try: executable = container(filename) except ExecutableParsingError: continue break if not executable: print "Cannot parse file:", filename raise ExecutableParsingError() return executable
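# --- Illustrative sketch (Python 3, not part of the module above) ---
# The Section class above maps a virtual address range onto raw bytes and reads 32-bit
# little-endian words with struct.unpack("<I", ...). The standalone snippet below mirrors
# that addressing scheme so the offset arithmetic is easy to follow; the sample bytes and
# base address are made up for the example.
import struct

data = bytes(range(16))          # 16 bytes of fake section contents
start_addr = 0x400000            # hypothetical load address


def word_at(addr):
    """Read the little-endian 32-bit word stored at a virtual address."""
    offset = addr - start_addr               # virtual address -> offset into the blob
    return struct.unpack("<I", data[offset:offset + 4])[0]


assert start_addr <= 0x400004 < start_addr + len(data)   # same check as Section.__contains__
print(hex(word_at(0x400004)))    # bytes 04 05 06 07 -> 0x7060504 (little endian)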
"""Constants for the Verisure integration.""" from datetime import timedelta import logging from homeassistant.const import ( STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_HOME, STATE_ALARM_DISARMED, STATE_ALARM_PENDING, ) DOMAIN = "verisure" LOGGER = logging.getLogger(__package__) CONF_GIID = "giid" CONF_LOCK_CODE_DIGITS = "lock_code_digits" CONF_LOCK_DEFAULT_CODE = "lock_default_code" DEFAULT_SCAN_INTERVAL = timedelta(minutes=1) DEFAULT_LOCK_CODE_DIGITS = 4 SERVICE_CAPTURE_SMARTCAM = "capture_smartcam" SERVICE_DISABLE_AUTOLOCK = "disable_autolock" SERVICE_ENABLE_AUTOLOCK = "enable_autolock" # Mapping of device types to a human readable name DEVICE_TYPE_NAME = { "CAMERAPIR2": "Camera detector", "HOMEPAD1": "VoiceBox", "HUMIDITY1": "Climate sensor", "PIR2": "Camera detector", "SIREN1": "Siren", "SMARTCAMERA1": "SmartCam", "SMOKE2": "Smoke detector", "SMOKE3": "Smoke detector", "VOICEBOX1": "VoiceBox", "WATER1": "Water detector", } ALARM_STATE_TO_HA = { "DISARMED": STATE_ALARM_DISARMED, "ARMED_HOME": STATE_ALARM_ARMED_HOME, "ARMED_AWAY": STATE_ALARM_ARMED_AWAY, "PENDING": STATE_ALARM_PENDING, } # Legacy; to remove after YAML removal CONF_CODE_DIGITS = "code_digits" CONF_DEFAULT_LOCK_CODE = "default_lock_code"
import humanize import typer from docker.models.containers import Container from rich import print from rich.console import Console from rich.table import Table from rich.prompt import Prompt from datetime import datetime import docker import subprocess app = typer.Typer() @app.command() def ls(): """ List all active containers """ console = Console() client = docker.from_env() containers = client.containers.list() _print_containers_table(console, containers) @app.command() def sh( name: str = typer.Argument(..., help="Approximate container name"), shell_path: str = typer.Argument("/bin/sh", help="shell path inside the container"), ): """ Jump into container shellssss """ console = Console() client = docker.from_env() containers = client.containers.list() found_containers = [] for container in containers: if name in container.attrs["Name"]: found_containers.append(container) if len(found_containers) == 0: print("[bold red]Container not found[/bold red]") print("Running containers :") _print_containers_table(console, containers) input_name = Prompt.ask("Please specify which one you want :") sh(input_name, shell_path) return elif len(found_containers) > 1: print(f"[bold dark_orange]Multiple containers with similar name ({name}) : [/bold dark_orange]") _print_containers_table(console, found_containers) input_name = Prompt.ask("Please specify which one you want :", default=found_containers[0].attrs["Name"]) sh(input_name, shell_path) return container: Container = found_containers[0] p = subprocess.Popen(["docker", "exec", "-it", container.attrs["Config"]["Hostname"], str(shell_path)]) p.communicate() def _print_containers_table(console: Console, containers: list): table = Table(show_header=True, header_style="bold") table.add_column("container ID", width=12) table.add_column("Image", width=45) table.add_column("Status", width=20) table.add_column("Name") for container in containers: table.add_row( container.attrs["Config"]["Hostname"], container.attrs["Config"]["Image"], "Up " + humanize.naturaldelta( datetime.now() - datetime.strptime(container.attrs["State"]["StartedAt"].split(".")[0], "%Y-%m-%dT%H:%M:%S") ), container.attrs["Name"], ) console.print(table)
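# --- Illustrative sketch (sample names are made up) ---
# The `sh` command above picks a container by substring match on its name and then shells
# in with `docker exec -it <hostname> <shell>`. The helpers below isolate those two steps:
# match_containers() is pure Python and can be tested without a Docker daemon, while
# exec_into() mirrors the subprocess call and requires the docker CLI to be installed.
import subprocess


def match_containers(query: str, names: list) -> list:
    """Return every container name that contains `query` as a substring."""
    return [n for n in names if query in n]


def exec_into(container_hostname: str, shell_path: str = "/bin/sh"):
    """Attach an interactive shell, mirroring the subprocess call used above."""
    p = subprocess.Popen(["docker", "exec", "-it", container_hostname, shell_path])
    p.communicate()


sample = ["/web_app_1", "/web_worker_1", "/redis_1"]
print(match_containers("web", sample))    # ['/web_app_1', '/web_worker_1'] -> ambiguous, prompt the user
print(match_containers("redis", sample))  # ['/redis_1'] -> unique, exec into it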
#!/usr/bin/env python # coding: utf-8 # In[1]: #Lara Betül Arslantaş-180401024 def polinoma_cevirme(derece,veriler): matris = [] a = 0 for i in range(derece+1): satir = [] for j in range(derece+1): toplam = 0 for k in range(1,len(veriler)+1): toplam += k**a satir.append(toplam) a += 1 matris.append(satir) a -= derece sonuc = [] for i in range(derece+1): toplam = 0 for j in range(len(veriler)): toplam += veriler[j]*(j+1)**i sonuc.append(toplam) for i in range(derece+1): b = matris[i][i] for j in range(i+1,derece+1): bolum = b/matris[j][i] sonuc[j] = sonuc[j]*bolum-sonuc[i] for k in range(derece+1): matris[j][k] = matris[j][k]*bolum-matris[i][k] for i in range(derece,-1,-1): b = matris[i][i] for j in range(i-1,-1,-1): bolum = b/matris[j][i] sonuc[j] = sonuc[j]*bolum-sonuc[i] for k in range(derece+1): matris[j][k] = matris[j][k]*bolum-matris[i][k] for i in range(derece+1): sonuc[i] = sonuc[i]/matris[i][i] y_ort=0 for i in range (len(veriler)): y_ort += veriler[i] y_ort = y_ort/len(veriler) St=0 Sr=0 for i in range(len(veriler)): x = veriler[i] St +=(veriler[i]-y_ort)**2 for j in range(len(sonuc)): x -= sonuc[j]*(i+1)**j x=x**2 Sr += x korelasyon = ((St-Sr)/St)**(1/2) return sonuc,korelasyon def polinom_katsayilari(p1,p2,p3,p4,p5,p6,dosya): dosya2.write("1.dereceden polinom : a0 = "+str(p1[0]) + " a1 = " + str(p1[1])+"\n" ) dosya2.write("2.dereceden polinom : a0 = "+str(p2[0]) + " a1 = " + str(p2[1]) + " a2 =" + str(p2[2]) + "\n") dosya2.write("3.dereceden polinom : a0 = "+str(p3[0]) + " a1 = " + str(p3[1]) + " a2 =" + str(p3[2]) + " a3 = " + str(p3[3]) + "\n") dosya2.write("4.dereceden polinom : a0 = "+str(p4[0]) + " a1 = " + str(p4[1]) + " a2 =" + str(p4[2]) + " a3 = " + str(p4[3]) + " a4 = " + str(p4[4]) + "\n") dosya2.write("5.dereceden polinom : a0 = "+str(p5[0]) + " a1 = " + str(p5[1]) + " a2 =" + str(p5[2]) + " a3 = " + str(p5[3]) + " a4 = " + str(p5[4]) + " a5 = "+ str(p5[5])+ "\n") dosya2.write("6.dereceden polinom : a0 = "+str(p6[0]) + " a1 = " + str(p6[1]) + " a2 =" + str(p6[2]) + " a3 = " + str(p6[3]) + " a4 = " + str(p6[4]) + " a5 = "+ str(p6[5])+" a6 = "+str(p6[6])+ "\n") def en_uygun_polinom(k1,k2,k3,k4,k5,k6,dosya): dosya2.write("katsayi1 = "+str(k1)+" katsayi2 = "+str(k2)+" katsayi 3 = "+str(k3)+" katsayi4 = "+str(k4)+" katsayi5 = "+str(k5)+" katsayi6 = "+str(k6)+"\n") degerler = [k1,k2,k3,k4,k5,k6] for i in range(len(degerler)): if degerler[i] == max(degerler): dosya2.write("En uygun olan "+str(i+1)+". polinomdur.\n") dosya = open("veriler.txt","r") veriler = dosya.readlines() for i in range(len(veriler)): veriler[i]=int(veriler[i]) p1,k1=polinoma_cevirme(1,veriler) p2,k2=polinoma_cevirme(2,veriler) p3,k3=polinoma_cevirme(3,veriler) p4,k4=polinoma_cevirme(4,veriler) p5,k5=polinoma_cevirme(5,veriler) p6,k6=polinoma_cevirme(6,veriler) dosya.close() dosya2 = open("sonuc.txt","w") polinom_katsayilari(p1,p2,p3,p4,p5,p6,dosya2) en_uygun_polinom(k1,k2,k3,k4,k5,k6,dosya2) for i in range(len(veriler)//10): dosya2.write("\n"+str(i+1)+". 10'lu grup : \n") onluGruplar=[] for j in range(10): onluGruplar.append(veriler[10*i+j]) p1,k1=polinoma_cevirme(1,onluGruplar) p2,k2=polinoma_cevirme(2,onluGruplar) p3,k3=polinoma_cevirme(3,onluGruplar) p4,k4=polinoma_cevirme(4,onluGruplar) p5,k5=polinoma_cevirme(5,onluGruplar) p6,k6=polinoma_cevirme(6,onluGruplar) polinom_katsayilari(p1,p2,p3,p4,p5,p6,dosya2) en_uygun_polinom(k1,k2,k3,k4,k5,k6,dosya2) dosya2.close() # In[ ]:
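# --- Illustrative cross-check (not part of the homework script above; sample data are made up) ---
# polinoma_cevirme() builds and solves the normal equations for a least-squares polynomial fit
# where the x values are implicitly 1, 2, ..., n (the 1-based sample index), then reports the
# correlation coefficient r = sqrt((St - Sr) / St). The snippet below reproduces the same
# quantities with NumPy, which is a convenient sanity check for the hand-rolled elimination.
import numpy as np


def fit_and_correlate(y, degree):
    y = np.asarray(y, dtype=float)
    x = np.arange(1, len(y) + 1, dtype=float)      # same implicit x values as the script
    coeffs = np.polyfit(x, y, degree)              # highest power first
    y_hat = np.polyval(coeffs, x)
    st = np.sum((y - y.mean()) ** 2)               # total sum of squares (St)
    sr = np.sum((y - y_hat) ** 2)                  # residual sum of squares (Sr)
    r = np.sqrt((st - sr) / st)
    return coeffs[::-1], r                         # a0..ak order, like the script's output


coeffs, r = fit_and_correlate([3, 5, 8, 12, 18, 25], degree=2)
print(coeffs, r)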
"""Minor optimization routines.""" from prysm.conf import config from prysm.mathops import np from . import raygen, spencer_and_murty from scipy import optimize def _intersect_lines(P1, S1, P2, S2): """Find the slerp along the line (P1, S1) that results in intersection with the line (P2, S2). P = position, array shape (3,) S = direction cosines, array shape (3,) pair of two lines only. """ # solution via linear algebra Ax = np.stack([S1, -S2], axis=1) y = P2 - P1 return np.linalg.pinv(Ax) @ y def _establish_axis(P1, P2): """Given two points, establish an axis between them. Parameters ---------- P1 : numpy.ndarray shape (3,), any float dtype first point P2 : numpy.ndarray shape (3,), any float dtype second point Returns ------- numpy.ndarray, numpy.ndarray P1 (same exact PyObject) and direction cosine from P1 -> P2 """ diff = P2 - P1 euclidean_distance = np.sqrt(diff ** 2).sum() num = diff den = euclidean_distance return num / den def paraxial_image_solve(prescription, z, na=0, epd=0, wvl=0.6328): """Find the location of the paraxial image. The location is found via raytracing and not third-order calculations. Two rays are traced very near the optical axis in each X and Y, and the mean distance which produces a zero image height is the result of the solve. If na is nonzero, then the ray originates at x=y=0 at 1/1000th of the given NA. Parameters ---------- prescription : iterable of Surface the prescription to be solved z : float the z distance (absolute) to solve from na : float the object-space numerical aperture to use in the solve, if zero the object is at infinity, else a finite conjugate. 1/1000th of the given NA is used in the solve, the NA of the real system may be quite safely provided as an argument. epd : float entrance pupil diameter, if na=0 and epd=0 an error will be generated. wvl : float wavelength of light, microns consider : str, {'x', 'y', 'xy'} which ray directions to consider in performing the solve, defaults to both X and Y. Returns ------- numpy.ndarray the "P" value to be used with Surface.stop to complete the solve """ if na == 0 and epd == 0: raise ValueError("either na or epd must be nonzero") PARAXIAL_FRACTION = 1e-4 # 1/1000th if na == 0: r = epd/2*PARAXIAL_FRACTION rayfanx = raygen.generate_collimated_ray_fan(2, maxr=r, azimuth=0) rayfany = raygen.generate_collimated_ray_fan(2, maxr=r) all_rays = raygen.concat_rayfans(rayfanx, rayfany) ps, ss = all_rays phist, shist = spencer_and_murty.raytrace(prescription, ps, ss, wvl) # now solve for intersection between the X rays, # P for the each ray P = phist[-1] Px1 = P[0] Px2 = P[1] Py1 = P[2] Py2 = P[3] # S for each ray S = shist[-1] Sx1 = S[0] Sx2 = S[1] Sy1 = S[2] Sy2 = S[3] # find the distance along line 1 which results in intersection with line 2 sx = _intersect_lines(Px1, Sx1, Px2, Sx2) sy = _intersect_lines(Py1, Sy1, Py2, Sy2) s = np.array([*sx, *sy]) # fast-forward all the rays and take the average position P_out = P + s[:, np.newaxis] * S return P_out.mean(axis=0) def ray_aim(P, S, prescription, j, wvl, target=(0, 0, np.nan), debug=False): """Aim a ray such that it encounters the jth surface at target. 
Parameters ---------- P : numpy.ndarray shape (3,), a single ray's initial positions S : numpy.ndarray shape (3,) a single ray's initial direction cosines prescription : iterable sequence of surfaces in the prescription j : int the surface index in prescription at which the ray should hit (target) wvl : float wavelength of light to use in ray aiming, microns target : iterable of length 3 the position at which the ray should intersect the target surface NaNs indicate to ignore that position in aiming debug : bool, optional if True, returns the (ray-aiming) optimization result as well as the adjustment P Returns ------- numpy.ndarray deltas to P which result in ray intersection """ P = np.asarray(P).astype(config.precision).copy() S = np.asarray(S).astype(config.precision).copy() target = np.asarray(target) trace_path = prescription[:j+1] def optfcn(x): P[:2] = x phist, _ = spencer_and_murty.raytrace(trace_path, P, S, wvl) final_position = phist[-1] euclidean_dist = (final_position - target)**2 euclidean_dist = np.nansum(euclidean_dist)/3 # /3 = div by number of axes return euclidean_dist res = optimize.minimize(optfcn, np.zeros(2), method='L-BFGS-B') P[:] = 0 P[:2] = res.x if debug: return P, res else: return P def locate_ep(P_chief, S_chief, P_obj, P_s1): """Locate the entrance pupil of a system. Note, for a co-axial system P_obj[0] and [1] should be 0, and the same is true for P_s1[0] and [1]. This function, 1) establishes the axis between the object and the first surface of the system 2) finds the intersection of the chief ray and that axis Parameters ---------- P_chief : numpy.ndarray starting position of the chief ray, at the object plane S_chief : numpy.ndarray starting direction cosine of the chief ray P_obj : iterable the position of the object P_s1 : iterable the position of the first surface of the prescription. Not the point of intersection for the chief ray, pres[0].P Returns ------- numpy.ndarray position of the entrance pupil (X,Y,Z) """ S_axis = _establish_axis(P_obj, P_s1) s = _intersect_lines(P_chief, S_chief, P_s1, S_axis) # s is the slerp for each ray, we just want to go from S1 return P_s1 + s[1] * S_axis def locate_xp(P_chief, S_chief, P_img, P_sk): """Locate the exit pupil of a system. Note, for a co-axial system P_img[0] and [1] should be 0, and the same is true for P_sk[0] and [1]. This function, 1) establishes the axis between the object and the first surface of the system 2) finds the intersection of the chief ray and that axis Parameters ---------- P_chief : numpy.ndarray final position of the chief ray, at the image plane S_chief : numpy.ndarray final direction cosine of the chief ray P_img : iterable the position of the object P_sk : iterable the position of the first surface of the prescription. Not the point of intersection for the chief ray, pres[0].P Returns ------- numpy.ndarray position of the entrance pupil (X,Y,Z) """ S_axis = _establish_axis(P_img, P_sk) s = _intersect_lines(P_chief, S_chief, P_sk, S_axis) # s is the slerp for each ray, we just want to go from S1 return P_sk + s[1] * S_axis
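# --- Illustrative example (numbers are made up for the demonstration) ---
# _intersect_lines() above solves the small least-squares system
# [S1, -S2] @ [s1, s2] = P2 - P1 with a pseudoinverse, returning the path lengths along
# each line at their (closest) intersection. The snippet below exercises that same math on
# two lines in the x-z plane whose intersection point is known by construction.
import numpy as np

P1 = np.array([0.0, 0.0, 0.0]); S1 = np.array([0.0, 0.0, 1.0])                 # the optical axis
P2 = np.array([1.0, 0.0, 0.0]); S2 = np.array([-1.0, 0.0, 1.0]) / np.sqrt(2)   # 45 deg toward the axis

A = np.stack([S1, -S2], axis=1)
s = np.linalg.pinv(A) @ (P2 - P1)   # path lengths along line 1 and line 2
print(P1 + s[0] * S1)               # -> [0. 0. 1.], where the two lines cross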
# -*- coding: utf-8 -*- """ @author: marcoguerro @title: EMNIST - Support Vector Machine """ import scipy import random import matplotlib.pyplot as plt import matplotlib import sklearn.svm as svm import sklearn.metrics as metrics import numpy as np import pandas as pd import time #file in matlab format data = scipy.io.loadmat('emnist-balanced.mat') train_set, test_set, mapping_set = data['dataset'][0][0] class prova(): def esempio(dat): rand = random.sample(range(len(dat[0][0][0])),1) a = dat[0][0][0][rand] some_digit_image = a.reshape(28, 28)/255 #[0,255] some_digit_image_transpose = some_digit_image.transpose() plt.imshow(some_digit_image_transpose, cmap = matplotlib.cm.binary, interpolation="nearest") plt.axis("off") plt.show() print(dat[0][0][1][rand]) def trans_image(dat): vect=[] for i in range(len(dat[0][0][0])): a = dat[0][0][0][i] some_digit_image = a.reshape(28, 28)/255 #[0,255] some_digit_image_transpose = some_digit_image.transpose() vect.append(some_digit_image_transpose.ravel()) return np.array(vect) def labels(dat): return(dat[0][0][1].ravel()) train_images = prova.trans_image(train_set) train_labels = prova.labels(train_set) test_images = prova.trans_image(test_set) test_labels = prova.labels(test_set) SVC = svm.SVC(C=125,gamma=0.015) print('Inizio fit -> ',time.localtime().tm_hour,':',time.localtime().tm_min,':',time.localtime().tm_sec) SVC.fit(train_images, train_labels) print('Fine fit -> ',time.localtime().tm_hour,':',time.localtime().tm_min,':',time.localtime().tm_sec) expected = test_labels print('Inizio predict -> ',time.localtime().tm_hour,':',time.localtime().tm_min,':',time.localtime().tm_sec) predicted = SVC.predict(test_images) print('Fine predict -> ',time.localtime().tm_hour,':',time.localtime().tm_min,':',time.localtime().tm_sec) print(metrics.accuracy_score(expected, predicted)) print("Classification report for classifier %s:\n%s\n" % (SVC, metrics.classification_report(expected, predicted))) print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted)) ## Utilizzando un campione omogeneo e ridotto (validation) SVC.fit(train_images[94000:], train_labels[94000:]) expected = test_labels predicted = SVC.predict(test_images) print(metrics.accuracy_score(expected, predicted)) ## Plot a nice confusion matrix import seaborn as sn labels = list(range(10)) labels.extend(list(map(chr, range(65, 91)))) labels.extend(['a', 'b', 'd', 'e', 'f', 'g', 'h', 'n', 'q', 'r', 't']) cnf_matrix = metrics.confusion_matrix(expected, predicted) cnf_matrix_pd = pd.DataFrame(cnf_matrix, index=labels, columns=labels) plt.figure(figsize = (20,14)) sn.set(font_scale=1.4) #for label size sn.heatmap(cnf_matrix_pd, annot=True,annot_kws={"size": 5}, fmt='g') # font size
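# --- Illustrative sketch (random data, not EMNIST) ---
# prova.trans_image() above reshapes each flat 784-vector to 28x28, scales it to [0, 1],
# transposes it (the EMNIST .mat files store images column-major, so they appear
# rotated/flipped otherwise), and flattens it back into a feature vector. The snippet below
# runs that transform on one dummy vector so the shape bookkeeping is explicit.
import numpy as np

flat = np.random.randint(0, 256, size=784)   # one fake EMNIST sample
img = flat.reshape(28, 28) / 255.0           # [0, 255] -> [0, 1]
img = img.transpose()                        # undo the column-major storage
features = img.ravel()                       # back to a 784-dim feature vector
print(features.shape, features.min(), features.max())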
# Copyright (c) 2021, Apple Inc. All rights reserved. # # Use of this source code is governed by a BSD-3-clause license that can be # found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause import gc import collections import warnings from coremltools import ComputeUnit as _ComputeUnit from coremltools.converters.mil.mil.passes.quantization_passes import AbstractQuantizationPass, FP16ComputePrecision from coremltools.converters.mil.mil.passes.quantization_passes import ComputePrecision as precision from coremltools.converters.mil.input_types import InputType, ClassifierConfig from coremltools.converters.mil.converter import mil_convert from coremltools.converters.mil.mil import Program from coremltools._deps import _HAS_TORCH, _HAS_TF_1, _HAS_TF_2 from coremltools.converters._profile_utils import _profile from coremltools.models import _METADATA_VERSION, _METADATA_SOURCE from coremltools.converters.mil._deployment_compatibility import ( AvailableTarget, check_deployment_compatibility, ) if _HAS_TF_1: import tensorflow as tf from coremltools.converters.mil.frontend.tensorflow.load import TF1Loader if _HAS_TF_2: import tensorflow as tf from coremltools.converters.mil.frontend.tensorflow2.load import TF2Loader if _HAS_TORCH: import torch from coremltools.converters.mil.frontend.torch.load import ( _torchscript_from_model as pytorch_load, ) @_profile def convert( model, source="auto", inputs=None, outputs=None, classifier_config=None, minimum_deployment_target=None, convert_to=None, compute_precision=None, skip_model_load=False, compute_units=_ComputeUnit.ALL, **kwargs ): """ Convert a TensorFlow or PyTorch model to the Core ML model format as either a neural network or an ML program. To learn about the differences, see `ML Programs <https://coremltools.readme.io/docs/ml-programs>`_. This function is aliased as ``ct.convert`` in examples and guides. Some parameters and requirements differ by TensorFlow and PyTorch frameworks. Parameters ---------- model : TensorFlow 1, TensorFlow 2, or PyTorch model in one of the following formats: For TensorFlow versions 1.x: - Frozen `tf.Graph <https://www.tensorflow.org/api_docs/python/tf/Graph>`_ - Frozen graph (``.pb``) file path - `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras>`_ - `HDF5 <https://keras.io/api/models/model_saving_apis/>`_ file path (``.h5``) - `SavedModel <https://www.tensorflow.org/guide/saved_model>`_ directory path For TensorFlow versions 2.x: - `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras>`_ - `HDF5 file path <https://keras.io/api/models/model_saving_apis/>`_ (``.h5``) - `SavedModel <https://www.tensorflow.org/guide/saved_model>`_ directory path - A `concrete function <https://www.tensorflow.org/guide/concrete_function>`_ For PyTorch: - A `TorchScript <https://pytorch.org/docs/stable/jit.html>`_ object - Path to a ``.pt`` file source : str (optional) One of [``auto``, ``tensorflow``, ``pytorch``, ``milinternal``]. ``auto`` determines the framework automatically for most cases. Raises ``ValueError`` if it fails to determine the source framework. inputs : list of ``TensorType`` or ``ImageType`` TensorFlow 1 and 2: - The ``inputs`` parameter is optional. If not provided, the inputs are placeholder nodes in the model (if the model is frozen graph) or function inputs (if the model is a ``tf.function``). - The inputs must correspond to all or some of the placeholder nodes in the TF model. - ``TensorType`` and ``ImageType`` in ``inputs`` must have the ``name`` specified. 
``shape`` is optional. - If ``inputs`` is provided, it must be a flat list. PyTorch: - The ``inputs`` parameter is required. - ``inputs`` may be a nested list or tuple. - ``TensorType`` and ``ImageType`` in ``inputs`` must have the ``name`` and ``shape`` specified. outputs : list[str] (optional) TensorFlow 1 and 2: - The ``outputs`` parameter is optional. - If specified, ``outputs`` is a list of string representing node names. - If ``outputs`` is not specified, the converter infers outputs to all be terminal identity nodes. PyTorch: - ``outputs`` must not be specified. classifier_config : ClassifierConfig class (optional) The configuration if the MLModel is intended to be a classifier. minimum_deployment_target : coremltools.target enumeration (optional) - One of the members of enum ``coremltools.target``. - The value of this parameter determines the type of the model reperesentation produced by the converter. Alternatively, you can use the ``convert_to`` parameter to specify the model type (see the ``convert_to`` parameter). To learn about the differences between neural networks and ML programs, see `ML Programs <https://coremltools.readme.io/docs/ml-programs>`_. - The converter produces a neural network (``neuralnetwork``) if: :: minimum_deployment_target <= coremltools.target.iOS14/ coremltools.target.macOS11/ coremltools.target.watchOS7/ coremltools.target.tvOS14: - The converter produces an ML program (``mlprogram``) if: :: minimum_deployment_target >= coremltools.target.iOS15/ coremltools.target.macOS12/ coremltools.target.watchOS8/ coremltools.target.tvOS15: - If neither the ``minimum_deployment_target`` nor the ``convert_to`` parameter is specified, the converter produces the neural network model type with as minimum of a deployment target as possible. - If this parameter is specified and ``convert_to`` is also specified, they must be compatible. The following are examples of invalid values: :: # Invalid: convert_to="neuralnetwork", minimum_deployment_target=coremltools.target.iOS15 # Invalid: convert_to="mlprogram", minimum_deployment_target=coremltools.target.iOS14 convert_to : str (optional) - Must be one of [``'neuralnetwork'``, ``'mlprogram'``, ``'milinternal'``]. - The value of this parameter determines the type of the model reperesentation produced by the converter. Alternatively, you can use the ``minimum_deployment_target`` parameter to specify the model type (see the ``minimum_deployment_target`` parameter). To learn about the differences between neural networks and ML programs, see `ML Programs <https://coremltools.readme.io/docs/ml-programs>`_. - ``'neuralnetwork'``: Returns an MLModel (``coremltools.models.MLModel``) containing a NeuralNetwork proto, which is the original Core ML format. The model saved from this returned object is executable either on iOS13/macOS10.15/watchOS6/tvOS13 and above, or on iOS14/macOS11/watchOS7/tvOS14 and above, depending on the layers used in the model. - ``'mlprogram'`` : Returns an MLModel (``coremltools.models.MLModel``) containing a MILSpec.Program proto, which is the Core ML program format. The model saved from this returned object is executable on iOS15, macOS12, watchOS8, and tvOS15. - ``'milinternal'``: Returns an MIL program object (``coremltools.converters.mil.Program``). An MIL program is primarily used for debugging and inspection. 
It can be converted to an MLModel for execution by using one of the following: :: ct.convert(mil_program, convert_to="neuralnetwork") ct.convert(mil_program, convert_to="mlprogram") - If neither the ``minimum_deployment_target`` nor the ``convert_to`` parameter is specified, the converter produces the neural network model type with as minimum of a deployment target as possible. compute_precision : coremltools.precision enumeration or ct.transform.FP16ComputePrecision() (optional) - Must be one of the following: - ``coremltools.precision.FLOAT16`` - The following transform is applied: :: coremltools.transform.FP16ComputePrecision(op_selector= lambda op:True) The above transform injects ``cast`` ops to convert the float32 dtypes of intermediate tensors to float16. - ``coremltools.precision.FLOAT32`` - No transform is applied. The original float32 tensor dtype in the source model is preserved. - ``coremltools.transform.FP16ComputePrecision(op_selector=...)`` - Use the above to control which tensors are cast to float16. - For example: :: coremltools.transform.FP16ComputePrecision(op_selector= lambda op: op.op_type != "linear") The above casts all the float32 tensors to be float16, except the input/output tensors to any ``linear`` op. - If ``None``, - When ``convert_to="mlprogram"``, compute_precision parameter defaults to ``coremltools.precision.FLOAT16``. - When ``convert_to="neuralnetwork"``, compute_precision parameter needs to be ``None`` and has no meaning. skip_model_load : bool Set to True to prevent coremltools from calling into the Core ML framework to compile and load the model, post-conversion. In that case, the returned model object cannot be used to make a prediction, but can be used to save via ``"model.save()"``. This flag may be used to convert to a newer model type on an older Mac, which if done without turning this flag on, may raise a runtime warning. Example: Use this flag to suppress runtime warning when converting to ML program model type on a macOS 11, since ML program can only be compiled and loaded from macOS12+. Defaults to False. compute_units: coremltools.ComputeUnit An enum with three possible values: - ``coremltools.ComputeUnit.ALL``: Use all compute units available, including the neural engine. - ``coremltools.ComputeUnit.CPU_ONLY``: Limit the model to only use the CPU. - ``coremltools.ComputeUnit.CPU_AND_GPU``: Use both the CPU and GPU, but not the neural engine. Returns ------- model : ``coremltools.models.MLModel`` or ``coremltools.converters.mil.Program`` A Core ML MLModel object or MIL Program object (see ``convert_to``). 
Examples -------- TensorFlow 1, 2 (``model`` is a frozen graph): >>> with tf.Graph().as_default() as graph: >>> x = tf.placeholder(tf.float32, shape=(1, 2, 3), name="input") >>> y = tf.nn.relu(x, name="output") Automatically infer inputs and outputs: >>> mlmodel = ct.convert(graph) >>> test_input = np.random.rand(1, 2, 3) - 0.5 >>> results = mlmodel.predict({"input": test_input}) >>> print(results['output']) TensorFlow 2 (``model`` is tf.Keras model path): >>> x = tf.keras.Input(shape=(32,), name='input') >>> y = tf.keras.layers.Dense(16, activation='softmax')(x) >>> keras_model = tf.keras.Model(x, y) >>> keras_model.save(h5_path) >>> mlmodel = ct.convert(h5_path) >>> test_input = np.random.rand(2, 32) >>> results = mlmodel.predict({'input': test_input}) >>> print(results['Identity']) PyTorch: >>> model = torchvision.models.mobilenet_v2() >>> model.eval() >>> example_input = torch.rand(1, 3, 256, 256) >>> traced_model = torch.jit.trace(model, example_input) >>> input = ct.TensorType(name='input_name', shape=(1, 3, 256, 256)) >>> mlmodel = ct.convert(traced_model, inputs=[input]) >>> results = mlmodel.predict({"input": example_input.numpy()}) >>> print(results['1651']) # 1651 is the node name given by PyTorch's JIT See `neural-network-conversion <https://coremltools.readme.io/docs/neural-network-conversion>`_ for more advanced options. """ from coremltools import __version__ as ct_version _check_deployment_target(minimum_deployment_target) exact_source = _determine_source(model, source, outputs) exact_target = _determine_target(convert_to, minimum_deployment_target) _validate_inputs(model, exact_source, inputs, outputs, classifier_config, compute_precision, exact_target, **kwargs) if "useCPUOnly" in kwargs and kwargs["useCPUOnly"]: warnings.warn('The "useCPUOnly" parameter is deprecated and will be removed in 6.0. ' 'Use the compute_units parameter: "compute_units=coremotools.ComputeUnits.CPU_ONLY".') compute_units = _ComputeUnit.CPU_ONLY if compute_precision is None: transforms = [FP16ComputePrecision(op_selector=lambda op: True)] if convert_to != "neuralnetwork" else list() elif compute_precision == precision.FLOAT32: transforms = list() elif compute_precision == precision.FLOAT16: transforms = [FP16ComputePrecision(op_selector=lambda op: True)] elif isinstance(compute_precision, FP16ComputePrecision): transforms = [compute_precision] else: raise ValueError("Invalid value of the argument 'compute_precision'") mlmodel = mil_convert( model, convert_from=exact_source, convert_to=exact_target, inputs=inputs, outputs=outputs, classifier_config=classifier_config, transforms=tuple(transforms), skip_model_load=skip_model_load, compute_units=compute_units, **kwargs ) if exact_target == 'milinternal': return mlmodel # Returns the MIL program if minimum_deployment_target is not None: check_deployment_compatibility( spec=mlmodel.get_spec(), representation=exact_target, deployment_target=minimum_deployment_target, ) gc.collect() mlmodel = _record_src_version(mlmodel, exact_source) mlmodel.user_defined_metadata[_METADATA_VERSION] = ct_version return mlmodel def _check_deployment_target(minimum_deployment_target): if minimum_deployment_target is not None and \ not isinstance(minimum_deployment_target, AvailableTarget): msg = ( "Unrecognized value of argument 'minimum_deployment_target': {}. " "It needs to be a member of 'coremltools.target' enumeration. 
" "For example, coremltools.target.iOS13" ) raise TypeError(msg.format(minimum_deployment_target)) def _validate_inputs(model, exact_source, inputs, outputs, classifier_config, compute_precision, convert_to, **kwargs): """ Validate and process model, inputs, outputs, classifier_config based on `exact_source` (which cannot be `auto`) """ def raise_if_duplicated(input_list): # Detect duplicated inputs input_names = [t.name for t in input_list if t.name is not None] dups = [ item for item, count in collections.Counter(input_names).items() if count > 1 ] if len(dups) > 0: raise ValueError("Duplicated inputs: {}".format(dups)) if inputs is not None: if not isinstance(inputs, list): msg = '"inputs" must be of type list' raise ValueError(msg) if classifier_config is not None: if not isinstance(classifier_config, ClassifierConfig): msg = '"classifier_config" must be of type ClassifierConfig' raise ValueError(msg) if convert_to.lower() == 'neuralnetwork' and compute_precision is not None: msg = "compute_precision is only supported for mlprogram target and must be None if target=='neuralnetwork'.\n" \ "Note that target may be implicitly set depending on the minimum_deployment_target.\n" \ "See minimum_deployment_target for more details." raise ValueError(msg) if compute_precision is not None: if compute_precision not in [precision.FLOAT32, precision.FLOAT16]: if not isinstance(compute_precision, FP16ComputePrecision): msg = "'compute_precision' must be either coremltools.precision.FLOAT32 or coremltools.precision.FLOAT16" \ " or of type coremltools.transform.FP16ComputePrecision()" raise ValueError(msg) if exact_source in {"tensorflow", "tensorflow2"}: if exact_source == "tensorflow" and not _HAS_TF_1: msg = 'Converter was called with source="tensorflow", ' +\ 'but missing tensorflow package' raise ValueError(msg) if inputs is not None: raise_if_duplicated(inputs) if inputs is not None and not all( [isinstance(_input, InputType) for _input in inputs] ): raise ValueError("Input should be a list of TensorType or ImageType") elif exact_source == "pytorch": if "example_inputs" in kwargs: msg = 'Unexpected argument "example_inputs" found' raise ValueError(msg) if inputs is None: msg = 'Expected argument for pytorch "inputs" not provided' raise ValueError(msg) def _flatten_list(_inputs): ret = [] for _input in _inputs: if isinstance(_input, (list, tuple)): ret.extend(_flatten_list(_input)) elif isinstance(_input, InputType): ret.append(_input) else: raise ValueError( "Unknown type {} for flattening into InputType.".format( type(_input) ) ) return ret flat_inputs = _flatten_list(inputs) raise_if_duplicated(flat_inputs) if inputs is not None and not all( [isinstance(_input, InputType) for _input in flat_inputs] ): raise ValueError( "Input should be a list/tuple (or nested lists/tuples) of TensorType or ImageType" ) if outputs is not None: raise ValueError("outputs must not be specified for PyTorch") elif exact_source == "milinternal": if not isinstance(model, Program): msg = "Converter was asked to convert MIL input, but input is not a MIL program!" raise ValueError(msg) def _determine_source(model, source, outputs): """ Infer source (which can be auto) to the precise framework. """ source = source.lower() if source not in {"auto", "tensorflow", "pytorch", "milinternal"}: msg = ( 'Unrecognized value of argument "source": {}. ' 'It must be one of ["auto", "tensorflow", "pytorch"].' 
) raise ValueError(msg.format(source)) # Determine tensorflow version if source == "tensorflow" and _HAS_TF_2: return "tensorflow2" if source != 'auto': return source # Determine `auto` source if source == "auto" and _HAS_TF_1: try: loader = TF1Loader(model, outputs=outputs) loader._graph_def_from_model(outputs=outputs) return "tensorflow" except: pass if source == "auto" and _HAS_TF_2: try: loader = TF2Loader(model, outputs=outputs) loader._graph_def_from_model(outputs=outputs) return "tensorflow2" except: pass if source == "auto" and _HAS_TORCH: try: pytorch_load(model) return "pytorch" except: pass if source == "auto" and isinstance(model, Program): return "milinternal" msg = ( "Unable to determine the type of the model, i.e. the source framework. " 'Please provide the value of argument "source", from one of ' '["tensorflow", "pytorch", "milinternal"]. Note that model conversion requires the ' "source package that generates the model. Please make sure you have " "the appropriate version of source package installed. E.g., if you're " "converting model originally trained with TensorFlow 1.14, make sure " "you have `tensorflow==1.14` installed." ) raise ValueError(msg) def _determine_target(convert_to, minimum_deployment_target): """ Infer the precise backend target, which could be one of ``milinternal``, ``neuralnetwork`` or ``mlprogram`` """ if minimum_deployment_target is not None: if convert_to == "mlprogram" and \ minimum_deployment_target.value < AvailableTarget.iOS15.value: msg = "When 'convert_to' is {}, the minimum deployment target must be at least iOS15/macOS12/watchOS8/tvOS15" raise ValueError(msg.format(convert_to)) if convert_to == "neuralnetwork" and \ minimum_deployment_target.value >= AvailableTarget.iOS15.value: msg = "If minimum deployment target is iOS15/macOS12/watchOS8/tvOS15 or higher, then " \ "'convert_to' cannot be {}. It must be 'mlprogram'" raise ValueError(msg.format(convert_to)) if convert_to is not None: return convert_to else: if minimum_deployment_target is None: return "neuralnetwork" elif minimum_deployment_target.value <= AvailableTarget.iOS14.value: return "neuralnetwork" else: return "mlprogram" def _record_src_version(mlmodel, exact_source): # recording metadata: coremltools version, source framework and version if exact_source in {"tensorflow", "tensorflow2"} and (_HAS_TF_1 or _HAS_TF_2): src_pkg_version = "tensorflow=={0}".format(tf.__version__) elif exact_source == "pytorch" and _HAS_TORCH: src_pkg_version = "torch=={0}".format(torch.__version__) elif exact_source == 'milinternal': src_pkg_version = "milinternal" else: raise ValueError('Unsupported source {}'.format(exact_source)) mlmodel.user_defined_metadata[_METADATA_SOURCE] = src_pkg_version return mlmodel
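# --- Illustrative usage sketch (assumes coremltools, torch, and torchvision are installed) ---
# Putting the parameters documented above together: converting a traced PyTorch model to an
# ML program with float16 compute precision, restricted to the CPU. The model and input shape
# are placeholders; any valid traced model works the same way. skip_model_load=True keeps the
# example usable on machines where the resulting ML program cannot be compiled and loaded.
import coremltools as ct
import torch
import torchvision

model = torchvision.models.mobilenet_v2().eval()
example = torch.rand(1, 3, 224, 224)
traced = torch.jit.trace(model, example)

mlmodel = ct.convert(
    traced,
    inputs=[ct.TensorType(name="image", shape=example.shape)],
    convert_to="mlprogram",                   # exact_target resolves to "mlprogram"
    compute_precision=ct.precision.FLOAT16,   # inject float16 cast ops (the default for mlprogram)
    compute_units=ct.ComputeUnit.CPU_ONLY,    # limit execution to the CPU
    skip_model_load=True,                     # do not compile/load post-conversion
)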
from nose.tools import eq_, ok_, raises, assert_true from wtforms import fields, validators from flask_admin import form from flask_admin._compat import as_unicode from flask_admin._compat import iteritems from flask_admin.contrib.sqla import ModelView, filters, tools from flask_babelex import Babel from sqlalchemy.ext.hybrid import hybrid_property from . import setup from datetime import datetime, time, date class CustomModelView(ModelView): def __init__(self, model, session, name=None, category=None, endpoint=None, url=None, **kwargs): for k, v in iteritems(kwargs): setattr(self, k, v) super(CustomModelView, self).__init__(model, session, name, category, endpoint, url) def create_models(db): class Model1(db.Model): def __init__(self, test1=None, test2=None, test3=None, test4=None, bool_field=False, date_field=None, time_field=None, datetime_field=None, enum_field=None): self.test1 = test1 self.test2 = test2 self.test3 = test3 self.test4 = test4 self.bool_field = bool_field self.date_field = date_field self.time_field = time_field self.datetime_field = datetime_field self.enum_field = enum_field id = db.Column(db.Integer, primary_key=True) test1 = db.Column(db.String(20)) test2 = db.Column(db.Unicode(20)) test3 = db.Column(db.Text) test4 = db.Column(db.UnicodeText) bool_field = db.Column(db.Boolean) enum_field = db.Column(db.Enum('model1_v1', 'model1_v2'), nullable=True) date_field = db.Column(db.Date) time_field = db.Column(db.Time) datetime_field = db.Column(db.DateTime) def __unicode__(self): return self.test1 def __str__(self): return self.test1 class Model2(db.Model): def __init__(self, string_field=None, int_field=None, bool_field=None, model1=None, float_field=None, string_field_default=None, string_field_empty_default=None): self.string_field = string_field self.int_field = int_field self.bool_field = bool_field self.model1 = model1 self.float_field = float_field self.string_field_default = string_field_default self.string_field_empty_default = string_field_empty_default id = db.Column(db.Integer, primary_key=True) string_field = db.Column(db.String) string_field_default = db.Column(db.Text, nullable=False, default='') string_field_empty_default = db.Column(db.Text, nullable=False, default='') int_field = db.Column(db.Integer) bool_field = db.Column(db.Boolean) enum_field = db.Column(db.Enum('model2_v1', 'model2_v2'), nullable=True) float_field = db.Column(db.Float) # Relation model1_id = db.Column(db.Integer, db.ForeignKey(Model1.id)) model1 = db.relationship(Model1, backref='model2') db.create_all() return Model1, Model2 def fill_db(db, Model1, Model2): model1_obj1 = Model1('test1_val_1', 'test2_val_1', bool_field=True) model1_obj2 = Model1('test1_val_2', 'test2_val_2', bool_field=False) model1_obj3 = Model1('test1_val_3', 'test2_val_3') model1_obj4 = Model1('test1_val_4', 'test2_val_4') model2_obj1 = Model2('test2_val_1', model1=model1_obj1, float_field=None) model2_obj2 = Model2('test2_val_2', model1=model1_obj2, float_field=None) model2_obj3 = Model2('test2_val_3', int_field=5000, float_field=25.9) model2_obj4 = Model2('test2_val_4', int_field=9000, float_field=75.5) model2_obj5 = Model2('test2_val_5', int_field=6169453081680413441) date_obj1 = Model1('date_obj1', date_field=date(2014,11,17)) date_obj2 = Model1('date_obj2', date_field=date(2013,10,16)) timeonly_obj1 = Model1('timeonly_obj1', time_field=time(11,10,9)) timeonly_obj2 = Model1('timeonly_obj2', time_field=time(10,9,8)) datetime_obj1 = Model1('datetime_obj1', datetime_field=datetime(2014,4,3,1,9,0)) datetime_obj2 = 
Model1('datetime_obj2', datetime_field=datetime(2013,3,2,0,8,0)) enum_obj1 = Model1('enum_obj1', enum_field="model1_v1") enum_obj2 = Model1('enum_obj2', enum_field="model1_v2") empty_obj = Model1(test2="empty_obj") db.session.add_all([ model1_obj1, model1_obj2, model1_obj3, model1_obj4, model2_obj1, model2_obj2, model2_obj3, model2_obj4, model2_obj5, date_obj1, timeonly_obj1, datetime_obj1, date_obj2, timeonly_obj2, datetime_obj2, enum_obj1, enum_obj2, empty_obj ]) db.session.commit() def test_model(): app, db, admin = setup() Model1, Model2 = create_models(db) view = CustomModelView(Model1, db.session) admin.add_view(view) eq_(view.model, Model1) eq_(view.name, 'Model1') eq_(view.endpoint, 'model1') eq_(view._primary_key, 'id') ok_('test1' in view._sortable_columns) ok_('test2' in view._sortable_columns) ok_('test3' in view._sortable_columns) ok_('test4' in view._sortable_columns) ok_(view._create_form_class is not None) ok_(view._edit_form_class is not None) eq_(view._search_supported, False) eq_(view._filters, None) # Verify form eq_(view._create_form_class.test1.field_class, fields.StringField) eq_(view._create_form_class.test2.field_class, fields.StringField) eq_(view._create_form_class.test3.field_class, fields.TextAreaField) eq_(view._create_form_class.test4.field_class, fields.TextAreaField) # Make some test clients client = app.test_client() rv = client.get('/admin/model1/') eq_(rv.status_code, 200) rv = client.get('/admin/model1/new/') eq_(rv.status_code, 200) rv = client.post('/admin/model1/new/', data=dict(test1='test1large', test2='test2', time_field=time(0,0,0))) eq_(rv.status_code, 302) model = db.session.query(Model1).first() eq_(model.test1, u'test1large') eq_(model.test2, u'test2') eq_(model.test3, u'') eq_(model.test4, u'') rv = client.get('/admin/model1/') eq_(rv.status_code, 200) ok_(u'test1large' in rv.data.decode('utf-8')) url = '/admin/model1/edit/?id=%s' % model.id rv = client.get(url) eq_(rv.status_code, 200) # verify that midnight does not show as blank ok_(u'00:00:00' in rv.data.decode('utf-8')) rv = client.post(url, data=dict(test1='test1small', test2='test2large')) eq_(rv.status_code, 302) model = db.session.query(Model1).first() eq_(model.test1, 'test1small') eq_(model.test2, 'test2large') eq_(model.test3, '') eq_(model.test4, '') url = '/admin/model1/delete/?id=%s' % model.id rv = client.post(url) eq_(rv.status_code, 302) eq_(db.session.query(Model1).count(), 0) @raises(Exception) def test_no_pk(): app, db, admin = setup() class Model(db.Model): test = db.Column(db.Integer) view = CustomModelView(Model) admin.add_view(view) def test_list_columns(): app, db, admin = setup() Model1, Model2 = create_models(db) # test column_list with a list of strings view = CustomModelView(Model1, db.session, column_list=['test1', 'test3'], column_labels=dict(test1='Column1')) admin.add_view(view) eq_(len(view._list_columns), 2) eq_(view._list_columns, [('test1', 'Column1'), ('test3', 'Test3')]) client = app.test_client() rv = client.get('/admin/model1/') data = rv.data.decode('utf-8') ok_('Column1' in data) ok_('Test2' not in data) # test column_list with a list of SQLAlchemy columns view2 = CustomModelView(Model1, db.session, endpoint='model1_2', column_list=[Model1.test1, Model1.test3], column_labels=dict(test1='Column1')) admin.add_view(view2) eq_(len(view2._list_columns), 2) eq_(view2._list_columns, [('test1', 'Column1'), ('test3', 'Test3')]) rv = client.get('/admin/model1_2/') data = rv.data.decode('utf-8') ok_('Column1' in data) ok_('Test2' not in data) def 
test_complex_list_columns(): app, db, admin = setup() M1, M2 = create_models(db) m1 = M1('model1_val1') db.session.add(m1) db.session.add(M2('model2_val1', model1=m1)) db.session.commit() # test column_list with a list of strings on a relation view = CustomModelView(M2, db.session, column_list=['model1.test1']) admin.add_view(view) client = app.test_client() rv = client.get('/admin/model2/') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('model1_val1' in data) def test_exclude_columns(): app, db, admin = setup() Model1, Model2 = create_models(db) view = CustomModelView( Model1, db.session, column_exclude_list=['test2', 'test4', 'enum_field', 'date_field', 'time_field', 'datetime_field'] ) admin.add_view(view) eq_( view._list_columns, [('test1', 'Test1'), ('test3', 'Test3'), ('bool_field', 'Bool Field')] ) client = app.test_client() rv = client.get('/admin/model1/') data = rv.data.decode('utf-8') ok_('Test1' in data) ok_('Test2' not in data) def test_column_searchable_list(): app, db, admin = setup() Model1, Model2 = create_models(db) view = CustomModelView(Model2, db.session, column_searchable_list=['string_field', 'int_field']) admin.add_view(view) eq_(view._search_supported, True) eq_(len(view._search_fields), 2) ok_(isinstance(view._search_fields[0][0], db.Column)) ok_(isinstance(view._search_fields[1][0], db.Column)) eq_(view._search_fields[0][0].name, 'string_field') eq_(view._search_fields[1][0].name, 'int_field') db.session.add(Model2('model1-test', 5000)) db.session.add(Model2('model2-test', 9000)) db.session.commit() client = app.test_client() rv = client.get('/admin/model2/?search=model1') data = rv.data.decode('utf-8') ok_('model1-test' in data) ok_('model2-test' not in data) rv = client.get('/admin/model2/?search=9000') data = rv.data.decode('utf-8') ok_('model1-test' not in data) ok_('model2-test' in data) def test_complex_searchable_list(): app, db, admin = setup() Model1, Model2 = create_models(db) view = CustomModelView(Model2, db.session, column_searchable_list=['model1.test1']) admin.add_view(view) m1 = Model1('model1-test1-val') m2 = Model1('model1-test2-val') db.session.add(m1) db.session.add(m2) db.session.add(Model2('model2-test1-val', model1=m1)) db.session.add(Model2('model2-test2-val', model1=m2)) db.session.commit() client = app.test_client() # test relation string - 'model1.test1' rv = client.get('/admin/model2/?search=model1-test1') data = rv.data.decode('utf-8') ok_('model2-test1-val' in data) ok_('model2-test2-val' not in data) view2 = CustomModelView(Model1, db.session, column_searchable_list=[Model2.string_field]) admin.add_view(view2) # test relation object - Model2.string_field rv = client.get('/admin/model1/?search=model2-test1') data = rv.data.decode('utf-8') ok_('model1-test1-val' in data) ok_('model1-test2-val' not in data) def test_complex_searchable_list_missing_children(): app, db, admin = setup() Model1, Model2 = create_models(db) view = CustomModelView(Model1, db.session, column_searchable_list=[ 'test1', 'model2.string_field']) admin.add_view(view) db.session.add(Model1('magic string')) db.session.commit() client = app.test_client() rv = client.get('/admin/model1/?search=magic') data = rv.data.decode('utf-8') ok_('magic string' in data) def test_column_editable_list(): app, db, admin = setup() Model1, Model2 = create_models(db) view = CustomModelView(Model1, db.session, column_editable_list=['test1', 'enum_field']) admin.add_view(view) fill_db(db, Model1, Model2) client = app.test_client() # Test in-line edit field rendering rv = 
client.get('/admin/model1/') data = rv.data.decode('utf-8') ok_('data-role="x-editable"' in data) # Form - Test basic in-line edit functionality rv = client.post('/admin/model1/ajax/update/', data={ 'list_form_pk': '1', 'test1': 'change-success-1', }) data = rv.data.decode('utf-8') ok_('Record was successfully saved.' == data) # ensure the value has changed rv = client.get('/admin/model1/') data = rv.data.decode('utf-8') ok_('change-success-1' in data) # Test validation error rv = client.post('/admin/model1/ajax/update/', data={ 'list_form_pk': '1', 'enum_field': 'problematic-input', }) eq_(rv.status_code, 500) # Test invalid primary key rv = client.post('/admin/model1/ajax/update/', data={ 'list_form_pk': '1000', 'test1': 'problematic-input', }) data = rv.data.decode('utf-8') eq_(rv.status_code, 500) # Test editing column not in column_editable_list rv = client.post('/admin/model1/ajax/update/', data={ 'list_form_pk': '1', 'test2': 'problematic-input', }) data = rv.data.decode('utf-8') ok_('problematic-input' not in data) # Test in-line editing for relations view = CustomModelView(Model2, db.session, column_editable_list=['model1']) admin.add_view(view) rv = client.post('/admin/model2/ajax/update/', data={ 'list_form_pk': '1', 'model1': '3', }) data = rv.data.decode('utf-8') ok_('Record was successfully saved.' == data) # confirm the value has changed rv = client.get('/admin/model2/') data = rv.data.decode('utf-8') ok_('test1_val_3' in data) def test_details_view(): app, db, admin = setup() Model1, Model2 = create_models(db) view_no_details = CustomModelView(Model1, db.session) admin.add_view(view_no_details) # fields are scaffolded view_w_details = CustomModelView(Model2, db.session, can_view_details=True) admin.add_view(view_w_details) # show only specific fields in details w/ column_details_list string_field_view = CustomModelView(Model2, db.session, can_view_details=True, column_details_list=["string_field"], endpoint="sf_view") admin.add_view(string_field_view) fill_db(db, Model1, Model2) client = app.test_client() # ensure link to details is hidden when can_view_details is disabled rv = client.get('/admin/model1/') data = rv.data.decode('utf-8') ok_('/admin/model1/details/' not in data) # ensure link to details view appears rv = client.get('/admin/model2/') data = rv.data.decode('utf-8') ok_('/admin/model2/details/' in data) # test redirection when details are disabled rv = client.get('/admin/model1/details/?url=%2Fadmin%2Fmodel1%2F&id=1') eq_(rv.status_code, 302) # test if correct data appears in details view when enabled rv = client.get('/admin/model2/details/?url=%2Fadmin%2Fmodel2%2F&id=1') data = rv.data.decode('utf-8') ok_('String Field' in data) ok_('test2_val_1' in data) ok_('test1_val_1' in data) # test column_details_list rv = client.get('/admin/sf_view/details/?url=%2Fadmin%2Fsf_view%2F&id=1') data = rv.data.decode('utf-8') ok_('String Field' in data) ok_('test2_val_1' in data) ok_('test1_val_1' not in data) def test_editable_list_special_pks(): ''' Tests editable list view + a primary key with special characters ''' app, db, admin = setup() class Model1(db.Model): def __init__(self, id=None, val1=None): self.id = id self.val1 = val1 id = db.Column(db.String(20), primary_key=True) val1 = db.Column(db.String(20)) db.create_all() view = CustomModelView(Model1, db.session, column_editable_list=['val1']) admin.add_view(view) db.session.add(Model1('1-1', 'test1')) db.session.add(Model1('1-5', 'test2')) db.session.commit() client = app.test_client() # Form - Test basic in-line 
edit functionality rv = client.post('/admin/model1/ajax/update/', data={ 'list_form_pk': '1-1', 'val1': 'change-success-1', }) data = rv.data.decode('utf-8') ok_('Record was successfully saved.' == data) # ensure the value has changed rv = client.get('/admin/model1/') data = rv.data.decode('utf-8') ok_('change-success-1' in data) def test_column_filters(): app, db, admin = setup() Model1, Model2 = create_models(db) view = CustomModelView( Model1, db.session, column_filters=['test1'] ) admin.add_view(view) client = app.test_client() eq_(len(view._filters), 7) eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Test1']], [ (0, u'contains'), (1, u'not contains'), (2, u'equals'), (3, u'not equal'), (4, u'empty'), (5, u'in list'), (6, u'not in list'), ]) # Test filter that references property view = CustomModelView(Model2, db.session, column_filters=['model1']) eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Model1 / Test1']], [ (0, u'contains'), (1, u'not contains'), (2, u'equals'), (3, u'not equal'), (4, u'empty'), (5, u'in list'), (6, u'not in list'), ]) eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Model1 / Test2']], [ (7, u'contains'), (8, u'not contains'), (9, u'equals'), (10, u'not equal'), (11, u'empty'), (12, u'in list'), (13, u'not in list'), ]) eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Model1 / Test3']], [ (14, u'contains'), (15, u'not contains'), (16, u'equals'), (17, u'not equal'), (18, u'empty'), (19, u'in list'), (20, u'not in list'), ]) eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Model1 / Test4']], [ (21, u'contains'), (22, u'not contains'), (23, u'equals'), (24, u'not equal'), (25, u'empty'), (26, u'in list'), (27, u'not in list'), ]) eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Model1 / Bool Field']], [ (28, u'equals'), (29, u'not equal'), ]) eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Model1 / Enum Field']], [ (30, u'equals'), (31, u'not equal'), (32, u'empty'), (33, u'in list'), (34, u'not in list'), ]) # Test filter with a dot view = CustomModelView(Model2, db.session, column_filters=['model1.bool_field']) eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Model1 / Bool Field']], [ (0, 'equals'), (1, 'not equal'), ]) # Test column_labels on filters view = CustomModelView(Model2, db.session, column_filters=['model1.bool_field', 'string_field'], column_labels={ 'model1.bool_field': 'Test Filter #1', 'string_field': 'Test Filter #2', }) eq_(list(view._filter_groups.keys()), [u'Test Filter #1', u'Test Filter #2']) fill_db(db, Model1, Model2) # Test equals rv = client.get('/admin/model1/?flt0_0=test1_val_1') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') # the filter value is always in "data" # need to check a different column than test1 for the expected row ok_('test2_val_1' in data) ok_('test1_val_2' not in data) # Test NOT IN filter rv = client.get('/admin/model1/?flt0_6=test1_val_1') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test1_val_2' in data) ok_('test2_val_1' not in data) # Test string filter view = CustomModelView(Model1, db.session, column_filters=['test1'], endpoint='_strings') admin.add_view(view) eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Test1']], [ (0, 'contains'), (1, 'not contains'), (2, 'equals'), (3, 'not equal'), (4, 'empty'), (5, 'in list'), (6, 'not in list'), ]) # string - equals rv = client.get('/admin/_strings/?flt0_0=test1_val_1') eq_(rv.status_code, 200) 
data = rv.data.decode('utf-8') ok_('test2_val_1' in data) ok_('test1_val_2' not in data) # string - not equal rv = client.get('/admin/_strings/?flt0_1=test1_val_1') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2_val_1' not in data) ok_('test1_val_2' in data) # string - contains rv = client.get('/admin/_strings/?flt0_2=test1_val_1') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2_val_1' in data) ok_('test1_val_2' not in data) # string - not contains rv = client.get('/admin/_strings/?flt0_3=test1_val_1') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2_val_1' not in data) ok_('test1_val_2' in data) # string - empty rv = client.get('/admin/_strings/?flt0_4=1') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('empty_obj' in data) ok_('test1_val_1' not in data) ok_('test1_val_2' not in data) # string - not empty rv = client.get('/admin/_strings/?flt0_4=0') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('empty_obj' not in data) ok_('test1_val_1' in data) ok_('test1_val_2' in data) # string - in list rv = client.get('/admin/_strings/?flt0_5=test1_val_1%2Ctest1_val_2') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2_val_1' in data) ok_('test2_val_2' in data) ok_('test1_val_3' not in data) ok_('test1_val_4' not in data) # string - not in list rv = client.get('/admin/_strings/?flt0_6=test1_val_1%2Ctest1_val_2') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2_val_1' not in data) ok_('test2_val_2' not in data) ok_('test1_val_3' in data) ok_('test1_val_4' in data) # Test integer filter view = CustomModelView(Model2, db.session, column_filters=['int_field']) admin.add_view(view) eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Int Field']], [ (0, 'equals'), (1, 'not equal'), (2, 'greater than'), (3, 'smaller than'), (4, 'empty'), (5, 'in list'), (6, 'not in list'), ]) # integer - equals rv = client.get('/admin/model2/?flt0_0=5000') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2_val_3' in data) ok_('test2_val_4' not in data) # integer - equals (huge number) rv = client.get('/admin/model2/?flt0_0=6169453081680413441') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2_val_5' in data) ok_('test2_val_4' not in data) # integer - equals - test validation rv = client.get('/admin/model2/?flt0_0=badval') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('Invalid Filter Value' in data) # integer - not equal rv = client.get('/admin/model2/?flt0_1=5000') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2_val_3' not in data) ok_('test2_val_4' in data) # integer - greater rv = client.get('/admin/model2/?flt0_2=6000') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2_val_3' not in data) ok_('test2_val_4' in data) # integer - smaller rv = client.get('/admin/model2/?flt0_3=6000') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2_val_3' in data) ok_('test2_val_4' not in data) # integer - empty rv = client.get('/admin/model2/?flt0_4=1') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2_val_1' in data) ok_('test2_val_2' in data) ok_('test2_val_3' not in data) ok_('test2_val_4' not in data) # integer - not empty rv = client.get('/admin/model2/?flt0_4=0') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2_val_1' not in data) ok_('test2_val_2' not in data) ok_('test2_val_3' in data) ok_('test2_val_4' in data) # integer - in list rv = 
client.get('/admin/model2/?flt0_5=5000%2C9000') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2_val_1' not in data) ok_('test2_val_2' not in data) ok_('test2_val_3' in data) ok_('test2_val_4' in data) # integer - in list (huge number) rv = client.get('/admin/model2/?flt0_5=6169453081680413441') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2_val_1' not in data) ok_('test2_val_5' in data) # integer - in list - test validation rv = client.get('/admin/model2/?flt0_5=5000%2Cbadval') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('Invalid Filter Value' in data) # integer - not in list rv = client.get('/admin/model2/?flt0_6=5000%2C9000') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2_val_1' in data) ok_('test2_val_2' in data) ok_('test2_val_3' not in data) ok_('test2_val_4' not in data) # Test boolean filter view = CustomModelView(Model1, db.session, column_filters=['bool_field'], endpoint="_bools") admin.add_view(view) eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Bool Field']], [ (0, 'equals'), (1, 'not equal'), ]) # boolean - equals - Yes rv = client.get('/admin/_bools/?flt0_0=1') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2_val_1' in data) ok_('test2_val_2' not in data) ok_('test2_val_3' not in data) # boolean - equals - No rv = client.get('/admin/_bools/?flt0_0=0') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2_val_1' not in data) ok_('test2_val_2' in data) ok_('test2_val_3' in data) # boolean - not equals - Yes rv = client.get('/admin/_bools/?flt0_1=1') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2_val_1' not in data) ok_('test2_val_2' in data) ok_('test2_val_3' in data) # boolean - not equals - No rv = client.get('/admin/_bools/?flt0_1=0') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2_val_1' in data) ok_('test2_val_2' not in data) ok_('test2_val_3' not in data) # Test float filter view = CustomModelView(Model2, db.session, column_filters=['float_field'], endpoint="_float") admin.add_view(view) eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Float Field']], [ (0, 'equals'), (1, 'not equal'), (2, 'greater than'), (3, 'smaller than'), (4, 'empty'), (5, 'in list'), (6, 'not in list'), ]) # float - equals rv = client.get('/admin/_float/?flt0_0=25.9') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2_val_3' in data) ok_('test2_val_4' not in data) # float - equals - test validation rv = client.get('/admin/_float/?flt0_0=badval') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('Invalid Filter Value' in data) # float - not equal rv = client.get('/admin/_float/?flt0_1=25.9') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2_val_3' not in data) ok_('test2_val_4' in data) # float - greater rv = client.get('/admin/_float/?flt0_2=60.5') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2_val_3' not in data) ok_('test2_val_4' in data) # float - smaller rv = client.get('/admin/_float/?flt0_3=60.5') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2_val_3' in data) ok_('test2_val_4' not in data) # float - empty rv = client.get('/admin/_float/?flt0_4=1') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2_val_1' in data) ok_('test2_val_2' in data) ok_('test2_val_3' not in data) ok_('test2_val_4' not in data) # float - not empty rv = client.get('/admin/_float/?flt0_4=0') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') 
ok_('test2_val_1' not in data) ok_('test2_val_2' not in data) ok_('test2_val_3' in data) ok_('test2_val_4' in data) # float - in list rv = client.get('/admin/_float/?flt0_5=25.9%2C75.5') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2_val_1' not in data) ok_('test2_val_2' not in data) ok_('test2_val_3' in data) ok_('test2_val_4' in data) # float - in list - test validation rv = client.get('/admin/_float/?flt0_5=25.9%2Cbadval') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('Invalid Filter Value' in data) # float - not in list rv = client.get('/admin/_float/?flt0_6=25.9%2C75.5') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2_val_1' in data) ok_('test2_val_2' in data) ok_('test2_val_3' not in data) ok_('test2_val_4' not in data) # Test filters to joined table field view = CustomModelView( Model2, db.session, endpoint='_model2', column_filters=['model1.bool_field'], column_list=[ 'string_field', 'model1.id', 'model1.bool_field', ] ) admin.add_view(view) rv = client.get('/admin/_model2/?flt1_0=1') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2_val_1' in data) ok_('test2_val_2' not in data) ok_('test2_val_3' not in data) ok_('test2_val_4' not in data) # Test human readable URLs view = CustomModelView( Model1, db.session, column_filters=['test1'], endpoint='_model3', named_filter_urls=True ) admin.add_view(view) rv = client.get('/admin/_model3/?flt1_test1_equals=test1_val_1') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test1_val_1' in data) ok_('test1_val_2' not in data) # Test date, time, and datetime filters view = CustomModelView(Model1, db.session, column_filters=['date_field', 'datetime_field', 'time_field'], endpoint="_datetime") admin.add_view(view) eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Date Field']], [ (0, 'equals'), (1, 'not equal'), (2, 'greater than'), (3, 'smaller than'), (4, 'between'), (5, 'not between'), (6, 'empty'), ]) eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Datetime Field']], [ (7, 'equals'), (8, 'not equal'), (9, 'greater than'), (10, 'smaller than'), (11, 'between'), (12, 'not between'), (13, 'empty'), ]) eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Time Field']], [ (14, 'equals'), (15, 'not equal'), (16, 'greater than'), (17, 'smaller than'), (18, 'between'), (19, 'not between'), (20, 'empty'), ]) # date - equals rv = client.get('/admin/_datetime/?flt0_0=2014-11-17') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('date_obj1' in data) ok_('date_obj2' not in data) # date - not equal rv = client.get('/admin/_datetime/?flt0_1=2014-11-17') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('date_obj1' not in data) ok_('date_obj2' in data) # date - greater rv = client.get('/admin/_datetime/?flt0_2=2014-11-16') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('date_obj1' in data) ok_('date_obj2' not in data) # date - smaller rv = client.get('/admin/_datetime/?flt0_3=2014-11-16') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('date_obj1' not in data) ok_('date_obj2' in data) # date - between rv = client.get('/admin/_datetime/?flt0_4=2014-11-13+to+2014-11-20') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('date_obj1' in data) ok_('date_obj2' not in data) # date - not between rv = client.get('/admin/_datetime/?flt0_5=2014-11-13+to+2014-11-20') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('date_obj1' not in data) ok_('date_obj2' in data) # date - empty 
rv = client.get('/admin/_datetime/?flt0_6=1') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test1_val_1' in data) ok_('date_obj1' not in data) ok_('date_obj2' not in data) # date - empty rv = client.get('/admin/_datetime/?flt0_6=0') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test1_val_1' not in data) ok_('date_obj1' in data) ok_('date_obj2' in data) # datetime - equals rv = client.get('/admin/_datetime/?flt0_7=2014-04-03+01%3A09%3A00') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('datetime_obj1' in data) ok_('datetime_obj2' not in data) # datetime - not equal rv = client.get('/admin/_datetime/?flt0_8=2014-04-03+01%3A09%3A00') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('datetime_obj1' not in data) ok_('datetime_obj2' in data) # datetime - greater rv = client.get('/admin/_datetime/?flt0_9=2014-04-03+01%3A08%3A00') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('datetime_obj1' in data) ok_('datetime_obj2' not in data) # datetime - smaller rv = client.get('/admin/_datetime/?flt0_10=2014-04-03+01%3A08%3A00') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('datetime_obj1' not in data) ok_('datetime_obj2' in data) # datetime - between rv = client.get('/admin/_datetime/?flt0_11=2014-04-02+00%3A00%3A00+to+2014-11-20+23%3A59%3A59') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('datetime_obj1' in data) ok_('datetime_obj2' not in data) # datetime - not between rv = client.get('/admin/_datetime/?flt0_12=2014-04-02+00%3A00%3A00+to+2014-11-20+23%3A59%3A59') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('datetime_obj1' not in data) ok_('datetime_obj2' in data) # datetime - empty rv = client.get('/admin/_datetime/?flt0_13=1') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test1_val_1' in data) ok_('datetime_obj1' not in data) ok_('datetime_obj2' not in data) # datetime - not empty rv = client.get('/admin/_datetime/?flt0_13=0') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test1_val_1' not in data) ok_('datetime_obj1' in data) ok_('datetime_obj2' in data) # time - equals rv = client.get('/admin/_datetime/?flt0_14=11%3A10%3A09') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('timeonly_obj1' in data) ok_('timeonly_obj2' not in data) # time - not equal rv = client.get('/admin/_datetime/?flt0_15=11%3A10%3A09') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('timeonly_obj1' not in data) ok_('timeonly_obj2' in data) # time - greater rv = client.get('/admin/_datetime/?flt0_16=11%3A09%3A09') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('timeonly_obj1' in data) ok_('timeonly_obj2' not in data) # time - smaller rv = client.get('/admin/_datetime/?flt0_17=11%3A09%3A09') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('timeonly_obj1' not in data) ok_('timeonly_obj2' in data) # time - between rv = client.get('/admin/_datetime/?flt0_18=10%3A40%3A00+to+11%3A50%3A59') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('timeonly_obj1' in data) ok_('timeonly_obj2' not in data) # time - not between rv = client.get('/admin/_datetime/?flt0_19=10%3A40%3A00+to+11%3A50%3A59') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('timeonly_obj1' not in data) ok_('timeonly_obj2' in data) # time - empty rv = client.get('/admin/_datetime/?flt0_20=1') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test1_val_1' in data) ok_('timeonly_obj1' not in data) ok_('timeonly_obj2' not in data) # time - not empty rv = 
client.get('/admin/_datetime/?flt0_20=0') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test1_val_1' not in data) ok_('timeonly_obj1' in data) ok_('timeonly_obj2' in data) # Test enum filter view = CustomModelView(Model1, db.session, column_filters=['enum_field'], endpoint="_enumfield") admin.add_view(view) # enum - equals rv = client.get('/admin/_enumfield/?flt0_0=model1_v1') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('enum_obj1' in data) ok_('enum_obj2' not in data) # enum - not equal rv = client.get('/admin/_enumfield/?flt0_1=model1_v1') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('enum_obj1' not in data) ok_('enum_obj2' in data) # enum - empty rv = client.get('/admin/_enumfield/?flt0_2=1') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test1_val_1' in data) ok_('enum_obj1' not in data) ok_('enum_obj2' not in data) # enum - not empty rv = client.get('/admin/_enumfield/?flt0_2=0') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test1_val_1' not in data) ok_('enum_obj1' in data) ok_('enum_obj2' in data) # enum - in list rv = client.get('/admin/_enumfield/?flt0_3=model1_v1%2Cmodel1_v2') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test1_val_1' not in data) ok_('enum_obj1' in data) ok_('enum_obj2' in data) # enum - not in list rv = client.get('/admin/_enumfield/?flt0_4=model1_v1%2Cmodel1_v2') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test1_val_1' in data) ok_('enum_obj1' not in data) ok_('enum_obj2' not in data) # Test single custom filter on relation view = CustomModelView(Model2, db.session, column_filters = [ filters.FilterEqual(Model1.test1, "Test1") ], endpoint='_relation_test') admin.add_view(view) rv = client.get('/admin/_relation_test/?flt1_0=test1_val_1') data = rv.data.decode('utf-8') ok_('test1_val_1' in data) ok_('test1_val_2' not in data) def test_hybrid_property(): app, db, admin = setup() class Model1(db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String) width = db.Column(db.Integer) height = db.Column(db.Integer) @hybrid_property def number_of_pixels(self): return self.width * self.height db.create_all() db.session.add(Model1(id=1, name="test_row_1", width=25, height=25)) db.session.add(Model1(id=2, name="test_row_2", width=10, height=10)) db.session.commit() client = app.test_client() view = CustomModelView( Model1, db.session, column_default_sort='number_of_pixels', column_filters = [filters.IntGreaterFilter(Model1.number_of_pixels, 'Number of Pixels')] ) admin.add_view(view) # filters - hybrid_property integer - greater rv = client.get('/admin/model1/?flt0_0=600') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test_row_1' in data) ok_('test_row_2' not in data) # sorting rv = client.get('/admin/model1/?sort=0') eq_(rv.status_code, 200) _, data = view.get_list(0, None, None, None, None) eq_(len(data), 2) eq_(data[0].name, 'test_row_2') eq_(data[1].name, 'test_row_1') def test_url_args(): app, db, admin = setup() Model1, Model2 = create_models(db) view = CustomModelView(Model1, db.session, page_size=2, column_searchable_list=['test1'], column_filters=['test1']) admin.add_view(view) db.session.add(Model1('data1')) db.session.add(Model1('data2')) db.session.add(Model1('data3')) db.session.add(Model1('data4')) db.session.commit() client = app.test_client() rv = client.get('/admin/model1/') data = rv.data.decode('utf-8') ok_('data1' in data) ok_('data3' not in data) # page rv = client.get('/admin/model1/?page=1') data = 
rv.data.decode('utf-8') ok_('data1' not in data) ok_('data3' in data) # sort rv = client.get('/admin/model1/?sort=0&desc=1') data = rv.data.decode('utf-8') ok_('data1' not in data) ok_('data3' in data) ok_('data4' in data) # search rv = client.get('/admin/model1/?search=data1') data = rv.data.decode('utf-8') ok_('data1' in data) ok_('data2' not in data) rv = client.get('/admin/model1/?search=^data1') data = rv.data.decode('utf-8') ok_('data2' not in data) # like rv = client.get('/admin/model1/?flt0=0&flt0v=data1') data = rv.data.decode('utf-8') ok_('data1' in data) # not like rv = client.get('/admin/model1/?flt0=1&flt0v=data1') data = rv.data.decode('utf-8') ok_('data2' in data) def test_non_int_pk(): app, db, admin = setup() class Model(db.Model): id = db.Column(db.String, primary_key=True) test = db.Column(db.String) db.create_all() view = CustomModelView(Model, db.session, form_columns=['id', 'test']) admin.add_view(view) client = app.test_client() rv = client.get('/admin/model/') eq_(rv.status_code, 200) rv = client.post('/admin/model/new/', data=dict(id='test1', test='test2')) eq_(rv.status_code, 302) rv = client.get('/admin/model/') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test1' in data) rv = client.get('/admin/model/edit/?id=test1') eq_(rv.status_code, 200) data = rv.data.decode('utf-8') ok_('test2' in data) def test_form_columns(): app, db, admin = setup() class Model(db.Model): id = db.Column(db.String, primary_key=True) int_field = db.Column(db.Integer) datetime_field = db.Column(db.DateTime) text_field = db.Column(db.UnicodeText) excluded_column = db.Column(db.String) class ChildModel(db.Model): id = db.Column(db.String, primary_key=True) model_id = db.Column(db.Integer, db.ForeignKey(Model.id)) model = db.relationship(Model, backref='backref') db.create_all() view1 = CustomModelView(Model, db.session, endpoint='view1', form_columns=('int_field', 'text_field')) view2 = CustomModelView(Model, db.session, endpoint='view2', form_excluded_columns=('excluded_column',)) view3 = CustomModelView(ChildModel, db.session, endpoint='view3') form1 = view1.create_form() form2 = view2.create_form() form3 = view3.create_form() ok_('int_field' in form1._fields) ok_('text_field' in form1._fields) ok_('datetime_field' not in form1._fields) ok_('excluded_column' not in form2._fields) ok_(type(form3.model).__name__ == 'QuerySelectField') # test form_columns with model objects view4 = CustomModelView(Model, db.session, endpoint='view1', form_columns=[Model.int_field]) form4 = view4.create_form() ok_('int_field' in form4._fields) @raises(Exception) def test_complex_form_columns(): app, db, admin = setup() M1, M2 = create_models(db) # test using a form column in another table view = CustomModelView(M2, db.session, form_columns=['model1.test1']) form = view.create_form() def test_form_args(): app, db, admin = setup() class Model(db.Model): id = db.Column(db.String, primary_key=True) test = db.Column(db.String, nullable=False) db.create_all() shared_form_args = {'test': {'validators': [validators.Regexp('test')]}} view = CustomModelView(Model, db.session, form_args=shared_form_args) admin.add_view(view) create_form = view.create_form() eq_(len(create_form.test.validators), 2) # ensure shared field_args don't create duplicate validators edit_form = view.edit_form() eq_(len(edit_form.test.validators), 2) def test_form_override(): app, db, admin = setup() class Model(db.Model): id = db.Column(db.String, primary_key=True) test = db.Column(db.String) db.create_all() view1 = 
CustomModelView(Model, db.session, endpoint='view1') view2 = CustomModelView(Model, db.session, endpoint='view2', form_overrides=dict(test=fields.FileField)) admin.add_view(view1) admin.add_view(view2) eq_(view1._create_form_class.test.field_class, fields.StringField) eq_(view2._create_form_class.test.field_class, fields.FileField) def test_form_onetoone(): app, db, admin = setup() class Model1(db.Model): id = db.Column(db.Integer, primary_key=True) test = db.Column(db.String) class Model2(db.Model): id = db.Column(db.Integer, primary_key=True) model1_id = db.Column(db.Integer, db.ForeignKey(Model1.id)) model1 = db.relationship(Model1, backref=db.backref('model2', uselist=False)) db.create_all() view1 = CustomModelView(Model1, db.session, endpoint='view1') view2 = CustomModelView(Model2, db.session, endpoint='view2') admin.add_view(view1) admin.add_view(view2) model1 = Model1(test='test') model2 = Model2(model1=model1) db.session.add(model1) db.session.add(model2) db.session.commit() eq_(model1.model2, model2) eq_(model2.model1, model1) eq_(view1._create_form_class.model2.field_class.widget.multiple, False) eq_(view2._create_form_class.model1.field_class.widget.multiple, False) def test_relations(): # TODO: test relations pass def test_on_model_change_delete(): app, db, admin = setup() Model1, _ = create_models(db) class ModelView(CustomModelView): def on_model_change(self, form, model, is_created): model.test1 = model.test1.upper() def on_model_delete(self, model): self.deleted = True view = ModelView(Model1, db.session) admin.add_view(view) client = app.test_client() client.post('/admin/model1/new/', data=dict(test1='test1large', test2='test2')) model = db.session.query(Model1).first() eq_(model.test1, 'TEST1LARGE') url = '/admin/model1/edit/?id=%s' % model.id client.post(url, data=dict(test1='test1small', test2='test2large')) model = db.session.query(Model1).first() eq_(model.test1, 'TEST1SMALL') url = '/admin/model1/delete/?id=%s' % model.id client.post(url) ok_(view.deleted) def test_multiple_delete(): app, db, admin = setup() M1, _ = create_models(db) db.session.add_all([M1('a'), M1('b'), M1('c')]) db.session.commit() eq_(M1.query.count(), 3) view = ModelView(M1, db.session) admin.add_view(view) client = app.test_client() rv = client.post('/admin/model1/action/', data=dict(action='delete', rowid=[1, 2, 3])) eq_(rv.status_code, 302) eq_(M1.query.count(), 0) def test_default_sort(): app, db, admin = setup() M1, _ = create_models(db) db.session.add_all([M1('c'), M1('b'), M1('a')]) db.session.commit() eq_(M1.query.count(), 3) view = CustomModelView(M1, db.session, column_default_sort='test1') admin.add_view(view) _, data = view.get_list(0, None, None, None, None) eq_(len(data), 3) eq_(data[0].test1, 'a') eq_(data[1].test1, 'b') eq_(data[2].test1, 'c') # test default sort on renamed columns - with column_list scaffolding view2 = CustomModelView(M1, db.session, column_default_sort='test1', column_labels={'test1': 'blah'}, endpoint='m1_2') admin.add_view(view2) _, data = view2.get_list(0, None, None, None, None) eq_(len(data), 3) eq_(data[0].test1, 'a') eq_(data[1].test1, 'b') eq_(data[2].test1, 'c') # test default sort on renamed columns - without column_list scaffolding view3 = CustomModelView(M1, db.session, column_default_sort='test1', column_labels={'test1': 'blah'}, endpoint='m1_3', column_list=['test1']) admin.add_view(view3) _, data = view3.get_list(0, None, None, None, None) eq_(len(data), 3) eq_(data[0].test1, 'a') eq_(data[1].test1, 'b') eq_(data[2].test1, 'c') def 
test_complex_sort(): app, db, admin = setup() M1, M2 = create_models(db) m1 = M1('b') db.session.add(m1) db.session.add(M2('c', model1=m1)) m2 = M1('a') db.session.add(m2) db.session.add(M2('c', model1=m2)) db.session.commit() # test sorting on relation string - 'model1.test1' view = CustomModelView(M2, db.session, column_list=['string_field', 'model1.test1'], column_sortable_list=['model1.test1']) admin.add_view(view) client = app.test_client() rv = client.get('/admin/model2/?sort=1') eq_(rv.status_code, 200) @raises(Exception) def test_complex_sort_exception(): app, db, admin = setup() M1, M2 = create_models(db) # test column_sortable_list on a related table's column object view = CustomModelView(M2, db.session, endpoint="model2_3", column_sortable_list=[M1.test1]) admin.add_view(view) sort_column = view._get_column_by_idx(0)[0] _, data = view.get_list(0, sort_column, False, None, None) eq_(len(data), 2) eq_(data[0].model1.test1, 'a') eq_(data[1].model1.test1, 'b') def test_default_complex_sort(): app, db, admin = setup() M1, M2 = create_models(db) m1 = M1('b') db.session.add(m1) db.session.add(M2('c', model1=m1)) m2 = M1('a') db.session.add(m2) db.session.add(M2('c', model1=m2)) db.session.commit() view = CustomModelView(M2, db.session, column_default_sort='model1.test1') admin.add_view(view) _, data = view.get_list(0, None, None, None, None) eq_(len(data), 2) eq_(data[0].model1.test1, 'a') eq_(data[1].model1.test1, 'b') # test column_default_sort on a related table's column object view2 = CustomModelView(M2, db.session, endpoint="model2_2", column_default_sort=(M1.test1, False)) admin.add_view(view2) _, data = view2.get_list(0, None, None, None, None) eq_(len(data), 2) eq_(data[0].model1.test1, 'a') eq_(data[1].model1.test1, 'b') def test_extra_fields(): app, db, admin = setup() Model1, _ = create_models(db) view = CustomModelView( Model1, db.session, form_extra_fields={ 'extra_field': fields.StringField('Extra Field') } ) admin.add_view(view) client = app.test_client() rv = client.get('/admin/model1/new/') eq_(rv.status_code, 200) # Check presence and order data = rv.data.decode('utf-8') ok_('Extra Field' in data) pos1 = data.find('Extra Field') pos2 = data.find('Test1') ok_(pos2 < pos1) def test_extra_field_order(): app, db, admin = setup() Model1, _ = create_models(db) view = CustomModelView( Model1, db.session, form_columns=('extra_field', 'test1'), form_extra_fields={ 'extra_field': fields.StringField('Extra Field') } ) admin.add_view(view) client = app.test_client() rv = client.get('/admin/model1/new/') eq_(rv.status_code, 200) # Check presence and order data = rv.data.decode('utf-8') pos1 = data.find('Extra Field') pos2 = data.find('Test1') ok_(pos2 > pos1) def test_modelview_localization(): def test_locale(locale): try: app, db, admin = setup() app.config['BABEL_DEFAULT_LOCALE'] = locale babel = Babel(app) Model1, _ = create_models(db) view = CustomModelView( Model1, db.session, column_filters=['test1', 'bool_field', 'date_field', 'datetime_field', 'time_field'] ) admin.add_view(view) client = app.test_client() rv = client.get('/admin/model1/') eq_(rv.status_code, 200) rv = client.get('/admin/model1/new/') eq_(rv.status_code, 200) except: print("Error on the following locale:", locale) raise locales = ['en', 'cs', 'de', 'es', 'fa', 'fr', 'pt', 'ru', 'zh_CN', 'zh_TW'] for locale in locales: test_locale(locale) def test_modelview_named_filter_localization(): app, db, admin = setup() app.config['BABEL_DEFAULT_LOCALE'] = 'de' Babel(app) Model1, _ = create_models(db) view = 
CustomModelView( Model1, db.session, named_filter_urls=True, column_filters=['test1'], ) filters = view.get_filters() flt = filters[2] with app.test_request_context(): flt_name = view.get_filter_arg(2, flt) eq_('test1_equals', flt_name) def test_custom_form_base(): app, db, admin = setup() class TestForm(form.BaseForm): pass Model1, _ = create_models(db) view = CustomModelView( Model1, db.session, form_base_class=TestForm ) admin.add_view(view) ok_(hasattr(view._create_form_class, 'test1')) create_form = view.create_form() ok_(isinstance(create_form, TestForm)) def test_ajax_fk(): app, db, admin = setup() Model1, Model2 = create_models(db) view = CustomModelView( Model2, db.session, url='view', form_ajax_refs={ 'model1': { 'fields': ('test1', 'test2') } } ) admin.add_view(view) ok_(u'model1' in view._form_ajax_refs) model = Model1(u'first') model2 = Model1(u'foo', u'bar') db.session.add_all([model, model2]) db.session.commit() # Check loader loader = view._form_ajax_refs[u'model1'] mdl = loader.get_one(model.id) eq_(mdl.test1, model.test1) items = loader.get_list(u'fir') eq_(len(items), 1) eq_(items[0].id, model.id) items = loader.get_list(u'bar') eq_(len(items), 1) eq_(items[0].test1, u'foo') # Check form generation form = view.create_form() eq_(form.model1.__class__.__name__, u'AjaxSelectField') with app.test_request_context('/admin/view/'): ok_(u'value=""' not in form.model1()) form.model1.data = model ok_(u'data-json="[%s, &quot;first&quot;]"' % model.id in form.model1()) ok_(u'value="1"' in form.model1()) # Check querying client = app.test_client() req = client.get(u'/admin/view/ajax/lookup/?name=model1&query=foo') eq_(req.data.decode('utf-8'), u'[[%s, "foo"]]' % model2.id) # Check submitting req = client.post('/admin/view/new/', data={u'model1': as_unicode(model.id)}) mdl = db.session.query(Model2).first() ok_(mdl is not None) ok_(mdl.model1 is not None) eq_(mdl.model1.id, model.id) eq_(mdl.model1.test1, u'first') def test_ajax_fk_multi(): app, db, admin = setup() class Model1(db.Model): __tablename__ = 'model1' id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(20)) def __str__(self): return self.name table = db.Table('m2m', db.Model.metadata, db.Column('model1_id', db.Integer, db.ForeignKey('model1.id')), db.Column('model2_id', db.Integer, db.ForeignKey('model2.id')) ) class Model2(db.Model): __tablename__ = 'model2' id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(20)) model1_id = db.Column(db.Integer(), db.ForeignKey(Model1.id)) model1 = db.relationship(Model1, backref='models2', secondary=table) db.create_all() view = CustomModelView( Model2, db.session, url='view', form_ajax_refs={ 'model1': { 'fields': ['name'] } } ) admin.add_view(view) ok_(u'model1' in view._form_ajax_refs) model = Model1(name=u'first') db.session.add_all([model, Model1(name=u'foo')]) db.session.commit() # Check form generation form = view.create_form() eq_(form.model1.__class__.__name__, u'AjaxSelectMultipleField') with app.test_request_context('/admin/view/'): ok_(u'data-json="[]"' in form.model1()) form.model1.data = [model] ok_(u'data-json="[[1, &quot;first&quot;]]"' in form.model1()) # Check submitting client = app.test_client() client.post('/admin/view/new/', data={u'model1': as_unicode(model.id)}) mdl = db.session.query(Model2).first() ok_(mdl is not None) ok_(mdl.model1 is not None) eq_(len(mdl.model1), 1) def test_safe_redirect(): app, db, admin = setup() Model1, _ = create_models(db) view = CustomModelView(Model1, db.session) admin.add_view(view) client = 
app.test_client() rv = client.post('/admin/model1/new/?url=http://localhost/admin/model2view/', data=dict(test1='test1large', test2='test2', _continue_editing='Save and Continue Editing')) eq_(rv.status_code, 302) assert_true(rv.location.startswith('http://localhost/admin/model1/edit/')) assert_true('url=http%3A%2F%2Flocalhost%2Fadmin%2Fmodel2view%2F' in rv.location) assert_true('id=1' in rv.location) rv = client.post('/admin/model1/new/?url=http://google.com/evil/', data=dict(test1='test1large', test2='test2', _continue_editing='Save and Continue Editing')) eq_(rv.status_code, 302) assert_true(rv.location.startswith('http://localhost/admin/model1/edit/')) assert_true('url=%2Fadmin%2Fmodel1%2F' in rv.location) assert_true('id=2' in rv.location) def test_simple_list_pager(): app, db, admin = setup() Model1, _ = create_models(db) class TestModelView(CustomModelView): simple_list_pager = True def get_count_query(self): assert False view = TestModelView(Model1, db.session) admin.add_view(view) count, data = view.get_list(0, None, None, None, None) assert_true(count is None) def test_unlimited_page_size(): app, db, admin = setup() M1, _ = create_models(db) db.session.add_all([M1('1'), M1('2'), M1('3'), M1('4'), M1('5'), M1('6'), M1('7'), M1('8'), M1('9'), M1('10'), M1('11'), M1('12'), M1('13'), M1('14'), M1('15'), M1('16'), M1('17'), M1('18'), M1('19'), M1('20'), M1('21')]) view = CustomModelView(M1, db.session) # test 0 as page_size _, data = view.get_list(0, None, None, None, None, execute=True, page_size=0) eq_(len(data), 21) # test False as page_size _, data = view.get_list(0, None, None, None, None, execute=True, page_size=False) eq_(len(data), 21) def test_advanced_joins(): app, db, admin = setup() class Model1(db.Model): id = db.Column(db.Integer, primary_key=True) val1 = db.Column(db.String(20)) test = db.Column(db.String(20)) class Model2(db.Model): id = db.Column(db.Integer, primary_key=True) val2 = db.Column(db.String(20)) model1_id = db.Column(db.Integer, db.ForeignKey(Model1.id)) model1 = db.relationship(Model1, backref='model2') class Model3(db.Model): id = db.Column(db.Integer, primary_key=True) val2 = db.Column(db.String(20)) model2_id = db.Column(db.Integer, db.ForeignKey(Model2.id)) model2 = db.relationship(Model2, backref='model3') view1 = CustomModelView(Model1, db.session) admin.add_view(view1) view2 = CustomModelView(Model2, db.session) admin.add_view(view2) view3 = CustomModelView(Model3, db.session) admin.add_view(view3) # Test joins attr, path = tools.get_field_with_path(Model2, 'model1.val1') eq_(attr, Model1.val1) eq_(path, [Model2.model1]) attr, path = tools.get_field_with_path(Model1, 'model2.val2') eq_(attr, Model2.val2) eq_(id(path[0]), id(Model1.model2)) attr, path = tools.get_field_with_path(Model3, 'model2.model1.val1') eq_(attr, Model1.val1) eq_(path, [Model3.model2, Model2.model1]) # Test how joins are applied query = view3.get_query() joins = {} q1, joins, alias = view3._apply_path_joins(query, joins, path) ok_((True, Model3.model2) in joins) ok_((True, Model2.model1) in joins) ok_(alias is not None) # Check if another join would use same path attr, path = tools.get_field_with_path(Model2, 'model1.test') q2, joins, alias = view2._apply_path_joins(query, joins, path) eq_(len(joins), 2) for p in q2._join_entities: ok_(p in q1._join_entities) ok_(alias is not None) # Check if normal properties are supported by tools.get_field_with_path attr, path = tools.get_field_with_path(Model2, Model1.test) eq_(attr, Model1.test) eq_(path, [Model1.__table__]) q3, joins, 
alias = view2._apply_path_joins(view2.get_query(), joins, path) eq_(len(joins), 3) ok_(alias is None) def test_multipath_joins(): app, db, admin = setup() class Model1(db.Model): id = db.Column(db.Integer, primary_key=True) val1 = db.Column(db.String(20)) test = db.Column(db.String(20)) class Model2(db.Model): id = db.Column(db.Integer, primary_key=True) val2 = db.Column(db.String(20)) first_id = db.Column(db.Integer, db.ForeignKey(Model1.id)) first = db.relationship(Model1, backref='first', foreign_keys=[first_id]) second_id = db.Column(db.Integer, db.ForeignKey(Model1.id)) second = db.relationship(Model1, backref='second', foreign_keys=[second_id]) db.create_all() view = CustomModelView(Model2, db.session, filters=['first.test']) admin.add_view(view) client = app.test_client() rv = client.get('/admin/model2/') eq_(rv.status_code, 200) def test_model_default(): app, db, admin = setup() _, Model2 = create_models(db) class ModelView(CustomModelView): pass view = ModelView(Model2, db.session) admin.add_view(view) client = app.test_client() rv = client.post('/admin/model2/new/', data=dict()) assert_true(b'This field is required' not in rv.data) def test_export_csv(): app, db, admin = setup() Model1, Model2 = create_models(db) for x in range(5): fill_db(db, Model1, Model2) view = CustomModelView(Model1, db.session, can_export=True, column_list=['test1', 'test2'], export_max_rows=2, endpoint='row_limit_2') admin.add_view(view) client = app.test_client() # test export_max_rows rv = client.get('/admin/row_limit_2/export/csv/') data = rv.data.decode('utf-8') eq_(rv.status_code, 200) ok_("Test1,Test2\r\n" "test1_val_1,test2_val_1\r\n" "test1_val_2,test2_val_2\r\n" == data) view = CustomModelView(Model1, db.session, can_export=True, column_list=['test1', 'test2'], endpoint='no_row_limit') admin.add_view(view) # test row limit without export_max_rows rv = client.get('/admin/no_row_limit/export/csv/') data = rv.data.decode('utf-8') eq_(rv.status_code, 200) ok_(len(data.splitlines()) > 21)
""" Implementation of the forest model for classification in Deep Forest. This class is modified from: https://github.com/scikit-learn/scikit-learn/blob/main/sklearn/ensemble/_forest.py """ __all__ = ["RandomForestClassifier", "ExtraTreesClassifier"] import numbers from warnings import warn import threading from typing import List from abc import ABCMeta, abstractmethod import numpy as np from scipy.sparse import issparse from joblib import Parallel, delayed from joblib import effective_n_jobs from sklearn.base import clone from sklearn.base import BaseEstimator from sklearn.base import MetaEstimatorMixin from sklearn.base import ClassifierMixin, MultiOutputMixin from sklearn.utils import check_random_state, compute_sample_weight from sklearn.exceptions import DataConversionWarning from sklearn.utils.fixes import _joblib_parallel_args from sklearn.utils.validation import check_is_fitted, _check_sample_weight from sklearn.utils.validation import _deprecate_positional_args from . import _cutils as _LIB from . import _forest as _C_FOREST from .tree import DecisionTreeClassifier, ExtraTreeClassifier from .tree._tree import DOUBLE MAX_INT = np.iinfo(np.int32).max def _get_n_samples_bootstrap(n_samples, max_samples): """ Get the number of samples in a bootstrap sample. Parameters ---------- n_samples : int Number of samples in the dataset. max_samples : int or float The maximum number of samples to draw from the total available: - if float, this indicates a fraction of the total and should be the interval `(0, 1)`; - if int, this indicates the exact number of samples; - if None, this indicates the total number of samples. Returns ------- n_samples_bootstrap : int The total number of samples to draw for the bootstrap sample. """ if max_samples is None: return n_samples if isinstance(max_samples, numbers.Integral): if not (1 <= max_samples <= n_samples): msg = "`max_samples` must be in range 1 to {} but got value {}" raise ValueError(msg.format(n_samples, max_samples)) return max_samples if isinstance(max_samples, numbers.Real): if not (0 < max_samples < 1): msg = "`max_samples` must be in range (0, 1) but got value {}" raise ValueError(msg.format(max_samples)) return int(round(n_samples * max_samples)) msg = "`max_samples` should be int or float, but got type '{}'" raise TypeError(msg.format(type(max_samples))) def _generate_sample_mask(random_state, n_samples, n_samples_bootstrap): """Private function used to _parallel_build_trees function.""" random_instance = check_random_state(random_state) sample_indices = random_instance.randint(0, n_samples, n_samples_bootstrap) sample_indices = sample_indices.astype(np.int32) sample_mask = _LIB._c_sample_mask(sample_indices, n_samples) return sample_mask def _parallel_build_trees( tree, X, y, n_samples_bootstrap, sample_weight, out, lock, ): """ Private function used to fit a single tree in parallel.""" n_samples = X.shape[0] sample_mask = _generate_sample_mask( tree.random_state, n_samples, n_samples_bootstrap ) # Fit the tree on the bootstrapped samples if sample_weight is not None: sample_weight = sample_weight[sample_mask] feature, threshold, children, value = tree.fit( X[sample_mask], y[sample_mask], sample_weight=sample_weight, check_input=False, ) if not children.flags["C_CONTIGUOUS"]: children = np.ascontiguousarray(children) if not value.flags["C_CONTIGUOUS"]: value = np.ascontiguousarray(value) value = np.squeeze(value, axis=1) value /= value.sum(axis=1)[:, np.newaxis] # Set the OOB predictions oob_prediction = _C_FOREST.predict( X[~sample_mask, 
:], feature, threshold, children, value ) with lock: out[~sample_mask, :] += oob_prediction return feature, threshold, children, value # [Source] Sklearn.ensemble._base.py def _set_random_states(estimator, random_state=None): """Set fixed random_state parameters for an estimator. Finds all parameters ending ``random_state`` and sets them to integers derived from ``random_state``. Parameters ---------- estimator : estimator supporting get/set_params Estimator with potential randomness managed by random_state parameters. random_state : int or RandomState, default=None Pseudo-random number generator to control the generation of the random integers. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Notes ----- This does not necessarily set *all* ``random_state`` attributes that control an estimator's randomness, only those accessible through ``estimator.get_params()``. ``random_state``s not controlled include those belonging to: * cross-validation splitters * ``scipy.stats`` rvs """ random_state = check_random_state(random_state) to_set = {} for key in sorted(estimator.get_params(deep=True)): if key == "random_state" or key.endswith("__random_state"): to_set[key] = random_state.randint(np.iinfo(np.int32).max) if to_set: estimator.set_params(**to_set) # [Source] Sklearn.ensemble._base.py def _partition_estimators(n_estimators, n_jobs): """Private function used to partition estimators between jobs.""" # Compute the number of jobs n_jobs = min(effective_n_jobs(n_jobs), n_estimators) # Partition estimators between jobs n_estimators_per_job = np.full( n_jobs, n_estimators // n_jobs, dtype=np.int ) n_estimators_per_job[: n_estimators % n_jobs] += 1 starts = np.cumsum(n_estimators_per_job) return n_jobs, n_estimators_per_job.tolist(), [0] + starts.tolist() def _accumulate_prediction(feature, threshold, children, value, X, out, lock): """This is a utility function for joblib's Parallel.""" prediction = _C_FOREST.predict(X, feature, threshold, children, value) with lock: if len(out) == 1: out[0] += prediction else: for i in range(len(out)): out[i] += prediction[i] # [Source] Sklearn.ensemble._base.py class BaseEnsemble(MetaEstimatorMixin, BaseEstimator, metaclass=ABCMeta): """Base class for all ensemble classes. Warning: This class should not be used directly. Use derived classes instead. Parameters ---------- base_estimator : object The base estimator from which the ensemble is built. n_estimators : int, default=10 The number of estimators in the ensemble. estimator_params : list of str, default=tuple() The list of attributes to use as parameters when instantiating a new base estimator. If none are given, default parameters are used. Attributes ---------- base_estimator_ : estimator The base estimator from which the ensemble is grown. estimators_ : list of estimators The collection of fitted base estimators. """ # overwrite _required_parameters from MetaEstimatorMixin _required_parameters: List[str] = [] @abstractmethod def __init__( self, base_estimator, *, n_estimators=10, estimator_params=tuple() ): # Set parameters self.base_estimator = base_estimator self.n_estimators = n_estimators self.estimator_params = estimator_params # Don't instantiate estimators now! Parameters of base_estimator might # still change. Eg., when grid-searching with the nested object syntax. # self.estimators_ needs to be filled by the derived classes in fit. def _validate_estimator(self, default=None): """Check the estimator and the n_estimator attribute. 
Sets the base_estimator_` attributes. """ if not isinstance(self.n_estimators, numbers.Integral): raise ValueError( "n_estimators must be an integer, " "got {0}.".format(type(self.n_estimators)) ) if self.n_estimators <= 0: raise ValueError( "n_estimators must be greater than zero, " "got {0}.".format(self.n_estimators) ) if self.base_estimator is not None: self.base_estimator_ = self.base_estimator else: self.base_estimator_ = default if self.base_estimator_ is None: raise ValueError("base_estimator cannot be None") def _make_estimator(self, append=True, random_state=None): """Make and configure a copy of the `base_estimator_` attribute. Warning: This method should be used to properly instantiate new sub-estimators. """ estimator = clone(self.base_estimator_) estimator.set_params( **{p: getattr(self, p) for p in self.estimator_params} ) # Pass the inferred class information to avoid redudant finding. estimator.classes_ = self.classes_ estimator.n_classes_ = np.array(self.n_classes_, dtype=np.int32) if random_state is not None: _set_random_states(estimator, random_state) if append: self.estimators_.append(estimator) return estimator def __len__(self): """Return the number of estimators in the ensemble.""" return len(self.estimators_) def __getitem__(self, index): """Return the index'th estimator in the ensemble.""" return self.estimators_[index] def __iter__(self): """Return iterator over estimators in the ensemble.""" return iter(self.estimators_) class BaseForest(MultiOutputMixin, BaseEnsemble, metaclass=ABCMeta): """ Base class for forests of trees. Warning: This class should not be used directly. Use derived classes instead. """ @abstractmethod def __init__( self, base_estimator, n_estimators=100, *, estimator_params=tuple(), n_jobs=None, random_state=None, verbose=0, class_weight=None, max_samples=None ): super().__init__( base_estimator=base_estimator, n_estimators=n_estimators, estimator_params=estimator_params, ) self.n_jobs = n_jobs self.random_state = random_state self.verbose = verbose self.class_weight = class_weight self.max_samples = max_samples # Internal containers self.features = [] self.thresholds = [] self.childrens = [] self.values = [] def fit(self, X, y, sample_weight=None): """ Build a forest of trees from the training set (X, y). Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Internally, its dtype will be converted to ``dtype=np.float32``. If a sparse matrix is provided, it will be converted into a sparse ``csc_matrix``. y : array-like of shape (n_samples,) or (n_samples, n_outputs) The target values (class labels in classification, real numbers in regression). sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. In the case of classification, splits are also ignored if they would result in any single class carrying a negative weight in either child node. Returns ------- self : object """ # Validate or convert input data if issparse(y): raise ValueError( "sparse multilabel-indicator for y is not supported." ) if sample_weight is not None: sample_weight = _check_sample_weight(sample_weight, X) if issparse(X): # Pre-sort indices to avoid that each individual tree of the # ensemble sorts the indices. 
X.sort_indices() # Remap output n_samples, self.n_features_ = X.shape y = np.atleast_1d(y) if y.ndim == 2 and y.shape[1] == 1: warn( "A column-vector y was passed when a 1d array was" " expected. Please change the shape of y to " "(n_samples,), for example using ravel().", DataConversionWarning, stacklevel=2, ) if y.ndim == 1: # reshape is necessary to preserve the data contiguity against vs # [:, np.newaxis] that does not. y = np.reshape(y, (-1, 1)) self.n_outputs_ = y.shape[1] y, expanded_class_weight = self._validate_y_class_weight(y) if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous: y = np.ascontiguousarray(y, dtype=DOUBLE) # Get bootstrap sample size n_samples_bootstrap = _get_n_samples_bootstrap( n_samples=X.shape[0], max_samples=self.max_samples ) # Check parameters self._validate_estimator() random_state = check_random_state(self.random_state) n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs) trees = [ self._make_estimator(append=False, random_state=random_state) for i in range(self.n_estimators) ] # Pre-allocate OOB estimations oob_decision_function = np.zeros( (n_samples, self.classes_[0].shape[0]) ) lock = threading.Lock() rets = Parallel( n_jobs=n_jobs, verbose=self.verbose, **_joblib_parallel_args(prefer="threads", require="sharedmem") )( delayed(_parallel_build_trees)( t, X, y, n_samples_bootstrap, sample_weight, oob_decision_function, lock, ) for i, t in enumerate(trees) ) # Collect newly grown trees for feature, threshold, children, value in rets: # No check on feature and threshold since 1-D array is always # C-aligned and F-aligned. self.features.append(feature) self.thresholds.append(threshold) self.childrens.append(children) self.values.append(value) # Check the OOB predictions if (oob_decision_function.sum(axis=1) == 0).any(): warn( "Some inputs do not have OOB predictions. " "This probably means too few trees were used " "to compute any reliable oob predictions." ) prediction = ( oob_decision_function / oob_decision_function.sum(axis=1)[:, np.newaxis] ) self.oob_decision_function_ = prediction # Decapsulate classes_ attributes if hasattr(self, "classes_") and self.n_outputs_ == 1: self.n_classes_ = self.n_classes_[0] self.classes_ = self.classes_[0] return self def _validate_y_class_weight(self, y): # Default implementation return y, None class ForestClassifier(ClassifierMixin, BaseForest, metaclass=ABCMeta): """ Base class for forest of trees-based classifiers. Warning: This class should not be used directly. Use derived classes instead. 
""" @abstractmethod def __init__( self, base_estimator, n_estimators=100, *, estimator_params=tuple(), n_jobs=None, random_state=None, verbose=0, class_weight=None, max_samples=None ): super().__init__( base_estimator, n_estimators=n_estimators, estimator_params=estimator_params, n_jobs=n_jobs, random_state=random_state, verbose=verbose, class_weight=class_weight, max_samples=max_samples, ) def _validate_y_class_weight(self, y): y = np.copy(y) expanded_class_weight = None if self.class_weight is not None: y_original = np.copy(y) self.classes_ = [] self.n_classes_ = [] y_store_unique_indices = np.zeros(y.shape, dtype=np.int) for k in range(self.n_outputs_): classes_k, y_store_unique_indices[:, k] = np.unique( y[:, k], return_inverse=True ) self.classes_.append(classes_k) self.n_classes_.append(classes_k.shape[0]) y = y_store_unique_indices if self.class_weight is not None: valid_presets = ("balanced", "balanced_subsample") if isinstance(self.class_weight, str): if self.class_weight not in valid_presets: raise ValueError( "Valid presets for class_weight include " '"balanced" and "balanced_subsample".' 'Given "%s".' % self.class_weight ) if self.class_weight != "balanced_subsample" or not self.bootstrap: if self.class_weight == "balanced_subsample": class_weight = "balanced" else: class_weight = self.class_weight expanded_class_weight = compute_sample_weight( class_weight, y_original ) return y, expanded_class_weight def predict(self, X): proba = self.predict_proba(X) return self.classes_.take(np.argmax(proba, axis=1), axis=0) def predict_proba(self, X): check_is_fitted(self) # Assign chunk of trees to jobs n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs) # Avoid storing the output of every estimator by summing them here all_proba = [ np.zeros((X.shape[0], j), dtype=np.float64) for j in np.atleast_1d(self.n_classes_) ] lock = threading.Lock() Parallel( n_jobs=n_jobs, verbose=self.verbose, **_joblib_parallel_args(require="sharedmem") )( delayed(_accumulate_prediction)( self.features[i], self.thresholds[i], self.childrens[i], self.values[i], X, all_proba, lock, ) for i in range(self.n_estimators) ) for proba in all_proba: proba /= len(self.features) if len(all_proba) == 1: return all_proba[0] else: return all_proba class RandomForestClassifier(ForestClassifier): @_deprecate_positional_args def __init__( self, n_estimators=100, *, criterion="gini", max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features="sqrt", min_impurity_decrease=0.0, min_impurity_split=None, n_jobs=None, random_state=None, verbose=0, class_weight=None, max_samples=None ): super().__init__( base_estimator=DecisionTreeClassifier(), n_estimators=n_estimators, estimator_params=( "criterion", "max_depth", "min_samples_split", "min_samples_leaf", "min_weight_fraction_leaf", "max_features", "min_impurity_decrease", "min_impurity_split", "random_state", ), n_jobs=n_jobs, random_state=random_state, verbose=verbose, class_weight=class_weight, max_samples=max_samples, ) self.criterion = criterion self.max_depth = max_depth self.min_samples_split = min_samples_split self.min_samples_leaf = min_samples_leaf self.min_weight_fraction_leaf = min_weight_fraction_leaf self.max_features = max_features self.min_impurity_decrease = min_impurity_decrease self.min_impurity_split = min_impurity_split class ExtraTreesClassifier(ForestClassifier): @_deprecate_positional_args def __init__( self, n_estimators=100, *, criterion="gini", max_depth=None, min_samples_split=2, 
min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features="sqrt", min_impurity_decrease=0.0, min_impurity_split=None, n_jobs=None, random_state=None, verbose=0, class_weight=None, max_samples=None ): super().__init__( base_estimator=ExtraTreeClassifier(), n_estimators=n_estimators, estimator_params=( "criterion", "max_depth", "min_samples_split", "min_samples_leaf", "min_weight_fraction_leaf", "max_features", "min_impurity_decrease", "min_impurity_split", "random_state", ), n_jobs=n_jobs, random_state=random_state, verbose=verbose, class_weight=class_weight, max_samples=max_samples, ) self.criterion = criterion self.max_depth = max_depth self.min_samples_split = min_samples_split self.min_samples_leaf = min_samples_leaf self.min_weight_fraction_leaf = min_weight_fraction_leaf self.max_features = max_features self.min_impurity_decrease = min_impurity_decrease self.min_impurity_split = min_impurity_split
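

# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a pure-Python view of
# what a compiled routine such as ``_C_FOREST.predict`` is assumed to do with
# the flattened tree arrays collected in ``self.features``, ``self.thresholds``,
# ``self.childrens`` and ``self.values``.  The node layout is an assumption:
# ``children[node] == (left, right)`` with ``-1`` marking a leaf, and
# ``value[node]`` holding that leaf's per-class probabilities.
# -----------------------------------------------------------------------------
def _predict_single_tree_sketch(X, feature, threshold, children, value):
    """Traverse one flattened tree for every row of X (illustration only)."""
    import numpy as np

    out = np.empty((X.shape[0], value.shape[1]), dtype=np.float64)
    for i, x in enumerate(X):
        node = 0
        while children[node, 0] != -1:  # assumed leaf marker
            if x[feature[node]] <= threshold[node]:
                node = children[node, 0]  # go left
            else:
                node = children[node, 1]  # go right
        out[i] = value[node]
    return out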
from chia.protocols.protocol_message_types import ProtocolMessageTypes as pmt, ProtocolMessageTypes NO_REPLY_EXPECTED = [ # full_node -> full_node messages pmt.new_peak, pmt.new_transaction, pmt.new_unfinished_block, pmt.new_signage_point_or_end_of_sub_slot, pmt.request_mempool_transactions, pmt.new_compact_vdf, pmt.request_mempool_transactions, ] """ VALID_REPLY_MESSAGE_MAP: key: sent message type. value: valid reply message types, from the view of the requester. A state machine can be built from this message map. """ VALID_REPLY_MESSAGE_MAP = { # messages for all services # pmt.handshake is handled in WSChiaConnection.perform_handshake # full_node -> full_node protocol messages pmt.request_transaction: [pmt.respond_transaction], pmt.request_proof_of_weight: [pmt.respond_proof_of_weight], pmt.request_block: [pmt.respond_block, pmt.reject_block], pmt.request_blocks: [pmt.respond_blocks, pmt.reject_blocks], pmt.request_unfinished_block: [pmt.respond_unfinished_block], pmt.request_block_header: [pmt.respond_block_header, pmt.reject_header_request], pmt.request_signage_point_or_end_of_sub_slot: [pmt.respond_signage_point, pmt.respond_end_of_sub_slot], pmt.request_compact_vdf: [pmt.respond_compact_vdf], pmt.request_peers: [pmt.respond_peers], pmt.request_header_blocks: [pmt.respond_header_blocks, pmt.reject_header_blocks], } def static_check_sent_message_response() -> None: """Check that allowed message data structures VALID_REPLY_MESSAGE_MAP and NO_REPLY_EXPECTED are consistent.""" # Reply and non-reply sets should not overlap: This check should be static overlap = set(NO_REPLY_EXPECTED).intersection(set(VALID_REPLY_MESSAGE_MAP.keys())) if len(overlap) != 0: raise AssertionError(f"Overlapping NO_REPLY_EXPECTED and VALID_REPLY_MESSAGE_MAP values: {overlap}") def message_requires_reply(sent: ProtocolMessageTypes) -> bool: """Return True if message has an entry in the full node P2P message map""" # If we knew the peer NodeType is FULL_NODE, we could also check `sent not in NO_REPLY_EXPECTED` return sent in VALID_REPLY_MESSAGE_MAP def message_response_ok(sent: ProtocolMessageTypes, received: ProtocolMessageTypes) -> bool: """ Check to see that peers respect protocol message types in reply. Call with received == None to indicate that we do not expect a specific reply message type. """ # Errors below are runtime protocol message mismatches from peers if sent in VALID_REPLY_MESSAGE_MAP: if received not in VALID_REPLY_MESSAGE_MAP[sent]: return False return True # Run `static_check_sent_message_response` to check this static invariant at import time static_check_sent_message_response()
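

# Illustrative usage (not part of the original module): how a caller might use
# the helpers above to validate a peer's reply type.  The message pairs are
# taken directly from VALID_REPLY_MESSAGE_MAP / NO_REPLY_EXPECTED.
if __name__ == "__main__":
    assert message_requires_reply(pmt.request_block)
    assert message_response_ok(pmt.request_block, pmt.respond_block)
    assert message_response_ok(pmt.request_block, pmt.reject_block)
    # A reply type that is not listed for the request is rejected
    assert not message_response_ok(pmt.request_peers, pmt.respond_block)
    # Gossip-style messages expect no reply at all
    assert pmt.new_peak in NO_REPLY_EXPECTED and not message_requires_reply(pmt.new_peak)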
################################################################################ # Example : perform live fire detection in video using superpixel localization # and the superpixel trained version of the InceptionV1-OnFire CNN # Copyright (c) 2017/18 - Andrew Dunnings / Toby Breckon, Durham University, UK # License : https://github.com/tobybreckon/fire-detection-cnn/blob/master/LICENSE ################################################################################ import cv2 import os import sys import math import numpy as np ################################################################################ import tflearn from tflearn.layers.core import input_data, dropout, fully_connected from tflearn.layers.conv import conv_2d, max_pool_2d, avg_pool_2d from tflearn.layers.normalization import local_response_normalization from tflearn.layers.merge_ops import merge from tflearn.layers.estimator import regression ################################################################################ from inceptionV1OnFire import construct_inceptionv1onfire ################################################################################ # construct and display model model = construct_inceptionv1onfire (224, 224, training=False) print("Constructed SP-InceptionV1-OnFire ...") model.load(os.path.join("models/SP-InceptionV1-OnFire", "sp-inceptiononv1onfire"),weights_only=True) print("CNN network weights loaded...") ################################################################################ # network input sizes rows = 224 cols = 224 # display and loop settings keepProcessing = True; ################################################################################ if len(sys.argv) == 2: # load video file from first command line argument video = cv2.VideoCapture(sys.argv[1]) print("Loading video ...") # get video properties width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH)); height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)) res = cv2.VideoWriter('res.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (224,224)) while (keepProcessing): # get video frame from file, handle end of file ret, frame = video.read() if not ret: print("... 
end of video file reached"); break;

        # re-size image to network input size and perform prediction
        # (pass INTER_AREA via the keyword so it is applied as the interpolation
        # flag rather than being silently taken as the unused dst argument)
        small_frame = cv2.resize(frame, (rows, cols), interpolation=cv2.INTER_AREA);

        # OpenCV imgproc SLIC superpixels implementation below
        slic = cv2.ximgproc.createSuperpixelSLIC(small_frame, region_size=22)
        slic.iterate(10)

        # getLabels method returns the different superpixel segments
        segments = slic.getLabels()

        # loop over the unique segment values
        for (i, segVal) in enumerate(np.unique(segments)):

            # Construct a mask for the segment
            mask = np.zeros(small_frame.shape[:2], dtype = "uint8")
            mask[segments == segVal] = 255

            # get contours (first checking if OPENCV >= 4.x)
            if (int(cv2.__version__.split(".")[0]) >= 4):
                contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            else:
                im2, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

            # create the superpixel by applying the mask
            superpixel = cv2.bitwise_and(small_frame, small_frame, mask = mask)

            # use loaded model to make prediction on given superpixel segments
            output = model.predict([superpixel])

            # summarize the shape of the list of arrays
            print([a.shape for a in output])

            if round(output[0][0]) == 1:
                # if prediction for FIRE was TRUE (round to 1), draw GREEN contour for superpixel
                cv2.drawContours(small_frame, contours, -1, (0,255,0), 1)
            else:
                # if prediction for FIRE was FALSE, draw RED contour for superpixel
                cv2.drawContours(small_frame, contours, -1, (0,0,255), 1)

        # image display and key handling
        res.write(small_frame)
        #cv2.imwrite('res.jpg',mask)

        # wait fps time or less depending on processing time taken (e.g. 1000ms / 25 fps = 40 ms)
        key = cv2.waitKey(1)
        if (key == 27):
            keepProcessing = False;

else:
    print("usage: python superpixel-inceptionV1-OnFire.py videofile.ext");

################################################################################
# When everything is done, release the capture
video.release()
#res.release()
cv2.destroyAllWindows()
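
################################################################################
# Illustrative helper (not part of the original script; the helper name is
# hypothetical): the per-superpixel classification step from the main loop,
# factored out for clarity.  The model object and the round-to-1 == FIRE
# convention are taken from the loop above.
################################################################################

def classify_superpixel(model, frame, mask):
    # keep only the pixels belonging to this superpixel
    superpixel = cv2.bitwise_and(frame, frame, mask=mask)
    # run the SP-InceptionV1-OnFire CNN on the masked region
    output = model.predict([superpixel])
    # output[0][0] is the fire score; rounding to 1 means FIRE
    return round(output[0][0]) == 1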
"""Dump a MessagePack file as pretty-printed JSON.

Usage: python <this script> <file.msgpack>
"""
import sys
import msgpack
import json

# Read the raw MessagePack payload and decode it into Python objects.
with open(sys.argv[1], mode='rb') as msgpack_data:
    data = msgpack.unpackb(msgpack_data.read())

# Pretty-print the decoded structure as JSON on stdout.
print(json.dumps(data, indent=4))
__version__ = "1.3.21"
from .batch_norm import BatchNorm from .instance_norm import InstanceNorm from .layer_norm import LayerNorm from .graph_size_norm import GraphSizeNorm from .pair_norm import PairNorm from .msg_norm import MessageNorm __all__ = [ 'BatchNorm', 'InstanceNorm', 'LayerNorm', 'GraphSizeNorm', 'PairNorm', 'MessageNorm', ] classes = sorted(__all__)
from utils import StrDictMixin class LivingPlace: pass class House(StrDictMixin, LivingPlace): def __init__(self, floors_count, has_garage, has_electricity, rooms_count, balconies_count, has_pool, town): self.floors_count = floors_count self.has_garage = has_garage self.has_electricity = has_electricity self.rooms_count = rooms_count self.balconies_count = balconies_count self.town = town self.has_pool = has_pool @classmethod def in_sofia(cls, floors_count, rooms_count, balconies_count): return cls(floors_count, False, True, rooms_count, balconies_count, False, 'Sofia')
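

# Illustrative usage (not part of the original module): the base constructor
# sets every field explicitly, while the alternative constructor fixes the
# Sofia-specific defaults (no garage, electricity available, no pool).
if __name__ == '__main__':
    villa = House(2, True, True, 5, 2, True, 'Varna')
    flat_in_sofia = House.in_sofia(1, 3, 1)
    print(villa)
    print(flat_in_sofia)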
# built-ins import os import json from os.path import split as split_path, join as join_path from fnmatch import filter as fnfilter import logging import itertools as it import subprocess import tempfile as tmp # libraries import h5py from PIL import Image from scipy.ndimage.measurements import label from numpy import array, uint8, uint16, uint32, uint64, zeros, \ zeros_like, squeeze, fromstring, ndim, concatenate, newaxis, swapaxes, \ savetxt, unique, double, cumsum, ndarray import numpy as np from skimage.io.collection import alphanumeric_key from skimage.io import imread # local files from . import evaluate from . import morpho ### Auto-detect file format supported_image_extensions = ['png', 'tif', 'tiff', 'jpg', 'jpeg'] def read_image_stack(fn, *args, **kwargs): """Read a 3D volume of images in image or .h5 format into a numpy.ndarray. This function attempts to automatically determine input file types and wraps specific image-reading functions. Parameters ---------- fn : filename (string) A file path or glob pattern specifying one or more valid image files. The file format is automatically determined from this argument. *args : filenames (string, optional) More than one positional argument will be interpreted as a list of filenames pointing to all the 2D images in the stack. **kwargs : keyword arguments (optional) Arguments to be passed to the underlying functions. A 'crop' keyword argument is supported, as a list of length 6: [xmin, xmax, ymin, ymax, zmin, zmax]. Use 'None' for no crop in that coordinate. Returns ------- stack : 3-dimensional numpy ndarray Notes ----- If reading in .h5 format, keyword arguments are passed through to read_h5_stack(). Automatic file type detection may be deprecated in the future. """ # TODO: Refactor. Rather than have implicit designation of stack format # based on filenames (*_boundpred.h5, etc), require explicit parameters # in config JSON files. if os.path.isdir(fn): fn += '/' d, fn = split_path(os.path.expanduser(fn)) if len(d) == 0: d = '.' 
crop = kwargs.get('crop', [None]*6) if crop is None: crop = [None]*6 if len(crop) == 4: crop.extend([None]*2) elif len(crop) == 2: crop = [None]*4 + crop kwargs['crop'] = crop if any(fn.endswith(ext) for ext in supported_image_extensions): # image types, such as a set of pngs or a multi-page tiff xmin, xmax, ymin, ymax, zmin, zmax = crop if len(args) > 0 and type(args[0]) == str and args[0].endswith(fn[-3:]): # input is a list of filenames fns = [fn] + [split_path(f)[1] for f in args] else: # input is a filename pattern to match fns = fnfilter(os.listdir(d), fn) if len(fns) == 1 and fns[0].endswith('.tif'): stack = read_multi_page_tif(join_path(d,fns[0]), crop) else: fns.sort(key=alphanumeric_key) # sort filenames numerically fns = fns[zmin:zmax] im0 = imread(join_path(d, fns[0])) ars = (imread(join_path(d, fn)) for fn in fns) im0 = im0[xmin:xmax, ymin:ymax] dtype = im0.dtype stack = zeros((len(fns),)+im0.shape, dtype) for i, im in enumerate(ars): stack[i] = im[xmin:xmax,ymin:ymax] elif fn.endswith('_boundpred.h5') or fn.endswith('_processed.h5'): # Ilastik batch prediction output file stack = read_prediction_from_ilastik_batch(os.path.join(d,fn), **kwargs) elif fn.endswith('.h5'): # other HDF5 file stack = read_h5_stack(join_path(d,fn), *args, **kwargs) elif os.path.isfile(os.path.join(d, 'superpixel_to_segment_map.txt')): # Raveler export stack = raveler_to_labeled_volume(d, *args, **kwargs) return squeeze(stack) def write_image_stack(npy_vol, fn, **kwargs): """Write a numpy.ndarray 3D volume to a stack of images or an HDF5 file. Parameters ---------- npy_vol : numpy ndarray The volume to be written to disk. fn : string The filename to be written, or a format string when writing a 3D stack to a 2D format (e.g. a png image stack). **kwargs : keyword arguments Keyword arguments to be passed to wrapped functions. See corresponding docs for valid arguments. Returns ------- out : None Examples -------- >>> import numpy as np >>> from gala.imio import write_image_stack >>> im = 255 * np.array([ ... [[0, 1, 0], [1, 0, 1], [0, 1, 0]], ... [[1, 0, 1], [0, 1, 0], [1, 0, 1]]], dtype=uint8) >>> im.shape (2, 3, 3) >>> write_image_stack(im, 'image-example-%02i.png', axis=0) >>> import os >>> fns = sorted(filter(lambda x: x.endswith('.png'), os.listdir('.'))) >>> fns # two 3x3 images ['image-example-00.png', 'image-example-01.png'] >>> os.remove(fns[0]); os.remove(fns[1]) # doctest cleanup """ fn = os.path.expanduser(fn) if fn.endswith('.png'): write_png_image_stack(npy_vol, fn, **kwargs) elif fn.endswith('.h5'): write_h5_stack(npy_vol, fn, **kwargs) elif fn.endswith('.vtk'): write_vtk(npy_vol, fn, **kwargs) else: raise ValueError('Image format not supported: ' + fn + '\n') ### Standard image formats (png, tiff, etc.) def pil_to_numpy(img): """Convert an Image object to a numpy array. Parameters ---------- img : Image object (from the Python Imaging Library) Returns ------- ar : numpy ndarray The corresponding numpy array (same shape as the image) """ ar = squeeze(array(img.getdata()).reshape((img.size[1], img.size[0], -1))) return ar def read_multi_page_tif(fn, crop=[None]*6): """Read a multi-page tif file into a numpy array. Parameters ---------- fn : string The filename of the image file being read. Returns ------- ar : numpy ndarray The image stack in array format. Notes ----- Currently, only grayscale images are supported. 
""" xmin, xmax, ymin, ymax, zmin, zmax = crop img = Image.open(fn) pages = [] if zmin is not None and zmin > 0: img.seek(zmin) eof = False while not eof and img.tell() != zmax: pages.append(pil_to_numpy(img)[...,newaxis]) try: img.seek(img.tell()+1) except EOFError: eof = True return concatenate(pages, axis=-1) def write_png_image_stack(npy_vol, fn, axis=-1, bitdepth=None): """Write a numpy.ndarray 3D volume to a stack of .png images. Parameters ---------- npy_vol : numpy ndarray, shape (M, N, P) The volume to be written to disk. fn : format string The file pattern to which to write the volume. axis : int, optional (default = -1) The axis along which output the images. If the input array has shape (M, N, P), and axis is 1, the function will write N images of shape (M, P) to disk. In keeping with Python convention, -1 specifies the last axis. Returns ------- None : None No value is returned. Notes ----- Only 8-bit and 16-bit single-channel images are currently supported. """ npy_vol = swapaxes(npy_vol, 0, axis) fn = os.path.expanduser(fn) if 0 <= npy_vol.max() <= 1 and npy_vol.dtype == double: bitdepth = 16 if None else bitdepth imdtype = uint16 if bitdepth == 16 else uint8 npy_vol = ((2**bitdepth-1)*npy_vol).astype(imdtype) if 1 < npy_vol.max() < 256 and bitdepth is None or bitdepth == 8: mode = 'L' mode_base = 'L' npy_vol = uint8(npy_vol) elif 256 <= np.max(npy_vol) < 2**16 and bitdepth is None or \ bitdepth == 16: mode = 'I;16' mode_base = 'I' npy_vol = uint16(npy_vol) else: mode = 'RGBA' mode_base = 'RGBA' npy_vol = uint32(npy_vol) for z, pl in enumerate(npy_vol): im = Image.new(mode_base, pl.T.shape) im.frombytes(pl.tostring(), 'raw', mode) im.save(fn % z) ### VTK structured points array format def extract_segments(seg, ids): """Get a uint8 volume containing only the specified segment ids. Parameters ---------- seg : array of int The input segmentation. ids : list of int, maximum length 255 A list of segments to extract from `seg`. Returns ------- segs : array of uint8 A volume with 1, 2, ..., ``len(ids)`` labels where the required segments were, and 0 elsewhere. Notes ----- This function is designed to output volumes to VTK format for viewing in ITK-SNAP Examples -------- >>> segments = array([[45, 45, 51, 51], ... [45, 83, 83, 51]]) >>> extract_segments(segments, [83, 45]) array([[2, 2, 0, 0], [2, 1, 1, 0]], dtype=uint8) """ segs = np.zeros(seg.shape, dtype=np.uint8) for i, s in enumerate(ids): segs[seg == s] = i + 1 return segs numpy_type_to_vtk_string = { np.uint8:'unsigned_char', np.int8:'char', np.uint16:'unsigned_short', np.int16:'short', np.uint32:'unsigned_int', np.int32:'int', np.uint64:'unsigned_long', np.int64:'long', np.float32:'float', np.float64:'double' } vtk_string_to_numpy_type = \ dict([(v,k) for k, v in numpy_type_to_vtk_string.items()]) def write_vtk(ar, fn, spacing=[1.0, 1.0, 1.0]): """Write 3D volume to VTK structured points format file. Code adapted from Erik Vidholm's writeVTK.m Matlab implementation. Parameters ---------- ar : a numpy array, shape (M, N, P) The array to be written to disk. fn : string The desired output filename. spacing : iterable of float, optional (default: [1.0, 1.0, 1.0]) The voxel spacing in x, y, and z. Returns ------- None : None This function does not have a return value. 
""" # write header f = open(fn, 'wb') f.write(b'# vtk DataFile Version 3.0\n') f.write(b'created by write_vtk (Python implementation by JNI)\n') f.write(b'BINARY\n') f.write(b'DATASET STRUCTURED_POINTS\n') f.write(str.encode(' '.join(['DIMENSIONS'] + list(map(str, ar.shape[-1::-1]))) + '\n')) f.write(str.encode(' '.join(['ORIGIN'] + list(map(str, zeros(3)))) + '\n')) f.write(str.encode(' '.join(['SPACING'] + list(map(str, spacing))) + '\n')) f.write(str.encode('POINT_DATA ' + str(ar.size) + '\n')) f.write(str.encode('SCALARS image_data ' + numpy_type_to_vtk_string[ar.dtype.type] + '\n')) f.write(b'LOOKUP_TABLE default\n'); f.close() # write data as binary f = open(fn, 'ab') f.write(ar.tobytes()) f.close() def read_vtk(fin): """Read a numpy volume from a VTK structured points file. Code adapted from Erik Vidholm's readVTK.m Matlab implementation. Parameters ---------- fin : string The input filename. Returns ------- ar : numpy ndarray The array contained in the file. """ f = open(fin, 'rb') num_lines_in_header = 10 lines = [bytes.decode(f.readline()) for i in range(num_lines_in_header)] shape_line = [line for line in lines if line.startswith('DIMENSIONS')][0] type_line = [line for line in lines if line.startswith('SCALARS') or line.startswith('VECTORS')][0] ar_shape = [int(b) for b in shape_line.rstrip().split(' ')[-1:0:-1]] ar_type = vtk_string_to_numpy_type[type_line.rstrip().split(' ')[2]] if type_line.startswith('VECTORS'): ar_shape.append(-1) ar = fromstring(f.read(), ar_type).reshape(ar_shape) return ar ### HDF5 format def read_h5_stack(fn, group='stack', crop=[None]*6, **kwargs): """Read a volume in HDF5 format into numpy.ndarray. Parameters ---------- fn : string The filename of the input HDF5 file. group : string, optional (default 'stack') The group within the HDF5 file containing the dataset. crop : list of int, optional (default '[None]*6', no crop) A crop to get of the volume of interest. Only available for 2D and 3D volumes. Returns ------- stack : numpy ndarray The stack contained in fn, possibly cropped. """ fn = os.path.expanduser(fn) dset = h5py.File(fn, 'r') if group not in dset: raise ValueError("HDF5 file (%s) doesn't have group (%s)!" % (fn, group)) a = dset[group] if ndim(a) == 2: xmin, xmax, ymin, ymax = crop[:4] a = a[xmin:xmax, ymin:ymax] elif ndim(a) == 3: xmin, xmax, ymin, ymax, zmin, zmax = crop a = a[xmin:xmax, ymin:ymax, zmin:zmax] stack = array(a) dset.close() return stack def compute_sp_to_body_map(sps, bodies): """Return unique (sp, body) pairs from a superpixel map and segmentation. Parameters ---------- sps : numpy ndarray, arbitrary shape The superpixel (supervoxel) map. bodies : numpy ndarray, same shape as sps The corresponding segmentation. Returns ------- sp_to_body : numpy ndarray, shape (NUM_SPS, 2) Notes ----- No checks are made for sane inputs. This means that incorrect input, such as non-matching shapes, or superpixels mapping to more than one segment, will result in undefined behavior downstream with no warning. """ sp_to_body = unique(list(zip(sps.ravel(), bodies.ravel()))).astype(uint64) return sp_to_body def write_mapped_segmentation(superpixel_map, sp_to_body_map, fn, sp_group='stack', sp_to_body_group='transforms'): """Write a mapped segmentation to an HDF5 file. Parameters ---------- superpixel_map : numpy ndarray, arbitrary shape sp_to_body_map : numpy ndarray, shape (NUM_SPS, 2) A many-to-one map of superpixels to bodies (segments), specified as rows of (superpixel, body) pairs. fn : string The output filename. 
sp_group : string, optional (default 'stack') the group within the HDF5 file to store the superpixel map. sp_to_body_group : string, optional (default 'transforms') the group within the HDF5 file to store the superpixel to body map. Returns ------- None """ fn = os.path.expanduser(fn) fout = h5py.File(fn, 'w') fout.create_dataset(sp_group, data=superpixel_map) fout.create_dataset(sp_to_body_group, data=sp_to_body_map) fout.close() def read_mapped_segmentation(fn, sp_group='stack', sp_to_body_group='transforms'): """Read a volume in mapped HDF5 format into a numpy.ndarray pair. Parameters ---------- fn : string The filename to open. sp_group : string, optional (default 'stack') The group within the HDF5 file where the superpixel map is stored. sp_to_body_group : string, optional (default 'transforms') The group within the HDF5 file where the superpixel to body map is stored. Returns ------- segmentation : numpy ndarray, same shape as 'superpixels', int type The segmentation induced by the superpixels and map. """ sps, sp2body = read_mapped_segmentation_raw(fn, sp_group, sp_to_body_group) segmentation = apply_segmentation_map(sps, sp2body) return segmentation def apply_segmentation_map(superpixels, sp_to_body_map): """Return a segmentation from superpixels and a superpixel to body map. Parameters ---------- superpixels : numpy ndarray, arbitrary shape, int type A superpixel (or supervoxel) map (aka label field). sp_to_body_map : numpy ndarray, shape (NUM_SUPERPIXELS, 2), int type An array of (superpixel, body) map pairs. Returns ------- segmentation : numpy ndarray, same shape as 'superpixels', int type The segmentation induced by the superpixels and map. """ forward_map = np.zeros(sp_to_body_map[:, 0].max() + 1, sp_to_body_map.dtype) forward_map[sp_to_body_map[:, 0]] = sp_to_body_map[:, 1] segmentation = forward_map[superpixels] return segmentation def read_mapped_segmentation_raw(fn, sp_group='stack', sp_to_body_group='transforms'): """Read a volume in mapped HDF5 format into a numpy.ndarray pair. Parameters ---------- fn : string The filename to open. sp_group : string, optional (default 'stack') The group within the HDF5 file where the superpixel map is stored. sp_to_body_group : string, optional (default 'transforms') The group within the HDF5 file where the superpixel to body map is stored. Returns ------- sp_map : numpy ndarray, arbitrary shape The superpixel (or supervoxel) map. sp_to_body_map : numpy ndarray, shape (NUM_SUPERPIXELS, 2) The superpixel to body (segment) map, as (superpixel, body) pairs. """ fn = os.path.expanduser(fn) dset = h5py.File(fn, 'r') if sp_group not in dset: raise ValueError( "HDF5 file (%s) doesn't have group (%s)!" % (fn, sp_group)) if sp_to_body_group not in dset: raise ValueError( "HDF5 file (%s) doesn't have group (%s)!" % (fn, sp_to_body_group)) sp_map = array(dset[sp_group]) sp_to_body_map = array(dset[sp_to_body_group]) dset.close() return sp_map, sp_to_body_map def write_h5_stack(npy_vol, fn, group='stack', compression=None, chunks=None, shuffle=None, attrs=None): """Write a numpy.ndarray 3D volume to an HDF5 file. Parameters ---------- npy_vol : numpy ndarray The array to be saved to HDF5. fn : string The output filename. group : string, optional (default: 'stack') The group within the HDF5 file to write to. compression : {None, 'gzip', 'szip', 'lzf'}, optional (default: None) The compression to use, if any. 
Note that 'lzf' is only available through h5py, so implementations in other languages will not be able to read files created with this compression. chunks : tuple, True, or None (default: None) Whether to use chunking in the HDF5 dataset. Default is None. True lets h5py choose a chunk size automatically. Otherwise, use a tuple of int of the same length as `npy_vol.ndim`. From the h5py documentation: "In the real world, chunks of size 10kB - 300kB work best, especially for compression. Very small chunks lead to lots of overhead in the file, while very large chunks can result in inefficient I/O." shuffle : bool, optional Shuffle the bytes on disk to improve compression efficiency. attrs : dict, optional A dictionary, keyed by string, of attributes to append to the dataset. """ fn = os.path.expanduser(fn) fout = h5py.File(fn, 'a') if group in fout: del fout[group] fout.create_dataset(group, data=npy_vol, compression=compression, chunks=chunks, shuffle=shuffle) if attrs is not None: for attr, value in attrs.items(): fout[group].attrs[attr] = value fout.close() ### Raveler format def ucm_to_raveler(ucm, sp_threshold=0.0, body_threshold=0.1, **kwargs): """Return Raveler map from a UCM. Parameters ---------- ucm : numpy ndarray, shape (M, N, P) An ultrametric contour map. This is a map of scored segment boundaries such that if A, B, and C are segments, then score(A, B) = score(B, C) >= score(A, C), for some permutation of A, B, and C. A hierarchical agglomeration process produces a UCM. sp_threshold : float, optional (default: 0.0) The value for which to threshold the UCM to obtain the superpixels. body_threshold : float, optional (default: 0.1) The value for which to threshold the UCM to obtain the segments/bodies. The condition `body_threshold >= sp_threshold` should hold in order to obtain sensible results. **kwargs : dict, optional Keyword arguments to be passed through to `segs_to_raveler`. Returns ------- superpixels : numpy ndarray, shape (M, N, P) The superpixel map. Non-zero superpixels are unique to each plane. That is, `np.unique(superpixels[i])` and `np.unique(superpixels[j])` have only 0 as their intersection. sp_to_segment : numpy ndarray, shape (Q, 3) The superpixel to segment map. Segments are unique to each plane. The first number on each line is the plane number. segment_to_body : numpy ndarray, shape (R, 2) The segment to body map. """ sps = label(ucm < sp_threshold)[0] bodies = label(ucm <= body_threshold)[0] return segs_to_raveler(sps, bodies, **kwargs) def segs_to_raveler(sps, bodies, min_size=0, do_conn_comp=False, sps_out=None): """Return a Raveler tuple from 3D superpixel and body maps. Parameters ---------- sps : numpy ndarray, shape (M, N, P) The supervoxel map. bodies : numpy ndarray, shape (M, N, P) The body map. Superpixels should not map to more than one body. min_size : int, optional (default: 0) Superpixels smaller than this size on a particular plane are blacked out. do_conn_comp : bool (default: False) Whether to do a connected components operation on each plane. This is required if we want superpixels to be contiguous on each plane, since 3D-contiguous superpixels are not guaranteed to be contiguous along a slice. sps_out : numpy ndarray, shape (M, N, P), optional (default: None) A Raveler-compatible superpixel map, meaning that superpixels are unique to each plane along axis 0. (See `superpixels` in the return values.) If provided, this saves significant computation time. Returns ------- superpixels : numpy ndarray, shape (M, N, P) The superpixel map. 
Non-zero superpixels are unique to each plane. That is, `np.unique(superpixels[i])` and `np.unique(superpixels[j])` have only 0 as their intersection. sp_to_segment : numpy ndarray, shape (Q, 3) The superpixel to segment map. Segments are unique to each plane. The first number on each line is the plane number. segment_to_body : numpy ndarray, shape (R, 2) The segment to body map. """ if sps_out is None: sps_out = raveler_serial_section_map(sps, min_size, do_conn_comp, False) segment_map = raveler_serial_section_map(bodies, min_size, do_conn_comp) segment_to_body = unique(list(zip(segment_map.ravel(), bodies.ravel()))) segment_to_body = segment_to_body[segment_to_body[:,0] != 0] segment_to_body = concatenate((array([[0,0]]), segment_to_body), axis=0) sp_to_segment = [] for i, (sp_map_i, segment_map_i, body_map_i) in \ enumerate(zip(sps_out, segment_map, bodies)): segment_map_i *= sp_map_i.astype(bool) valid = (sp_map_i != 0) + (segment_map_i == 0) sp_to_segment.append( unique(list(zip(it.repeat(i), sp_map_i[valid], segment_map_i[valid])))) valid = segment_map != 0 logging.debug('plane %i done'%i) logging.info('total superpixels before: ' + str(len(unique(sps))) + ' total superpixels after: ' + str(len(unique(sps_out)))) sp_to_segment = concatenate(sp_to_segment, axis=0) return sps_out, sp_to_segment, segment_to_body def raveler_serial_section_map(nd_map, min_size=0, do_conn_comp=False, globally_unique_ids=True): """Produce `serial_section_map` and label one corner of each plane as 0. Raveler chokes when there are no pixels with label 0 on a plane, so this function produces the serial section map as normal but then adds a 0 to the [0, 0] corner of each plane, IF the volume doesn't already have 0 pixels. Notes ----- See `serial_section_map` for more info. """ nd_map = serial_section_map(nd_map, min_size, do_conn_comp, globally_unique_ids) if not (nd_map == 0).any(): nd_map[:,0,0] = 0 return nd_map def serial_section_map(nd_map, min_size=0, do_conn_comp=False, globally_unique_ids=True): """Produce a plane-by-plane superpixel map with unique IDs. Raveler requires sps to be unique and different on each plane. This function converts a fully 3D superpixel map to a serial-2D superpixel map compatible with Raveler. Parameters ---------- nd_map : np.ndarray, int, shape (M, N, P) The original superpixel map. min_size : int (optional, default 0) Remove superpixels smaller than this size (on each plane) do_conn_comp : bool (optional, default False) In some cases, a single supervoxel may result in two disconnected superpixels in 2D. Set to True to force these to have different IDs. globally_unique_ids : bool (optional, default True) If True, every plane has unique IDs, with plane n having IDs {i1, i2, ..., in} and plane n+1 having IDs {in+1, in+2, ..., in+ip}, and so on. Returns ------- relabeled_planes : np.ndarray, int, shape (M, N, P) A volume equal to nd_map but with superpixels relabeled along axis 0. That is, the input volume is reinterpreted as M slices of shape (N, P). """ if do_conn_comp: label_fct = label else: def label_fct(a): relabeled, fmap, imap = evaluate.relabel_from_one(a) return relabeled, len(imap) def remove_small(a): return morpho.remove_small_connected_components(a, min_size) mplanes = map(remove_small, nd_map) relabeled_planes, nids_per_plane = zip(*map(label_fct, mplanes)) start_ids = concatenate((array([0], int), cumsum(nids_per_plane)[:-1])) \ if globally_unique_ids else [0]*len(nids_per_plane) relabeled_planes = [(relabeled_plane + start_id)[newaxis, ...] 
for relabeled_plane, start_id in zip(relabeled_planes, start_ids)] return concatenate(relabeled_planes, axis=0) def write_to_raveler(sps, sp_to_segment, segment_to_body, directory, gray=None, raveler_dir='/usr/local/raveler-hdf', nproc_contours=16, body_annot=None): """Output a segmentation to Raveler format. Parameters ---------- sps : np.ndarray, int, shape (nplanes, nx, ny) The superpixel map. Superpixels can only occur on one plane. sp_to_segment : np.ndarray, int, shape (nsps + nplanes, 3) Superpixel-to-segment map as a 3 column list of (plane number, superpixel id, segment id). Segments must be unique to a plane, and each plane must contain the map {0: 0} segment_to_body: np.ndarray, int, shape (nsegments, 2) The segment to body map. directory: string The directory in which to write the stack. This directory and all necessary subdirectories will be created. gray: np.ndarray, uint8 or uint16, shape (nplanes, nx, ny) (optional) The grayscale images corresponding to the superpixel maps. raveler dir: string (optional, default `/usr/local/raveler-hdf`) Where Raveler is installed. nproc_contours: int (optional, default 16) How many processes to use when generating the Raveler contours. body_annot: dict or np.ndarray (optional) Either a dictionary to write to JSON in Raveler body annotation format, or a numpy ndarray of the segmentation from which to compute orphans and non traversing bodies (which then get written out as body annotations). Returns ------- None Notes ----- Raveler is the EM segmentation proofreading tool developed in-house at Janelia for the FlyEM project. """ sp_path = os.path.join(directory, 'superpixel_maps') im_path = os.path.join(directory, 'grayscale_maps') tile_path = os.path.join(directory, 'tiles') if not os.path.exists(directory): os.makedirs(directory) # write superpixel->segment->body maps savetxt(os.path.join(directory, 'superpixel_to_segment_map.txt'), sp_to_segment, '%i') savetxt(os.path.join(directory, 'segment_to_body_map.txt'), segment_to_body, '%i') # write superpixels if not os.path.exists(sp_path): os.mkdir(sp_path) write_png_image_stack(sps, os.path.join(sp_path, 'sp_map.%05i.png'), bitdepth=16, axis=0) # write grayscale if gray is not None: if not os.path.exists(im_path): os.mkdir(im_path) write_png_image_stack(gray, os.path.join(im_path, 'img.%05d.png'), axis=0) # body annotations if body_annot is not None: if type(body_annot) == ndarray: orphans = morpho.orphans(body_annot) non_traversing = morpho.non_traversing_segments(body_annot) body_annot = raveler_body_annotations(orphans, non_traversing) write_json(body_annot, os.path.join(directory, 'annotations-body.json')) # make tiles, bounding boxes, and contours, and compile HDF5 stack info. with tmp.TemporaryFile() as tmp_stdout: try: def call(arglist): return subprocess.call(arglist, stdout=tmp_stdout) r1 = call(['createtiles', im_path, sp_path, tile_path]) r2 = call(['bounds', directory]) r3 = call(['compilestack', directory]) except: logging.warning( 'Error during Raveler export post-processing step. ' + 'Possible causes are that you do not have Raveler installed ' + 'or you did not specify the correct installation path.') logging.warning('Return codes: %i, %i, %i' % (r1, r2, r3)) # with sys.exc_info() as ex: # logging.warning('Exception info:\n' + '\n'.join(map(str, ex))) # make permissions friendly for proofreaders. 
try: subprocess.call(['chmod', '-R', 'go=u', directory]) except: logging.warning('Could not change Raveler export permissions.') def raveler_output_shortcut(svs, seg, gray, outdir, sps_out=None): """Compute the Raveler format and write to directory, all at once. Parameters ---------- svs : np.ndarray, int, shape (M, N, P) The supervoxel map. seg : np.ndarray, int, shape (M, N, P) The segmentation map. It is assumed that no supervoxel crosses any segment boundary. gray : np.ndarray, uint8, shape (M, N, P) The grayscale EM images corresponding to the above segmentations. outdir : string The export directory for the Raveler volume. sps_out : np.ndarray, int, shape (M, N, P) (optional) The precomputed serial section 2D superpixel map. Output will be much faster if this is provided. Returns ------- sps_out : np.ndarray, int, shape (M, N, P) The computed serial section 2D superpixel map. Keep this when making multiple calls to `raveler_output_shortcut` with the same supervoxel map. """ sps_out, sp2seg, seg2body = segs_to_raveler(svs, seg, sps_out=sps_out) write_to_raveler(sps_out, sp2seg, seg2body, outdir, gray, body_annot=seg) return sps_out def raveler_body_annotations(orphans, non_traversing=None): """Return a Raveler body annotation dictionary of orphan segments. Orphans are labeled as body annotations with `not sure` status and a string indicating `orphan` in the comments field. Non-traversing segments have only one contact with the surface of the volume, and are labeled `does not traverse` in the comments. Parameters ---------- orphans : iterable of int The ID numbers corresponding to orphan segments. non_traversing : iterable of int (optional, default None) The ID numbers of segments having only one exit point in the volume. Returns ------- body_annotations : dict A dictionary containing entries for 'data' and 'metadata' as specified in the Raveler body annotations format [1, 2]. References ---------- [1] https://wiki.janelia.org/wiki/display/flyem/body+annotation+file+format and: [2] https://wiki.janelia.org/wiki/display/flyem/generic+file+format """ data = [{'status': 'not sure', 'comment': 'orphan', 'body ID': int(o)} for o in orphans] if non_traversing is not None: data.extend([{'status': 'not sure', 'comment': 'does not traverse', 'body ID': int(n)} for n in non_traversing]) metadata = {'description': 'body annotations', 'file version': 2} return {'data': data, 'metadata': metadata} def write_json(annot, fn='annotations-body.json', directory=None): """Write an annotation dictionary in Raveler format to a JSON file. The annotation file format is described in: https://wiki.janelia.org/wiki/display/flyem/body+annotation+file+format and: https://wiki.janelia.org/wiki/display/flyem/generic+file+format Parameters ---------- annot : dict A body annotations dictionary (described in pages above). fn : string (optional, default 'annotations-body.json') The filename to which to write the file. directory : string (optional, default None, or '.') A directory in which to write the file. Returns ------- None """ if directory is not None: fn = join_path(directory, fn) with open(fn, 'w') as f: json.dump(annot, f, indent=2) def raveler_rgba_to_int(im, ignore_alpha=True): """Convert a volume using Raveler's RGBA encoding to int. [1] Parameters ---------- im : np.ndarray, shape (M, N, P, 4) The image stack to be converted. ignore_alpha : bool, optional By default, the alpha channel does not encode anything. However, if we ever need 32 bits, it would be used. 
This function supports that with `ignore_alpha=False`. (default is True.) Returns ------- im_int : np.ndarray, shape (M, N, P) The label volume. References ---------- [1] https://wiki.janelia.org/wiki/display/flyem/Proofreading+data+and+formats """ if im.ndim == 4 and im.shape[3] == 4: if ignore_alpha: im = im[..., :3] im_int = (im * 255 ** np.arange(im.shape[3])).sum(axis=3) else: im_int = im return im_int def raveler_to_labeled_volume(rav_export_dir, get_glia=False, use_watershed=False, probability_map=None, crop=None): """Import a raveler export stack into a labeled segmented volume. Parameters ---------- rav_export_dir : string The directory containing the Raveler stack. get_glia : bool (optional, default False) Return the segment numbers corresponding to glia, if available. use_watershed : bool (optional, default False) Fill in 0-labeled voxels using watershed. probability_map : np.ndarray, same shape as volume to be read (optional) If `use_watershed` is True, use `probability_map` as the landscape. If this is not provided, it uses a flat landscape. crop : tuple of int (optional, default None) A 6-tuple of [xmin, xmax, ymin, ymax, zmin, zmax]. Returns ------- output_volume : np.ndarray, shape (Z, X, Y) The segmentation in the Raveler volume. glia : list of int (optional, only returned if `get_glia` is True) The IDs in the segmentation corresponding to glial cells. """ from . import morpho spmap = read_image_stack( os.path.join(rav_export_dir, 'superpixel_maps', '*.png'), crop=crop) spmap = raveler_rgba_to_int(spmap) sp2seg_list = np.loadtxt( os.path.join(rav_export_dir, 'superpixel_to_segment_map.txt'), uint32) seg2bod_list = np.loadtxt( os.path.join(rav_export_dir, 'segment_to_body_map.txt'), uint32) sp2seg = {} max_sp = sp2seg_list[:,1].max() start_plane = sp2seg_list[:,0].min() for z, sp, seg in sp2seg_list: if z not in sp2seg: sp2seg[z] = zeros(max_sp+1, uint32) sp2seg[z][sp] = seg max_seg = seg2bod_list[:,0].max() seg2bod = zeros(max_seg+1, uint32) seg2bod[seg2bod_list[:,0]] = seg2bod_list[:,1] initial_output_volume = zeros_like(spmap) for i, m in enumerate(spmap): j = start_plane + i initial_output_volume[i] = seg2bod[sp2seg[j][m]] if use_watershed: probs = np.ones_like(spmap) if probability_map is None \ else probability_map output_volume = morpho.watershed(probs, seeds=initial_output_volume) else: output_volume = initial_output_volume if (output_volume[:, 0, 0] == 0).all() and \ (output_volume == 0).sum() == output_volume.shape[0]: output_volume[:, 0, 0] = output_volume[:, 0, 1] if get_glia: annots = json.load( open(os.path.join(rav_export_dir, 'annotations-body.json'), 'r')) glia = [a['body ID'] for a in annots['data'] if a.get('comment', None) == 'glia'] return output_volume, glia else: return output_volume ### Ilastik formats # obtained from Ilastik 0.5.4 ilastik_label_colors = \ [0xffff0000, 0xff00ff00, 0xffffff00, 0xff0000ff, 0xffff00ff, 0xff808000, 0xffc0c0c0, 0xfff2022d] def write_ilastik_project(images, labels, fn, label_names=None): """Write one or more image volumes and corresponding labels to Ilastik. Parameters ---------- images : np.ndarray or list of np.ndarray, shapes (M_i, N_i[, P_i]) The grayscale images to be saved. labels : np.ndarray or list of np.ndarray, same shapes as `images` The label maps corresponding to the images. fn : string The filename to save the project in. label_names : list of string (optional) The names corresponding to each label in `labels`. (Not implemented!) 
Returns ------- None Notes ----- Limitations: Assumes the same labels are used for all images. Supports only grayscale images and volumes, and a maximum of 8 labels. Requires at least one unlabeled voxel in the label field. """ f = h5py.File(fn, 'w') if type(images) != list: images = [images] labels = [labels] ulbs = unique(concatenate(list(map(unique, labels))))[1:] colors = array(ilastik_label_colors[:len(ulbs)]) names = ['Label %i'%i for i in ulbs] names = array(names, '|S%i'%max(map(len, names))) label_attributes = {'color':colors, 'name':names, 'number':ulbs} for i, (im, lb) in enumerate(zip(images, labels)): if im.ndim == 2: new_shape = (1,1)+im.shape+(1,) elif im.ndim == 3: new_shape = (1,)+im.shape+(1,) else: raise ValueError('Unsupported number of dimensions in image.') im = im.reshape(new_shape) lb = lb.reshape(new_shape) root = 'DataSets/dataItem%02i/'%i f[root+'data'] = im f[root+'labels'] = lb for k, v in label_attributes.items(): f[root+'labels'].attrs[k] = v f[root].attrs['Name'] = '' f[root].attrs['fileName'] = '' for subgroup in ['Description', 'Labeler', 'Name']: f['Project/%s'%subgroup] = array('', dtype='|S1') f['ilastikVersion'] = array(0.5) f.close() def write_ilastik_batch_volume(im, fn): """Write a volume to an HDF5 file for Ilastik batch processing. Parameters ---------- im : np.ndarray, shape (M, N[, P]) The image volume to be saved. fn : string The filename in which to save the volume. Returns ------- None """ if im.ndim == 2: im = im.reshape((1,1)+im.shape+(1,)) elif im.ndim == 3: im = im.reshape((1,)+im.shape+(1,)) else: raise ValueError('Unsupported number of dimensions in image.') write_h5_stack(im, fn, group='/volume/data') def read_prediction_from_ilastik_batch(fn, **kwargs): """Read the prediction produced by Ilastik from batch processing. Parameters ---------- fn : string The filename to read from. group : string (optional, default '/volume/prediction') Where to read from in the HDF5 file hierarchy. single_channel : bool (optional, default True) Read only the 0th channel (final dimension) from the volume. Returns ------- None """ if 'group' not in kwargs: kwargs['group'] = '/volume/prediction' a = squeeze(read_h5_stack(fn, **kwargs)) if kwargs.get('single_channel', True): a = a[..., 0] return a def read_cremi(fn, datasets=['/volumes/raw', '/volumes/labels/neuron_ids']): """Read volume formatted as described in CREMI data challenge [1]_. The format is HDF5, with: - raw image data (uint8) in: /volumes/raw - (optional) membrane prediction data (uint8, inverted) in: /volumes/membrane - synaptic cleft annotations in: /volumes/labels/clefts - neuron ids (uint64) in: /volumes/labels/neuron_ids - (optional) fragment data (uint64) in: /volumes/labels/fragments We currently ignore the synaptic cleft annotations, and return only the raw image and the neuron ids. Parameters ---------- fn : string The input filename. Returns ------- datasets : list of array The arrays corresponding to the requested datasets. References ---------- .. [1]: https://cremi.org/data/ """ out = [read_h5_stack(fn, group=ds) for ds in datasets] return out def write_cremi(data_dict, fn, resolution=(40., 4., 4.)): """Write a volume formatted as described in CREMI data challenge [1]_. Parameters ---------- data_dict : dictionary of string to arrays The data dictionary mapping HDF groups to arrays. fn : string The filename to write to. resolution : tuple of float, optional The resolution along each axis of the datasets. Currently, this is the same for each dataset written. 
""" for group, data in data_dict.items(): write_h5_stack(data, fn, group=group, compression='gzip', attrs={'resolution': resolution})
from hashlib import md5 import string from threading import Event, RLock import json import uuid as UUID import ssl import random import copy import time from meross_iot.cloud.timeouts import SHORT_TIMEOUT from meross_iot.cloud.exceptions.CommandTimeoutException import CommandTimeoutException from meross_iot.logger import CONNECTION_MANAGER_LOGGER as l import paho.mqtt.client as mqtt from meross_iot.credentials import MerossCloudCreds from meross_iot.cloud.client_status import ClientStatus from meross_iot.cloud.connection import ConnectionStatusManager from meross_iot.utilities.synchronization import AtomicCounter from meross_iot.logger import NETWORK_DATA as networkl def build_client_request_topic(client_uuid): return "/appliance/%s/subscribe" % client_uuid class PendingMessageResponse(object): """ This class is used as an Handle for mqtt messages that expect an ACK back. When a callback is passed to the constructor, this object is configured as an "async" waiter. Instead, passing a None callback, makes this object to act as a synchronously waiter. It is meant to be used internally by the library, in order to handle ACK waiting and callback calling. Note that this object is not thread safe. """ _message_id = None _callback = None _event = None _response = None _error = None def __init__(self, message_id, callback=None): self._message_id = message_id # Only instantiate an event if no callback has been specified if callback is None: self._event = Event() else: self._callback = callback def wait_for_response(self, timeout=SHORT_TIMEOUT): """ This method blocks until an ACK/RESPONSE message is received for the corresponding message_id that it refers to. Note that this method only works when the user is synchronously waiting for the response message. This method raises an exception if invoked when a callback was specified in the constructor. :param timeout: :return: """ if self._event is None: raise Exception("Error: you can invoke this method only if you don't use a callback (i.e. sync invocation)") # Wait until we receive the message. # If timeout occurs, return failure and None as received message. success = self._event.wait(timeout=timeout) return success, self._response def notify_message_received(self, error=None, response=None): self._response = copy.deepcopy(response) self._error = error if self._event is not None: self._event.set() elif self._callback is not None: try: self._callback(self._error, self._response) except: l.exception("Unhandled error occurred while executing the callback") class MerossCloudClient(object): # Meross Cloud credentials, which are provided by the HTTP Api. _cloud_creds = None # Connection info connection_status = None _domain = None _port = 2001 _ca_cert = None # App and client ID _app_id = None _client_id = None # Paho mqtt client object _mqtt_client = None # Callback to be invoked every time a push notification is received from the MQTT broker _push_message_callback = None # This dictionary is used to keep track of messages issued to the broker that are waiting for an ACK # The key is the message_id, the value is the PendingMessageResponse object. 
# Access to this resource is protected with exclusive locking _pending_response_messages = None _pending_responses_lock = None def __init__(self, cloud_credentials, # type: MerossCloudCreds push_message_callback=None, # type: callable **kwords): self.connection_status = ConnectionStatusManager() self._cloud_creds = cloud_credentials self._pending_response_messages = dict() self._pending_responses_lock = RLock() self._push_message_callback = push_message_callback self._subscription_count = AtomicCounter(0) if "domain" in kwords: self._domain = kwords['domain'] else: self._domain = "iot.meross.com" # Lookup port and certificate for MQTT server self._port = kwords.get('port', MerossCloudClient._port) self._ca_cert = kwords.get('ca_cert', None) self._generate_client_and_app_id() # Password is calculated as the MD5 of USERID concatenated with KEY md5_hash = md5() clearpwd = "%s%s" % (self._cloud_creds.user_id, self._cloud_creds.key) md5_hash.update(clearpwd.encode("utf8")) hashed_password = md5_hash.hexdigest() # Start the mqtt client self._mqtt_client = mqtt.Client(client_id=self._client_id, protocol=mqtt.MQTTv311) # ex. app-id -> app:08d4c9f99da40203ebc798a76512ec14 self._mqtt_client.on_connect = self._on_connect self._mqtt_client.on_message = self._on_message self._mqtt_client.on_disconnect = self._on_disconnect self._mqtt_client.on_subscribe = self._on_subscribe # Avoid login if user_id is None if self._cloud_creds.user_id is not None: self._mqtt_client.username_pw_set(username=self._cloud_creds.user_id, password=hashed_password) self._mqtt_client.tls_set(ca_certs=self._ca_cert, certfile=None, keyfile=None, cert_reqs=ssl.CERT_REQUIRED, tls_version=ssl.PROTOCOL_TLS, ciphers=None) def close(self): l.info("Closing the MQTT connection...") self._mqtt_client.disconnect() l.debug("Waiting for the client to disconnect...") self.connection_status.wait_for_status(ClientStatus.CONNECTION_DROPPED) # Starts a new thread that handles mqtt protocol and calls us back via callbacks l.debug("Stopping the MQTT looper.") self._mqtt_client.loop_stop(True) l.info("Client has been fully disconnected.") def connect(self): """ Starts the connection to the MQTT broker :return: """ l.info("Initializing the MQTT connection...") self._mqtt_client.connect(self._domain, self._port, keepalive=30) self.connection_status.update_status(ClientStatus.CONNECTING) # Starts a new thread that handles mqtt protocol and calls us back via callbacks l.debug("(Re)Starting the MQTT looper.") self._mqtt_client.loop_stop(True) self._mqtt_client.loop_start() l.debug("Waiting for the client to connect...") self.connection_status.wait_for_status(ClientStatus.SUBSCRIBED) l.info("Client connected to MQTT broker and subscribed to relevant topics.") # ------------------------------------------------------------------------------------------------ # MQTT Handlers # ------------------------------------------------------------------------------------------------ def _on_disconnect(self, client, userdata, rc): l.info("Disconnection detected. Reason: %s" % str(rc)) # When the mqtt connection is dropped, we need to reset the subscription counter. self._subscription_count = AtomicCounter(0) self.connection_status.update_status(ClientStatus.CONNECTION_DROPPED) # TODO: should we handle disconnection in some way at this level? 
if rc == mqtt.MQTT_ERR_SUCCESS: pass else: client.loop_stop(True) def _on_unsubscribe(self): l.debug("Unsubscribed from topic") self._subscription_count.dec() def _on_subscribe(self, client, userdata, mid, granted_qos): l.debug("Succesfully subscribed to topic. Subscription count: %d" % self._subscription_count.get()) if self._subscription_count.inc() == 2: self.connection_status.update_status(ClientStatus.SUBSCRIBED) def _on_connect(self, client, userdata, rc, other): l.debug("Connected with result code %s" % str(rc)) self.connection_status.update_status(ClientStatus.CONNECTED) self._client_response_topic = "/app/%s-%s/subscribe" % (self._cloud_creds.user_id, self._app_id) self._user_topic = "/app/%s/subscribe" % self._cloud_creds.user_id # Subscribe to the relevant topics l.debug("Subscribing to topics...") client.subscribe(self._user_topic) client.subscribe(self._client_response_topic) def _on_message(self, client, userdata, msg): """ This handler is called when a message is received from the MQTT broker, on the subscribed topics. The current implementation checks the validity of the message itself, by verifying its signature. :param client: is the MQTT client reference, useful to respond back :param userdata: metadata about the received data :param msg: message that was received :return: nothing, it simply handles the message accordingly. """ networkl.debug(msg.topic + " --> " + str(msg.payload)) try: message = json.loads(str(msg.payload, "utf8")) header = message['header'] message_hash = md5() strtohash = "%s%s%s" % (header['messageId'], self._cloud_creds.key, header['timestamp']) message_hash.update(strtohash.encode("utf8")) expected_signature = message_hash.hexdigest().lower() if header['sign'] != expected_signature: # TODO: custom exception for invalid signature raise Exception('The signature did not match!') # Check if there is any thread waiting for this message or if there is a callback that we need to invoke. # If so, do it here. handle = None with self._pending_responses_lock: msg_id = header['messageId'] handle = self._pending_response_messages.get(msg_id) from_myself = False if handle is not None: # There was a handle for this message-id. It means it is a response message to some # request performed by the library itself. from_myself = True try: l.debug("Calling handle event handler for message %s" % msg_id) # Call the handler handle.notify_message_received(error=None, response=message) l.debug("Done handler for message %s" % msg_id) # Remove the message from the pending queue with self._pending_responses_lock: del self._pending_response_messages[msg_id] except: l.exception("Error occurred while invoking message handler") # Let's also catch all the "PUSH" notifications and dispatch them to the push_notification_callback. 
if self._push_message_callback is not None and header['method'] == "PUSH" and 'namespace' in header: self._push_message_callback(message, from_myself=from_myself) except Exception: l.exception("Failed to process message.") # ------------------------------------------------------------------------------------------------ # Protocol Handlers # ------------------------------------------------------------------------------------------------ def execute_cmd(self, dst_dev_uuid, method, namespace, payload, callback=None, timeout=SHORT_TIMEOUT): start = time.time() # Build the mqtt message we will send to the broker message, message_id = self._build_mqtt_message(method, namespace, payload) # Register the waiting handler for that message handle = PendingMessageResponse(message_id=message_id, callback=callback) with self._pending_responses_lock: self._pending_response_messages[message_id] = handle # Send the message to the broker l.debug("Executing message-id %s, %s on %s command for device %s" % (message_id, method, namespace, dst_dev_uuid)) self._mqtt_client.publish(topic=build_client_request_topic(dst_dev_uuid), payload=message) # If the caller has specified a callback, we don't need to actively wait for the message ACK. So we can # immediately return. if callback is not None: return None # Otherwise, we need to wait until the message is received. l.debug("Waiting for response to message-id %s" % message_id) success, resp = handle.wait_for_response(timeout=timeout) if not success: raise CommandTimeoutException("A timeout occurred while waiting for the ACK: %d" % timeout) elapsed = time.time() - start l.debug("Message-id: %s, command %s-%s command for device %s took %s" % (message_id, method, namespace, dst_dev_uuid, str(elapsed))) return resp['payload'] # ------------------------------------------------------------------------------------------------ # Protocol utilities # ------------------------------------------------------------------------------------------------ def _build_mqtt_message(self, method, namespace, payload): """ Sends a message to the Meross MQTT broker, respecting the protocol payload. :param method: :param namespace: :param payload: :return: """ # Generate a random 16 byte string randomstring = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(16)) # Hash it as md5 md5_hash = md5() md5_hash.update(randomstring.encode('utf8')) messageId = md5_hash.hexdigest().lower() timestamp = int(round(time.time())) # Hash the messageId, the key and the timestamp md5_hash = md5() strtohash = "%s%s%s" % (messageId, self._cloud_creds.key, timestamp) md5_hash.update(strtohash.encode("utf8")) signature = md5_hash.hexdigest().lower() data = { "header": { "from": self._client_response_topic, "messageId": messageId, # Example: "122e3e47835fefcd8aaf22d13ce21859" "method": method, # Example: "GET", "namespace": namespace, # Example: "Appliance.System.All", "payloadVersion": 1, "sign": signature, # Example: "b4236ac6fb399e70c3d61e98fcb68b74", "timestamp": timestamp }, "payload": payload } strdata = json.dumps(data) return strdata.encode("utf-8"), messageId def _generate_client_and_app_id(self): md5_hash = md5() rnd_uuid = UUID.uuid4() md5_hash.update(("%s%s" % ("API", rnd_uuid)).encode("utf8")) self._app_id = md5_hash.hexdigest() self._client_id = 'app:%s' % md5_hash.hexdigest()
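# --------------------------------------------------------------------------------------------------
# Hedged usage sketch (not part of the library above): it only illustrates the call order the
# client expects -- connect(), execute_cmd(), close(). The credentials object, device UUID and
# the ToggleX namespace/payload below are placeholders/assumptions, not verified values.
def _example_sync_command(creds, device_uuid):
    client = MerossCloudClient(cloud_credentials=creds)
    client.connect()  # blocks until the client reaches the SUBSCRIBED state
    try:
        payload = {'togglex': {'onoff': 1, 'channel': 0}}
        # With no callback, execute_cmd() waits for the broker ACK and returns its payload,
        # raising CommandTimeoutException if nothing arrives within the given timeout.
        return client.execute_cmd(device_uuid, 'SET', 'Appliance.Control.ToggleX', payload)
    finally:
        client.close()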
from django.conf.urls import url, include
from django.conf.urls.static import static

from . import views

urlpatterns = [
    url(r'^register/', views.register_customer, name='registercustomer'),
]
import diff_match_patch import re from contextlib import suppress import ghdiff from funcy.seqs import first, rest def resolve_identifier(identifier): match = re.match("@?([\w\-\.]*)/([\w\-]*)", identifier) if not hasattr(match, "group"): raise ValueError("Invalid identifier") return match.group(1), match.group(2) def comment_history(db, uri_or_identifier): with suppress(TypeError, ValueError): identifier = parse_identifier(uri_or_identifier) return get_comment_history(db, *resolve_identifier(identifier)) def parse_identifier(uri): return '@%s' % uri.split('@')[-1] def get_comment_history(db, author, permlink): conditions = { # 'account': author, 'author': author, 'type': 'comment', 'permlink': permlink, } return list(db['Operations'].find(conditions).sort('timestamp', 1)) def reverse_patch(body_diffs): """ Take a diff_match_patch C++ library diffs, and re-create original full-body texts.""" p = diff_match_patch.diff_match_patch() original, *diffs = body_diffs full_versions = [original] for diff in diffs: try: reverse = p.patch_apply(p.patch_fromText(diff), full_versions[-1]) full_versions.append(reverse[0]) except ValueError: # some comments have their bodies overwritten completely, rather than diffed full_versions.append(diff) return full_versions def recreate_body_diffs(comments): """ Take a list of comments, extract their bodies, re-create full text from diffs and generate pretty html diffs. Returns: (OriginalText, [diffs]) """ body_diffs = [x['body'] for x in comments] full_versions = reverse_patch(body_diffs) results = [full_versions[0]] for i, diff in enumerate(full_versions[1:]): results.append(ghdiff.diff(full_versions[i], diff).replace('\n', '')) return first(results), list(rest(results))
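# Minimal sketch of the round trip assumed by reverse_patch() above: the first stored body is
# full text and every later body is a diff_match_patch patch_toText() string. The two versions
# below are made up for illustration; they are not real Steem operations.
def _example_reverse_patch():
    p = diff_match_patch.diff_match_patch()
    v0 = "Hello world"
    v1 = "Hello brave new world"
    diff_text = p.patch_toText(p.patch_make(v0, v1))
    # Re-creates both full versions from the stored original plus the textual patch.
    return reverse_patch([v0, diff_text])  # -> ["Hello world", "Hello brave new world"]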
from elasticsearch import Elasticsearch from elasticsearch_dsl import Search, Q from collections import OrderedDict from itertools import product import json from pandas.io.json import json_normalize import copy import pandas as pd CLIENT = Elasticsearch() INDEX = 'associations' EXPECTED_CGI_EVIDENCE_COUNT = 1347 EXPECTED_CGI_ONCOGENIC_EVIDENCE_COUNT = 289 # Based on data docs count minus three with no gene information EXPECTED_ONCOKB_EVIDENCE_COUNT = 4045 EXPECTED_ONCOKB_ONCGENIC_EVIDENCE_COUNT = 35 def test_cgi_evidence_counts(): s = Search(using=CLIENT, index=INDEX) s = s.params(size=0) s = s.query("query_string", query="+source:cgi") s.aggs.bucket('cgi_count', 'terms', field='source.keyword') agg = s.execute().aggregations assert agg.cgi_count.buckets[0].doc_count == EXPECTED_CGI_EVIDENCE_COUNT def test_cgi_oncogenic_evidence_counts(): s = Search(using=CLIENT, index=INDEX) s = s.params(size=0) s = s.query("query_string", query='+source:cgi +association.oncogenic:"*oncogenic mutation"') s.aggs.bucket('cgi_count', 'terms', field='source.keyword') agg = s.execute().aggregations assert agg.cgi_count.buckets[0].doc_count == EXPECTED_CGI_ONCOGENIC_EVIDENCE_COUNT def test_cgi_oncogenic_spotcheck_evidence_feature_counts(): query = '+source:cgi +association.description:"[\'KRAS\'] BET inhibitors Responsive"' s = Search(using=CLIENT, index=INDEX) s = s.params(size=0) s = s.query("query_string", query=query) s.aggs.bucket('cgi_count', 'terms', field='source.keyword') agg = s.execute().aggregations assert agg.cgi_count.buckets[0].doc_count == 1 s = Search(using=CLIENT, index=INDEX) s = s.params(size=1) s = s.query("query_string", query=query) result = s.execute() assert len(result.hits[0].features) == 14 def test_oncokb_evidence_counts(): s = Search(using=CLIENT, index=INDEX) s = s.params(size=0) s = s.query("query_string", query="+source:oncokb") s.aggs.bucket('cgi_count', 'terms', field='source.keyword') agg = s.execute().aggregations assert agg.cgi_count.buckets[0].doc_count == EXPECTED_ONCOKB_EVIDENCE_COUNT def test_oncokb_oncogenic_evidence_counts(): s = Search(using=CLIENT, index=INDEX).source(includes=['oncokb']) s = s.query("query_string", query="+source:oncokb ") # peek into opaque source to see if oncogenic feature hits = [hit for hit in s.scan() if 'Oncogenic Mutations' in hit.oncokb] assert len(hits) == EXPECTED_ONCOKB_ONCGENIC_EVIDENCE_COUNT def test_oncokb_oncogenic_spotcheck_evidence_feature_counts(): query = '+source:oncokb +association.phenotype.type.id:"DOID\:9256" +association.publication_url.keyword:"http\://www.ncbi.nlm.nih.gov/pubmed/21228335"' s = Search(using=CLIENT, index=INDEX) s = s.params(size=0) s = s.query("query_string", query=query) s.aggs.bucket('oncokb_count', 'terms', field='source.keyword') agg = s.execute().aggregations assert agg.oncokb_count.buckets[0].doc_count == 1 s = Search(using=CLIENT, index=INDEX) s = s.params(size=1) s = s.query("query_string", query=query) result = s.execute() assert len(result.hits[0].features) == 46
from .browser import Browser


def _jupyter_server_extension_paths():
    return [{
        "module": "igv"
    }]


def _jupyter_nbextension_paths():
    return [dict(
        section="notebook",
        # the path is relative to the `igv` directory
        src="static",
        # directory in the `nbextension/` namespace
        dest="igv",
        # also in the `nbextension/` namespace
        require="igv/extension")]


def load_jupyter_server_extension(nbapp):
    nbapp.log.info("igv enabled!")
# -*- coding: utf-8 -*- # This code is part of Qiskit. # # (C) Copyright IBM 2018, 2019. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """Client for accessing authentication features of IBM Q Experience.""" from typing import Dict, List from ..exceptions import AuthenticationLicenseError, RequestsApiError from ..rest import Api, Auth from ..session import RetrySession from .base import BaseClient class AuthClient(BaseClient): """Client for accessing authentication features of IBM Q Experience.""" def __init__(self, api_token: str, auth_url: str, **request_kwargs: Dict) -> None: """AuthClient constructor. Args: api_token (str): IBM Q Experience API token. auth_url (str): URL for the authentication service. **request_kwargs (dict): arguments for the `requests` Session. """ self.api_token = api_token self.auth_url = auth_url self._service_urls = {} self.client_auth = Auth(RetrySession(auth_url, **request_kwargs)) self.client_api = self._init_service_clients(**request_kwargs) def _init_service_clients(self, **request_kwargs: Dict) -> Api: """Initialize the clients used for communicating with the API and ws. Args: **request_kwargs (dict): arguments for the `requests` Session. Returns: Api: client for the api server. """ # Request an access token. access_token = self._request_access_token() # Use the token for the next auth server requests. self.client_auth.session.access_token = access_token self._service_urls = self.user_urls() # Create the api server client, using the access token. client_api = Api(RetrySession(self._service_urls['http'], access_token, **request_kwargs)) return client_api def _request_access_token(self) -> str: """Request a new access token from the API authentication server. Returns: str: access token. Raises: AuthenticationLicenseError: if the user hasn't accepted the license agreement. RequestsApiError: if the request failed. """ try: response = self.client_auth.login(self.api_token) return response['id'] except RequestsApiError as ex: response = ex.original_exception.response if response is not None and response.status_code == 401: try: error_code = response.json()['error']['name'] if error_code == 'ACCEPT_LICENSE_REQUIRED': message = response.json()['error']['message'] raise AuthenticationLicenseError(message) except (ValueError, KeyError): # the response did not contain the expected json. pass raise # User account-related public functions. def user_urls(self) -> Dict[str, str]: """Retrieve the API URLs from the authentication server. Returns: dict: a dict with the base URLs for the services. Currently supported keys: * ``http``: the API URL for http communication. * ``ws``: the API URL for websocket communication. """ response = self.client_auth.user_info() return response['urls'] def user_hubs(self) -> List[Dict[str, str]]: """Retrieve the hubs available to the user. The first entry in the list will be the default one, as indicated by the API (by having `isDefault` in all hub, group, project fields). Returns: list[dict]: a list of dicts with the hubs, which contains the keys `hub`, `group`, `project`. 
""" response = self.client_api.hubs() hubs = [] for hub in response: hub_name = hub['name'] for group_name, group in hub['groups'].items(): for project_name, project in group['projects'].items(): entry = {'hub': hub_name, 'group': group_name, 'project': project_name} # Move to the top if it is the default h/g/p. if project.get('isDefault'): hubs.insert(0, entry) else: hubs.append(entry) return hubs # Miscellaneous public functions. def api_version(self) -> Dict[str, str]: """Return the version of the API. Returns: dict: versions of the API components. """ return self.client_api.version() def current_access_token(self) -> str: """Return the current access token. Returns: str: the access token in use. """ return self.client_auth.session.access_token def current_service_urls(self) -> Dict[str, str]: """Return the current service URLs. Returns: dict: a dict with the base URLs for the services, in the same format as `.user_urls()`. """ return self._service_urls
# MIT License # # Copyright (c) 2018 # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. r"""Generates a hard dataset i.e. RNet or ONet dataset. Usage: ```shell $ python tfmtcnn/tfmtcnn/generate_hard_dataset.py \ --network_name RNet \ --train_root_dir ../data/models/mtcnn/train \ --annotation_image_dir ../data/WIDER_Face/WIDER_train/images \ --annotation_file_name ../data/WIDER_Face/WIDER_train/wider_face_train_bbx_gt.txt \ --landmark_image_dir ../data/CelebA/images \ --landmark_file_name ../data/CelebA/CelebA.txt \ --base_number_of_images 700000 \ --target_root_dir ../data/datasets/mtcnn $ python tfmtcnn/tfmtcnn/generate_hard_dataset.py \ --network_name ONet \ --train_root_dir ../data/models/mtcnn/train \ --annotation_image_dir ../data/WIDER_Face/WIDER_train/images \ --annotation_file_name ../data/WIDER_Face/WIDER_train/wider_face_train_bbx_gt.txt \ --landmark_image_dir ../data/CelebA/images \ --landmark_file_name ../data/CelebA/CelebA.txt \ --base_number_of_images 700000 \ --target_root_dir ../data/datasets/mtcnn ``` """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import os import argparse from tfmtcnn.datasets.HardDataset import HardDataset import tfmtcnn.datasets.constants as datasets_constants default_base_number_of_images = datasets_constants.default_base_number_of_images def parse_arguments(argv): parser = argparse.ArgumentParser() parser.add_argument( '--network_name', type=str, help='The name of the network.', default='ONet') parser.add_argument( '--train_root_dir', type=str, help='Input train root directory where model weights are saved.', default=None) parser.add_argument( '--annotation_file_name', type=str, help='Input WIDER face dataset annotations file.', default=None) parser.add_argument( '--annotation_image_dir', type=str, help='Input WIDER face dataset training image directory.', default=None) parser.add_argument( '--landmark_image_dir', type=str, help='Input landmark dataset training image directory.', default=None) parser.add_argument( '--landmark_file_name', type=str, help='Input landmark dataset annotation file.', default=None) parser.add_argument( '--base_number_of_images', type=int, help='Input base number of images.', default=default_base_number_of_images) parser.add_argument( '--target_root_dir', type=str, help= 'Output directory where output images and TensorFlow data files are saved.', default=None) return (parser.parse_args(argv)) def main(args): if (not args.annotation_file_name): raise ValueError( 
'You must supply input WIDER face dataset annotations file with --annotation_file_name.' ) if (not args.annotation_image_dir): raise ValueError( 'You must supply input WIDER face dataset training image directory with --annotation_image_dir.' ) if (not args.landmark_image_dir): raise ValueError( 'You must supply input landmark dataset training image directory with --landmark_image_dir.' ) if (not args.landmark_file_name): raise ValueError( 'You must supply input landmark dataset annotation file with --landmark_file_name.' ) if (not args.target_root_dir): raise ValueError( 'You must supply output directory for storing output images and TensorFlow data files with --target_root_dir.' ) if (not (args.network_name in ['RNet', 'ONet'])): raise ValueError('The network name should be either RNet or ONet.') if (args.base_number_of_images < 1): base_number_of_images = default_base_number_of_images else: base_number_of_images = args.base_number_of_images hard_dataset = HardDataset(args.network_name) status = hard_dataset.generate( args.annotation_image_dir, args.annotation_file_name, args.landmark_image_dir, args.landmark_file_name, args.train_root_dir, base_number_of_images, args.target_root_dir) if (status): print(args.network_name + ' network dataset is generated at ' + args.target_root_dir) else: print('Error generating hard dataset.') if __name__ == '__main__': os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' main(parse_arguments(sys.argv[1:]))
from django.contrib import admin

from .models import *

# Register your models here.
admin.site.register(UserInfo)
admin.site.register(ReceiveInfo)
''' Correlation loss, 2 to 1 ''' import numpy as np import os import tensorflow as tf import sys sys.path.append('tfutils') sys.path.append('curiosity') import numpy as np from tfutils import base, optimizer from curiosity.data.short_long_sequence_data import ShortLongSequenceDataProvider import curiosity.models.jerk_models as modelsource DATA_PATH = '/mnt/fs0/datasets/two_world_dataset/new_tfdata' VALDATA_PATH = '/mnt/fs0/datasets/two_world_dataset/new_tfvaldata' DATA_BATCH_SIZE = 256 MODEL_BATCH_SIZE = 256 TIME_SEEN = 3 SHORT_LEN = TIME_SEEN LONG_LEN = 4 MIN_LEN = 4 CACHE_DIR = '/mnt/fs0/nhaber' NUM_BATCHES_PER_EPOCH = 115 * 70 * 256 / MODEL_BATCH_SIZE STATS_FILE = '/mnt/fs0/datasets/two_world_dataset/statistics/stats_again.pkl' IMG_HEIGHT = 160 IMG_WIDTH = 375 SCALE_DOWN_HEIGHT = 40 SCALE_DOWN_WIDTH = 94 L2_COEF = 200. NUM_CLASSES = 40, if not os.path.exists(CACHE_DIR): os.mkdir(CACHE_DIR) def table_norot_grab_func(path): all_filenames = os.listdir(path) print('got to file grabber!') return [os.path.join(path, fn) for fn in all_filenames if '.tfrecords' in fn and 'TABLE' in fn and ':ROT:' not in fn] def append_it(x, y, step): if x is None: x = [] x.append(y) return x def mean_losses_subselect_rest(val_res, skip_num): retval = {} keys = val_res[0].keys() for k in keys: if 'loss' in k: plucked = [d[k] for d in val_res] retval[k] = np.mean(plucked) elif 'reference_ids' in k: retval[k] = [d[k] for d in val_res] else: retval[k] = [val_res[i][k] for i in range(len(val_res)) if i % skip_num == 0] return retval def just_keep_everything(val_res): keys = val_res[0].keys() return dict((k, [d[k] for d in val_res]) for k in keys) SAVE_TO_GFS = ['object_data_future', 'pred', 'object_data_seen_1d', 'reference_ids', 'master_filter'] def grab_all(inputs, outputs, num_to_save = 1, **garbage_params): retval = {} batch_size = outputs['pred'].get_shape().as_list()[0] for k in SAVE_TO_GFS: if k != 'reference_ids': retval[k] = outputs[k][:num_to_save] else: retval[k] = outputs[k] retval['loss'] = modelsource.discretized_loss(outputs) return retval #cfg_simple lr .05, no normalization #cfg_simple_norm lr .05, normalization #cfg_2: lr .05, normalization, diff loss, try some rmsprop #cfg_2_lr-3, lr .001 #cfg_2_rmsprop lr .001 now with rmsprop #rms_-4 3123 #big_lr rms, lr .05 #fixed_end fixed end nonlinearity, otherwise like big_lr #rms_-4_fixed #rms_5-2_fixed rms_prop #rms_1-5_fixed lr 1-05 #rms_1-6_fixed #nrmfx_5-2 params = { 'save_params' : { 'host' : 'localhost', 'port' : 27017, 'dbname' : 'future_prediction', 'collname' : 'jerk', 'exp_id' : 'jerk_disc', 'save_valid_freq' : 2000, 'save_filters_freq': 30000, 'cache_filters_freq': 2000, 'save_initial_filters' : False, 'cache_dir' : CACHE_DIR, 'save_to_gfs' : SAVE_TO_GFS }, 'model_params' : { 'func' : modelsource.basic_jerk_model, 'cfg' : modelsource.cfg_class_jerk, 'time_seen' : TIME_SEEN, 'normalization_method' : {'object_data' : 'screen_normalize', 'actions' : 'standard'}, 'stats_file' : STATS_FILE, 'image_height' : IMG_HEIGHT, 'image_width' : IMG_WIDTH, 'scale_down_height' : SCALE_DOWN_HEIGHT, 'scale_down_width' : SCALE_DOWN_WIDTH, 'add_depth_gaussian' : True, 'include_pose' : False, 'num_classes' : 40. 
}, 'train_params' : { 'data_params' : { 'func' : ShortLongSequenceDataProvider, 'data_path' : DATA_PATH, 'short_sources' : ['normals', 'normals2', 'images'], 'long_sources' : ['actions', 'object_data', 'reference_ids'], 'short_len' : SHORT_LEN, 'long_len' : LONG_LEN, 'min_len' : MIN_LEN, 'filters' : ['is_not_teleporting', 'is_object_there', 'is_object_in_view', 'is_object_in_view2'], 'shuffle' : True, 'shuffle_seed' : 0, 'n_threads' : 1, 'batch_size' : DATA_BATCH_SIZE, 'file_grab_func' : table_norot_grab_func, 'is_there_subsetting_rule' : 'just_first', 'is_in_view_subsetting_rule' : 'last_seen_and_first_not' }, 'queue_params' : { 'queue_type' : 'random', 'batch_size' : MODEL_BATCH_SIZE, 'seed' : 0, 'capacity' : MODEL_BATCH_SIZE * 40 #TODO change! }, 'num_steps' : float('inf'), 'thres_loss' : float('inf') }, 'loss_params' : { 'targets' : [], 'agg_func' : tf.reduce_mean, 'loss_per_case_func' : modelsource.discretized_loss, 'loss_func_kwargs' : {}, 'loss_per_case_func_params' : {} }, 'learning_rate_params': { 'func': tf.train.exponential_decay, 'learning_rate': 1e-5, 'decay_rate': 0.95, 'decay_steps': NUM_BATCHES_PER_EPOCH, # exponential decay each epoch 'staircase': True }, 'optimizer_params': { 'func': optimizer.ClipOptimizer, 'optimizer_class': tf.train.AdamOptimizer, 'clip': True, # 'momentum': .9 }, 'validation_params' : { 'valid0' : { 'data_params' : { 'func' : ShortLongSequenceDataProvider, 'data_path' : VALDATA_PATH, 'short_sources' : ['normals', 'normals2', 'images'], 'long_sources' : ['actions', 'object_data', 'reference_ids'], 'short_len' : SHORT_LEN, 'long_len' : LONG_LEN, 'min_len' : MIN_LEN, 'filters' : ['is_not_teleporting', 'is_object_there', 'is_object_in_view', 'is_object_in_view2'], 'shuffle' : True, 'shuffle_seed' : 0, 'n_threads' : 1, 'batch_size' : DATA_BATCH_SIZE, 'file_grab_func' : table_norot_grab_func, 'is_there_subsetting_rule' : 'just_first', 'is_in_view_subsetting_rule' : 'last_seen_and_first_not' }, 'queue_params' : { 'queue_type' : 'random', 'batch_size' : MODEL_BATCH_SIZE, 'seed' : 0, 'capacity' : 20 * MODEL_BATCH_SIZE }, 'targets' : { 'func' : grab_all, 'targets' : [], 'num_to_save' : MODEL_BATCH_SIZE, }, # 'agg_func' : lambda val_res : mean_losses_subselect_rest(val_res, 1), 'agg_func' : just_keep_everything, 'online_agg_func' : append_it, 'num_steps' : 50 }, # 'valid1' : { # 'data_params' : { # 'func' : ShortLongSequenceDataProvider, # 'data_path' : DATA_PATH, # 'short_sources' : ['normals', 'normals2', 'images', 'images2', 'objects', 'objects2'], # 'long_sources' : ['actions', 'object_data', 'reference_ids'], # 'short_len' : SHORT_LEN, # 'long_len' : LONG_LEN, # 'min_len' : MIN_LEN, # 'filters' : ['is_not_teleporting', 'is_object_there'], # 'shuffle' : True, # 'shuffle_seed' : 0, # 'n_threads' : 1, # 'batch_size' : DATA_BATCH_SIZE, # 'file_grab_func' : table_norot_grab_func, # 'is_there_subsetting_rule' : 'just_first' # }, # # 'queue_params' : { # 'queue_type' : 'fifo', # 'batch_size' : MODEL_BATCH_SIZE, # 'seed' : 0, # 'capacity' : MODEL_BATCH_SIZE # }, # # 'targets' : { # 'func' : grab_all, # 'targets' : [], # 'num_to_save' : MODEL_BATCH_SIZE, # }, # # 'agg_func' : lambda val_res : mean_losses_subselect_rest(val_res, 1), # 'agg_func' : just_keep_everything, # 'online_agg_func' : append_it, # 'num_steps' : 20 # } } } if __name__ == '__main__': base.get_params() base.train_from_params(**params)
import scrapy from selenium import webdriver import re import json import requests import os from kototo.items import KototoItem import pymysql class KototoSpider(scrapy.Spider): name = 'kototo' start_urls = [] # 指定的需要爬取的up主的b站投稿页面 space_url = 'https://space.bilibili.com/17485141/video' def __init__(self): """ 构造器,主要初始化了selenium对象并实现无头浏览器,以及 初始化需要爬取的url地址,因为b站的翻页是js实现的,所以要手动处理一下 """ super().__init__() # 构造无头浏览器 from selenium.webdriver.chrome.options import Options chrome_options = Options() chrome_options.add_argument('--headless') chrome_options.add_argument('--disable-gpu') self.bro = webdriver.Chrome(chrome_options=chrome_options) # 初始化需要爬取的列表页 self.init_start_urls(self.start_urls, self.space_url) # 创建桌面文件夹 self.desktop_path = os.path.join(os.path.expanduser('~'), 'Desktop\\' + self.name + '\\') if not os.path.exists(self.desktop_path): os.mkdir(self.desktop_path) def parse(self, response): """ 解析方法,解析列表页的视频li,拿到标题和详情页,然后主动请求详情页 :param response: :return: """ li_list = response.xpath('//*[@id="submit-video-list"]/ul[2]/li') for li in li_list: print(li.xpath('./a[2]/@title').extract_first()) print(detail_url := 'https://' + li.xpath('./a[2]/@href').extract_first()[2:]) yield scrapy.Request(url=detail_url, callback=self.parse_detail) def parse_detail(self, response): """ 增量爬取: 解析详情页的音视频地址并交给管道处理 使用mysql实现 :param response: :return: """ title = response.xpath('//*[@id="viewbox_report"]/h1/@title').extract_first() # 替换掉视频名称中无法用在文件名中或会导致cmd命令出错的字符 title = title.replace('-', '').replace(' ', '').replace('/', '').replace('|', '') play_info_list = self.get_play_info(response) # 这里使用mysql的唯一索引实现增量爬取,如果是服务器上跑也可以用redis if self.insert_info(title, play_info_list[1]): video_temp_path = (self.desktop_path + title + '_temp.mp4').replace('-', '') video_path = self.desktop_path + title + '.mp4' audio_path = self.desktop_path + title + '.mp3' item = KototoItem() item['video_url'] = play_info_list[0] item['audio_url'] = play_info_list[1] item['video_path'] = video_path item['audio_path'] = audio_path item['video_temp_path'] = video_temp_path yield item else: print(title + ': 已经下载过了!') def insert_info(self, vtitle, vurl): """ mysql持久化存储爬取过的视频内容信息 :param vtitle: 标题 :param vurl: 视频链接 :return: """ with Mysql() as conn: cursor = conn.cursor(pymysql.cursors.DictCursor) try: sql = 'insert into tb_kototo(title,url) values("%s","%s")' % (vtitle, vurl) res = cursor.execute(sql) conn.commit() if res == 1: return True else: return False except: return False def get_play_info(self, resp): """ 解析详情页的源代码,提取其中的视频和文件真实地址 :param resp: :return: """ json_data = json.loads(re.findall('<script>window\.__playinfo__=(.*?)</script>', resp.text)[0]) # 拿到视频和音频的真实链接地址 video_url = json_data['data']['dash']['video'][0]['baseUrl'] audio_url = json_data['data']['dash']['audio'][0]['backupUrl'][0] return video_url, audio_url def init_start_urls(self, url_list, person_page): """ 初始化需要爬取的列表页,由于b站使用js翻页,无法在源码中找到翻页地址, 需要自己手动实现解析翻页url的操作 :param url_list: :param person_page: :return: """ mid = re.findall('https://space.bilibili.com/(.*?)/video\w*', person_page)[0] url = 'https://api.bilibili.com/x/space/arc/search?mid=' + mid + '&ps=30&tid=0&pn=1&keyword=&order=pubdate&jsonp=jsonp' headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36', 'Referer': 'https://www.bilibili.com' } json_data = requests.get(url=url, headers=headers).json() total_count = json_data['data']['page']['count'] page_size = json_data['data']['page']['ps'] if total_count <= page_size: 
page_count = 1 elif total_count % page_size == 0: page_count = total_count // page_size else: page_count = total_count // page_size + 1 url_template = 'https://space.bilibili.com/' + mid + '/video?tid=0&page=' + '%d' + '&keyword=&order=pubdate' for i in range(page_count): page_no = i + 1 url_list.append(url_template % page_no) def closed(self, spider): """ 爬虫结束关闭selenium窗口 :param spider: :return: """ self.bro.quit() class Mysql(object): def __enter__(self): self.connection = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='root', database='python') return self.connection def __exit__(self, exc_type, exc_val, exc_tb): self.connection.close()
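# Hedged sketch (illustrative, not part of the spider): the Mysql helper above is a context
# manager, so a connection is opened and closed around each query block. The table and column
# names mirror the hard-coded ones used by insert_info(); adjust them to your own schema.
def _example_mysql_usage():
    with Mysql() as conn:
        cursor = conn.cursor(pymysql.cursors.DictCursor)
        cursor.execute('select title, url from tb_kototo')
        return cursor.fetchall()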
import os, sys, time path = os.path.join(os.path.dirname(__file__), '../lib/') sys.path.insert(0, path) from thrift.transport import THttpClient from thrift.protocol import TCompactProtocol from curve import LineService from curve.ttypes import * class Poll: client = None auth_query_path = "/api/v4/TalkService.do"; http_query_path = "/S4"; polling_path = "/P4"; host = "gd2.line.naver.jp"; port = 443; UA = "Line/7.18.0" #"Mozilla/5.0" #LA = 'IOSIPAD\x097.14.0\x09iPhone_OS\x0910.12.0' #UA = "Line/7.18.0"#"Mozilla/5.0" #LA = "CHROMEOS\t.1.4.5\tChrome_HP\t1"#"CHROMEOS\x091.4.13\x09Chrome_OS\x091" #UA = "Line/6.0.0 iPad4,1 9.0.2" LA = "DESKTOPMAC 10.10.2-YOSEMITE-x64 MAC 4.5.0" rev = 0 def __init__(self, authToken): self.transport = THttpClient.THttpClient('https://gd2.line.naver.jp:443'+ self.http_query_path) self.transport.setCustomHeaders({ "User-Agent" : self.UA, "X-Line-Application" : self.LA, "X-Line-Access": authToken }); self.protocol = TCompactProtocol.TCompactProtocol(self.transport); self.client = LineService.Client(self.protocol) self.rev = self.client.getLastOpRevision() self.transport.path = self.polling_path self.transport.open() def stream(self, sleep=50000): #usleep = lambda x: time.sleep(x/1000000.0) while True: try: Ops = self.client.fetchOps(self.rev, 5) except EOFError: raise Exception("It might be wrong revision\n" + str(self.rev)) for Op in Ops: # print Op.type if (Op.type != OpType.END_OF_OPERATION): self.rev = max(self.rev, Op.revision) return Op #usleep(sleep)
import re from collections.abc import Sequence from numbers import Integral from urllib.parse import urlparse from django.conf import settings from django.core import checks from corsheaders.conf import conf re_type = type(re.compile("")) def check_settings(app_configs, **kwargs): errors = [] if not is_sequence(conf.CORS_ALLOW_HEADERS, str): errors.append( checks.Error( "CORS_ALLOW_HEADERS should be a sequence of strings.", id="corsheaders.E001", ) ) if not is_sequence(conf.CORS_ALLOW_METHODS, str): errors.append( checks.Error( "CORS_ALLOW_METHODS should be a sequence of strings.", id="corsheaders.E002", ) ) if not isinstance(conf.CORS_ALLOW_CREDENTIALS, bool): errors.append( checks.Error( "CORS_ALLOW_CREDENTIALS should be a bool.", id="corsheaders.E003" ) ) if ( not isinstance(conf.CORS_PREFLIGHT_MAX_AGE, Integral) or conf.CORS_PREFLIGHT_MAX_AGE < 0 ): errors.append( checks.Error( ( "CORS_PREFLIGHT_MAX_AGE should be an integer greater than " + "or equal to zero." ), id="corsheaders.E004", ) ) if not isinstance(conf.CORS_ALLOW_ALL_ORIGINS, bool): if hasattr(settings, "CORS_ALLOW_ALL_ORIGINS"): allow_all_alias = "CORS_ALLOW_ALL_ORIGINS" else: allow_all_alias = "CORS_ORIGIN_ALLOW_ALL" errors.append( checks.Error( f"{allow_all_alias} should be a bool.", id="corsheaders.E005", ) ) if hasattr(settings, "CORS_ALLOWED_ORIGINS"): allowed_origins_alias = "CORS_ALLOWED_ORIGINS" else: allowed_origins_alias = "CORS_ORIGIN_WHITELIST" if not is_sequence(conf.CORS_ALLOWED_ORIGINS, str): errors.append( checks.Error( f"{allowed_origins_alias} should be a sequence of strings.", id="corsheaders.E006", ) ) else: special_origin_values = ( # From 'security sensitive' contexts "null", # From files on Chrome on Android # https://bugs.chromium.org/p/chromium/issues/detail?id=991107 "file://", ) for origin in conf.CORS_ALLOWED_ORIGINS: if origin in special_origin_values: continue parsed = urlparse(origin) if parsed.scheme == "" or parsed.netloc == "": errors.append( checks.Error( "Origin {} in {} is missing scheme or netloc".format( repr(origin), allowed_origins_alias ), id="corsheaders.E013", hint=( "Add a scheme (e.g. https://) or netloc (e.g. " + "example.com)." ), ) ) else: # Only do this check in this case because if the scheme is not # provided, netloc ends up in path for part in ("path", "params", "query", "fragment"): if getattr(parsed, part) != "": errors.append( checks.Error( "Origin {} in {} should not have {}".format( repr(origin), allowed_origins_alias, part ), id="corsheaders.E014", ) ) if hasattr(settings, "CORS_ALLOWED_ORIGIN_REGEXES"): allowed_regexes_alias = "CORS_ALLOWED_ORIGIN_REGEXES" else: allowed_regexes_alias = "CORS_ORIGIN_REGEX_WHITELIST" if not is_sequence(conf.CORS_ALLOWED_ORIGIN_REGEXES, (str, re_type)): errors.append( checks.Error( "{} should be a sequence of strings and/or compiled regexes.".format( allowed_regexes_alias ), id="corsheaders.E007", ) ) if not is_sequence(conf.CORS_EXPOSE_HEADERS, str): errors.append( checks.Error( "CORS_EXPOSE_HEADERS should be a sequence.", id="corsheaders.E008" ) ) if not isinstance(conf.CORS_URLS_REGEX, (str, re_type)): errors.append( checks.Error( "CORS_URLS_REGEX should be a string or regex.", id="corsheaders.E009" ) ) if not isinstance(conf.CORS_REPLACE_HTTPS_REFERER, bool): errors.append( checks.Error( "CORS_REPLACE_HTTPS_REFERER should be a bool.", id="corsheaders.E011" ) ) if hasattr(settings, "CORS_MODEL"): errors.append( checks.Error( ( "The CORS_MODEL setting has been removed - see " + "django-cors-headers' HISTORY." 
), id="corsheaders.E012", ) ) return errors def is_sequence(thing, type_or_types): return isinstance(thing, Sequence) and all( isinstance(x, type_or_types) for x in thing )
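# Hedged illustration (not part of django-cors-headers itself): is_sequence() accepts any
# Sequence whose items all match the given type(s). Note that a plain str is itself a Sequence
# of one-character strings, which is why the settings checked above are expected to be lists
# or tuples rather than bare strings.
def _is_sequence_examples():
    assert is_sequence(["https://example.com"], str)
    assert is_sequence((re.compile(r"^https://\w+\.example\.com$"),), (str, re_type))
    assert not is_sequence(42, str)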
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ConstantOp.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from google.protobuf import text_format from tensorflow.core.framework import graph_pb2 from tensorflow.core.framework import tensor_pb2 from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes as dtypes_lib from tensorflow.python.framework import errors_impl from tensorflow.python.framework import importer from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import gradient_checker from tensorflow.python.ops import logging_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test from tensorflow.python.util import compat class ConstantTest(test.TestCase): def _testCpu(self, x): np_ans = np.array(x) with self.test_session(use_gpu=False): tf_ans = ops.convert_to_tensor(x).eval() dtype = dtypes_lib.as_dtype(np_ans.dtype) if dtype.is_floating or dtype.is_complex: self.assertAllClose(np_ans, tf_ans) else: self.assertAllEqual(np_ans, tf_ans) def _testGpu(self, x): np_ans = np.array(x) with self.test_session(use_gpu=True): tf_ans = ops.convert_to_tensor(x).eval() dtype = dtypes_lib.as_dtype(np_ans.dtype) if dtype.is_floating or dtype.is_complex: self.assertAllClose(np_ans, tf_ans) else: self.assertAllEqual(np_ans, tf_ans) def _testAll(self, x): self._testCpu(x) self._testGpu(x) def testBFloat16(self): bfloat16 = dtypes_lib.bfloat16.as_numpy_dtype self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(bfloat16)) self._testAll( np.random.normal(size=30).reshape([2, 3, 5]).astype(bfloat16)) self._testAll(np.empty((2, 0, 5)).astype(bfloat16)) def testHalf(self): self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float16)) self._testAll( np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float16)) self._testAll(np.empty((2, 0, 5)).astype(np.float16)) def testFloat(self): self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32)) self._testAll( np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float32)) self._testAll(np.empty((2, 0, 5)).astype(np.float32)) def testDouble(self): self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float64)) self._testAll( np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float64)) self._testAll(np.empty((2, 0, 5)).astype(np.float64)) def testInt32(self): self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int32)) self._testAll((100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype( np.int32)) self._testAll(np.empty((2, 0, 5)).astype(np.int32)) def testInt64(self): self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int64)) self._testAll((100 * np.random.normal(size=30)).reshape([2, 
3, 5]).astype( np.int64)) self._testAll(np.empty((2, 0, 5)).astype(np.int64)) def testComplex64(self): self._testAll( np.complex(1, 2) * np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex64)) self._testAll( np.complex(1, 2) * np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex64)) self._testAll(np.empty((2, 0, 5)).astype(np.complex64)) def testComplex128(self): self._testAll( np.complex(1, 2) * np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex128)) self._testAll( np.complex(1, 2) * np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex128)) self._testAll(np.empty((2, 0, 5)).astype(np.complex128)) def testString(self): self._testCpu( np.array([compat.as_bytes(str(x)) for x in np.arange(-15, 15)]).reshape( [2, 3, 5])) self._testCpu(np.empty((2, 0, 5)).astype(np.str_)) def testVariant(self): # TODO(ebrevdo): Re-enable use_gpu=True once non-DMA Variant # copying between CPU and GPU is supported. with self.test_session(use_gpu=False): variant_tensor = tensor_pb2.TensorProto( dtype=dtypes_lib.variant.as_datatype_enum, tensor_shape=tensor_shape.TensorShape([]).as_proto(), variant_val=[ tensor_pb2.VariantTensorDataProto( # Match registration in variant_op_registry.cc type_name=b"int", metadata=np.array(1, dtype=np.int32).tobytes()) ]) const = constant_op.constant(variant_tensor) const_value = const.op.get_attr("value") # Ensure we stored the tensor proto properly. self.assertProtoEquals(variant_tensor, const_value) # Smoke test -- ensure this executes without trouble. # Right now, non-numpy-compatible objects cannot be returned from a # session.run call; similarly, objects that can't be converted to # native numpy types cannot be passed to ops.convert_to_tensor. # TODO(ebrevdo): Add registration mechanism for # ops.convert_to_tensor and for session.run output. logging_const_op = logging_ops.Print( const, [const], message="Variant storing an int, decoded const value:").op logging_const_op.run() def testStringWithNulls(self): with self.test_session(): val = ops.convert_to_tensor(b"\0\0\0\0").eval() self.assertEqual(len(val), 4) self.assertEqual(val, b"\0\0\0\0") with self.test_session(): val = ops.convert_to_tensor(b"xx\0xx").eval() self.assertEqual(len(val), 5) self.assertAllEqual(val, b"xx\0xx") nested = [[b"\0\0\0\0", b"xx\0xx"], [b"\0_\0_\0_\0", b"\0"]] with self.test_session(): val = ops.convert_to_tensor(nested).eval() # NOTE(mrry): Do not use assertAllEqual, because it converts nested to a # numpy array, which loses the null terminators. 
self.assertEqual(val.tolist(), nested) def testExplicitShapeNumPy(self): with ops.Graph().as_default(): c = constant_op.constant( np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32), shape=[2, 3, 5]) self.assertEqual(c.get_shape(), [2, 3, 5]) def testImplicitShapeNumPy(self): with ops.Graph().as_default(): c = constant_op.constant( np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32)) self.assertEqual(c.get_shape(), [2, 3, 5]) def testExplicitShapeList(self): with ops.Graph().as_default(): c = constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[7]) self.assertEqual(c.get_shape(), [7]) def testImplicitShapeList(self): with ops.Graph().as_default(): c = constant_op.constant([1, 2, 3, 4, 5, 6, 7]) self.assertEqual(c.get_shape(), [7]) def testExplicitShapeNumber(self): with ops.Graph().as_default(): c = constant_op.constant(1, shape=[1]) self.assertEqual(c.get_shape(), [1]) def testImplicitShapeNumber(self): with ops.Graph().as_default(): c = constant_op.constant(1) self.assertEqual(c.get_shape(), []) def testShapeInconsistent(self): with ops.Graph().as_default(): c = constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[10]) self.assertEqual(c.get_shape(), [10]) # pylint: disable=g-long-lambda def testShapeWrong(self): with ops.Graph().as_default(): with self.assertRaisesWithPredicateMatch( ValueError, lambda e: ("Too many elements provided. Needed at most 5, " "but received 7" == str(e))): constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5]) # pylint: enable=g-long-lambda # TODO(b/35396543): Temporarily disable: suspicion that # this is causing test timeouts. def _testTooLargeConstant(self): with ops.Graph().as_default(): large_array = np.zeros((512, 1024, 1024), dtype=np.float32) with self.assertRaisesRegexp( ValueError, "Cannot create a tensor proto whose content is larger than 2GB."): c = constant_op.constant(large_array) # TODO(b/35396543): Temporarily disable: suspicion that # this is causing test timeouts. 
def _testTooLargeGraph(self): with ops.Graph().as_default() as g: large_array = np.zeros((256, 1024, 1024), dtype=np.float32) c = constant_op.constant(large_array) d = constant_op.constant(large_array) with self.assertRaisesRegexp(ValueError, "GraphDef cannot be larger than 2GB."): g.as_graph_def() def testSparseValuesRaiseErrors(self): with self.assertRaisesRegexp(ValueError, "setting an array element with a sequence"): c = constant_op.constant([[1, 2], [3]], dtype=dtypes_lib.int32) with self.assertRaisesRegexp(ValueError, "must be a dense"): c = constant_op.constant([[1, 2], [3]]) with self.assertRaisesRegexp(ValueError, "must be a dense"): c = constant_op.constant([[1, 2], [3], [4, 5]]) class AsTensorTest(test.TestCase): def testAsTensorForTensorInput(self): with ops.Graph().as_default(): t = constant_op.constant(10.0) x = ops.convert_to_tensor(t) self.assertIs(t, x) def testAsTensorForNonTensorInput(self): with ops.Graph().as_default(): x = ops.convert_to_tensor(10.0) self.assertTrue(isinstance(x, ops.Tensor)) def testAsTensorForShapeInput(self): with self.test_session(): x = ops.convert_to_tensor(tensor_shape.TensorShape([])) self.assertEqual(dtypes_lib.int32, x.dtype) self.assertAllEqual([], x.eval()) x = ops.convert_to_tensor(tensor_shape.TensorShape([1, 2, 3])) self.assertEqual(dtypes_lib.int32, x.dtype) self.assertAllEqual([1, 2, 3], x.eval()) x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31-1, 2, 3])) self.assertEqual(dtypes_lib.int32, x.dtype) self.assertAllEqual([2**31-1, 2, 3], x.eval()) x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31-1, 2, 3]), dtype=dtypes_lib.int32) self.assertEqual(dtypes_lib.int32, x.dtype) self.assertAllEqual([2**31-1, 2, 3], x.eval()) x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31, 2, 3])) self.assertEqual(dtypes_lib.int64, x.dtype) self.assertAllEqual([2**31, 2, 3], x.eval()) x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31, 2, 3]), dtype=dtypes_lib.int64) self.assertEqual(dtypes_lib.int64, x.dtype) self.assertAllEqual([2**31, 2, 3], x.eval()) with self.assertRaisesRegexp( ValueError, "a dimension is too large .2147483648."): x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31, 2, 3]), dtype=dtypes_lib.int32) x = ops.convert_to_tensor( tensor_shape.TensorShape([1, 2, 3]), dtype=dtypes_lib.int64) self.assertEqual(dtypes_lib.int64, x.dtype) self.assertAllEqual([1, 2, 3], x.eval()) x = array_ops.reshape( array_ops.zeros([6]), tensor_shape.TensorShape([2, 3])) self.assertAllEqual([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], x.eval()) with self.assertRaisesRegexp(ValueError, "partially known"): ops.convert_to_tensor(tensor_shape.TensorShape(None)) with self.assertRaisesRegexp(ValueError, "partially known"): ops.convert_to_tensor(tensor_shape.TensorShape([1, None, 64])) with self.assertRaises(TypeError): ops.convert_to_tensor( tensor_shape.TensorShape([1, 2, 3]), dtype=dtypes_lib.float32) def testAsTensorForDimensionInput(self): with self.test_session(): x = ops.convert_to_tensor(tensor_shape.TensorShape([1, 2, 3])[1]) self.assertEqual(dtypes_lib.int32, x.dtype) self.assertAllEqual(2, x.eval()) x = ops.convert_to_tensor( tensor_shape.TensorShape([1, 2, 3])[1], dtype=dtypes_lib.int64) self.assertEqual(dtypes_lib.int64, x.dtype) self.assertAllEqual(2, x.eval()) with self.assertRaisesRegexp(ValueError, "unknown Dimension"): ops.convert_to_tensor(tensor_shape.TensorShape(None)[1]) with self.assertRaisesRegexp(ValueError, "unknown Dimension"): ops.convert_to_tensor(tensor_shape.TensorShape([1, None, 64])[1]) with 
self.assertRaises(TypeError): ops.convert_to_tensor( tensor_shape.TensorShape([1, 2, 3])[1], dtype=dtypes_lib.float32) class IdentityOpTest(test.TestCase): def testIdTensor(self): with ops.Graph().as_default(): x = constant_op.constant(2.0, shape=[6], name="input") id_op = array_ops.identity(x, name="id") self.assertTrue(isinstance(id_op.op.inputs[0], ops.Tensor)) self.assertProtoEquals("name: 'id' op: 'Identity' input: 'input' " "attr { key: 'T' value { type: DT_FLOAT } }", id_op.op.node_def) class ZerosTest(test.TestCase): def _Zeros(self, shape): with self.test_session(): ret = array_ops.zeros(shape) self.assertEqual(shape, ret.get_shape()) return ret.eval() def testConst(self): self.assertTrue( np.array_equal(self._Zeros([2, 3]), np.array([[0] * 3] * 2))) def testScalar(self): self.assertEqual(0, self._Zeros([])) self.assertEqual(0, self._Zeros(())) with self.test_session(): scalar = array_ops.zeros(constant_op.constant([], dtype=dtypes_lib.int32)) self.assertEqual(0, scalar.eval()) def testDynamicSizes(self): np_ans = np.array([[0] * 3] * 2) with self.test_session(): # Creates a tensor of 2 x 3. d = array_ops.fill([2, 3], 12., name="fill") # Constructs a tensor of zeros of the same dimensions as "d". z = array_ops.zeros(array_ops.shape(d)) out = z.eval() self.assertAllEqual(np_ans, out) self.assertShapeEqual(np_ans, d) self.assertShapeEqual(np_ans, z) def testDtype(self): with self.test_session(): d = array_ops.fill([2, 3], 12., name="fill") self.assertEqual(d.get_shape(), [2, 3]) # Test default type for both constant size and dynamic size z = array_ops.zeros([2, 3]) self.assertEqual(z.dtype, dtypes_lib.float32) self.assertEqual([2, 3], z.get_shape()) self.assertAllEqual(z.eval(), np.zeros([2, 3])) z = array_ops.zeros(array_ops.shape(d)) self.assertEqual(z.dtype, dtypes_lib.float32) self.assertEqual([2, 3], z.get_shape()) self.assertAllEqual(z.eval(), np.zeros([2, 3])) # Test explicit type control for dtype in [ dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32, dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8, dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64, dtypes_lib.bool, dtypes_lib.string ]: z = array_ops.zeros([2, 3], dtype=dtype) self.assertEqual(z.dtype, dtype) self.assertEqual([2, 3], z.get_shape()) z_value = z.eval() self.assertFalse(np.any(z_value)) self.assertEqual((2, 3), z_value.shape) z = array_ops.zeros(array_ops.shape(d), dtype=dtype) self.assertEqual(z.dtype, dtype) self.assertEqual([2, 3], z.get_shape()) z_value = z.eval() self.assertFalse(np.any(z_value)) self.assertEqual((2, 3), z_value.shape) class ZerosLikeTest(test.TestCase): def _compareZeros(self, dtype, fully_defined_shape, use_gpu): with self.test_session(use_gpu=use_gpu): # Creates a tensor of non-zero values with shape 2 x 3. # NOTE(kearnes): The default numpy dtype associated with tf.string is # np.object (and can't be changed without breaking a lot things), which # causes a TypeError in constant_op.constant below. Here we catch the # special case of tf.string and set the numpy dtype appropriately. if dtype == dtypes_lib.string: numpy_dtype = np.string_ else: numpy_dtype = dtype.as_numpy_dtype if fully_defined_shape: d = constant_op.constant( np.ones((2, 3), dtype=numpy_dtype), dtype=dtype) else: d = array_ops.placeholder(dtype=dtype) # Constructs a tensor of zeros of the same dimensions and type as "d". 
z_var = array_ops.zeros_like(d) # Test that the type is correct self.assertEqual(z_var.dtype, dtype) # Test that the shape is correct if fully_defined_shape: self.assertEqual([2, 3], z_var.get_shape()) # Test that the value is correct feed_dict = {} if not fully_defined_shape: feed_dict[d] = np.ones((2, 3), dtype=numpy_dtype) z_value = z_var.eval(feed_dict=feed_dict) self.assertFalse(np.any(z_value)) self.assertEqual((2, 3), z_value.shape) def testZerosLikeCPU(self): for dtype in [ dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int8, dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.uint16, dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.bool, dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.string ]: self._compareZeros(dtype, fully_defined_shape=False, use_gpu=False) self._compareZeros(dtype, fully_defined_shape=True, use_gpu=False) def testZerosLikeGPU(self): for dtype in [ dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32, dtypes_lib.bool, dtypes_lib.int64, dtypes_lib.string ]: self._compareZeros(dtype, fully_defined_shape=False, use_gpu=True) self._compareZeros(dtype, fully_defined_shape=True, use_gpu=True) def testZerosLikePartialShape(self): d = array_ops.placeholder(dtypes_lib.float32, shape=[None, 4, None]) z = array_ops.zeros_like(d) self.assertEqual(d.get_shape().as_list(), z.get_shape().as_list()) def testZerosLikeDtype(self): # Make sure zeros_like works even for dtypes that cannot be cast between with self.test_session(): shape = (3, 5) dtypes = np.float32, np.complex64 for in_type in dtypes: x = np.arange(15).astype(in_type).reshape(*shape) for out_type in dtypes: y = array_ops.zeros_like(x, dtype=out_type).eval() self.assertEqual(y.dtype, out_type) self.assertEqual(y.shape, shape) self.assertAllEqual(y, np.zeros(shape, dtype=out_type)) def testZerosLikeVariant(self): # TODO(ebrevdo): Re-enable use_gpu=True once non-DMA Variant # copying between CPU and GPU is supported AND we register a # ZerosLike callback for GPU for Variant storing primitive types # in variant_op_registry.cc. with self.test_session(use_gpu=False): variant_tensor = tensor_pb2.TensorProto( dtype=dtypes_lib.variant.as_datatype_enum, tensor_shape=tensor_shape.TensorShape([]).as_proto(), variant_val=[ tensor_pb2.VariantTensorDataProto( # Match registration in variant_op_registry.cc type_name=b"int", metadata=np.array(1, dtype=np.int32).tobytes()) ]) const_variant = constant_op.constant(variant_tensor) zeros_like = array_ops.zeros_like(const_variant) zeros_like_op = logging_ops.Print( zeros_like, [const_variant, zeros_like], message="Variant storing an int, input and output of zeros_like:").op # Smoke test -- ensure this executes without trouble. # Right now, non-numpy-compatible objects cannot be returned from a # session.run call; similarly, objects that can't be converted to # native numpy types cannot be passed to ops.convert_to_tensor. # TODO(ebrevdo): Add registration mechanism for # ops.convert_to_tensor and for session.run output. 
zeros_like_op.run() class OnesTest(test.TestCase): def _Ones(self, shape): with self.test_session(): ret = array_ops.ones(shape) self.assertEqual(shape, ret.get_shape()) return ret.eval() def testConst(self): self.assertTrue(np.array_equal(self._Ones([2, 3]), np.array([[1] * 3] * 2))) def testScalar(self): self.assertEqual(1, self._Ones([])) self.assertEqual(1, self._Ones(())) with self.test_session(): scalar = array_ops.ones(constant_op.constant([], dtype=dtypes_lib.int32)) self.assertEqual(1, scalar.eval()) def testDynamicSizes(self): np_ans = np.array([[1] * 3] * 2) with self.test_session(): # Creates a tensor of 2 x 3. d = array_ops.fill([2, 3], 12., name="fill") # Constructs a tensor of ones of the same dimensions as "d". z = array_ops.ones(array_ops.shape(d)) out = z.eval() self.assertAllEqual(np_ans, out) self.assertShapeEqual(np_ans, d) self.assertShapeEqual(np_ans, z) def testAutoPack(self): with self.test_session(): h = array_ops.placeholder(dtypes_lib.int32, shape=[]) w = array_ops.placeholder(dtypes_lib.int32, shape=[]) z = array_ops.ones([h, w]) out = z.eval(feed_dict={h: 4, w: 16}) self.assertAllEqual(out, np.array([[1] * 16] * 4)) def testDtype(self): with self.test_session(): d = array_ops.fill([2, 3], 12., name="fill") self.assertEqual(d.get_shape(), [2, 3]) # Test default type for both constant size and dynamic size z = array_ops.ones([2, 3]) self.assertEqual(z.dtype, dtypes_lib.float32) self.assertEqual([2, 3], z.get_shape()) self.assertAllEqual(z.eval(), np.ones([2, 3])) z = array_ops.ones(array_ops.shape(d)) self.assertEqual(z.dtype, dtypes_lib.float32) self.assertEqual([2, 3], z.get_shape()) self.assertAllEqual(z.eval(), np.ones([2, 3])) # Test explicit type control for dtype in (dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32, dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8, dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64, dtypes_lib.bool): z = array_ops.ones([2, 3], dtype=dtype) self.assertEqual(z.dtype, dtype) self.assertEqual([2, 3], z.get_shape()) self.assertAllEqual(z.eval(), np.ones([2, 3])) z = array_ops.ones(array_ops.shape(d), dtype=dtype) self.assertEqual(z.dtype, dtype) self.assertEqual([2, 3], z.get_shape()) self.assertAllEqual(z.eval(), np.ones([2, 3])) class OnesLikeTest(test.TestCase): def testOnesLike(self): for dtype in [ dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int8, dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.uint16, dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.bool, dtypes_lib.complex64, dtypes_lib.complex128 ]: numpy_dtype = dtype.as_numpy_dtype with self.test_session(): # Creates a tensor of non-zero values with shape 2 x 3. d = constant_op.constant( np.ones( (2, 3), dtype=numpy_dtype), dtype=dtype) # Constructs a tensor of zeros of the same dimensions and type as "d". z_var = array_ops.ones_like(d) # Test that the type is correct self.assertEqual(z_var.dtype, dtype) z_value = z_var.eval() # Test that the value is correct self.assertTrue(np.array_equal(z_value, np.array([[1] * 3] * 2))) self.assertEqual([2, 3], z_var.get_shape()) def testOnesLikePartialShape(self): d = array_ops.placeholder(dtypes_lib.float32, shape=[None, 4, None]) z = array_ops.ones_like(d) self.assertEqual(d.get_shape().as_list(), z.get_shape().as_list()) class FillTest(test.TestCase): def _compare(self, dims, val, np_ans, use_gpu): with self.test_session(use_gpu=use_gpu): tf_ans = array_ops.fill(dims, val, name="fill") out = tf_ans.eval() self.assertAllClose(np_ans, out) # Fill does not set the shape. 
# self.assertShapeEqual(np_ans, tf_ans) def _compareAll(self, dims, val, np_ans): self._compare(dims, val, np_ans, False) self._compare(dims, val, np_ans, True) def testFillFloat(self): np_ans = np.array([[3.1415] * 3] * 2).astype(np.float32) self._compareAll([2, 3], np_ans[0][0], np_ans) def testFillDouble(self): np_ans = np.array([[3.1415] * 3] * 2).astype(np.float64) self._compareAll([2, 3], np_ans[0][0], np_ans) def testFillInt32(self): np_ans = np.array([[42] * 3] * 2).astype(np.int32) self._compareAll([2, 3], np_ans[0][0], np_ans) def testFillInt64(self): np_ans = np.array([[-42] * 3] * 2).astype(np.int64) self._compareAll([2, 3], np_ans[0][0], np_ans) def testFillComplex64(self): np_ans = np.array([[0.15] * 3] * 2).astype(np.complex64) self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False) def testFillComplex128(self): np_ans = np.array([[0.15] * 3] * 2).astype(np.complex128) self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False) def testFillString(self): np_ans = np.array([[b"yolo"] * 3] * 2) with self.test_session(use_gpu=False): tf_ans = array_ops.fill([2, 3], np_ans[0][0], name="fill").eval() self.assertAllEqual(np_ans, tf_ans) def testFillNegative(self): with self.test_session(): for shape in (-1,), (2, -1), (-1, 2), (-2), (-3): with self.assertRaises(ValueError): array_ops.fill(shape, 7) # Using a placeholder so this won't be caught in static analysis. dims = array_ops.placeholder(dtypes_lib.int32) fill_t = array_ops.fill(dims, 3.0) for shape in (-1,), (2, -1), (-1, 2), (-2), (-3): with self.assertRaises(errors_impl.InvalidArgumentError): fill_t.eval({dims: shape}) def testShapeFunctionEdgeCases(self): # Non-vector dimensions. with self.assertRaises(ValueError): array_ops.fill([[0, 1], [2, 3]], 1.0) # Non-scalar value. with self.assertRaises(ValueError): array_ops.fill([3, 2], [1.0, 2.0]) # Partial dimension information. 
f = array_ops.fill(array_ops.placeholder(dtypes_lib.int32, shape=(4,)), 3.0) self.assertEqual([None, None, None, None], f.get_shape().as_list()) f = array_ops.fill( [array_ops.placeholder( dtypes_lib.int32, shape=()), 17], 1.0) self.assertEqual([None, 17], f.get_shape().as_list()) def testGradient(self): with self.test_session(): in_v = constant_op.constant(5.0) out_shape = [3, 2] out_filled = array_ops.fill(out_shape, in_v) err = gradient_checker.compute_gradient_error(in_v, [], out_filled, out_shape) self.assertLess(err, 1e-3) class PlaceholderTest(test.TestCase): def testDtype(self): with self.test_session(): p = array_ops.placeholder(dtypes_lib.float32, shape=(10, 10), name="p") p_identity = array_ops.identity(p) feed_array = np.random.rand(10, 10) self.assertAllClose( p_identity.eval(feed_dict={p: feed_array}), feed_array) with self.assertRaisesOpError( "must feed a value for placeholder tensor 'p' with dtype float"): p_identity.eval() def testShape(self): with self.test_session(): p = array_ops.placeholder(dtypes_lib.float32, shape=(10, 10), name="p") p_identity = array_ops.identity(p) feed_array = np.random.rand(10, 10) self.assertAllClose( p_identity.eval(feed_dict={p: feed_array}), feed_array) with self.assertRaisesOpError( "must feed a value for placeholder tensor 'p' with dtype float and " r"shape \[10,10\]"): p_identity.eval() with self.assertRaisesWithPredicateMatch( ValueError, lambda e: "Cannot feed value of shape" in str(e)): p_identity.eval(feed_dict={p: feed_array[:5, :5]}) def testUnknownShape(self): with self.test_session(): p = array_ops.placeholder(dtypes_lib.float32, shape=None, name="p") p_identity = array_ops.identity(p) # can feed anything feed_array = np.random.rand(10, 3) self.assertAllClose( p_identity.eval(feed_dict={p: feed_array}), feed_array) feed_array = np.random.rand(4, 2, 5) self.assertAllClose( p_identity.eval(feed_dict={p: feed_array}), feed_array) def testScalarShape(self): with self.test_session(): p = array_ops.placeholder(dtypes_lib.float32, shape=[], name="p") p_identity = array_ops.identity(p) self.assertAllClose(p_identity.eval(feed_dict={p: 5}), 5) def testPartialShape(self): with self.test_session(): p = array_ops.placeholder(dtypes_lib.float32, shape=[None, 3], name="p") p_identity = array_ops.identity(p) feed_array = np.random.rand(10, 3) self.assertAllClose( p_identity.eval(feed_dict={p: feed_array}), feed_array) with self.assertRaisesWithPredicateMatch( ValueError, lambda e: "Cannot feed value of shape" in str(e)): p_identity.eval(feed_dict={p: feed_array[:5, :2]}) def testPartialShapeWhenNotFed(self): with self.test_session(): p = array_ops.placeholder(dtypes_lib.float32, shape=[None, 3], name="p") p_identity = array_ops.identity(p) # Should trigger an operator error, not a shape error. 
with self.assertRaisesOpError( "must feed a value for placeholder tensor 'p' with dtype float"): p_identity.eval() def testControlDependency(self): with self.test_session(): p = array_ops.placeholder(dtypes_lib.int32, shape=[], name="p") with ops.control_dependencies([p]): c = constant_op.constant(5, dtypes_lib.int32) d = math_ops.multiply(p, c) val = np.array(2).astype(np.int) self.assertEqual(10, d.eval(feed_dict={p: val})) def testBadShape(self): with self.assertRaises(ValueError): array_ops.placeholder(dtypes_lib.float32, shape=(-1, 10)) def testTensorStr(self): a = array_ops.placeholder(dtypes_lib.float32, shape=None, name="a") self.assertEqual("<tf.Tensor 'a:0' shape=<unknown> dtype=float32>", repr(a)) b = array_ops.placeholder(dtypes_lib.int32, shape=(32, 40), name="b") self.assertEqual("<tf.Tensor 'b:0' shape=(32, 40) dtype=int32>", repr(b)) c = array_ops.placeholder(dtypes_lib.qint32, shape=(32, None, 2), name="c") self.assertEqual("<tf.Tensor 'c:0' shape=(32, ?, 2) dtype=qint32>", repr(c)) def testOldGraph(self): # Load graph generated from earlier version of TF where # placeholder shape was not set. # # a = tf.placeholder(tf.float32) # b = a + 1.0 # # Older graph's default shape is 'shape {}', not 'shape { # unknown_rank: true }' graph = """ node { name: "Placeholder" op: "Placeholder" attr { key: "dtype" value { type: DT_FLOAT } } attr { key: "shape" value { shape { } } } } node { name: "add/y" op: "Const" attr { key: "dtype" value { type: DT_FLOAT } } attr { key: "value" value { tensor { dtype: DT_FLOAT tensor_shape { } float_val: 1.0 } } } } node { name: "add" op: "Add" input: "Placeholder" input: "add/y" attr { key: "T" value { type: DT_FLOAT } } } versions { producer: 21 } """ gdef = graph_pb2.GraphDef() text_format.Merge(graph, gdef) with self.test_session(): p, ret = importer.import_graph_def( gdef, return_elements=["Placeholder:0", "add:0"]) # Feed in a vector of two elements. Since the producer version # of 21, a shape of {} is interpreted as "any shape". If # producer version were 22, then we'd get a shape mismatch # error. self.assertAllEqual([2.0, 3.0], ret.eval(feed_dict={p: [1.0, 2.0]})) class PlaceholderWithDefaultTest(test.TestCase): def testFullShape(self): with self.test_session(): p = array_ops.placeholder_with_default([[2, 2], [2, 2]], shape=[2, 2]) a = array_ops.identity(p) self.assertAllEqual([[2, 2], [2, 2]], a.eval()) self.assertAllEqual( [[3, 3], [3, 3]], a.eval(feed_dict={p: [[3, 3], [3, 3]]})) with self.assertRaises(ValueError): a.eval(feed_dict={p: [[6, 6, 6], [6, 6, 6]]}) def testPartialShape(self): with self.test_session(): p = array_ops.placeholder_with_default([1, 2, 3], shape=[None]) a = array_ops.identity(p) self.assertAllEqual([1, 2, 3], a.eval()) self.assertAllEqual([3, 37], a.eval(feed_dict={p: [3, 37]})) with self.assertRaises(ValueError): a.eval(feed_dict={p: [[2, 2], [2, 2]]}) def testNoShape(self): with self.test_session(): p = array_ops.placeholder_with_default([17], shape=None) a = array_ops.identity(p) self.assertAllEqual([17], a.eval()) self.assertAllEqual([3, 37], a.eval(feed_dict={p: [3, 37]})) self.assertAllEqual( [[3, 3], [3, 3]], a.eval(feed_dict={p: [[3, 3], [3, 3]]})) def testGradient(self): with self.test_session(): x = array_ops.placeholder(dtypes_lib.float32, [5, 7]) y = array_ops.placeholder_with_default(x, None) err = gradient_checker.compute_gradient_error(x, [5, 7], y, [5, 7]) self.assertLess(err, 1e-3) if __name__ == "__main__": test.main()
from setuptools import setup, find_packages if __name__ == "__main__": setup(name='wtfpython', version='0.2', description='What the f*ck Python!', author="Satwik Kansal", maintainer="Satwik Kansal", maintainer_email='satwikkansal@gmail.com', url='https://github.com/satwikkansal/wtfpython', platforms='any', license="WTFPL 2.0", long_description="An interesting collection of subtle & tricky Python Snippets" " and features.", keywords="wtfpython gotchas snippets tricky", packages=find_packages(), entry_points = { 'console_scripts': ['wtfpython = wtf_python.main:load_and_read'] }, classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Console', 'Environment :: MacOS X', 'Environment :: Win32 (MS Windows)', 'Intended Audience :: Science/Research', 'Intended Audience :: Developers', 'Intended Audience :: Education', 'Intended Audience :: End Users/Desktop', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 2', 'Topic :: Documentation', 'Topic :: Education', 'Topic :: Scientific/Engineering', 'Topic :: Software Development'], )
# Lint as: python3 # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for variance_swaps.""" from absl.testing import parameterized import numpy as np import tensorflow.compat.v2 as tf import tf_quant_finance as tff from tf_quant_finance.black_scholes import variance_swaps from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes class VarianceSwapsTest(parameterized.TestCase, tf.test.TestCase): """Tests for the variance_swaps module.""" @parameterized.named_parameters( { 'testcase_name': 'Puts', 'strikes': np.array([100.0, 90.0, 80.0, 70.0]) }, { 'testcase_name': 'Calls', 'strikes': np.array([100.0, 110.0, 120.0, 130.0]) }) def test_replicating_weights(self, strikes): """Tests ability to match 'hand' calculated variance replicating weights.""" reference_strikes = 100.0 delta_strike = 10.0 expiries = 1.0 # This is the value of (A 4) in Demeterfi et al. payoff_values = 2 * ((strikes - reference_strikes) / reference_strikes - np.log(strikes / reference_strikes)) # This is the value of the ratio term in (A 7) in Demeterfi et al. slope_values = np.diff(payoff_values / delta_strike) # Literal calculation of (A 7/8) for all weights. The library uses # first differences rather than cumsums for efficiency due to algebra. 
expected_weights = [] for v in slope_values: expected_weights.append(v - np.sum(expected_weights)) weights = self.evaluate( variance_swaps.replicating_weights( strikes, reference_strikes, expiries, dtype=tf.float64)) self.assertAllClose(weights, expected_weights, 1e-6) def test_replicating_weights_supports_batching(self): put_strikes = tf.constant([[100, 95, 90, 85]], dtype=np.float64) batch_put_strikes = batch_put_strikes = tf.concat( [put_strikes, put_strikes, 2 * (put_strikes - 100) + 100], axis=0) batch_reference = tf.math.reduce_max(batch_put_strikes, axis=1) batch_expiries = tf.constant([0.25, 0.5, 0.25], dtype=tf.float64) expected_shape = np.array(batch_put_strikes.shape) expected_shape[-1] = expected_shape[-1] - 1 batch_weights = self.evaluate( variance_swaps.replicating_weights( batch_put_strikes, batch_reference, batch_expiries)) self.assertAllEqual(batch_weights.shape, expected_shape) for i in range(3): row_weights = self.evaluate( variance_swaps.replicating_weights( batch_put_strikes[i, :], batch_reference[i], batch_expiries[i])) self.assertAllEqual(row_weights, batch_weights[i, :]) def test_replicating_weights_raises_validation_error(self): strikes = np.array([1, 2, 3, 2, 1]) reference_strike = 3 expiry = 1 with self.assertRaises(tf.errors.InvalidArgumentError): _ = self.evaluate( variance_swaps.replicating_weights( strikes, reference_strike, expiry, validate_args=True, dtype=tf.float64)) @parameterized.named_parameters({ 'testcase_name': 'Demeterfi_et_al', 'call_strikes': np.array([100., 105., 110., 115., 120., 125., 130., 135., 140.]), 'call_weights': np.array([19.63, 36.83, 33.55, 30.69, 28.19, 25.98, 24.02, 22.27]), 'call_volatilities': np.array([0.2, 0.19, 0.18, 0.17, 0.16, 0.15, 0.14, 0.13, np.nan]), 'put_strikes': np.array( [100., 95., 90., 85., 80., 75., 70., 65., 60., 55., 50., 45.]), 'put_weights': np.array([ 20.98, 45., 50.15, 56.23, 63.49, 72.26, 82.98, 96.27, 113.05, 134.63, 163.04 ]), 'put_volatilities': np.array([ 0.2, 0.21, 0.22, 0.23, 0.24, 0.25, 0.26, 0.27, 0.28, 0.29, 0.30, np.nan ]), 'reference_strikes': 100.0, 'expiries': 0.25, 'discount_rates': 0.05, # Paper rounds to 2 dp in places (and variably within columns elsewhere) 'tolerance': 1e-2, 'k_var': 0.20467**2, # Paper works on % scale. }) def test_variance_swap_demeterfi_example(self, call_strikes, call_weights, call_volatilities, put_strikes, put_weights, put_volatilities, reference_strikes, expiries, discount_rates, tolerance, k_var): """Tests ability to match 'hand' calculated variance replicating weights.""" # Paper quotes weights inflated to forward values. 
discount_factor = np.exp(discount_rates * expiries) calculated_call_weights = self.evaluate( variance_swaps.replicating_weights( call_strikes, reference_strikes, expiries, dtype=tf.float64)) matched_call_weights = discount_factor * 100.0**2 * calculated_call_weights self.assertAllClose(matched_call_weights, call_weights, tolerance) calculated_put_weights = self.evaluate( variance_swaps.replicating_weights( put_strikes, reference_strikes, expiries, dtype=tf.float64)) matched_put_weights = discount_factor * 100.0**2 * calculated_put_weights self.assertAllClose(matched_put_weights, put_weights, tolerance) variance_price = self.evaluate( tff.black_scholes.variance_swap_fair_strike( put_strikes, put_volatilities, call_strikes, call_volatilities, expiries, discount_rates, reference_strikes, reference_strikes, dtype=tf.float64)) self.assertAllClose(variance_price, k_var, 1e-2) @parameterized.named_parameters( { 'testcase_name': 'with_validation', 'validate_args': True }, { 'testcase_name': 'without_validation', 'validate_args': False }) def test_variance_swap_fair_strike_supports_batching(self, validate_args): dtype = tf.float64 batch_call_strikes = tf.repeat( tf.expand_dims(tf.range(100, 120, 5, dtype=dtype), 0), 3, axis=0) batch_put_strikes = tf.repeat( tf.expand_dims(tf.range(100, 80, -5, dtype=dtype), 0), 3, axis=0) batch_vols = 0.2 * tf.ones((3, 4), dtype=dtype) batch_shape = (3,) reference_strikes = 100.0 * tf.ones(batch_shape, dtype=dtype) batch_expiries = tf.constant([0.25, 0.5, 1.0], dtype=dtype) discount_rates = 0.05 * tf.ones(batch_shape, dtype=dtype) batch_variance_price = self.evaluate( tff.black_scholes.variance_swap_fair_strike( batch_put_strikes, batch_vols, batch_call_strikes, batch_vols, batch_expiries, discount_rates, reference_strikes, reference_strikes, validate_args=validate_args, dtype=dtype)) self.assertEqual(batch_variance_price.shape, batch_shape) for i in range(3): row_variance_price = self.evaluate( tff.black_scholes.variance_swap_fair_strike( batch_put_strikes[i, :], batch_vols[i, :], batch_call_strikes[i, :], batch_vols[i, :], batch_expiries[i], discount_rates[i], reference_strikes[i], reference_strikes[i], dtype=tf.float64)) self.assertAllEqual(row_variance_price, batch_variance_price[i]) def test_variance_swap_fair_strike_raises_validation_error(self): dtype = tf.float64 # Mismatching shapes for strikes and vols. strikes = tf.ones((3, 2), dtype=dtype) vols = tf.ones((3, 4), dtype=dtype) reference_strike = 1.0 discount_rate = 0.0 expiry = 1.0 with self.assertRaises(tf.errors.InvalidArgumentError): _ = self.evaluate( tff.black_scholes.variance_swap_fair_strike( strikes, vols, strikes, vols, expiry, discount_rate, reference_strike, reference_strike, validate_args=True, dtype=dtype)) if __name__ == '__main__': tf.test.main()
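# --------------------------------------------------------------------------
# Illustrative sketch (not part of the test file above): a standalone NumPy
# restatement of the "hand" calculation that test_replicating_weights checks
# against, i.e. the literal reading of (A 4) and (A 7/8) in Demeterfi et al.
# for the expiries = 1.0 case. The strike grid and reference strike below are
# arbitrary example values.
import numpy as np

def demeterfi_replicating_weights(strikes, reference_strike, delta_strike):
    # Log-contract payoff at each strike, eq. (A 4), with the 2/T factor at T = 1.
    payoff = 2 * ((strikes - reference_strike) / reference_strike
                  - np.log(strikes / reference_strike))
    # Slope of the payoff between adjacent strikes, the ratio term in (A 7).
    slopes = np.diff(payoff / delta_strike)
    # Each option weight is the incremental slope, eq. (A 7/8).
    weights = []
    for slope in slopes:
        weights.append(slope - np.sum(weights))
    return np.array(weights)

print(demeterfi_replicating_weights(np.array([100.0, 110.0, 120.0, 130.0]), 100.0, 10.0))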
# -*- coding: utf-8 -*- # File: crop.py import numpy as np import cv2 from ...utils.argtools import shape2d from .base import ImageAugmentor from .transform import CropTransform, TransformAugmentorBase from .misc import ResizeShortestEdge __all__ = ['RandomCrop', 'CenterCrop', 'RandomCropRandomShape', 'GoogleNetRandomCropAndResize'] class RandomCrop(TransformAugmentorBase): """ Randomly crop the image into a smaller one """ def __init__(self, crop_shape): """ Args: crop_shape: (h, w) tuple or a int """ crop_shape = shape2d(crop_shape) super(RandomCrop, self).__init__() self._init(locals()) def _get_augment_params(self, img): orig_shape = img.shape assert orig_shape[0] >= self.crop_shape[0] \ and orig_shape[1] >= self.crop_shape[1], orig_shape diffh = orig_shape[0] - self.crop_shape[0] h0 = 0 if diffh == 0 else self.rng.randint(diffh) diffw = orig_shape[1] - self.crop_shape[1] w0 = 0 if diffw == 0 else self.rng.randint(diffw) return CropTransform(h0, w0, self.crop_shape[0], self.crop_shape[1]) class CenterCrop(TransformAugmentorBase): """ Crop the image at the center""" def __init__(self, crop_shape): """ Args: crop_shape: (h, w) tuple or a int """ crop_shape = shape2d(crop_shape) self._init(locals()) def _get_augment_params(self, img): orig_shape = img.shape assert orig_shape[0] >= self.crop_shape[0] \ and orig_shape[1] >= self.crop_shape[1], orig_shape h0 = int((orig_shape[0] - self.crop_shape[0]) * 0.5) w0 = int((orig_shape[1] - self.crop_shape[1]) * 0.5) return CropTransform(h0, w0, self.crop_shape[0], self.crop_shape[1]) class RandomCropRandomShape(TransformAugmentorBase): """ Random crop with a random shape""" def __init__(self, wmin, hmin, wmax=None, hmax=None, max_aspect_ratio=None): """ Randomly crop a box of shape (h, w), sampled from [min, max] (both inclusive). If max is None, will use the input image shape. Args: wmin, hmin, wmax, hmax: range to sample shape. max_aspect_ratio (float): the upper bound of ``max(w,h)/min(w,h)``. """ if max_aspect_ratio is None: max_aspect_ratio = 9999999 self._init(locals()) def _get_augment_params(self, img): hmax = self.hmax or img.shape[0] wmax = self.wmax or img.shape[1] h = self.rng.randint(self.hmin, hmax + 1) w = self.rng.randint(self.wmin, wmax + 1) diffh = img.shape[0] - h diffw = img.shape[1] - w assert diffh >= 0 and diffw >= 0 y0 = 0 if diffh == 0 else self.rng.randint(diffh) x0 = 0 if diffw == 0 else self.rng.randint(diffw) return CropTransform(y0, x0, h, w) class GoogleNetRandomCropAndResize(ImageAugmentor): """ The random crop and resize augmentation proposed in Sec. 6 of `Going Deeper with Convolutions` by Google. This implementation follows the details in `fb.resnet.torch`. It attempts to crop a random rectangle with 8%~100% area of the original image, and keep the aspect ratio between 3/4 to 4/3. Then it resize this crop to the target shape. If such crop cannot be found in 10 iterations, it will to a ResizeShortestEdge + CenterCrop. """ def __init__(self, crop_area_fraction=(0.08, 1.), aspect_ratio_range=(0.75, 1.333), target_shape=224, interp=cv2.INTER_LINEAR): """ Args: crop_area_fraction (tuple(float)): Defaults to crop 8%-100% area. aspect_ratio_range (tuple(float)): Defaults to make aspect ratio in 3/4-4/3. target_shape (int): Defaults to 224, the standard ImageNet image shape. 
""" self._init(locals()) def _augment(self, img, _): h, w = img.shape[:2] area = h * w for _ in range(10): targetArea = self.rng.uniform(*self.crop_area_fraction) * area aspectR = self.rng.uniform(*self.aspect_ratio_range) ww = int(np.sqrt(targetArea * aspectR) + 0.5) hh = int(np.sqrt(targetArea / aspectR) + 0.5) if self.rng.uniform() < 0.5: ww, hh = hh, ww if hh <= h and ww <= w: x1 = 0 if w == ww else self.rng.randint(0, w - ww) y1 = 0 if h == hh else self.rng.randint(0, h - hh) out = img[y1:y1 + hh, x1:x1 + ww] out = cv2.resize(out, (self.target_shape, self.target_shape), interpolation=self.interp) return out out = ResizeShortestEdge(self.target_shape, interp=cv2.INTER_CUBIC).augment(img) out = CenterCrop(self.target_shape).augment(out) return out def _augment_coords(self, coords, param): raise NotImplementedError()
import json,time import numpy as np import pandas as pd import os, subprocess import argparse from collections import OrderedDict import operator parser = argparse.ArgumentParser(description='Shape the answer') parser.add_argument('--nbest_path', type=str, help='location of nbest_predictions.json') parser.add_argument('--output_path', type=str, help='location of nbest_predictions.json') args = parser.parse_args() def textrip(text): if text=="": return text if text[-1]==',' or text[-1]=='.' or text[-1]==' ': return text[:-1] if len(text)>2 and text[0]=='(' and text[-1]==')': if text.count('(')==1 and text.count(')')==1: return text[1:-1] if ('(' in text) and (')' not in text): return "" if ('(' not in text) and (')' in text): return "" return text ### Setting basic strings #### Info : This script is only for factoid question #### Checking nbest_BioASQ-test prediction.json if not os.path.exists(args.nbest_path): print("No file exists!\n#### Fatal Error : Abort!") raise #### Reading Pred File with open(args.nbest_path, "r") as reader: test=json.load(reader) qidDict=dict() if True: for multiQid in test: print(len(multiQid)) #assert len(multiQid)==(24+4) # all multiQid should have length of 24 + 3 if not multiQid[:-4] in qidDict: qidDict[multiQid[:-4]]=[test[multiQid]] else : qidDict[multiQid[:-4]].append(test[multiQid]) else: # single output qidDict={qid:[test[qid]] for qid in test} entryList=[] entryListWithProb=[] # for ~ : TODO : multi input for qid in qidDict: jsonList=[] for jsonele in qidDict[qid]: # value of qidDict is a list jsonList+=jsonele #if not args.multi_output: qidDf=pd.DataFrame().from_dict(jsonList) #else: # args.multi_output==True sortedDf=qidDf.sort_values(by='probability', axis=0, ascending=False) sortedSumDict=OrderedDict() sortedSumDictKeyDict=dict() # key : noramlized key for index in sortedDf.index: text=sortedDf.iloc[index]["text"] text=textrip(text) if text=="": pass elif len(text)>100: pass elif text.lower() in sortedSumDictKeyDict: sortedSumDict[sortedSumDictKeyDict[text.lower()]] += sortedDf.iloc[index]["probability"] else: sortedSumDictKeyDict[text.lower()]=text sortedSumDict[sortedSumDictKeyDict[text.lower()]] = sortedDf.iloc[index]["probability"] finalSorted=sorted(sortedSumDict.items(), key=operator.itemgetter(1), reverse=True) # for python 2, use sortedSumDict.iteritems() instead of sortedSumDict.items() entry={u"type":"factoid", #u"body":qas, u"id":qid, # must be 24 char u"ideal_answer":["Dummy"], u"exact_answer":[[ans[0]] for ans in finalSorted[:5]], # I think enough? } entryList.append(entry) entryWithProb={u"type":"factoid", u"id":qid, # must be 24 char u"ideal_answer":["Dummy"], u"exact_answer":[ans for ans in finalSorted[:20]], } entryListWithProb.append(entryWithProb) finalformat={u'questions':entryList} finalformatWithProb={u'questions':entryListWithProb} if os.path.isdir(args.output_path): outfilepath=os.path.join(args.output_path, "BioASQform_BioASQ-answer.json") outWithProbfilepath=os.path.join(args.output_path, "WithProb_BioASQform_BioASQ-answer.json") else: outfilepath=args.output_path outWithProbfilepath=args.output_path+"_WithProb" with open(outfilepath, "w") as outfile: json.dump(finalformat, outfile, indent=2) with open(outWithProbfilepath, "w") as outfile_prob: json.dump(finalformatWithProb, outfile_prob, indent=2)
from unittest import TestCase from pytezos import pytezos class CallbackViewTestCase(TestCase): def test_balance_of(self): usds = pytezos.using('mainnet').contract('KT1REEb5VxWRjcHm5GzDMwErMmNFftsE5Gpf') res = usds.balance_of(requests=[ {'owner': 'tz1PNsHbJRejCnnYzbsQ1CR8wUdEQqVjWen1', 'token_id': 0}, {'owner': 'tz1i2tE6hic2ASe9Kvy85ar5hGSSc58bYejT', 'token_id': 0}, {'owner': 'tz2QegZQXyz8b74iTdaqKsGRF7YQb88Wu9CS', 'token_id': 0} ], callback=None).callback_view() print(res)
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, Callable, Dict, Generic, Optional, TypeVar import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.mgmt.core.exceptions import ARMErrorFormat from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class DeviceOperations: """DeviceOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.security.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config async def get( self, resource_id: str, device_id: str, **kwargs ) -> "_models.Device": """Get device. :param resource_id: The identifier of the resource. :type resource_id: str :param device_id: Identifier of the device. 
:type device_id: str :keyword callable cls: A custom type or function that will be passed the direct response :return: Device, or the result of cls(response) :rtype: ~azure.mgmt.security.models.Device :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.Device"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-08-06-preview" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True), 'deviceId': self._serialize.url("device_id", device_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('Device', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/{resourceId}/providers/Microsoft.Security/devices/{deviceId}'} # type: ignore
#!/usr/bin/python #================================================================================# # ADS-B FEEDER PORTAL # # ------------------------------------------------------------------------------ # # Copyright and Licensing Information: # # # # The MIT License (MIT) # # # # Copyright (c) 2015-2016 Joseph A. Prochazka # # # # Permission is hereby granted, free of charge, to any person obtaining a copy # # of this software and associated documentation files (the "Software"), to deal # # in the Software without restriction, including without limitation the rights # # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # # copies of the Software, and to permit persons to whom the Software is # # furnished to do so, subject to the following conditions: # # # # The above copyright notice and this permission notice shall be included in all # # copies or substantial portions of the Software. # # # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # # SOFTWARE. # #================================================================================# import json import os import datetime import time while True: ## Read the configuration file. with open(os.path.dirname(os.path.realpath(__file__)) + '/config.json') as config_file: config = json.load(config_file) ## Import the needed database library and set up database connection. if config["database"]["type"] == "mysql": import MySQLdb db = MySQLdb.connect(host=config["database"]["host"], user=config["database"]["user"], passwd=config["database"]["passwd"], db=config["database"]["db"]) if config["database"]["type"] == "sqlite": import sqlite3 db = sqlite3.connect(config["database"]["db"]) cursor = db.cursor() ## Get maintenance settings. 
purge_aircraft = False # MySQL if config["database"]["type"] == "mysql": cursor.execute("SELECT value FROM adsb_settings WHERE name = %s", ("purgeAircraft",)) # SQLite if config["database"]["type"] == "sqlite": params = ("purgeAircraft",) cursor.execute("SELECT value FROM adsb_settings WHERE name = ?", params) row = cursor.fetchone() purge_aircraft = row[0] if row else False purge_flights = False # MySQL if config["database"]["type"] == "mysql": cursor.execute("SELECT value FROM adsb_settings WHERE name = %s", ("purgeFlights",)) # SQLite if config["database"]["type"] == "sqlite": params = ("purgeFlights",) cursor.execute("SELECT value FROM adsb_settings WHERE name = ?", params) row = cursor.fetchone() if row: purge_flights = row[0] purge_positions = False # MySQL if config["database"]["type"] == "mysql": cursor.execute("SELECT value FROM adsb_settings WHERE name = %s", ("purgePositions",)) # SQLite if config["database"]["type"] == "sqlite": params = ("purgePositions",) cursor.execute("SELECT value FROM adsb_settings WHERE name = ?", params) row = cursor.fetchone() if row: purge_positions = row[0] purge_days_old = False # MySQL if config["database"]["type"] == "mysql": cursor.execute("SELECT value FROM adsb_settings WHERE name = %s", ("purgeDaysOld",)) # SQLite if config["database"]["type"] == "sqlite": params = ("purgeDaysOld",) cursor.execute("SELECT value FROM adsb_settings WHERE name = ?", params) row = cursor.fetchone() purge_days_old = row[0] if row else False ## Create the purge date from the age specified. if purge_days_old: purge_datetime = datetime.datetime.utcnow() - datetime.timedelta(days=float(purge_days_old)) purge_date = purge_datetime.strftime("%Y/%m/%d %H:%M:%S") else: purge_datetime = None purge_date = None ## Remove aircraft not seen since the specified date. if purge_aircraft and purge_date: # MySQL if config["database"]["type"] == "mysql": cursor.execute("SELECT id FROM adsb_aircraft WHERE lastSeen < %s", (purge_date,)) rows = cursor.fetchall() for row in rows: cursor.execute("DELETE FROM adsb_positions WHERE aircraft = %s", (row[0],)) cursor.execute("DELETE FROM adsb_flights WHERE aircraft = %s", (row[0],)) cursor.execute("DELETE FROM adsb_aircraft WHERE id = %s", (row[0],)) # SQLite if config["database"]["type"] == "sqlite": params = (purge_date,) cursor.execute("SELECT id FROM adsb_aircraft WHERE lastSeen < ?", params) rows = cursor.fetchall() for row in rows: params = (row[0],) cursor.execute("DELETE FROM adsb_positions WHERE aircraft = ?", params) cursor.execute("DELETE FROM adsb_flights WHERE aircraft = ?", params) cursor.execute("DELETE FROM adsb_aircraft WHERE id = ?", params) ## Remove flights not seen since the specified date. if purge_flights and purge_date: # MySQL if config["database"]["type"] == "mysql": cursor.execute("SELECT id FROM adsb_flights WHERE lastSeen < %s", (purge_date,)) rows = cursor.fetchall() for row in rows: cursor.execute("DELETE FROM adsb_positions WHERE flight = %s", (row[0],)) cursor.execute("DELETE FROM adsb_flights WHERE id = %s", (row[0],)) #SQLite if config["database"]["type"] == "sqlite": params = (purge_date,) cursor.execute("SELECT id FROM adsb_flights WHERE lastSeen < ?", params) rows = cursor.fetchall() for row in rows: params = (row[0],) cursor.execute("DELETE FROM adsb_positions WHERE flight = ?", params) cursor.execute("DELETE FROM adsb_flights WHERE id = ?", params) ## Remove positions older than the specified date.
if purge_positions and purge_date: # MySQL if config["database"]["type"] == "mysql": cursor.execute("DELETE FROM adsb_positions WHERE time < %s", (purge_date,)) #SQLite if config["database"]["type"] == "sqlite": params = (purge_date,) cursor.execute("DELETE FROM adsb_positions WHERE time < ?", params) ## Close the database connection. db.commit() db.close() ## Sleep until the next run. time.sleep(3600)
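# --------------------------------------------------------------------------
# Illustrative sketch (not part of the script above): the minimal shape of the
# config.json this maintenance loop reads. The keys mirror the lookups in the
# code; every value here is a placeholder.
import json

example_config = {
    "database": {
        "type": "sqlite",        # "mysql" or "sqlite"
        "host": "localhost",     # used only for MySQL
        "user": "adsb",          # used only for MySQL
        "passwd": "changeme",    # used only for MySQL
        "db": "adsb.db"          # MySQL database name, or SQLite file path
    }
}

print(json.dumps(example_config, indent=4))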
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for keras.layers.preprocessing.normalization.""" import tensorflow.compat.v2 as tf import os from absl.testing import parameterized import numpy as np import keras from keras import keras_parameterized from keras import testing_utils from keras.layers.preprocessing import normalization from keras.layers.preprocessing import preprocessing_test_utils from keras.utils.generic_utils import CustomObjectScope def _get_layer_computation_test_cases(): test_cases = ({ "adapt_data": np.array([[1.], [2.], [3.], [4.], [5.]], dtype=np.float32), "axis": -1, "test_data": np.array([[1.], [2.], [3.]], np.float32), "expected": np.array([[-1.414214], [-.707107], [0]], np.float32), "testcase_name": "2d_single_element" }, { "adapt_data": np.array([[1], [2], [3], [4], [5]], dtype=np.int32), "axis": -1, "test_data": np.array([[1], [2], [3]], np.int32), "expected": np.array([[-1.414214], [-.707107], [0]], np.float32), "testcase_name": "2d_int_data" }, { "adapt_data": np.array([[1.], [2.], [3.], [4.], [5.]], dtype=np.float32), "axis": None, "test_data": np.array([[1.], [2.], [3.]], np.float32), "expected": np.array([[-1.414214], [-.707107], [0]], np.float32), "testcase_name": "2d_single_element_none_axis" }, { "adapt_data": np.array([[1., 2., 3., 4., 5.]], dtype=np.float32), "axis": None, "test_data": np.array([[1.], [2.], [3.]], np.float32), "expected": np.array([[-1.414214], [-.707107], [0]], np.float32), "testcase_name": "2d_single_element_none_axis_flat_data" }, { "adapt_data": np.array([[[1., 2., 3.], [2., 3., 4.]], [[3., 4., 5.], [4., 5., 6.]]], np.float32), "axis": 1, "test_data": np.array([[[1., 2., 3.], [2., 3., 4.]], [[3., 4., 5.], [4., 5., 6.]]], np.float32), "expected": np.array([[[-1.549193, -0.774597, 0.], [-1.549193, -0.774597, 0.]], [[0., 0.774597, 1.549193], [0., 0.774597, 1.549193]]], np.float32), "testcase_name": "3d_internal_axis" }, { "adapt_data": np.array( [[[1., 0., 3.], [2., 3., 4.]], [[3., -1., 5.], [4., 5., 8.]]], np.float32), "axis": (1, 2), "test_data": np.array( [[[3., 1., -1.], [2., 5., 4.]], [[3., 0., 5.], [2., 5., 8.]]], np.float32), "expected": np.array( [[[1., 3., -5.], [-1., 1., -1.]], [[1., 1., 1.], [-1., 1., 1.]]], np.float32), "testcase_name": "3d_multiple_axis" }, { "adapt_data": np.zeros((3, 4)), "axis": -1, "test_data": np.zeros((3, 4)), "expected": np.zeros((3, 4)), "testcase_name": "zero_variance" }) crossed_test_cases = [] # Cross above test cases with use_dataset in (True, False) for use_dataset in (True, False): for case in test_cases: case = case.copy() if use_dataset: case["testcase_name"] = case["testcase_name"] + "_with_dataset" case["use_dataset"] = use_dataset crossed_test_cases.append(case) return crossed_test_cases @keras_parameterized.run_all_keras_modes class NormalizationTest(keras_parameterized.TestCase, preprocessing_test_utils.PreprocessingLayerTest): def 
test_broadcasting_during_direct_setting(self): layer = normalization.Normalization(axis=-1, mean=[1.0], variance=[1.0]) output = layer(np.array([[1., 2.]])) expected_output = [[0., 1.]] self.assertAllClose(output, expected_output) self.assertAllClose(layer.get_weights(), []) def test_broadcasting_during_direct_setting_with_tensors(self): if not tf.executing_eagerly(): self.skipTest("Only supported in TF2.") layer = normalization.Normalization( axis=-1, mean=tf.constant([1.0]), variance=tf.constant([1.0])) output = layer(np.array([[1., 2.]])) expected_output = [[0., 1.]] self.assertAllClose(output, expected_output) self.assertAllClose(layer.get_weights(), []) def test_broadcasting_during_direct_setting_with_variables_fails(self): with self.assertRaisesRegex(ValueError, "passing a Variable"): _ = normalization.Normalization( axis=-1, mean=tf.Variable([1.0]), variance=tf.Variable([2.0])) @parameterized.parameters( {"axis": 0}, {"axis": (-1, 0)}, ) def test_zeros_fail_init(self, axis): with self.assertRaisesRegex(ValueError, "The argument 'axis' may not be 0."): normalization.Normalization(axis=axis) @parameterized.parameters( # Out of bounds {"axis": 3}, {"axis": -3}, # In a tuple {"axis": (1, 3)}, {"axis": (1, -3)}, ) def test_bad_axis_fail_build(self, axis): layer = normalization.Normalization(axis=axis) with self.assertRaisesRegex(ValueError, r"in the range"): layer.build([None, 2, 3]) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) class NormalizationAdaptTest(keras_parameterized.TestCase, preprocessing_test_utils.PreprocessingLayerTest): def test_layer_api_compatibility(self): cls = normalization.Normalization with CustomObjectScope({"Normalization": cls}): output_data = testing_utils.layer_test( cls, kwargs={"axis": -1}, input_shape=(None, 3), input_data=np.array([[3, 1, 2], [6, 5, 4]], dtype=np.float32), validate_training=False, adapt_data=np.array([[1, 2, 1], [2, 3, 4], [1, 2, 1], [2, 3, 4]])) expected = np.array([[3., -3., -0.33333333], [9., 5., 1.]]) self.assertAllClose(expected, output_data) @parameterized.named_parameters(*_get_layer_computation_test_cases()) def test_layer_computation(self, adapt_data, axis, test_data, use_dataset, expected): input_shape = tuple([test_data.shape[i] for i in range(1, test_data.ndim)]) if use_dataset: # Keras APIs expect batched datasets adapt_data = tf.data.Dataset.from_tensor_slices(adapt_data).batch( test_data.shape[0] // 2) test_data = tf.data.Dataset.from_tensor_slices(test_data).batch( test_data.shape[0] // 2) layer = normalization.Normalization(axis=axis) layer.adapt(adapt_data) input_data = keras.Input(shape=input_shape) output = layer(input_data) model = keras.Model(input_data, output) model._run_eagerly = testing_utils.should_run_eagerly() output_data = model.predict(test_data) self.assertAllClose(expected, output_data) def test_1d_data(self): data = [0, 2, 0, 2] layer = normalization.Normalization(axis=-1) layer.adapt(data) output = layer(data) self.assertListEqual(output.shape.as_list(), [4, 1]) if tf.executing_eagerly(): self.assertAllClose(output.numpy(), [[-1], [1], [-1], [1]]) def test_0d_data(self): if not tf.executing_eagerly(): self.skipTest("Only supported in TF2.") data = [0, 2, 0, 2] layer = normalization.Normalization(axis=-1) layer.adapt(data) output = layer(0.) self.assertListEqual(output.shape.as_list(), [1, 1]) self.assertAllClose(output.numpy(), [[-1]]) @parameterized.parameters( # Results should be identical no matter how the axes are specified (3d). 
{"axis": (1, 2)}, {"axis": (2, 1)}, {"axis": (1, -1)}, {"axis": (-1, 1)}, ) def test_axis_permutations(self, axis): layer = normalization.Normalization(axis=axis) # data.shape = [2, 2, 3] data = np.array([[[0., 1., 2.], [0., 2., 6.]], [[2., 3., 4.], [3., 6., 10.]]]) expect = np.array([[[-1., -1., -1.], [-1., -1., -1.]], [[1., 1., 1.], [1., 1., 1.]]]) layer.adapt(data) self.assertAllClose(expect, layer(data)) def test_model_summary_after_layer_adapt(self): data = np.array([[[0., 1., 2.], [0., 2., 6.]], [[2., 3., 4.], [3., 6., 10.]]]) layer = normalization.Normalization(axis=-1) layer.adapt(data) model = keras.Sequential( [layer, keras.layers.Dense(64, activation="relu"), keras.layers.Dense(1)]) model.summary() def test_merge_state(self): data = np.random.rand(30, 10, 2) ds = tf.data.Dataset.from_tensor_slices(data).batch(2) norm = normalization.Normalization(axis=(1, 2)) norm.adapt(ds) partial_ds_1 = ds.shard(3, 0) partial_ds_2 = ds.shard(3, 1) partial_ds_3 = ds.shard(3, 2) norm_1 = normalization.Normalization(axis=(1, 2)) norm_2 = normalization.Normalization(axis=(1, 2)) norm_3 = normalization.Normalization(axis=(1, 2)) norm_1.adapt(partial_ds_1) norm_2.adapt(partial_ds_2) norm_3.adapt(partial_ds_3) norm_1.merge_state([norm_2, norm_3]) merged_norm = norm_1 self.assertAllClose(norm(data), merged_norm(data)) def test_multiple_adapts(self): first_adapt = [[0], [2], [0], [2]] second_adapt = [[2], [4], [2], [4]] predict_input = [[2], [2]] expected_first_output = [[1], [1]] expected_second_output = [[-1], [-1]] inputs = keras.Input(shape=(1,), dtype=tf.int32) layer = normalization.Normalization(axis=-1) layer.adapt(first_adapt) outputs = layer(inputs) model = keras.Model(inputs=inputs, outputs=outputs) actual_output = model.predict(predict_input) self.assertAllClose(actual_output, expected_first_output) # Re-adapt the layer on new inputs. layer.adapt(second_adapt) # Re-compile the model. model.compile() # `predict` should now use the new model state. actual_output = model.predict(predict_input) self.assertAllClose(actual_output, expected_second_output) @parameterized.parameters( {"adapted": True}, {"adapted": False}, ) def test_saved_model_tf(self, adapted): input_data = [[0.], [2.], [0.], [2.]] expected_output = [[-1.], [1.], [-1.], [1.]] inputs = keras.Input(shape=(1,), dtype=tf.float32) if adapted: layer = normalization.Normalization(axis=-1) layer.adapt(input_data) else: layer = normalization.Normalization(mean=1., variance=1.) outputs = layer(inputs) model = keras.Model(inputs=inputs, outputs=outputs) output_data = model.predict(input_data) self.assertAllClose(output_data, expected_output) # Save the model to disk. output_path = os.path.join(self.get_temp_dir(), "tf_saved_model") tf.saved_model.save(model, output_path) loaded_model = tf.saved_model.load(output_path) f = loaded_model.signatures["serving_default"] # Ensure that the loaded model is unique (so that the save/load is real) self.assertIsNot(model, loaded_model) # Validate correctness of the new model. new_output_data = f(tf.constant(input_data))["normalization"] self.assertAllClose(new_output_data, expected_output) @parameterized.parameters( {"adapted": True}, {"adapted": False}, ) def test_saved_model_keras(self, adapted): input_data = [[0.], [2.], [0.], [2.]] expected_output = [[-1.], [1.], [-1.], [1.]] cls = normalization.Normalization inputs = keras.Input(shape=(1,), dtype=tf.float32) if adapted: layer = cls(axis=-1) layer.adapt(input_data) else: layer = cls(mean=1., variance=1.) 
outputs = layer(inputs) model = keras.Model(inputs=inputs, outputs=outputs) output_data = model.predict(input_data) self.assertAllClose(output_data, expected_output) # Save the model to disk. output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model") model.save(output_path, save_format="tf") loaded_model = keras.models.load_model( output_path, custom_objects={"Normalization": cls}) # Ensure that the loaded model is unique (so that the save/load is real) self.assertIsNot(model, loaded_model) # Validate correctness of the new model. new_output_data = loaded_model.predict(input_data) self.assertAllClose(new_output_data, expected_output) @parameterized.parameters( {"adapted": True}, {"adapted": False}, ) def test_saved_weights_keras(self, adapted): input_data = [[0.], [2.], [0.], [2.]] expected_output = [[-1.], [1.], [-1.], [1.]] cls = normalization.Normalization inputs = keras.Input(shape=(1,), dtype=tf.float32) if adapted: layer = cls(axis=-1) layer.adapt(input_data) else: layer = cls(mean=1., variance=1.) outputs = layer(inputs) model = keras.Model(inputs=inputs, outputs=outputs) output_data = model.predict(input_data) self.assertAllClose(output_data, expected_output) # Save the model to disk. output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_weights") model.save_weights(output_path, save_format="tf") new_model = keras.Model.from_config( model.get_config(), custom_objects={"Normalization": cls}) new_model.load_weights(output_path) # Validate correctness of the new model. new_output_data = new_model.predict(input_data) self.assertAllClose(new_output_data, expected_output) if __name__ == "__main__": tf.test.main()
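# --------------------------------------------------------------------------
# Illustrative sketch (not part of the test file above): the arithmetic these
# tests exercise is plain feature-wise standardization, (x - mean) / sqrt(var).
# Reproducing the [[0.], [2.], [0.], [2.]] -> [[-1.], [1.], [-1.], [1.]] case
# by hand with NumPy:
import numpy as np

data = np.array([[0.], [2.], [0.], [2.]])
mean, variance = data.mean(axis=0), data.var(axis=0)
print((data - mean) / np.sqrt(variance))  # [[-1.], [1.], [-1.], [1.]]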
#!/usr/bin/env python3 # Copyright (c) 2014-2017 Wladimir J. van der Laan # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. ''' Script to generate list of seed nodes for chainparams.cpp. This script expects two text files in the directory that is passed as an argument: nodes_main.txt nodes_test.txt These files must consist of lines in the format <ip> <ip>:<port> [<ipv6>] [<ipv6>]:<port> <onion>.onion 0xDDBBCCAA (IPv4 little-endian old pnSeeds format) The output will be two data structures with the peers in binary format: static SeedSpec6 pnSeed6_main[]={ ... } static SeedSpec6 pnSeed6_test[]={ ... } These should be pasted into `src/chainparamsseeds.h`. ''' from base64 import b32decode from binascii import a2b_hex import sys, os import re # ipv4 in ipv6 prefix pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff]) # tor-specific ipv6 prefix pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43]) def name_to_ipv6(addr): if len(addr)>6 and addr.endswith('.onion'): vchAddr = b32decode(addr[0:-6], True) if len(vchAddr) != 16-len(pchOnionCat): raise ValueError('Invalid onion %s' % s) return pchOnionCat + vchAddr elif '.' in addr: # IPv4 return pchIPv4 + bytearray((int(x) for x in addr.split('.'))) elif ':' in addr: # IPv6 sub = [[], []] # prefix, suffix x = 0 addr = addr.split(':') for i,comp in enumerate(addr): if comp == '': if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end continue x += 1 # :: skips to suffix assert(x < 2) else: # two bytes per component val = int(comp, 16) sub[x].append(val >> 8) sub[x].append(val & 0xff) nullbytes = 16 - len(sub[0]) - len(sub[1]) assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0)) return bytearray(sub[0] + ([0] * nullbytes) + sub[1]) elif addr.startswith('0x'): # IPv4-in-little-endian return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:]))) else: raise ValueError('Could not parse address %s' % addr) def parse_spec(s, defaultport): match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s) if match: # ipv6 host = match.group(1) port = match.group(2) elif s.count(':') > 1: # ipv6, no port host = s port = '' else: (host,_,port) = s.partition(':') if not port: port = defaultport else: port = int(port) host = name_to_ipv6(host) return (host,port) def process_nodes(g, f, structname, defaultport): g.write('static SeedSpec6 %s[] = {\n' % structname) first = True for line in f: comment = line.find('#') if comment != -1: line = line[0:comment] line = line.strip() if not line: continue if not first: g.write(',\n') first = False (host,port) = parse_spec(line, defaultport) hoststr = ','.join(('0x%02x' % b) for b in host) g.write(' {{%s}, %i}' % (hoststr, port)) g.write('\n};\n') def main(): if len(sys.argv)<2: print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr) exit(1) g = sys.stdout indir = sys.argv[1] g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n') g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n') g.write('/**\n') g.write(' * List of fixed seed nodes for the viacoin network\n') g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n') g.write(' *\n') g.write(' * Each line contains a 16-byte IPv6 address and a port.\n') g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n') g.write(' */\n') with open(os.path.join(indir,'nodes_main.txt'),'r') as f: process_nodes(g, f, 'pnSeed6_main', 5223) g.write('\n') with open(os.path.join(indir,'nodes_test.txt'),'r') as f: 
process_nodes(g, f, 'pnSeed6_test', 25223) g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n') if __name__ == '__main__': main()
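# --------------------------------------------------------------------------
# Worked examples (not part of the original script) of what the helpers above
# return for the address formats listed in the module docstring; the addresses
# and the default port are arbitrary illustrations.
#
#   parse_spec('1.2.3.4', 5223)            -> (IPv4 mapped into an IPv6 bytearray, 5223)
#   parse_spec('[2001:db8::1]:8333', 5223) -> (16-byte IPv6 bytearray, 8333)
#   parse_spec('0x0100007f', 5223)         -> little-endian hex IPv4, i.e. 127.0.0.1
#
# The script itself is typically run with the seeds directory as its argument
# and stdout redirected into the header it generates, e.g.:
#   python3 generate-seeds.py contrib/seeds > src/chainparamsseeds.h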
# Generated by Django 2.1.7 on 2020-06-12 02:05 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('api', '0002_speech2text'), ('api', '0002_project_single_class_classification'), ] operations = [ ]
from emmental._version import __version__ from emmental.meta import Meta, init __all__ = ["__version__", "Meta", "init"]
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. { 'name': 'Recurring Documents', 'category': 'Extra Tools', 'description': """ Create recurring documents. =========================== This module allows to create new documents and add subscriptions on that document. e.g. To have an invoice generated automatically periodically: ------------------------------------------------------------- * Define a document type based on Invoice object * Define a subscription whose source document is the document defined as above. Specify the interval information and partner to be invoiced. """, 'depends': ['base'], 'data': [ 'security/ir.model.access.csv', 'views/subscription_view.xml' ], 'demo': ['data/subscription_demo.xml'], }
import numpy as np import os # try to load io libraries (h5py and z5py) try: import h5py WITH_H5PY = True except ImportError: WITH_H5PY = False try: import z5py WITH_Z5PY = True except ImportError: WITH_Z5PY = False from ..core.base import SyncableDataset from ..core.base import IndexSpec from . import volumetric_utils as vu from ...utils import python_utils as pyu class LazyVolumeLoaderBase(SyncableDataset): def __init__(self, dataset, window_size, stride, downsampling_ratio=None, padding=None, padding_mode='reflect', transforms=None, return_index_spec=False, name=None, data_slice=None): super(LazyVolumeLoaderBase, self).__init__() assert len(window_size) == dataset.ndim, "%i, %i" % (len(window_size), dataset.ndim) assert len(stride) == dataset.ndim # Validate transforms assert transforms is None or callable(transforms) self.name = name self.return_index_spec = return_index_spec self.dataset = dataset self.window_size = window_size self.stride = stride self.padding_mode = padding_mode self.transforms = transforms # slicing and padding self.data_slice = self.normalize_slice(data_slice) self.padding = padding # DataloaderIter should do the shuffling self.shuffle = False # compute the shape self.shape = self.get_shape() self._data_shape = tuple(dsl.stop - dsl.start for dsl in self.data_slice)\ if self.data_slice is not None else self.dataset.shape if downsampling_ratio is None: self.downsampling_ratio = [1] * self.dataset.ndim elif isinstance(downsampling_ratio, int): self.downsampling_ratio = [downsampling_ratio] * self.dataset.ndim elif isinstance(downsampling_ratio, (list, tuple)): assert len(downsampling_ratio) == self.dataset.ndim self.downsampling_ratio = list(downsampling_ratio) else: raise NotImplementedError self.base_sequence = self.make_sliding_windows() def normalize_slice(self, data_slice): if data_slice is None: return None slice_ = tuple(slice(0 if sl.start is None else sl.start, sh if sl.stop is None else sl.stop) for sl, sh in zip(data_slice, self.dataset.shape)) if len(slice_) < self.dataset.ndim: slice_ = slice_ + tuple(slice(0, sh) for sh in self.dataset.shape[len(slice_):]) return slice_ # get the effective shape after slicing and / or padding def get_shape(self): if self.data_slice is None: shape = self.dataset.shape else: # get the shape from the data slice (don't support ellipses) shape = tuple(slice_.stop - slice_.start for slice_ in self.data_slice) if self.padding is not None: # TODO is this correct ??? shape = tuple(sh + sum(pad) for sh, pad in zip(shape, self.padding)) return shape def make_sliding_windows(self): return list(vu.slidingwindowslices(shape=list(self.shape), window_size=self.window_size, strides=self.stride, shuffle=self.shuffle, add_overhanging=True, ds=self.downsampling_ratio)) def __getitem__(self, index): # Casting to int would allow index to be IndexSpec objects. 
index = int(index) slices = self.base_sequence[index] slices_ = tuple(slices) # check if we have padding and if we need to pad if self.padding is not None: # get the start and stop positions in the dataset without padding starts = [sl.start - pad[0] for sl, pad in zip(slices_, self.padding)] stops = [sl.stop - pad[0] for sl, pad in zip(slices_, self.padding)] # check if we need to pad to the left pad_left = None if any(start < 0 for start in starts): pad_left = tuple(abs(start) if start < 0 else 0 for start in starts) starts = [max(0, start) for start in starts] # check if we need to pad to the right pad_right = None if any(stop > sh for stop, sh in zip(stops, self._data_shape)): pad_right = tuple(stop - sh if stop > sh else 0 for stop, sh in zip(stops, self._data_shape)) stops = [min(sh, stop) for sh, stop in zip(self._data_shape, stops)] # check if we need any paddingand if so calculate the padding width need_padding = pad_left is not None or pad_right is not None if need_padding: # check the pad width (left and right) that we need for this batch pad_left = (0,) * len(self.shape) if pad_left is None else pad_left pad_right = (0,) * len(self.shape) if pad_right is None else pad_right pad_width = tuple((pl, pr) for pl, pr in zip(pad_left, pad_right)) # update the slicing slices_ = tuple(slice(start, stop) for start, stop in zip(starts, stops)) else: need_padding = False # if we have data-slices, we need to bring # the slices back to the volume space if self.data_slice is not None: slices_ = tuple(slice(sl.start + dsl.start, sl.stop + dsl.start) for sl, dsl in zip(slices_, self.data_slice)) # load the slice and pad if necessary sliced_volume = self.dataset[slices_] if need_padding: sliced_volume = np.pad(sliced_volume, pad_width=pad_width, mode=self.padding_mode) if self.transforms is None: transformed = sliced_volume else: transformed = self.transforms(sliced_volume) if self.return_index_spec: return transformed, IndexSpec(index=index, base_sequence_at_index=slices) else: return transformed def clone(self, dataset=None, transforms=None, name=None): # Make sure the dataset shapes check out assert dataset.shape == self.dataset.shape # Make a new instance (without initializing) new = type(self).__new__(type(self)) # Update dictionary to initialize new_dict = dict(self.__dict__) if dataset is not None: new_dict.update({'dataset': dataset}) if transforms is not None: new_dict.update({'transforms': transforms}) if name is not None: new_dict.update({'name': name}) new.__dict__.update(new_dict) return new def __repr__(self): return "{}(shape={}, name={})".format(type(self).__name__, self.dataset.shape, self.name) # baseclass for hdf5, zarr or n5 volume loaders class LazyVolumeLoader(LazyVolumeLoaderBase): def __init__(self, file_impl, path, path_in_file=None, data_slice=None, transforms=None, name=None, **slicing_config): if isinstance(path, dict): assert name is not None assert name in path self.path = path.get(name) elif isinstance(path, str): assert os.path.exists(path), path self.path = path else: raise NotImplementedError if isinstance(path_in_file, dict): assert name is not None assert name in path_in_file self.path_in_file = path_in_file.get(name) elif isinstance(path_in_file, str): self.path_in_file = path_in_file elif path_in_file is None: self.path_in_file = None else: raise NotImplementedError if data_slice is None or isinstance(data_slice, (str, list, tuple)): data_slice = vu.parse_data_slice(data_slice) elif isinstance(data_slice, dict): assert name is not None assert name in 
data_slice data_slice = vu.parse_data_slice(data_slice.get(name)) else: raise NotImplementedError self.validate_data_slice(data_slice) slicing_config_for_name = pyu.get_config_for_name(slicing_config, name) assert 'window_size' in slicing_config_for_name assert 'stride' in slicing_config_for_name self.file_ = file_impl(self.path, mode='r') # Initialize superclass with the volume super(LazyVolumeLoader, self).__init__(dataset=self.file_[self.path_in_file], name=name, transforms=transforms, data_slice=data_slice, **slicing_config_for_name) # we do not support step in the dataslice def validate_data_slice(self, data_slice): if data_slice is not None: assert all(sl.step in (None, 1) for sl in data_slice), "Complicated step is not supported" class LazyHDF5VolumeLoader(LazyVolumeLoader): def __init__(self, path, path_in_h5_dataset=None, data_slice=None, transforms=None, name=None, **slicing_config): assert WITH_H5PY, "Need h5py to load volume from hdf5 file." super(LazyHDF5VolumeLoader, self).__init__(file_impl=h5py.File, path=path, path_in_file=path_in_h5_dataset, data_slice=data_slice, transforms=transforms, name=name, **slicing_config) # this is not pythonic, but we need to close the h5py file def __del__(self): self.file_.close() class LazyN5VolumeLoader(LazyVolumeLoader): def __init__(self, path, path_in_file=None, data_slice=None, transforms=None, name=None, **slicing_config): assert WITH_Z5PY, "Need z5py to load volume from N5 file." assert slicing_config.get('downsampling_ratio', None) is None,\ "Downsampling is not supported by z5py based loaders" super(LazyN5VolumeLoader, self).__init__(file_impl=z5py.N5File, path=path, path_in_file=path_in_file, data_slice=data_slice, transforms=transforms, name=name, **slicing_config) class LazyZarrVolumeLoader(LazyVolumeLoader): def __init__(self, path, path_in_file=None, data_slice=None, transforms=None, name=None, **slicing_config): assert WITH_Z5PY, "Need z5py to load volume from zarr file." assert slicing_config.get('downsampling_ratio', None) is None,\ "Downsampling is not supported by z5py based loaders" super(LazyZarrVolumeLoader, self).__init__(file_impl=z5py.ZarrFile, path=path, path_in_file=path_in_file, data_slice=data_slice, transforms=transforms, name=name, **slicing_config)
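# --------------------------------------------------------------------------
# Hypothetical usage sketch for the HDF5 loader defined above (not part of the
# original module). The file name, internal dataset path and window/stride
# values are made-up examples and assume a 3D dataset stored under 'raw' in
# 'volume.h5'.
if __name__ == '__main__':
    loader = LazyHDF5VolumeLoader(path='volume.h5',
                                  path_in_h5_dataset='raw',
                                  window_size=(32, 64, 64),
                                  stride=(16, 32, 32))
    print(len(loader))  # number of sliding windows over the volume
    patch = loader[0]   # first window, read lazily from disk
    print(patch.shape)  # (32, 64, 64) away from the volume border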
# # Copyright (c) 2019, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from cuml.metrics.trustworthiness import trustworthiness from cuml.metrics.regression import r2_score from cuml.metrics.accuracy import accuracy_score from cuml.metrics.cluster.adjustedrandindex import adjusted_rand_score