content
stringlengths
1
1.04M
input_ids
listlengths
1
774k
ratio_char_token
float64
0.38
22.9
token_count
int64
1
774k
import sys from PyQt5.QtWidgets import QDialog, QApplication from demoMousetrack import * if __name__=="__main__": app = QApplication(sys.argv) w = MyForm() w.show() sys.exit(app.exec_())
[ 11748, 25064, 198, 6738, 9485, 48, 83, 20, 13, 48, 83, 54, 312, 11407, 1330, 1195, 44204, 11, 1195, 23416, 198, 6738, 13605, 44, 516, 21879, 441, 1330, 1635, 198, 198, 361, 11593, 3672, 834, 855, 1, 834, 12417, 834, 1298, 220, 220, ...
2.322222
90
fibo = [1, 2] fibo_2 = [] i = 0 while True: a = fibo[-2] + fibo[-1] fibo.append(a) i += 1 if a > 4000000: del fibo[-1] break print(fibo) for c in fibo: print(c) if c % 2 == 0: fibo_2.append(c) print(sum(fibo_2))
[ 69, 26762, 796, 685, 16, 11, 362, 60, 201, 198, 69, 26762, 62, 17, 796, 17635, 201, 198, 201, 198, 72, 796, 657, 201, 198, 201, 198, 4514, 6407, 25, 201, 198, 220, 220, 220, 257, 796, 12900, 78, 58, 12, 17, 60, 1343, 12900, 78...
1.595506
178
import sys import logging from flask import Flask, render_template from flask.ext.babel import Babel from flask.ext.bcrypt import Bcrypt from flask.ext.login import LoginManager from flask.ext.security import Security, SQLAlchemyUserDatastore from flask.ext.sqlalchemy import SQLAlchemy from flask_mail import Mail from flask_s3 import FlaskS3 from config import ProdConfig db = SQLAlchemy() bcrypt = Bcrypt() babel = Babel() login_manager = LoginManager() s3 = FlaskS3() mail = Mail()
[ 11748, 25064, 198, 11748, 18931, 198, 6738, 42903, 1330, 46947, 11, 8543, 62, 28243, 198, 6738, 42903, 13, 2302, 13, 65, 9608, 1330, 50175, 198, 6738, 42903, 13, 2302, 13, 15630, 6012, 1330, 347, 29609, 198, 6738, 42903, 13, 2302, 13, ...
3.4
145
Cocodrilo1 = Cocodrilo(1) Cocodrilo1.desplazar()
[ 628, 220, 197, 197, 220, 628, 628, 198, 34, 420, 375, 81, 18526, 16, 796, 18490, 375, 81, 18526, 7, 16, 8, 198, 34, 420, 375, 81, 18526, 16, 13, 8906, 489, 29413, 3419 ]
1.735294
34
""" Compute Strongly Connected Components Given a graph, calculate number of strongest connected components In a subgraph, if we can reach from every vertex to every other vertex, then it is called SCC. Example: >> graph1 = {0: [1], 1: [2], 2: [0, 3], 3: [4], 4: [5, 7], 5: [6], 6: [4, 7]} >>> compute_sccs(graph1)) >> [[0, 2, 1], [3], [4, 6, 5], [7]] Approach: Using Kosaraju Algorithm: - Perform DFS traversal of a graph, to get a stack representing the order of visited nodes while traversal. (The starting node will be returned when we pop the stack for the first time) - Perform DFS traversal of a reversed graph, where directions of all edges are reversed - Collect strongly connected components while traversal Tested with: https://www.hackerearth.com/practice/algorithms/graphs/strongly-connected-components/tutorial/ """ from typing import Generator, Union Node = Union[str, int] Graph = dict[Node, list[Node]] Stack = list[Node] def genrate_all_nodes(graph: Graph) -> Generator[int, None, None]: """ Return a generator for all nodes in the graph """ mentioned_nodes = set() for node in graph.keys(): if node not in mentioned_nodes: yield node mentioned_nodes.add(node) for neighbour in graph[node]: if neighbour not in mentioned_nodes: yield neighbour mentioned_nodes.add(neighbour) def reverse(graph: Graph) -> Graph: """ Return a new graph with same vertices as input graph, but all edges are reversed """ reverse_graph = {} for node in graph.keys(): for neighbour in graph[node]: if neighbour not in reverse_graph: reverse_graph[neighbour] = [node] else: reverse_graph[neighbour].append(node) return reverse_graph def get_dfs_stack(graph: Graph) -> Stack: """ Perform DFS traversal in an input graph Return a stack, representing the order of the path (the starting node will be returned when we pop the stack for the first time) """ explored = set() stack = [] for node in genrate_all_nodes(graph): if node not in explored: dfs_util(node) return stack def 
get_strongly_connected_components(graph: Graph, stack: Stack) -> list[list[Node]]: """ Perfrom DFS traversal on a reversed graph to get """ explored = set() sccs = [] while len(stack) > 0: node = stack.pop() if node not in explored: sccs.append(dfs_util(node, [])) return sccs def compute_sccs(graph: Graph) -> list[list[Node]]: """ Given a directed graph, return the list of strongly connected components """ reverse_graph = reverse(graph) stack = get_dfs_stack(graph) sccs = get_strongly_connected_components(reverse_graph, stack) return sccs if __name__ == "__main__": graph1 = {0: [1], 1: [2], 2: [0, 3], 3: [4], 4: [5, 7], 5: [6], 6: [4, 7]} print(compute_sccs(graph1)) # [[0, 2, 1], [3], [4, 6, 5], [7]] graph2 = { 1: [10], 3: [6, 9], 7: [12], 9: [2, 14], 12: [9], 4: [11], 2: [5, 10], 5: [3], 15: [3, 8], 8: [11], 11: [5], } print(compute_sccs(graph2)) # [[15], [8], [4], [11], [7], [12], [3, 5, 2, 9], [14], [6], [1], [10]]
[ 37811, 198, 7293, 1133, 13535, 306, 8113, 276, 36109, 198, 198, 15056, 257, 4823, 11, 15284, 1271, 286, 12841, 5884, 6805, 198, 198, 818, 257, 850, 34960, 11, 611, 356, 460, 3151, 422, 790, 37423, 284, 790, 584, 37423, 11, 198, 8524, ...
2.318667
1,500
def running_threads(): """ Currently running threads :returns: list of running thread information :rtype: list of str """ import threading threads = [] main_thread = threading.current_thread() for t in threading.enumerate(): if t is main_thread: continue threads.append('#{}: {}: {}'.format(len(threads) + 1, t.getName(), t)) return threads def loggers(): """ Currently configured loggers :returns: list of configured loggers :rtype: list of Logger objects """ import logging root = logging.root existing = root.manager.loggerDict.keys() return [logging.getLogger(name) for name in existing]
[ 4299, 2491, 62, 16663, 82, 33529, 198, 220, 220, 220, 37227, 16888, 2491, 14390, 628, 220, 220, 220, 1058, 7783, 82, 25, 1351, 286, 2491, 4704, 1321, 198, 220, 220, 220, 1058, 81, 4906, 25, 1351, 286, 965, 628, 220, 220, 220, 37227,...
2.729412
255
__version__ = "0.5.1" default_app_config = 'rest_registration.apps.RestRegistrationConfig'
[ 834, 9641, 834, 796, 366, 15, 13, 20, 13, 16, 1, 198, 12286, 62, 1324, 62, 11250, 796, 705, 2118, 62, 2301, 33397, 13, 18211, 13, 19452, 47133, 16934, 6, 198 ]
2.935484
31
#! -*- coding: utf-8 -*- # 测试代码可用性: 结合MLM的Gibbs采样 from tqdm import tqdm import numpy as np from bert4keras.models import build_transformer_model from bert4keras.tokenizers import Tokenizer from bert4keras.snippets import to_array config_path = '/root/kg/bert/chinese_L-12_H-768_A-12/bert_config.json' checkpoint_path = '/root/kg/bert/chinese_L-12_H-768_A-12/bert_model.ckpt' dict_path = '/root/kg/bert/chinese_L-12_H-768_A-12/vocab.txt' tokenizer = Tokenizer(dict_path, do_lower_case=True) # 建立分词器 model = build_transformer_model( config_path=config_path, checkpoint_path=checkpoint_path, with_mlm=True ) # 建立模型,加载权重 sentences = [] init_sent = u'科学技术是第一生产力。' # 给定句子或者None minlen, maxlen = 8, 32 steps = 10000 converged_steps = 1000 vocab_size = tokenizer._vocab_size if init_sent is None: length = np.random.randint(minlen, maxlen + 1) tokens = ['[CLS]'] + ['[MASK]'] * length + ['[SEP]'] token_ids = tokenizer.tokens_to_ids(tokens) segment_ids = [0] * len(token_ids) else: token_ids, segment_ids = tokenizer.encode(init_sent) length = len(token_ids) - 2 for _ in tqdm(range(steps), desc='Sampling'): # Gibbs采样流程:随机mask掉一个token,然后通过MLM模型重新采样这个token。 i = np.random.choice(length) + 1 token_ids[i] = tokenizer._token_mask_id probas = model.predict(to_array([token_ids], [segment_ids]))[0, i] token = np.random.choice(vocab_size, p=probas) token_ids[i] = token sentences.append(tokenizer.decode(token_ids)) print(u'部分随机采样结果:') for _ in range(10): print(np.random.choice(sentences[converged_steps:]))
[ 2, 0, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 10545, 113, 233, 46237, 243, 47987, 163, 254, 223, 20998, 107, 18796, 101, 45250, 100, 25, 13328, 119, 241, 28938, 230, 5805, 44, 21410, 38, 571, 1443, 34932, 22...
1.988579
788
# coding: utf-8 ### # @file condense.py # @author Sébastien Rouault <sebastien.rouault@alumni.epfl.ch> # # @section LICENSE # # Copyright © 2020 École Polytechnique Fédérale de Lausanne (EPFL). # All rights reserved. # # @section DESCRIPTION # # Condense parameter vector aggregation random function. ### import tensorflow as tf import tools from . import _GAR, register # ---------------------------------------------------------------------------- # # Condense random function class TFCondenseGAR(_GAR): """ Full-TensorFlow condense random function class. """ # ---------------------------------------------------------------------------- # # GAR registering # Register aggregation rule register("condense", TFCondenseGAR)
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 21017, 198, 1303, 2488, 7753, 220, 220, 1779, 1072, 13, 9078, 198, 1303, 2488, 9800, 311, 2634, 65, 459, 2013, 13876, 1721, 1279, 325, 65, 459, 2013, 13, 472, 1721, 31, 282, 25402, 13, 538, 270...
3.666667
204
import numpy import tiledb def mbes_domain(tri=False): """Set array domain.""" index_filters = tiledb.FilterList([tiledb.ZstdFilter(level=16)]) xdim = tiledb.Dim( "longitude", domain=(None, None), tile=1000, dtype=numpy.float64, filters=index_filters, ) ydim = tiledb.Dim( "latitude", domain=(None, None), tile=1000, dtype=numpy.float64, filters=index_filters, ) if tri: # define a third dimension, i.e. depth/z/elevation zdim = tiledb.Dim( "depth", domain=(None, None), tile=1000, dtype=numpy.float64, filters=index_filters, ) domain = tiledb.Domain(xdim, ydim, zdim) else: domain = tiledb.Domain(xdim, ydim) return domain def mbes_attrs(): """Create the mbes attributes""" attrs = [ tiledb.Attr( "depth", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)] ), tiledb.Attr( "timestamp", dtype="datetime64[ns]", filters=[tiledb.ZstdFilter(level=16)] ), tiledb.Attr( "across_track", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)] ), tiledb.Attr( "along_track", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)] ), tiledb.Attr( "travel_time", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)] ), tiledb.Attr( "beam_angle", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)] ), tiledb.Attr( "mean_cal_amplitude", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)], ), tiledb.Attr( "beam_angle_forward", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)], ), tiledb.Attr( "vertical_error", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)] ), tiledb.Attr( "horizontal_error", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)], ), tiledb.Attr( "sector_number", dtype=numpy.uint8, filters=[tiledb.RleFilter(), tiledb.ZstdFilter(level=16)], ), tiledb.Attr( "beam_flags", dtype=numpy.uint8, filters=[tiledb.RleFilter(), tiledb.ZstdFilter(level=16)], ), tiledb.Attr( "ping_flags", dtype=numpy.uint8, filters=[tiledb.RleFilter(), tiledb.ZstdFilter(level=16)], ), tiledb.Attr( "tide_corrector", dtype=numpy.float32, 
filters=[tiledb.ZstdFilter(level=16)] ), tiledb.Attr( "depth_corrector", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)], ), tiledb.Attr( "heading", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)] ), tiledb.Attr( "pitch", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)] ), tiledb.Attr("roll", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)]), tiledb.Attr( "heave", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)] ), tiledb.Attr( "course", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)] ), tiledb.Attr( "speed", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)] ), tiledb.Attr( "height", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)] ), tiledb.Attr( "separation", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)] ), tiledb.Attr( "gps_tide_corrector", dtype=numpy.float32, filters=[tiledb.ZstdFilter(level=16)], ), tiledb.Attr( "centre_beam", dtype=numpy.uint8, filters=[tiledb.RleFilter(), tiledb.ZstdFilter(level=16)] ), tiledb.Attr( "beam_number", dtype=numpy.uint16, filters=[tiledb.ZstdFilter(level=16)] ), tiledb.Attr( "region_code", dtype=str, filters=[tiledb.ZstdFilter(level=16)] ), ] return attrs def mbes_schema(): """Create the tiledb schema""" domain = mbes_domain(False) # only 2 dims for the project attributes = mbes_attrs() schema = tiledb.ArraySchema( domain=domain, sparse=True, attrs=attributes, cell_order="hilbert", tile_order="row-major", capacity=1_000_000, allows_duplicates=True, ) return schema def create_mbes_array(array_uri, ctx=None): """Create the TileDB array.""" schema = mbes_schema() with tiledb.scope_ctx(ctx): tiledb.Array.create(array_uri, schema) def append_ping_dataframe(dataframe, array_uri, ctx=None): """Append the ping dataframe read from a GSF file.""" kwargs = { "mode": "append", "sparse": True, "ctx": ctx, } tiledb.dataframe_.from_pandas(array_uri, dataframe, **kwargs)
[ 11748, 299, 32152, 198, 11748, 256, 3902, 65, 628, 198, 4299, 285, 12636, 62, 27830, 7, 28461, 28, 25101, 2599, 198, 220, 220, 220, 37227, 7248, 7177, 7386, 526, 15931, 198, 220, 220, 220, 6376, 62, 10379, 1010, 796, 256, 3902, 65, ...
1.859993
2,807
import os import logging import logging.config from pyrogram import Client from config import API_ID, API_HASH, BOT_TOKEN logging.config.fileConfig('logging.conf') logging.getLogger().setLevel(logging.INFO) logging.getLogger("pyrogram").setLevel(logging.ERROR) FORCE_SUB = os.environ.get("FORCE_SUB", None) bot = TG() bot.run()
[ 11748, 28686, 198, 11748, 18931, 220, 198, 11748, 18931, 13, 11250, 198, 6738, 12972, 39529, 1330, 20985, 198, 6738, 4566, 1330, 7824, 62, 2389, 11, 7824, 62, 39, 11211, 11, 347, 2394, 62, 10468, 43959, 198, 198, 6404, 2667, 13, 11250, ...
2.641221
131
from os import path import setuptools import pathlib HERE = pathlib.Path(__file__).parent SRC_CLANG = HERE / 'src/clang' SRC_CLANG_BASE_URL = 'https://raw.githubusercontent.com/llvm/llvm-project/llvmorg-13.0.0/clang/bindings/python/clang/' if not SRC_CLANG.exists(): SRC_CLANG.mkdir(parents=True) http_get(SRC_CLANG_BASE_URL, SRC_CLANG, '__init__.py') http_get(SRC_CLANG_BASE_URL, SRC_CLANG, 'cindex.py') http_get(SRC_CLANG_BASE_URL, SRC_CLANG, 'enumerations.py') setuptools.setup()
[ 6738, 28686, 1330, 3108, 201, 198, 11748, 900, 37623, 10141, 201, 198, 11748, 3108, 8019, 201, 198, 201, 198, 39, 9338, 796, 3108, 8019, 13, 15235, 7, 834, 7753, 834, 737, 8000, 201, 198, 50, 7397, 62, 5097, 15567, 796, 15698, 1220, ...
2.118852
244
#%% Change working directory from the workspace root to the ipynb file location. Turn this addition off with the DataScience.changeDirOnImportExport setting import os try: os.chdir(os.path.join(os.getcwd(), 'examples')) print(os.getcwd()) except: pass #%% [markdown] # ## Advanced Lane Finding Project # # The goals / steps of this project are the following: # # * Compute the camera calibration matrix and distortion coefficients given a set of chessboard images. # * Apply a distortion correction to raw images. # * Use color transforms, gradients, etc., to create a thresholded binary image. # * Apply a perspective transform to rectify binary image ("birds-eye view"). # * Detect lane pixels and fit to find the lane boundary. # * Determine the curvature of the lane and vehicle position with respect to center. # * Warp the detected lane boundaries back onto the original image. # * Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position. # # --- # ## First, I'll compute the camera calibration using chessboard images #%% import numpy as np import cv2 import glob import matplotlib.pyplot as plt get_ipython().run_line_magic('matplotlib', 'qt') # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0) objp = np.zeros((6*9,3), np.float32) objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2) # Arrays to store object points and image points from all the images. objpoints = [] # 3d points in real world space imgpoints = [] # 2d points in image plane. 
# Make a list of calibration images images = glob.glob('../camera_cal/calibration*.jpg') undistorted = cal_undistort(img, objpoints, imgpoints) print('ok') print(objp.shape) print(corners.shape) img_size = (img.shape[1], img.shape[0]) print(img_size) %matplotlib inline plt.figure(figsize=(10.,8)) img = mpimg.imread("../camera_cal/calibration5.jpg") # Undistort using mtx and dist undist = cv2.undistort(img, mtx, dist, None, mtx) plt.subplot(2,2,1) plt.title('Original') fig = plt.imshow(img) plt.subplot(2,2,2) plt.title('Undistorted') fig = plt.imshow(undist) #%% [markdown]
[ 2, 16626, 9794, 1762, 8619, 422, 262, 44573, 6808, 284, 262, 20966, 2047, 65, 2393, 4067, 13, 6756, 428, 3090, 572, 351, 262, 6060, 26959, 13, 3803, 35277, 2202, 20939, 43834, 4634, 198, 11748, 28686, 198, 28311, 25, 198, 197, 418, 13...
3
709
if __name__ == "__main__": go_deep()
[ 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 467, 62, 22089, 3419 ]
2.294118
17
# RevKit: A Toolkit for Reversible Circuit Design (www.revkit.org) # Copyright (C) 2009-2011 The RevKit Developers <revkit@informatik.uni-bremen.de> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from PyQt4 import QtCore, QtGui from PyQt4.QtCore import * from PyQt4.QtGui import * from BaseItemButton import *
[ 2, 5416, 20827, 25, 317, 16984, 15813, 329, 797, 37393, 13588, 8495, 357, 2503, 13, 18218, 15813, 13, 2398, 8, 198, 2, 15069, 357, 34, 8, 3717, 12, 9804, 220, 383, 5416, 20827, 34152, 1279, 18218, 15813, 31, 259, 18982, 1134, 13, 35...
3.455939
261
# -*- encoding: utf-8 -*- # # Copyright © 2016-2018 Red Hat, Inc. # Copyright © 2014-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import itertools import operator import daiquiri import numpy from oslo_config import cfg import six from gnocchi import carbonara from gnocchi import utils OPTS = [ cfg.StrOpt('driver', default='file', help='Storage driver to use'), ] LOG = daiquiri.getLogger(__name__) ATTRGETTER_METHOD = operator.attrgetter("method") ATTRGETTER_GRANULARITY = operator.attrgetter("granularity") class MetricDoesNotExist(StorageError): """Error raised when this metric does not exist.""" class AggregationDoesNotExist(StorageError): """Error raised when the aggregation method doesn't exists for a metric.""" class MetricAlreadyExists(StorageError): """Error raised when this metric already exists.""" @utils.retry_on_exception_and_log("Unable to initialize storage driver") def get_driver(conf): """Return the configured driver.""" return utils.get_driver_class('gnocchi.storage', conf.storage)( conf.storage)
[ 2, 532, 9, 12, 21004, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 198, 2, 15069, 10673, 1584, 12, 7908, 2297, 10983, 11, 3457, 13, 198, 2, 15069, 10673, 1946, 12, 4626, 304, 20795, 590, 198, 2, 198, 2, 49962, 739, 262, 24843, 13...
3.207031
512
# SPDX-License-Identifier: BSD-3-Clause # # Copyright 2020 Raritan Inc. All rights reserved. # # This is an auto-generated file. # # Section generated by IdlC from "LuaService.idl" # import raritan.rpc from raritan.rpc import Interface, Structure, ValueObject, Enumeration, typecheck, DecodeException import raritan.rpc.luaservice # structure # structure # structure # interface
[ 2, 30628, 55, 12, 34156, 12, 33234, 7483, 25, 347, 10305, 12, 18, 12, 2601, 682, 198, 2, 198, 2, 15069, 12131, 371, 283, 18642, 3457, 13, 1439, 2489, 10395, 13, 198, 2, 198, 2, 770, 318, 281, 8295, 12, 27568, 2393, 13, 198, 198,...
2.992248
129
from helper.CommonHelper import CommonHelper from flask_restful import Resource, reqparse from model.Product import Product from service.ProductService import ProductService get_parser = reqparse.RequestParser() get_parser.add_argument('product_id', type=str, required=False) get_parser.add_argument('category_id', type=str, required=False) put_parser = reqparse.RequestParser() put_parser.add_argument('name', type=str, required=True, help="name is required") put_parser.add_argument('unit', type=str, required=True, help="level is required") put_parser.add_argument('category_id', type=str, required=True, help="category_id is required") put_parser.add_argument('mrp', type=int, required=True, help="mrp is required") put_parser.add_argument('price', type=int, required=True, help="price is required") put_parser.add_argument('tag', type=list, required=True, help="tag is required", location="json") delete_parser = reqparse.RequestParser() delete_parser.add_argument('_id', type=str, required=True)
[ 6738, 31904, 13, 17227, 47429, 1330, 8070, 47429, 198, 6738, 42903, 62, 2118, 913, 1330, 20857, 11, 43089, 29572, 198, 6738, 2746, 13, 15667, 1330, 8721, 198, 6738, 2139, 13, 15667, 16177, 1330, 8721, 16177, 198, 198, 1136, 62, 48610, 7...
2.333333
534
# -*- coding: utf-8 -*- # Copyright 2022 The Luoxi Team. # All rights reserved. # This source code is licensed under the Apache 2.0 license # found in the LICENSE file in the root directory. import torch # trunk model init # lite plugin model init # naive plugin model init if __name__ == '__main__': # model.apply(weight_init_normal) dimension = 10 plugin_layer = torch.nn.Linear(dimension, dimension // 2, True) print("-" * 50) print("original") print("plugin_layer.weight", plugin_layer.weight) print("plugin_layer.bias", plugin_layer.bias) default_weight_init(plugin_layer.weight) default_bias_init(plugin_layer.bias) print("-" * 50) print("trunk_init") print("plugin_layer.weight", plugin_layer.weight) print("plugin_layer.bias", plugin_layer.bias) default_lite_plugin_init(plugin_layer) print("-" * 50) print("lite_plugin_init") print("plugin_layer.weight", plugin_layer.weight) print("plugin_layer.bias", plugin_layer.bias) default_naive_plugin_init(plugin_layer) print("-" * 50) print("naive_plugin_init") print("plugin_layer.weight", plugin_layer.weight) print("plugin_layer.bias", plugin_layer.bias)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 15069, 33160, 383, 6026, 1140, 72, 4816, 13, 198, 2, 1439, 2489, 10395, 13, 198, 2, 770, 2723, 2438, 318, 11971, 739, 262, 24843, 362, 13, 15, 5964, 198, 2, 1043,...
2.734694
441
from overrides.enforce import EnforceOverrides from overrides.final import final from overrides.overrides import __VERSION__, overrides __all__ = [ "__VERSION__", "overrides", "final", "EnforceOverrides", ]
[ 6738, 23170, 1460, 13, 268, 3174, 1330, 2039, 3174, 5886, 81, 1460, 198, 6738, 23170, 1460, 13, 20311, 1330, 2457, 198, 6738, 23170, 1460, 13, 2502, 81, 1460, 1330, 11593, 43717, 834, 11, 23170, 1460, 198, 198, 834, 439, 834, 796, 685...
2.765432
81
from typing import Optional, List import numpy from pyrr import Matrix44, Vector3 from vistas.core.bounds import BoundingBox from vistas.core.graphics.object import Object3D, Intersection class Ray: """ Representation of a ray in 3D space. Rays emit from an origin along a direction. Implementation inspired by mrdoob - https://github.com/mrdoob/three.js/blob/master/src/math/Ray.js """ def at(self, t): """ Retrieve a point along the ray. """ return self.direction * t + self.origin def intersect_triangles(self, a, b, c): """ Determine face-level triangle intersections from this ray. """ e1 = b - a e2 = c - a direction = numpy.array(self.direction) origin = numpy.array(self.origin) eps = numpy.finfo(numpy.float32).eps pvec = numpy.cross(direction, e2) det = numpy.sum(e1 * pvec, axis=-1) det_cond = (det >= eps) | (det <= -eps) # Get values outside of range -eps < det < eps inv_det = 1 / det tvec = origin - a u = numpy.sum(tvec * pvec, axis=-1) * inv_det u_cond = (u <= 1) & (u >= 0) # Get values if not (u < 0 or u > 1) qvec = numpy.cross(tvec, e1) v = numpy.sum(direction * qvec, axis=-1) * inv_det v_cond = (v >= 0) & (u + v <= 1) # Get values if not (if v < 0 or u + v > 1) # Filter down and determine intersections result = numpy.sum(e2 * qvec, axis=-1) * inv_det intersections = numpy.where(det_cond & u_cond & v_cond) distances = result[intersections] # Now we return their locations in terms of distance return distances, intersections[0] class Raycaster: """ A class for mouse picking in 3D space. Inspiration from ThreeJS' Raycaster implementation. https://github.com/mrdoob/three.js/blob/master/src/core/Raycaster.js """ def set_from_camera(self, coords: tuple, camera): """ Update the Raycaster's ray to extend from the given Camera. 
""" self.ray.origin = camera.get_position() self.ray.direction = camera.unproject(coords) self.ray.direction.normalize() def intersect_object(self, coords, obj, camera) -> List[Intersection]: """ Retrieve intersections, sorted in ascending distance, to a given Object3D. """ intersects = [] if issubclass(obj.__class__, Object3D): camera.push_matrix() self.set_from_camera(coords, camera) camera.matrix *= Matrix44.from_translation(obj.position) intersects = obj.raycast(self) camera.pop_matrix() if intersects: intersects.sort(key=lambda i: i.distance) return intersects def intersect_objects(self, coords: tuple, camera) -> List[Intersection]: """ Retrieve intersections to all Object3D objects in a given Camera's Scene. """ intersects = [] for obj in camera.scene.objects: intersects += self.intersect_object(coords, obj, camera) or [] if intersects: intersects.sort(key=lambda i: i.distance) return intersects
[ 6738, 19720, 1330, 32233, 11, 7343, 198, 198, 11748, 299, 32152, 198, 6738, 279, 2417, 81, 1330, 24936, 2598, 11, 20650, 18, 198, 198, 6738, 410, 37503, 13, 7295, 13, 65, 3733, 1330, 347, 9969, 14253, 198, 6738, 410, 37503, 13, 7295, ...
2.402861
1,328
import io import asyncio import hashlib import logging import unittest.mock as mock import aiohttp.client_exceptions as a_exc import synapse.exc as s_exc import synapse.axon as s_axon import synapse.common as s_common import synapse.telepath as s_telepath import synapse.lib.httpapi as s_httpapi import synapse.lib.msgpack as s_msgpack import synapse.tests.utils as s_t_utils logger = logging.getLogger(__name__) # This causes blocks which are not homogeneous when sliced in kibibyte lengths bbuf = b'0123456' * 4793491 abuf = b'asdfasdf' pbuf = b'pennywise' rbuf = b'robert gray' bbufhash = hashlib.sha256(bbuf).digest() asdfhash = hashlib.sha256(abuf).digest() emptyhash = hashlib.sha256(b'').digest() pennhash = hashlib.sha256(pbuf).digest() rgryhash = hashlib.sha256(rbuf).digest() asdfretn = (8, asdfhash) emptyretn = (0, emptyhash) pennretn = (9, pennhash) rgryretn = (11, rgryhash) bbufretn = (len(bbuf), bbufhash)
[ 11748, 33245, 198, 11748, 30351, 952, 198, 11748, 12234, 8019, 198, 11748, 18931, 198, 11748, 555, 715, 395, 13, 76, 735, 355, 15290, 198, 198, 11748, 257, 952, 4023, 13, 16366, 62, 1069, 11755, 355, 257, 62, 41194, 198, 198, 11748, 6...
2.583333
360
# Copyright (c) 2018 Mengye Ren, Eleni Triantafillou, Sachin Ravi, Jake Snell, # Kevin Swersky, Joshua B. Tenenbaum, Hugo Larochelle, Richars S. Zemel. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
# ============================================================================= from __future__ import (absolute_import, division, print_function, unicode_literals) import datetime import os import sys from fewshot.utils import logger log = logger.get() class ExperimentLogger(): """Writes experimental logs to CSV file.""" def __init__(self, logs_folder): """Initialize files.""" self._write_to_csv = logs_folder is not None if self._write_to_csv: if not os.path.isdir(logs_folder): os.makedirs(logs_folder) catalog_file = os.path.join(logs_folder, "catalog") with open(catalog_file, "w") as f: f.write("filename,type,name\n") with open(catalog_file, "a") as f: f.write("{},plain,{}\n".format("cmd.txt", "Commands")) with open(os.path.join(logs_folder, "cmd.txt"), "w") as f: f.write(" ".join(sys.argv)) with open(catalog_file, "a") as f: f.write("train_ce.csv,csv,Train Loss (Cross Entropy)\n") f.write("train_acc.csv,csv,Train Accuracy\n") f.write("valid_acc.csv,csv,Validation Accuracy\n") f.write("learn_rate.csv,csv,Learning Rate\n") self.train_file_name = os.path.join(logs_folder, "train_ce.csv") if not os.path.exists(self.train_file_name): with open(self.train_file_name, "w") as f: f.write("step,time,ce\n") self.trainval_file_name = os.path.join(logs_folder, "train_acc.csv") if not os.path.exists(self.trainval_file_name): with open(self.trainval_file_name, "w") as f: f.write("step,time,acc\n") self.val_file_name = os.path.join(logs_folder, "valid_acc.csv") if not os.path.exists(self.val_file_name): with open(self.val_file_name, "w") as f: f.write("step,time,acc\n") self.lr_file_name = os.path.join(logs_folder, "learn_rate.csv") if not os.path.exists(self.lr_file_name): with open(self.lr_file_name, "w") as f: f.write("step,time,lr\n") def log_train_ce(self, niter, ce): """Writes training CE.""" if self._write_to_csv: with open(self.train_file_name, "a") as f: f.write("{:d},{:s},{:e}\n".format( niter + 1, datetime.datetime.now().isoformat(), ce)) def log_train_acc(self, 
niter, acc): """Writes training accuracy.""" if self._write_to_csv: with open(self.trainval_file_name, "a") as f: f.write("{:d},{:s},{:e}\n".format( niter + 1, datetime.datetime.now().isoformat(), acc)) def log_valid_acc(self, niter, acc): """Writes validation accuracy.""" if self._write_to_csv: with open(self.val_file_name, "a") as f: f.write("{:d},{:s},{:e}\n".format( niter + 1, datetime.datetime.now().isoformat(), acc)) def log_learn_rate(self, niter, lr): """Writes validation accuracy.""" if self._write_to_csv: with open(self.lr_file_name, "a") as f: f.write("{:d},{:s},{:e}\n".format( niter + 1, datetime.datetime.now().isoformat(), lr))
[ 2, 15069, 357, 66, 8, 2864, 6065, 1360, 68, 7152, 11, 2574, 43850, 7563, 415, 1878, 359, 280, 11, 20678, 259, 371, 15820, 11, 14757, 5489, 695, 11, 198, 2, 7939, 2451, 364, 2584, 11, 20700, 347, 13, 9368, 268, 24738, 11, 25930, 25...
2.504098
1,708
import math import random from typing import Union, Optional import discord from discord.ext import commands from .utils import db from .utils import checks
[ 11748, 10688, 198, 11748, 4738, 198, 6738, 19720, 1330, 4479, 11, 32233, 198, 11748, 36446, 198, 6738, 36446, 13, 2302, 1330, 9729, 198, 6738, 764, 26791, 1330, 20613, 198, 6738, 764, 26791, 1330, 8794, 628 ]
4.514286
35
from __future__ import annotations import procrunner
[ 6738, 11593, 37443, 834, 1330, 37647, 198, 198, 11748, 13834, 16737, 628 ]
4.583333
12
# coding: utf-8 from PIL import Image from pymlgame.surface import Surface NONBLOCKING = 0 BLOCKING = 1 DESTROYABLE = 2
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 6738, 350, 4146, 1330, 7412, 198, 6738, 12972, 4029, 6057, 13, 42029, 1330, 20321, 198, 198, 45, 1340, 9148, 11290, 2751, 796, 657, 198, 9148, 11290, 2751, 796, 352, 198, 30910, 5446, 21414, ...
2.673913
46
from typing import List if __name__ == "__main__": solution = Solution() root = buildTree(None, 0, [0,1,0,0,1,0,None,None,1,0,0]) result = solution.isValidSequence(root, [0,1,0,1]) print (result)
[ 6738, 19720, 1330, 7343, 628, 198, 220, 220, 220, 220, 220, 220, 220, 220, 628, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 4610, 796, 28186, 3419, 198, 220, 220, 220, 6808, 796, 1382, 27660, 7,...
2.196078
102
import math import os import sys import numpy import scipy import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import matplotlib as mpl import matplotlib.cm as cm import csv from mpl_toolkits.mplot3d import Axes3D from numpy import log10 prefix = 'data/ArrayOfDotProducts_' suffix = '_clearCache_shadowfax' outputPrefix = 'figures/' # read in all of the data. # TODO: you'll need to disable everything that's not relevant here or it'll be angry about missing files dotProductSize = numpy.loadtxt(open(prefix + 'dotProductSize' + suffix + '.csv','rb'),delimiter=',',skiprows=0) memorySize = numpy.loadtxt(open(prefix + 'memorySize' + suffix + '.csv','rb'),delimiter=',',skiprows=0) numberOfDotProducts = numpy.loadtxt(open(prefix + 'numberOfDotProducts' + suffix + '.csv','rb'),delimiter=',',skiprows=0) serialTimes = numpy.loadtxt(open(prefix + 'serialTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0) ompTimes = numpy.loadtxt(open(prefix + 'ompTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0) cudaIndependentTimes = numpy.loadtxt(open(prefix + 'cudaIndependentTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0) cudaReductionTimes = numpy.loadtxt(open(prefix + 'cudaReductionTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0) cudaSwitchingTimes = numpy.loadtxt(open(prefix + 'cudaSwitchingTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0) kokkosOmpTimes = numpy.loadtxt(open(prefix + 'kokkosOmpTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0) kokkosCudaIndependentTimes = numpy.loadtxt(open(prefix + 'kokkosCudaIndependentTimes' + suffix + '.csv','rb'),delimiter=',',skiprows=0) # set up a list of the times and names, for easy iteration later # TODO: make this consistent with the files that you read in and/or care about allTimes = [] allNames = [] # NOTE: if you are doing comparisons against serial time, it's assumed that the first entry in allTimes is serial allTimes.append(serialTimes) allNames.append('serial') # NOTE: if you 
are doing comparisons against omp time, it's assumed that the second entry in allTimes is openmp. if you aren't doing those comparisons, you should go disable that portion of this script. allTimes.append(ompTimes) allNames.append('omp') # NOTE: if you are doing comparisons against cuda time, it's assumed that the third entry in allTimes is cuda. if you aren't doing those comparisons, you should go disable that portion of this script. allTimes.append(cudaIndependentTimes) allNames.append('cudaIndependent') # there are no assumptions about the rest of the ordering allTimes.append(cudaReductionTimes) allNames.append('cudaReduction') allTimes.append(cudaSwitchingTimes) allNames.append('cudaSwitching') allTimes.append(kokkosOmpTimes) allNames.append('kokkosOmp') allTimes.append(kokkosCudaIndependentTimes) allNames.append('kokkosCudaIndependent') # these are toggles for whether to make image files and whether to make orbit files for making movies makeImageFiles = True #makeImageFiles = False makeOrbitFilesForMovies = True #makeOrbitFilesForMovies = False numberOfOrbitFrames = 100 #markerPool = ['-', '--', ':'] markerPool = ['-', '--'] colors = cm.gist_ncar(numpy.linspace(1, 0, len(allTimes))) markers = [] for i in range(len(allTimes)): markers.append(markerPool[i % len(markerPool)]) fig3d = plt.figure(0) fig2d = plt.figure(1, figsize=(14, 6)) ax2d = plt.subplot(111) box2d = ax2d.get_position() ax2d.set_position([box2d.x0, box2d.y0, box2d.width * 0.60, box2d.height]) bbox_to_anchor2d = (1.87, 0.5) # make an image of just the number of dot products # TODO: you might want to make an image of the number of cells, so you'd adjust this. 
fig3d = plt.figure(0) ax = fig3d.gca(projection='3d') ax.view_init(elev=0, azim=-111) surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), log10(numberOfDotProducts), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False) plt.xlabel('log10(dotProductSize)') plt.ylabel('log10(memorySize)') ax.set_zlabel('log10(numberOfDotProducts)') plt.title('number of dot products') if (makeImageFiles == True): ax.view_init(elev=2, azim=-23) filename = outputPrefix + 'NumberOfDotProducts' + suffix plt.savefig(filename + '.pdf') print 'saved file to %s' % filename else: plt.show() # goal: make images showing just the raw times # find the min and max values across all flavors so that the color scale is the same for each graph maxValue = -10 minValue = 10 for timesIndex in numpy.arange(0, len(allTimes)): maxValue = numpy.max([maxValue, numpy.max(log10(allTimes[timesIndex]))]) minValue = numpy.min([minValue, numpy.min(log10(allTimes[timesIndex]))]) # make the color scale colorNormalizer = matplotlib.colors.Normalize(vmin=minValue, vmax=maxValue) # for each time for timesIndex in range(len(allTimes)): # make a 3d plot fig3d = plt.figure(0) plt.clf() times = allTimes[timesIndex] name = allNames[timesIndex] ax = fig3d.gca(projection='3d') ax.view_init(elev=0, azim=-111) surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), log10(times), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False) surf.set_norm(colorNormalizer) plt.xlabel('log10(dotProductSize)') plt.ylabel('log10(memorySize)') ax.set_zlabel('log10(raw time) [seconds]') ax.set_zlim([minValue, maxValue]) plt.title(name + ' raw time') if (makeImageFiles == True): ax.view_init(elev=2, azim=-23) filename = outputPrefix + 'RawTimes_' + name + suffix plt.savefig(filename + '.pdf') print 'saved file to %s' % filename else: plt.show() # make a 2D plot of all flavors, for the smallest and largest sizes of memory fig2d = plt.figure(1) for memorySizeIndex in [-1, 0]: legendNames 
= [] plt.cla() for timesIndex in range(len(allTimes)): times = allTimes[timesIndex] name = allNames[timesIndex] plt.plot(dotProductSize[:, memorySizeIndex], times[:, memorySizeIndex], markers[timesIndex], color=colors[timesIndex], hold='on', linewidth=2) legendNames.append(name) plt.xscale('log') plt.yscale('log') plt.title('raw times for memory size %.2e' % memorySize[0, memorySizeIndex], fontsize=16) plt.xlabel('dot product size', fontsize=16) plt.ylabel('raw time [seconds]', fontsize=16) plt.xlim([dotProductSize[0, 0], dotProductSize[-1, 0]]) ax2d.legend(legendNames, loc='center right', bbox_to_anchor=bbox_to_anchor2d) if (makeImageFiles == True): sizeDescription = 'largestSize' if (memorySizeIndex == -1) else 'smallestSize' filename = outputPrefix + 'RawTimes_2d_' + sizeDescription + suffix plt.savefig(filename + '.pdf') print 'saved file to %s' % filename else: plt.show() # now make plots that are normalized by memory size maxValue = -10 minValue = 10 for timesIndex in numpy.arange(0, len(allTimes)): maxValue = numpy.max([maxValue, numpy.max(log10(allTimes[timesIndex] / memorySize))]) minValue = numpy.min([minValue, numpy.min(log10(allTimes[timesIndex] / memorySize))]) colorNormalizer = matplotlib.colors.Normalize(vmin=minValue, vmax=maxValue) for timesIndex in range(len(allTimes)): fig3d = plt.figure(0) plt.clf() times = allTimes[timesIndex] name = allNames[timesIndex] ax = fig3d.gca(projection='3d') ax.view_init(elev=0, azim=-111) surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), log10(times / memorySize), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False) surf.set_norm(colorNormalizer) plt.xlabel('log10(dotProductSize)') plt.ylabel('log10(memorySize)') ax.set_zlabel('log10(normalized time [seconds / memorySize])') ax.set_zlim([minValue, maxValue]) plt.title(name + ' normalized time') if (makeImageFiles == True): ax.view_init(elev=2, azim=-23) filename = outputPrefix + 'NormalizedTime_' + name + suffix 
plt.savefig(filename + '.pdf') print 'saved file to %s' % filename # possibly make orbit plots for movies if (makeOrbitFilesForMovies == True): for frameIndex in range(numberOfOrbitFrames): ax.view_init(elev=2, azim=360 * frameIndex / (numberOfOrbitFrames - 1)) filename = outputPrefix + 'orbitFrames/NormalizedTime_' + name + suffix + '_%02d.pdf' % frameIndex plt.savefig(filename) print 'saved file to %s' % filename else: plt.show() # now make relative speedups over serial maxSpeedup = -10 minSpeedup = 10 for timesIndex in numpy.arange(1, len(allTimes)): maxSpeedup = numpy.max([maxSpeedup, numpy.max(log10(allTimes[0] / allTimes[timesIndex]))]) minSpeedup = numpy.min([minSpeedup, numpy.min(log10(allTimes[0] / allTimes[timesIndex]))]) colorNormalizer = matplotlib.colors.Normalize(vmin=minSpeedup, vmax=maxSpeedup) # intentionally start at 1 so that i don't compare serial to serial for timesIndex in numpy.arange(1, len(allTimes)): fig3d = plt.figure(0) plt.clf() times = allTimes[timesIndex] name = allNames[timesIndex] ax = fig3d.gca(projection='3d') ax.view_init(elev=0, azim=-111) surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), log10(allTimes[0] / times), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False) surf.set_norm(colorNormalizer) plt.xlabel('log10(dotProductSize)') plt.ylabel('log10(memorySize)') ax.set_zlabel('log10(speedup) [unitless]') ax.set_zlim([minSpeedup, maxSpeedup]) plt.title(name + ' speedup over serial') if (makeImageFiles == True): ax.view_init(elev=2, azim=-23) filename = outputPrefix + 'VersusSerial_' + name + suffix plt.savefig(filename + '.pdf') print 'saved file to %s' % filename if (makeOrbitFilesForMovies == True and timesIndex > 0): for frameIndex in range(numberOfOrbitFrames): ax.view_init(elev=2, azim=360 * frameIndex / (numberOfOrbitFrames - 1)) filename = outputPrefix + 'orbitFrames/VersusSerial_' + name + suffix + '_%02d.pdf' % frameIndex plt.savefig(filename) print 'saved file to %s' % filename 
else: plt.show() fig2d = plt.figure(1) for memorySizeIndex in [-1, 0]: legendNames = [] plt.cla() for timesIndex in range(len(allTimes)): times = allTimes[timesIndex] name = allNames[timesIndex] plt.plot(dotProductSize[:, memorySizeIndex], allTimes[0][:, memorySizeIndex] / times[:, memorySizeIndex], markers[timesIndex], color=colors[timesIndex], hold='on', linewidth=2) legendNames.append(name) plt.xscale('log') plt.yscale('log') plt.title('speedup over serial for memory size %.2e' % memorySize[0, memorySizeIndex], fontsize=16) plt.xlabel('dot product size', fontsize=16) plt.ylabel('speedup [unitless]', fontsize=16) #plt.ylim([0, 6]) plt.xlim([dotProductSize[0, 0], dotProductSize[-1, 0]]) ax2d.legend(legendNames, loc='center right', bbox_to_anchor=bbox_to_anchor2d) if (makeImageFiles == True): sizeDescription = 'largestSize' if (memorySizeIndex == -1) else 'smallestSize' filename = outputPrefix + 'VersusSerial_2d_' + sizeDescription + suffix plt.savefig(filename + '.pdf') print 'saved file to %s' % filename else: plt.show() # now make relative speedup over openmp # TODO: you might disable this part maxSpeedup = -10 minSpeedup = 10 for timesIndex in numpy.arange(2, len(allTimes)): maxSpeedup = numpy.max([maxSpeedup, numpy.max(log10(allTimes[1] / allTimes[timesIndex]))]) minSpeedup = numpy.min([minSpeedup, numpy.min(log10(allTimes[1] / allTimes[timesIndex]))]) colorNormalizer = matplotlib.colors.Normalize(vmin=minSpeedup, vmax=maxSpeedup) # intentionally start at 2 so that i don't compare serial or omp to omp for timesIndex in numpy.arange(2, len(allTimes)): fig3d = plt.figure(0) plt.clf() times = allTimes[timesIndex] name = allNames[timesIndex] ax = fig3d.gca(projection='3d') ax.view_init(elev=0, azim=-111) surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), log10(allTimes[1] / times), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False) surf.set_norm(colorNormalizer) plt.xlabel('log10(dotProductSize)') 
plt.ylabel('log10(memorySize)') ax.set_zlabel('log10(speedup) [unitless]') ax.set_zlim([minSpeedup, maxSpeedup]) plt.title(name + ' speedup over omp') if (makeImageFiles == True): ax.view_init(elev=2, azim=-23) filename = outputPrefix + 'VersusOmp_' + name + suffix plt.savefig(filename + '.pdf') print 'saved file to %s' % filename if (makeOrbitFilesForMovies == True and timesIndex > 1): for frameIndex in range(numberOfOrbitFrames): ax.view_init(elev=2, azim=360 * frameIndex / (numberOfOrbitFrames - 1)) filename = outputPrefix + 'orbitFrames/VersusOmp_' + name + suffix + '_%02d.pdf' % frameIndex plt.savefig(filename) print 'saved file to %s' % filename else: plt.show() fig2d = plt.figure(1) for memorySizeIndex in [-1, 0]: legendNames = [] plt.cla() for timesIndex in range(len(allTimes)): times = allTimes[timesIndex] name = allNames[timesIndex] plt.plot(dotProductSize[:, memorySizeIndex], allTimes[1][:, memorySizeIndex] / times[:, memorySizeIndex], markers[timesIndex], color=colors[timesIndex], hold='on', linewidth=2) legendNames.append(name) plt.xscale('log') plt.yscale('log') plt.title('speedup over openmp for memory size %.2e' % memorySize[0, memorySizeIndex], fontsize=16) plt.xlabel('dot product size', fontsize=16) plt.ylabel('speedup [unitless]', fontsize=16) plt.xlim([dotProductSize[0, 0], dotProductSize[-1, 0]]) ax2d.legend(legendNames, loc='center right', bbox_to_anchor=bbox_to_anchor2d) if (makeImageFiles == True): sizeDescription = 'largestSize' if (memorySizeIndex == -1) else 'smallestSize' filename = outputPrefix + 'VersusOmp_2d_' + sizeDescription + suffix plt.savefig(filename + '.pdf') print 'saved file to %s' % filename else: plt.show() # relative speedup over cudaIndependent # TODO: you might disable this part maxSpeedup = -10 minSpeedup = 10 for timesIndex in numpy.arange(3, len(allTimes)): maxSpeedup = numpy.max([maxSpeedup, numpy.max(log10(allTimes[2] / allTimes[timesIndex]))]) minSpeedup = numpy.min([minSpeedup, numpy.min(log10(allTimes[2] / 
allTimes[timesIndex]))]) colorNormalizer = matplotlib.colors.Normalize(vmin=minSpeedup, vmax=maxSpeedup) # intentionally start at 3 so that i don't compare cuda or serial or omp to cuda for timesIndex in numpy.arange(3, len(allTimes)): fig3d = plt.figure(0) plt.clf() times = allTimes[timesIndex] name = allNames[timesIndex] ax = fig3d.gca(projection='3d') ax.view_init(elev=0, azim=-111) surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), log10(allTimes[2] / times), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False) surf.set_norm(colorNormalizer) plt.xlabel('log10(dotProductSize)') plt.ylabel('log10(memorySize)') ax.set_zlabel('log10(speedup) [unitless]') ax.set_zlim([minSpeedup, maxSpeedup]) plt.title(name + ' speedup over cudaIndependent') if (makeImageFiles == True): ax.view_init(elev=2, azim=-23) filename = outputPrefix + 'VersusCudaIndependent_' + name + suffix plt.savefig(filename + '.pdf') print 'saved file to %s' % filename if (makeOrbitFilesForMovies == True and timesIndex > 2): for frameIndex in range(numberOfOrbitFrames): ax.view_init(elev=2, azim=360 * frameIndex / (numberOfOrbitFrames - 1)) filename = outputPrefix + 'orbitFrames/VersusCudaIndependent_' + name + suffix + '_%02d.pdf' % frameIndex plt.savefig(filename) print 'saved file to %s' % filename else: plt.show() fig2d = plt.figure(1) for memorySizeIndex in [-1, 0]: legendNames = [] plt.cla() for timesIndex in range(len(allTimes)): times = allTimes[timesIndex] name = allNames[timesIndex] plt.plot(dotProductSize[:, memorySizeIndex], allTimes[2][:, memorySizeIndex] / times[:, memorySizeIndex], markers[timesIndex], color=colors[timesIndex], hold='on', linewidth=2) legendNames.append(name) plt.xscale('log') plt.yscale('log') plt.title('speedup over cuda independent for memory size %.2e' % memorySize[0, memorySizeIndex], fontsize=16) plt.xlabel('dot product size', fontsize=16) plt.ylabel('speedup [unitless]', fontsize=16) plt.xlim([dotProductSize[0, 0], 
dotProductSize[-1, 0]]) ax2d.legend(legendNames, loc='center right', bbox_to_anchor=bbox_to_anchor2d) if (makeImageFiles == True): sizeDescription = 'largestSize' if (memorySizeIndex == -1) else 'smallestSize' filename = outputPrefix + 'VersusCudaIndependent_2d_' + sizeDescription + suffix plt.savefig(filename + '.pdf') print 'saved file to %s' % filename else: plt.show() # these graphs are essentially duplicates of ones made already, but with a linear scale instead of logarithmic (by request of carter). # these graphs just compare kokkos omp versus openmp and kokkos cuda versus cuda # omp fig3d = plt.figure(0) plt.clf() ax = fig3d.gca(projection='3d') ax.view_init(elev=0, azim=-111) surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), (allTimes[1] / allTimes[5]), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False) plt.xlabel('log10(dotProductSize)') plt.ylabel('log10(memorySize)') ax.set_zlabel('speedup [unitless]') plt.title('kokkos omp speedup over omp') if (makeImageFiles == True): ax.view_init(elev=2, azim=-23) filename = outputPrefix + 'VersusOmp_kokkosOmp_linear' + suffix plt.savefig(filename + '.pdf') print 'saved file to %s' % filename if (makeOrbitFilesForMovies == True): for frameIndex in range(numberOfOrbitFrames): ax.view_init(elev=2, azim=360 * frameIndex / (numberOfOrbitFrames - 1)) filename = outputPrefix + 'orbitFrames/VersusOmp_kokkosOmp_linear' + suffix + '_%02d.pdf' % frameIndex plt.savefig(filename) print 'saved file to %s' % filename else: plt.show() # cuda fig3d = plt.figure(0) plt.clf() ax = fig3d.gca(projection='3d') ax.view_init(elev=0, azim=-111) surf = ax.plot_surface(log10(dotProductSize), log10(memorySize), (allTimes[2] / allTimes[6]), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0.5, antialiased=False) plt.xlabel('log10(dotProductSize)') plt.ylabel('log10(memorySize)') ax.set_zlabel('speedup [unitless]') plt.title('kokkos cuda speedup over cuda') if (makeImageFiles == True): ax.view_init(elev=2, 
azim=-23) filename = outputPrefix + 'VersusCudaIndependent_kokkosCudaIndependent_linear' + suffix plt.savefig(filename + '.pdf') print 'saved file to %s' % filename if (makeOrbitFilesForMovies == True): for frameIndex in range(numberOfOrbitFrames): ax.view_init(elev=2, azim=360 * frameIndex / (numberOfOrbitFrames - 1)) filename = outputPrefix + 'orbitFrames/VersusCudaIndependent_kokkosCudaIndependent_linear' + suffix + '_%02d.pdf' % frameIndex plt.savefig(filename) print 'saved file to %s' % filename else: plt.show()
[ 11748, 10688, 198, 11748, 28686, 198, 11748, 25064, 198, 11748, 299, 32152, 198, 11748, 629, 541, 88, 198, 11748, 2603, 29487, 8019, 198, 6759, 29487, 8019, 13, 1904, 10786, 46384, 11537, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 35...
2.656637
7,112
"""Collection serializers.""" from typing import Any, Dict from dj_rest_auth.registration.serializers import RegisterSerializer from django.contrib.auth.password_validation import validate_password from rest_framework import exceptions, serializers from .models import CustomUser from .validators import ( validate_confusables, validate_confusables_email, validate_reserved_name, ) class UserDetailsSerializer(serializers.Serializer): """User detail serializer.""" email = serializers.EmailField(read_only=True) username = serializers.CharField(read_only=True) picture = serializers.ImageField(read_only=True) is_active = serializers.BooleanField(read_only=True) class JWTSerializer(serializers.Serializer): """JWT serializer.""" access_token = serializers.CharField(read_only=True) refresh_token = serializers.CharField(read_only=True) user = UserDetailsSerializer(read_only=True) class CustomRegisterSerializer(RegisterSerializer): """Custom Register serializer.""" full_name = serializers.CharField(max_length=300) def get_cleaned_data(self: "CustomRegisterSerializer") -> Dict[str, Any]: """Cleaning for input data.""" data_dict = super().get_cleaned_data() data_dict["full_name"] = self.validated_data.get("full_name", "") return data_dict class UserSignUpSerializer(serializers.ModelSerializer): """User signup serializer.""" class Meta: """Meta data.""" model = CustomUser fields = ( "username", "password", "email", "full_name", ) extra_kwargs = { "password": {"write_only": True, "style": {"input_type": "password"}} } def validate_password(self: "UserSignUpSerializer", value: str) -> str: """Password validation.""" validate_password(value, self.instance) return value def create(self: "UserSignUpSerializer", validated_data: Dict) -> CustomUser: """Create method for UserSignUpSerializer.""" password = validated_data.pop("password") username = validated_data.get("username") email = validated_data.get("email") local, domain = email.split("@") validate_reserved_name( value=username, 
exception_class=exceptions.ValidationError ) validate_reserved_name(value=local, exception_class=exceptions.ValidationError) validate_confusables(value=username, exception_class=exceptions.ValidationError) validate_confusables_email( local_part=local, domain=domain, exception_class=exceptions.ValidationError ) user = CustomUser(**validated_data) user.set_password(password) user.save() return user
[ 37811, 36307, 11389, 11341, 526, 15931, 198, 6738, 19720, 1330, 4377, 11, 360, 713, 198, 198, 6738, 42625, 62, 2118, 62, 18439, 13, 2301, 33397, 13, 46911, 11341, 1330, 17296, 32634, 7509, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 184...
2.676806
1,052
import numpy as np from PIL import Image import matplotlib, os #print "importing visual module" if not('DISPLAY' in os.environ): matplotlib.use("Agg") import matplotlib.pyplot as plt import matplotlib.ticker as ticker import matplotlib.backends.backend_pdf import copy # customization: http://matplotlib.org/users/customizing.html def saveAllPlotsToPDF(): ''' # Save all created plots into a pdf file. ''' pdf = matplotlib.backends.backend_pdf.PdfPages("output.pdf") for i in plt.get_fignums(): fig = plt.figure(i) pdf.savefig(fig) pdf.close() def xkcd(): '''special style''' plt.xkcd() def show(): '''show plots on screen''' plt.show() def GenerateAverageImagesFromDictionary(dict, save_to_dir=None, output_folder=None): ''' Gets a dictionary of d[score_label_value] pointing to an array of images :param dict: :return: Up to 100 averaged images ''' dict_of_images = {} for i in range(0,len(dict)): imlist = dict[i] N = len(imlist) if N > 0: w, h = Image.open(imlist[0]).size arr = np.zeros((h, w, 3), np.float) for im in imlist: imarr = np.array(Image.open(im), dtype=np.float) arr = arr + imarr / N arr = np.array(np.round(arr), dtype=np.uint8) dict_of_images[i] = arr if save_to_dir is not None: out=Image.fromarray(arr,mode="RGB") out.save(output_folder+str(i).zfill(3)+"_avgFrom_"+str(N)+".png") #out.show() return dict_of_images def plotX_sortValues(dont_touch_this_x, title='', x_min=0.0, x_max=1.0, notReverse=False, custom_x_label = '# of images', custom_y_label = 'Score value'): '''Visualization of dataset by the method of sorting array by value and plotting.''' x = copy.copy(dont_touch_this_x) if notReverse: x.sort() else: x.sort(reverse=True) plt.figure() axes = plt.axes() axes.set_xlabel(custom_x_label) axes.set_ylabel(custom_y_label) plt.plot(x, color='red') axes.fill_between(range(len(x)), x, facecolor='orange', edgecolor='red', alpha=1) zoomOut(axes, [0.0, len(x)-1], [x_min, x_max], factor=0.05) axes.fill_between(x, 0) axes.set_title(title) def plotHistogram(x, title='', 
num_bins=100, x_min=0.0, x_max=1.0, custom_x_label = 'Score value', custom_y_label = 'Count of occurances'): ''' Plot histogram from the x data.''' plt.figure() axes = plt.axes() hist, bins = np.histogram(x, bins=num_bins) width = 0.7 * (bins[1] - bins[0]) center = (bins[:-1] + bins[1:]) / 2 plt.bar(center, hist, align='center', width=width, color='orange', edgecolor='red') axes.xaxis.set_major_locator(ticker.MultipleLocator(np.abs(x_max-x_min)/10.0)) axes.xaxis.set_minor_locator(ticker.MultipleLocator(np.abs(x_max-x_min)/100.0)) # add a 'best fit' line axes.set_xlabel(custom_x_label) axes.set_ylabel(custom_y_label) zoomOutY(axes, factor=0.05, only_up=True) zoomOutX(axes, [x_min, x_max], factor=0.05) # Tweak spacing to prevent clipping of ylabel axes.set_title(title) def plotWhisker(data, title='', y_min=0.0, y_max=1.0, legend_on=True, notch=True): ''' Plot box plot / whisker graph from data.''' plt.figure(figsize=(5, 8)) axes = plt.axes() axes.yaxis.set_major_locator(ticker.MultipleLocator(np.abs(y_max-y_min)/10.0)) axes.yaxis.set_minor_locator(ticker.MultipleLocator(np.abs(y_max-y_min)/100.0)) meanpointprops = dict(linewidth=1.0) boxplot = plt.boxplot(data, notch=notch, showmeans=True, meanprops=meanpointprops) plt.xticks([]) if (legend_on): boxplot['medians'][0].set_label('median') boxplot['means'][0].set_label('mean') boxplot['fliers'][0].set_label('outlayers') # boxplot['boxes'][0].set_label('boxes') # boxplot['whiskers'][0].set_label('whiskers') boxplot['caps'][0].set_label('caps') axes.set_xlim([0.7, 1.7]) plt.legend(numpoints = 1) zoomOutY(axes,factor=0.1) axes.set_title(title) def plotMultipleWhiskerPlots(datas, whiskers, labels): # support of generating multiple box plots ''' Example run: means_men = (20, 35, 30, 35, 27) std_men = (2, 3, 4, 1, 2) means_women = (25, 32, 34, 20, 25) std_women = (3, 5, 2, 3, 3) datas = [means_men, means_women, means_men] whiskers = [std_men, std_women, std_women] labels = ['1', '2', '3'] 
plotMultipleWhiskerPlots(datas,whiskers,labels) ''' fig, ax = plt.subplots() index = np.arange(len(datas[0])) bar_width = (1.0 / len(datas)) * 0.9 opacity = 0.6 error_config = {'ecolor': '0.3'} colors = ['r', 'b', 'y'] for i in range(0,len(datas)): rects = plt.bar(index + i*bar_width, datas[i], bar_width, alpha=opacity, color=colors[min(i,len(colors)-1)], yerr=whiskers[i], error_kw=error_config, label=labels[i]) plt.xticks(index + bar_width / len(datas),np.arange(1,len(datas[0])+1)) plt.legend() plt.tight_layout() def subPlot2(fce1, fce2, param1=None, param2=None): ''' Join two plots. Example run: def tmp_fce1(): ... def tmp_fce2(): ... subPlot2(tmp_fce1, tmp_fce2) ''' plt.subplot(2, 1, 1) fce1() plt.subplot(2, 1, 2) fce2() plt.show() def zoomOut(axes, xlim=None, ylim=None, factor=0.05): ''' Set size to fit in limitations. :param axes: handler to matlibplot :param xlim: list of [from x, to x] values :param ylim: list of [from y, to y] values :param factor: zoom factor :return: ''' zoomOutX(axes, xlim, factor) zoomOutY(axes, ylim, factor) def zoomOutX(axes,xlim=None,factor=0.05): ''' handle the X axis''' if xlim == None: xlim = axes.get_xlim() axes.set_xlim((xlim[0] + xlim[1]) / 2 + np.array((-0.5, 0.5)) * (xlim[1] - xlim[0]) * (1 + factor)) def zoomOutY(axes,ylim=None,factor=0.05, only_up = False): ''' handle the Y axis''' if ylim == None: ylim = axes.get_ylim() bottom = -0.5 axes.set_ylim((ylim[0] + ylim[1]) / 2 + np.array((-0.5, 0.5)) * (ylim[1] - ylim[0]) * (1 + factor)) if only_up: ylim = axes.get_ylim() #print ylim axes.set_ylim(0.0,ylim[1])
[ 11748, 299, 32152, 355, 45941, 198, 6738, 350, 4146, 1330, 7412, 198, 11748, 2603, 29487, 8019, 11, 28686, 198, 198, 2, 4798, 366, 11748, 278, 5874, 8265, 1, 198, 198, 361, 407, 10786, 26288, 31519, 6, 287, 28686, 13, 268, 2268, 2599,...
2.081403
3,108
# coding=utf-8 # Storage Management Providers # # Copyright (C) 2014 Red Hat, Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # The views and conclusions contained in the software and documentation are # those of the authors and should not be interpreted as representing official # policies, either expressed or implied, of the FreeBSD Project. # # Authors: Jan Synacek <jsynacek@redhat.com> # Jan Safranek <jsafrane@redhat.com> # """ Mount management. Usage: %(cmd)s list [ --all ] [ <target> ... ] %(cmd)s create <device> <mountpoint> [ (-t <fs_type>) (-o <options>) ] %(cmd)s delete <target> %(cmd)s show [ --all ] [ <target> ... ] Commands: list List mounted filesystems with a device attached to them. 
<target> can be specified either as device names or mountpoints. create Mount a specified device on the path given by mountpoint. Optionally, filesystem type, common options (filesystem independent) and filesystem specific options can be provided. If no filesystem type is specified, it is automatically detected. Options can be provided as a comma-separated string of 'option_name:value' items. Possible option names are: AllowExecution AllowMandatoryLock AllowSUID AllowUserMount AllowWrite Auto Dump FileSystemCheckOrder InterpretDevices Silent SynchronousDirectoryUpdates SynchronousIO UpdateAccessTimes UpdateDirectoryAccessTimes UpdateFullAccessTimes UpdateRelativeAccessTimes Possible option values for all of the options except for FileSystemCheckOrder are 't', 'true', 'f', 'false'. All of them are case insensitive. The FileSystemCheckOrder option's value is a number. In case an option is not recognized as being one of the possible options listed above, it's used as a filesystem dependent option. Examples: create /dev/vda1 /mnt -t ext4 -o 'AllowWrite:F,InterpretDevices:false' create /dev/vda2 /mnt -o 'FileSystemCheckOrder:2' create /dev/vda3 /mnt -o 'user_xattr,barrier=0' create /dev/vda4 /mnt -o 'Dump:t, AllowMandatoryLock:t, acl' delete Unmount a mounted filesystem. Can be specified either as a device path or a mountpoint. show Show detailed information about mounted filesystems with a device attached to them. <target> can be specified either as device names or mountpoints. <spec>. Optionally, show all mounted filesystems. 
""" from lmi.shell.LMIUtil import lmi_isinstance from lmi.scripts.common import command from lmi.scripts.common import get_logger from lmi.scripts.common.formatter import command as fcmd from lmi.scripts.storage import show, fs, lvm, mount, raid, partition from lmi.scripts.storage.common import (size2str, get_devices, get_children, get_parents, str2device, str2size, str2vg) from lmi.scripts.common.errors import LmiFailed LOG = get_logger(__name__) def get_mounts_for_targets(ns, targets): """ Return list of LMI_MountedFilesystem instances for given devices or directories. :type mntspec: List of strings or LMIInstance/CIM_StorageExtents. :param mntspec: Mount specifications. If a string is provided as a mount specification, it can be either device name or mount directory. """ mounts = [] for target in targets: try: device = str2device(ns, target) if device: target = device.Name except LmiFailed: # we did not find CIM_StorageExtent for the device, it must be non # device filesystem specification pass mnts = ns.LMI_MountedFileSystem.instances({'FileSystemSpec':target}) + \ ns.LMI_MountedFileSystem.instances({'MountPointPath':target}) mounts += mnts return mounts
[ 2, 19617, 28, 40477, 12, 23, 198, 2, 20514, 8549, 7518, 4157, 198, 2, 198, 2, 15069, 357, 34, 8, 1946, 2297, 10983, 11, 3457, 13, 1439, 2489, 10395, 13, 198, 2, 198, 2, 2297, 396, 3890, 290, 779, 287, 2723, 290, 13934, 5107, 11,...
2.796218
1,904
################################################################################ # CODE TO CONVERT COLAB NOTEBOOK TO PDF # #This code converts an .ipynb file to a Tex based PDF and saves it in the Colab# #Notebook folder with the same filename.pdf # ################################################################################ # Function List ################################################################################ #Converter for .ipynb conversion to PDF. Input is a string of the file name
[ 29113, 29113, 14468, 198, 2, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 42714, 5390, 7102, 15858, 20444, 6242, 24550, 39453, 5390, 12960, 220, 220, 220, 220, 220, 220, 220, 220, 22...
3.558282
163
from selenium import webdriver from Cart import cart from Main_page import main_page from Duck_Page import duck_page
[ 6738, 384, 11925, 1505, 1330, 3992, 26230, 198, 6738, 13690, 1330, 6383, 198, 6738, 8774, 62, 7700, 1330, 1388, 62, 7700, 198, 6738, 21867, 62, 9876, 1330, 22045, 62, 7700, 628 ]
3.806452
31
from unittest.mock import patch import pytest from rotkehlchen.assets.resolver import AssetResolver from rotkehlchen.tests.utils.mock import MockResponse @pytest.fixture() @pytest.fixture() @pytest.fixture() def query_github_for_assets() -> bool: """If True, the default behavior of querying github for latest assets will occur""" return False @pytest.fixture() def force_reinitialize_asset_resolver() -> bool: """If True, the asset resolver instance will be force to start frm scratch""" return False # We need auto-use here since the fixture needs to be included # everywhere so as to not have Asset() calls use a Resolver not # initialized from here which would take more time @pytest.fixture(autouse=True) def asset_resolver( data_dir, query_github_for_assets, mock_asset_meta_github_response, mock_asset_github_response, force_reinitialize_asset_resolver, ): """Run the first initialization of the AssetResolver singleton It's an autouse fixture so that it always gets initialized """ if force_reinitialize_asset_resolver: AssetResolver._AssetResolver__instance = None if query_github_for_assets: AssetResolver(data_dir) return # else mock the github request to return version lower than anything possible get_patch = patch('requests.get', side_effect=mock_get_request) with get_patch: AssetResolver(data_dir)
[ 6738, 555, 715, 395, 13, 76, 735, 1330, 8529, 198, 198, 11748, 12972, 9288, 198, 198, 6738, 5724, 365, 18519, 6607, 13, 19668, 13, 411, 14375, 1330, 31433, 4965, 14375, 198, 6738, 5724, 365, 18519, 6607, 13, 41989, 13, 26791, 13, 76, ...
2.931313
495
# -*- coding: utf-8 -*- # # Copyright (c) 2020~2999 - Cologler <skyoflw@gmail.com> # ---------- # # ---------- from typing import TypedDict, Optional from logging import Logger import zipfile import xml.etree.ElementTree as et from .androidManifestDecompress import read def read_package_info(path: str, logger: Logger) -> Optional[_PackageInfo]: 'read package info from *.apk file.' with zipfile.ZipFile(path) as z: with z.open('AndroidManifest.xml') as am: try: a = read(am) except: logger.warning(f'unable decode manifest, skiped.') else: xml = et.fromstring(a) return dict( package=xml.get('package'), version=xml.get('versionName') )
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 198, 2, 15069, 357, 66, 8, 12131, 93, 1959, 2079, 532, 327, 928, 1754, 1279, 15688, 1659, 75, 86, 31, 14816, 13, 785, 29, 198, 2, 24200, 438, 198, 2, 198, 2, ...
2.127937
383
from ape import plugins from ape.api.config import ConfigDict @plugins.register(plugins.Config)
[ 6738, 43835, 1330, 20652, 198, 6738, 43835, 13, 15042, 13, 11250, 1330, 17056, 35, 713, 628, 198, 31, 37390, 13, 30238, 7, 37390, 13, 16934, 8, 198 ]
3.62963
27
import redis # redis_instance = redis.StrictRedis(host='aerios-ec-dqu.3mvwix.0001.use1.cache.amazonaws.com', port=9000, db=0) redis_instance = redis.StrictRedis() pub_sub = redis_instance.pubsub() # print(subscriber("otp_channel")) # print(publisher("otp_channel", "Your otp is 12345")) # print(listen_message()) # print(unsubsciber(channels))
[ 11748, 2266, 271, 198, 198, 2, 2266, 271, 62, 39098, 796, 2266, 271, 13, 1273, 2012, 7738, 271, 7, 4774, 11639, 25534, 4267, 12, 721, 12, 67, 421, 13, 18, 76, 85, 86, 844, 13, 18005, 13, 1904, 16, 13, 23870, 13, 33103, 8356, 13,...
2.489362
141
__version__ = "0.5.6" if __name__ == "__main__": # The build script uses this to extract the current version print(__version__)
[ 834, 9641, 834, 796, 366, 15, 13, 20, 13, 21, 1, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 1303, 383, 1382, 4226, 3544, 428, 284, 7925, 262, 1459, 2196, 198, 220, 220, 220, 3601, 7, 8...
2.795918
49
# before running the qc, need to rename various output files # <data format="html" name="html_file" /> # <data format="txt" name="log_file" parent="html_file" /> # <data format="tabular" name="marker_file" parent="html_file" /> # <data format="tabular" name="subject_file" parent="html_file" /> from galaxy import datatypes,model import sys,string
[ 2, 878, 2491, 262, 10662, 66, 11, 761, 284, 36265, 2972, 5072, 3696, 198, 2, 220, 220, 220, 220, 220, 220, 1279, 7890, 5794, 2625, 6494, 1, 1438, 2625, 6494, 62, 7753, 1, 11037, 198, 2, 220, 220, 220, 220, 220, 220, 1279, 7890, ...
2.783582
134
import operator
[ 11748, 10088, 201 ]
5.333333
3
import torch, sys import torch.nn as nn import torch.nn.functional as F from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence sys.path.append('..') from utils import * from seq2seq_attn.model import EncoderRNN as PrevUtterancesEncoder from seq2seq_world_state.model import NextActionsEncoder, BlockCountersEncoder, BlockRegionCountersEncoder # NOTE: no need of init weights here as that is done within the sub-modules of this module class UtterancesAndBlockCountersEncoder(nn.Module): """ Integrated model -- combines an encoder RNN for encoding previous utterances with a global block counters encoder """ class UtterancesAndBlockRegionCountersEncoder(nn.Module): """ Integrated model -- combines an encoder RNN for encoding previous utterances with a regional block counters encoder (which comes with an optional global block counters encoder as well) """
[ 11748, 28034, 11, 25064, 198, 11748, 28034, 13, 20471, 355, 299, 77, 198, 11748, 28034, 13, 20471, 13, 45124, 355, 376, 198, 6738, 28034, 13, 20471, 13, 26791, 13, 81, 20471, 1330, 2353, 62, 79, 29373, 62, 43167, 11, 14841, 62, 34860,...
3.475285
263
PI = float(3.14159) raio = float(input()) print("A=%0.4f" %(PI * (raio * raio)))
[ 11901, 796, 12178, 7, 18, 13, 1415, 19707, 8, 198, 198, 430, 952, 796, 12178, 7, 15414, 28955, 198, 198, 4798, 7203, 32, 28, 4, 15, 13, 19, 69, 1, 4064, 7, 11901, 1635, 357, 430, 952, 1635, 2179, 952, 22305, 198 ]
1.97619
42
#modules for GUI interface import tkinter from tkinter import * from tkinter import ttk # User interface setup # this sets up the characteristics of the window root = Tk() root.title("Disney Checker") mainframe = ttk.Frame(root, padding="3 3 12 12") mainframe.grid(column=0, row=0, sticky=(N, W, E, S)) mainframe.columnconfigure(0, weight=1) mainframe.rowconfigure(0, weight=1) #set up array of labels, text entry boxes, and buttons firstLabel = StringVar() ttk.Label(mainframe, textvariable=firstLabel).grid(column=3, row=3, sticky=(W, E)) firstLabel.set('Character') firstInputBox = ttk.Entry(mainframe, width = 60, textvariable = StringVar()) firstInputBox.grid(column=4, row=3, sticky=W) firstInputBox.insert(END, 'type name here') #set up action button doSomethingButton = ttk.Button(mainframe, text = "Check character", width = 30, command = lambda: checkCharacterButtonClick() ) doSomethingButton.grid(column=4, row=15, sticky=W) # ------------------------------------------------------------------------------------------ # Function definitions if __name__=="__main__": main()
[ 2, 18170, 329, 25757, 7071, 198, 11748, 256, 74, 3849, 198, 6738, 256, 74, 3849, 1330, 1635, 198, 6738, 256, 74, 3849, 1330, 256, 30488, 198, 198, 2, 11787, 7071, 9058, 198, 198, 2, 428, 5621, 510, 262, 9695, 286, 262, 4324, 198, ...
3.277612
335
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # This file is included in the final Docker image and SHOULD be overridden when # deploying the image to prod. Settings configured here are intended for use in local # development environments. Also note that superset_config_docker.py is imported # as a final step as a means to override "defaults" configured here # import logging import os from cachelib.file import FileSystemCache logger = logging.getLogger() def get_env_variable(var_name, default=None): """Get the environment variable or raise exception.""" try: return os.environ[var_name] except KeyError: if default is not None: return default else: error_msg = "The environment variable {} was missing, abort...".format( var_name ) raise EnvironmentError(error_msg) DATABASE_DIALECT = get_env_variable("DATABASE_DIALECT") DATABASE_USER = get_env_variable("DATABASE_USER") DATABASE_PASSWORD = get_env_variable("DATABASE_PASSWORD") DATABASE_HOST = get_env_variable("DATABASE_HOST") DATABASE_PORT = get_env_variable("DATABASE_PORT") DATABASE_DB = get_env_variable("DATABASE_DB") # The SQLAlchemy connection string. 
SQLALCHEMY_DATABASE_URI = "%s://%s:%s@%s:%s/%s" % ( DATABASE_DIALECT, DATABASE_USER, DATABASE_PASSWORD, DATABASE_HOST, DATABASE_PORT, DATABASE_DB, ) REDIS_HOST = get_env_variable("REDIS_HOST") REDIS_PORT = get_env_variable("REDIS_PORT") REDIS_CELERY_DB = get_env_variable("REDIS_CELERY_DB", 0) REDIS_RESULTS_DB = get_env_variable("REDIS_CELERY_DB", 1) RESULTS_BACKEND = FileSystemCache("/app/superset_home/sqllab") CELERY_CONFIG = CeleryConfig SQLLAB_CTAS_NO_LIMIT = True # # Optionally import superset_config_docker.py (which will have been included on # the PYTHONPATH) in order to allow for local settings to be overridden # try: import superset_config_docker from superset_config_docker import * # noqa logger.info( f"Loaded your Docker configuration at " f"[{superset_config_docker.__file__}]" ) except ImportError: logger.info("Using default Docker config...") # source: https://github.com/apache/incubator-superset/pull/1866#issuecomment-347310860 ADDITIONAL_MIDDLEWARE = [ReverseProxied, ]
[ 2, 49962, 284, 262, 24843, 10442, 5693, 357, 1921, 37, 8, 739, 530, 198, 2, 393, 517, 18920, 5964, 11704, 13, 220, 4091, 262, 28536, 2393, 198, 2, 9387, 351, 428, 670, 329, 3224, 1321, 198, 2, 5115, 6634, 9238, 13, 220, 383, 7054,...
2.833489
1,069
# input n = int(input()) k = int(input()) sensor = map(int, input().split()) # process ''' 센서들의 좌표를 정렬 각 센서마다 본인 다음 센서와의 거리를 계산 거리들 정렬 가장 긴 거리 제거 >> 처음에는 하나였던 센서들이 두 개의 묶음으로 쪼개짐 >> 두 개의 집중국 다시 가장 긴 거리 제거 >> 두 묶음이었던 센서 중 하나가 쪼개지면서 총 세 개의 묶음이 됨 >> 세 개의 집중국 이런 식으로 k개의 집중국이 될 때까지 최대 거리를 제거 남은 거리들의 합 계산 ''' sensor = sorted(sensor) dist = sorted([sensor[i + 1] - sensor[i] for i in range(n - 1)]) # output print(sum(dist[: (n - 1) - (k - 1)]))
[ 2, 5128, 198, 77, 796, 493, 7, 15414, 28955, 198, 74, 796, 493, 7, 15414, 28955, 198, 82, 22854, 796, 3975, 7, 600, 11, 5128, 22446, 35312, 28955, 198, 2, 1429, 198, 7061, 6, 198, 168, 226, 120, 168, 226, 250, 167, 241, 97, 3597...
0.848659
522
import random print(experiment(1_000_000))
[ 11748, 4738, 628, 198, 4798, 7, 23100, 3681, 7, 16, 62, 830, 62, 830, 4008, 198 ]
2.8125
16
import argparse import sys import seqlib parser = argparse.ArgumentParser( description='Calculate statistics for genome') parser.add_argument('--fasta', required = True, type = str, metavar = '<path>', help='path to a fasta file, may be compressed') arg = parser.parse_args() contig_size = [] nt_count = {} for name, seq in seqlib.read_fasta(arg.fasta): seq = seq.upper() contig_size.append(len(seq)) for base in seq: if base not in nt_count: nt_count[base] = 1 elif base in nt_count: nt_count[base] += 1 gc_count = nt_count['G'] + nt_count['C'] # Sort contigs longest to shortest contig_size.sort(reverse = True) num_contigs = len(contig_size) shortest_contig = contig_size[-1] longest_contig = contig_size[0] total_size = 0 for i in contig_size: total_size += i avg_size = total_size/num_contigs # Median Calculation if num_contigs % 2 == 1: median_contig = contig_size[int(num_contigs/2)] elif num_contigs % 2 == 0: med1 = contig_size[int(num_contigs/2)] med2 = contig_size[int((num_contigs/2) +1)] median_contig = (med1 + med2)/2 n50 = 0 val = 0 for size in contig_size: val += size if val > total_size/2: n50 = size break gc_fraction = gc_count/total_size * 100 print(f'Total size: {total_size}') print(f'Number of contigs: {num_contigs}') print(f'Shortest contig: {shortest_contig}') print(f'Longest contig: {longest_contig}') print(f'Average contig size: {avg_size}') print(f'Median contig size: {median_contig}') print(f'N50: {n50}') print(f'GC Fraction: {gc_fraction}%') print(f'Letter Counts: {nt_count}')
[ 11748, 1822, 29572, 198, 11748, 25064, 198, 198, 11748, 33756, 8019, 198, 198, 48610, 796, 1822, 29572, 13, 28100, 1713, 46677, 7, 198, 197, 11213, 11639, 9771, 3129, 378, 7869, 329, 19270, 11537, 198, 48610, 13, 2860, 62, 49140, 10786, ...
2.366716
679
"""Optimal hard threshold for matrix denoising.""" import logging import numpy as np from scipy import integrate # Create logger log = logging.getLogger(__name__) def optht(beta, sv, sigma=None): """Compute optimal hard threshold for singular values. Off-the-shelf method for determining the optimal singular value truncation (hard threshold) for matrix denoising. The method gives the optimal location both in the case of the known or unknown noise level. Parameters ---------- beta : scalar or array_like Scalar determining the aspect ratio of a matrix, i.e., ``beta = m/n``, where ``m >= n``. Instead the input matrix can be provided and the aspect ratio is determined automatically. sv : array_like The singular values for the given input matrix. sigma : real, optional Noise level if known. Returns ------- k : int Optimal target rank. Notes ----- Code is adapted from Matan Gavish and David Donoho, see [1]_. References ---------- .. [1] Gavish, Matan, and David L. Donoho. "The optimal hard threshold for singular values is 4/sqrt(3)" IEEE Transactions on Information Theory 60.8 (2014): 5040-5053. 
http://arxiv.org/abs/1305.5870 """ # Compute aspect ratio of the input matrix if isinstance(beta, np.ndarray): m = min(beta.shape) n = max(beta.shape) beta = m / n # Check ``beta`` if beta < 0 or beta > 1: raise ValueError('Parameter `beta` must be in (0,1].') if sigma is None: # Sigma is unknown log.info('Sigma unknown.') # Approximate ``w(beta)`` coef_approx = _optimal_SVHT_coef_sigma_unknown(beta) log.info(f'Approximated `w(beta)` value: {coef_approx}') # Compute the optimal ``w(beta)`` coef = (_optimal_SVHT_coef_sigma_known(beta) / np.sqrt(_median_marcenko_pastur(beta))) # Compute cutoff cutoff = coef * np.median(sv) else: # Sigma is known log.info('Sigma known.') # Compute optimal ``w(beta)`` coef = _optimal_SVHT_coef_sigma_known(beta) # Compute cutoff cutoff = coef * np.sqrt(len(sv)) * sigma # Log cutoff and ``w(beta)`` log.info(f'`w(beta)` value: {coef}') log.info(f'Cutoff value: {cutoff}') # Compute and return rank greater_than_cutoff = np.where(sv > cutoff) if greater_than_cutoff[0].size > 0: k = np.max(greater_than_cutoff) + 1 else: k = 0 log.info(f'Target rank: {k}') return k def _optimal_SVHT_coef_sigma_known(beta): """Implement Equation (11).""" return np.sqrt(2 * (beta + 1) + (8 * beta) / (beta + 1 + np.sqrt(beta**2 + 14 * beta + 1))) def _optimal_SVHT_coef_sigma_unknown(beta): """Implement Equation (5).""" return 0.56 * beta**3 - 0.95 * beta**2 + 1.82 * beta + 1.43 def _mar_pas(x, topSpec, botSpec, beta): """Implement Marcenko-Pastur distribution.""" if (topSpec - x) * (x - botSpec) > 0: return np.sqrt((topSpec - x) * (x - botSpec)) / (beta * x) / (2 * np.pi) else: return 0 def _median_marcenko_pastur(beta): """Compute median of Marcenko-Pastur distribution.""" botSpec = lobnd = (1 - np.sqrt(beta))**2 topSpec = hibnd = (1 + np.sqrt(beta))**2 change = 1 while change & ((hibnd - lobnd) > .001): change = 0 x = np.linspace(lobnd, hibnd, 10) y = np.zeros_like(x) for i in range(len(x)): yi, err = integrate.quad( _mar_pas, a=x[i], b=topSpec, args=(topSpec, 
botSpec, beta), ) y[i] = 1.0 - yi if np.any(y < 0.5): lobnd = np.max(x[y < 0.5]) change = 1 if np.any(y > 0.5): hibnd = np.min(x[y > 0.5]) change = 1 return (hibnd + lobnd) / 2.
[ 37811, 27871, 4402, 1327, 11387, 329, 17593, 2853, 78, 1710, 526, 15931, 198, 198, 11748, 18931, 198, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 629, 541, 88, 1330, 19386, 198, 198, 2, 13610, 49706, 198, 6404, 796, 18931, 13, 1136, ...
2.169822
1,849
''' Asynchronous generator without any data loss in case that handling one message costs too much time. ''' import asyncio from ensureTaskCanceled.ensureTaskCanceled import ensureTaskCanceled # def NoLossAsyncGenerator(raw_async_iterater): # async def no_data_loss_async_generator_wrapper(raw_async_iterater): # q = asyncio.Queue() # # async def yield2q(raw_async_iterater, q: asyncio.Queue): # async for msg in raw_async_iterater: # q.put_nowait(msg) # # asyncio.create_task(yield2q(raw_async_iterater, q)) # while True: # msg = await q.get() # # generator.left = q.qsize() # # generator.__dict__['left'] = q.qsize() # yield msg # # generator = no_data_loss_async_generator_wrapper(raw_async_iterater) # return generator if __name__ == '__main__': asyncio.run(test_no_data_loss_async_generator())
[ 7061, 6, 198, 1722, 31301, 17301, 1231, 597, 1366, 2994, 287, 1339, 326, 9041, 530, 3275, 3484, 1165, 881, 640, 13, 198, 7061, 6, 198, 11748, 30351, 952, 198, 6738, 4155, 25714, 34, 590, 992, 13, 641, 495, 25714, 34, 590, 992, 1330,...
2.237981
416
from django.urls import path from .import views from blog.views import like_view app_name='products' urlpatterns = [ path('amazon', views.amazon, name='amazon'), path('ebay', views.ebay, name='ebay'), path('<int:id>/', views.product_detail, name="product_detail"), path('product_comparison/<int:id>/',views.product_comparison,name="product_comparison") ]
[ 6738, 42625, 14208, 13, 6371, 82, 1330, 3108, 198, 6738, 764, 11748, 5009, 220, 198, 6738, 4130, 13, 33571, 1330, 588, 62, 1177, 198, 1324, 62, 3672, 11639, 29498, 6, 198, 198, 6371, 33279, 82, 796, 685, 628, 220, 220, 220, 3108, 10...
2.777778
135
from .Rafa import Rafa
[ 6738, 764, 49, 28485, 1330, 20824, 64 ]
3.142857
7
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import os from . import BaseEncoder from ..devices import OnnxDevice, PaddleDevice, TorchDevice, TFDevice, MindsporeDevice from ...excepts import ModelCheckpointNotExist from ...helper import is_url, cached_property # mixin classes go first, base classes are read from right to left. class BaseOnnxEncoder(OnnxDevice, BaseEncoder): """ :class:`BasePaddleEncoder` is the base class for implementing Encoders with models from :mod:`onnxruntime` library. :param output_feature: the name of the layer for feature extraction. :param model_path: the path of the model in the format of `.onnx`. Check a list of available pretrained models at https://github.com/onnx/models#image_classification and download the git LFS to your local path. The ``model_path`` is the local path of the ``.onnx`` file, e.g. ``/tmp/onnx/mobilenetv2-1.0.onnx``. """ def post_init(self): """ Load the model from the `.onnx` file and add outputs for the selected layer, i.e. ``outputs_name``. The modified models is saved at `tmp_model_path`. 
""" super().post_init() model_name = self.raw_model_path.split('/')[-1] if self.raw_model_path else None tmp_model_path = self.get_file_from_workspace(f'{model_name}.tmp') if model_name else None raw_model_path = self.raw_model_path if self.raw_model_path and is_url(self.raw_model_path): import urllib.request download_path, *_ = urllib.request.urlretrieve(self.raw_model_path) raw_model_path = download_path self.logger.info(f'download the model at {self.raw_model_path}') if tmp_model_path and not os.path.exists(tmp_model_path) and self.outputs_name: self._append_outputs(raw_model_path, self.outputs_name, tmp_model_path) self.logger.info(f'save the model with outputs [{self.outputs_name}] at {tmp_model_path}') if tmp_model_path and os.path.exists(tmp_model_path): import onnxruntime self.model = onnxruntime.InferenceSession(tmp_model_path, None) self.inputs_name = self.model.get_inputs()[0].name self._device = None self.to_device(self.model) else: raise ModelCheckpointNotExist(f'model at {tmp_model_path} does not exist') @staticmethod class BaseTFEncoder(TFDevice, BaseEncoder): """:class:`BasePaddleEncoder` is the base class for implementing Encoders with models from :mod:`tensorflow` library.""" pass class BaseTorchEncoder(TorchDevice, BaseEncoder): """Base encoder class for :mod:`pytorch` library.""" pass class BasePaddleEncoder(PaddleDevice, BaseEncoder): """:class:`BasePaddleEncoder` is the base class for implementing Encoders with models from :mod:`paddlepaddle` library.""" pass class BaseMindsporeEncoder(MindsporeDevice, BaseEncoder): """ :class:`BaseMindsporeEncoder` is the base class for implementing Encoders with models from `mindspore`. To implement your own executor with the :mod:`mindspore` lilbrary, .. highlight:: python .. code-block:: python import mindspore.nn as nn class YourAwesomeModel(nn.Cell): def __init__(self): ... def construct(self, x): ... 
class YourAwesomeEncoder(BaseMindsporeEncoder): def encode(self, data, *args, **kwargs): from mindspore import Tensor return self.model(Tensor(data)).asnumpy() def get_cell(self): return YourAwesomeModel() :param model_path: the path of the model's checkpoint. :param args: additional arguments :param kwargs: additional key value arguments """ def post_init(self): """ Load the model from the `.ckpt` checkpoint. """ super().post_init() if self.model_path and os.path.exists(self.model_path): self.to_device() from mindspore.train.serialization import load_checkpoint, load_param_into_net _param_dict = load_checkpoint(ckpt_file_name=self.model_path) load_param_into_net(self.model, _param_dict) else: raise ModelCheckpointNotExist(f'model {self.model_path} does not exist') @cached_property def model(self): """ Get the Mindspore Neural Networks Cells. :return: model property """ return self.get_cell() def get_cell(self): """ Return Mindspore Neural Networks Cells. Pre-defined building blocks or computing units to construct Neural Networks. A ``Cell`` could be a single neural network cell, such as conv2d, relu, batch_norm, etc. or a composition of cells to constructing a network. """ raise NotImplementedError
[ 834, 22163, 4766, 834, 796, 366, 15269, 357, 66, 8, 12131, 449, 1437, 9552, 15302, 13, 1439, 2489, 10395, 526, 198, 834, 43085, 834, 796, 366, 25189, 4891, 12, 17, 13, 15, 1, 198, 198, 11748, 28686, 198, 198, 6738, 764, 1330, 7308, ...
2.465645
2,023
#!/usr/bin/env python import os import json import collections pwd = os.path.dirname(os.path.abspath(__file__)) root = os.path.dirname(pwd) try: f = open(os.path.join(root, 'Rules.1blockpkg')) obj = json.load(f, object_pairs_hook=collections.OrderedDict) try: json_file = open(os.path.join(root, 'Rules.1blockpkg.json'), 'w') json.dump(obj, json_file, indent=4, separators=(',', ': ')) finally: json_file.close() finally: f.close()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 11748, 28686, 198, 11748, 33918, 198, 11748, 17268, 198, 198, 79, 16993, 796, 28686, 13, 6978, 13, 15908, 3672, 7, 418, 13, 6978, 13, 397, 2777, 776, 7, 834, 7753, 834, 4008, 198, 15...
2.25
212
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Source file for d2sec exploit information # # Software is free software released under the "Modified BSD license" # # Copyright (c) 2016 Pieter-Jan Moreels - pieterjan.moreels@gmail.com # Sources SOURCE_NAME = 'd2sec' SOURCE_FILE = "https://www.d2sec.com/exploits/elliot.xml" # Imports import copy from collections import defaultdict from io import BytesIO from xml.sax import make_parser from xml.sax.handler import ContentHandler from lib.Config import Configuration as conf from lib.Source import Source
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 198, 2, 8090, 2393, 329, 288, 17, 2363, 14561, 1321, 198, 2, 198, 2, 10442, 318, 1479, 3788, 2716, 739, 262,...
2.850242
207
# parameters_t.py #-*- coding: utf-8 -*- from __future__ import absolute_import from decimal import Decimal from hypothesis import given from hypothesis.strategies import text import pytest from loris import img_info from loris.loris_exception import RequestException, SyntaxException from loris.parameters import ( FULL_MODE, PCT_MODE, PIXEL_MODE, RegionParameter, RotationParameter, SizeParameter, ) from tests import loris_t def build_image_info(width=100, height=100): """Produces an ``ImageInfo`` object of the given dimensions.""" info = img_info.ImageInfo(None) info.width = width info.height = height return info
[ 2, 10007, 62, 83, 13, 9078, 198, 2, 12, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 198, 6738, 32465, 1330, 4280, 4402, 198, 198, 6738, 14078, 1330, 1813, 198, 6...
3.118483
211
from typing import List, cast from fastapi import Depends, HTTPException from rx import operators as rxops from api_server.dependencies import sio_user from api_server.fast_io import FastIORouter, SubscriptionRequest from api_server.gateway import rmf_gateway from api_server.models import Lift, LiftHealth, LiftRequest, LiftState from api_server.repositories import RmfRepository, rmf_repo_dep from api_server.rmf_io import rmf_events router = FastIORouter(tags=["Lifts"]) @router.get("", response_model=List[Lift]) @router.get("/{lift_name}/state", response_model=LiftState) async def get_lift_state( lift_name: str, rmf_repo: RmfRepository = Depends(rmf_repo_dep) ): """ Available in socket.io """ lift_state = await rmf_repo.get_lift_state(lift_name) if lift_state is None: raise HTTPException(status_code=404) return lift_state @router.sub("/{lift_name}/state", response_model=LiftState) @router.get("/{lift_name}/health", response_model=LiftHealth) async def get_lift_health( lift_name: str, rmf_repo: RmfRepository = Depends(rmf_repo_dep) ): """ Available in socket.io """ lift_health = await rmf_repo.get_lift_health(lift_name) if lift_health is None: raise HTTPException(status_code=404) return lift_health @router.sub("/{lift_name}/health", response_model=LiftHealth) @router.post("/{lift_name}/request")
[ 6738, 19720, 1330, 7343, 11, 3350, 198, 198, 6738, 3049, 15042, 1330, 2129, 2412, 11, 14626, 16922, 198, 6738, 374, 87, 1330, 12879, 355, 374, 87, 2840, 198, 198, 6738, 40391, 62, 15388, 13, 45841, 3976, 1330, 264, 952, 62, 7220, 198,...
2.630394
533
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import TYPE_CHECKING, Any, Iterable, Optional from ax.core.base import Base from ax.core.data import Data if TYPE_CHECKING: # pragma: no cover # import as module to make sphinx-autodoc-typehints happy from ax import core # noqa F401 class Metric(Base): """Base class for representing metrics. Attributes: lower_is_better: Flag for metrics which should be minimized. """ def __init__(self, name: str, lower_is_better: Optional[bool] = None) -> None: """Inits Metric. Args: name: Name of metric. lower_is_better: Flag for metrics which should be minimized. """ self._name = name self.lower_is_better = lower_is_better @property def name(self) -> str: """Get name of metric.""" return self._name def fetch_trial_data( self, trial: "core.base_trial.BaseTrial", **kwargs: Any ) -> Data: """Fetch data for one trial.""" raise NotImplementedError # pragma: no cover def fetch_experiment_data( self, experiment: "core.experiment.Experiment", **kwargs: Any ) -> Data: """Fetch this metric's data for an experiment. Default behavior is to fetch data from all trials expecting data and concatenate the results. """ return Data.from_multiple_data( [ self.fetch_trial_data(trial, **kwargs) if trial.status.expecting_data else Data() for trial in experiment.trials.values() ] ) @classmethod def fetch_trial_data_multi( cls, trial: "core.base_trial.BaseTrial", metrics: Iterable["Metric"], **kwargs: Any, ) -> Data: """Fetch multiple metrics data for one trial. Default behavior calls `fetch_trial_data` for each metric. Subclasses should override this to trial data computation for multiple metrics. 
""" return Data.from_multiple_data( [metric.fetch_trial_data(trial, **kwargs) for metric in metrics] ) @classmethod def fetch_experiment_data_multi( cls, experiment: "core.experiment.Experiment", metrics: Iterable["Metric"], **kwargs: Any, ) -> Data: """Fetch multiple metrics data for an experiment. Default behavior calls `fetch_trial_data_multi` for each trial. Subclasses should override to batch data computation across trials + metrics. """ return Data.from_multiple_data( [ cls.fetch_trial_data_multi(trial, metrics, **kwargs) if trial.status.expecting_data else Data() for trial in experiment.trials.values() ] ) def clone(self) -> "Metric": """Create a copy of this Metric.""" return Metric(name=self.name, lower_is_better=self.lower_is_better)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 15069, 357, 66, 8, 3203, 11, 3457, 13, 290, 663, 29116, 13, 198, 2, 198, 2, 770, 2723, 2438, 318, 11971, 739, 262, 17168, 5964, 1043, 287, 262, 198, 2, 38559, 24290, 2393, ...
2.389932
1,331
""" Tests the PageWatcher class to ensure that requests are being made and compared properly """ import unittest import time from http_page_monitor.tests.logging_http_server\ import setup_logging_server from .. import watchers class TestPageWatcher(unittest.TestCase): """ Tests the PageWatcher class """ @classmethod @classmethod def setUp(self): """ Reset the log before each test """ self.server.reset_log() def test_initial_request(self): """ Make sure that the initial request is made when the watcher is started """ page_w = watchers.PageWatcher(self.server.generate_address('/'), comparison_function=lambda a, b: None) # Start and immediatly stop to trigger a single request page_w.start() page_w.stop() # Allow the request to go through time.sleep(1) # Assert that there was an initial request self.assertEqual(self.server.request_count, 1) def test_single_request(self): """ Test to ensure a second request is made after the initial request """ page_w = watchers.PageWatcher(self.server.generate_address('/'), time_interval=0.5) # Give the page watcher time to make a second request page_w.start() time.sleep(0.75) page_w.stop() # Assert that there was an initial request and a second request self.assertEqual(self.server.request_count, 2) def test_equal_pages_difference(self): """ Test how the watcher responds to a page that doesn't change with the default comparison function """ alerts = [] page_w = watchers.PageWatcher(self.server.generate_address('/'), time_interval=0.5, alert_function=dummy_alert_function) # Give the page watcher time to make a second request page_w.start() time.sleep(0.75) page_w.stop() # Assert that there was an initial request and a second request self.assertEqual(self.server.request_count, 2) # Assert that an alert wasn't made self.assertEqual(len(alerts), 0) def test_page_difference(self): """ Test how the watcher responds to a page difference with the default comparison function """ alerts = [] page_w = 
watchers.PageWatcher(self.server.generate_address('/every2'), time_interval=0.5, alert_function=dummy_alert_function) # Give the page watcher time to make a second request page_w.start() time.sleep(0.7) page_w.stop() # Assert that there was an initial request and a second request self.assertEqual(self.server.request_count, 2) # Assert that an alert was made self.assertEqual(len(alerts), 1) def test_custom_page_difference_function(self): """ Test how the watcher responds to a page difference with the default comparison function """ alerts = [] page_w = watchers.PageWatcher(self.server.generate_address('/every2'), time_interval=0.5, alert_function=dummy_alert_function, comparison_function=custom_comparison_function) # Give the page watcher time to make a second request page_w.start() time.sleep(0.7) page_w.stop() # Assert that there was an initial request and a second request self.assertEqual(self.server.request_count, 2) # Assert that an alert was made self.assertEqual(len(alerts), 1) # Make sure that the message was passed down self.assertEqual(alerts[0][1], "Response 1, Response 2 there was a difference") def test_custom_page_difference_function_no_difference(self): """ Test how the watcher responds to a page difference with the default comparison function """ alerts = [] page_w = watchers.PageWatcher(self.server.generate_address('/every2'), time_interval=0.5, alert_function=dummy_alert_function, comparison_function=custom_comparison_function) # Give the page watcher time to make a second request page_w.start() time.sleep(0.7) page_w.stop() # Assert that there was an initial request and a second request self.assertEqual(self.server.request_count, 2) # Assert that no alerts were made self.assertEqual(len(alerts), 0)
[ 37811, 30307, 262, 7873, 54, 34734, 1398, 284, 4155, 326, 198, 220, 220, 220, 7007, 389, 852, 925, 290, 3688, 6105, 37227, 198, 11748, 555, 715, 395, 198, 11748, 640, 198, 198, 6738, 2638, 62, 7700, 62, 41143, 13, 41989, 13, 6404, 2...
2.301112
2,069
import os import sys import datetime from .filesystem import ( remove_dir_if_exists, remove_file_if_exists, normpath, ) from .chains import ( get_base_blockchain_storage_dir, ) @normpath CHAINDATA_DIR = './chaindata' @normpath DAPP_DIR = './dapp' @normpath NODEKEY_FILENAME = 'nodekey' @normpath IPC_FILENAME = 'geth.ipc' @normpath @normpath @normpath @normpath
[ 11748, 28686, 198, 11748, 25064, 198, 11748, 4818, 8079, 198, 198, 6738, 764, 16624, 6781, 1330, 357, 198, 220, 220, 220, 4781, 62, 15908, 62, 361, 62, 1069, 1023, 11, 198, 220, 220, 220, 4781, 62, 7753, 62, 361, 62, 1069, 1023, 11,...
2.362573
171
############################################################################# # # Author: Michel F. SANNER # # Copyright: M. Sanner TSRI 2000 # # revision: Guillaume Vareille # ######################################################################### # # $Header: /opt/cvs/python/packages/share1.5/ViewerFramework/VF.py,v 1.218 2013/10/03 22:31:33 annao Exp $ # # $Id: VF.py,v 1.218 2013/10/03 22:31:33 annao Exp $ # """defines base classe ViewerFramework The ViewerFramework class can be subclassed to create applications that use a DejaVu Camera object to display 3D geometries. In the following we'll call Viewer a class derived from ViewerFramework. The design features of the viewer framework include: - extensibility: new commands can be written by subclassing the VFCommand base class. - dynamically configurable: commands (or set of commands called modules) can be loaded dynamically from libraries. - Commands loaded into an application can create their own GUI elements (menus, cascading menus, buttons, sliders, etc...). The viewer framework provides support for the creation of such GUI elements. - Commands can be invoked either through the GUI or throught the Python Shell. - Macros provide a lightweight mechanism to add simple commands. In fact any Python function can be added as a Macro - Support for logging of commands: this allows to record and play back a session. - documentation: the module and command documentation is provided in the source code. This documentation can be extracted using existing tools and made available in various formats including HTML and man pages. The document ation is also accessible through the application's Help command which uses Python's introspection capabilities to retrieve the documentation. A ViewerFramework always has at least one menu bar called "menuRoot" and at least one buttonbar called "Toolbar". The geometried displayed in a Viewer can be stored in objects derived from the base class GeometryContainer. 
This container holds a dictionnary of geometries where the keys are the geometry's name and the values instances of DejaVu Geometries. Commands: Commands for an application derived from ViewerFramework can be developped by sub-classing the VFCommand base class (see VFCommand overview).The class VFCommandGUI allows to define GUI to be associated with a command. Command can be added dynamically to an application using the AddCommand command of the ViewerFramework. example: # derive a new command class ExitCommand(Command): doit(self): import sys sys.exit() # get a CommandGUI object g = CommandGUI() # add information to create a pull-down menu in a menu bar called # 'MoVi' under a menu-button called 'File' with a menu Command called # 'Exit'. We also specify that we want a separator to appear above # this entry in the menu g.addMenuCommand('MoVi', 'File', 'Exit', separatorAbove=1) # add an instance of an ExitCommand with the alias 'myExit' to a viewer # v. This will automatically add the menu bar, the menu button # (if necessary) the menu entry and bind the default callback function v.addCommand( ExitCommand(), 'myExit', g ) The command is immediately operational and can be invoked through the pull down menu OR using the Python shell: v.myExit() CommandGUI objects allow to specify what type of GUI a command should have. It is possible to create pull-down menu entries, buttons of different kinds etc.. Modules: A bunch of related commands can be groupped into a module. A module is a .py file that defines a number of commands and provides a functions called initModule(viewer) used to register the module with an instance of a viewer. When a module is added to a viewer, the .py file is imported and the initModule function is executed. Usually this functions instanciates a number of command objects and their CommandGUI objects and adds them to the viewer. 
""" import os, string, warnings import traceback, sys, glob, time class VFEvent: """Base class for ViewerFramework events. """ def __init__(self, arg=None, objects=[], *args, **kw): """ """ self.arg = arg self.objects = objects self.args = args self.kw = kw if len(args): self.args = args if len(kw): for k,v in kw.items(): setattr(self, k, v) class LogEvent(VFEvent): """created each time a log string is written to the log""" def __init__(self, logstr): """ """ self.logstr = logstr class GeomContainer: """Class to hold geometries to be shown in a viewer. This class provides a dictionnary called geoms in which the name of a DejaVu geometry is the key to access that particular geometry object Geometries can be added using the addGeometry method. """ def __init__(self, viewer=None): """constructor of the geometry container""" ## Dictionary of geometries used to display atoms from that molecule ## using sticks, balls, CPK spheres etc ... self.geoms = {} self.VIEWER = viewer # DejaVu Viewer object self.masterGeom = None ## Dictionary linking geom names to cmds which updates texture coords ## for the current set of coordinates self.texCoordsLookup = {} self.updateTexCoords = {} def delete(self): """Function to remove self.geoms['master'] and self.geoms['selectionSpheres'] from the viewer when deleted""" # switch the object and descendant to protected=False for c in self.geoms['master'].AllObjects(): c.protected = False if self.VIEWER: self.VIEWER.RemoveObject(self.geoms['master']) #for item in self.geoms.values(): # item.delete() # if item.children!=[]: # self.VIEWER.RemoveObject(item) def addGeom(self, geom, parent=None, redo=False): """ This method should be called to add a molecule-specific geometry. 
geom -- DejaVu Geom instance parent -- parent geometry, if not specified we use self.masterGeom """ if parent is None: parent = self.masterGeom # we need to make sure the geometry name is unique in self.geoms # and in parent.children nameUsed=False geomName = geom.name for object in parent.children: if object.name==geomName: nameUsed=True break if nameUsed or self.geoms.has_key(geomName): newName = geomName+str(len(self.geoms)) geom.name = newName warnings.warn("renaming geometry %s to %s"%(geomName, newName))#, stacklevel=14) self.geoms[geomName]=geom # add the geometry to the viewer. At this point the name should be # unique in both the parent geoemtry and the geomContainer.geoms dict if self.VIEWER: self.VIEWER.AddObject( geom, parent=parent, redo=redo) else: parent.children.append(geom) geom.parent = parent geom.fullName = parent.fullName+'|'+geom.name #from DejaVu.Labels import Labels from DejaVu.Spheres import Spheres ## from ViewerFramework.gui import InputFormDescr from mglutil.gui.InputForm.Tk.gui import InputFormDescr from mglutil.util.callback import CallBackFunction from mglutil.util.packageFilePath import findResourceFile, getResourceFolderWithVersion try: from ViewerFramework.VFGUI import ViewerFrameworkGUI except: pass from ViewerFramework.VFCommand import Command,CommandGUI,InteractiveCmdCaller # Import basic commands. 
from ViewerFramework.basicCommand import loadCommandCommand, loadMacroCommand from ViewerFramework.basicCommand import ShellCommand, ShellCommandGUI, ExitCommand from ViewerFramework.basicCommand import loadModuleCommand from ViewerFramework.basicCommand import BrowseCommandsCommand, RemoveCommand from ViewerFramework.basicCommand import SaveSessionCommand, SaveSessionCommandGUI from ViewerFramework.helpCommands import helpCommand try: from comm import Comm except: pass from DejaVu import Viewer from DejaVu.Camera import Camera import types, Tkinter import thread import os, sys, traceback import tkMessageBox from mglutil.preferences import UserPreference class ViewerFramework: """ Base class for applications providing a 3D geometry Viewer based on a DejaVu Camera object along with support for adding GUI and commands dynamically. """ def __init__(self, title='ViewerFrameWork', logMode='no', libraries=[], gui=1, resourceFile = '_vfrc', viewerClass=Viewer, master=None, guiVisible=1, withShell=1, verbose=True, trapExceptions=True): """ Construct an instance of a ViewerFramework object with: - an instance of a VFGUI that provides support for adding to the GUI of the application - a dictionnary of commands - a list of commands that create geometry - a list of objects to be displayed - a dictionary of colorMaps * logMode can be: 'no': for no loging of commands at all 'overwrite': the log files overwrite the one from the previous session 'unique': the log file name include the date and time * libraries is a list of names of Python package that provide a cmdlib.py and modlib.py - trapExceptions should be set to False when creating a ViewerFramework for testing, such that exception are seen by the testing framework """ self.__frozen = False self.hasGui = gui self.embeded=False self.cmdHistory = [] # history of command [(cmd, args, kw)] global __debug__ self.withShell = withShell self.trapExceptions = trapExceptions #self.__debug__ = 0 # create a socket communication object 
try: self.socketComm = Comm() self.webControl = Comm() self.cmdQueue = None # queue of command comming from server except: self.socketComm = None self.webControl = None self.timeUsedForLastCmd = 0. # -1 when command fails assert logMode in ['no', 'overwrite', 'unique'] self.resourceFile = resourceFile self.commands = {} # dictionnary of command added to a Viewer self.userpref = UserPreference() #self.removableCommands = UserPreference(os.path.dirname(self.resourceFile), 'commands') self.userpref.add('Sharp Color Boundaries for MSMS', 'sharp', ('sharp', 'blur'), doc="""Specifies color boundaries for msms surface [sharp or blur] (will not modify already displayed msms surfaces, only new surfaces will be affected)""", category="DejaVu") #Warning: changing the cursor tends to make the window flash.""") # Interface to Visual Programming Environment, if available self.visionAPI = None if self.hasGui : try: # does this package exists? from Vision.API import VisionInterface # create empty object. Note that this will be filled with life # when the visionCommand is executed self.visionAPI = VisionInterface() except: pass self.objects = [] # list of objects self.colorMaps = {} # available colorMaps self.colorMapCt = 0 # used to make sure names are unique self.undoCmdStack = [] # list of strings used to undo # lock needs to be acquired before object can be added self.objectsLock = thread.allocate_lock() # lock needs to be acquired before topcommands can be run self.commandsLock = thread.allocate_lock() # nexted commands counter self.commandNestingLevel = 0 # place holder for a list of command that can be carried out each time # an object is added to the application # every entry is a tuple (function, args_tuple, kw_dict) self.onAddObjectCmds = [] # list of commands that have an onRemoveMol self.cmdsWithOnAddObj = [] # list of commands that have an onAddMol self.cmdsWithOnRemoveObj = [] # dict cmd:[cm1, cmd2, ... cmdn]. 
When cmd runs the onCmdRun method # of all cmds in the list will be called with the arguments passed # to cmd self.cmdsWithOnRun = {} # list of commands that have an onExit self.cmdsWithOnExit = [] self.firstPerspectiveSet = True self.logMode = logMode self.libraries = libraries + ['ViewerFramework'] self.topNegateCmds = [] # used in Command.doitWrapper() to accumulate negation commands # for sub commands of a top command # you cannot create a GUI and have it visible. if not self.hasGui: self.guiVisible=0 else: self.guiVisible=guiVisible self.master=master if gui: self.GUI = ViewerFrameworkGUI(self, title=title, viewerClass=viewerClass, root=master, withShell=withShell, verbose=verbose) self.GUI.VIEWER.suspendRedraw = True self.viewSelectionIcon = 'cross' # or 'spheres' or 'labels' self.userpref.add('Show Progress Bar', 'hide', ['show','hide'], doc = """When set to 'show' the progress bar is displayed. When set to 'hide', the progress bar widget is widthdrawn, but can be redisplayed by choosing 'show' again.""", category='Viewer', callbackFunc=[self.GUI.showHideProgressBar_CB], ) if gui: cbList = [self.GUI.logUserPref_cb,] else: cbList = [] #if gui: # self.guiSupport = __import__( "DejaVu.ViewerFramework.gui", globals(), # locals(), ['gui']) if gui and self.guiVisible==0: # if gui == 1 but self.guiVisible == 0: the gui is created but # withdrawn immediatly self.GUI.ROOT.withdraw() if self.withShell: # Uses the pyshell as the interpreter when the VFGUI is hidden. 
self.GUI.pyshell.top.deiconify() self.viewSelectionIcon = 'cross' # or 'spheres' or 'labels' self.userpref.add( 'Transformation Logging', 'no', validValues = ['no', 'continuous', 'final'], callbackFunc = cbList, doc="""Define when transformation get logged.\n'no' : never; 'continuous': after every transformation; 'final': when the Exit command is called""") self.userpref.add( 'Visual Picking Feedback', 1, [0, 1], category="DejaVu", callbackFunc = [self.SetVisualPickingFeedBack,], doc="""When set to 1 a sphere is drawn at picked vertex""") self.userpref.add( 'Fill Selection Box', 1, [0,1], category="DejaVu", callbackFunc = [self.fillSelectionBoxPref_cb], doc="""Set this option to 1 to have the program draw a solid selection box after 'fillSelectionBoxDelay' miliseconds without a motion""") self.userpref.add( 'Fill Selection Box Delay', 200, category="DejaVu", validateFunc = self.fillDelayValidate, callbackFunc = [self.fillSelectionBoxDelayPref_cb], doc="""Delay in miliseconds after which the selection box turns solid if the 'fillSelectionBox' is set. Valide values are >0 and <10000""") self.userpref.add( 'Warning Message Format', 'pop-up', ['pop-up', 'printed'], callbackFunc = [self.setWarningMsgFormat], category="Viewer", doc="""Set format for warning messages. valid values are 'pop-up' and 'printed'""") self._cwd = os.getcwd() self.userpref.add( 'Startup Directory', self._cwd, validateFunc = self.startupDirValidate, callbackFunc = [self.startupDirPref_cb], doc="""Startup Directory uses os.chdir to change the startup directory. Startup Directory is set to current working directory by default.""") rcFolder = getResourceFolderWithVersion() self.rcFolder = rcFolder self.userpref.add( 'Log Mode', 'no', ['no', 'overwrite', 'unique'], callbackFunc = [self.setLogMode], category="Viewer", doc="""Set the log mode which can be one of the following: no - do not log the commands. overwrite - stores the log in mvAll.log.py. unique - stores the log in mvAll_$time.log.py. 
log.py files are stored in resource folder located under ~/.mgltools/$Version """) self.userpref.add( 'Command History Depth', 500, validateFunc=self.commmandHistoryValidate, #callbackFunc = [] doc="Set Command Hsistory Depth - number of commands kept in the command history list and displayed in the MESSAGE BOX") if self.hasGui: # add an interactive command caller self.ICmdCaller = InteractiveCmdCaller( self ) # remove DejaVu's default picking behavior vi = self.GUI.VIEWER vi.RemovePickingCallback(vi.unsolicitedPick) # overwrite the Camera's DoPick method to set the proper pickLevel # based on the interactive command that will be called for the # current modifier configuration for c in vi.cameras: c.DoPick = self.DoPick self.addBasicCommands() if self.hasGui: from mglutil.util.recentFiles import RecentFiles fileMenu = self.GUI.menuBars['menuRoot'].menubuttons['File'].menu rcFile = rcFolder if rcFile: rcFile += os.sep + 'Pmv' + os.sep + "recent.pkl" self.recentFiles = RecentFiles( self, fileMenu, filePath=rcFile, menuLabel='Recent Files', index=2) self.logMode = 'no' self.GUI.dockCamera() self.logMode = logMode # load out default interactive command which prints out object names self.ICmdCaller.setCommands( self.printGeometryName ) self.ICmdCaller.go() if gui: self.userpref.add( 'Icon Size', 'medium', ['very small', 'small', 'medium', 'large', 'very large'], callbackFunc = [self.SetIconSize,], category="Viewer", doc="""Sets the size of icons for the Toolbar.""") self.userpref.add( 'Save Perspective on Exit', 'yes', validValues = ['yes', 'no'], doc="""Saves GUI perspective on Exit. The following features are saved: GUI geometry, and whether camera is docked or not. """) self.GUI.VIEWER.suspendRedraw = False self.GUI.VIEWER.currentCamera.height = 600 # dictionary of event:[functions]. 
functions will be called by # self.dispatchEvent self.eventListeners = {} self.userpref.saveDefaults() self.userpref.loadSettings() if self.userpref.has_key('Save Perspective on Exit') and self.userpref['Save Perspective on Exit']['value'] == 'yes': self.restorePerspective() #self.GUI.VIEWER.ReallyRedraw() def registerListener(self, event, function): """registers a function to be called for a given event. event has to be a class subclassing VFEvent """ assert issubclass(event, VFEvent) assert callable(function) if not self.eventListeners.has_key(event): self.eventListeners[event] = [function] else: if function in self.eventListeners[event]: warnings.warn('function %s already registered for event %s'%( function,event)) else: self.eventListeners[event].append(function) def dispatchEvent(self, event): """call all registered listeners for this event type """ assert isinstance(event, VFEvent) if self.eventListeners.has_key(event.__class__): if self.hasGui: vi=self.GUI.VIEWER autoRedraw = vi.autoRedraw vi.stopAutoRedraw() for func in self.eventListeners[event.__class__]: func(event) if autoRedraw: vi.startAutoRedraw() else: for func in self.eventListeners[event.__class__]: func(event) def clients_cb(self, client, data): """get called every time a client sends a message""" import sys sys.stdout.write('%s sent %s\n'%(client,data) ) #exec(data) def embedInto(self, hostApp,debug=0): """ function to define an hostapplication, take the string name of the application """ if self.hasGui: raise RuntiomeError("VF with GUI cannot be embedded") from ViewerFramework.hostApp import HostApp self.hostApp = HostApp(self, hostApp, debug=debug) self.embeded=True def updateIMD(self): """get called every time the server we are connected to sends a message what about more than one molecule attached currently under develppment """ from Pmv.moleculeViewer import EditAtomsEvent #print "pause",self.imd.pause if self.imd.mindy: #print "ok update mindy" self.imd.updateMindy() if self.hasGui and 
self.imd.gui : self.GUI.VIEWER.OneRedraw() self.GUI.VIEWER.update() self.GUI.ROOT.after(1, self.updateIMD) else : if not self.imd.pause: self.imd.lock.acquire() coord = self.imd.imd_coords[:] self.imd.lock.release() if coord != None: #how many mol if type(self.imd.mol) is list : b=0 for i,m in enumerate(self.imd.mol) : n1 = len(m.allAtoms.coords) self.imd.mol.allAtoms.updateCoords(coord[b:n1], self.imd.slot[i]) b=n1 else : self.imd.mol.allAtoms.updateCoords(coord, self.imd.slot) import DejaVu if DejaVu.enableVBO : if type(self.imd.mol) is list : b=0 for i,m in enumerate(self.imd.mol) : N=len(m.geomContainer.geoms['cpk'].vertexSet.vertices.array) m.geomContainer.geoms['cpk'].vertexSet.vertices.array[:]=coord[b:N] b=N else : N=len(self.imd.mol.geomContainer.geoms['cpk'].vertexSet.vertices.array) self.imd.mol.geomContainer.geoms['cpk'].vertexSet.vertices.array[:]=coord[:N] #self.GUI.VIEWER.OneRedraw() #self.GUI.VIEWER.update() else : from Pmv.moleculeViewer import EditAtomsEvent if type(self.imd.mol) is list : for i,m in enumerate(self.imd.mol) : event = EditAtomsEvent('coords', m.allAtoms) self.dispatchEvent(event) else : event = EditAtomsEvent('coords', self.imd.mol.allAtoms) self.dispatchEvent(event) #self.imd.mol.geomContainer.geoms['balls'].Set(vertices=coord) #self.imd.mol.geomContainer.geoms['sticks'].Set(vertices=coord.tolist()) #self.imd.mol.geomContainer.geoms['lines'].Set(vertices=coord) #self.imd.mol.geomContainer.geoms['bonds'].Set(vertices=coord) #self.imd.mol.geomContainer.geoms['cpk'].Set(vertices=coord) if self.handler.isinited : self.handler.getForces(None) self.handler.updateArrow() #""" if self.hasGui and self.imd.gui : self.GUI.VIEWER.OneRedraw() self.GUI.VIEWER.update() self.GUI.ROOT.after(5, self.updateIMD) #self.GUI.ROOT.after(10, self.updateIMD) def server_cb(self, server, data): """get called every time the server we are connected to sends a message""" import sys #sys.stderr.write('server %s sent> %s'%(server,data) ) self.cmdQueue.put( 
(server,data) ) #exec(data) # cannot exec because we are not in main thread # and Tkitner is not thread safe #self.GUI.VIEWER.Redraw() def customize(self, file=None): """if a file is specified, this files gets sourced, else we look for the file specified in self.resourceFile in the following directories: 1 - current directory 2 - user's home directory 3 - the package to which this instance belongs to """ #print 'ZZZZZZZZZZZZZZZZZZZZZZZZ' #import traceback #traceback.print_stack() if file is not None: if not os.path.exists(file): return self.source(file, globalNames=1, log=0) return resourceFileLocation = findResourceFile(self, resourceFile=self.resourceFile) if resourceFileLocation.has_key('currentdir') and \ not resourceFileLocation['currentdir'] is None: path = resourceFileLocation['currentdir'] elif resourceFileLocation.has_key('home') and \ not resourceFileLocation['home'] is None: path = resourceFileLocation['home'] elif resourceFileLocation.has_key('package') and \ not resourceFileLocation['package'] is None: path = resourceFileLocation['package'] else: return self.source(path, globalNames=1, log=0) path = os.path.split(path)[-1] if os.path.exists(path): self.source(path, globalNames=1, log=0) return def after(func, *args, **kw): """method to run a thread enabled command and wait for its completion. 
relies on the command to release a lock called self.done only works for commands, not for macros """ lock = thread.allocate_lock() lock.acquire() func.private_threadDone = lock apply( func, args, kw ) func.waitForCompletion() def getLog(self): """ generate log strings for all commands so far """ logs = [] i = 0 for cmd, args, kw in self.cmdHistory: try: log = cmd.logString( *args, **kw)+'\n' except: log = '#failed to create log for %d in self.cmdHistory: %s\n'%( i, cmd.name) logs.append(log) i += 1 return logs def addCmdToHistory(self, cmd, args, kw): """ append a command to the history of commands """ #print "ADDING Command to history", cmd.name self.cmdHistory.append( (cmd, args, kw)) maxLen = self.userpref['Command History Depth']['value'] lenCmds = len(self.cmdHistory) if maxLen>0 and lenCmds > maxLen: #print "maxLen", maxLen, lenCmds self.cmdHistory = self.cmdHistory[-maxLen:] if self.hasGui: nremoved = lenCmds-maxLen # update text in the message box message_box = self.GUI.MESSAGE_BOX nlines = float(nremoved+1) try: message_box.tx.delete('1.0', str(nlines)) except: pass def log(self, cmdString=''): """append command to logfile FIXME: this should also get whatever is typed in the PythonShell """ if self.logMode == 'no': return if cmdString[-1]!='\n': cmdString = cmdString + '\n' if hasattr(self, 'logAllFile'): self.logAllFile.write( cmdString ) self.logAllFile.flush() if self.socketComm is not None and len(self.socketComm.clients): #is it really need? 
cmdString=cmdString.replace("log=0","log=1") self.socketComm.sendToClients(cmdString) self.dispatchEvent( LogEvent( cmdString ) ) ## if self.selectLog: ## self.logSelectFile.write( cmdString ) def tryto(self, command, *args, **kw ): """result <- tryto(command, *args, **kw ) if an exception is raised print traceback and continue """ self.commandNestingLevel = self.commandNestingLevel + 1 try: if self.commandNestingLevel==1: self.commandsLock.acquire() if not self.trapExceptions: # we are running tests and want exceptions not to be caught result = command( *args, **kw ) else: # exception should be caught and displayed try: result = command( *args, **kw ) except: print 'ERROR *********************************************' if self.guiVisible==1 and self.withShell: self.GUI.pyshell.top.deiconify() self.GUI.ROOT.config(cursor='') self.GUI.VIEWER.master.config(cursor='') self.GUI.MESSAGE_BOX.tx.component('text').config(cursor='xterm') traceback.print_exc() sys.last_type, sys.last_value, sys.last_traceback = sys.exc_info() result = 'ERROR' # sets cursors back to normal finally: if self.commandNestingLevel==1: self.commandsLock.release() self.commandNestingLevel = self.commandNestingLevel - 1 return result def message(self, str, NL=1): """ write into the message box """ if self.hasGui: self.GUI.message(str,NL) else: print str def unsolicitedPick(self, pick): """treat and unsollicited picking event""" vi = self.GUI.VIEWER if vi.isShift() or vi.isControl(): vi.unsolicitedPick(pick) else: #print picked geometry for k in pick.hits.keys(): self.message(k) def addBasicCommands(self): """Create a frame to hold menu and button bars""" from ViewerFramework.dejaVuCommands import PrintGeometryName, \ SetCameraSizeCommand, SetCamSizeGUI # Basic command that needs to be added manually. 
self.addCommand( PrintGeometryName(), 'printGeometryName ', None ) g = CommandGUI() g.addMenuCommand('menuRoot', 'File', 'Browse Commands', separatorAbove=1, ) self.addCommand( BrowseCommandsCommand(), 'browseCommands', g) self.addCommand( SetCameraSizeCommand(), 'setCameraSize', SetCamSizeGUI) from ViewerFramework.basicCommand import UndoCommand, \ ResetUndoCommand, NEWUndoCommand, RedoCommand # g = CommandGUI() # g.addMenuCommand('menuRoot', 'File', 'Remove Command') # self.addCommand( RemoveCommand(), 'removeCommand', g) from mglutil.util.packageFilePath import getResourceFolderWithVersion self.vfResourcePath = getResourceFolderWithVersion() if self.vfResourcePath is not None: self.vfResourcePath += os.sep + "ViewerFramework" if not os.path.isdir(self.vfResourcePath): try: os.mkdir(self.vfResourcePath) except Exception, inst: print inst txt="Cannot create the Resource Folder %s" %self.vfResourcePath self.vfResourcePath = None g = CommandGUI() g.addMenuCommand('menuRoot', 'Edit', 'Undo ', index=0) g.addToolBar('Undo', icon1 = '_undo.gif', icon2 = 'undo.gif', type = 'ToolBarButton',state = 'disabled', balloonhelp = 'Undo', index = 1) self.addCommand( NEWUndoCommand(), 'NEWundo', g) g = CommandGUI() g.addMenuCommand('menuRoot', 'Edit', 'Redo ', index=1) g.addToolBar('Redo', icon1 = '_redo.gif', icon2 = 'redo.gif', type = 'ToolBarButton',state = 'disabled', balloonhelp = 'Redo', index = 2) self.addCommand( RedoCommand(), 'redo', g) # keep old undo command for now for backward compatibility self.addCommand( UndoCommand(), 'undo', None ) self.addCommand( ResetUndoCommand(), 'resetUndo ', None) g = CommandGUI() #g.addMenuCommand('menuRoot', 'File', 'Load Command') self.addCommand( loadCommandCommand(), 'loadCommand', g) g = CommandGUI() #g.addMenuCommand('menuRoot', 'File', 'Load Module') self.addCommand( loadModuleCommand(), 'loadModule', g) g = CommandGUI() g.addMenuCommand('menuRoot', 'File', 'Load Macros', separatorBelow=1) self.addCommand( loadMacroCommand(), 
'loadMacro', g) # Load Source command from customizationCommands module: self.browseCommands('customizationCommands', commands=['source',], package='ViewerFramework', topCommand=0) # force the creation of the default buttonbar and PyShell checkbutton # by viewing the Python Shell widget if self.withShell: self.addCommand( ShellCommand(), 'Shell', ShellCommandGUI ) # add the default 'Help' menubutton in the default menubar if self.hasGui: bar = self.GUI.menuBars['menuRoot'] help = self.GUI.addMenuButton( bar, 'Help', {}, {'side':'right'}) self.GUI.addMenuButton( bar, 'Grid3D', {}, {'side':'right'}) try: import grid3DCommands self.browseCommands("grid3DCommands", package="ViewerFramework", topCommand=0) except Exception, inst: print inst print "Cannot import grid3DCommands. Disabling grid3DCommands..." #self.GUI.ROOT.after(1500, self.removeCommand.loadCommands) # load helpCommand and searchForCmd self.browseCommands('helpCommands', commands=['helpCommand','searchForCmd', 'citeThisScene', 'showCitation'], package='ViewerFramework', topCommand = 0) # load SetUserPreference and setOnAddObjectCmds Commands self.browseCommands('customizationCommands', commands=['setUserPreference', 'setOnAddObjectCommands'], package='ViewerFramework', topCommand = 0) # load ChangeVFGUIvisGUI and SetOnAddObjectCmds Command self.browseCommands('customizeVFGUICommands', package='ViewerFramework', topCommand = 0) self.addCommand( SaveSessionCommand(), 'saveSession ', SaveSessionCommandGUI) # Add the Exit command under File g = CommandGUI() g.addMenuCommand('menuRoot', 'File', 'Exit', separatorAbove=1) self.addCommand( ExitCommand(), 'Exit', g ) # load object transformation, camera transformation, # light transformation, Clipping Plane transformation, # CenterGeom, centerScene commands self.browseCommands("dejaVuCommands", commands=[ 'transformObject', 'transformCamera', 'setObject', 'setCamera', 'setLight', 'setClip', 'addClipPlane', 'centerGeom', 'centerScene', 'centerSceneOnVertices', 
'alignGeomsnogui','alignGeoms', 'toggleStereo', 'centerSceneOnPickedPixel'], package='ViewerFramework', topCommand = 0) def validInstance(self, classList, obj): """Checks whether an object is an instance of one the classes in the list""" ok = 0 for Klass in classList: if isinstance(obj, Klass): OK=1 break return OK def getOnAddObjectCmd(self): """ returns a copy of the list of commands currently executed when a new object is added """ return self.onAddObjectCmds[:] def addOnAddObjectCmd(self, cmd, args=[], kw={}): """ adds a command to the list of commands currently executed when a new object is added """ assert callable(cmd) assert type(args)==types.TupleType or type(args)==types.ListType assert type(kw)==types.DictType assert cmd.flag & Command.objArgOnly kw['topCommand'] = 0 kw['setupNegate'] = 0 if type(args)==types.ListType: args = tuple(args) self.onAddObjectCmds.append( (cmd, args, kw) ) def removeOnAddObjectCmd(self, cmd): """ removes a command to the list of commands currently executed when a new object is added """ for com in self.onAddObjectCmds: if com[0]==cmd: self.onAddObjectCmds.remove(com) return com print 'WARNING: command %s not found'%cmd.name return None def addObject(self, name, obj, geomContainer=None): """Add an object to a Viewer""" #print 'acquiring addObject lock' self.objectsLock.acquire() self.objects.append(obj) self.objectsLock.release() #print 'releasing addObject lock' ## if geomContainer is None: ## obj.geomContainer = GeomContainer( self.GUI.VIEWER ) ## else: ## obj.geomContainer = geomContainer obj.geomContainer = geomContainer # prepare progress bar lenCommands = len(self.cmdsWithOnAddObj) if self.hasGui: self.GUI.configureProgressBar(init=1, mode='increment', max=lenCommands, progressformat='ratio', labeltext='call initGeom methods') #call initGeom method of all commands creating geometry from time import time #t0 = time() for com in self.cmdsWithOnAddObj: com.onAddObjectToViewer(obj) #t1 = time() #print 'INITI', com, t1-t0 
#check for gui if self.hasGui: self.GUI.updateProgressBar() # now set progress bar back to '%' format if self.hasGui: self.GUI.configureProgressBar(progressformat='percent') # prepare progress bar lenCommands = len(self.onAddObjectCmds) #call functions that need to be called on object #t0 = time() for com in self.onAddObjectCmds: com[2]['redraw']=0 com[2]['log']=0 #t1 = time() #print 'INITI2', com, t1-t0 com[0]( *((obj,)+com[1]), **com[2] ) # note we have to re-configure the progress bar because doitWrapper # will overwrite the mode to 'percent' #check for gui if self.hasGui: self.GUI.configureProgressBar(init=1, mode='increment', max=lenCommands, progressformat='ratio', labeltext='call geom functions') self.GUI.updateProgressBar() if self.hasGui: # now set progress bar back to '%' format self.GUI.configureProgressBar(progressformat='percent') # create add object event event = AddObjectEvent(objects=[obj]) self.dispatchEvent(event) if self.hasGui: self.centerScene(topCommand=0) self.GUI.VIEWER.Redraw() def removeObject(self, obj, undoable=False): """Remove an object from a Viewer""" #1 Delete the obj from the list of objects. del(self.objects[self.objects.index(obj)]) # call onRemoveMol method of all commands creating geometry # To remove geometries created by these commands from the VIEWER ## MS chose to cerate undoableDelete__ variable in VF to let cmd's ## onRemoveObjectFromViewer method decide what to do when delete is ## undoable. 
Passign undoable into th method would require changing ## the signature in each implementation when onyl a hand full do ## something s[pecial when undoable is True self.undoableDelete__ = undoable for com in self.cmdsWithOnRemoveObj: self.tryto( com.onRemoveObjectFromViewer, (obj) ) del self.undoableDelete__ # clean up the managedGeometries list if obj.geomContainer: for cmd in self.commands.values(): if len(cmd.managedGeometries)==0: continue geomList = [] for g in cmd.managedGeometries: if hasattr(g, 'mol') and g.mol==obj: continue geomList.append(g) cmd.managedGeometries = geomList # remove everything created in the geomContainer associated to the # mol we want to destroy, if obj.geomContainer: obj.geomContainer.delete() # create remove object event event = DeleteObjectEvent(objects=[obj]) self.dispatchEvent(event) def addCommandProxy(self, commandProxy): """To make startup time faster this function add GUI elements without importing and loading the full dependiencies for a command """ if self.hasGui: gui = commandProxy.gui if gui is not None: gui.register(self, commandProxy) gui.registered = True def addCommand(self, command, name, gui=None): """ Add a command to a viewer. 
arguments: command: Command instance name: string gui: optional CommandGUI object objectType: optional type of object for which we need to add geoms geomDescr: optional dictionary of 'name:objectType' items name is used to create an alias for the command in the viewer if a gui is specified, call gui.register to add the gui to the viewer """ #print "addCommand", name, command assert isinstance(command, Command) # happens because of dependencies if name in self.commands.keys(): return self.commands[name] error = self.tryto(command.checkDependencies, self) if error=='ERROR': print '\nWARNING: dependency check failed for command %s' % name return ## def download_cb(): ## import os ## os.system('netscape http://www.scripps.edu/pub/olson-web/people/scoon/login.html &') ## def Ok_cb(idf): ## idf.form.destroy() ## tb = traceback.extract_tb(sys.exc_traceback) ## from gui import InputFormDescr, CallBackFunction ## import Tkinter ## idf = InputFormDescr("Missing dependencies !") ## idf.append({'widgetType': Tkinter.Label, ## 'text':"%s can't be loaded, needs %s module" ## % (tb[1][-1][7:],command.__class__.__name__), ## 'gridcfg':{'columnspan':2}}) ## idf.append({'widgetType':Tkinter.Button, 'text':'OK', ## 'command':CallBackFunction(Ok_cb, idf), ## 'gridcfg':{'sticky':Tkinter.W+Tkinter.E}}) ## idf.append({'widgetType':Tkinter.Button, 'text':'Download', ## 'command':download_cb, ## 'gridcfg':{'row':-1, 'sticky':Tkinter.W+Tkinter.E, ## 'columnspan':5 }}) ## form = self.getUserInput(idf, modal=0, blocking=0) ## self.warningMsg(title = "Missing dependencies !", ## message = "%s can't be loaded, needs %s module" ## % (tb[1][-1][7:],command.__class__.__name__)) ## return command.vf = self name = string.strip(name) name = string.replace(name, ' ', '_') self.commands[name] = command command.name=name command.undoMenuString=name # string used to change menu entry for Undo command.undoMenuStringPrefix='' # prefix used to change menu entry for Undo setattr(self, name, command) #exec ( 
'self.%s = command' % name ) if self.hasGui: if gui is not None: assert isinstance(gui, CommandGUI) gui.register(self, command) gui.registered = True #call the onAddCmdToViewer method of the new command command.onAddCmdToViewer() for c in self.commands.values(): c.onAddNewCmd(command) #if hasattr(command, 'onAddObjectToViewer'): # if callable(command.onAddObjectToViewer): # self.cmdsWithOnAddObj.append(command) # for o in self.objects: # command.onAddObjectToViewer(o) if hasattr(command, 'onRemoveObjectFromViewer'): if callable(command.onRemoveObjectFromViewer): self.cmdsWithOnRemoveObj.append(command) if hasattr(command, 'onExitFromViewer'): if callable(command.onExitFromViewer): self.cmdsWithOnExit.append(command) def updateGeomContainers(self, objectType, geomDescr): """To be called when a new command that requires geometry is add to a viewer. This method loops over existing objects to create the required geometry for already existing objects""" for o in self.objects: if not isinstance(object, objectType): continue o.geomContainer.addGeom( geomDescr ) def askFileOpen(self, idir=None, ifile=None, types=None, title='Open', relative=True, parent=None, multiple=False): """filename <- askFileOpen( idir, ifile, types, title) if the viewer is run with a gui this function displays a file browser else it askes for a file name idir: optional inital directory ifile: optional initial filename types: list of tuples [('PDB files','*.pdb'),] title: widget's title relative: when set to True the file name is realtive to the directory where the application has been started multiple: allow selecting multiple files returns: a filename ot None if the Cancel button """ if self.hasGui: if parent: file = self.GUI.askFileOpen(parent, idir=idir, ifile=ifile, types=types, title=title, multiple=multiple) else: file = self.GUI.askFileOpen( self.GUI.ROOT, idir=idir, ifile=ifile, types=types, title=title, multiple=multiple) if file is () or file is None: # this is returned if one click on the 
file list and # then clicks Cancel return else: default = '' if idir: default = idir if ifile: default = os.path.join( default, ifile ) file = raw_input("file name [%s] :"%default) if file=='': if default != '' and os.path.exists(file): file = default if multiple is False: fpath,fname = os.path.split(file) if relative and file and os.path.abspath(os.path.curdir) == fpath: file = os.path.join( os.path.curdir, file[len(os.path.abspath(os.path.curdir))+1:]) return file else: files = [] for f in file: fpath,fname = os.path.split(f) if relative and f and os.path.abspath(os.path.curdir) == fpath: f = os.path.join(os.path.curdir, f[len(os.path.abspath(os.path.curdir))+1:]) files.append(f) return files def setLogMode(self, name, oldval, newval): "Sets the Lig Mode" self.logMode = newval # open log file for all commands if self.logMode == 'unique': import time t = time.localtime(time.time()) fname1 = 'mvAll_%04d-%02d-%02d_%02d-%02d-%02d.log.py'%(t[0],t[1],t[2],t[3],t[4],t[5]) fname1 = os.path.join(self.rcFolder, fname1) if self.hasGui: self.GUI.ROOT.after_idle(self.clearOldLogs) elif self.logMode == 'overwrite': fname1 = os.path.join(self.rcFolder, 'mvAll.log.py') if self.logMode != 'no': flag = self.tryOpenFileInWrite(fname1) while flag == 0: idf = InputFormDescr(title = 'Directory not writable ...') variable = Tkinter.StringVar() idf.append({'name':'noLog','widgetType': Tkinter.Radiobutton, 'text':'noLog','variable':variable, 'value':'noLog','defaultValue':'noLog', 'gridcfg':{'sticky':Tkinter.W}}) idf.append({'name':'browse','widgetType': 'SaveButton', 'typeofwidget':Tkinter.Radiobutton, 'types':[ ('Python Files', '*.py')], 'title':'Choose a log File...', 'text':'browse', 'variable':variable, 'defaultValue':'noLog', 'value':'browse', 'gridcfg':{'sticky':Tkinter.W}}) self.GUI.ROOT.deiconify() self.GUI.ROOT.update() result = self.getUserInput(idf) if result == {}: self.GUI.ROOT.destroy() return elif result['noLog'] == 'noLog': self.logMode = 'no' flag = 1 elif 
result['noLog'] == 'browse' and result.has_key('browse'): assert not result['browse'] in [''] flag = self.tryOpenFileInWrite(result['browse']) elif result['noLog'] == 'browse' and not result.has_key('browse'): print "you didn't enter a proper file name try again" flag = 0 def setWarningMsgFormat(self, name, oldval, newval): """ newval can be either 'pop-up' or 'printed'""" self.messageFormat = newval def warningMsg(self, msg, title='WARNING: ', parent = None): """None <- warningMsg(msg)""" if type(title) is not types.StringType: title = 'WARNING: ' if self.hasGui and self.messageFormat=='pop-up': tkMessageBox.showwarning(title, msg,parent = parent) else: sys.stdout.write(title+msg+'\n') def askOkCancelMsg(self, msg): """None <- okCancelMsg(msg)""" if self.hasGui: return tkMessageBox.askyesno('expand selection', msg) else: val = raw_input('anser [0]/1: '+msg+'\n') if val=='1': return 1 else: return 0 ## FIXME .. do we need this ? def errorMsg(self, msg, errtype=RuntimeError): """None <- errorMsg(errorType, msg)""" if self.hasGui: tkMessageBox.showerror(msg) raise errtype(msg) def getUserInput(self, formDescription, master=None, root=None, modal=0, blocking=1, defaultDirection = 'row', closeWithWindow = 1, okCfg={'text':'OK'}, cancelCfg={'text':'Cancel'}, initFunc=None, scrolledFrame=0, width=None, height=None, okcancel=1, onDestroy = None, postCreationFunc=None, postUsingFormFunc=None): """val[] <- getUserInput(formDescription) Returns a list of values obtained either from an InputForm or by prompting the user for values """ ## from gui import InputForm, InputFormDescr from mglutil.gui.InputForm.Tk.gui import InputForm, InputFormDescr assert isinstance(formDescription, InputFormDescr) if self.hasGui: if master==None: master = self.GUI.ROOT #root = self.GUI.getCmdsParamsMaster() #if not postCreationFunc: # postCreationFunc = self.GUI.getAfterCreatingFormFunc() #if not postUsingFormFunc: # postUsingFormFunc = self.GUI.getAfterUsingFormFunc() form = InputForm(master, 
root, formDescription, modal=modal, blocking=blocking, defaultDirection=defaultDirection, closeWithWindow=closeWithWindow, okCfg=okCfg, cancelCfg=cancelCfg, initFunc=initFunc, scrolledFrame=scrolledFrame, width=width, height=height, okcancel=okcancel, onDestroy=onDestroy) if form.ownsRoot: geom = form.root.geometry() # make sure the upper left dorner is visible w = string.split(geom, '+') changepos = 0 if w[1][0]=='-': posx = '+50' changepos=1 else: posx = '+'+w[1] if w[2][0]=='-': posy ='+50' changepos=1 else: posy = '+'+w[2] if changepos: form.root.geometry(posx+posy) if postCreationFunc: postCreationFunc(form.root) if not (modal or blocking): return form else: values = form.go() if postUsingFormFunc: postUsingFormFunc(form.root) return values else: self.warningMsg("nogui InputForm not yet implemented") def transformedCoordinatesWithInstances(self, hits): """ hist is pick.hits = {geom: [(vertexInd, intance),...]} This function will use the instance information to return a list of transformed coordinates """ # FIXME this is in DejaVu.VIewer and should go away here vt = [] for geom, values in hits.items(): coords = geom.vertexSet.vertices.array for vert, instance in values: M = geom.GetMatrix(geom.LastParentBeforeRoot(), instance[1:]) pt = coords[vert] ptx = M[0][0]*pt[0]+M[0][1]*pt[1]+M[0][2]*pt[2]+M[0][3] pty = M[1][0]*pt[0]+M[1][1]*pt[1]+M[1][2]*pt[2]+M[1][3] ptz = M[2][0]*pt[0]+M[2][1]*pt[1]+M[2][2]*pt[2]+M[2][3] vt.append( (ptx, pty, ptz) ) return vt if __name__ == '__main__': v = ViewerFramework() import pdb
[ 29113, 29113, 7804, 4242, 2, 198, 2, 198, 2, 6434, 25, 12386, 376, 13, 37376, 21479, 198, 2, 198, 2, 15069, 25, 337, 13, 2986, 1008, 26136, 7112, 4751, 198, 2, 198, 2, 18440, 25, 1962, 5049, 2454, 569, 533, 8270, 198, 2, 198, 29...
2.112968
27,167
"""Parsing HTML with BeautifulSoup""" from datetime import datetime from bs4 import BeautifulSoup def parse_html(html): """Parse FX html, return date and dict of {symbol -> rate}""" soup = BeautifulSoup(html, 'html.parser') # <h4>Date: <i class="date">2019-11-11</i></h4> i = soup('i', {'class': 'date'}) if not i: raise ValueError('cannot find date') date = datetime.strptime(i[0].text, '%Y-%m-%d') rates = {} for tr in soup('tr'): # <tr> # <td><i class="fas fa-pound-sign" data-toggle="tooltip" # title="GBP"></i></td> # <td>0.83</td> # </tr> symbol_td, rate_td = tr('td') symbol = symbol_td('i')[0]['title'] rate = float(rate_td.text) rates[symbol] = rate return date, rates if __name__ == '__main__': with open('fx.html') as fp: html = fp.read() date, rates = parse_html(html) print(f'date: {date}') for symbol, rate in rates.items(): print(f'USD/{symbol} = {rate:f}')
[ 37811, 47, 945, 278, 11532, 351, 23762, 50, 10486, 37811, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 198, 6738, 275, 82, 19, 1330, 23762, 50, 10486, 628, 198, 4299, 21136, 62, 6494, 7, 6494, 2599, 198, 220, 220, 220, 37227, 10044, ...
2.161088
478
# четна / нечетна сума # Да се напише програма, която въвежда n цели числа и проверява дали сумата на числата на четни позиции е равна на сумата # на числата на нечетни позиции. При равенство печата "Yes" + сумата, иначе печата "No" + разликата. # Разликата се изчислява по абсолютна стойност. Форматът на изхода трябва да е като в примерите по-долу. n = int(input()) sum_even = 0 sum_odd = 0 for i in range(1, n + 1): current_num = int(input()) if i % 2 == 0: sum_even = sum_even + current_num else: sum_odd = sum_odd + current_num if sum_even == sum_odd: print('Yes') print(f'Sum = {sum_even}') else: print('No') print(f'Diff = {abs(sum_even - sum_odd)}')
[ 2, 220, 141, 229, 16843, 20375, 22177, 16142, 1220, 12466, 121, 16843, 141, 229, 16843, 20375, 22177, 16142, 220, 21727, 35072, 43108, 16142, 198, 2, 12466, 242, 16142, 220, 21727, 16843, 12466, 121, 16142, 140, 123, 18849, 141, 230, 1684...
1.314711
537
# terrascript/softlayer/d.py
[ 2, 8812, 15961, 14, 4215, 29289, 14, 67, 13, 9078, 198 ]
2.636364
11
import os from setuptools import find_packages from numpy.distutils.core import setup base_dir = os.path.dirname(__file__) src_dir = os.path.join(base_dir, "pymaxpro_lite") about = {} with open(os.path.join(src_dir, "__about__.py")) as f: exec(f.read(), about) pkgs = find_packages() if __name__ == "__main__": metadata = dict( name = about["__title__"], version = about["__version__"], description = about["__description__"], author = about["__author__"], license = about["__license__"], url = about["__uri__"], packages = pkgs, install_requires = ['numpy', 'scipy'], python_requires = '>=3.6', ) setup(**metadata)
[ 11748, 28686, 198, 6738, 900, 37623, 10141, 1330, 1064, 62, 43789, 198, 6738, 299, 32152, 13, 17080, 26791, 13, 7295, 1330, 9058, 198, 198, 8692, 62, 15908, 796, 28686, 13, 6978, 13, 15908, 3672, 7, 834, 7753, 834, 8, 198, 10677, 62, ...
2.230539
334
# Copyright (c) 2015 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from testtools import testcase from functionaltests.client import base from functionaltests.common import cleanup from functionaltests import utils order_create_key_data = { "name": "barbican functional test secret name", "algorithm": "aes", "bit_length": 256, "mode": "cbc", "payload_content_type": "application/octet-stream", } # Any field with None will be created in the model with None as the value # but will be omitted in the final request (via the requests package) # to the server. # # Given that fact, order_create_nones_data is effectively an empty json request # to the server. order_create_nones_data = { 'type': None, "meta": { "name": None, "algorithm": None, "bit_length": None, "mode": None, "payload_content_type": None, } } @utils.parameterized_test_case
[ 2, 15069, 357, 66, 8, 1853, 37927, 13200, 11, 3457, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789,...
3.121739
460
from rpi_ws281x import Color, PixelStrip, ws import time from threading import Thread import random from PIL import Image import numpy as np #import cv2 from skimage import io, transform #from skimage.transform import resize, rescale # LED strip configuration: LED_COUNT = 2304 # Number of LED pixels. LED_PIN = 18 # GPIO pin connected to the pixels (must support PWM!). LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz) LED_DMA = 10 # DMA channel to use for generating signal (try 10) LED_BRIGHTNESS = 20 # Set to 0 for darkest and 255 for brightest LED_INVERT = False # True to invert the signal (when using NPN transistor level shift) LED_CHANNEL = 0 ##LED_STRIP = ws.SK6812_STRIP_RGBW #LED_STRIP = ws.SK6812W_STRIP LED_STRIP = ws.WS2812_STRIP # ##frame = utils.get_colorless_array_2d(self.width, self.height) #for ix,iy,iz in np.ndindex(image_resized.shape): # image_resized[ix,iy] = tuple(image_resized[ix,iy]) #print(image_resized) #io.imshow(image_resized) #img = Image.fromarray(image_resized, 'RGB') #img.show() #io.imsave(f"../anim_frames_processed/{image_name}", image_resized) #dsize = (width, height) #output = cv2.resize(src, dsize) #cv2.imwrite('../anim_frames/processed.bmp',output) #im = np.array(Image.open('../anim_frames/anim_test.bmp')) ##im = np.array(im.tolist()) #print(im) #print(np.shape(im)) #print(im.dtype) ##new_im = im.view(dtype=np.dtype([('x', im.dtype), ('y', im.dtype)])) ##new_im = new_im.reshape(new_im.shape[:-1]) ##print(new_im) #x = np.empty((im.shape[0], im.shape[1]), dtype=tuple) ##x.fill(init_value) #for ix,iy,iz in np.ndindex(im.shape): # x[ix,iy] = tuple(im[ix,iy]) # print(tuple(im[ix,iy])) #print(x) ##arr = misc.imread('../anim_frames/anim_test.bmp') # 640x480x3 array ##print(arr) ##printt(np.shape(arr)) def color_full(strip): """Wipe color across display a pixel at a time.""" color = [None] * 9 color[0] = Color(random.randint(0,255), random.randint(0,255), random.randint(0,255)) color[1] = Color(random.randint(0,255), 
random.randint(0,255), random.randint(0,255)) color[2] = Color(random.randint(0,255), random.randint(0,255), random.randint(0,255)) color[3] = Color(random.randint(0,255), random.randint(0,255), random.randint(0,255)) color[4] = Color(random.randint(0,255), random.randint(0,255), random.randint(0,255)) color[5] = Color(random.randint(0,255), random.randint(0,255), random.randint(0,255)) color[6] = Color(random.randint(0,255), random.randint(0,255), random.randint(0,255)) color[7] = Color(random.randint(0,255), random.randint(0,255), random.randint(0,255)) color[8] = Color(random.randint(0,255), random.randint(0,255), random.randint(0,255)) for i in range(0, strip.numPixels(), 256): for j in range(i, i+256, 1): strip.setPixelColor(j, color[i // 256]) strip.show() def color_wipe(strip, color, wait_ms=50): """Wipe color across display a pixel at a time.""" for i in range(strip.numPixels()): strip.setPixelColor(i, color) start = time.time() strip.show() end = time.time() print(f"{(end - start) * 1000} ms") #time.sleep(wait_ms / 1000.0) def color_wipe_infinite(strip, color, wait_ms=50): """Wipe color across display a pixel at a time.""" while True: color_clear(strip) for i in range(strip.numPixels()): strip.setPixelColor(i, color) #start = time.time() strip.show() #end = time.time() #print(f"{(end - start) * 1000} ms") #time.sleep(wait_ms / 1000.0) init_animation()
[ 6738, 374, 14415, 62, 18504, 30368, 87, 1330, 5315, 11, 11349, 1273, 5528, 11, 266, 82, 198, 11748, 640, 198, 6738, 4704, 278, 1330, 14122, 198, 11748, 4738, 198, 6738, 350, 4146, 1330, 7412, 198, 11748, 299, 32152, 355, 45941, 198, 2...
2.258662
1,674
from django.utils import timezone from employees.models import Employee
[ 6738, 42625, 14208, 13, 26791, 1330, 640, 11340, 198, 198, 6738, 4409, 13, 27530, 1330, 36824, 628, 198 ]
4.166667
18
# ---------------------------------------------------------------------------------------------------------------- # Channel Component # ---------------------------------------------------------------------------------------------------------------- # This component lets you register channels with a keyword to be later used by the send() function of the bot # ---------------------------------------------------------------------------------------------------------------- class Channel(): """set() Register a channel with a name Parameters ---------- name: Channel name id_key: Channel name in config.json """ """setID() Register a channel with a name Parameters ---------- name: Channel name id: Channel id """ """setMultiple() Register multiple channels Parameters ---------- channel_list: List of pair [name, id_key or id] """ """get() Get a registered channel Returns ---------- discord.Channel: Discord Channel """
[ 201, 198, 2, 16529, 47232, 201, 198, 2, 11102, 35100, 201, 198, 2, 16529, 47232, 201, 198, 2, 770, 7515, 8781, 345, 7881, 9619, 351, 257, 21179, 284, 307, 1568, 973, 416, 262, 3758, 3419, 2163, 286, 262, 10214, 201, 198, 2, 16529, ...
3.563518
307
from flask import Blueprint, redirect, render_template, request, session, url_for # from asset_tracker_restapi import asset_tracker_restapi bp = Blueprint("main", __name__) @bp.route("/", methods=['GET', 'POST']) @bp.route("/action", methods=['GET', 'POST']) @bp.route("/search", methods=['GET','POST'])
[ 6738, 42903, 1330, 39932, 11, 18941, 11, 8543, 62, 28243, 11, 2581, 11, 6246, 11, 19016, 62, 1640, 198, 2, 422, 11171, 62, 2213, 10735, 62, 2118, 15042, 1330, 11171, 62, 2213, 10735, 62, 2118, 15042, 628, 198, 46583, 796, 39932, 7203,...
3
104
# tests.py # # Copyright 2016 Christian Diener <mail[at]cdiener.com> # # MIT license. See LICENSE for more information. import pytest from corda import reaction_confidence, test_model from cobra import Model, Reaction, Metabolite from cobra.manipulation import convert_to_irreversible, revert_to_reversible if __name__ == '__main__': pytest.main()
[ 2, 220, 5254, 13, 9078, 198, 2, 198, 2, 220, 15069, 1584, 4302, 6031, 877, 1279, 4529, 58, 265, 60, 10210, 72, 877, 13, 785, 29, 198, 2, 198, 2, 220, 17168, 5964, 13, 4091, 38559, 24290, 329, 517, 1321, 13, 198, 198, 11748, 1297...
3.103448
116
""" This module knows how to serialize general object, objects specialized with a ``__json__()`` method, Django QuerySets, and ``ttcal`` objects. """ __version__ = '3.0.4'
[ 37811, 198, 1212, 8265, 4206, 703, 284, 11389, 1096, 2276, 2134, 11, 5563, 16976, 198, 4480, 257, 7559, 834, 17752, 834, 3419, 15506, 2446, 11, 37770, 43301, 50, 1039, 11, 290, 7559, 926, 9948, 15506, 5563, 13, 198, 37811, 198, 834, 9...
3.307692
52
""" SSSD_Config - file ``/etc/sssd/sssd.config`` ============================================ """ from insights.core import IniConfigFile from insights.core.plugins import parser from insights.specs import Specs @parser(Specs.sssd_config) class SSSD_Config(IniConfigFile): """ Parse the content of the ``/etc/sssd/sssd.config`` file. The 'sssd' section must always exist. Within that, the 'domains' parameter is usually defined to give a comma-separated list of the domains that sssd is to manage. The 'sssd' section will define one or more active domains, which are then configured in the 'domain/{domain}' section of the configuration. These domains are then available via the 'domains' method, and the configuration of a domain can be fetched as a dictionary using the 'domain_config' method. Sample configuration:: [sssd] config_file_version = 2 # Number of times services should attempt to reconnect in the # event of a crash or restart before they give up reconnection_retries = 3 # If a back end is particularly slow you can raise this timeout here sbus_timeout = 30 services = nss, pam # SSSD will not start if you do not configure any domains. # Add new domain configurations as [domain/<NAME>] sections, and # then add the list of domains (in the order you want them to be # queried) to the "domains" attribute below and uncomment it. # domains = LOCAL,LDAP domains = example.com debug_level = 9 [nss] # The following prevents SSSD from searching for the root user/group in # all domains (you can add here a comma-separated list of system accounts that # are always going to be /etc/passwd users, or that you want to filter out). 
filter_groups = root filter_users = root reconnection_retries = 3 [pam] reconnection_retries = 3 [domain/example.com] id_provider = ldap lookup_family_order = ipv4_only ldap_uri = ldap://ldap.example.com/ ldap_search_base = dc=example,dc=com enumerate = False hbase_directory= /home create_homedir = True override_homedir = /home/%u auth_provider = krb5 krb5_server = kerberos.example.com krb5_realm = EXAMPLE.COM Example: >>> type(conf) <class 'insights.parsers.sssd_conf.SSSD_Config'> >>> conf.get('nss', 'filter_users') 'root' >>> conf.getint('pam', 'reconnection_retries') 3 >>> conf.domains ['example.com'] >>> domain = conf.domain_config('example.com') >>> 'ldap_uri' in domain True """ @property def domains(self): """ Returns the list of domains defined in the 'sssd' section. This is used to refer to the domain-specific sections of the configuration. """ if self.has_option('sssd', 'domains'): domains = self.get('sssd', 'domains') if domains: return domains.split(',') # Return a blank list if no domains. return [] def domain_config(self, domain): """ Return the configuration dictionary for a specific domain, given as the raw name as listed in the 'domains' property of the sssd section. This then looks for the equivalent 'domain/{domain}' section of the config file. """ full_domain = 'domain/' + domain if full_domain not in self: return {} return self.items(full_domain)
[ 37811, 198, 5432, 10305, 62, 16934, 532, 2393, 7559, 14, 14784, 14, 824, 21282, 14, 824, 21282, 13, 11250, 15506, 198, 10052, 25609, 198, 37811, 198, 6738, 17218, 13, 7295, 1330, 554, 72, 16934, 8979, 198, 6738, 17218, 13, 7295, 13, 3...
2.515797
1,456
""" check if main line of each SGF is legal. """ __author__ = "ICHIKAWA, Yuji <ichikawa.yuji@gmail.com>" import sys import os from multiprocessing import Pool import psutil import sgf from board import Board, move2ev from utilities import file_pathes_under def check_and_arg(e: any): """ is defined explicitly because Pool instance cannot treat lambda function lambda e: (check(e), e). """ return check(e), e if __name__ == '__main__': if len(sys.argv) != 2: print("Usage: python {} <directory>".format(sys.argv[0])) sys.exit(0) with Pool(psutil.cpu_count(logical=False)) as pool: for result, filename in pool.imap_unordered(check_and_arg, file_pathes_under(sys.argv[1], "sgf"), 10): if not result: print(filename)
[ 37811, 198, 9122, 611, 1388, 1627, 286, 1123, 311, 21713, 318, 2742, 13, 198, 37811, 198, 834, 9800, 834, 796, 366, 20739, 18694, 12298, 32, 11, 10605, 7285, 1279, 488, 40398, 13, 24767, 7285, 31, 14816, 13, 785, 24618, 198, 198, 1174...
2.528662
314
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Mar 10 12:20:03 2021 @author: prowe By Penny Rowe 2021/03/10 AI with Prof. America Chambers, Spring 2021 Based on Variable.java """ class Variable: """ A variable in a Sudoku CSP with domain {1, 2, 3, ..., 9} The value of the variable may be fixed by the original problem. """ def __init__(self, row, col, nside, val={1, 2, 3, 4, 5, 6, 7, 8, 9}, fix=False): """ Create a new variable with the domain specified. domain may be: - {1...9} - a single value - a collection """ self.row = row self.col = col self.fixed = fix self.max_domain_val = nside self.domain = val.copy() def replace(self, value): """ Replace the domain of the variable with a value @param val The value to be added @throws IllegalStateException The domain is fixed """ if self.fixed: raise ValueError('The domain is fixed; cannot replace value.') self.domain = {value} def add(self, value): """ Add a value to the domain of the variable @param val The value to be added @throws IllegalStateException The domain is fixed """ if self.fixed: raise ValueError('The domain is fixed; cannot add value.') self.domain.add(value) def add_all(self, collection): """ Adds a collection of values to the domain of the variable @param input A collection of integer values to be added @throws IllegalStateException The domain is fixed """ if self.fixed: raise ValueError('The domain is fixed; cannot add collection.') self.domain.union(collection) def remove(self, val): """ Removes a value from the domain @param val The value to be removed @throws IllegalStateException The domain is fixed @returns: False """ if self.fixed: raise ValueError('The domain is fixed; cannot remove value.') self.domain.remove(val) def clear(self): """ # # Removes all values from the domain # # @throws IllegalStateException # The domain is fixed # """ if self.fixed: raise ValueError('The domain is fixed; cannot clear values.') self.domain = {} def get_domain(self): """ Returns the domain of the 
variable @return The domain of the variable """ return self.domain def get_domain_size(self): """ Returns the size of the variable's domain @return The size of the variable's domain """ return len(self.domain) def get_only_value(self): """ Returns the only value in the variable's domain @throws IllegalStateException The domain has more than 1 value or is empty @return The only value in the variable's domain """ if self.get_domain_size() != 1: raise ValueError('Domain of one expected, but was 0 or > 1') return next(iter(self.domain)) #def isfixed(self): # """ # Returns true if domain is fixed # @return True if the domain is fixed and false otherwise # """ # return self.fixed def contains(self, value): """ Returns true if domain contains value @param value The value to be checked @return True if the domain contains the value, false otherwise """ return value in self.domain
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 3300, 1526, 838, 1105, 25, 1238, 25, 3070, 33448, 198, 198, 31, 9800, 25, 386, 732, 628, ...
2.325264
1,611
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: resource_requirements.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from influxdb_service_sdk.model.container import resource_list_pb2 as influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='resource_requirements.proto', package='container', syntax='proto3', serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'), serialized_pb=_b('\n\x1bresource_requirements.proto\x12\tcontainer\x1a\x38influxdb_service_sdk/model/container/resource_list.proto\"j\n\x14ResourceRequirements\x12\'\n\x06limits\x18\x01 \x01(\x0b\x32\x17.container.ResourceList\x12)\n\x08requests\x18\x02 \x01(\x0b\x32\x17.container.ResourceListBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\x06proto3') , dependencies=[influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2.DESCRIPTOR,]) _RESOURCEREQUIREMENTS = _descriptor.Descriptor( name='ResourceRequirements', full_name='container.ResourceRequirements', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='limits', full_name='container.ResourceRequirements.limits', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='requests', full_name='container.ResourceRequirements.requests', index=1, number=2, type=11, cpp_type=10, label=1, 
has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=100, serialized_end=206, ) _RESOURCEREQUIREMENTS.fields_by_name['limits'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST _RESOURCEREQUIREMENTS.fields_by_name['requests'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST DESCRIPTOR.message_types_by_name['ResourceRequirements'] = _RESOURCEREQUIREMENTS _sym_db.RegisterFileDescriptor(DESCRIPTOR) ResourceRequirements = _reflection.GeneratedProtocolMessageType('ResourceRequirements', (_message.Message,), { 'DESCRIPTOR' : _RESOURCEREQUIREMENTS, '__module__' : 'resource_requirements_pb2' # @@protoc_insertion_point(class_scope:container.ResourceRequirements) }) _sym_db.RegisterMessage(ResourceRequirements) DESCRIPTOR._options = None # @@protoc_insertion_point(module_scope)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2980, 515, 416, 262, 8435, 11876, 17050, 13, 220, 8410, 5626, 48483, 0, 198, 2, 2723, 25, 8271, 62, 8897, 18883, 13, 1676, 1462, 198, 198, 11748, 25064, 198, 62, ...
2.767442
1,204
from django.shortcuts import render from . import models import requests # Create your views here.
[ 6738, 42625, 14208, 13, 19509, 23779, 1330, 8543, 198, 6738, 764, 1330, 4981, 198, 11748, 7007, 198, 198, 2, 13610, 534, 5009, 994, 13 ]
4.125
24
# When I made this, I was not comletely sure how to make functions, so when it # doesn't understand the input, it just spits out a line saying it didn't # understand then moves on. author = 'Zed A. Shaw' book = 'Learn Python the Hard Way by %s' % author print """ Hi dad! This is a little program I wrote to show you some of the cool things I've learned today while hammering away at the book %s\n. """ % book print "\nI'm just going to ask you a few simple questions." print "When answering a 'yes' or 'no' question, please" print "use 'y' or 'n' for your response." ready = raw_input('Are you ready? ') if ready == 'y': print "\nGood! Let's get started.\n" elif ready == 'n': print "\nAh, I see. Well, we're moving on anyways!\n" else: print "\nI'm sorry, I didn't understand that... Moving on!\n" name = raw_input('What is your name? ') print "\nHello %s!\n" % name day_going = raw_input("How is your day going? (say 1 for good, 2 for so-so, or 3 for bad) ") strDayGoing = None if day_going == '1': print "\nI'm glad you're having a good day.\n" strDayGoing = 'good' elif day_going == '2': print "\nAw, cheer up ol' chum!\n" strDayGoing = 'so-so' elif day_going == '3': print "\nI'm sorry to hear that... Moving on!\n" strDayGoing = 'bad' else: print "\nI'm sorry, I didn't understand that... Moving on!\n" fave_color = raw_input("What's your favorite color? ") if fave_color.lower() == 'blue': print "\nI knew that already, dad. You're probably wearing", print "blue shirt too, right?\n" else: print "\nThat... that can't be right! It's supposed to be blue!!\n" lunch = raw_input('What did you have for lunch today? ') print "\nMmmm! %s sounds delicious!\n" % lunch print """ So let me get this straight... Your name is %s. You're having a %s day. Your favorite color is %s. And you had %s for lunch. """ % (name, strDayGoing, fave_color, lunch) correct = raw_input("Is all of that information correct? ") if correct == 'y': print "\nGreat! 
Thanks for checking out all the stuff I learned today!", print "I'm now going to give you a brief tour of the script itself.\n" elif correct == 'n': print "\nWelp, can't be my fault! I wrote this script perfectly!!\n" else: print "\nSorry, I didn't get that. Oh well!\n"
[ 2, 1649, 314, 925, 428, 11, 314, 373, 407, 401, 5807, 306, 1654, 703, 284, 787, 5499, 11, 523, 618, 340, 198, 2, 1595, 470, 1833, 262, 5128, 11, 340, 655, 599, 896, 503, 257, 1627, 2282, 340, 1422, 470, 220, 198, 2, 1833, 788, ...
2.840937
811
"""Change some model user and pay_type to use helper function that strip unique field Revision ID: b986a61de65c Revises: 29128332c534 Create Date: 2021-12-08 19:10:55.573019 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'b986a61de65c' down_revision = '29128332c534' branch_labels = None depends_on = None
[ 37811, 19400, 617, 2746, 2836, 290, 1414, 62, 4906, 284, 779, 31904, 2163, 326, 10283, 3748, 2214, 198, 198, 18009, 1166, 4522, 25, 275, 49087, 64, 5333, 2934, 2996, 66, 198, 18009, 2696, 25, 2808, 12762, 32148, 66, 20, 2682, 198, 164...
2.80916
131
import argparse import os import sys assert sys.version_info[0] == 3 and sys.version_info[1] >= 5, "Requires Python 3.5 or newer" sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))) if sys.platform not in ["linux", "linux2", "darwin"]: print("stnm supports only macos and linux") sys.exit(1) if __name__ == '__main__': main()
[ 11748, 1822, 29572, 198, 11748, 28686, 198, 11748, 25064, 198, 198, 30493, 25064, 13, 9641, 62, 10951, 58, 15, 60, 6624, 513, 290, 25064, 13, 9641, 62, 10951, 58, 16, 60, 18189, 642, 11, 366, 39618, 11361, 513, 13, 20, 393, 15064, 1...
2.572414
145
from functools import partial from typing import Any, Dict, Optional, Type, Union import graphene from django.db.models import QuerySet from graphene.types.mountedtype import MountedType from graphene.types.unmountedtype import UnmountedType from graphene_django.filter import DjangoFilterConnectionField from simple_graphql.django.config import extract_extra_meta_config from simple_graphql.django.fields.authorize import authorize_query from simple_graphql.django.search import order_qs, search_qs from simple_graphql.django.types import ModelInstance, ModelSchemaConfig
[ 6738, 1257, 310, 10141, 1330, 13027, 198, 6738, 19720, 1330, 4377, 11, 360, 713, 11, 32233, 11, 5994, 11, 4479, 198, 198, 11748, 42463, 198, 6738, 42625, 14208, 13, 9945, 13, 27530, 1330, 43301, 7248, 198, 6738, 42463, 13, 19199, 13, ...
3.84
150
import numpy as np import pandas as pd import datetime import json from ..util.utility import * from ..metrics.fairness_metrics import FairnessMetrics from ..metrics.performance_metrics import PerformanceMetrics from ..metrics.newmetric import * from ..metrics.tradeoff import TradeoffRate import ipywidgets as widgets import IPython from ipywidgets import Layout, Button, Box, VBox, HBox, Text, GridBox from IPython.display import display, clear_output, HTML from IPython.core.display import HTML import matplotlib.pyplot as plt import matplotlib.patches as mpatches import sys import warnings from ..util.errors import * from math import floor import concurrent.futures from tqdm.auto import tqdm from pathlib import Path import matplotlib.lines as mlines class Fairness: """ Base Class with attributes used across all use cases within Machine Learning model fairness evaluation. """ def __init__(self, model_params): """ Parameters ------------------ model_params : list It holds ModelContainer object(s). Data holder that contains all the attributes of the model to be assessed. Compulsory input for initialization. Instance Attributes ------------------- fair_metric_obj : object, default=None Stores the FairnessMetrics() object and contains the result of the computations. perf_metric_obj : object, default=None Stores the PerformanceMetrics() object and contains the result of the computations. percent_distribution : dict, default=None Stores the percentage breakdown of the classes in y_true. calibration_score : float, default=None The brier score loss computed for calibration. Computable if y_prob is given. tradeoff_obj : object, default=None Stores the TradeoffRate() object and contains the result of the computations. correlation_output : dict, default=None Pairwise correlation of most important features (top 20 feature + protected variables). feature_mask : dict of list, default=None Stores the mask array for every protected variable applied on the x_test dataset. 
fair_conclusion : dict, default=None Contains conclusion of how the primary fairness metric compares against the fairness threshold. The key will be the protected variable and the conclusion will be "fair" or "unfair". e.g. {"gender": {'fairness_conclusion': "fair", "threshold": 0.01}, "race":{'fairness_conclusion': "unfair", "threshold": 0.01}} evaluate_status : int, default=0 Tracks the status of the completion of the evaluate() method to be checked in compile(). Either 1 for complete or -1 for error if any exceptions were raised. evaluate_status_cali: boolean, default=False Tracks the status of the completion of the calibration curve step within evaluate() method to be checked in compile(). False = Skipped (if y_prob is not provided) True = Complete tradeoff_status : int, default=0 Tracks the status of the completion of the tradeoff() method to be checked in compile(). 0 = Not started 1 = Complete -1 = Skipped (if y_prob is not provided) feature_imp_status : int, default=0 Tracks the status of the completion of the compute_feature_imp() method to be checked in compile(). 0 = Not started 1 = Complete -1 = Skipped (if model_object not provided, wrong train_op_name/predict_op_name, x_train or x_test error) feature_imp_status_loo: boolean, default=False Tracks the status of the completion of the leave-one-out analysis step within feature_importance() method to be checked in compile(). False = Skipped (if x_train or y_train or model object or fit/predict operator names are not provided) True = Complete feature_imp_status_corr: boolean, default=False Tracks the status of the completion of the correlation matrix computation step within feature_importance() method to be checked in compile(). False = Skipped (if the correlation dataframe is not provided in ModelContainer) True = Complete feature_imp_values: dict of list, default = None Contains the difference in metric values between the original and loco models for each protected variable. 
{"gender": { "gender": (perf_delta, fair_delta, flip, suggestion), "race": (perf_delta, fair_delta, flip, suggestion) }, "race": { "gender": (perf_delta, fair_delta, flip, suggestion), "race": (perf_delta, fair_delta, flip, suggestion) } } flip = "fair to fair", "unfair to fair", "fair to unfair", "unfair to unfair" sigma : float or int , default = 0 Standard deviation for Gaussian kernel for smoothing the contour lines of primary fairness metric. When sigma <= 0, smoothing is turn off. Suggested to try sigma = 3 or above if noisy contours are observed. err : object VeritasError object """ self.model_params = model_params self.fair_metric_obj = None self.perf_metric_obj = None self.percent_distribution = None self.calibration_score = None self.calibration_curve_bin = None self.tradeoff_obj = None self.correlation_output = None self.feature_mask = self._set_feature_mask() self.fair_conclusion = None self.evaluate_status = 0 self.tradeoff_status = 0 self.feature_imp_status = 0 self.feature_imp_values = None self.feature_imp_status_corr = False self.feature_imp_status_loo = False self.sigma = None self.err = VeritasError() def evaluate(self, visualize=False, output=True, n_threads=1, seed=None): """ Computes the percentage count of subgroups, performance, and fairness metrics together with their confidence intervals, calibration score & fairness metric self.fair_conclusion for all protected variables. If visualize = True, output will be overwritten to False (will not be shown) and run fairness_widget() from Fairness. Parameters ---------- visualize : boolean, default=False If visualize = True, output will be overwritten to False and run fairness_widget() from Fairness. output : boolean, default=True If output = True, _print_evaluate() from Fairness will run. n_threads : int, default=1 Number of currently active threads of a job seed : int, default=None Used to initialize the random number generator. 
Returns ---------- _fairness_widget() or _print_evaluate() """ #check if evaluate hasn't run, only run if haven't if self.evaluate_status == 0: #to show progress bar eval_pbar = tqdm(total=100, desc='Evaluate performance', bar_format='{l_bar}{bar}') eval_pbar.update(1) #execute performance metrics from PerformanceMetrics class self._compute_performance(n_threads=n_threads, seed = seed, eval_pbar=eval_pbar) eval_pbar.set_description('Evaluate fairness') #execute fairness metrics from FairnessMetrics class self._compute_fairness(n_threads=n_threads, seed = seed, eval_pbar=eval_pbar) #to determine fairness conclusion based on inputs self._fairness_conclusion() #set status to 1 after evaluate has run self.evaluate_status = 1 eval_pbar.set_description('Evaluate') eval_pbar.update(100 - eval_pbar.n) eval_pbar.close() print('', flush=True) #to trigger widget if visualize == True: output = False self._fairness_widget() #to trigger evaluate printout if output == True: self._print_evaluate() def _fair_conclude(self, protected_feature_name, **kwargs): """ Checks the fairness_conclusion for the selected protected feature with the primary fairness metric value against the fair_threshold Parameters ---------- protected_feature_name : string Name of a protected feature Other Parameters ---------------- priv_m_v : float Privileged metric value Returns ---------- out : dict Fairness threshold and conclusion for the chosen protected variable """ #for feature importance, when privileged metric values have been overwritten during leave-one-out analysis if "priv_m_v" in kwargs: priv_m_v = kwargs["priv_m_v"] value = kwargs["value"] #else run as per input values else: priv_m_v = self.fair_metric_obj.result.get(protected_feature_name).get("fair_metric_values").get(self.fair_metric_name)[1] value = self.fair_metric_obj.result[protected_feature_name]["fair_metric_values"].get(self.fair_metric_name)[0] #to handle different variations of threhold value provided e.g. 
float, decimals, integer fair_threshold = self._compute_fairness_metric_threshold(priv_m_v) out = {} #append threshold value to result out['threshold'] = fair_threshold #if metric used is ratio based, means it will either be more than 1 or less than 1. So set n = 1 to see the difference. if FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[2] == 'ratio': n = 1 #if metric used is pairty based, means it will either be more than 0 or less than 0 So set n = 0 to see the difference. elif FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[2] == 'parity': n = 0 #find absolute difference of fair values calculated after metric has been applied f_value = abs(value - n) #determine whether input values are fair or unfair depending on metrics applied if f_value <= fair_threshold: out['fairness_conclusion'] = 'fair' else: out['fairness_conclusion'] = 'unfair' return out def _fairness_conclusion(self): """ Computes _fair_conclude() for all the protected features and returns results in a dictionary Returns ---------- self.fair_conclusion : dict fair_conclusion and threshold for every protected variable """ self.fair_conclusion = {} #to append each fair conclusion for each protected variable into a single dictionary for i in self.model_params[0].p_var: self.fair_conclusion[i] = self._fair_conclude(i) def _compute_fairness_metric_threshold(self, priv_m_v): """ Computes the fairness metric threshold based on the fair_threshold variable Parameters ---------- priv_m_v : float Privileged metric value Returns ---------- fair_threshold : float Fairness metric threshold """ #to handle different variations of threhold value provided e.g. 
float, decimals, integer if self.fair_threshold > 1: self.fair_threshold = floor(self.fair_threshold) if FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[2] == 'ratio': fair_threshold = 1 - (self.fair_threshold / 100) elif FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[2] == 'parity': fair_threshold = (1 - (self.fair_threshold / 100)) * priv_m_v return fair_threshold else: return self.fair_threshold def _compute_performance(self, n_threads, seed, eval_pbar): """ Computes the percentage count of subgroups, all the performance metrics together with their confidence intervals & the calibration curve data. Parameters ----------- n_threads : int Number of currently active threads of a job seed : int Used to initialize the random number generator. eval_pbar : tqdm object Progress bar Returns ---------- All calculations from every performance metric """ #to initialize PerformanceMetrics and exceute all the perf metrics at one go self.perf_metric_obj = PerformanceMetrics(self) self.perf_metric_obj.execute_all_perf(n_threads=n_threads, seed = seed, eval_pbar=eval_pbar) #bring status bar to full after all perf metrics have been ran eval_pbar.update(1) #if calibration_curve function has been run, then set status to True if self.perf_metric_obj.result["calibration_curve"] is None: self.evaluate_status_cali = False else: self.evaluate_status_cali = True #if perf_dynamic function has been run, then set status to True if self.perf_metric_obj.result['perf_dynamic'] is None: self.evaluate_status_perf_dynamics = False else: self.evaluate_status_perf_dynamics = True def _compute_fairness(self, n_threads, seed, eval_pbar): """ Computes all the fairness metrics together with their confidence intervals & the self.fair_conclusion for every protected variable Parameters ----------- n_threads : int Number of currently active threads of a job seed : int Used to initialize the random number generator. 
eval_pbar : tqdm object Progress bar Returns ---------- All calculations from every fairness metric """ #to initialize FairnessMetrics and exceute all the fair metrics at one go self.fair_metric_obj = FairnessMetrics(self) self.fair_metric_obj.execute_all_fair(n_threads=n_threads, seed = seed, eval_pbar=eval_pbar) #bring status bar to full after all fair metrics have been ran eval_pbar.update(1) for i in self.model_params[0].p_var: for j in self._use_case_metrics['fair']: #if user provides fair metric value input value for each protected variable if self.fairness_metric_value_input is not None : if i in self.fairness_metric_value_input.keys(): if j in self.fairness_metric_value_input[i].keys(): self.fair_metric_obj.result[i]["fair_metric_values"][j]= (self.fairness_metric_value_input[i][j], self.fair_metric_obj.result[i]["fair_metric_values"][j][1], self.fair_metric_obj.result[i]["fair_metric_values"][j][2] ) msg = "{} value for {} is overwritten by user input, CI and privileged metric value may be inconsistent." msg = msg.format(FairnessMetrics.map_fair_metric_to_group[j][0], i) warnings.warn(msg) def compile(self, skip_tradeoff_flag=0, skip_feature_imp_flag=0, n_threads=1): """ Runs the evaluation function together with the trade-off and feature importance sections and saves all the results to a JSON file locally. Parameters ------------- skip_tradeoff_flag : int, default=0 Skip running tradeoff function if it is 1. skip_feature_imp_flag : int, default=0 Skip running feature importance function if it is 1. 
n_threads : int, default=1 Number of currently active threads of a job Returns ---------- Prints messages for the status of evaluate and tradeoff and generates model artifact """ #check if evaluate hasn't run, only run if haven't if self.evaluate_status == 0: self.evaluate(visualize=False, output=False, n_threads=n_threads) #printout print('{:40s}{:<10}'.format('Running evaluate','done')) print('{:5s}{:35s}{:<10}'.format('','performance measures','done')) print('{:5s}{:35s}{:<10}'.format('','bias detection','done')) if self.evaluate_status_cali: print('{:5s}{:35s}{:<10}'.format('','probability calibration','done')) else: print('{:5s}{:35s}{:<10}'.format('','probability calibration','skipped')) if self.evaluate_status_perf_dynamics: print('{:5s}{:35s}{:<10}'.format('','performance dynamics','done')) else: print('{:5s}{:35s}{:<10}'.format('','performance dynamics','skipped')) #check if user wants to skip tradeoff, if yes tradeoff will not run, print skipped if self.tradeoff_status == -1: print('{:40s}{:<10}'.format('Running tradeoff','skipped')) #check if tradeoff hasn't run and user does not want to skip, only run if haven't elif self.tradeoff_status == 0 and skip_tradeoff_flag==0: try : self.tradeoff(output=False, n_threads=n_threads) #if user wants to skip tradeoff, print skipped if self.tradeoff_status == -1 : print('{:40s}{:<10}'.format('Running tradeoff','skipped')) #set status to 1 after evaluate has run elif self.tradeoff_status == 1 : print('{:40s}{:<10}'.format('Running tradeoff','done')) except : print('{:40s}{:<10}'.format('Running tradeoff','skipped')) #check if tradeoff hasn't run and user wants to skip, print skipped elif self.tradeoff_status == 0 and skip_tradeoff_flag==1: self.tradeoff_status = -1 print('{:40s}{:<10}'.format('Running tradeoff','skipped')) else: print('{:40s}{:<10}'.format('Running tradeoff','done')) #check if user wants to skip feature_importance, if yes feature_importance will not run, print skipped if self.feature_imp_status_corr: 
print('{:40s}{:<10}'.format('Running feature importance','done')) elif self.feature_imp_status == -1: print('{:40s}{:<10}'.format('Running feature importance','skipped')) #check if feature_importance hasn't run and user does not want to skip, only run if haven't elif self.feature_imp_status == 0 and skip_feature_imp_flag ==0: try : self.feature_importance(output=False, n_threads=n_threads) if self.feature_imp_status == 1: print('{:40s}{:<10}'.format('Running feature importance','done')) elif self.feature_imp_status_corr: print('{:40s}{:<10}'.format('Running feature importance','done')) else: print('{:40s}{:<10}'.format('Running feature importance','skipped')) except: print('{:40s}{:<10}'.format('Running feature importance','skipped')) #check if feature_importance hasn't run and user wants to skip, print skipped elif self.feature_imp_status == 0 and skip_feature_imp_flag ==1: self.feature_imp_status = -1 print('{:40s}{:<10}'.format('Running feature importance','skipped')) else: print('{:40s}{:<10}'.format('Running feature importance','done')) #check if feature_importance_loo has ran, if not print skipped if self.feature_imp_status_loo: print('{:5s}{:35s}{:<10}'.format('','leave-one-out analysis','done')) else: print('{:5s}{:35s}{:<10}'.format('','leave-one-out analysis','skipped')) #check if feature_importance_corr has ran, if not print skipped if self.feature_imp_status_corr: print('{:5s}{:35s}{:<10}'.format('','correlation analysis','done')) else: print('{:5s}{:35s}{:<10}'.format('','correlation analysis','skipped')) #run function to generate json model artifact file after all API functions have ran self._generate_model_artifact() def tradeoff(self, output=True, n_threads=1, sigma = 0): """ Computes the trade-off between performance and fairness over a range of threshold values. If output = True, run the _print_tradeoff() function. Parameters ----------- output : boolean, default=True If output = True, run the _print_tradeoff() function. 
n_threads : int, default=1 Number of currently active threads of a job sigma : float or int , default = 0 Standard deviation for Gaussian kernel for smoothing the contour lines of primary fairness metric. When sigma <= 0, smoothing is turn off. Suggested to try sigma = 3 or above if noisy contours are observed. """ #if y_prob is None, skip tradeoff if self.model_params[0].y_prob is None: self.tradeoff_status = -1 print("Tradeoff has been skipped due to y_prob") #if user wants to skip tradeoff, return None if self.tradeoff_status == -1: return #check if tradeoff hasn't run, only run if haven't elif self.tradeoff_status == 0: self.sigma = sigma n_threads = check_multiprocessing(n_threads) #to show progress bar tdff_pbar = tqdm(total=100, desc='Tradeoff', bar_format='{l_bar}{bar}') tdff_pbar.update(5) sys.stdout.flush() #initialize tradeoff self.tradeoff_obj = TradeoffRate(self) tdff_pbar.update(10) #run tradeoff self.tradeoff_obj.compute_tradeoff(n_threads, tdff_pbar) tdff_pbar.update(100 - tdff_pbar.n) tdff_pbar.close() print('', flush=True) #if after running tradoeff, result is None, print skipped if self.tradeoff_obj.result == {}: print(self.tradeoff_obj.msg) self.tradeoff_status = -1 else: #set status to 1 after tradeoff has ran self.tradeoff_status = 1 #if tradeoff has already ran once, just print result if output and self.tradeoff_status == 1: self._print_tradeoff() def feature_importance(self, output=True, n_threads=1): """ Trains models using the leave-one-variable-out method for each protected variable and computes the performance and fairness metrics each time to assess the impact of those variables. If output = True, run the _print_feature_importance() function. Parameters ------------ output : boolean, default=True Flag to print out the results of evaluation in the console. This flag will be False if visualize=True. 
n_threads : int Number of currently active threads of a job Returns ------------ self.feature_imp_status_loo : boolean Tracks the status of the completion of the leave-one-out analysis step within feature_importance() method to be checked in compile(). self.feature_imp_status : int Tracks the status of the completion of the feature_importance() method to be checked in compile(). self._compute_correlation() self._print_feature_importance() """ #if feature_imp_status_corr hasn't run if self.feature_imp_status_corr == False: self._compute_correlation() #if user wants to skip feature_importance, return None if self.feature_imp_status == -1: self.feature_imp_values = None return #check if feature_importance hasn't run, only run if haven't if self.feature_imp_status == 0: for k in self.model_params: x_train = k.x_train y_train = k.y_train model_object = k.model_object x_test = k.x_test train_op_name = k.train_op_name predict_op_name = k.predict_op_name # if model_object is not provided, skip feature_importance if model_object is None: self.feature_imp_status = -1 print("Feature importance has been skipped due to model_object") return else : for var_name in [train_op_name, predict_op_name]: #to check callable functions try: callable(getattr(model_object, var_name)) except: self.feature_imp_status = -1 print("Feature importance has been skipped due to train_op_name/predict_op_name error") return #to show progress bar fimp_pbar = tqdm(total=100, desc='Feature importance', bar_format='{l_bar}{bar}') fimp_pbar.update(1) self.feature_imp_values = {} for h in self.model_params[0].p_var: self.feature_imp_values[h] = {} fimp_pbar.update(1) #if evaluate_status = 0, run evaluate() first if self.evaluate_status == 0: self.evaluate(output=False) #if user wants to skip feature_importance, return None if self.feature_imp_status == -1: self.feature_imp_values = None return fimp_pbar.update(1) num_p_var = len(self.model_params[0].p_var) n_threads = check_multiprocessing(n_threads) 
max_workers = min(n_threads, num_p_var) #if require to run with 1 thread, will skip deepcopy worker_progress = 80/num_p_var if max_workers >=1: threads = [] with concurrent.futures.ThreadPoolExecutor(max_workers = max_workers) as executor: fimp_pbar.update(5) #iterate through protected variables to drop one by one as part of leave-one-out for i in self.model_params[0].p_var: if max_workers == 1: use_case_object = self else: use_case_object = deepcopy(self) threads.append(executor.submit(Fairness._feature_imp_loo, p_variable=i, use_case_object=use_case_object, fimp_pbar=fimp_pbar, worker_progress=worker_progress )) for thread in threads: fimp_pbar.update(round(8/num_p_var, 2)) if thread.result() is None: self.feature_imp_status = -1 return else: for removed_pvar, values in thread.result().items(): for pvar, v in values.items(): self.feature_imp_values[pvar][removed_pvar] = v #change flag after feature_importance has finished running self.feature_imp_status_loo = True self.feature_imp_status = 1 fimp_pbar.update(2) fimp_pbar.update(100.0-fimp_pbar.n) fimp_pbar.close() print('', flush=True) #if feature_importance has already ran once, just print result if output == True: self._print_feature_importance() def _feature_imp_loo(p_variable, use_case_object, fimp_pbar, worker_progress): """ Maps each thread's work for feature_importance() Parameters ------------ p_variable : str Name of protected variable use_case_object : object Initialised use case object fimp_pbar : worker_progress : Returns ------------ dictionary of loo_result of each p_var """ #get baseline values baseline_perf_values = use_case_object.perf_metric_obj.result.get("perf_metric_values").get(use_case_object.perf_metric_name)[0] baseline_fair_values = use_case_object.fair_metric_obj.result.get(p_variable).get("fair_metric_values").get(use_case_object.fair_metric_name)[0] baseline_fairness_conclusion = use_case_object.fair_conclusion.get(p_variable).get("fairness_conclusion") #toDel#baseline_values = 
[baseline_perf_values, baseline_fair_values, baseline_fairness_conclusion] # empty y_pred_new list to be appended y_pred_new = [] loo_result = {} # loop through model_params for k in range(len(use_case_object.model_params)): ## for uplift model type --> two model container --> need to train two models ## when model param len =2, then it is uplift model p_var = use_case_object.model_params[k].p_var x_train = use_case_object.model_params[k].x_train y_train = use_case_object.model_params[k].y_train model_object = use_case_object.model_params[k].model_object x_test = use_case_object.model_params[k].x_test y_pred = use_case_object.model_params[k].y_pred y_prob = use_case_object.model_params[k].y_prob pos_label = use_case_object.model_params[k].pos_label neg_label = use_case_object.model_params[k].neg_label train_op = getattr(model_object, use_case_object.model_params[k].train_op_name) predict_op = getattr(model_object, use_case_object.model_params[k].predict_op_name) #show progress bar fimp_pbar.update(round(worker_progress*0.9/len(use_case_object.model_params), 2)) try: #check if x_train is a dataframe if isinstance(x_train, pd.DataFrame): #drop protected variable and train model pre_loo_model_obj = train_op(x_train.drop(columns=[p_variable]), y_train) # train_op_name is string, need to use getattr[] to get the attribute? 
else : pre_loo_model_obj = train_op(x_train, y_train, p_variable) # train_op to handle drop column i inside train_op # Predict and compute performance Metrics (PerformanceMetrics.result.balanced_acc) except: #else print skipped and return None print("LOO analysis is skipped for [", p_variable, "] due to x_train/y_train error") use_case_object.feature_imp_status = -1 return None try: #check if x_test is a dataframe if isinstance(x_test, pd.DataFrame): #drop protected variable and predict pre_y_pred_new = np.array(predict_op(x_test.drop(columns=[p_variable]))) else : pre_y_pred_new = predict_op(x_train, y_train, p_variable) # train_op to handle drop column i inside train_op except: #else print skipped and return None print("LOO analysis is skipped for [", p_variable, "] due to x_test/y_test error") use_case_object.feature_imp_status = -1 return None fimp_pbar.update(round(worker_progress*0.02, 2)) pre_y_pred_new = predict_op(x_test.drop(columns=[p_variable])) #to ensure labels and datatype for predicted values are correct before running metrics if len(pre_y_pred_new.shape) == 1 and pre_y_pred_new.dtype.kind in ['i','O','U']: pre_y_pred_new, pos_label2 = check_label(pre_y_pred_new, pos_label, neg_label) else: pre_y_pred_new = pre_y_pred_new.astype(np.float64) y_pred_new.append(pre_y_pred_new) #run performance and fairness evaluation only for primary performance and fair metric loo_perf_value = use_case_object.perf_metric_obj.translate_metric(use_case_object.perf_metric_name, y_pred_new=y_pred_new) ##to find deltas (removed - baseline) for primary perf metric deltas_perf = loo_perf_value - baseline_perf_values #toDel#baseline_values[0] # to iterate through each protected variable for each protected variable that is being dropped for j in use_case_object.model_params[0].p_var: fimp_pbar.update(round(worker_progress*0.08/len(p_var), 2)) use_case_object.fair_metric_obj.curr_p_var = j #will this work under multithreading? 
will not work, should changes to a copy ## get loo_perf_value,loo_fair_values loo_fair_value, loo_priv_m_v = use_case_object.fair_metric_obj.translate_metric(use_case_object.fair_metric_name, y_pred_new=y_pred_new)[:2] ##to find deltas (removed - baseline) for each protected variable in iteration for primary fair metric #toDel#deltas_fair = loo_fair_value - baseline_values[1] baseline_fair_values_j = use_case_object.fair_metric_obj.result.get(j).get("fair_metric_values").get(use_case_object.fair_metric_name)[0] baseline_fairness_conclusion_j = use_case_object.fair_conclusion.get(j).get("fairness_conclusion") deltas_fair = loo_fair_value - baseline_fair_values_j ##fairness fair_conclusion loo_fairness_conclusion = use_case_object._fair_conclude(j, priv_m_v=loo_priv_m_v, value=loo_fair_value) #toDel#delta_conclusion = baseline_values[2] + " to " + loo_fairness_conclusion["fairness_conclusion"] delta_conclusion = baseline_fairness_conclusion_j + " to " + loo_fairness_conclusion["fairness_conclusion"] ##suggestion #if metric used is parity based, means it will either be more than 0 or less than 0. So set n = 0 to see the difference. if FairnessMetrics.map_fair_metric_to_group.get(use_case_object.fair_metric_name)[2] == 'parity': n = 0 #if metric used is ratio based, means it will either be more than 1 or less than 1. So set n = 1 to see the difference. 
else: n = 1 if abs(loo_fair_value - n) < abs(baseline_fair_values_j - n): if PerformanceMetrics.map_perf_metric_to_group.get(use_case_object.perf_metric_name)[1] == "regression" : if deltas_perf <= 0: suggestion = 'exclude' else: suggestion = 'examine further' else : if deltas_perf >= 0: suggestion = 'exclude' else: suggestion = 'examine further' delta_conclusion += " (+)" elif abs(loo_fair_value - n) > abs(baseline_fair_values_j - n): if PerformanceMetrics.map_perf_metric_to_group.get(use_case_object.perf_metric_name)[1] == "regression" : if deltas_perf >= 0: suggestion = 'include' else: suggestion = 'examine further' else: if deltas_perf <= 0: suggestion = 'include' else: suggestion = 'examine further' delta_conclusion += " (-)" else: if PerformanceMetrics.map_perf_metric_to_group.get(use_case_object.perf_metric_name)[1] == "regression" : if deltas_perf < 0: suggestion = 'exclude' elif deltas_perf > 0: suggestion = 'include' else: suggestion = 'exclude' else: if deltas_perf > 0: suggestion = 'exclude' elif deltas_perf < 0: suggestion = 'include' else: suggestion = 'exclude' loo_result[j] = [deltas_perf, deltas_fair, delta_conclusion, suggestion] return {p_variable: loo_result} def _compute_correlation(self): """ Computes the top-20 correlation matrix inclusive of the protected variables """ try : if isinstance(self.model_params[0].x_test, str): self.feature_imp_status_corr = False return if isinstance(self.model_params[0].feature_imp, pd.DataFrame) and isinstance(self.model_params[0].x_test, pd.DataFrame): #sort feature_imp dataframe by values (descending) sorted_dataframe = self.model_params[0].feature_imp.sort_values(by=self.model_params[0].feature_imp.columns[1], ascending=False) #extract n_features and pass into array feature_cols = np.array(sorted_dataframe.iloc[:,0]) p_var_cols = np.array(self.model_params[0].p_var) feature_cols = [col for col in feature_cols if col not in p_var_cols] feature_cols = feature_cols[:20-len(p_var_cols)] #feature_columns value 
from x_test feature_columns = self.model_params[0].x_test[feature_cols] #p_var_columns value from protected_features_cols p_var_columns = self.model_params[0].x_test[p_var_cols] #create final columns and apply corr() df = pd.concat([feature_columns, p_var_columns], axis=1).corr() self.correlation_output = {"feature_names":df.columns.values, "corr_values":df.values} #return correlation_output as dataframe self.feature_imp_status_corr = True else: #extract n_features and pass into array feature_cols = np.array(self.model_params[0].x_test.columns[:20]) p_var_cols = np.array(self.model_params[0].p_var) feature_cols = [col for col in feature_cols if col not in p_var_cols] feature_cols = feature_cols[:20-len(p_var_cols)] #feature_columns value from x_test feature_columns = self.model_params[0].x_test[feature_cols] #p_var_columns value from protected_features_cols p_var_columns = self.model_params[0].x_test[p_var_cols] #create final columns and apply corr() df = pd.concat([feature_columns, p_var_columns], axis=1).corr() self.correlation_output = {"feature_names":df.columns.values, "corr_values":df.values} self.feature_imp_status_corr = True except: self.feature_imp_status_corr = False def _print_evaluate(self): """ Formats the results of the evaluate() method before printing to console. 
""" if ("_rejection_inference_flag" in dir(self)): if True in self._rejection_inference_flag.values(): print("Special Parameters") print("Rejection Inference = True") name = [] for i in self.model_params[0].p_grp.keys(): name += [i + " - " + str(self.model_params[0].p_grp.get(i)[0])] str1 = ", ".join( str(e) for e in list(set(filter(lambda a: a != self.model_params[0].p_grp.get(i)[0], self.model_params[0].protected_features_cols[i])))) name += [i + " - " + str1] titles = ['Group', 'Base Rate', 'Number of Rejected Applicants'] a = [] for i in self.spl_params['base_default_rate'].keys(): a += self.spl_params['base_default_rate'].get(i) b = [] for i in self.spl_params['num_applicants'].keys(): b += self.spl_params['num_applicants'].get(i) data = [titles] + list(zip(name, a, b)) for i, d in enumerate(data): line = '| '.join(str(x).ljust(16) for x in d) print(line) if i == 0: print('-' * len(line)) print("\n") elif hasattr(self, 'spl_params') and ('revenue' in self.spl_params or 'treatment_cost' in self.spl_params): print("Special Parameters") titles = ['Revenue', 'Treatment Cost'] a = [self.spl_params['revenue']] b = [self.spl_params['treatment_cost']] data = [titles] + list(zip(a, b)) for i, d in enumerate(data): line = '| '.join(str(x).ljust(16) for x in d) print(line) if i == 0: print('-' * len(line)) print("\n") if PerformanceMetrics.map_perf_metric_to_group.get(self.perf_metric_name)[1] != "regression": print("Class Distribution") if self.model_params[0].model_type != "uplift": print("{0:<45s}{1:>29.{decimal_pts}f}%".format("\t" + "pos_label", self.perf_metric_obj.result.get("class_distribution").get("pos_label") * 100, decimal_pts=self.decimals)) print("{0:<45s}{1:>29.{decimal_pts}f}%".format("\t" + "neg_label", self.perf_metric_obj.result.get("class_distribution").get("neg_label") * 100, decimal_pts=self.decimals)) else: print("{0:<45s}{1:>29.{decimal_pts}f}%".format("\t" + "CN", self.perf_metric_obj.result.get("class_distribution").get("CN") * 100, 
decimal_pts=self.decimals)) print("{0:<45s}{1:>29.{decimal_pts}f}%".format("\t" + "TN", self.perf_metric_obj.result.get("class_distribution").get("TN") * 100, decimal_pts=self.decimals)) print("{0:<45s}{1:>29.{decimal_pts}f}%".format("\t" + "CR", self.perf_metric_obj.result.get("class_distribution").get("CR") * 100, decimal_pts=self.decimals)) print("{0:<45s}{1:>29.{decimal_pts}f}%".format("\t" + "TR", self.perf_metric_obj.result.get("class_distribution").get("TR") * 100, decimal_pts=self.decimals)) else: pass print("\n") if self.model_params[0].sample_weight is not None: print("Performance Metrics (Sample Weight = True)") else: print("Performance Metrics") for k in self._use_case_metrics["perf"]: print_metric_value(k, 0) if self.perf_metric_obj.result.get("calibration_curve") is None: pass else: print("\n") print("Probability Calibration") m = "\tBrier Loss Score" v = "{:.{decimal_pts}f}".format(self.perf_metric_obj.result.get("calibration_curve").get("score"), decimal_pts=self.decimals) print("{0:<45s}{1:>30s}".format(m, v)) print("\n") if self.fair_metric_input == 'auto': print('Primary Fairness Metric Suggestion') print('\t{}'.format(FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[0])) print('based on') print('\tfair_priority = {}'.format(self.fair_priority)) print('\tfair_concern = {}'.format(self.fair_concern)) print('\tfair_impact = {}'.format(self.fair_impact)) print('\n') for i, i_var in enumerate(self.model_params[0].p_var): p_len = len(str(i + 1) + ": " + i_var) print("-" * 35 + str(i + 1) + ": " + i_var.title() + "-" * int((45 - p_len))) print("Value Distribution") print("{:<45s}{:>29.{decimal_pts}f}%".format('\tPrivileged Group', self.fair_metric_obj.result.get(i_var).get( "feature_distribution").get("privileged_group") * 100, decimal_pts=self.decimals)) print("{:<45s}{:>29.{decimal_pts}f}%".format('\tUnprivileged Group', self.fair_metric_obj.result.get(i_var).get( "feature_distribution").get("unprivileged_group") * 100, 
decimal_pts=self.decimals)) print("\n") if self.model_params[0].sample_weight is not None: print("Fairness Metrics (Sample Weight = True)") else: print("Fairness Metrics") for h in self._use_case_metrics["fair"]: print_metric_value(h, 1) print("\n") print("Fairness Conclusion") m = "\tOutcome ({})".format(FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[0]) v = self.fair_conclusion.get(i_var).get("fairness_conclusion").title() print("{0:<55s}{1:>20s}*".format(m, v)) m = "\tFairness Threshold" if self.fair_threshold > 0 and self.fair_threshold < 1: v = str(self.fair_threshold) elif self.fair_threshold > 1 and self.fair_threshold < 100: v = str(self.fair_threshold) + "%" print("{0:<45s}{1:>30s}".format(m, v)) print("\n") print('* The outcome is calculated based on your inputs and is provided for informational purposes only. Should you decide to act upon the information herein, you do so at your own risk and Veritas Toolkit will not be liable or responsible in any way. ') sys.stdout.flush() def _print_tradeoff(self): """ Formats the results of the tradeoff() method before printing to console. """ i = 1 p_var = self.model_params[0].p_var for p_variable in p_var: #title title_str = " "+ str(i) + ". 
def _print_tradeoff(self):
    """
    Formats the results of the tradeoff() method before printing to console.

    For every protected variable, prints a centred banner followed by three sections
    of the perf/fairness trade-off grid: best point with a single shared threshold,
    best point with separate privileged/unprivileged thresholds, and best point with
    separate thresholds constrained to near-neutral fairness.
    """
    i = 1
    p_var = self.model_params[0].p_var
    for p_variable in p_var:
        # title: pad to even length so the dashes centre the "N. variable" banner in 72 chars
        title_str = " " + str(i) + ". " + p_variable + " "
        if len(title_str) % 2 == 1:
            title_str += " "
        line_str = int((72 - len(title_str)) / 2) * "-"
        print(line_str + title_str + line_str)
        print("Performance versus Fairness Trade-Off")
        # Single Threshold -- max_perf_single_th = [threshold, threshold, best perf value]
        print("\t Single Threshold")
        print("\t\t{:35s}{:>20.{decimal_pts}f}".format("Privileged/Unprivileged Threshold", self.tradeoff_obj.result[p_variable]["max_perf_single_th"][0], decimal_pts=self.decimals))
        print("\t\t{:35s}{:>20.{decimal_pts}f}".format(str("Best " + self.tradeoff_obj.result[p_variable]["perf_metric_name"] + "*"), self.tradeoff_obj.result[p_variable]["max_perf_single_th"][2], decimal_pts=self.decimals))
        # Separated Thresholds -- max_perf_point = [priv th, unpriv th, best perf value]
        print("\t Separated Thresholds")
        print("\t\t{:35s}{:>20.{decimal_pts}f}".format("Privileged Threshold", self.tradeoff_obj.result[p_variable]["max_perf_point"][0], decimal_pts=self.decimals))
        print("\t\t{:35s}{:>20.{decimal_pts}f}".format("Unprivileged Threshold", self.tradeoff_obj.result[p_variable]["max_perf_point"][1], decimal_pts=self.decimals))
        print("\t\t{:35s}{:>20.{decimal_pts}f}".format(str("Best " + self.tradeoff_obj.result[p_variable]["perf_metric_name"] + "*"), self.tradeoff_obj.result[p_variable]["max_perf_point"][2], decimal_pts=self.decimals))
        # Separated Thresholds under Neutral Fairness (tolerance from config, e.g. 0.01)
        print("\t Separated Thresholds under Neutral Fairness ({})".format(self.fair_neutral_tolerance))
        print("\t\t{:35s}{:>20.{decimal_pts}f}".format("Privileged Threshold", self.tradeoff_obj.result[p_variable]["max_perf_neutral_fair"][0], decimal_pts=self.decimals))
        print("\t\t{:35s}{:>20.{decimal_pts}f}".format("Unprivileged Threshold", self.tradeoff_obj.result[p_variable]["max_perf_neutral_fair"][1], decimal_pts=self.decimals))
        print("\t\t{:35s}{:>20.{decimal_pts}f}".format(str("Best " + self.tradeoff_obj.result[p_variable]["perf_metric_name"] + "*"), self.tradeoff_obj.result[p_variable]["max_perf_neutral_fair"][2], decimal_pts=self.decimals))
        print("\t\t*estimated by approximation, subject to the resolution of mesh grid")
        print("")
        i += 1
    sys.stdout.flush()
grid") print("") i+=1 sys.stdout.flush() def _print_feature_importance(self): """ Formats the results of the feature_importance() method before printing to console. """ for i, i_var in enumerate(self.model_params[0].p_var): print("\n") p_len = len(str(i + 1) + ": Fairness on " + i_var) print("-" * 50 + str(i + 1) + ": Fairness on " + i_var.title() + "-" * int((116 - 50 - p_len))) print() print("-" * 116) print("|{:<30}|{:<20}|{:<20}|{:<20}|{:<20}|".format("Removed Protected Variable", self.perf_metric_name, self.fair_metric_name, "Fairness Conclusion", "Suggestion")) print("-" * 116) for j in self.model_params[0].p_var: col1, col2, col3, col4 = self.feature_imp_values[i_var][j] print("|{:<30}|{:<20.{decimal_pts}f}|{:<20.{decimal_pts}f}|{:<20}|{:<20}|".format(j, col1, col2, col3, (col4).title(), decimal_pts=self.decimals)) print("-" * 116) print() if self.feature_imp_status_corr == False: print("Correlation matrix skippped") else: return self.correlation_output sys.stdout.flush() def _generate_model_artifact(self): """ Generates the JSON file to be saved locally at the end of compile() """ #aggregate the results into model artifact print('{:40s}'.format('Generating model artifact'), end='') artifact = {} # Section 1 - fairness_init #write results to fairness_init fairness_init = {} fairness_init["fair_metric_name_input"] = self.fair_metric_input fairness_init["fair_metric_name"] = FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[0] fairness_init["perf_metric_name"] = PerformanceMetrics.map_perf_metric_to_group.get(self.perf_metric_name)[0] fairness_init["protected_features"] = self.model_params[0].p_var if FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[1] != "regression": fairness_init["fair_priority"] = self.fair_priority fairness_init["fair_concern"] = self.fair_concern fairness_init["fair_impact"] = self.fair_impact if self.model_params[0].model_type == "uplift" or self.model_params[0].model_type == "credit": 
def _generate_model_artifact(self):
    """
    Generates the JSON model-artifact file to be saved locally at the end of compile().

    Aggregates the fairness initialisation inputs, performance results, correlation
    matrix and per-protected-variable fairness results into one dict, stores it on
    self.artifact and writes it to a timestamped model_artifact_*.json file.
    """
    # aggregate the results into model artifact
    print('{:40s}'.format('Generating model artifact'), end='')
    artifact = {}
    # Section 1 - fairness_init: echo back the user's configuration
    fairness_init = {}
    fairness_init["fair_metric_name_input"] = self.fair_metric_input
    fairness_init["fair_metric_name"] = FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[0]
    fairness_init["perf_metric_name"] = PerformanceMetrics.map_perf_metric_to_group.get(self.perf_metric_name)[0]
    fairness_init["protected_features"] = self.model_params[0].p_var
    # the priority/concern/impact tree only applies to non-regression metrics
    if FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[1] != "regression":
        fairness_init["fair_priority"] = self.fair_priority
        fairness_init["fair_concern"] = self.fair_concern
        fairness_init["fair_impact"] = self.fair_impact
    if self.model_params[0].model_type == "uplift" or self.model_params[0].model_type == "credit":
        # num_applicants and base_default_rate for creditscoring; treatment_cost,
        # revenue and selection_threshold for customermarketing
        fairness_init["special_params"] = self.spl_params
    fairness_init["fair_threshold_input"] = self.fair_threshold_input
    fairness_init["fair_neutral_tolerance"] = self.fair_neutral_tolerance
    model_type = self.model_params[0].model_type
    # add fairness_init results to artifact
    artifact["fairness_init"] = fairness_init
    # Section 2 - performance results, with internal metric keys mapped to display names
    perf_result = deepcopy(self.perf_metric_obj.result)
    perf_vals_wth_metric_names = {}
    for key in self.perf_metric_obj.result["perf_metric_values"].keys():
        if key in PerformanceMetrics.map_perf_metric_to_group.keys():
            perf_vals_wth_metric_names[PerformanceMetrics.map_perf_metric_to_group.get(key)[0]] = \
                self.perf_metric_obj.result["perf_metric_values"][key]
    perf_result["perf_metric_values"] = perf_vals_wth_metric_names
    artifact = {**artifact, **(perf_result)}
    artifact["correlation_matrix"] = self.correlation_output
    # Section 3 - per-protected-variable fairness results
    p_var = self.model_params[0].p_var
    features_dict = {}
    for pvar in p_var:
        dic_h = {}
        dic_h["fair_threshold"] = self.fair_conclusion.get(pvar).get("threshold")
        dic_h["privileged"] = self.model_params[0].p_grp[pvar]
        dic_t = {}
        dic_t["fairness_conclusion"] = self.fair_conclusion.get(pvar).get("fairness_conclusion")
        # tradeoff/feature-importance sections only exist when those analyses ran (-1 = skipped)
        dic_t["tradeoff"] = None
        if self.tradeoff_status != -1:
            dic_t["tradeoff"] = self.tradeoff_obj.result.get(pvar)
        dic_t["feature_importance"] = None
        if self.feature_imp_status != -1:
            dic_t["feature_importance"] = self.feature_imp_values.get(pvar)
        fair_vals_wth_metric_names = {}
        for key in self.fair_metric_obj.result.get(pvar)['fair_metric_values'].keys():
            if key in FairnessMetrics.map_fair_metric_to_group.keys():
                fair_vals_wth_metric_names[FairnessMetrics.map_fair_metric_to_group.get(key)[0]] = \
                    self.fair_metric_obj.result.get(pvar)['fair_metric_values'][key]
        fair_result = deepcopy(self.fair_metric_obj.result.get(pvar))
        fair_result['fair_metric_values'] = fair_vals_wth_metric_names
        # keep only [value, confidence interval] per metric (drop the middle element)
        for k, v in fair_result['fair_metric_values'].items():
            fair_result['fair_metric_values'][k] = [v[0], v[2]]
        features_dict[str(pvar)] = {**dic_h, **fair_result, **dic_t}
    # add features_dict results to artifact
    artifact["features"] = features_dict
    print('done')
    model_name = (self.model_params[0].model_name + "_").replace(" ", "_")
    filename = "model_artifact_" + model_name + datetime.datetime.today().strftime('%Y%m%d_%H%M') + ".json"
    self.artifact = artifact
    artifactJson = json.dumps(artifact, cls=NpEncoder)
    # context manager replaces open/write/close so the handle is released even if write fails
    with open(filename, "w") as jsonFile:
        jsonFile.write(artifactJson)
    print("Saved model artifact to " + filename)
style="color:#8E8E93; text-align:center; background-color:#E5E5EA; font-size:12px">{}</div>' html_yellow_left = '<div style="color:black; text-align:left; padding-left:5px; background-color:#FFF2CC; font-size:12px">{}</div>' html_yellow_right = '<div style="color:black; text-align:right; padding-right:5px; background-color:#FFF2CC; font-size:12px">{}</div>' html_model_type = widgets.HTML(value=html_yellow_left.format('Model Type: ' + model_type), layout=Layout(display="flex", width='30%')) html_model_name = widgets.HTML(value=html_yellow_right.format('Model Name: ' + model_name), layout=Layout(display="flex", justify_content="flex-end", width='45%')) dropdown_protected_feature = widgets.Dropdown(options=options, description=r'Protected Feature:', layout=Layout(display="flex", justify_content="flex-start", width='62.5%', padding='0px 0px 0px 5px'), style=dict(description_width='initial')) dropdown_protected_feature.add_class("dropdown_clr") html_model_priority = widgets.HTML(value=html_pink.format("Priority: " + model_priority), layout=Layout(display="flex", width='12.5%')) html_model_impact = widgets.HTML(value=html_pink.format("Impact: " + model_impact), layout=Layout(display="flex", width='12.5%')) html_model_concern = widgets.HTML(value=html_pink.format('Concern: ' + model_concern), layout=Layout(display="flex", width='12.5%')) if (self.model_params[0].sample_weight is not None): sw = html_grey_true else: sw = html_grey_false if "_rejection_inference_flag" in dir(self): if True in self._rejection_inference_flag.values(): ri = html_grey_true else: ri = html_grey_false elif hasattr(self, 'spl_params') and model_type == "Uplift": if None not in self.spl_params.values(): ri = html_grey_true else: ri = html_grey_false else: ri = html_grey_false html_sample_weight = widgets.HTML(value=sw.format('Sample Weight'), layout=Layout(display="flex", justify_content="center", width='12.5%')) if model_type == "Credit": html_rej_infer = widgets.HTML(value=ri.format('Rejection 
Inference'), layout=Layout(display="flex", justify_content="center", width='12.5%')) elif model_type == "Default" or PerformanceMetrics.map_perf_metric_to_group.get(self.perf_metric_name)[1] == "regression": regression = '<div style="color:#E5E5EA; text-align:center; background-color:#E5E5EA; font-size:12px">{}</div>' html_rej_infer = widgets.HTML(value=regression.format('N/A'), layout=Layout(display="flex", justify_content="center", width='12.5%')) elif PerformanceMetrics.map_perf_metric_to_group.get(self.perf_metric_name)[1] != "regression": html_rej_infer = widgets.HTML(value=ri.format('Revenue & Cost'), layout=Layout(display="flex", justify_content="center", width='12.5%')) html_fair_italics = '<div style="color:black; text-align:left; padding-left:5px; font-style: italic;font-weight: bold;font-size:14px">{}</div>' html_fair_bold = '<div style="color:black; text-align:center;font-weight: bold;font-size:20px">{}</div>' html_fair_bold_red = '<div style="color:#C41E3A; text-align:center; font-weight:bold; font-size:20px">{}</div>' html_fair_bold_green = '<div style="color:#228B22; text-align:center; font-weight:bold; font-size:20px">{}</div>' html_fair_small = '<div style="color:black; text-align:left; padding-left:25px; font-size:12px">{}</div>' html_fair_metric = '<div style="color:black; text-align:right; font-weight: bold;font-size:20px">{}</div>' html_fair_ci = '<div style="color:black; text-align:left; padding-left:5px; font-size:15px">{}</div>' chosen_p_v = option_p_var[0] fair1 = widgets.HTML(value=html_fair_italics.format('Fairness'), layout=Layout(display="flex", margin='0')) fair2_1 = widgets.HTML(value=html_fair_small.format('Metric'), layout=Layout(display="flex", justify_content="flex-start", margin='0')) fair2_2 = widgets.HTML(value=html_fair_small.format('Assessment'), layout=Layout(display="flex", justify_content="flex-start", margin='0')) fair3_1 = widgets.HTML( 
value=html_fair_bold.format(FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[0]), layout=Layout(display="flex", justify_content="center", margin='0')) if self.fair_conclusion.get(chosen_p_v).get("fairness_conclusion") == 'fair': pattern = html_fair_bold_green else: pattern = html_fair_bold_red fair3_2_v = pattern.format(self.fair_conclusion.get(chosen_p_v).get("fairness_conclusion").title()) fair3_2 = widgets.HTML(value=fair3_2_v, layout=Layout(display="flex", justify_content="center", margin='0')) fair4_1 = widgets.HTML(value=html_fair_small.format('Value'), layout=Layout(display="flex", justify_content="flex-start", margin='0')) fair4_2 = widgets.HTML(value=html_fair_small.format('Threshold'), layout=Layout(display="flex", justify_content="flex-start", margin='0')) v = html_fair_metric.format("{:.{decimal_pts}f}".format(self.fair_metric_obj.result.get(chosen_p_v).get('fair_metric_values').get(self.fair_metric_name)[0], decimal_pts=self.decimals)) fair5_1 = widgets.HTML(value=v,layout=Layout(display="flex", width='50%', justify_content="center", margin='0')) c = html_fair_ci.format('\xB1 ' + "{:.{decimal_pts}f}".format(self.fair_metric_obj.result.get(chosen_p_v).get('fair_metric_values').get(self.fair_metric_name)[2], decimal_pts=self.decimals)) fair5_1_1 = widgets.HTML(value=c,layout=Layout(display="flex", width='50%', justify_content="center", margin='0')) t = html_fair_bold.format("{:.{decimal_pts}f}".format(self.fair_conclusion.get(chosen_p_v).get("threshold"), decimal_pts=self.decimals)) fair5_2 = widgets.HTML(value=t, layout=Layout(display="flex", justify_content="center", margin='0')) fair5 = HBox([fair5_1, fair5_1_1], layout=Layout(display="flex", justify_content="center")) box1f = VBox(children=[fair2_1, fair3_1, fair4_1, fair5], layout=Layout(width="66.666%")) box2f = VBox(children=[fair2_2, fair3_2, fair4_2, fair5_2], layout=Layout(width="66.666%")) box3f = HBox([box1f, box2f]) box4f = VBox([fair1, box3f], 
layout=Layout(width="66.666%", margin='5px 5px 5px 0px')) box4f.add_class("fair_green") html_perf_italics = '<div style="color:black; text-align:left; padding-left:5px; font-style: italic;font-weight: bold;font-size:14px">{}</div>' html_perf_bold = '<div style="color:black; text-align:center; font-weight: bold;font-size:20px">{}</div>' html_perf_small = '<div style="color:black; text-align:left; padding-left:25px; font-size:12px">{}</div>' html_perf_metric = '<div style="color:black; text-align:right; font-weight: bold;font-size:20px">{}</div>' html_perf_ci = '<div style="color:black; text-align:left; padding-left:5px;font-size:15px">{}</div>' perf1 = widgets.HTML(value=html_perf_italics.format('Performance'), layout=Layout(display="flex", width='33.3333%', margin='0')) perf2_1 = widgets.HTML(value=html_perf_small.format('Assessment'), layout=Layout(display="flex", justify_content="flex-start", margin='0')) perf3_1 = widgets.HTML( value=html_perf_bold.format(PerformanceMetrics.map_perf_metric_to_group.get(self.perf_metric_name)[0]), layout=Layout(display="flex", justify_content="flex-start", margin='0')) perf4_1 = widgets.HTML(value=html_perf_small.format('Value'), layout=Layout(display="flex", justify_content="flex-start", margin='0')) v = "{:.{decimal_pts}f}".format(self.perf_metric_obj.result.get('perf_metric_values').get(self.perf_metric_name)[0], decimal_pts=self.decimals) perf5_1 = widgets.HTML(value=html_perf_metric.format(v), layout=Layout(display="flex", justify_content="flex-start", width="50%", margin='0')) c = "{:.{decimal_pts}f}".format(self.perf_metric_obj.result.get('perf_metric_values').get(self.perf_metric_name)[1], decimal_pts=self.decimals) perf5_1_1 = widgets.HTML(value=html_perf_ci.format('\xB1 ' + c), layout=Layout(display="flex", justify_content="flex-start", width="50%", margin='0')) perf5 = HBox([perf5_1, perf5_1_1], layout=Layout(display="flex", justify_content="center")) box1p = VBox(children=[perf2_1, perf3_1, perf4_1, perf5]) box2p = 
VBox([perf1, box1p], layout=Layout(width="33.333%", margin='5px 0px 5px 5px')) box2p.add_class('perf_blue') metric_box = HBox([box4f, box2p], layout=Layout(width="auto")) PATH = Path(__file__).parent.parent.joinpath('resources', 'widget') if model_type != 'Uplift' and PerformanceMetrics.map_perf_metric_to_group.get(self.perf_metric_name)[1] != "regression": image1 = IPython.display.Image(filename=PATH/"perf_class_jpg.JPG", width=300, height=500) A = widgets.Image( value=image1.data, format='jpg', width=260 ) image2 = IPython.display.Image(filename=PATH/"fair_class_jpg.JPG", width=300, height=500) B = widgets.Image( value=image2.data, format='jpg', width=260 ) elif model_type == "Uplift": image1 = IPython.display.Image(filename=PATH/"perf_uplift_jpg.JPG", width=300, height=500) A = widgets.Image( value=image1.data, format='jpg', width=260 ) image2 = IPython.display.Image(filename=PATH/"fair_uplift_jpg.JPG", width=300, height=500) B = widgets.Image( value=image2.data, format='jpg', width=260 ) else: image1 = IPython.display.Image(filename=PATH/"perf_regression_jpg.JPG", width=300, height=500) A = widgets.Image( value=image1.data, format='jpg', width=260 ) image2 = IPython.display.Image(filename=PATH/"fair_regression_jpg.JPG", width=300, height=500) B = widgets.Image( value=image2.data, format='jpg', width=260 ) tab = widgets.Tab([A, B], layout={'width': '32%', 'margin': '15px', 'height': '350px'}) tab.set_title(0, 'Performance Metrics') tab.set_title(1, 'Fairness Metrics') plot_output = widgets.Output(layout=Layout(display='flex', align_items='stretch', width="66.6666%")) filtering(option_p_var[0]) dropdown_protected_feature.observe(dropdown_event_handler, names='value') item_layout = widgets.Layout(margin='0 0 0 0') input_widgets1 = widgets.HBox([html_model_type, html_sample_weight, html_rej_infer, html_model_name], layout=item_layout) input_widgets2 = widgets.HBox([dropdown_protected_feature, html_model_priority, html_model_impact, html_model_concern], 
layout=item_layout) input_widgets = VBox([input_widgets1, input_widgets2]) top_display = widgets.VBox([input_widgets, metric_box]) plot_tab = widgets.HBox([plot_output, tab]) dashboard = widgets.VBox([top_display, plot_tab]) display(dashboard) print("*The threshold and the values of ratio-based metrics are shifted down by 1.") else: print("The widget is only available on Jupyter notebook") except: pass def _set_feature_mask(self): """ Sets the feature mask for each protected variable based on its privileged group Returns ---------- feature_mask : dict of list Stores the mask array for every protected variable applied on the x_test dataset. """ feature_mask = {} for i in self.model_params[0].p_var: privileged_grp = self.model_params[0].p_grp.get(i) feature_mask[i] = self.model_params[0].protected_features_cols[i].isin(privileged_grp) return feature_mask def _get_e_lift(self): """ Helper function to get empirical lift Returns --------- None """ return None def _get_confusion_matrix(self, curr_p_var = None, **kwargs): """ Compute confusion matrix Parameters ------------- curr_p_var : string, default=None Current protected variable Returns ------- Confusion matrix metrics based on privileged and unprivileged groups """ if curr_p_var == None : return [None] * 4 else : return [None] * 8 def _base_input_check(self): """ Checks if there are conflicting input values """ try: if FairnessMetrics.map_fair_metric_to_group.get(self.fair_metric_name)[2] == 'information': if self.fair_threshold > 1: self.err.push('conflict_error', var_name_a=str(self.fair_metric_name), some_string="conflict with fair_threshold", value="", function_name="_base_input_check") self.err.pop() except TypeError: pass def _model_type_input(self): """ Checks if model type input is valid """ for i in self.model_params : #throw an error if model_type provided is not in _model_type_to_metric_lookup if i.model_type not in self._model_type_to_metric_lookup.keys(): self.err.push('value_error', 
def _model_type_input(self):
    """
    Checks if model type input is valid and trims self.model_params to the number of
    model containers the detected model type expects.

    NOTE(review): indentation below is reconstructed from a whitespace-mangled source;
    the placement of the self.err.pop() calls relative to their if-branches should be
    verified against the original file.
    """
    for i in self.model_params :
        #throw an error if model_type provided is not in _model_type_to_metric_lookup
        if i.model_type not in self._model_type_to_metric_lookup.keys():
            self.err.push('value_error', var_name="model_type", given=str(i.model_type), expected=list(self._model_type_to_metric_lookup.keys()), function_name="_model_type_input")
    #print any exceptions occured
    self.err.pop()
    # index 2 of the lookup row is the expected number of model containers
    model_size = self._model_type_to_metric_lookup[self.model_params[0].model_type][2]
    #check if model_size provided based in model_type provided is accepted as per _model_type_to_metric_lookup
    if model_size > len(self.model_params):
        self.err.push('length_error', var_name="model_type", given=str(len(self.model_params)), expected=str(model_size), function_name="_model_type_input")
        #print any exceptions occured
        self.err.pop()
    #check if model_size is -1. If it is only take first set of model_params values
    elif model_size == -1:
        self.model_params = self.model_params[:1]
    else:
        self.model_params = self.model_params[:model_size]
    #check if model_type of first model_container is uplift, the model_name of second model_container should be clone. Otherwise, throw an exception
    if self.model_params[0].model_type == 'uplift':
        if self.model_params[1].model_name != "clone" :
            self.err.push('value_error', var_name="model_name", given=str(self.model_params[1].model_name), expected="clone", function_name="_model_type_input")
        #print any exceptions occured
        self.err.pop()

def _fairness_metric_value_input_check(self):
    """
    Checks if fairness metric value input is valid; invalid input is ignored
    (reset to None) with a console warning rather than raising.
    """
    if self.fairness_metric_value_input is not None:
        for i in self.fairness_metric_value_input.keys() :
            #if user provided keys are not in protected variables, ignore
            if i not in self.model_params[0].p_var:
                print("The fairness_metric_value_input is not provided properly, so it is ignored")
                self.fairness_metric_value_input = None
                break
            for j in self.fairness_metric_value_input[i].keys():
                #if user provided fair metrics are not in fair metrics in use case class, ignore
                if j not in self._use_case_metrics['fair']:
                    print("The fairness_metric_value_input is not provided properly, so it is ignored")
                    self.fairness_metric_value_input = None
                    break
            # NOTE(review): after the inner break resets the dict to None, the outer
            # loop keeps iterating and the next self.fairness_metric_value_input[i]
            # would raise TypeError on None -- looks like a latent bug; confirm
            # intended flow against the original file.
def _primary_metric_candidates(self, metric_map):
    """Return the metric names in *metric_map* usable as a primary metric for this
    use case: same task group (index 1) as the model type and flagged usable (index 4)."""
    model_task = self._model_type_to_metric_lookup[self.model_params[0].model_type][0]
    return [name for name, info in metric_map.items() if info[1] == model_task and info[4] == True]

def check_fair_metric_name(self):
    """
    Checks if the primary fairness metric is valid; pushes a value_error listing the
    acceptable ratio/parity metrics when it is not.
    """
    try:
        # index 4 flags whether the metric may serve as the primary fairness metric
        if FairnessMetrics.map_fair_metric_to_group[self.fair_metric_name][4] == False:
            self.err.push('value_error', var_name="fair_metric_name", given=self.fair_metric_name,
                          expected=self._primary_metric_candidates(FairnessMetrics.map_fair_metric_to_group),
                          function_name="check_fair_metric_name")
    except Exception:
        # narrowed from bare `except:`; unknown metric names are left for other checks
        pass
    #print any exceptions occured
    self.err.pop()

def check_perf_metric_name(self):
    """
    Checks if the primary performance metric is valid; pushes a value_error listing
    the acceptable performance metrics when it is not.
    """
    try:
        # same validation as check_fair_metric_name, driven by the shared helper to
        # remove the previously duplicated candidate-list loop
        if PerformanceMetrics.map_perf_metric_to_group[self.perf_metric_name][4] == False:
            self.err.push('value_error', var_name="perf_metric_name", given=self.perf_metric_name,
                          expected=self._primary_metric_candidates(PerformanceMetrics.map_perf_metric_to_group),
                          function_name="check_perf_metric_name")
    except Exception:
        pass
    #print any exceptions occured
    self.err.pop()
err_.append(['value_error', "fair_impact", str(self.fair_impact),str(['significant', 'selective', 'normal'])]) if err_ != []: for i in range(len(err_)): self.err.push(err_[i][0], var_name=err_[i][1], given=err_[i][2], expected=err_[i][3], function_name="_fairness_tree") self.err.pop() if is_pos_label_favourable == True: if self.fair_priority == "benefit": if self.fair_impact == "normal" : if self.fair_concern == 'inclusive' : self.fair_metric_name = 'fpr_parity' elif self.fair_concern == 'eligible': self.fair_metric_name = 'equal_opportunity' elif self.fair_concern == 'both': self.fair_metric_name = 'equal_odds' elif self.fair_impact =="significant" or self.fair_impact == "selective" : if self.fair_concern == 'inclusive' : self.fair_metric_name = 'fdr_parity' elif self.fair_concern == 'eligible': self.fair_metric_name = 'ppv_parity' elif self.fair_concern == 'both': self.err.push("conflict_error", var_name_a="fair_concern", some_string="not applicable", value="", function_name="_fairness_tree") self.err.pop() elif self.fair_priority == "harm" : if self.fair_impact == "normal" : if self.fair_concern == 'inclusive' : self.fair_metric_name = 'fpr_parity' elif self.fair_concern == 'eligible': self.fair_metric_name = 'fnr_parity' elif self.fair_concern == 'both': self.fair_metric_name = 'equal_odds' elif self.fair_impact =="significant" or self.fair_impact == "selective" : if self.fair_concern == 'inclusive' : self.fair_metric_name = 'fdr_parity' elif self.fair_concern == 'eligible': self.fair_metric_name = 'for_parity' elif self.fair_concern == 'both': self.fair_metric_name = 'calibration_by_group' else: if self.fair_priority == "benefit": if self.fair_impact == "normal" : if self.fair_concern == 'inclusive' : self.fair_metric_name = 'fnr_parity' elif self.fair_concern == 'eligible': self.fair_metric_name = 'tnr_parity' elif self.fair_concern == 'both': self.fair_metric_name = 'neg_equal_odds' elif self.fair_impact =="significant" or self.fair_impact == "selective" : 
if self.fair_concern == 'inclusive' : self.fair_metric_name = 'for_parity' elif self.fair_concern == 'eligible': self.fair_metric_name = 'npv_parity' elif self.fair_concern == 'both': self.err.push("conflict_error", var_name_a="fairness concern", some_string="not applicable", value="", function_name="_fairness_tree") self.err.pop() elif self.fair_priority == "harm" : if self.fair_impact == "normal" : if self.fair_concern == 'inclusive' : self.fair_metric_name = 'fnr_parity' elif self.fair_concern == 'eligible': self.fair_metric_name = 'fpr_parity' elif self.fair_concern == 'both': self.fair_metric_name = 'equal_odds' elif self.fair_impact =="significant" or self.fair_impact == "selective" : if self.fair_concern == 'inclusive' : self.fair_metric_name = 'for_parity' elif self.fair_concern == 'eligible': self.fair_metric_name = 'fdr_parity' elif self.fair_concern == 'both': self.fair_metric_name = 'calibration_by_group' return self.fair_metric_name def get_prob_calibration_results(self): """ Gets the probability calibration results Returns ------------ a dictionary with below keys and values: 'prob_true': the ground truth values split into 10 bins from 0 to 1 'prob_pred': the mean predicted probability in each bin 'score': the brier loss score """ if self.evaluate_status_cali == True: return self.perf_metric_obj.result.get("calibration_curve") else: return None def get_perf_metrics_results(self): """ Gets the performance metrics results Returns ------------ a dictionary with keys as the metric name and values as the metric value together with confidence interval """ if self.evaluate_status == 1: return self.perf_metric_obj.result.get("perf_metric_values") else: return None def get_fair_metrics_results(self): """ Gets the fair metrics results Returns ------------ a dictionary with keys as the metric name and values as the metric value together with confidence interval """ if self.evaluate_status == 1: result = {} for p_var in self.fair_metric_obj.result.keys(): 
result[p_var] = self.fair_metric_obj.result[p_var]['fair_metric_values'] return result else: return None def get_tradeoff_results(self): """ Gets the tradeoff results Returns ------------ a dictionary with below keys and values: protected variable name as key to split result values for each protected variable 'fair_metric_name': fairness metric name 'perf_metric_name': performance metric name 'fair': array of shape (n, n*) of fairness metric values 'perf': array of shape (n, n*) of performance metric values 'th_x': array of shape (n*, ) of thresholds on x axis 'th_y': array of shape (n*, ) of thresholds on y axis 'max_perf_point': maxiumn performance point on the grid 'max_perf_single_th': maxiumn performance point on the grid with single threshold 'max_perf_neutral_fair': maxiumn performance point on the grid with neutral fairness *n is defined by tradeoff_threshold_bins in config """ if self.tradeoff_status == 1: return self.tradeoff_obj.result else: return None def get_loo_results(self): """ Gets the leave one out analysis results Returns ------------ a dictionary with below keys and values: protected variable name as key to split fairness result on each protected variable protected variable name as key to denote the removed protected variable array values denote the performance metric value, fariness metric value, fairness conclusion and suggestion """ if self.feature_imp_status_loo == True: return self.feature_imp_values else: return None def get_correlation_analysis_results(self): """ Gets the correlation analysis results Returns ------------ a dictionary with below keys and values: 'feature_names': feature names for correlation analysis 'corr_values': correlation values according to feature names """ if self.feature_imp_status_corr == True: return self.correlation_output else: return None class NpEncoder(json.JSONEncoder): """ """ def default(self, obj): """ Parameters ------------ obj : object """ if isinstance(obj, np.integer): return int(obj) if 
isinstance(obj, np.floating): return float(obj) if isinstance(obj, np.ndarray): return obj.tolist() return super(NpEncoder, self).default(obj)
[ 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 4818, 8079, 198, 11748, 33918, 198, 6738, 11485, 22602, 13, 315, 879, 1330, 1635, 198, 6738, 11485, 4164, 10466, 13, 22043, 1108, 62, 4164, 10466, 1330, 70...
2.027979
43,569
from django.forms import ModelForm from apps.event.models import Event
[ 6738, 42625, 14208, 13, 23914, 1330, 9104, 8479, 198, 198, 6738, 6725, 13, 15596, 13, 27530, 1330, 8558 ]
3.944444
18
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities __all__ = ['ClientServiceAccountRoleArgs', 'ClientServiceAccountRole'] @pulumi.input_type @pulumi.input_type
[ 2, 19617, 28, 40477, 12, 23, 198, 2, 17202, 39410, 25, 428, 2393, 373, 7560, 416, 262, 21624, 12994, 24118, 687, 10290, 357, 27110, 5235, 8, 16984, 13, 17202, 198, 2, 17202, 2141, 407, 4370, 416, 1021, 4556, 345, 821, 1728, 345, 760...
3.519685
127
#!/usr/bin/env python # -*- coding: UTF-8 -*- import re from typing import Optional from base import BaseObject from base import FileIO class DBpediaTaxonomyExtractor(BaseObject): """ Extract latent 'is-a' hierarchy from unstructured text """ __isa_patterns = None __clause_patterns = None def __init__(self, input_text: str, is_debug: bool = False): """ Created: 7-Jan-2020 craig.trim@ibm.com * https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1706 Updated: 7-Feb-2020 craig.trim@ibm.com * moved dictionaries to CSV resources https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1837 """ BaseObject.__init__(self, __name__) if self.__isa_patterns is None: self.__isa_patterns = FileIO.file_to_lines_by_relative_path( "resources/config/dbpedia/patterns_isa.csv") self.__isa_patterns = [x.lower().strip() for x in self.__isa_patterns] if self.__clause_patterns is None: self.__clause_patterns = FileIO.file_to_lines_by_relative_path( "resources/config/dbpedia/patterns_clause.csv") self.__clause_patterns = [x.lower().strip() for x in self.__clause_patterns] self._input_text = input_text self._is_debug = is_debug @staticmethod def _remove_parens(input_text: str) -> str: """ Purpose: Remove parens Sample Input: A drug (/drɑːɡ/) is any substance Sample Output: A drug is any substance :return: text without parens """ if '(' not in input_text and ')' not in input_text: return input_text x = input_text.index('(') y = input_text.index(')') + 2 return f"{input_text[0:x]}{input_text[y:]}" @staticmethod def _remove_akas(input_text: str) -> str: """ Purpose: Remove AKA sections Sample Input: Lung cancer, also known as lung carcinoma, is a malignant lung tumor Sample Output: Lung cancer is a malignant lung tumor :return: text without AKA """ patterns = [', also known as ', ', or ', ', formerly known as'] for pattern in patterns: if pattern in input_text: x = input_text.index(pattern) y = input_text[:(x + len(pattern))].index(',') + x + len(pattern) + 4 input_text = 
f"{input_text[:x]}{input_text[y:]}" return input_text
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 41002, 12, 23, 532, 9, 12, 628, 198, 11748, 302, 198, 6738, 19720, 1330, 32233, 198, 198, 6738, 2779, 1330, 7308, 10267, 198, 6738, 2779, 1330, 9220, 9399, ...
2.041854
1,338
''' Trade classifications''' VALID_TRADE_CLASSIFICATIONS = [ "Ag", "Na", "In", "Ni", "Ri", "Po", "Wa", "De", "As", "Ic" ] class TradeClassification(): ''' Planetary trade classification''' @property def trade_classification(self): ''' Return own value''' return self.__trade_classification
[ 7061, 6, 9601, 1398, 6637, 7061, 6, 198, 198, 23428, 2389, 62, 5446, 19266, 62, 31631, 30643, 18421, 796, 685, 198, 220, 220, 220, 366, 10262, 1600, 198, 220, 220, 220, 366, 26705, 1600, 198, 220, 220, 220, 366, 818, 1600, 198, 220,...
2.220859
163
import pandas as pd import numpy as np from common.libs.neo2cos import find_songs INT_BITS = 32 MAX_INT = (1 << (INT_BITS - 1)) - 1 # Maximum Integer for INT_BITS def main(mode, input_cus=''): # df1 = pd.read_excel(Song_addr); """读取歌曲库""" # df2 = pd.read_excel(Cus_addr); """读取用户库""" # print (indi_list) # data = pd.DataFrame(df1) # 将所有歌曲信息放进一个dataframe中(几百首歌可以用,多了要想别的) # cus = pd.DataFrame(df2) # 同上 song, song_sec, song_rev = find_songs(input_cus) # init_data(Song_addr,Cus_addr) if mode == 1: rec_list = [] cursor = 1 while (cursor): try: cus_temp = cus.loc[cursor - 1].values # print(cus_temp) except: cursor = 0 else: cursor += 1 rec_list.append(recommend_one(data, cus_temp)) # print (rec_list)""" elif mode == 2: rec_list = recommend_one(song, song_sec, song_rev, input_cus) print(rec_list) else: return 0 return rec_list # 返回cus_id # print(main(2,[10,1,1,0,0,0,0,0]))
[ 11748, 19798, 292, 355, 279, 67, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 2219, 13, 8019, 82, 13, 710, 78, 17, 6966, 1330, 1064, 62, 82, 28079, 198, 198, 12394, 62, 26094, 50, 796, 3933, 198, 22921, 62, 12394, 796, 357, 16, ...
1.691377
661
#!/usr/bin/env python3 import secrets import re def len_pass(): """ function generate randomly the length of password, from 10 to 16 caracters """ while True: len_pass = secrets.randbelow(17) if len_pass >= 10: break return len_pass def get_password(len_password): """ function generates the password with the length gived like parameter. """ password = "" while len(password) <= len_password: # p1 and p2, positions generate randomly p1 = secrets.randbelow(4) p2 = secrets.randbelow(len(all_strings[p1])) if all_strings[p1][p2] not in password: password += all_strings[p1][p2] return password lower_strings = "abcdefghijklmnopqrstuvwxyz" upper_strings = "ABCDEFGHIJKLMNOPQRSTU" number_strings = "0123456789" symbol_strings = "!@#$%^*()[]{}?" regex = r"[a-z]{2,}[A-Z]{2,}[0-9]{2,}[!@#\$%\^\*\(\)\[\]\{\}\?]{2,}" all_strings = [] all_strings.append(lower_strings) all_strings.append(upper_strings) all_strings.append(number_strings) all_strings.append(symbol_strings) len_password = len_pass() print("Generating password...") while True: password = get_password(len_password) # checking if password matches with password requirements if re.search(regex, password) != None: break print(f"your password is: {password}") # saving the password in a file. with open("your_pass.txt","w", encoding="utf-8") as file: file.write(password) print("Your password saved in \"your_pass.txt\"") print("done!")
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 11748, 13141, 198, 11748, 302, 198, 198, 4299, 18896, 62, 6603, 33529, 198, 220, 220, 220, 37227, 198, 220, 220, 220, 2163, 7716, 15456, 262, 4129, 286, 9206, 11, 198, 220, 220, ...
2.440124
643
# Copyright 2017 AT&T Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from tempest import config from tempest.lib.common.utils import test_utils from tempest.lib import decorators from patrole_tempest_plugin import rbac_rule_validation from patrole_tempest_plugin.tests.api.network import rbac_base as base CONF = config.CONF
[ 2, 15069, 2177, 5161, 5, 51, 10501, 13, 198, 2, 1439, 6923, 33876, 13, 198, 2, 198, 2, 220, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 220, 220, 220, 407, 779...
3.350554
271
from datetime import date import boundaries boundaries.register('Brock wards', domain='Brock, ON', last_updated=date(2018, 11, 2), name_func=lambda f: 'Ward %s' % f.get('WARD'), id_func=boundaries.attr('WARD'), authority='Township of Brock', source_url='https://city-oshawa.opendata.arcgis.com/datasets/DurhamRegion::brock-ward-boundaries', licence_url='https://www.durham.ca/en/regional-government/resources/Documents/OpenDataLicenceAgreement.pdf', data_url='https://opendata.arcgis.com/datasets/f48be88029db4e959269cf1d0773998a_30.zip', encoding='iso-8859-1', extra={'division_id': 'ocd-division/country:ca/csd:3518039'}, )
[ 6738, 4818, 8079, 1330, 3128, 198, 198, 11748, 13215, 198, 198, 7784, 3166, 13, 30238, 10786, 33, 10823, 32710, 3256, 198, 220, 220, 220, 7386, 11639, 33, 10823, 11, 6177, 3256, 198, 220, 220, 220, 938, 62, 43162, 28, 4475, 7, 7908, ...
2.501873
267
from flask import Flask, render_template from flask_sqlalchemy import SQLAlchemy from time import sleep from cpu_load_generator import load_single_core import requests import urllib3 import json import logging import config import re import os REQUESTS_LIMIT = 20 DELAY_IN_SECONDS = 0.1 app = Flask(__name__) user = os.getenv('DB_LOGIN', default = config.DB_LOGIN) password = os.getenv('DB_PASSWORD', default = config.DB_PASSWORD) host = os.getenv('DB_HOST', default = config.DB_HOST) dbname = os.getenv('DB_NAME', default = config.DB_NAME) app.config['SQLALCHEMY_DATABASE_URI'] = \ f'mysql+pymysql://{user}:{password}@{host}/{dbname}' app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False db = SQLAlchemy(app) @app.route('/') @app.route('/health') @app.route('/cpu_load/<int:seconds>') @app.route('/cpu_load/') @app.route('/planet/<id>') @app.route('/clear_data') @app.route('/fill_data') if __name__ == '__main__': app.run(host='0.0.0.0', port=5000)
[ 6738, 42903, 1330, 46947, 11, 8543, 62, 28243, 198, 6738, 42903, 62, 25410, 282, 26599, 1330, 16363, 2348, 26599, 198, 6738, 640, 1330, 3993, 198, 6738, 42804, 62, 2220, 62, 8612, 1352, 1330, 3440, 62, 29762, 62, 7295, 198, 11748, 7007,...
2.494845
388
""" Basic script to take in insert size csv from shiver, where each row is 3 comma-separated values (insert size, number of that size, fraction). Returns the insert size at 0.05, 0.5, 0.95 percentiles, as well as the number of inserts >350 and the fraction of inserts that are >350bp. tanya.golubchik@bdi.ox.ac.uk October 2017 """ from __future__ import print_function import sys from os import path def get_insert_size_stats(instrm, thresh=350): """ Calculate insert size stats - values at .05/.5/.95 pc and number of inserts over a threshold size. """ cumsum = 0. v05, v50, v95 = '', '', '' n_thresh = 0 f_thresh = 0 for line in instrm: try: iz, n, frac = line.split(',') iz = int(iz) frac = float(frac) except ValueError: continue if iz > thresh: n_thresh += int(n) f_thresh += frac cumsum += frac if not v05 and (cumsum >= 0.05): v05 = iz if not v50 and (cumsum >= 0.5): v50 = iz if not v95 and (cumsum >= 0.95): v95 = iz return v05, v50, v95, n_thresh, f_thresh if __name__ == '__main__': if len(sys.argv) != 2 or not path.isfile(sys.argv[-1]): sys.stdout.write(',,,,\n') sys.stderr.write('Usage: {0} MyInsertSizeStats.csv\n'.format(sys.argv[0])) sys.exit(1) with open(sys.argv[1]) as instrm: v05, v50, v95, n_thresh, f_thresh = get_insert_size_stats(instrm, thresh=350) sys.stdout.write('{0},{1},{2},{3},{4}\n'.format(v05, v50, v95, n_thresh, f_thresh))
[ 37811, 198, 198, 26416, 4226, 284, 1011, 287, 7550, 2546, 269, 21370, 422, 427, 1428, 11, 810, 1123, 5752, 198, 271, 513, 39650, 12, 25512, 515, 3815, 357, 28463, 2546, 11, 1271, 286, 326, 2546, 11, 13390, 737, 198, 198, 35561, 262, ...
2.078507
777
''' Launch a RevitLookup "Snoop Objects" dialog for elements from the RPS shell = Introduction = You _do_ have `RevitLookup` installed, don't you? This is _the_ tool for introspecting model elements. You can find it in the Revit SDK folder, along with the source code. The plugin does many things, among which I most often use the "Snoop Current Selection..." feature. This pops up a nice dialog that lets you snoop around in the selected elements properties. See here for more: https://github.com/jeremytammik/RevitLookup I find that RevitLookup and RevitPythonShell complement each other rather well. Except, while inside the shell, you can't start any other plugins, so you can't access the snoop functionality. Unless... = Details = The module `revitsnoop` provides a mechanism to hook into the RevitLookup plugin and start it with an object of your choice. Example: {{{ >>>import revitsnoop >>>snooper = revitsnoop.RevitSnoop(__revit__) >>>snooper.snoop(doc.ProjectInformation) }}} This will pop up a dialog for snooping the documents project information. You can of course snoop any `Element` object. ''' import clr from Autodesk.Revit.DB import ElementSet
[ 7061, 6, 201, 198, 38296, 257, 5416, 270, 8567, 929, 366, 50, 3919, 404, 35832, 1, 17310, 329, 4847, 422, 262, 371, 3705, 7582, 201, 198, 201, 198, 28, 22395, 796, 201, 198, 201, 198, 1639, 4808, 4598, 62, 423, 4600, 18009, 270, 8...
3.206349
378
# Copyright 2016 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import print_function import mock import reactive.sdn_charm_handlers as handlers import charms_openstack.test_utils as test_utils
[ 2, 15069, 1584, 19507, 605, 12052, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, 2, 921, 743...
3.825
200
# meds/kernel.py # # """ central piece of code that loads the plugins and starts services. """ from meds.utils.cli import hello, set_completer, enable_history, termsetup from meds.utils.misc import include, locked from meds.utils.trace import get_exception from meds.utils.name import name, sname, mname from meds.utils.join import j from meds.scheduler import Scheduler from meds.object import Object, OOL from meds.engine import Engine from meds.event import Event from meds import __version__ import meds.core import importlib import logging import pkgutil import termios import types import time import tty import sys
[ 2, 1117, 82, 14, 33885, 13, 9078, 198, 2, 198, 2, 198, 198, 37811, 4318, 3704, 286, 2438, 326, 15989, 262, 20652, 290, 4940, 2594, 13, 37227, 628, 198, 6738, 1117, 82, 13, 26791, 13, 44506, 1330, 23748, 11, 900, 62, 785, 1154, 353...
3.328042
189
import re import shutil from pathlib import Path from typing import Optional from functools import wraps from time import time import boto3 from botocore.client import Config from sm.browser.mz_search import S3File
[ 11748, 302, 198, 11748, 4423, 346, 198, 6738, 3108, 8019, 1330, 10644, 198, 6738, 19720, 1330, 32233, 198, 6738, 1257, 310, 10141, 1330, 27521, 198, 6738, 640, 1330, 640, 198, 198, 11748, 275, 2069, 18, 198, 6738, 10214, 420, 382, 13, ...
3.539683
63
# -*- coding: utf-8 -*- # # Unless explicitly stated otherwise all files in this repository are licensed # under the Apache 2 License. # # This product includes software developed at Datadog # (https://www.datadoghq.com/). # # Copyright 2018 Datadog, Inc. # """create_pull_request_card.py Creates a trello card based on GitHub pull request data. """ import textwrap from . import CreateTrelloCard from ..services import PullRequestService class CreatePullRequestCard(CreateTrelloCard): """A class that creates a trello card on a board.""" def __init__(self): """Initializes a task to create a pull request trello card.""" super().__init__() self._pull_request_service = PullRequestService() def _card_body(self): """Concrete helper method. Internal helper to format the trello card body, based on the data passed in. Returns: str: the markdown template for the Trello card created. """ return textwrap.dedent( f""" # GitHub Pull Request Opened By Community Member ___ - Pull Request link: [{self._title}]({self._url}) - Opened by: [{self._user}]({self._user_url}) ___ ### Pull Request Body ___ """ ) + self._body def _persist_card_to_database(self, card): """Concrete helper method. Internal helper to save the record created to the database. Args: card (trello.Card): An object representing the trello card created. Returns: None """ self._pull_request_service.create( name=self._title, url=self._url, github_pull_request_id=self._id, repo_id=self._repo_id, trello_board_id=card.board_id, trello_card_id=card.id, trello_card_url=card.url, trello_list_id=card.list_id )
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 2, 198, 2, 17486, 11777, 5081, 4306, 477, 3696, 287, 428, 16099, 389, 11971, 198, 2, 739, 262, 24843, 362, 13789, 13, 198, 2, 198, 2, 770, 1720, 3407, 3788, 416...
2.313817
854
#!/usr/bin/env python3 from setuptools import setup, find_packages requirements = ['lz4tools==1.3.1.2', 'numpy', 'py==1.4.31', 'pytest==3.0.3'] setup(name='slisonner', version='0.7.9', description='Habidatum Chronotope Slison encode/decode utility', long_description='', author='Nikita Pestrov', author_email='nikita.pestrov@habidatum.com', maintainer='Nikita Pestrov', maintainer_email='nikita.pestrov@habidatum.com', packages=find_packages(), install_requires=requirements, platforms='any', classifiers=['Programming Language :: Python :: 3.4'])
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 6738, 900, 37623, 10141, 1330, 9058, 11, 1064, 62, 43789, 628, 198, 8897, 18883, 796, 37250, 75, 89, 19, 31391, 855, 16, 13, 18, 13, 16, 13, 17, 3256, 198, 6, 77, 32152, ...
2.404669
257
#!/usr/bin/python3 from tools import * from sys import argv from os.path import join import h5py import matplotlib.pylab as plt import numpy as np from time import sleep if len(argv) > 1: pathToSimFolder = argv[1] else: pathToSimFolder = "../data/" parameters, electrodes = readParameters(pathToSimFolder) fileOpenTries = 0 while fileOpenTries < 50: fileOpenTries += 1 try: with h5py.File(join(pathToSimFolder, "data.hdf5"), "r") as dataFile: voltages = np.array(dataFile["/voltages"][:]) optEnergy = np.array(dataFile["/optEnergy"][:]) while True: try: generations = np.array(dataFile["/generation"][:]) mode = "genetic" break except KeyError: pass try: basinAccepted = np.array(dataFile["/basinAccepted"][:], dtype=int) accepted = basinAccepted.astype(bool) notAccepted = np.invert(accepted) mode = "basinHop" break except KeyError: pass mode = "MC" try: accepted = np.array(dataFile["/accepted"][:], dtype=bool) notAccepted = np.invert(accepted) except KeyError: accepted = np.ones( optEnergy.shape, dtype=bool ) # support for deprecated version notAccepted = np.invert(accepted) break break except OSError as e: if "No such file" in repr(e): raise e else: print(f"could not open file. 
try number {fileOpenTries}") sleep(1) cotrolElectrodeIndices = list(range(0, len(electrodes))) cotrolElectrodeIndices.remove(parameters["outputElectrode"]) cotrolElectrodeIndices.remove(parameters["inputElectrode1"]) cotrolElectrodeIndices.remove(parameters["inputElectrode2"]) controlVoltages = voltages[:, cotrolElectrodeIndices] if mode == "MC": distance = 0 meanRange = 1000 displace = [] for i in range(int(distance + meanRange / 2), controlVoltages.shape[0]): mean = np.mean( controlVoltages[ int(i - distance - meanRange / 2) : int(i - distance + meanRange / 2), : ], axis=0, ) # displace.append(np.sqrt(np.sum((controlVoltages[i])**2))) displace.append(np.sqrt(np.sum((mean - controlVoltages[i]) ** 2))) MSD = np.sum((controlVoltages[0] - controlVoltages[:]) ** 2, axis=1) fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2)) ax.plot(range(len(MSD)), MSD, "r-", label="MSD") ax2 = ax.twinx() ax2.plot( range(int(distance + meanRange / 2), controlVoltages.shape[0]), displace, "k-", label="displacement", ) ax.legend() ax2.legend() ax.set_xlabel("step") ax.set_ylabel("displacement") plt.savefig(join(pathToSimFolder, "displacement.png"), bbox_inches="tight", dpi=300) # plt.show() plt.close(fig) fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2)) ax.plot(np.maximum.accumulate(optEnergy), color="darkorange", label="best") ax.plot( np.arange(optEnergy.shape[0])[notAccepted[:, 0]], optEnergy[notAccepted], ".", ms=1, color="darkred", label="not accepted", zorder=10, ) ax.plot( np.arange(optEnergy.shape[0])[accepted[:, 0]], optEnergy[accepted], ".", ms=1, color="darkgreen", label="accepted", zorder=10, ) # ax.set_xlim(-0.15,0.65) ax.set_ylim(0.15, 1.05) ax.set_xlabel("iteration") ax.set_ylabel(r"$\mathcal{F}$") ax.legend() plt.savefig(join(pathToSimFolder, "convergence.png"), bbox_inches="tight", dpi=300) # plt.show() plt.close(fig) fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2)) ax.plot(np.maximum.accumulate(optEnergy), color="darkorange", 
label="best") ax.plot( np.arange(optEnergy.shape[0])[notAccepted[:, 0]], optEnergy[notAccepted], ".", ms=1, color="darkred", label="not accepted", zorder=10, ) ax.plot( np.arange(optEnergy.shape[0])[accepted[:, 0]], optEnergy[accepted], ".", ms=1, color="darkgreen", label="accepted", zorder=10, ) ax2 = ax.twinx() ax.set_zorder(ax2.get_zorder() + 1) ax.patch.set_visible(False) ax2.plot( range(int(distance + meanRange / 2), controlVoltages.shape[0]), displace, "k-", label="displacement", ) ax.set_ylim(0.15, 1.05) ax.set_xlabel("iteration") ax.set_ylabel(r"$\mathcal{F}$") ax2.set_ylabel("displacement") # ax.legend([line],[line.get_label()]) # ax2.legend() plt.savefig( join(pathToSimFolder, "convergence_displacement.png"), bbox_inches="tight", dpi=300, ) # plt.show() plt.close(fig) ############################### if mode == "genetic": distance = 0 meanRange = 1000 displace = [] for i in range(int(distance + meanRange / 2), controlVoltages.shape[0]): mean = np.mean( controlVoltages[ int(i - distance - meanRange / 2) : int(i - distance + meanRange / 2), : ], axis=0, ) # displace.append(np.sqrt(np.sum((controlVoltages[i])**2))) displace.append(np.sqrt(np.sum((mean - controlVoltages[i]) ** 2))) MSD = np.sum((controlVoltages[0] - controlVoltages[:]) ** 2, axis=1) fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2)) ax.plot(range(len(MSD)), MSD, "r-", label="MSD") ax2 = ax.twinx() ax2.plot( range(int(distance + meanRange / 2), controlVoltages.shape[0]), displace, "k-", label="displacement", ) ax.legend() ax2.legend() ax.set_xlabel("step") ax.set_ylabel("displacement") plt.savefig(join(pathToSimFolder, "displacement.png"), bbox_inches="tight", dpi=300) # plt.show() plt.close(fig) genBest = np.empty(optEnergy.shape) for i in range(int(optEnergy.shape[0] / 25)): genBest[i * 25 : (i + 1) * 25] = max(optEnergy[i * 25 : (i + 1) * 25]) fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2)) ax.plot(np.maximum.accumulate(optEnergy), color="darkorange", label="best") 
ax.plot(optEnergy, ".", ms=1, color="darkgreen", label="all") ax.plot(genBest, color="darkblue", label="gen best") # ax.set_xlim(-0.15,0.65) ax.set_ylim(0.15, 1.05) ax.set_xlabel("iteration") ax.set_ylabel(r"$\mathcal{F}$") ax.legend() plt.savefig(join(pathToSimFolder, "convergence.png"), bbox_inches="tight", dpi=300) # plt.show() plt.close(fig) fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2)) ax.plot(np.maximum.accumulate(optEnergy), color="darkorange", label="best") ax.plot(optEnergy, ".", ms=1, color="darkgreen", label="all") ax.plot(genBest, color="darkblue", label="gen best") ax2 = ax.twinx() ax.set_zorder(ax2.get_zorder() + 1) ax.patch.set_visible(False) ax2.plot( range(int(distance + meanRange / 2), controlVoltages.shape[0]), displace, "k-", label="displacement", ) ax.set_ylim(0.15, 1.05) ax.set_xlabel("iteration") ax.set_ylabel(r"$\mathcal{F}$") ax2.set_ylabel("displacement") # ax.legend([line],[line.get_label()]) # ax2.legend() plt.savefig( join(pathToSimFolder, "convergence_displacement.png"), bbox_inches="tight", dpi=300, ) # plt.show() plt.close(fig) ############################### if mode == "basinHop": fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2)) ax.plot(np.maximum.accumulate(optEnergy), color="darkorange", label="best") ax.plot( np.arange(optEnergy.shape[0])[notAccepted[:, 0]], optEnergy[notAccepted], ".", ms=1, color="darkred", label="not accepted", zorder=10, ) ax.plot( np.arange(optEnergy.shape[0])[accepted[:, 0]], optEnergy[accepted], ".", ms=1, color="darkgreen", label="accepted", zorder=10, ) buff = np.where(basinAccepted[:, 0] == 2)[0] basinChanges = np.array([buff, np.zeros(buff.shape)], dtype=int) buff = np.where(basinAccepted[:, 0] == 3)[0] basinChanges = np.append( basinChanges, np.array([buff, np.ones(buff.shape)], dtype=int), axis=1 ) basinChanges = basinChanges[:, np.argsort(basinChanges[0])] if basinChanges.shape[1] > 0: for i in range(basinChanges.shape[1]): if basinChanges[1, i]: 
ax.axvline(basinChanges[0, i], color="darkgreen", zorder=-1) else: ax.axvline(basinChanges[0, i], color="darkred", zorder=-1) ax.plot( np.arange(0, basinChanges[0, 0]), np.maximum.accumulate(optEnergy[: basinChanges[0, 0]]), color="darkblue", label="basin best", ) for i in range(1, basinChanges.shape[1]): ax.plot( np.arange(basinChanges[0, i - 1], basinChanges[0, i]), np.maximum.accumulate( optEnergy[basinChanges[0, i - 1] : basinChanges[0, i]] ), color="darkblue", ) ax.plot( np.arange(basinChanges[0, -1], len(optEnergy)), np.maximum.accumulate(optEnergy[basinChanges[0, -1] :]), color="darkblue", ) # ax.set_xlim(-0.15,0.65) ax.set_ylim(0.15, 1.05) ax.set_xlabel("iteration") ax.set_ylabel(r"$\mathcal{F}$") ax.legend() plt.savefig(join(pathToSimFolder, "convergence.png"), bbox_inches="tight", dpi=300) # plt.show() plt.close(fig) fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2)) ax.plot(np.maximum.accumulate(optEnergy), color="darkorange", label="best") ax.plot( np.arange(optEnergy.shape[0])[notAccepted[:, 0]], optEnergy[notAccepted], ".", ms=1, color="darkred", label="not accepted", zorder=10, ) ax.plot( np.arange(optEnergy.shape[0])[accepted[:, 0]], optEnergy[accepted], ".", ms=1, color="darkgreen", label="accepted", zorder=10, ) if basinChanges.shape[1] > 0: for i in range(basinChanges.shape[1]): if basinChanges[1, i]: ax.axvline(basinChanges[0, i], color="darkgreen", zorder=-1) else: ax.axvline(basinChanges[0, i], color="darkred", zorder=-1) ax.plot( np.arange(0, basinChanges[0, 0]), np.maximum.accumulate(optEnergy[: basinChanges[0, 0]]), color="darkblue", label="basin best", ) for i in range(1, basinChanges.shape[1]): ax.plot( np.arange(basinChanges[0, i - 1], basinChanges[0, i]), np.maximum.accumulate( optEnergy[basinChanges[0, i - 1] : basinChanges[0, i]] ), color="darkblue", ) ax.plot( np.arange(basinChanges[0, -1], len(optEnergy)), np.maximum.accumulate(optEnergy[basinChanges[0, -1] :]), color="darkblue", ) ax2 = ax.twinx() 
ax.set_zorder(ax2.get_zorder() + 1) ax.patch.set_visible(False) # calc last basin best basinBestIdx = np.argmax(optEnergy[0 : basinChanges[0, 0]]) basinBestVoltages = controlVoltages[basinBestIdx] # ax2.plot(np.arange(0,basinChanges[0,0]), np.sqrt(np.sum((controlVoltages[0:basinChanges[0,0]] - basinBestVoltages)**2, axis = 1 )) ,color="darkblue") for i in range(1, basinChanges.shape[1]): ax2.plot( np.arange(basinChanges[0, i - 1], basinChanges[0, i]), np.sqrt( np.sum( ( controlVoltages[basinChanges[0, i - 1] : basinChanges[0, i]] - basinBestVoltages ) ** 2, axis=1, ) ), color="k", ) # calc last basin best if basinChanges[1, i]: basinBestIdx = ( np.argmax(optEnergy[basinChanges[0, i - 1] : basinChanges[0, i]]) + basinChanges[0, i - 1] ) basinBestVoltages = controlVoltages[basinBestIdx] ax2.plot( np.arange(basinChanges[0, -1], len(optEnergy)), np.sqrt( np.sum( (controlVoltages[basinChanges[0, -1] :] - basinBestVoltages) ** 2, axis=1, ) ), color="k", ) ax.set_ylim(0.15, 1.05) ax.set_xlabel("iteration") ax.set_ylabel(r"$\mathcal{F}$") ax2.set_ylabel("dist") # ax.legend([line],[line.get_label()]) # ax2.legend() plt.savefig( join(pathToSimFolder, "convergence_dist.png"), bbox_inches="tight", dpi=300 ) # plt.show() plt.close(fig)
[ 2, 48443, 14629, 14, 8800, 14, 29412, 18, 198, 6738, 4899, 1330, 1635, 198, 6738, 25064, 1330, 1822, 85, 198, 6738, 28686, 13, 6978, 1330, 4654, 198, 198, 11748, 289, 20, 9078, 198, 11748, 2603, 29487, 8019, 13, 79, 2645, 397, 355, ...
1.928868
6,973
from abc import ABC, abstractmethod from planteye_vision.data_chunks.data_chunk_data import DataChunkData from planteye_vision.data_chunks.metadata_chunk import MetadataChunk, MetadataChunkData from planteye_vision.data_chunks.data_chunk_status import DataChunkStatus
[ 6738, 450, 66, 1330, 9738, 11, 12531, 24396, 198, 6738, 4618, 25379, 62, 10178, 13, 7890, 62, 354, 14125, 13, 7890, 62, 354, 2954, 62, 7890, 1330, 6060, 1925, 2954, 6601, 198, 6738, 4618, 25379, 62, 10178, 13, 7890, 62, 354, 14125, ...
3.253012
83
# Name: authserver.py # # Description: Validates a one-time-passocde that is included in the header. # Cases: # [1] Valid OTP - belongs to a known application, not used # [2] Valid OTP - belongs to a known application, but already used # [3] Invalid OTP - not recgnised # [4] No OTP # # #!flask/bin/python from flask import Flask, jsonify, request from flask_api import status import requests OTPCodeTable=dict() #Structure to hold an OTP code for an authorized application app = Flask(__name__) #Placeholder - return: # True if this token should be expired # False if this token can continue to be used #Simpistic function to get an unused code from the pool @app.route('/', methods=['GET']) @app.route('/auth', methods=['GET']) @app.after_request OTPCodeTable=initializeTestCodes() if __name__ == '__main__': requests.packages.urllib3.disable_warnings() app.run(host='0.0.0.0',port=80)
[ 2, 6530, 25, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 6284, 15388, 13, 9078, 198, 2, 198, 2, 12489, 25, 220, 220, 220, 220, 220, 3254, 37051, 257, 530, 12, 2435, 12, 6603, 420, 2934, 326, 318, 3017, 287, 262, ...
2.447115
416