content
stringlengths
0
1.05M
origin
stringclasses
2 values
type
stringclasses
2 values
# Bitcoin (BTC) daemon entry point: subclasses the shared BaseDaemon,
# wires it into an aiohttp web application and starts serving.
import electrum
from aiohttp import web

from base import BaseDaemon


class BTCDaemon(BaseDaemon):
    # Coin identifier used by BaseDaemon.
    name = "BTC"
    # The electrum library module this daemon drives.
    electrum = electrum
    # Port the daemon listens on when none is configured.
    DEFAULT_PORT = 5000


daemon = BTCDaemon()
app = web.Application()
daemon.configure_app(app)
daemon.start(app)
nilq/baby-python
python
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models def migrate_HeatSensor(apps, schema_editor): HeatSensor = apps.get_model('heatcontrol', 'HeatSensor') HeatControl = apps.get_model('heatcontrol', 'HeatControl') HeatControlProfile = apps.get_model('heatcontrol', 'HeatControlProfile') for hs in HeatSensor.objects.select_related('sensor', 'daytype').all(): hc, created = HeatControl.objects.get_or_create(sensor=hs.sensor, defaults={'kp': 1, 'ki': 1, 'kd': 1}) HeatControlProfile.objects.create(heatcontrol=hc, daytype=hs.daytype, start=hs.start, end=hs.end, target_temp=hs.target_temp) def migrate_HeatSensorOverride(apps, schema_editor): HeatSensorOverride = apps.get_model('heatcontrol', 'HeatSensorOverride') HeatControl = apps.get_model('heatcontrol', 'HeatControl') HeatControlOverride = apps.get_model('heatcontrol', 'HeatControlOverride') for hso in HeatSensorOverride.objects.select_related('sensor').all(): hc, created = HeatControl.objects.get_or_create(sensor=hso.sensor, defaults={'kp': 1, 'ki': 1, 'kd': 1}) HeatControlOverride.objects.create(heatcontrol=hc, start=hso.start, end=hso.end, target_temp=hso.target_temp) class Migration(migrations.Migration): dependencies = [ ('heatcontrol', '0003_auto_20161204_0620'), ] operations = [ migrations.RunPython(migrate_HeatSensor), migrations.RunPython(migrate_HeatSensorOverride), ]
nilq/baby-python
python
# Training-data preparation for U-Net organ segmentation: CT intensity
# windowing, bodynavigation feature extraction, and per-slice .npy export
# consumed lazily through tf.data generators.
from __future__ import print_function
from loguru import logger
import io3d
import io3d.datasets
import sed3
import numpy as np
import matplotlib.pyplot as plt

logger.enable("io3d")
logger.disable("io3d")

import matplotlib.pyplot as plt
from pathlib import Path
import bodynavigation
import exsu
import sys
import os
import tensorflow as tf
import os
from skimage.transform import resize
from skimage.io import imsave
import numpy as np
from skimage.segmentation import mark_boundaries
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, Conv2DTranspose
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import History
from skimage.exposure import rescale_intensity
from skimage import io

# from data import load_train_data, load_test_data
from sklearn.utils import class_weight
from typing import Optional
from numbers import Number


def window(
        data3d: np.ndarray,
        vmin: Optional[Number] = None,
        vmax: Optional[Number] = None,
        center: Optional[Number] = None,
        width: Optional[Number] = None,
        vmin_out: Optional[Number] = 0,
        vmax_out: Optional[Number] = 255,
        dtype=np.uint8) -> np.ndarray:
    """
    Rescale input ndarray and trim the outlayers.

    :param data3d: ndarray with numbers
    :param vmin: minimal input value. Skipped if center and width is given.
    :param vmax: maximal input value. Skipped if center and width is given.
    :param center: Window center
    :param width: Window width
    :param vmin_out: Output mapping minimal value
    :param vmax_out: Output mapping maximal value
    :param dtype: Output dtype
    :return:
    """
    # NOTE(review): truthiness test — a center or width of exactly 0 is
    # treated as "not given"; and when neither (vmin, vmax) nor
    # (center, width) is supplied, the k computation below raises TypeError
    # on None operands.  TODO confirm intended fallback.
    if width and center:
        vmin = center - (width / 2.)
        vmax = center + (width / 2.)
    # logger.debug(f"vmin={vmin}, vmax={vmax}")
    # Linear map [vmin, vmax] -> [vmin_out, vmax_out], then clip.
    k = float(vmax_out - vmin_out) / (vmax - vmin)
    q = vmax_out - k * vmax
    # logger.debug(f"k={k}, q={q}")
    data3d_out = data3d * k + q
    data3d_out[data3d_out > vmax_out] = vmax_out
    data3d_out[data3d_out < vmin_out] = vmin_out
    return data3d_out.astype(dtype)


import h5py
import tensorflow as tf


class generator:
    # Callable factory yielding per-slice .npy arrays; used as the source of
    # tf.data.Dataset.from_generator in get_dataset_loaders().
    def __init__(self, label, organ_label, is_mask=False):
        self.label = label
        self.organ_label = organ_label
        self.is_mask = is_mask

    def __call__(self):
        # Masks and images live in sibling directories named by label/organ.
        fnimgs = Path(f'mask_{self.label}_{self.organ_label}') if self.is_mask else Path(f'img_{self.label}')
        # NOTE(review): Path.glob() returns a generator, so len() raises
        # TypeError here — probably meant len(list(fnimgs.glob("*.npy"))).
        for indx in range(len(fnimgs.glob("*.npy"))):
            fnimg = fnimgs / f"{indx:06d}.npy"
            img = np.load(fnimg)
            yield img
            # with h5py.File(self.file, 'r') as hf:
            #     for im in hf["train_img"]:
            #         imgs_train = np.load(f'imgs_train_{experiment_label}.npy')
            #         yield im


def load_train_data(experiment_label):
    # Load the monolithic .npy training arrays for one experiment.
    imgs_train = np.load(f'imgs_train_{experiment_label}.npy')
    masks_train = np.load(f'masks_train_{experiment_label}.npy')
    return imgs_train, masks_train


def load_test_data(experiment_label):
    # Load the monolithic .npy test arrays for one experiment.
    imgs_test = np.load(f'imgs_test_{experiment_label}.npy')
    masks_test = np.load(f'masks_test_{experiment_label}.npy')
    return imgs_test, masks_test


def get_dataset_loaders(label, organ_label):
    # Build lazy (images, masks) tf.data datasets of 512x512x3 uint8 slices.
    imgs = tf.data.Dataset.from_generator(
        generator(label, organ_label, is_mask=False),
        tf.uint8,
        tf.TensorShape([512, 512, 3]))
    masks = tf.data.Dataset.from_generator(
        generator(label, organ_label, is_mask=True),
        tf.uint8,
        tf.TensorShape([512, 512, 3]))
    return imgs, masks


def create_train_data(label="train", datasets=None, dataset_label="", organ_label="rightkidney", skip_if_exists=True):
    """Export per-slice feature stacks and organ masks as .npy files.

    For each configured dataset volume: reads the organ segmentation and the
    CT volume via io3d, windows the CT data (center=40, width=400), computes
    bodynavigation distance features, and saves one file per axial slice into
    ``img_{label}_{dataset_label}/`` and
    ``mask_{label}_{dataset_label}_{organ_label}/``.
    """
    # fnimgs = f'imgs_{label}_{dataset_label}.npy'
    # fnmasks =f'masks_{label}_{dataset_label}.npy'
    fnimgs = Path(f'img_{label}_{dataset_label}')
    fnmasks = Path(f'mask_{label}_{dataset_label}_{organ_label}')
    # One file per slice: e.g. "3Dircadb1_01_00042.npy"
    fnpattern = "{dataset}_{i:02d}_{k:05d}.npy"
    p_imgs = fnimgs
    p_masks = fnmasks
    # if p_imgs.exists() and p_imgs.is_dir() and p_masks.exists() and p_masks.is_dir() and skip_if_exists:
    #     logger.info("Files exists. Skipping creation and loading instead.")
    #     # imgs_train = np.load(fnimgs)
    #     # masks_train = np.load(fnmasks)
    if True:
        # imgs_train = []
        # masks_train = []
        if not datasets:
            # Default: a single 3Dircadb1 volume (index 1).
            datasets = {
                "3Dircadb1": {"start": 1, "stop": 2},
                # "sliver07": {"start":0, "stop":0}
            }
        indx = 0
        for dataset in datasets:
            for i in range(
                datasets[dataset]["start"], datasets[dataset]["stop"]
            ):
                logger.debug(f"{dataset} {i}")
                fn0 = fnpattern.format(dataset=dataset, i=i, k=0)
                if not (fnmasks / fn0).exists():
                    # logger.info(f"File {fn0} exists. Skipping")
                    # continue
                    segm3dp = io3d.datasets.read_dataset(dataset, organ_label, i)
                    if segm3dp is None:
                        print(f" Organ label '{organ_label}' does not exist. Skipping.")
                        continue
                    for k in range(segm3dp.data3d.shape[0]):
                        # NOTE(review): `segm3d` is not defined yet in this
                        # branch (it is assigned only in the image branch
                        # below), so this raises NameError on a fresh run —
                        # likely should be segm3dp.data3d[k].  TODO confirm.
                        np.save(fnmasks / fnpattern.format(dataset=dataset, i=i, k=k), segm3d[k])
                if not (fnimgs / fn0).exists():
                    data3dp = io3d.datasets.read_dataset(dataset, "data3d", i)
                    # Soft-tissue window: center 40 HU, width 400 HU -> uint8.
                    data3d = window(data3dp["data3d"], center=40, width=400, vmin_out=0, vmax_out=255, dtype=np.uint8)
                    segm3d = segm3dp["data3d"]
                    bn = bodynavigation.body_navigation.BodyNavigation(data3dp["data3d"], voxelsize_mm=data3dp["voxelsize_mm"])
                    # Per-voxel anatomical distance features used as extra
                    # input channels alongside the windowed CT data.
                    feature_list = [
                        data3d,
                        bn.dist_to_sagittal(),
                        bn.dist_coronal(),
                        bn.dist_to_diaphragm_axial(),
                        bn.dist_to_surface(),
                    ]
                    # print(f"shapes: data3d={data3d.shape}, dst={dst.shape}")
                    # for j in range(0, data3d.shape[0]):
                    #     imgs_train.append(np.stack([data3d[j, :, :], feature_list[0][j, :, :]], axis=2))
                    #     masks_train.append(segm3d[j, :, :])
                    all_features = expand_dims_and_concat(feature_list, 3)
                    for k in range(all_features.shape[0]):
                        # mkdir is idempotent but could be hoisted out of the loop.
                        fnimgs.mkdir(parents=True, exist_ok=True)
                        fnmasks.mkdir(parents=True, exist_ok=True)
                        np.save(fnimgs / fnpattern.format(dataset=dataset, i=i, k=k), all_features[k])
                    indx += 1
                    logger.debug(f"i={i}, {all_features.shape}")
    # imgs_train = np.array(imgs_train, dtype=np.int16)
    # masks_train = np.array(masks_train, dtype=np.uint8)
    # np.save(fnimgs, imgs_train)
# (commented-out tail of the old monolithic create_train_data saver)
# np.save(fnmasks, masks_train)
# print(f'Saving to .npy files done. imgs.shape={imgs_train.shape}, masks.shape={masks_train.shape}')
# return imgs_train, masks_train


def dice_coef(y_true, y_pred):
    """Soerensen-Dice coefficient on Keras backend tensors, smoothed by +1
    so the metric is defined when both masks are empty."""
    smooth = 1.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)


def dice_coef_loss(y_true, y_pred):
    # Negated Dice: minimizing the loss maximizes mask overlap.
    return -dice_coef(y_true, y_pred)

# The functions return our metric and loss
# %%
# one_weight = (1-num_of_ones)/(num_of_ones + num_of_zeros)
# zero_weight = (1-num_of_zeros)/(num_of_ones + num_of_zeros)


def weighted_binary_crossentropy(zero_weight, one_weight):
    """Factory returning a BCE loss where each pixel's contribution is
    weighted by its class (balances foreground vs. background)."""
    def weighted_binary_crossentropy(y_true, y_pred):
        b_ce = K.binary_crossentropy(y_true, y_pred)
        # weighted calc
        weight_vector = y_true * one_weight + (1 - y_true) * zero_weight
        weighted_b_ce = weight_vector * b_ce
        return K.mean(weighted_b_ce)
    return weighted_binary_crossentropy


def save_segmentations(imgs_test, imgs_mask_test, pred_dir='preds'):
    # Write boundary-overlay PNGs of thresholded (>0.5) predictions.
    print(f"shapes={imgs_test.shape},{imgs_mask_test.shape}")
    if not os.path.exists(pred_dir):
        os.mkdir(pred_dir)
    for k in range(len(imgs_mask_test)):
        a = rescale_intensity(imgs_test[k][:, :], out_range=(-1, 1))
        b = (imgs_mask_test[k][:, :] > 0.5).astype('uint8')
        io.imsave(os.path.join(pred_dir, f'{k:05}_pred.png'), mark_boundaries(a, b))

# nb_channels = 2


class UNetTrainer():
    """Builds, trains and evaluates a 2-D U-Net for organ segmentation."""

    def __init__(self, nb_channels, img_rows, img_cols, experiment_label, organ_label):
        # nb_channels: input feature channels; img_rows/img_cols: network
        # input size; experiment_label: tag used in weight/log file names.
        self.nb_channels = nb_channels
        self.img_rows = img_rows
        self.img_cols = img_cols
        self.experiment_label = experiment_label
        self.organ_label = organ_label
        pass

    def get_unet(self, weights=None):
        """Build and compile the U-Net.

        :param weights: [zero_weight, one_weight] for the weighted BCE loss;
            defaults to values precomputed for one dataset (see comment).
        """
        if weights is None:
            weights = [0.05956, 3.11400]  # {0: 0.5956388648542532, 1: 3.1140000760253925}
        inputs = Input((self.img_rows, self.img_cols, self.nb_channels))
        # Contracting path: 32 -> 64 -> 128 -> 256 filters with 2x2 pooling.
        conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)
        conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
        conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
        conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
        conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
        conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
        conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)
        conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)
        pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
        # Bottleneck.
        conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)
        conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)
        # Expanding path with skip connections from the contracting path.
        up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)
        conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(up6)
        conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)
        up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)
        conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(up7)
        conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)
        up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
        conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(up8)
        conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)
        up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
        conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9)
        conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)
        # Single-channel sigmoid output: per-pixel foreground probability.
        conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)
        # conv10 = Conv2D(2, (1, 1), activation='softmax')(conv9)
        model = Model(inputs=[inputs], outputs=[conv10])
        # model.compile(optimizer=Adam(lr=1e-3), loss=dice_coef_loss, metrics=[dice_coef])
        # model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[dice_coef, "accuracy"])
        model.compile(optimizer='adam', loss=weighted_binary_crossentropy(weights[0], weights[1]), metrics=[dice_coef, "accuracy"])
        # model.compile(optimizer='adam', loss=weighted_binary_crossentropy(weights[0], weights[1]), metrics=[dice_coef, "accuracy"])
        # categorical crossentropy (weighted)
        return model

    # The different layers in our neural network model (including convolutions, maxpooling and upsampling)
    # %%
    def preprocess(self, imgs, is_mask=False):
        """Resize a batch to (img_rows, img_cols); binarize masks to float32."""
        new_shape = list(imgs.shape).copy()
        new_shape[1] = self.img_rows
        new_shape[2] = self.img_cols
        # imgs_p = np.ndarray((imgs.shape[0], img_rows, img_cols, imgs.shape[3]), dtype=np.uint8)
        imgs_p = np.ndarray(new_shape, dtype=np.uint8)
        for i in range(imgs.shape[0]):
            imgs_p[i] = resize(imgs[i], new_shape[1:], preserve_range=True)
            # imgs_p[i] = resize(imgs[i, 0 ], (img_cols, img_rows), preserve_range=True)
        # imgs_p = imgs_p[..., np.newaxis]
        if is_mask:
            imgs_p = (imgs_p > 0).astype('float32')
        else:
            imgs_p = imgs_p.astype('float32')
        return imgs_p

    # We adapt here our dataset samples dimension so that we can feed it to our network
    # %%
    # %%
    def train_and_predict(self, continue_training=False, epochs=50, step=1):
        """Train the U-Net on the "train" split, then run prediction on test.

        :param continue_training: resume from the saved weights file.
        :param epochs: training epochs.
        :param step: subsampling stride over the training slices.
        """
        # if True:
        print('-' * 30)
        print('Loading and preprocessing train data...')
        print('-' * 30)
        experiment_label = self.experiment_label
        # imgs_train, imgs_mask_train = load_train_data(self.experiment_label)
        imgs_train, imgs_mask_train = get_dataset_loaders("train", self.organ_label)
        # NOTE(review): get_dataset_loaders returns tf.data.Dataset objects,
        # which support neither slicing ([::step]) nor .shape nor the NumPy
        # indexing used by preprocess() — this path appears untested after
        # the switch away from load_train_data.  TODO confirm.
        imgs_train = imgs_train[::step]
        imgs_mask_train = imgs_mask_train[::step]
        logger.debug(f"imgs_train.shape={imgs_train.shape}")
        logger.debug(f"imgs_mask_train.shape={imgs_mask_train.shape}")
        imgs_train = self.preprocess(imgs_train)
        imgs_mask_train = self.preprocess(imgs_mask_train, is_mask=True)
        logger.debug(f"imgs_train.shape={imgs_train.shape}")
        logger.debug(f"imgs_mask_train.shape={imgs_mask_train.shape}")
        # TODO remove - using small part of dataset
        # imgs_train = imgs_train[50:65]
        # imgs_mask_train = imgs_mask_train[50:65]
        # imgs_train = imgs_train.astype('float32')
        # mean = np.mean(imgs_train)  # mean for data centering
        # std = np.std(imgs_train)  # std for data normalization
        # imgs_train -= mean
        # imgs_train /= std
        # Normalization of the train set
        # imgs_mask_train = (imgs_mask_train > 0).astype('float32')
        y_train = imgs_mask_train
        # Calculate the weights for each class so that we can balance the data
        # NOTE(review): positional arguments — newer scikit-learn requires
        # compute_class_weight('balanced', classes=..., y=...) keywords.
        cl_weights = class_weight.compute_class_weight(
            'balanced',
            np.unique(y_train.flatten()),
            y_train.flatten()
        )
        print(f"weights={cl_weights}")
        cl_weights_dct = dict(enumerate(cl_weights))
        print('-' * 30)
        print('Creating and compiling model...')
        print('-' * 30)
        model = self.get_unet(cl_weights)
        if continue_training:
            model.load_weights(f'weights_{experiment_label}.h5')
        model_checkpoint = ModelCheckpoint(f'weights_{experiment_label}.h5', monitor='val_loss', save_best_only=True)
        # Saving the weights and the loss of the best predictions we obtained
        print('-' * 30)
        print('Fitting model...')
        print('-' * 30)
        # NOTE(review): Windows-style backslashes in the TensorBoard log path.
        log_dir = f'logs\\{experiment_label}\\'
        # Path(log_dir).mkdir(parents=True, exist_ok=True)
        # NOTE(review): fit_generator() called with no arguments — this raises
        # TypeError (and fit_generator is removed in TF2); looks like leftover
        # dead code that should be deleted.  TODO confirm.
        model.fit_generator()
        history = model.fit(
            imgs_train,
            imgs_mask_train,
            batch_size=10,
            epochs=epochs,
            verbose=1,
            shuffle=True,
            validation_split=0.2,
            callbacks=[
                model_checkpoint,
                tf.keras.callbacks.TensorBoard(log_dir=log_dir)
            ],
            # class_weight=weights_dct  # this does not work for 4d data
        )
        # predict_test_data(mean=None, std=None)
        self.predict_test_data(history)
        return history

    def predict_test_data(self, history):
        """Load the test split, predict masks with the saved best weights,
        save overlay PNGs and plot the Dice-coefficient training curves."""
        print('-' * 30)
        print('Loading and preprocessing test data...')
        print('-' * 30)
        # imgs_test, imgs_maskt = load_test_data(self.experiment_label)
        imgs_test, imgs_maskt = get_dataset_loaders("test", self.organ_label)
        imgs_test = self.preprocess(imgs_test)
        imgs_maskt = self.preprocess(imgs_maskt, is_mask=True)
        y_train = imgs_maskt
        # Calculate the weights for each class so that we can balance the data
        cl_weights = class_weight.compute_class_weight(
            'balanced',
            np.unique(y_train.flatten()),
            y_train.flatten()
        )
        model = self.get_unet(cl_weights)
        # TODO remove this limit
        # imgs_test = imgs_test[50:65]
        # imgs_maskt = imgs_maskt[50:65]
        imgs_test = imgs_test.astype('float32')
        # imgs_test -= mean
        # imgs_test /= std
        # Normalization of the test set
        # TODO remove this part
        # going to test on train set
        # imgs_test = imgs_train
        # imgs_maskt = imgs_mask_train
        print('-' * 30)
        print('Loading saved weights...')
        print('-' * 30)
        model.load_weights(f'weights_{self.experiment_label}.h5')
        print('-' * 30)
        print('Predicting masks on test data...')
        print('-' * 30)
        imgs_mask_test = model.predict(imgs_test, verbose=1)
        np.save('imgs_mask_test.npy', imgs_mask_test)
        print('-' * 30)
        print('Saving predicted masks to files...')
        print('-' * 30)
        pred_dir = f"preds/{self.experiment_label}"
        Path(pred_dir).mkdir(parents=True, exist_ok=True)
        # Saving our predictions in the directory 'preds'
        logger.debug(f"imgs_test.shape={imgs_test.shape}")
        logger.debug(f"imgs_mask_test.shape={imgs_mask_test.shape}")
        # save_segmentations(imgs_test[:, :, :, 0, 0], imgs_mask_test[:, :, :, 0], pred_dir=pred_dir)
        save_segmentations(imgs_test[:, :, :, 0], imgs_mask_test[:, :, :, 0], pred_dir=pred_dir)
        plt.plot(history.history['dice_coef'])
        plt.plot(history.history['val_dice_coef'])
        plt.title('Model dice coeff')
        plt.ylabel('Dice coeff')
        plt.xlabel('Epoch')
        plt.legend(['Train', 'Test'], loc='upper left')
        plt.show()
        # plotting our dice coeff results in function of the number of epochs


def load_batch():
    # Placeholder — not implemented.
    pass


def expand_dims_and_concat(larr: np.ndarray, axis: int) -> np.ndarray:
    # Stack a list of equal-shaped arrays along a new axis `axis`
    # (equivalent to np.stack(larr, axis=axis)).
    larr = list(map(lambda x: np.expand_dims(x, axis), larr))
    arr = np.concatenate(larr, axis=axis)
    return arr
nilq/baby-python
python
""" -*- test-case-name: PyHouse.Modules.Computer.Mqtt.test.test_computer -*- @name: PyHouse/src/Modules/Computer/Mqtt/mqtt_client.py @author: D. Brian Kimmel @contact: D.BrianKimmel@gmail.com @copyright: (c) 2015-2016 by D. Brian Kimmel @license: MIT License @note: Created on Jun 5, 2015 @Summary: Connect this computer node to the household Mqtt Broker. """ # Import system type stuff import copy import datetime from twisted.internet import defer # from twisted.internet.endpoints import SSL4ClientEndpoint # from twisted.internet.ssl import Certificate, optionsForClientTLS # Import PyMh files and modules. from Modules.Core.data_objects import NodeData, MqttInformation, MqttJson from Modules.Computer.Mqtt.mqtt_actions import Actions from Modules.Computer.Mqtt.mqtt_protocol import PyHouseMqttFactory from Modules.Computer.Mqtt.mqtt_xml import Xml as mqttXML from Modules.Utilities import json_tools, xml_tools from Modules.Computer import logging_pyh as Logger LOG = Logger.getLogger('PyHouse.Mqtt_Client ') PEM_FILE = '/etc/pyhouse/ca_certs/rootCA.pem' class Struct: def __init__(self, **args): self.__dict__.update(args) class Util(object): """ """ def connect_to_one_broker_TCP(self, p_pyhouse_obj, p_broker): l_clientID = 'PyH-' + p_pyhouse_obj.Computer.Name l_host = p_broker.BrokerAddress l_port = p_broker.BrokerPort l_username = None # p_broker.UserName l_password = None # p_broker.Password p_broker._ClientAPI = self LOG.info('Connecting via TCP...') if l_host is None or l_port is None: LOG.error('Bad Mqtt broker Address: {} or Port: {}'.format(l_host, l_port)) p_broker._ProtocolAPI = None else: l_factory = PyHouseMqttFactory(p_pyhouse_obj, l_clientID, p_broker, l_username, l_password) _l_connector = p_pyhouse_obj.Twisted.Reactor.connectTCP(l_host, l_port, l_factory) LOG.info('TCP Connected to broker: {}; Host:{}'.format(p_broker.Name, l_host)) pass @defer.inlineCallbacks def connect_to_one_broker_TLS(self, _p_pyhouse_obj, _p_broker): # l_host = p_broker.BrokerAddress # 
l_port = p_broker.BrokerPort # l_username = p_broker.UserName # l_password = p_broker.Password # l_clientID = 'PyH-' + p_pyhouse_obj.Computer.Name LOG.info('Connecting via TLS...') # l_factory = protocol.Factory.forProtocol(echoclient.EchoClient) # l_factory = PyHouseMqttFactory(p_pyhouse_obj, l_clientID, p_broker, l_username, l_password) # l_certData = PEM_FILE.getContent() # l_authority = Certificate.loadPEM(l_certData) # l_options = optionsForClientTLS(l_host.decode('utf-8'), l_authority) # l_endpoint = SSL4ClientEndpoint(p_pyhouse_obj.Twisted.Reactor, l_host, l_port, l_options) # l_client = yield l_endpoint.connect(l_factory) l_done = defer.Deferred() # l_client.connectionLost = lambda reason: l_done.callback(None) yield l_done def connect_to_all_brokers(self, p_pyhouse_obj): """ This will create a connection for each active broker in the config file. These connections will automatically reconnect if the connection is broken (broker reboots e.g.) """ l_count = 0 for l_broker in p_pyhouse_obj.Computer.Mqtt.Brokers.itervalues(): if not l_broker.Active: continue if l_broker.BrokerPort < 2000: self.connect_to_one_broker_TCP(p_pyhouse_obj, l_broker) else: self.connect_to_one_broker_TLS(p_pyhouse_obj, l_broker) l_count += 1 LOG.info('TCP Connected to {} Broker(s).'.format(l_count)) return l_count @staticmethod def _make_topic(p_pyhouse_obj, p_topic): l_topic = p_pyhouse_obj.Computer.Mqtt.Prefix + p_topic return l_topic @staticmethod def _make_message(p_pyhouse_obj, p_message = None): """ @param p_pyhouse_obj: is the entire PyHouse Data tree. @param message_json: is message that is already json encoded\ @param message_obj: is additional object that will be added into the meddage as Json. 
""" l_message = MqttJson() l_message.Sender = p_pyhouse_obj.Computer.Name l_message.DateTime = datetime.datetime.now() if p_message is None: pass elif isinstance(p_message, object): xml_tools.stuff_new_attrs(l_message, p_message) else: xml_tools.stuff_new_attrs(l_message, p_message) # print(PrettyFormatAny.form(l_message, 'Mqtt Client - Message')) l_json = json_tools.encode_json(l_message) return l_json class API(Util): """This interfaces to all of PyHouse. """ def __init__(self, p_pyhouse_obj): self.m_pyhouse_obj = p_pyhouse_obj p_pyhouse_obj.APIs.Computer.MqttAPI = self p_pyhouse_obj.Computer.Mqtt = MqttInformation() p_pyhouse_obj.Computer.Mqtt.Prefix = 'ReSeT' p_pyhouse_obj.Computer.Mqtt.Brokers = {} LOG.info("Initialized.") def LoadXml(self, p_pyhouse_obj): """ Load the Mqtt xml info. """ # LOG.info("Loading XML") l_mqtt = MqttInformation() l_mqtt.Prefix = p_pyhouse_obj.Computer.Name l_mqtt.Brokers = mqttXML.read_mqtt_xml(p_pyhouse_obj) p_pyhouse_obj.Computer.Mqtt.Brokers = l_mqtt.Brokers LOG.info("Loaded {} Brokers".format(len(l_mqtt.Brokers))) if p_pyhouse_obj.Computer.Mqtt.Brokers != {}: # LOG.info('Connecting to all MQTT Brokers.') l_count = self.connect_to_all_brokers(p_pyhouse_obj) LOG.info("Mqtt {} broker(s) Started.".format(l_count)) else: LOG.info('No Mqtt brokers are configured.') LOG.info("Loaded XML") return l_mqtt def Start(self): """ if self.m_pyhouse_obj.Computer.Mqtt.Brokers != {}: LOG.info('Connecting to all MQTT Brokers.') l_count = self.connect_to_all_brokers(self.m_pyhouse_obj) LOG.info("Mqtt {} broker(s) Started.".format(l_count)) else: LOG.info('No Mqtt brokers are configured.') """ pass def SaveXml(self, p_xml): l_xml = mqttXML().write_mqtt_xml(self.m_pyhouse_obj.Computer.Mqtt.Brokers) p_xml.append(l_xml) LOG.info("Saved Mqtt XML.") return p_xml def Stop(self): LOG.info("Stopped.") # ## The following are public commands that may be called from everywhere def MqttPublish(self, p_topic, p_message): """Send a topic, message to the broker for 
it to distribute to the subscription list # self.m_pyhouse_obj.APIs.Computer.MqttAPI.MqttPublish("schedule/execute", l_schedule) @param p_topic: is the partial topic, the prefix will be prepended. @param message_json : is the JSON message we want to send @param message_obj: is an additional object that we will convert to JSON and merge it into the message. """ l_topic = Util._make_topic(self.m_pyhouse_obj, p_topic) l_message = Util._make_message(self.m_pyhouse_obj, p_message) for l_broker in self.m_pyhouse_obj.Computer.Mqtt.Brokers.itervalues(): if not l_broker.Active: continue try: l_broker._ProtocolAPI.publish(l_topic, l_message) LOG.info('Mqtt publishing:\n\tBroker: {}\t\tTopic:{}\n'.format(l_broker.Name, l_topic)) except AttributeError as e_err: LOG.error("Mqtt Unpublished.\n\tERROR:{}\n\tTopic:{}\n\tMessage:{}\n".format(e_err, l_topic, l_message)) def MqttDispatch(self, p_topic, p_message): """Dispatch a received MQTT message according to the topic. TODO: This needs protection from poorly formed Mqtt messages. """ l_topic = p_topic.split('/')[2:] # Drop the pyhouse/housename/ as that is all we subscribed to. l_message = json_tools.decode_json_unicode(p_message) l_logmsg = Actions(self.m_pyhouse_obj).mqtt_dispatch(l_topic, l_message) LOG.info(l_logmsg) def doPyHouseLogin(self, p_client, p_pyhouse_obj): """Login to PyHouse via MQTT """ self.m_client = p_client l_name = p_pyhouse_obj.Computer.Name try: l_node = copy.deepcopy(p_pyhouse_obj.Computer.Nodes[l_name]) except (KeyError, TypeError): l_node = NodeData() l_node.NodeInterfaces = {} # self.MqttPublish('computer/startup', l_node) # ## END DBK
nilq/baby-python
python
import torch.nn as nn

from ..builder import VQA_MODELS, build_backbone, build_encoder, build_head


@VQA_MODELS.register_module()
class VISDIALPRINCIPLES(nn.Module):
    """Visual-dialog model: embeds question/caption/history tokens with a
    shared embedding, encodes them, fuses with image features through the
    backbone, and scores answer candidates with the head."""

    def __init__(self, vocabulary_len, word_embedding_size, encoder, backbone, head):
        super().__init__()
        # Shared word embedding; index 0 is the padding token.
        self.embedding_model = nn.Embedding(vocabulary_len, word_embedding_size, padding_idx=0)
        self.encoder_model = build_encoder(encoder)
        self.backbone = build_backbone(backbone)
        self.head = build_head(head)  # includes the classification head and the generation head

    def forward(self, data):
        img = data['img_feat']
        ques = data['ques']
        his = data['hist']
        batch_size, rnd, max_his_length = his.size()
        # Round 0 of the history is the image caption.
        cap = his[:, 0, :]
        ques_len = data['ques_len']
        hist_len = data['hist_len']
        cap_len = hist_len[:, 0]
        ques_embed = self.embedding_model(ques)
        cap_emb = self.embedding_model(cap.contiguous())
        # Flatten rounds so every history utterance is embedded in one pass.
        his = his.contiguous().view(-1, max_his_length)
        his_embed = self.embedding_model(his)
        q_output, c_output, his_feat = self.encoder_model(ques_embed, ques_len, cap_emb, cap_len, his_embed, hist_len)
        # Gather the encoder output at each sequence's last valid timestep.
        ques_location = ques_len.view(-1).cpu().numpy() - 1
        ques_encoded = q_output[range(batch_size), ques_location, :]
        cap_location = cap_len.view(-1).cpu().numpy() - 1
        cap_encoded = c_output[range(batch_size), cap_location, :]
        # Restore the per-round history structure.
        his_feat = his_feat.view(batch_size, rnd, -1)
        fuse_feat = self.backbone(ques_encoded, cap_encoded, his_feat, q_output, c_output, ques_len, cap_len,
                                  ques_embed, cap_emb, img, batch_size)
        scores = self.head(fuse_feat, data)
        return scores
nilq/baby-python
python
# Tests for mchap.assemble.inheritence gamete/cross probability calculations.
# (NOTE(review): "inheritence" spelling matches the project module name.)
import numpy as np

from mchap import mset
from mchap.assemble import inheritence


def test_gamete_probabilities__hom():
    # A fully homozygous tetraploid genotype yields a single gamete type.
    genotypes = np.array([[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]], np.int8)
    probabilities = np.array([1])
    gametes_expect = np.array([[[0, 0, 0], [0, 0, 0]]], np.int8)
    probs_expect = np.array([1])
    gametes_actual, probs_actual = inheritence.gamete_probabilities(
        genotypes,
        probabilities,
    )
    assert mset.equal(gametes_expect, gametes_actual)
    np.testing.assert_array_equal(probs_expect, probs_actual)


def test_gamete_probabilities__het():
    # Balanced heterozygote (2+2): gamete ratios follow 1/6 : 4/6 : 1/6.
    genotypes = np.array([[[0, 0, 0], [0, 0, 0], [1, 1, 1], [1, 1, 1]]], np.int8)
    probabilities = np.array([1])
    gametes_expect = np.array(
        [[[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [1, 1, 1]], [[1, 1, 1], [1, 1, 1]]],
        np.int8,
    )
    probs_expect = np.array([1 / 6, 4 / 6, 1 / 6])
    gametes_actual, probs_actual = inheritence.gamete_probabilities(
        genotypes,
        probabilities,
    )
    assert mset.equal(gametes_expect, gametes_actual)
    np.testing.assert_array_equal(probs_expect, probs_actual)


def test_gamete_probabilities__distribution():
    # Weighted mixture of genotypes: gamete probabilities are the
    # probability-weighted sums of the per-genotype gamete frequencies.
    genotypes = np.array(
        [
            [[0, 0, 0], [0, 0, 0], [0, 0, 0], [1, 1, 1]],
            [[0, 0, 0], [0, 0, 0], [1, 1, 1], [1, 1, 1]],
            [[0, 0, 0], [0, 0, 0], [0, 1, 1], [1, 1, 1]],
        ],
        np.int8,
    )
    probabilities = np.array([0.6, 0.3, 0.1])
    gametes_expect = np.array(
        [
            [[0, 0, 0], [0, 0, 0]],
            [[0, 0, 0], [1, 1, 1]],
            [[1, 1, 1], [1, 1, 1]],
            [[0, 0, 0], [0, 1, 1]],
            [[0, 1, 1], [1, 1, 1]],
        ],
        dtype=np.int8,
    )
    probs_expect = np.array(
        [
            (0.6 * 3 / 6) + (0.3 * 1 / 6) + (0.1 * 1 / 6),
            (0.6 * 3 / 6) + (0.3 * 4 / 6) + (0.1 * 2 / 6),
            (0.6 * 0 / 6) + (0.3 * 1 / 6) + (0.1 * 0 / 6),
            (0.6 * 0 / 6) + (0.3 * 0 / 6) + (0.1 * 2 / 6),
            (0.6 * 0 / 6) + (0.3 * 0 / 6) + (0.1 * 1 / 6),
        ]
    )
    gametes_actual, probs_actual = inheritence.gamete_probabilities(
        genotypes,
        probabilities,
    )
    assert mset.equal(gametes_expect, gametes_actual)
    np.testing.assert_array_equal(probs_expect, probs_actual)


def test_cross_probabilities__hom_x_het():
    # Homozygous mother x heterozygous father: offspring split 50/50.
    maternal_gametes = np.array([[[0, 0, 0], [0, 0, 0]]], np.int8)
    maternal_probs = np.array([1])
    # NOTE(review): duplicated assignment (harmless, but one line can go).
    maternal_probs = np.array([1])
    paternal_gametes = np.array(
        [[[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [1, 1, 1]]], np.int8
    )
    paternal_probs = np.array([0.5, 0.5])
    genotypes_expect = np.array(
        [
            [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
            [[0, 0, 0], [0, 0, 0], [0, 0, 0], [1, 1, 1]],
        ],
        dtype=np.int8,
    )
    probs_expect = np.array([0.5, 0.5])
    genotypes_actual, probs_actual = inheritence.cross_probabilities(
        maternal_gametes,
        maternal_probs,
        paternal_gametes,
        paternal_probs,
    )
    assert mset.equal(genotypes_expect, genotypes_actual)
    np.testing.assert_array_equal(probs_expect, probs_actual)
nilq/baby-python
python
# Copyright 2019 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from setuptools import setup

# Name of the package this extension instruments; by convention the
# extension package itself is named `newrelic_extension_<instrumented>`.
INSTRUMENTED_PACKAGE = "sampleproject"
PACKAGE_NAME = "newrelic_extension_{}".format(INSTRUMENTED_PACKAGE)

# Entry points consumed by the New Relic agent, each of the form
# "<module_to_intercept> = <instrumentation_hook>".
HOOKS = ["sample = {}.example:instrument".format(PACKAGE_NAME)]

setup(
    name=PACKAGE_NAME,
    version="0.1",
    packages=[PACKAGE_NAME],
    package_dir={PACKAGE_NAME: "src"},
    entry_points={"newrelic.hooks": HOOKS},
    license="Apache-2.0",
    classifiers=["License :: OSI Approved :: Apache Software License"],
    install_requires=[
        "newrelic",
        # The instrumented package must always be installed alongside.
        INSTRUMENTED_PACKAGE,
    ],
)
nilq/baby-python
python
import altair as alt

from data import get_nullity_matrix_data


def nullity_matrix_chart(
    data, keep_original_col_order=True, show_tooltip=False, threshold=0.5, h=400
):
    """Build a missingno-style nullity matrix as a layered Altair chart.

    One rect per (row, column) cell, white where the value is null and dark
    where it is present, with per-column null percentages rendered as rotated
    text below the matrix (red when above ``threshold``).

    :param data: dataset handed to get_nullity_matrix_data — presumably a
        pandas DataFrame; TODO confirm against that helper.
    :param keep_original_col_order: keep column order; otherwise sort A-Z.
    :param show_tooltip: attach per-cell tooltips (and a hint cursor).
    :param threshold: null-fraction above which the percentage label is red.
    :param h: chart height in pixels.
    """
    # nm_data: long-form (row, column, isnull, percentage...) records.
    nm_data, n_rows = get_nullity_matrix_data(data)

    text_font_size = 10

    base = alt.Chart(nm_data, height=h)

    chart = base.mark_rect(cursor="context-menu" if show_tooltip else "default").encode(
        x=alt.X(
            "column:N",
            sort=None if keep_original_col_order else "ascending",
            axis=alt.Axis(
                orient="top",
                labelAngle=-90,
                labelColor="#44475A",
                domain=False,
                tickColor="transparent",
                title=None,
            ),
        ),
        y=alt.Y(
            "row:Q",
            axis=alt.Axis(
                grid=False,
                domain=False,
                tickColor="transparent",
                labelColor="#44475A",
                title=None,
                values=[0, n_rows],
            ),
            # Reversed domain puts row 0 at the top, like a table.
            scale=alt.Scale(nice=False, domain=[n_rows, 0]),
        ),
        color=alt.Color(
            "isnull:N",
            legend=None,
            scale=alt.Scale(domain=[True, False], range=["white", "#44475A"]),
        ),
    )

    if show_tooltip:
        chart = chart.encode(
            tooltip=[
                alt.Tooltip("row:Q", title="Row"),
                alt.Tooltip("isnull:N", title="Null value?"),
                alt.Tooltip("column:N", title="Column"),
                alt.Tooltip(
                    "percentage_null_values_per_column:Q",
                    format=".2~%",
                    title="% of null values in this column",
                ),
            ]
        )

    # Altair/Vega-Lite:
    # Default `labelFontSize` = 10
    # Default `tickSize` = 5
    # Default `labelPadding` = 2
    # Default `translate` = 0.5
    text = base.mark_text(
        baseline="middle", align="right", fontSize=text_font_size, angle=270
    ).encode(
        x=alt.X("column:N"),
        # Place labels just below the matrix (offsets from the defaults above).
        y=alt.value(h + (text_font_size / 2) + 5 + 2 + 0.5),
        text=alt.Text("percentage_null_values_per_column:Q", format=".2~%"),
        color=alt.condition(
            f"datum.percentage_null_values_per_column > {threshold}",
            alt.value("#E84A5F"),
            alt.value("#44475A"),
        ),
    )

    return (
        alt.layer(chart, text)
        .configure_view(strokeWidth=0)
        .configure_scale(bandPaddingInner=0.1)
    )
nilq/baby-python
python
#!encoding=utf-8
# Extract candidate noun phrases from free text using TextBlob POS tags.
from textblob import TextBlob
import os, sys, re

# NOTE(review): BRANDS is referenced by is_valid_np() but was never defined in
# this file (NameError at call time).  Defined empty here so the filter is a
# no-op until the real brand list is supplied — TODO confirm its source.
BRANDS = []


def textblob_process(line):
    """Return TextBlob POS tags [(token, tag), ...] for one line of text."""
    blob = TextBlob(line)
    return blob.tags


def process_tag_result(tag_res):
    """Collect noun phrases from a POS-tag sequence.

    A noun phrase is a maximal run of NN/NNS/NNP tokens; a single-noun run is
    prefixed with an immediately preceding adjective (e.g. "square neckline").
    """
    nps = []
    i = 0
    while i < len(tag_res):
        # Skip ahead to the next noun token.
        while i < len(tag_res) and not tag_res[i][1].startswith('NN'):
            i += 1
        np = []
        while i < len(tag_res) and (tag_res[i][1] == 'NN' or tag_res[i][1] == 'NNS' or tag_res[i][1] == 'NNP'):
            np.append(tag_res[i][0])
            i += 1
        # Fixed: guard i >= 2 so tag_res[i-2] cannot wrap around to the end
        # of the list when the noun run starts at position 0.
        if len(np) == 1 and i >= 2 and tag_res[i-2][1] == 'JJ':
            np.insert(0, tag_res[i-2][0])
        # Fixed: only record non-empty phrases; previously a trailing stretch
        # of non-noun tokens appended an empty string to the result.
        if np:
            nps.append(" ".join(np))
        i += 1
    return nps


def is_valid_np(np):
    """Heuristic filter: reject phrases with digits, brand names, slashes,
    very short tokens, or trademark symbols."""
    if re.search(r'\d+', np):
        return False
    if not re.match(r'\w+', np):
        return False
    for brand in BRANDS:
        if np.find(brand) >= 0:
            return False
    if np.find('/') >= 0:
        return False
    for token in np.split(' '):
        if len(token) <= 2:
            return False
        if token[-1] == u'®' or token[-1] == u'™':
            return False
    return True


def extract(line):
    """Extract candidate noun phrases from one line of text."""
    nps = list()
    tag_res = textblob_process(line)
    nps.extend(process_tag_result(tag_res))
    return nps


if __name__ == '__main__':
    s = "Lower cut design with a square shaped neckline"
    # Fixed: was `print extract_np(s)` — extract_np is undefined; the entry
    # point is extract().  print(...) form works on both Python 2 and 3.
    print(extract(s))
nilq/baby-python
python
from ..crypto import Nonce
from . import constants

from io import BytesIO
from datetime import datetime
import binascii
import struct
import base58
import json

# Primitive field types: name -> [size in bytes, struct format string].
FIELDS = {
    'i64le': [8, '<q'],
    'i64be': [8, '>q'],
    'u64le': [8, '<Q'],
    'u64be': [8, '>Q'],
    'i32le': [4, '<i'],
    'i32be': [4, '>i'],
    'u32le': [4, '<I'],
    'u32be': [4, '>I'],
    'u16le': [2, '<H'],
    'u16be': [2, '>H'],
    'u8le': [1, '<B'],
    'u8be': [1, '>B'],
    'bool': [1, '?']
}


class EncoderInstance:
    '''Holds values decoded by (or to be serialized with) an Encoder.'''

    def __init__(self, encoder, fields, data, tag, dynamic):
        # fields: the field schema (list of dicts, or a single dict);
        # data: the decoded values; tag: message tag (hex string or None);
        # dynamic: whether the payload is 4-byte length-prefixed.
        self.fields = fields
        self.data = data
        self.tag = tag
        self.dynamic = dynamic
        self.encoder = encoder

    def __repr__(self):
        return str(self.data)

    def __str__(self):
        # Summarize as "<encoder name> [ msg msg ... ]" when this instance
        # wraps a tagged message list.
        s = self.encoder.name
        if 'messages' in self.data:
            s += ' [ '
            for m in self.data['messages']:
                s += str(m) + ' '
            s += ']'
        return s

    def __getitem__(self, key):
        return self.data[key]

    def __setitem__(self, key, value):
        self.data[key] = value

    def __iter__(self):
        # Iterable only when wrapping a message list.
        # NOTE(review): returning None from __iter__ makes iteration raise
        # TypeError rather than a clean error — confirm this is intended.
        if 'messages' in self.data:
            return iter(self.data['messages'])
        else:
            return None

    def encoder(self):
        # NOTE(review): shadowed by the `self.encoder` attribute assigned in
        # __init__, so this method is unreachable on instances.
        return self.encoder

    def encoder_name(self):
        return self.encoder.name

    def serialize(self, skipSize=False):
        """Serialize this instance back to bytes, mirroring Encoder.parse.

        When the encoder is dynamic and `skipSize` is False, the payload is
        prefixed with its 4-byte big-endian length.
        """
        bio = BytesIO()

        # Normalize the schema to a list of field dicts.
        if type(self.fields) == list:
            fields = self.fields
        else:
            fields = [self.fields]

        for f in fields:
            # A single anonymous field stores its value directly in `data`.
            if f['name'] == 'noname':
                fdata = self.data
            else:
                fdata = self.data[f['name']]

            # Nested encoder: delegate serialization.
            if type(f['type']) != str:
                bio.write(fdata.serialize())

            elif f['type'] == 'bytes':
                # Hex-encoded byte string; dynamic length gets a 4-byte
                # big-endian length prefix.
                if f['length'] == 'dynamic':
                    bio.write(struct.pack('>I', len(fdata)))
                bio.write(binascii.unhexlify(fdata))

            elif f['type'] == 'nonce':
                bio.write(fdata.get())

            elif f['type'] == 'time':
                # Unix timestamp as signed 64-bit big-endian.
                ff = FIELDS['i64be']
                bio.write(struct.pack(ff[1], int(fdata.timestamp())))

            elif f['type'] == 'string':
                # 2-byte big-endian length followed by ASCII bytes.
                bio.write(struct.pack('>H', len(fdata)))
                bio.write(fdata.encode('ascii'))

            # Base58check hashes: decode, strip the type prefix, write the
            # raw digest bytes.
            elif f['type'] == 'hash' and f['of'] == 'block':
                bio.write(base58.b58decode_check(fdata)[len(constants.PREFIXES['b'])::])
            elif f['type'] == 'hash' and f['of'] == 'chain_id':
                bio.write(base58.b58decode_check(fdata)[len(constants.PREFIXES['Net'])::])
            elif f['type'] == 'hash' and f['of'] == 'context':
                bio.write(base58.b58decode_check(fdata)[len(constants.PREFIXES['Co'])::])
            elif f['type'] == 'hash' and f['of'] == 'operationlist':
                bio.write(base58.b58decode_check(fdata)[len(constants.PREFIXES['LLo'])::])
            elif f['type'] == 'hash' and f['of'] == 'operation':
                bio.write(base58.b58decode_check(fdata)[len(constants.PREFIXES['o'])::])

            elif f['type'] == 'list':
                # The length prefix stores (element count - 1); parse() adds
                # the 1 back.
                bio.write(struct.pack('>H', len(fdata) - 1))
                for lelem in fdata:
                    if type(f['of']) == str:
                        ff = FIELDS[f['of']]
                        bio.write(struct.pack(ff[1], lelem))
                    else:
                        bio.write(lelem.serialize())

            elif f['type'] == 'tlist':
                # Tagged list: each element is framed as (size, tag, payload),
                # where size covers the 2-byte tag plus the payload.
                bio.write(struct.pack('>H', len(fdata) - 1))
                for lelem in fdata:
                    elser = lelem.serialize()
                    bio.write(struct.pack('>H', len(elser) + 2))
                    bio.write(struct.pack('>H', int(lelem.tag, 16)))
                    bio.write(elser)

            else:
                # Plain primitive from the FIELDS table.
                bio.write(struct.pack(FIELDS[f['type']][1], fdata))

        bio.seek(0)
        data = bio.read()

        if self.dynamic and not skipSize:
            # Prefix the whole payload with its 4-byte big-endian size.
            osize = struct.pack('>I', len(data))
            return osize + data
        else:
            return data


class Encoder:
    '''Declarative binary codec: parses bytes to an EncoderInstance and back.'''

    def __init__(self, name, fields, tag=None, instance=None, dynamic=False):
        self.name = name
        self.fields = fields
        self.tag = tag
        self.dynamic = dynamic

        # Allow a custom instance class; default to EncoderInstance.
        if instance:
            self.instance = instance
        else:
            self.instance = EncoderInstance

    def __repr__(self):
        return str(self)

    def __str__(self):
        return self.name

    def from_data(self, data):
        """Wrap already-decoded values in this encoder's instance class."""
        parsed = {}
        for f in self.fields:
            parsed[f['name']] = data[f['name']]
        return self.instance(self, self.fields, parsed, self.tag, self.dynamic)

    def parse(self, data, skipSize=False):
        """Parse bytes (or a BytesIO stream) into an EncoderInstance.

        Inverse of EncoderInstance.serialize. When the encoder is dynamic
        and `skipSize` is False, a 4-byte big-endian size prefix is read
        first and parsing is confined to that many bytes.
        """
        parsed = {}

        if data.__class__ == bytes:
            bio = BytesIO(data)
        else:
            bio = data

        if self.dynamic and not skipSize:
            # Read the size prefix and re-window the stream to the payload.
            osize = struct.unpack('>I', bio.read(4))[0]
            data2 = bio.read(osize)
            bio = BytesIO(data2)
        elif self.dynamic and skipSize:
            osize = len(data)

        # Normalize the schema to a list of field dicts.
        if type(self.fields) == list:
            fields = self.fields
        else:
            fields = [self.fields]

        ptell = bio.tell()

        for f in fields:
            # Anonymous fields get the sentinel name 'noname'.
            if not ('name' in f):
                f['name'] = 'noname'

            # Nested encoder: delegate parsing.
            if type(f['type']) != str:
                parsed[f['name']] = f['type'].parse(bio)

            elif f['type'] == 'bytes':
                # Length comes from the dynamic payload size (single-field
                # dynamic encoders), a 4-byte prefix, or a fixed schema value.
                if self.dynamic and len(fields) == 1:
                    l = osize
                elif f['length'] == 'dynamic':
                    l = struct.unpack('>I', bio.read(4))[0]
                else:
                    l = f['length']
                parsed[f['name']] = binascii.hexlify(bio.read(l))

            elif f['type'] == 'nonce':
                parsed[f['name']] = Nonce.from_bin(bio.read(24))

            elif f['type'] == 'time':
                # Signed 64-bit big-endian Unix timestamp.
                ff = FIELDS['i64be']
                parsed[f['name']] = datetime.fromtimestamp(struct.unpack(ff[1], bio.read(ff[0]))[0])

            elif f['type'] == 'string':
                # 2-byte big-endian length followed by ASCII bytes.
                l = struct.unpack('>H', bio.read(2))[0]
                parsed[f['name']] = bio.read(l).decode('ascii')

            # Raw hash bytes: re-attach the type prefix and base58check-encode.
            elif f['type'] == 'hash' and f['of'] == 'block':
                parsed[f['name']] = base58.b58encode_check(constants.PREFIXES['b'] + bio.read(32))
            elif f['type'] == 'hash' and f['of'] == 'chain_id':
                parsed[f['name']] = base58.b58encode_check(constants.PREFIXES['Net'] + bio.read(4))
            elif f['type'] == 'hash' and f['of'] == 'context':
                parsed[f['name']] = base58.b58encode_check(constants.PREFIXES['Co'] + bio.read(32))
            elif f['type'] == 'hash' and f['of'] == 'operationlist':
                parsed[f['name']] = base58.b58encode_check(constants.PREFIXES['LLo'] + bio.read(32))
            elif f['type'] == 'hash' and f['of'] == 'operation':
                parsed[f['name']] = base58.b58encode_check(constants.PREFIXES['o'] + bio.read(32))

            elif f['type'] == 'list':
                # Length prefix stores (element count - 1).
                l = struct.unpack('>H', bio.read(2))[0]
                ll = []
                for i in range(l + 1):
                    if type(f['of']) == str:
                        ff = FIELDS[f['of']]
                        ll.append(struct.unpack(ff[1], bio.read(ff[0]))[0])
                    else:
                        ll.append(f['of'].parse(bio))
                parsed[f['name']] = ll

            # Tagged list, a list where elements are tags of other types
            elif f['type'] == 'tlist':
                l = struct.unpack('>H', bio.read(2))[0]
                ll = []
                for i in range(l + 1):
                    # Read the element frame: size, then the 2-byte type tag.
                    elsize = struct.unpack('>H', bio.read(2))[0]
                    t = hex(struct.unpack('>H', bio.read(2))[0])

                    # Get the data
                    if t in f['of']:
                        ll.append(f['of'][t].parse(bio))
                    else:
                        bio.read(elsize - 2)  # skip data if message is not recognized
                # NOTE(review): stored under the fixed key 'messages' rather
                # than f['name'] — asymmetric with serialize(), which reads
                # from f['name']. Confirm tlist fields are always named
                # 'messages'.
                parsed['messages'] = ll

            else:
                # Plain primitive from the FIELDS table.
                ff = FIELDS[f['type']]
                parsed[f['name']] = struct.unpack(ff[1], bio.read(ff[0]))[0]

        # A single anonymous/named field collapses to its bare value.
        if type(self.fields) != list:
            parsed = parsed[self.fields['name']]

        #ptell_end = bio.tell()
        return self.instance(self, self.fields, parsed, self.tag, self.dynamic)
nilq/baby-python
python
#----------
#author:someone120
#----------
import pypinyin as py
import lxml
import sqlite3 as sql
from urllib import request as url
# imports done


def run():
    """Fetch page 1 of the idiom index and print it, decoded from GBK."""
    page = get(1)
    print(page.decode('gbk'))


def get(num):
    """Download one page of the chengyu (idiom) index from hydcd.com.

    `num` is the page number (zero-padded to five digits in the URL).
    Returns the raw response body as bytes.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Linux; Android 8.1.0; Redmi 5 Build/OPM1.171019.026; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/75.0.3770.143 Mobile Safari/537.36'
    }
    page_url = 'http://www.hydcd.com/cy/chengyu/cy%s.htm' % (str(num).zfill(5))
    req = url.Request(page_url, headers=headers)
    resp = url.urlopen(req)
    return resp.read()


if __name__ == '__main__':
    run()
nilq/baby-python
python
# pylint: disable=not-callable
# pylint: disable=no-member

import torch
import torch.nn as nn
from torch.nn import functional as F


class RecurrentDynamics(nn.Module):
    """Latent dynamics model with a deterministic GRU path and a stochastic
    latent state, producing prior (and optionally posterior) distributions
    per time step.

    NOTE(review): structure matches the PlaNet/Dreamer recurrent state-space
    model (RSSM) — confirm against the training code that uses it.
    """

    def __init__(
        self,
        hidden_size,
        state_size,
        action_size,
        node_size,
        embedding_size,
        act_fn="relu",
        min_std=0.01,
    ):
        super().__init__()
        # Activation looked up on the torch namespace (e.g. torch.relu).
        self.act_fn = getattr(torch, act_fn)
        # Floor added to every predicted std for numerical stability.
        self.min_std = min_std
        # (state, action) -> GRU input embedding.
        self.fc_embed_state_action = nn.Linear(state_size + action_size, hidden_size)
        self.rnn = nn.GRUCell(hidden_size, hidden_size)
        # Prior head: hidden -> (mean, std) of the latent state.
        self.fc_embed_prior = nn.Linear(hidden_size, node_size)
        self.fc_prior = nn.Linear(node_size, 2 * state_size)
        # Posterior head: (hidden, observation embedding) -> (mean, std).
        self.fc_embed_posterior = nn.Linear(hidden_size + embedding_size, node_size)
        self.fc_posterior = nn.Linear(node_size, 2 * state_size)

    def forward(self, prev_hidden, prev_state, actions, encoder_output=None, non_terms=None):
        """
        Roll the dynamics forward for actions.size(0) steps.

        prev_hidden (batch, hidden_size)
        prev_state (batch, hidden_size)
        actions (seq_len, batch, hidden_size)
        encoder_output (seq_len, batch, hidden_size)
        non_terms (seq_len, batch, hidden_size)

        Returns a dict of tensors stacked over time (seq_len first). When
        `encoder_output` is given, posterior means/stds/states are included
        and the rollout is conditioned on the posterior samples; otherwise
        only the prior is produced.
        """
        T = actions.size(0) + 1

        # Slot 0 holds the carried-in state; slots 1..T-1 are filled below
        # and slot 0 is dropped before stacking.
        hiddens = [torch.empty(0)] * T
        prior_states = [torch.empty(0)] * T
        prior_means = [torch.empty(0)] * T
        prior_stds = [torch.empty(0)] * T
        posterior_states = [torch.empty(0)] * T
        posterior_means = [torch.empty(0)] * T
        posterior_stds = [torch.empty(0)] * T

        hiddens[0] = prev_hidden
        prior_states[0] = prev_state
        posterior_states[0] = prev_state

        for t in range(T - 1):
            # Condition on the posterior sample when observations are
            # available, otherwise on the prior sample.
            _state = prior_states[t] if encoder_output is None else posterior_states[t]
            # Zero out the state at episode boundaries.
            _state = _state if non_terms is None else _state * non_terms[t]

            """ compute deterministic hidden state """
            #print('cat in dynamic@', t, _state.shape, actions[t].shape)
            out = torch.cat([_state, actions[t]], dim=1)
            out = self.act_fn(self.fc_embed_state_action(out))
            hiddens[t + 1] = self.rnn(out, hiddens[t])

            """ compute latent state prior """
            out = self.act_fn(self.fc_embed_prior(hiddens[t + 1]))
            # fc_prior emits mean and (pre-softplus) std concatenated.
            prior_means[t + 1], _prior_std = torch.chunk(self.fc_prior(out), 2, dim=1)
            prior_stds[t + 1] = F.softplus(_prior_std) + self.min_std

            """ sample from state prior """
            # Reparameterized sample: mean + std * eps.
            sample = prior_means[t + 1] + prior_stds[t + 1] * torch.randn_like(
                prior_means[t + 1]
            )
            prior_states[t + 1] = sample

            if encoder_output is not None:
                """ encoder_output observations have different time index """
                t_ = t - 1

                """ calculate latent state posterior """
                out = torch.cat([hiddens[t + 1], encoder_output[t_ + 1]], dim=1)
                out = self.act_fn(self.fc_embed_posterior(out))
                posterior_means[t + 1], _posterior_std = torch.chunk(
                    self.fc_posterior(out), 2, dim=1
                )
                posterior_stds[t + 1] = F.softplus(_posterior_std) + self.min_std

                """ sample from state posterior """
                sample = posterior_means[t + 1] + posterior_stds[
                    t + 1
                ] * torch.randn_like(posterior_means[t + 1])
                posterior_states[t + 1] = sample

        # Drop the seed slot and stack over time (seq_len first).
        hiddens = torch.stack(hiddens[1:], dim=0)
        prior_states = torch.stack(prior_states[1:], dim=0)
        prior_means = torch.stack(prior_means[1:], dim=0)
        prior_stds = torch.stack(prior_stds[1:], dim=0)

        if encoder_output is None:
            return {
                "hiddens": hiddens,
                "prior_means": prior_means,
                "prior_stds": prior_stds,
                "prior_states": prior_states,
            }
        else:
            posterior_means = torch.stack(posterior_means[1:], dim=0)
            posterior_stds = torch.stack(posterior_stds[1:], dim=0)
            posterior_states = torch.stack(posterior_states[1:], dim=0)
            return {
                "hiddens": hiddens,
                "prior_means": prior_means,
                "prior_stds": prior_stds,
                "prior_states": prior_states,
                "posterior_means": posterior_means,
                "posterior_stds": posterior_stds,
                "posterior_states": posterior_states,
            }
nilq/baby-python
python
"""Integration tests for vcrpy header handling and response filtering.

These tests hit a live httpbin fixture; the order of requests is
significant because the first request inside a cassette context records
and later ones replay.
"""
import vcr
import zlib
import json

import six.moves.http_client as httplib

from assertions import assert_is_json


def _headers_are_case_insensitive(host, port):
    # Fetch the same endpoint twice, reading the Set-Cookie header with two
    # different capitalizations; returns True when the values match.
    conn = httplib.HTTPConnection(host, port)
    conn.request("GET", "/cookies/set?k1=v1")
    r1 = conn.getresponse()
    cookie_data1 = r1.getheader("set-cookie")

    conn = httplib.HTTPConnection(host, port)
    conn.request("GET", "/cookies/set?k1=v1")
    r2 = conn.getresponse()
    cookie_data2 = r2.getheader("Set-Cookie")

    return cookie_data1 == cookie_data2


def test_case_insensitivity(tmpdir, httpbin):
    testfile = str(tmpdir.join("case_insensitivity.yml"))
    # check if headers are case insensitive outside of vcrpy
    host, port = httpbin.host, httpbin.port
    outside = _headers_are_case_insensitive(host, port)
    with vcr.use_cassette(testfile):
        # check if headers are case insensitive inside of vcrpy
        inside = _headers_are_case_insensitive(host, port)
        # check if headers are case insensitive after vcrpy deserializes headers
        inside2 = _headers_are_case_insensitive(host, port)

    # behavior should be the same both inside and outside
    assert outside == inside == inside2


def _multiple_header_value(httpbin):
    # httpbin echoes the requested headers back; `foo` appears twice.
    conn = httplib.HTTPConnection(httpbin.host, httpbin.port)
    conn.request("GET", "/response-headers?foo=bar&foo=baz")
    r = conn.getresponse()
    return r.getheader("foo")


def test_multiple_headers(tmpdir, httpbin):
    # Repeated headers must survive a record/replay round trip.
    testfile = str(tmpdir.join("multiple_headers.yaml"))
    outside = _multiple_header_value(httpbin)

    with vcr.use_cassette(testfile):
        inside = _multiple_header_value(httpbin)

    assert outside == inside


def test_original_decoded_response_is_not_modified(tmpdir, httpbin):
    # With decode_compressed_response=True, the cassette stores decoded
    # JSON, but the live response handed back to the caller must stay
    # gzip-encoded and byte-identical to an unrecorded request.
    testfile = str(tmpdir.join("decoded_response.yml"))
    host, port = httpbin.host, httpbin.port

    conn = httplib.HTTPConnection(host, port)
    conn.request("GET", "/gzip")
    outside = conn.getresponse()

    with vcr.use_cassette(testfile, decode_compressed_response=True):
        conn = httplib.HTTPConnection(host, port)
        conn.request("GET", "/gzip")
        inside = conn.getresponse()

        # Assert that we do not modify the original response while appending
        # to the cassette.
        assert "gzip" == inside.headers["content-encoding"]

        # They should effectively be the same response.
        inside_headers = (h for h in inside.headers.items() if h[0].lower() != "date")
        outside_headers = (h for h in outside.getheaders() if h[0].lower() != "date")
        assert set(inside_headers) == set(outside_headers)
        inside = zlib.decompress(inside.read(), 16 + zlib.MAX_WBITS)
        outside = zlib.decompress(outside.read(), 16 + zlib.MAX_WBITS)
        assert inside == outside

    # Even though the above are raw bytes, the JSON data should have been
    # decoded and saved to the cassette.
    with vcr.use_cassette(testfile):
        conn = httplib.HTTPConnection(host, port)
        conn.request("GET", "/gzip")
        inside = conn.getresponse()
        assert "content-encoding" not in inside.headers
        assert_is_json(inside.read())


def _make_before_record_response(fields, replacement="[REDACTED]"):
    # Build a before_record_response callback that replaces the given
    # top-level JSON fields in the recorded body with `replacement`.
    def before_record_response(response):
        string_body = response["body"]["string"].decode("utf8")
        body = json.loads(string_body)

        for field in fields:
            if field in body:
                body[field] = replacement

        response["body"]["string"] = json.dumps(body).encode()
        return response

    return before_record_response


def test_original_response_is_not_modified_by_before_filter(tmpdir, httpbin):
    # Scrubbing must affect only what is written to the cassette (and thus
    # what replay returns), never the live response seen while recording.
    testfile = str(tmpdir.join("sensitive_data_scrubbed_response.yml"))
    host, port = httpbin.host, httpbin.port
    field_to_scrub = "url"
    replacement = "[YOU_CANT_HAVE_THE_MANGO]"

    conn = httplib.HTTPConnection(host, port)
    conn.request("GET", "/get")
    outside = conn.getresponse()

    callback = _make_before_record_response([field_to_scrub], replacement)
    with vcr.use_cassette(testfile, before_record_response=callback):
        conn = httplib.HTTPConnection(host, port)
        conn.request("GET", "/get")
        inside = conn.getresponse()

        # The scrubbed field should be the same, because no cassette existed.
        # Furthermore, the responses should be identical.
        inside_body = json.loads(inside.read().decode("utf-8"))
        outside_body = json.loads(outside.read().decode("utf-8"))
        assert not inside_body[field_to_scrub] == replacement
        assert inside_body[field_to_scrub] == outside_body[field_to_scrub]

    # Ensure that when a cassette exists, the scrubbed response is returned.
    with vcr.use_cassette(testfile, before_record_response=callback):
        conn = httplib.HTTPConnection(host, port)
        conn.request("GET", "/get")
        inside = conn.getresponse()

        inside_body = json.loads(inside.read().decode("utf-8"))
        assert inside_body[field_to_scrub] == replacement
nilq/baby-python
python
# uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: lib.coginvasion.toon.NameTag
from panda3d.core import TextNode
from direct.fsm import ClassicFSM, State
from lib.coginvasion.globals import CIGlobals

class NameTag(TextNode):
    """Billboarded name text floating over an avatar, with a small FSM
    driving the card (background) color for click/hover feedback."""

    # Foreground/background colors per avatar type (RGBA).
    NameTagColors = {CIGlobals.Suit: {'fg': (0.2, 0.2, 0.2, 1.0), 'bg': (0.8, 0.8, 0.8, 0.5)},
       CIGlobals.Toon: {'fg': (0.8, 0.4, 0.0, 1.0), 'bg': (0.8, 0.8, 0.8, 0.5)},
       CIGlobals.CChar: {'fg': (0.2, 0.5, 0.0, 1.0), 'bg': (0.8, 0.8, 0.8, 0.5)}}
    # Card colors per picker state (RGBA).
    NameTagBackgrounds = {'rollover': (1.0, 1.0, 1.0, 0.65),
       'down': (0.3, 0.3, 0.3, 0.5),
       'up': (0.8, 0.8, 0.8, 0.5)}
    # Distinct color for the local player's own tag.
    LocalNameTagColor = (0.3, 0.3, 0.7, 1.0)

    def __init__(self, name, avatarType):
        self.avatarType = avatarType
        # FSM switches the card color between up/down/rollover states.
        self.fsm = ClassicFSM.ClassicFSM('NameTag', [State.State('off', self.enterOff, self.exitOff),
         State.State('rollover', self.enterRollover, self.exitRollover),
         State.State('down', self.enterDown, self.exitDown),
         State.State('up', self.enterUp, self.exitUp)], 'off', 'off')
        self.fsm.enterInitialState()
        TextNode.__init__(self, 'nameTag-' + str(id(self)))
        self.setText(name)
        self.setTextColor(0.191406, 0.5625, 0.773438, 1.0)
        self.setWordwrap(8)
        self.setCardAsMargin(0.1, 0.1, 0.1, 0.1)
        self.setCardDecal(True)
        self.setAlign(self.ACenter)
        # NOTE(review): `hidden` is not imported here — presumably the
        # Panda3D/ShowBase builtin hidden node; the tag is parented there
        # until placed in the scene. Confirm against the caller.
        self.nodePath = hidden.attachNewNode(self)
        # Always face the camera.
        self.nodePath.setBillboardPointEye()
        self.clickable = 0

    def getNodePath(self):
        return self.nodePath

    def setColorLocal(self):
        # Use the local-player color for this tag's text.
        self.setTextColor(self.LocalNameTagColor)

    def setClickable(self, value):
        self.clickable = value

    def getClickable(self):
        return self.clickable

    def setPickerState(self, state):
        # state is one of: 'off', 'rollover', 'down', 'up'.
        self.fsm.request(state)

    def enterOff(self):
        pass

    def exitOff(self):
        pass

    def enterRollover(self):
        self.setCardColor(self.NameTagBackgrounds['rollover'])

    def exitRollover(self):
        pass

    def enterDown(self):
        self.setCardColor(self.NameTagBackgrounds['down'])

    # NOTE(review): placed between enterDown/exitDown — a decompiler
    # (uncompyle6) ordering artifact, not an FSM callback.
    def makeDefaultFG(self):
        # Restore the avatar-type foreground color.
        self.setTextColor(self.NameTagColors[self.avatarType]['fg'])

    def exitDown(self):
        pass

    def enterUp(self):
        self.setCardColor(self.NameTagBackgrounds['up'])

    def exitUp(self):
        pass

    def destroy(self):
        # Tear down the FSM and detach the node from the scene graph.
        self.fsm.requestFinalState()
        del self.fsm
        del self.avatarType
        del self.clickable
        self.nodePath.removeNode()
        self.nodePath = None
        return
nilq/baby-python
python
# Copyright 2016-2021 IBM Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A :term:`LPAR` (Logical Partition) is a subset of the hardware resources of a :term:`CPC` in classic mode (or ensemble mode), virtualized as a separate computer. LPARs cannot be created or deleted by the user; they can only be listed. LPAR resources are contained in CPC resources. LPAR resources only exist in CPCs that are in classic mode (or ensemble mode). CPCs in DPM mode have :term:`Partition` resources, instead. """ from __future__ import absolute_import import time import copy from ._manager import BaseManager from ._resource import BaseResource from ._exceptions import StatusTimeout from ._logging import logged_api_call from ._utils import matches_filters, divide_filter_args, RC_LOGICAL_PARTITION __all__ = ['LparManager', 'Lpar'] class LparManager(BaseManager): """ Manager providing access to the :term:`LPARs <LPAR>` in a particular :term:`CPC`. Derived from :class:`~zhmcclient.BaseManager`; see there for common methods and attributes. Objects of this class are not directly created by the user; they are accessible via the following instance variable of a :class:`~zhmcclient.Cpc` object (in DPM mode): * :attr:`~zhmcclient.Cpc.lpars` """ def __init__(self, cpc): # This function should not go into the docs. # Parameters: # cpc (:class:`~zhmcclient.Cpc`): # CPC defining the scope for this manager. # Resource properties that are supported as filter query parameters. 
# If the support for a resource property changes within the set of HMC # versions that support this type of resource, this list must be set up # for the version of the HMC this session is connected to. query_props = [ 'name', ] super(LparManager, self).__init__( resource_class=Lpar, class_name=RC_LOGICAL_PARTITION, session=cpc.manager.session, parent=cpc, base_uri='/api/logical-partitions', oid_prop='object-id', uri_prop='object-uri', name_prop='name', query_props=query_props) @property def cpc(self): """ :class:`~zhmcclient.Cpc`: :term:`CPC` defining the scope for this manager. """ return self._parent @logged_api_call def list(self, full_properties=False, filter_args=None): """ List the LPARs in this CPC. Authorization requirements: * Object-access permission to this CPC. * Object-access permission to any LPAR to be included in the result. Parameters: full_properties (bool): Controls whether the full set of resource properties should be retrieved, vs. only the short set as returned by the list operation. filter_args (dict): Filter arguments that narrow the list of returned resources to those that match the specified filter arguments. For details, see :ref:`Filtering`. `None` causes no filtering to happen, i.e. all resources are returned. Returns: : A list of :class:`~zhmcclient.Lpar` objects. 
Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ resource_obj_list = [] resource_obj = self._try_optimized_lookup(filter_args) if resource_obj: resource_obj_list.append(resource_obj) # It already has full properties else: query_parms, client_filters = divide_filter_args( self._query_props, filter_args) resources_name = 'logical-partitions' uri = '{}/{}{}'.format(self.cpc.uri, resources_name, query_parms) result = self.session.get(uri) if result: props_list = result[resources_name] for props in props_list: resource_obj = self.resource_class( manager=self, uri=props[self._uri_prop], name=props.get(self._name_prop, None), properties=props) if matches_filters(resource_obj, client_filters): resource_obj_list.append(resource_obj) if full_properties: resource_obj.pull_full_properties() self._name_uri_cache.update_from(resource_obj_list) return resource_obj_list class Lpar(BaseResource): """ Representation of an :term:`LPAR`. Derived from :class:`~zhmcclient.BaseResource`; see there for common methods and attributes. Objects of this class are not directly created by the user; they are returned from creation or list functions on their manager object (in this case, :class:`~zhmcclient.LparManager`). """ def __init__(self, manager, uri, name=None, properties=None): # This function should not go into the docs. # manager (:class:`~zhmcclient.LparManager`): # Manager object for this resource object. # uri (string): # Canonical URI path of the resource. # name (string): # Name of the resource. # properties (dict): # Properties to be set for this resource object. May be `None` or # empty. assert isinstance(manager, LparManager), \ "Lpar init: Expected manager type %s, got %s" % \ (LparManager, type(manager)) super(Lpar, self).__init__(manager, uri, name, properties) @logged_api_call def update_properties(self, properties): """ Update writeable properties of this LPAR. 
This method serializes with other methods that access or change properties on the same Python object. Authorization requirements: * Object-access permission to this LPAR. * Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access permission to the CPC of this LPAR. * Task permission for the "Change Object Definition" task. * Since HMC 2.14.1: If the "next-activation-profile-name" property is to be updated, task permission for the "Change Object Options" task or the "Customize/Delete Activation Profiles" task. * Before HMC 2.15.0: For an LPAR whose activation-mode is "zaware", task permission for the "Firmware Details" task. * Since HMC 2.15.0: If any of the "ssc-*" or "zaware-*" properties is to be updated, task permission for the "Firmware Details" task. * Since HMC 2.15.0: If any of the numbers of allocated or reserved cores is to be updated, task permission for the "Logical Processor Add" task. Parameters: properties (dict): New values for the properties to be updated. Properties not to be updated are omitted. Allowable properties are the properties with qualifier (w) in section 'Data model' in section 'Logical Partition object' in the :term:`HMC API` book. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ # pylint: disable=protected-access self.manager.session.post(self.uri, body=properties) # Attempts to change the 'name' property will be rejected by the HMC, # so we don't need to update the name-to-URI cache. assert self.manager._name_prop not in properties self.update_properties_local(copy.deepcopy(properties)) @logged_api_call def activate(self, wait_for_completion=True, operation_timeout=None, status_timeout=None, allow_status_exceptions=False, activation_profile_name=None, force=False): """ Activate (start) this LPAR, using the HMC operation "Activate Logical Partition". 
This HMC operation has deferred status behavior: If the asynchronous job on the HMC is complete, it takes a few seconds until the LPAR status has reached the desired value. If `wait_for_completion=True`, this method repeatedly checks the status of the LPAR after the HMC operation has completed, and waits until the status is in the desired state "not-operating" (which indicates that the LPAR is active but no operating system is running), or "operating", or if `allow_status_exceptions` was set additionally in the state "exceptions". Authorization requirements: * Object-access permission to this LPAR. * Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access permission to the CPC of this LPAR. * Task permission for the "Activate" task. Parameters: wait_for_completion (bool): Boolean controlling whether this method should wait for completion of the requested asynchronous HMC operation, as follows: * If `True`, this method will wait for completion of the asynchronous job performing the operation, and for the status becoming "not-operating" or "operating" (or in addition "exceptions", if `allow_status_exceptions` was set. * If `False`, this method will return immediately once the HMC has accepted the request to perform the operation. operation_timeout (:term:`number`): Timeout in seconds, for waiting for completion of the asynchronous job performing the operation. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.OperationTimeout` is raised. status_timeout (:term:`number`): Timeout in seconds, for waiting that the status of the LPAR has reached the desired status, after the HMC operation has completed. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. 
If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.StatusTimeout` is raised. allow_status_exceptions (bool): Boolean controlling whether LPAR status "exceptions" is considered an additional acceptable end status when `wait_for_completion` is set. activation_profile_name (:term:`string`): Name of the image :class:`ActivationProfile` to use for activation. `None` means that the activation profile specified in the `next-activation-profile-name` property of the LPAR is used. force (bool): Boolean controlling whether this operation is permitted when the LPAR is in the "operating" status. TBD: What will happen with the LPAR in that case (deactivated then activated? nothing?) Returns: `None` or :class:`~zhmcclient.Job`: If `wait_for_completion` is `True`, returns `None`. If `wait_for_completion` is `False`, returns a :class:`~zhmcclient.Job` object representing the asynchronously executing job on the HMC. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` :exc:`~zhmcclient.OperationTimeout`: The timeout expired while waiting for completion of the operation. :exc:`~zhmcclient.StatusTimeout`: The timeout expired while waiting for the desired LPAR status. """ body = {} if activation_profile_name: body['activation-profile-name'] = activation_profile_name if force: body['force'] = force result = self.manager.session.post( self.uri + '/operations/activate', body, wait_for_completion=wait_for_completion, operation_timeout=operation_timeout) if wait_for_completion: statuses = ["not-operating", "operating"] if allow_status_exceptions: statuses.append("exceptions") self.wait_for_status(statuses, status_timeout) return result @logged_api_call def deactivate(self, wait_for_completion=True, operation_timeout=None, status_timeout=None, allow_status_exceptions=False, force=False): """ De-activate (stop) this LPAR, using the HMC operation "Deactivate Logical Partition". 
This HMC operation has deferred status behavior: If the asynchronous job on the HMC is complete, it takes a few seconds until the LPAR status has reached the desired value. If `wait_for_completion=True`, this method repeatedly checks the status of the LPAR after the HMC operation has completed, and waits until the status is in the desired state "not-activated", or if `allow_status_exceptions` was set additionally in the state "exceptions". Authorization requirements: * Object-access permission to this LPAR. * Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access permission to the CPC of this LPAR. * Task permission for the "Deactivate" task. Parameters: wait_for_completion (bool): Boolean controlling whether this method should wait for completion of the requested asynchronous HMC operation, as follows: * If `True`, this method will wait for completion of the asynchronous job performing the operation, and for the status becoming "non-activated" (or in addition "exceptions", if `allow_status_exceptions` was set. * If `False`, this method will return immediately once the HMC has accepted the request to perform the operation. operation_timeout (:term:`number`): Timeout in seconds, for waiting for completion of the asynchronous job performing the operation. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.OperationTimeout` is raised. status_timeout (:term:`number`): Timeout in seconds, for waiting that the status of the LPAR has reached the desired status, after the HMC operation has completed. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.StatusTimeout` is raised. 
allow_status_exceptions (bool): Boolean controlling whether LPAR status "exceptions" is considered an additional acceptable end status when `wait_for_completion` is set. force (bool): Boolean controlling whether this operation is permitted when the LPAR is in the "operating" status. TBD: What will happen with the LPAR in that case (deactivated then activated? nothing?) Returns: `None` or :class:`~zhmcclient.Job`: If `wait_for_completion` is `True`, returns `None`. If `wait_for_completion` is `False`, returns a :class:`~zhmcclient.Job` object representing the asynchronously executing job on the HMC. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` :exc:`~zhmcclient.OperationTimeout`: The timeout expired while waiting for completion of the operation. :exc:`~zhmcclient.StatusTimeout`: The timeout expired while waiting for the desired LPAR status. """ body = {} if force: body['force'] = force result = self.manager.session.post( self.uri + '/operations/deactivate', body, wait_for_completion=wait_for_completion, operation_timeout=operation_timeout) if wait_for_completion: statuses = ["not-activated"] if allow_status_exceptions: statuses.append("exceptions") self.wait_for_status(statuses, status_timeout) return result @logged_api_call def scsi_load(self, load_address, wwpn, lun, load_parameter=None, disk_partition_id=None, operating_system_specific_load_parameters=None, boot_record_logical_block_address=None, force=False, wait_for_completion=True, operation_timeout=None, status_timeout=None, allow_status_exceptions=False, secure_boot=False): # pylint: disable=invalid-name """ Load (boot) this LPAR from a designated SCSI device, using the HMC operation "SCSI Load". This HMC operation has deferred status behavior: If the asynchronous job on the HMC is complete, it takes a few seconds until the LPAR status has reached the desired value. 
If `wait_for_completion=True`, this method repeatedly checks the status of the LPAR after the HMC operation has completed, and waits until the status is in the desired state "operating", or if `allow_status_exceptions` was set additionally in the state "exceptions". Authorization requirements: * Object-access permission to this LPAR. * Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access permission to the CPC of this LPAR. * Task permission for the "SCSI Load" task. Parameters: load_address (:term:`string`): Device number of the boot device. wwpn (:term:`string`): Worldwide port name (WWPN) of the target SCSI device to be used for this operation, in hexadecimal. lun (:term:`string`): Hexadecimal logical unit number (LUN) to be used for the SCSI Load. load_parameter (:term:`string`): Optional load control string. If empty string or `None`, it is not passed to the HMC. disk_partition_id (:term:`integer`): Optional disk-partition-id (also called the boot program selector) to be used for the SCSI Load. If `None`, it is not passed to the HMC. operating_system_specific_load_parameters (:term:`string`): Optional operating system specific load parameters to be used for the SCSI Load. boot_record_logical_block_address (:term:`string`): Optional hexadecimal boot record logical block address to be used for the SCSI Load. force (bool): Boolean controlling whether this operation is permitted when the LPAR is in the "operating" status. wait_for_completion (bool): Boolean controlling whether this method should wait for completion of the requested asynchronous HMC operation, as follows: * If `True`, this method will wait for completion of the asynchronous job performing the operation, and for the status becoming "operating" (or in addition "exceptions", if `allow_status_exceptions` was set. * If `False`, this method will return immediately once the HMC has accepted the request to perform the operation. 
      operation_timeout (:term:`number`):
        Timeout in seconds, for waiting for completion of the
        asynchronous job performing the operation. The special value 0
        means that no timeout is set. `None` means that the default
        async operation timeout of the session is used. If the timeout
        expires when `wait_for_completion=True`, a
        :exc:`~zhmcclient.OperationTimeout` is raised.

      status_timeout (:term:`number`):
        Timeout in seconds, for waiting that the status of the LPAR has
        reached the desired status, after the HMC operation has
        completed. The special value 0 means that no timeout is set.
        `None` means that the default async operation timeout of the
        session is used. If the timeout expires when
        `wait_for_completion=True`, a
        :exc:`~zhmcclient.StatusTimeout` is raised.

      allow_status_exceptions (bool):
        Boolean controlling whether LPAR status "exceptions" is
        considered an additional acceptable end status when
        `wait_for_completion` is set.

      secure_boot (bool):
        Boolean controlling whether the system checks the software
        signature of what is loaded against what the distributor signed
        it with. Requires z15 or later.

    Returns:

      `None` or :class:`~zhmcclient.Job`:

        If `wait_for_completion` is `True`, returns `None`.

        If `wait_for_completion` is `False`, returns a
        :class:`~zhmcclient.Job` object representing the asynchronously
        executing job on the HMC.

    Raises:

      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
      :exc:`~zhmcclient.OperationTimeout`: The timeout expired while
        waiting for completion of the operation.
      :exc:`~zhmcclient.StatusTimeout`: The timeout expired while
        waiting for the desired LPAR status.
""" body = {} body['load-address'] = load_address body['world-wide-port-name'] = wwpn body['logical-unit-number'] = lun if load_parameter: body['load-parameter'] = load_parameter if disk_partition_id is not None: body['disk-partition-id'] = disk_partition_id if operating_system_specific_load_parameters: body['operating-system-specific-load-parameters'] = \ operating_system_specific_load_parameters if boot_record_logical_block_address: body['boot-record-logical-block-address'] = \ boot_record_logical_block_address if force: body['force'] = force if secure_boot: body['secure-boot'] = secure_boot result = self.manager.session.post( self.uri + '/operations/scsi-load', body, wait_for_completion=wait_for_completion, operation_timeout=operation_timeout) if wait_for_completion: statuses = ["operating"] if allow_status_exceptions: statuses.append("exceptions") self.wait_for_status(statuses, status_timeout) return result @logged_api_call def scsi_dump(self, load_address, wwpn, lun, load_parameter=None, disk_partition_id=None, operating_system_specific_load_parameters=None, boot_record_logical_block_address=None, os_ipl_token=None, wait_for_completion=True, operation_timeout=None, status_timeout=None, allow_status_exceptions=False, force=False): # pylint: disable=invalid-name """ Load a standalone dump program from a designated SCSI device in this LPAR, using the HMC operation "SCSI Dump". This HMC operation has deferred status behavior: If the asynchronous job on the HMC is complete, it takes a few seconds until the LPAR status has reached the desired value. If `wait_for_completion=True`, this method repeatedly checks the status of the LPAR after the HMC operation has completed, and waits until the status is in the desired state "operating", or if `allow_status_exceptions` was set additionally in the state "exceptions". Authorization requirements: * Object-access permission to this LPAR. 
* Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access permission to the CPC of this LPAR. * Task permission for the "SCSI Dump" task. Parameters: load_address (:term:`string`): Device number of the boot device. wwpn (:term:`string`): Worldwide port name (WWPN) of the target SCSI device to be used for this operation, in hexadecimal. lun (:term:`string`): Hexadecimal logical unit number (LUN) to be used for the SCSI Load. load_parameter (:term:`string`): Optional load control string. If empty string or `None`, it is not passed to the HMC. disk_partition_id (:term:`integer`): Optional disk-partition-id (also called the boot program selector) to be used for the SCSI Load. If `None`, it is not passed to the HMC. operating_system_specific_load_parameters (:term:`string`): Optional operating system specific load parameters to be used for the SCSI Load. boot_record_logical_block_address (:term:`string`): Optional hexadecimal boot record logical block address to be used for the SCSI Load. os_ipl_token (:term:`string`): Optional hexadecimal value to be used for the SCSI dump. wait_for_completion (bool): Boolean controlling whether this method should wait for completion of the requested asynchronous HMC operation, as follows: * If `True`, this method will wait for completion of the asynchronous job performing the operation, and for the status becoming "operating" (or in addition "exceptions", if `allow_status_exceptions` was set. * If `False`, this method will return immediately once the HMC has accepted the request to perform the operation. operation_timeout (:term:`number`): Timeout in seconds, for waiting for completion of the asynchronous job performing the operation. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.OperationTimeout` is raised. 
status_timeout (:term:`number`): Timeout in seconds, for waiting that the status of the LPAR has reached the desired status, after the HMC operation has completed. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.StatusTimeout` is raised. allow_status_exceptions (bool): Boolean controlling whether LPAR status "exceptions" is considered an additional acceptable end status when `wait_for_completion` is set. force (bool): Boolean controlling whether this operation is permitted when the LPAR is in the "operating" status. Returns: `None` or :class:`~zhmcclient.Job`: If `wait_for_completion` is `True`, returns `None`. If `wait_for_completion` is `False`, returns a :class:`~zhmcclient.Job` object representing the asynchronously executing job on the HMC. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` :exc:`~zhmcclient.OperationTimeout`: The timeout expired while waiting for completion of the operation. :exc:`~zhmcclient.StatusTimeout`: The timeout expired while waiting for the desired LPAR status. 
""" body = {} body['load-address'] = load_address body['world-wide-port-name'] = wwpn body['logical-unit-number'] = lun if load_parameter: body['load-parameter'] = load_parameter if disk_partition_id is not None: body['disk-partition-id'] = disk_partition_id if operating_system_specific_load_parameters: body['operating-system-specific-load-parameters'] = \ operating_system_specific_load_parameters if boot_record_logical_block_address: body['boot-record-logical-block-address'] = \ boot_record_logical_block_address if os_ipl_token is not None: body['os-ipl-token'] = os_ipl_token if force: body['force'] = force result = self.manager.session.post( self.uri + '/operations/scsi-dump', body, wait_for_completion=wait_for_completion, operation_timeout=operation_timeout) if wait_for_completion: statuses = ["operating"] if allow_status_exceptions: statuses.append("exceptions") self.wait_for_status(statuses, status_timeout) return result @logged_api_call def load(self, load_address=None, load_parameter=None, clear_indicator=True, store_status_indicator=False, wait_for_completion=True, operation_timeout=None, status_timeout=None, allow_status_exceptions=False, force=False): """ Load (boot) this LPAR from a load address (boot device), using the HMC operation "Load Logical Partition". This HMC operation has deferred status behavior: If the asynchronous job on the HMC is complete, it takes a few seconds until the LPAR status has reached the desired value. If `wait_for_completion=True`, this method repeatedly checks the status of the LPAR after the HMC operation has completed, and waits until the status is in the desired state "operating", or if `allow_status_exceptions` was set additionally in the state "exceptions". Authorization requirements: * Object-access permission to this LPAR. * Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access permission to the CPC of this LPAR. * Task permission for the "Load" task. 
Parameters: load_address (:term:`string`): Device number of the boot device. Up to z13, this parameter is required. Starting with z14, this parameter is optional and defaults to the load address specified in the 'last-used-load-address' property of the Lpar. load_parameter (:term:`string`): Optional load control string. If empty string or `None`, it is not passed to the HMC. clear_indicator (bool): Optional boolean controlling whether the memory should be cleared before performing the load or not cleared. The default value is `True`. store_status_indicator (bool): Optional boolean controlling whether the status should be stored before performing the Load. The default value is `False`. wait_for_completion (bool): Boolean controlling whether this method should wait for completion of the requested asynchronous HMC operation, as follows: * If `True`, this method will wait for completion of the asynchronous job performing the operation, and for the status becoming "operating" (or in addition "exceptions", if `allow_status_exceptions` was set. * If `False`, this method will return immediately once the HMC has accepted the request to perform the operation. operation_timeout (:term:`number`): Timeout in seconds, for waiting for completion of the asynchronous job performing the operation. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.OperationTimeout` is raised. status_timeout (:term:`number`): Timeout in seconds, for waiting that the status of the LPAR has reached the desired status, after the HMC operation has completed. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.StatusTimeout` is raised. 
allow_status_exceptions (bool): Boolean controlling whether LPAR status "exceptions" is considered an additional acceptable end status when `wait_for_completion` is set. force (bool): Boolean controlling whether this operation is permitted when the LPAR is in the "operating" status. TBD: What will happen with the LPAR in that case (deactivated then activated? nothing?) Returns: `None` or :class:`~zhmcclient.Job`: If `wait_for_completion` is `True`, returns `None`. If `wait_for_completion` is `False`, returns a :class:`~zhmcclient.Job` object representing the asynchronously executing job on the HMC. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` :exc:`~zhmcclient.OperationTimeout`: The timeout expired while waiting for completion of the operation. :exc:`~zhmcclient.StatusTimeout`: The timeout expired while waiting for the desired LPAR status. """ body = {} if load_address: body['load-address'] = load_address if load_parameter: body['load-parameter'] = load_parameter if force: body['force'] = force if not clear_indicator: body['clear-indicator'] = clear_indicator if store_status_indicator: body['store-status-indicator'] = store_status_indicator result = self.manager.session.post( self.uri + '/operations/load', body, wait_for_completion=wait_for_completion, operation_timeout=operation_timeout) if wait_for_completion: statuses = ["operating"] if allow_status_exceptions: statuses.append("exceptions") self.wait_for_status(statuses, status_timeout) return result @logged_api_call def stop(self, wait_for_completion=True, operation_timeout=None, status_timeout=None, allow_status_exceptions=False): """ Stop this LPAR, using the HMC operation "Stop Logical Partition". The stop operation stops the processors from processing instructions. 
This HMC operation has deferred status behavior: If the asynchronous job on the HMC is complete, it takes a few seconds until the LPAR status has reached the desired value. If `wait_for_completion=True`, this method repeatedly checks the status of the LPAR after the HMC operation has completed, and waits until the status is in the desired state "not-operating", or if `allow_status_exceptions` was set additionally in the state "exceptions". Authorization requirements: * Object-access permission to this LPAR. * Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access permission to the CPC of this LPAR. * Task permission for the "Stop" task. Parameters: wait_for_completion (bool): Boolean controlling whether this method should wait for completion of the requested asynchronous HMC operation, as follows: * If `True`, this method will wait for completion of the asynchronous job performing the operation, and for the status becoming "not-operating" (or in addition "exceptions", if `allow_status_exceptions` was set. * If `False`, this method will return immediately once the HMC has accepted the request to perform the operation. operation_timeout (:term:`number`): Timeout in seconds, for waiting for completion of the asynchronous job performing the operation. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.OperationTimeout` is raised. status_timeout (:term:`number`): Timeout in seconds, for waiting that the status of the LPAR has reached the desired status, after the HMC operation has completed. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.StatusTimeout` is raised. 
allow_status_exceptions (bool): Boolean controlling whether LPAR status "exceptions" is considered an additional acceptable end status when `wait_for_completion` is set. Returns: `None` or :class:`~zhmcclient.Job`: If `wait_for_completion` is `True`, returns `None`. If `wait_for_completion` is `False`, returns a :class:`~zhmcclient.Job` object representing the asynchronously executing job on the HMC. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` :exc:`~zhmcclient.OperationTimeout`: The timeout expired while waiting for completion of the operation. :exc:`~zhmcclient.StatusTimeout`: The timeout expired while waiting for the desired LPAR status. """ body = None result = self.manager.session.post( self.uri + '/operations/stop', body, wait_for_completion=wait_for_completion, operation_timeout=operation_timeout) if wait_for_completion: statuses = ["not-operating"] if allow_status_exceptions: statuses.append("exceptions") self.wait_for_status(statuses, status_timeout) return result @logged_api_call def reset_clear(self, force=False, wait_for_completion=True, operation_timeout=None, status_timeout=None, allow_status_exceptions=False): """ Initialize this LPAR by clearing its pending interruptions, resetting its channel subsystem, and resetting its processors, using the HMC operation "Reset Clear". This HMC operation has deferred status behavior: If the asynchronous job on the HMC is complete, it takes a few seconds until the LPAR status has reached the desired value. If `wait_for_completion=True`, this method repeatedly checks the status of the LPAR after the HMC operation has completed, and waits until the status is in the desired state "operating", or if `allow_status_exceptions` was set additionally in the state "exceptions". Authorization requirements: * Object-access permission to this LPAR. 
* Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access permission to the CPC of this LPAR. * Task permission for the "Reset Clear" task. Parameters: force (bool): Boolean controlling whether this operation is permitted when the LPAR is in the "operating" status. The default is `False`. wait_for_completion (bool): Boolean controlling whether this method should wait for completion of the requested asynchronous HMC operation, as follows: * If `True`, this method will wait for completion of the asynchronous job performing the operation, and for the status becoming "operating" (or in addition "exceptions", if `allow_status_exceptions` was set. * If `False`, this method will return immediately once the HMC has accepted the request to perform the operation. operation_timeout (:term:`number`): Timeout in seconds, for waiting for completion of the asynchronous job performing the operation. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.OperationTimeout` is raised. status_timeout (:term:`number`): Timeout in seconds, for waiting that the status of the LPAR has reached the desired status, after the HMC operation has completed. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.StatusTimeout` is raised. allow_status_exceptions (bool): Boolean controlling whether LPAR status "exceptions" is considered an additional acceptable end status when `wait_for_completion` is set. Returns: `None` or :class:`~zhmcclient.Job`: If `wait_for_completion` is `True`, returns `None`. If `wait_for_completion` is `False`, returns a :class:`~zhmcclient.Job` object representing the asynchronously executing job on the HMC. 
Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` :exc:`~zhmcclient.OperationTimeout`: The timeout expired while waiting for completion of the operation. :exc:`~zhmcclient.StatusTimeout`: The timeout expired while waiting for the desired LPAR status. """ body = {} if force: body['force'] = force result = self.manager.session.post( self.uri + '/operations/reset-clear', body, wait_for_completion=wait_for_completion, operation_timeout=operation_timeout) if wait_for_completion: statuses = ["operating"] if allow_status_exceptions: statuses.append("exceptions") self.wait_for_status(statuses, status_timeout) return result @logged_api_call def open_os_message_channel(self, include_refresh_messages=True): """ Open a JMS message channel to this LPAR's operating system, returning the string "topic" representing the message channel. Authorization requirements: * Object-access permission to this Partition. * Task permission to the "Operating System Messages" task at least in view-only mode. Parameters: include_refresh_messages (bool): Boolean controlling whether refresh operating systems messages should be sent, as follows: * If `True`, refresh messages will be recieved when the user connects to the topic. The default. * If `False`, refresh messages will not be recieved when the user connects to the topic. Returns: :term:`string`: Returns a string representing the os-message-notification JMS topic. The user can connect to this topic to start the flow of operating system messages. 
Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ body = {'include-refresh-messages': include_refresh_messages} result = self.manager.session.post( self.uri + '/operations/open-os-message-channel', body) return result['topic-name'] @logged_api_call def send_os_command(self, os_command_text, is_priority=False): """ Send a command to the operating system running in this LPAR. Authorization requirements: * Object-access permission to this Partition. * Task permission to the "Operating System Messages" task in modification mode. Parameters: os_command_text (string): The text of the operating system command. is_priority (bool): Boolean controlling whether this is a priority operating system command, as follows: * If `True`, this message is treated as a priority operating system command. * If `False`, this message is not treated as a priority operating system command. The default. Returns: None Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ body = {'is-priority': is_priority, 'operating-system-command-text': os_command_text} self.manager.session.post( self.uri + '/operations/send-os-cmd', body) @logged_api_call def psw_restart(self, wait_for_completion=True, operation_timeout=None, status_timeout=None, allow_status_exceptions=False): """ Restart this LPAR, using the HMC operation "PSW Restart". This HMC operation has deferred status behavior: If the asynchronous job on the HMC is complete, it takes a few seconds until the LPAR status has reached the desired value. If `wait_for_completion=True`, this method repeatedly checks the status of the LPAR after the HMC operation has completed, and waits until the status is in the desired state "operating", or if `allow_status_exceptions` was set additionally in the state "exceptions". Authorization requirements: * Object-access permission to this LPAR. 
* Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access permission to the CPC of this LPAR. * Task permission for the "PSW Restart" task. Parameters: wait_for_completion (bool): Boolean controlling whether this method should wait for completion of the requested asynchronous HMC operation, as follows: * If `True`, this method will wait for completion of the asynchronous job performing the operation, and for the status becoming "operating" (or in addition "exceptions", if `allow_status_exceptions` was set. * If `False`, this method will return immediately once the HMC has accepted the request to perform the operation. operation_timeout (:term:`number`): Timeout in seconds, for waiting for completion of the asynchronous job performing the operation. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.OperationTimeout` is raised. status_timeout (:term:`number`): Timeout in seconds, for waiting that the status of the LPAR has reached the desired status, after the HMC operation has completed. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.StatusTimeout` is raised. allow_status_exceptions (bool): Boolean controlling whether LPAR status "exceptions" is considered an additional acceptable end status when `wait_for_completion` is set. Returns: `None` or :class:`~zhmcclient.Job`: If `wait_for_completion` is `True`, returns `None`. If `wait_for_completion` is `False`, returns a :class:`~zhmcclient.Job` object representing the asynchronously executing job on the HMC. 
Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` :exc:`~zhmcclient.OperationTimeout`: The timeout expired while waiting for completion of the operation. :exc:`~zhmcclient.StatusTimeout`: The timeout expired while waiting for the desired LPAR status. """ body = {} result = self.manager.session.post( self.uri + '/operations/psw-restart', body, wait_for_completion=wait_for_completion, operation_timeout=operation_timeout) if wait_for_completion: statuses = ["operating"] if allow_status_exceptions: statuses.append("exceptions") self.wait_for_status(statuses, status_timeout) return result @logged_api_call def wait_for_status(self, status, status_timeout=None): """ Wait until the status of this LPAR has a desired value. Parameters: status (:term:`string` or iterable of :term:`string`): Desired LPAR status or set of status values to reach; one or more of the following values: * ``"not-activated"`` - The LPAR is not active. * ``"not-operating"`` - The LPAR is active but no operating system is running in the LPAR. * ``"operating"`` - The LPAR is active and an operating system is running in the LPAR. * ``"exceptions"`` - The LPAR or its CPC has one or more unusual conditions. Note that the description of LPAR status values in the :term:`HMC API` book (as of its version 2.13.1) is partly confusing. status_timeout (:term:`number`): Timeout in seconds, for waiting that the status of the LPAR has reached one of the desired status values. The special value 0 means that no timeout is set. `None` means that the default status timeout will be used. If the timeout expires , a :exc:`~zhmcclient.StatusTimeout` is raised. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` :exc:`~zhmcclient.StatusTimeout`: The timeout expired while waiting for the desired LPAR status. 
""" if status_timeout is None: status_timeout = \ self.manager.session.retry_timeout_config.status_timeout if status_timeout > 0: end_time = time.time() + status_timeout if isinstance(status, (list, tuple)): statuses = status else: statuses = [status] while True: # Fastest way to get actual status value: lpars = self.manager.cpc.lpars.list( filter_args={'name': self.name}) assert len(lpars) == 1 this_lpar = lpars[0] actual_status = this_lpar.get_property('status') if actual_status in statuses: return if status_timeout > 0 and time.time() > end_time: raise StatusTimeout( "Waiting for LPAR {} to reach status(es) '{}' timed out " "after {} s - current status is '{}'". format(self.name, statuses, status_timeout, actual_status), actual_status, statuses, status_timeout) time.sleep(1) # Avoid hot spin loop
nilq/baby-python
python
# --------------------------------------------------------------------- # Project "Track 3D-Objects Over Time" # Copyright (C) 2020, Dr. Antje Muntzinger / Dr. Andreas Haja. # # Purpose of this file : Parameter file for tracking # # You should have received a copy of the Udacity license together with this program. # # https://www.udacity.com/course/self-driving-car-engineer-nanodegree--nd013 # ---------------------------------------------------------------------- # # general parameters dim_state = 6 # process model dimension # Kalman filter parameters (Step 1) dt = 0.1 # time increment q=3 # process noise variable for Kalman filter Q # track management parameters (Step 2) confirmed_threshold = 0.8 # track score threshold to switch from 'tentative' to 'confirmed' delete_threshold = 0.6 # track score threshold to delete confirmed tracks window = 6 # number of frames for track score calculation max_P = 3**2 # delete track if covariance of px or py bigger than this sigma_p44 = 50 # initial setting for estimation error covariance P entry for vx sigma_p55 = 50 # initial setting for estimation error covariance P entry for vy sigma_p66 = 5 # initial setting for estimation error covariance P entry for vz weight_dim = 0.1 # sliding average parameter for dimension estimation # association parameters (Step 3) gating_threshold = 0.995 # percentage of correct measurements that shall lie inside gate gating_threshold_lidar = 0.995 # measurement parameters (Step 4) sigma_lidar_x = 0.1 # measurement noise standard deviation for lidar x position sigma_lidar_y = 0.1 # measurement noise standard deviation for lidar y position sigma_lidar_z = 0.1 # measurement noise standard deviation for lidar z position sigma_cam_i = 5 # measurement noise standard deviation for image i coordinate sigma_cam_j = 5 # measurement noise standard deviation for image j coordinate
nilq/baby-python
python
# -*- coding: utf-8 -*-
"""Demo of a multi-threaded MQTT consumer.

Two worker threads drain per-topic queues ('position' and 'nursecall')
that are filled by the paho-mqtt network callbacks.
"""
# ganben
import queue
import threading
import time

import paho.mqtt.client as mqtt

# NOTE(review): queue.Queue is already thread-safe; the explicit lock is
# kept only to preserve the original synchronization pattern.
queueLock = threading.Lock()
posiQueue = queue.Queue(100)  # messages from the 'position' topic
callQueue = queue.Queue(100)  # messages from the 'nursecall' topic
threads = []
threadID = 1


def on_connect(client, userdata, flags, rc):
    """Subscribe to the handled topics once the broker accepts us.

    Fixed: paho-mqtt invokes ``on_connect(client, userdata, flags, rc)``;
    the original callback was missing the ``flags`` parameter and would
    have raised a TypeError when the broker connection succeeded.
    """
    client.subscribe('position')
    client.subscribe('nursecall')  # maybe here can be configured
    print('Connected with result code {0}'.format(str(rc)))


def on_message(client, userdata, msg):
    """Route an incoming publish to the queue for its topic."""
    print('Topic={0}, Message={1}'.format(msg.topic, str(msg.payload)))
    if msg.topic == 'position':
        with queueLock:
            posiQueue.put(str(msg.payload))
    elif msg.topic == 'nursecall':
        with queueLock:
            callQueue.put(str(msg.payload))


class MqttListener(threading.Thread):
    """Worker thread that drains one message queue via process_data()."""

    def __init__(self, threadID, name, q):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.q = q

    def run(self):
        process_data(self.name, self.q)
        print('exiting ... {0}'.format(self.name))


def process_data(threadName, q):
    """Poll *q* forever, printing every message that arrives.

    Backs off for 5 s when the queue is empty to avoid a hot spin loop.
    """
    while True:
        queueLock.acquire()
        if not q.empty():
            data = q.get()
            queueLock.release()
            print('get {0} by {1}'.format(data, threadName))
        else:
            queueLock.release()
            time.sleep(5)  # queue empty - back off before polling again


# create worker threads, one per topic queue
thread1 = MqttListener(1, 'thread1', posiQueue)
thread1.start()
threads.append(thread1)
thread2 = MqttListener(2, 'thread2', callQueue)
thread2.start()
threads.append(thread2)

# set up the MQTT client and block on its network loop
client = mqtt.Client('server-listener')
client.on_connect = on_connect
client.on_message = on_message
client.connect('192.168.1.100', 1883, 60)
client.loop_forever()
nilq/baby-python
python
from flask_plugin import Plugin
from flask import redirect, url_for, abort

plugin = Plugin()


@plugin.route('/say/<string:name>', methods=['GET'])
def say(name: str):
    """Greet the caller by the name taken from the URL."""
    return 'Hello ' + name


@plugin.route('/admin', methods=['GET'])
def hello2admin():
    """Redirect the admin endpoint to the greeting for 'Doge'."""
    destination = url_for('.say', name='Doge')
    return redirect(destination)


@plugin.route('/403', methods=['GET'])
def test_forbidden():
    """Always abort with HTTP 403 so the handler below can be exercised."""
    abort(403)


@plugin.errorhandler(403)
def forbidden(exc):
    """Plugin-local handler for HTTP 403 responses."""
    return 'My Forbidden!', 403


@plugin.before_request
def before_request():
    """Hook invoked before every request dispatched to this plugin."""
    print('Handled before request.')
nilq/baby-python
python
import os

from chr.core import chr_compile_module

# Compile every CHR source file in this package's directory, in place.
# verbose=False keeps the build quiet; overwrite=True regenerates any
# previously compiled output.
chr_compile_module(os.path.dirname(__file__), verbose=False, overwrite=True)
nilq/baby-python
python
from django.test import TestCase

# Create your tests here.
from selenium import webdriver
from selenium.webdriver.common.keys import Keys


class MultiSelectFunctionalTests(TestCase):
    """Browser-level tests for the Selectize.js multi-select widget."""

    base_url = 'http://localhost:8000/tests'
    fixtures = ['publications']

    def setUp(self):
        """Start a Firefox driver with a 10 s implicit wait."""
        self.driver = webdriver.Firefox()
        self.driver.implicitly_wait(10)

    def tearDown(self):
        """Shut the browser down completely.

        FIX: quit() ends the whole WebDriver session; the original
        close() only closed the current window and leaked the browser
        process between tests.
        """
        self.driver.quit()

    def testAddArticlePage(self):
        """As a visitor to the site, when I load the articles page, I see
        the publications in Selectize.js multiselect theme."""
        self.driver.get('{0}{1}'.format(self.base_url, '/articles/'))
        self.assertIn("Headline:",
                      self.driver.find_element_by_tag_name('body').text)
nilq/baby-python
python
import tkinter as tkinter
from tkinter import filedialog as FileDialog
from Properties import Properties


class Main():
    """Minimal notepad built on tkinter: new / open / save / save-as."""

    def __init__(self):
        self.Window = tkinter.Tk()
        self.Properties = Properties()
        self.setTitle('Bloc Note')
        self.setSize(self.Properties.x, self.Properties.y)

        # Editor area: a Text widget with an attached vertical scrollbar.
        self.Frame = tkinter.Frame(self.Window).pack(fill="x", padx=1, pady=1)
        self.TextScroll = tkinter.Scrollbar(self.Frame)
        self.Text = tkinter.Text(self.Frame,
            width=97,
            height=25,
            font=("Helvetica", self.Properties.TextSize, "bold"),
            selectbackground="gray",
            selectforeground="black",
            undo=True,
            yscrollcommand=self.TextScroll.set
        )
        self.TextScroll.config(command=self.Text.yview)
        self.Text.pack()

        # "File" menu with the usual entries.
        self.Menu = tkinter.Menu(self.Window)
        self.Window.config(menu=self.Menu)
        self.Files = tkinter.Menu(self.Window, tearoff=False)
        self.Menu.add_cascade(label='File', menu=self.Files)
        self.Files.add_command(label='New File', command=self.newFile)
        self.Files.add_command(label='Open File', command=self.openFile)
        self.Files.add_command(label='Save File', command=self.saveFile)
        self.Files.add_command(label='Save As', command=self.saveAsFile)
        self.Files.add_command(label='Exit', command=self.Window.quit)

    def setTitle(self, title):
        """Set the window title (parameter renamed from the builtin 'str')."""
        self.Window.title(title)

    def setSize(self, x, y):
        """Resize the window and remember the size in Properties."""
        self.Window.geometry(f'{x}x{y}')
        self.Properties.x = x
        self.Properties.y = y

    def newFile(self):
        """Clear the editor and forget the current file."""
        self.Text.delete('1.0', 'end')
        self.setTitle('New File - Bloc Note')
        self.Properties.File = False

    def openFile(self):
        """Prompt for a file and load its contents into the editor."""
        TextFile = FileDialog.askopenfilename(defaultextension=".*", title="Open File")
        if TextFile:
            self.Text.delete('1.0', 'end')
            self.Properties.File = TextFile
            self.setTitle(f'{TextFile} - Bloc Note')
            # FIX: context manager guarantees the handle is closed even if
            # reading fails (the original leaked it on error).
            with open(TextFile, 'r') as handle:
                for line in handle:
                    self.Text.insert('end', line)

    def saveFile(self):
        """Save to the current file, or fall back to Save As."""
        if self.Properties.File:
            with open(self.Properties.File, 'w') as handle:
                handle.write(self.Text.get('1.0', 'end'))
        else:
            self.saveAsFile()

    def saveAsFile(self):
        """Prompt for a destination and write the editor contents there."""
        TextFile = FileDialog.asksaveasfilename(defaultextension=".*", title="Save As")
        if TextFile:
            self.Properties.File = TextFile
            self.setTitle(f'{TextFile} - Bloc Note')
            with open(TextFile, 'w') as handle:
                handle.write(self.Text.get('1.0', 'end'))


# NOTE: rebinding the name 'Main' to the instance shadows the class; kept
# as-is for backward compatibility with the original script.
Main = Main()
Main.Window.mainloop()
nilq/baby-python
python
import unittest
from unittest.mock import MagicMock
import builtins

# Stand-in for the MicroPython `micropython` module: const() simply
# returns its argument, matching its CPython-side behaviour.
class micropython:
    def const(self, number):
        return number

class TestCase(unittest.TestCase):
    # Base test case that patches builtins.__import__ so MicroPython
    # firmware code can be imported and exercised under CPython:
    #   * uasyncio           -> stdlib asyncio
    #   * machine, bluetooth -> MagicMock objects
    #   * micropython        -> the lightweight stub above
    #   * names with '___'   -> redirected into the devel package
    orig_import = __import__
    module_mock = MagicMock()

    @classmethod
    def import_mock(cls, name, *args):
        if name == "uasyncio":
            return cls.orig_import("asyncio")
        if name in ("machine", "bluetooth"):
            print(f"{name} mocked by MagicMoc")
            # NOTE(review): calling the mock returns a fresh child mock on
            # every import; if a single shared module object is intended,
            # this should probably be `cls.module_mock` -- confirm.
            return cls.module_mock()
        if name in ("micropython",):
            print(f"{name} mocked by file")
            return micropython()
        if ("___") in name:
            print(f"hugo_test {name} redirected to devel.{name}")
            return cls.orig_import("devel." + name, *args)
        return cls.orig_import(name, *args)

# Install the hook globally as soon as this module is imported.
builtins.__import__ = TestCase.import_mock
nilq/baby-python
python
# http://github.com/timestocome

# adapted from http://natureofcode.com/book/chapter-9-the-evolution-of-code/

# 3 letter match ~ 20 generations
# 4 letters ~ 120 generations


import string as st
import re
import numpy as np
import copy


bots = []        # current generation of candidate strings
new_bots = []    # next generation, built two children at a time
scores = []      # fitness score per bot

n_letters = 4    # length of the target string
n_bots = 100     # population size
target = ['c', 'a', 't', 's']


# Symbols bots may draw letters from.
# FIX: the original table omitted lower-case 'w', which made any target
# containing 'w' impossible to evolve.
world = ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
         'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
         'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
         'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
         ' ', '.', ',', '?')


def create_random_string(length):
    """Return a list of `length` symbols drawn uniformly from world."""
    return [world[np.random.randint(len(world))] for _ in range(length)]


def possibilities(length):
    """Number of distinct strings of `length` symbols over world."""
    return np.power(len(world), length)


def init_generation():
    """Fill the global bots list with n_bots random candidates."""
    for _ in range(n_bots):
        bots.append(create_random_string(n_letters))


def fitness(bot):
    """Score = number of positions where bot matches target exactly."""
    score = 0
    for i in range(n_letters):
        if bot[i] == target[i]:
            score += 1
    return score


def choose_fittest():
    """Pick n_bots parents with probability proportional to score + 1.

    Every bot gets one raffle ticket per fitness point plus one, so even
    zero-score bots keep a small chance (preserves diversity).  The
    original comment claimed zero scorers were removed; the code has
    always included them, so the comment is corrected here.
    """
    candidates_array = []
    for i in range(n_bots):
        for j in range(int(scores[i]) + 1):
            candidates_array.append(i)   # one entry per fitness point (+1)

    # shuffle the raffle tickets and keep the first n_bots
    np.random.shuffle(candidates_array)
    candidates_array = candidates_array[0:n_bots]

    # collect and shuffle the corresponding parent bots
    parents = [bots[idx] for idx in candidates_array]
    np.random.shuffle(parents)
    return parents


def mate_parents(parents):
    """Cross the first two parents letter-by-letter into two children.

    Child 1 takes mom's even-indexed letters and dad's odd ones; child 2
    takes the complement.  Both children are appended to new_bots and the
    two used parents are removed from the pool.
    """
    mom = parents[0]
    dad = parents[1]

    child1 = []
    child2 = []
    for i in range(n_letters):
        if i % 2 == 0:
            child1.append(mom[i])
            child2.append(dad[i])
        else:
            child1.append(dad[i])
            child2.append(mom[i])

    new_bots.append(child1)
    new_bots.append(child2)

    parents.pop(0)   # remove mom
    parents.pop(0)   # remove dad


def mutation(b):
    """Replace one random letter of b with a random world symbol."""
    location = np.random.randint(n_letters)
    b[location] = world[np.random.randint(len(world))]
    return b


##########################################################################
# FIX: guarded so importing this module no longer runs the evolution loop.
if __name__ == '__main__':

    possible = possibilities(n_letters)
    # FIX: the original message hard-coded "length 5" while n_letters is 4.
    print('%ld combinations of length %d can be formed from world possibilities'
          % (possible, n_letters))

    # start with a random collection of bots
    init_generation()

    generation = 0
    best_score = -1
    goal = 0
    scores = np.zeros(n_bots)

    while goal == 0:

        # score bots; declare victory on an exact match
        for b in range(n_bots):
            s = fitness(bots[b])
            scores[b] = s

            if s == n_letters:
                print('Winner')
                print(bots[b], scores[b])
                goal = 1
                print('--------------------')
                for z in range(n_bots):
                    print(bots[z])
                break

            if s > best_score:
                best_score = s

        # choose fittest
        parents = choose_fittest()

        # mate fittest pairs into the next generation
        new_bots = []
        for b in range(n_bots // 2):
            mate_parents(parents)

        # next generation replaces the current one
        bots = copy.copy(new_bots)
        new_bots = []

        # random mutations (~1 in 20 bots per generation)
        for b in range(n_bots):
            if np.random.randint(20) == 14:
                bots[b] = mutation(bots[b])

        generation += 1
        print('Generation %d Best score %d ' % (generation, best_score))
nilq/baby-python
python
#encoding:utf-8 subreddit = 'CryptoMoonShots' t_channel = '@r_CryptoMoonShot' def send_post(submission, r2t): return r2t.send_simple(submission)
nilq/baby-python
python
from seagulls.engine import ActiveSceneClient


class FakeGameScene:
    """Minimal stand-in for a game scene; identity is all the tests need."""
    pass


class TestActiveSceneClient:
    """Unit tests for ActiveSceneClient's apply/set_active_scene contract."""

    def test_apply(self) -> None:
        # apply() must invoke the callback with the scene supplied at
        # construction time.
        fake_scene = FakeGameScene()

        def callback(scene: FakeGameScene) -> None:
            assert scene == fake_scene

        client = ActiveSceneClient(fake_scene)  # type: ignore
        client.apply(callback)  # type: ignore

    def test_set_active_scene(self) -> None:
        # After set_active_scene(), apply() must use the new scene rather
        # than the one given to the constructor.
        initial_fake_scene = FakeGameScene()
        second_fake_scene = FakeGameScene()

        def callback(scene: FakeGameScene) -> None:
            assert scene == second_fake_scene

        client = ActiveSceneClient(initial_fake_scene)  # type: ignore
        client.set_active_scene(second_fake_scene)  # type: ignore
        client.apply(callback)  # type: ignore
nilq/baby-python
python
import asyncio
import logging
from struct import Struct
from time import time

logger = logging.getLogger(__name__)


class CyKitClient:
    """Async client that streams fixed-size EEG sample frames over TCP.

    Each frame is `channels` big-endian float32 values; iterate the
    client with `async for` to receive one tuple of floats per sample.
    """

    def __init__(self, reader, writer, channels=14, sample_rate=128):
        self.sample_rate = sample_rate
        self._reader, self._writer = reader, writer
        # '>' = big-endian; one float per channel per frame.
        self._struct = Struct('>' + 'f' * channels)

    def stop(self):
        # Close the outgoing half of the connection; safe to call twice.
        if self._writer is not None:
            self._writer.close()

    def __aiter__(self):
        return self

    async def __anext__(self):
        # NOTE(review): raises ConnectionError rather than
        # StopAsyncIteration on EOF, so `async for` propagates the error
        # instead of ending quietly -- appears intentional; confirm.
        if self._reader.at_eof():
            raise ConnectionError("No more data from peer")

        data = await self._reader.readexactly(self._struct.size)
        if not data:
            raise ConnectionError("No more data from peer")
        return self._struct.unpack(data)

    async def _initialize(self, good_packet_threshold=64):
        # Warm-up: read and discard frames until `good_packet_threshold`
        # of them (cumulative, not consecutive) arrived at least half a
        # sample period after their predecessor; returns self when done.
        last_time = time()
        good_packets = 0
        while good_packets < good_packet_threshold:
            await self._reader.readexactly(self._struct.size)
            cur_time = time()
            delta = cur_time - last_time
            if delta > (1.0 / self.sample_rate) / 2:
                good_packets += 1
                logger.debug("Good packet: %.4f ms", delta * 1000.0)
            else:
                logger.debug("Bad packet: %.4f ms", delta * 1000.0)
            last_time = cur_time
        return self


async def connect_to_cykit(ip, port, timeout=3) -> CyKitClient:
    # Open a TCP connection (bounded by `timeout` seconds) and hand back
    # a client whose stream timing has settled.
    fut = asyncio.open_connection(ip, port)
    reader, writer = await asyncio.wait_for(fut, timeout)

    client = CyKitClient(reader, writer)
    return await client._initialize()
nilq/baby-python
python
__author__ = "Anand Krishnan Prakash"
__email__ = "akprakash@lbl.gov"

import argparse
import datetime
import sys

import pandas as pd
import pymortar


def get_error_message(x, resample_minutes=60):
    """Format a human-readable fault message for one resampled row.

    `x` is a pandas Series named by the interval-start timestamp and
    carrying site/room/zone plus heating and cooling duty percentages.
    """
    dt_format = "%Y-%m-%d %H:%M:%S"
    st = x.name
    st_str = st.strftime(dt_format)
    et_str = (st + datetime.timedelta(minutes=resample_minutes)).strftime(dt_format)
    site = x.site
    room = x.room
    zone = x.zone
    heat_percent = round(x.heat_percent, 2)
    cool_percent = round(x.cool_percent, 2)
    msg = "From {0} to {1}, zone: \'{2}\' in room: \'{3}\' at site: \'{4}\', was heating for {5}% of the time and cooling for {6}% of the time".format(
        st_str,
        et_str,
        zone,
        room,
        site,
        heat_percent,
        cool_percent
    )
    return msg


def tstat_zone_analysis(client, resample_minutes, start_time, end_time):
    """Report zones that both heat and cool within the same interval.

    Fetches thermostat state/temperature/setpoint streams via pymortar,
    resamples each thermostat into `resample_minutes` buckets and collects
    every bucket in which a zone spent time in both heating and cooling
    states.  Returns a DataFrame of offending intervals (empty if none).
    """
    st = start_time.strftime("%Y-%m-%dT%H:%M:%SZ")
    et = end_time.strftime("%Y-%m-%dT%H:%M:%SZ")
    print(st)
    print(et)

    # Brick query binding each thermostat to its room, zone and points.
    tstat_query = """
        SELECT ?tstat ?room ?zone ?state ?temp ?hsp ?csp
        WHERE {
            ?tstat bf:hasLocation ?room .
            ?zone bf:hasPart ?room .
            ?tstat bf:hasPoint ?state .
            ?tstat bf:hasPoint ?temp .
            ?tstat bf:hasPoint ?hsp .
            ?tstat bf:hasPoint ?csp .
            ?zone rdf:type/rdfs:subClassOf* brick:Zone .
            ?tstat rdf:type/rdfs:subClassOf* brick:Thermostat .
            ?state rdf:type/rdfs:subClassOf* brick:Thermostat_Status .
            ?temp rdf:type/rdfs:subClassOf* brick:Temperature_Sensor .
            ?hsp rdf:type/rdfs:subClassOf* brick:Supply_Air_Temperature_Heating_Setpoint .
            ?csp rdf:type/rdfs:subClassOf* brick:Supply_Air_Temperature_Cooling_Setpoint .
        };
    """

    qualify_response = client.qualify([tstat_query])
    if qualify_response.error != "":
        print("ERROR: ", qualify_response.error)
        # FIX: the original called os.exit(1), which does not exist (and
        # `os` was never imported); sys.exit is the intended call.
        sys.exit(1)

    print("Running on {0} sites".format(len(qualify_response.sites)))

    tstat_view = pymortar.View(
        name="tstat_points",
        sites=qualify_response.sites,
        definition=tstat_query,
    )

    tstat_streams = pymortar.DataFrame(
        name="thermostat_data",
        aggregation=pymortar.MAX,
        window="1m",
        timeseries=[
            pymortar.Timeseries(
                view="tstat_points",
                dataVars=["?state", "?temp", "?hsp", "?csp"]
            )
        ]
    )

    time_params = pymortar.TimeParams(
        start=st,
        end=et
    )

    request = pymortar.FetchRequest(
        sites=qualify_response.sites,  # from our call to Qualify
        views=[
            tstat_view
        ],
        dataFrames=[
            tstat_streams
        ],
        time=time_params
    )

    result = client.fetch(request)
    tstat_df = result['thermostat_data']

    tstats = [tstat[0] for tstat in result.query("select tstat from tstat_points")]

    error_df_list = []
    for tstat in tstats:
        q = """
            SELECT state_uuid, temp_uuid, hsp_uuid, csp_uuid, room, zone, site
            FROM tstat_points
            WHERE tstat = "{0}";
        """.format(tstat)
        res = result.query(q)
        if len(res) == 0:
            continue

        state_col, iat_col, hsp_col, csp_col, room, zone, site = res[0]
        df = tstat_df[[state_col, iat_col, hsp_col, csp_col]]
        df.columns = ['state', 'iat', 'hsp', 'csp']

        df2 = pd.DataFrame()
        resample_time = '{0}T'.format(resample_minutes)
        df2['min_hsp'] = df['hsp'].resample(resample_time).min()
        df2['min_csp'] = df['csp'].resample(resample_time).min()
        df2['max_hsp'] = df['hsp'].resample(resample_time).max()
        df2['max_csp'] = df['csp'].resample(resample_time).max()
        # With 1-minute samples, the count of matching states divided by
        # the bucket length is a duty-cycle percentage.  States 1/4 are
        # treated as heating and 2/5 as cooling -- presumably two stages
        # each; confirm against the site's thermostat status encoding.
        df2['heat_percent'] = df['state'].resample(resample_time).apply(
            lambda x: ((x == 1).sum() + (x == 4).sum()) / resample_minutes * 100)
        df2['cool_percent'] = df['state'].resample(resample_time).apply(
            lambda x: ((x == 2).sum() + (x == 5).sum()) / resample_minutes * 100)
        df2['tstat'] = tstat
        df2['room'] = room.split('#')[1]
        df2['zone'] = zone.split('#')[1]
        df2['site'] = site

        df2['both_heat_cool'] = False
        df2.loc[((df2.heat_percent > 0) & (df2.cool_percent > 0)), 'both_heat_cool'] = True
        if not df2[df2['both_heat_cool'] == True].empty:
            error_df_list.append(df2[df2['both_heat_cool'] == True])

    if len(error_df_list) > 0:
        error_df = pd.concat(error_df_list, axis=0)[
            ['site', 'zone', 'room', 'heat_percent', 'cool_percent',
             'min_hsp', 'min_csp', 'max_hsp', 'max_csp']]
        error_df.index.name = 'time'
        error_msgs = error_df.apply(lambda x: get_error_message(x), axis=1).values
        for msg in error_msgs:
            print(msg)
        return error_df
    else:
        return pd.DataFrame()


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='configure app parameters')
    parser.add_argument("-time_interval", help="length of time interval (in minutes) when you want to check if a zone is both heating and cooling", type=int, default=60, nargs='?')
    parser.add_argument("-st", help="start time for analysis in yyyy-mm-ddThh:mm:ss format", type=str, default="2018-12-10T00:00:00", nargs='?')
    parser.add_argument("-et", help="end time for analysis in yyyy-mm-ddThh:mm:ss format", type=str, default="2019-01-01T00:00:00", nargs='?')
    parser.add_argument("-filename", help="filename to store result of analysis", type=str, default="heat_and_cool_same_period.csv", nargs='?')

    # Parse once instead of re-running parse_args() for every option.
    args = parser.parse_args()
    resample_minutes = args.time_interval
    try:
        start_time = datetime.datetime.strptime(args.st, "%Y-%m-%dT%H:%M:%S")
        end_time = datetime.datetime.strptime(args.et, "%Y-%m-%dT%H:%M:%S")
    except Exception as e:
        raise Exception("Incorrect format for st or et. Use yyyy-mm-ddThh:mm:ss") from e
    filename = args.filename

    client = pymortar.Client({})
    error_df = tstat_zone_analysis(client=client, resample_minutes=resample_minutes, start_time=start_time, end_time=end_time)
    if not error_df.empty:
        print("Writing results to {0}".format(filename))
        error_df.to_csv(filename)
    else:
        print("No zones match the condition")
nilq/baby-python
python
# Данные для входа LOGIN = "логин" PASSWORD = "пароль" # Некоторые параметры API MAX_COUNT = 100 # Максимальное кол-во записей, которое можно получить по *.wall.get, деленное на 25. LIMIT = 500 AGE1 = 14 AGE2 = 20 AGE3 = 35 AGE4 = 50 """ LIMIT - Максимальное кол-во записей, скачиваемое со страницы по *wall.get (выходит 5 запросов по 100 в каждом). Больше лучше не брать, чтобы не словить бан на кол-во запросов (в будущем изменить на None). """ STOP_WORDS = ['блять', 'http', 'сука', "хуй", "ебать", "ебанина", "ебанько", "]", "ебля", "ебаный", "еблан", "епта", "ебливый", "блядь", "блядство", "блядина", "мудила", "дрочила", "пидор", "пидорас", "пидорасина", "ебучий", "хуеплет", "ебырь", "ебанутый", "пизда", "пиздец", "пиздюк", "пиздопроебина", "пиздуй", "распиздяй", "хуйня", "нахуй", "выблядок", "ебучка", "охуел", "Блять", "Http", "Сука", "Хуй", "Ебать", "Ебанина", "Ебанько", "[", "Ебля", "Ебаный", "Еблан", "Епта", "Ебливый", "Блядь", "Блядство", "Блядина", "Мудила", "Дрочила", "Пидор", "Пидорас", "Пидорасина", "Ебучий", "Хуеплет", "Ебырь", "Ебанутый", "Пизда", "Пиздец", "Пиздюк", "Пиздопроебина", "Пиздуй", "Распиздяй", "Хуйня", "Нахуй", "Выблядок", "Ебучка", "Охуел"]
nilq/baby-python
python
"""
Sponge Knowledge Base
Provide action arguments - element value set
"""

# NOTE: this script is executed by the Sponge engine's Jython interpreter,
# which injects Action, ListType, StringType, IntegerType, ProvidedMeta,
# ProvidedValue and AnnotatedValue into the script's global scope.

class FruitsElementValueSetAction(Action):
    # Declare one unique, provided list argument ("fruits") whose allowed
    # elements are served by onProvideArgs below; the result is an integer.
    def onConfigure(self):
        self.withLabel("Fruits action with argument element value set")
        self.withArg(ListType("fruits", StringType()).withLabel("Fruits").withUnique().withProvided(ProvidedMeta().withElementValueSet()))
        self.withResult(IntegerType())
    # Return the number of fruits the caller selected.
    def onCall(self, fruits):
        return len(fruits)
    # Supply the fixed, labelled element value set for the "fruits" list.
    def onProvideArgs(self, context):
        if "fruits" in context.provide:
            context.provided["fruits"] = ProvidedValue().withAnnotatedElementValueSet([
                AnnotatedValue("apple").withValueLabel("Apple"),
                AnnotatedValue("banana").withValueLabel("Banana"),
                AnnotatedValue("lemon").withValueLabel("Lemon")
            ])
nilq/baby-python
python
# dna.py - DNA class and related functions # RMM, 11 Aug 2018 # # This file contains the implementation of DNA in the txtlsim toolbox. # This includes objects that represent the individual elements of a # DNA assembly as well as the functions required to create the models # associated with gene expression. # # Copyright (c) 2018, Build-A-Cell. All rights reserved. # See LICENSE file in the project root directory for details. import re # use Python's regular expression library from math import log from .component import Component from .sbmlutil import add_species, add_reaction, find_species from .mechanism import Mechanism, get_mechanisms from .pathutil import load_model from .parameter import get_parameters, update_existing, update_missing from .mechanisms import maturation # # DNA assembly # # The DNAassembly class is a non-standard component that consists of a # collection of DNA subcomponents. A mechanism dictionary is # maintained at the assembly level, but can be overriden at the # component level. Parameter dictionaries for DNA assembly are stored # in the individual elements and not at the assembly level, but the # `assemble_dna()` function can process assembly wide parameters. # # DNA elements that are part of an assembly have a data attribute # `assy` that points back to the assembly that the element is part of. # This attribute is initialized by the `DNAassembly.update_species()` # function (before calling the individual update functions for the DNA # elements). Note that this means that the `assy` attribute is not # available in the element initializer (since we don't yet know what # assembly we will be part of). # class DNAassembly(Component): """DNA assembly class The DNA assembly class is used to represent a collection of DNA subcomponents, typically consisting of a promoter, a ribosome binding site (5' UTR), a protein coding sequence (CDS), an optional C-terminus tag (for protein degradation), and a terminator (3' UTR). 
Subclasses can be used to create specialized types of DNA and predefined subclasses are available for promoters, RBSs, etc. The DNA assembly follows the rules of a Component but it is more complex because each of the elements of the assembly (subcomponents) have their own functions. As a consequence, most of what the assembly construct does is to keep track of the individual subcomponents and calls on those subcomponent to generate species and reactions. Data attributes --------------- name Name of the sequence (str) promoter Promoter sequence (DNA) utr5 5' UTR (DNA) cds Coding sequence (DNA) ctag C-terminus tag (DNA) utr3 3' UTR (DNA) dnalength Length of the entire sequence (int) rnalength Length of the transcribed components (int) peplength Lenth of the translated components (int) rnaname Name of the RNA species (str) [not implemented] rnap RNAP species (SMBLspecies) [not implemented] riboname Name of the ribosome species (str) [not implemented] ribo Ribosome species [not implemented] default_mechanisms default mechanisms for generating models custom_mechanisms customized mechanisms for generating models parameters Parameter values for the assembly (overrides elements) Methods ------- update_species() create/update species associated with construct update_reactions() create/update reactions associated with construct """ def __init__( self, name, promoter=None, utr5=None, cds=None, ctag=None, utr3=None, mechanisms={}, # custom mechanisms config_file=None, parameters={}, # parameter configuration **keywords # parameter keywords ): self.name = name self.promoter = promoter self.utr5 = utr5 self.cds = cds self.ctag = ctag self.utr3 = utr3 # Keep track of the length of DNA, RNA, and protein (peptide) self.dnalength = 0 self.rnalength = 0 self.peplength = 0 # Set up the default mechanisms for a DNA assembly # Note: transcription, translation, degradation are given by extract self.default_mechanisms = { 'maturation' : maturation.protein_basic() } self.custom_mechanisms 
= mechanisms # Create the config_file name (optional) if config_file == None and isinstance(name, str): config_file = self.name.lower() + ".csv" self.config_file = config_file # Set the assembly parameter values (processed by assemble_dna()) self.parameters = get_parameters( config_file, parameters, None, **keywords) # Create/update all of the species associated with this DNA assembly def update_species(self, mixture, conc, debug=False): # Create the DNA species self.dna = add_species(mixture, "DNA", self.name, conc) # Let the individual DNA elements create the additional species for dna in [self.promoter, self.utr5, self.cds, self.ctag, self.utr3]: if dna != None: # Store the DNA assembly that generated this component dna.assy = self # Update the species required for this component if debug: print("DNAassembly species update:", dna.name) dna.update_species(mixture, conc) # Create/update all of the relevant reactions for this DNA assembly def update_reactions(self, mixture, debug=False): # Go through each subcomponent and update reactions for dna in [self.promoter, self.utr5, self.cds, self.ctag, self.utr3]: if dna != None: dna.update_reactions(mixture) # # DNA component # # DNA elements will generally using the `DNA.__init__()` function to # initialize the object. To accommodate default parameter # dictionaries for tthe subclasses, an additional argument # (`default_parameters`) is available. # class DNA(Component): """DNA class The DNA class is used to represent a DNA sequence that has a given length. Its main purpose is as the parent object for DNA fragments and DNA assemblies. 
Note: for initialization of members of this class, the arguments should be as follows: DNA(name, length, [mechanisms], [config_file], [prefix]) DNAtype(name, required_arguments, [length], [mechanisms], [config_file], [prefix], [optional_arguments]) DNAelement(name, required_arguments, [length], [mechanisms], [config_file], [optional_arguments]) DNAtypes - DNAelements: Promoter - ConstitutePromoter, RepressedPromoter UTR5 - ConstituteRBS CDS - ProteinCDS Ctag - DegrationTAg UTR3 - Terminator Data attributes --------------- name Name of the sequence (str) length Length of the sequence (int) assy DNA assembly that we are part of mechanisms Local mechanisms for this component (overrides defaults) parameters Parameter dictionary for the DNA element """ def __init__( self, name, length=0, # positional arguments mechanisms={}, # custom mechanisms config_file=None, parameters={}, # customized parameters default_parameters = {}, # element parameters prefix="dna_", **keywords ): self.name = name self.length = length self.mechanisms = mechanisms self.prefix = prefix # Create the config_file name (optional) if config_file == None and isinstance(name, str): config_file = prefix + self.name.lower() + ".csv" self.config_file = config_file # Load and store the parameters for this component self.parameters = get_parameters( config_file, parameters, default_parameters, **keywords) # Set up default update functions to do nothing def update_species(self, mixture, conc): return None def update_reactions(self, mixture): return None # # Promoter subclasses # # The promoter subclasses are used to create standard promoters # (constitutive, repressed, activated). When creating an instance of # one of these subclasses, the name of the transcriptional regulator # (if any) is passed as an argument and the appropriate reactions are # instantiated. 
# # Promoter sequence class Promoter(DNA): "Promoter class - define a promoter sequence" # Default parameters used to describe a promoter default_parameters = { 'RNAPbound_F' : 20, # Default for ptet 'RNAPbound_R' : 400 # Default for ptet } def __init__( self, name, length=50, mechanisms={}, config_file=None, parameters={}, default_parameters = default_parameters, rnapname="RNAP", prefix="prom_", **keywords ): # Promoter initialization (including mechanisms and parameters) DNA.__init__( self, name, length=length, mechanisms=mechanisms, config_file=config_file, parameters=parameters, default_parameters = default_parameters, prefix=prefix, **keywords) # Set (or reset) values based on function arguments self.rnapname = rnapname # Fill in any missing parameter values with defaults update_missing(self.parameters, Promoter.default_parameters) def update_species(self, mixture, conc, parameters={}): assy = self.assy # Get the DNA assembly we are part of # Create the mRNA species assy.rnaname = assy.utr5.name + "--" + assy.cds.name if (assy.ctag != None): assy.rnaname += "--" + assy.ctag.name assy.rna = add_species(mixture, "RNA", assy.rnaname, 0) # Create RNA polymerase bound to DNA assy.rnap_bound = add_species(mixture, "Complex", self.rnapname + ":" + assy.name, 0) # Create any other species needed by the transcriptional machinery mechanisms = get_mechanisms(mixture, assy, self.mechanisms) mechanisms['transcription'].update_species(mixture, assy, conc) # Default action of a promoter is to implement transcription def update_reactions(self, mixture, debug=False): model = mixture.model # Get the model where we will store results assy = self.assy # Get the DNA assembly we are part of # Create the reactions required for transcription mechanisms = get_mechanisms(mixture, assy, self.mechanisms) mechanisms['transcription'].update_reactions(mixture, assy) # Constitute promoter class ConstitutivePromoter(Promoter): "ConstitutivePromoter - define a constitutive promoter" # 
Repressed promoter class RepressedPromoter(Promoter): #! TODO: add docstring # Default parameters used to describe a repressed promoter default_parameters = { 'RNAPbound_F' : 20, # Default for ptet 'RNAPbound_R' : 400, # Default for ptet 'DNA_Sequestration_F' : 2.5e-1, # Default for ptet 'DNA_Sequestration_R' : 1.11e-4, # Default for ptet } def __init__( self, name, repressor, length=50, mechanisms={}, config_file=None, parameters={}, rnapname="RNAP", dimer=False, **keywords ): # Promoter initialization (including mechanisms and parameters) Promoter.__init__( self, name, length=length, mechanisms=mechanisms, config_file=config_file, parameters=parameters, default_parameters = RepressedPromoter.default_parameters, rnapname=rnapname, **keywords) # Store additional information related to repression self.tfname = "Protein " + repressor if dimer: self.tfname += " dimer" self.dimer = dimer def update_species(self, mixture, conc): assy = self.assy # Get the DNA assembly we are part of # Create species for unrepressed promoter Promoter.update_species(self, mixture, conc) # Create repressor bound to DNA self.tf_bound = add_species(mixture, "Complex", self.tfname + ":" + assy.name, 0) # mechanisms = get_mechanisms(mixture, assy, self.mechanisms) # mechanisms['process'].update_species(mixture, assy, conc) def update_reactions(self, mixture, debug=False): model = mixture.model # Get the model where we will store results assy = self.assy # Get the DNA assembly we are part of params = self.parameters # Get the parameter dictionary # Create the reactions for the unbound promoter Promoter.update_reactions(self, mixture) # Create the reaction for the transcription factor binding to DNA tf_species = find_species(mixture, self.tfname) if tf_species == None: raise NameError("RepressedPromoter: %s not found" % self.tfname) add_reaction(mixture, [tf_species, assy.dna], [self.tf_bound], kf = params['DNA_Sequestration_F'], kr = params['DNA_Sequestration_R'], prefix = "repr_") # mechanisms 
= get_mechanisms(mixture, assy, self.mechanisms) # mechanisms['process'].update_reactions(mixture, assy) # # UTR5 subclasses # # The UTR5 subclasses are used to create ribosome binding sites (RBSs). class UTR5(DNA): "UTR5 class - define 5' untranslated region sequence" # Default parameters used to describe a UTR5 (empty) default_parameters = {} def __init__( self, name, length=20, mechanisms={}, config_file=None, parameters={}, default_parameters = default_parameters, prefix="utr5_", **keywords ): DNA.__init__( self, name, length, mechanisms=mechanisms, config_file=config_file, parameters=parameters, default_parameters = default_parameters, prefix=prefix, **keywords) # Constitutive RBS class ConstitutiveRBS(UTR5): #! TODO: add docstring # Default parameters used to describe a constitutive RBS (TODO) default_parameters = { 'Ribosome_Binding_F' : 0.1, # TODO: add source information 'Ribosome_Binding_R' : 4, # TODO: add source information } def __init__( self, name, length=20, mechanisms={}, config_file=None, parameters={}, riboname = 'Ribo', # Ribosome species name **keywords # Additional keywords ): UTR5.__init__( self, name, length=length, mechanisms=mechanisms, config_file=config_file, parameters=parameters, default_parameters = ConstitutiveRBS.default_parameters, **keywords) self.riboname = riboname def update_species(self, mixture, conc, parameters={}): assy = self.assy # Get the DNA assembly we are part of # Create the protein assy.protname = assy.cds.name if (assy.ctag != None): assy.protname += "--" + assy.ctag.name assy.protein = add_species(mixture, "Protein", assy.protname, 0) # Create Ribosome bound to RNA assy.ribo_bound = add_species(mixture, "Complex", self.riboname + ":" + assy.rnaname, 0) # Create any other species needed by the transcriptional machinery mechanisms = get_mechanisms(mixture, assy, self.mechanisms) mechanisms['translation'].update_species(mixture, assy, conc) # Default action of a promoter is to implement transcription def 
update_reactions(self, mixture, debug=False): assy = self.assy # Get the DNA assembly we are part of mechanisms = get_mechanisms(mixture, assy, self.mechanisms) mechanisms['translation'].update_reactions(mixture, assy) # # CDS subclasses # # The CDS subclasses are used to create proteins and peptides # #! Sort out whether we need anything more than CDS class CDS(DNA): "CDS class - define protein coding sequence" # Default parameters used to describe a repressed promoter default_parameter_values = { 'Dimerization_F' : 1, # Default for TetR 'Dimerization_R' : 1, # Default for TetR 'Protein_Maturation' : log(2)/(5*60) # 5 minutes (GFP) } def __init__( self, name, length=1000, mechanisms={}, config_file=None, parameters={}, dimerize = False, maturation_time=None, **keywords ): # DNA initialization DNA.__init__( self, name, length=length,mechanisms=mechanisms, config_file=config_file, parameters=parameters, default_parameters = CDS.default_parameter_values, prefix="cds_", **keywords) self.dimerize = dimerize self.maturation_time = maturation_time def update_species(self, mixture, conc, parameters={}): assy = self.assy # Get the DNA assembly we are part of # Create species for the protein self.protein = add_species(mixture, "Protein", self.name, 0) if self.dimerize: #! Move to mechanism function? self.dimer = add_species(mixture, "Protein", self.name + " dimer", 0) mechanisms = get_mechanisms(mixture, assy, self.mechanisms) mechanisms['maturation'].update_species(mixture, assy, conc) # Default action of a protein is to mature and (optionally) dimerize def update_reactions(self, mixture, debug=False): assy = self.assy # Get DNA assembly we are part of parameters = assy.cds.parameters # get parameter values if self.dimerize: #! Move to mechanism function? 
add_reaction(mixture, [self.protein, self.protein], [self.dimer], kf = parameters['Dimerization_F'], kr = parameters['Dimerization_R'], prefix="cds_") # Allow override of protein maturation time if self.maturation_time != None: parameters['Protein_Maturation'] = log(2)/(self.maturation_time) # Let the individual mechanisms create all of the reactions mechanisms = get_mechanisms(mixture, assy, self.mechanisms) mechanisms['maturation'].update_reactions(mixture, assy) # Protein coding sequence (same as a CDS) class ProteinCDS(CDS): "Protein coding sequence" # # Ctag subclasses # # The Ctag subclasses are used to C-terminus tags class Ctag(DNA): #! TODO: add docstring "Ctag class - define C-terminus protein tag" def __init__(self, name, length=0, mechanisms={}, config_file=None, parameters={}, **keywords): # DNA initialization DNA.__init__(self, name, length=length, mechanisms=mechanisms, config_file=config_file, parameters=parameters, prefix="ctag_", **keywords) # Degradation tag class DegradationTag(Ctag): #! TODO: add docstring def __init__(self, name, protease="ClpXP", length=9, mechanisms={}, config_file=None, parameters={}, **keywords): Ctag.__init__(self, name, length=length, mechanisms=mechanisms, config_file=config_file, parameters=parameters, **keywords) self.protease = protease # # UTR3 subclasses # # The UTR3 subclasses are used to create terminators. class UTR3(DNA): "UTR3 class - define 3' untranslated region sequence" def __init__(self, name, length=0, mechanisms={}, config_file=None, parameters={}, **keywords): # DNA initialization DNA.__init__(self, name, length=length, mechanisms=mechanisms, config_file=config_file, parameters=parameters, prefix="utr3_", **keywords) # Terminator class Terminator(UTR3): #! 
TODO: add docstring def __init__(self, name, length=50, mechanisms={}, config_file=None): UTR3.__init__(self, name, length, mechanisms, config_file, prefix="term_") # # Functions for creatng and manipulating DNA # # Assemble fragments of DNA into a gene def assemble_dna( prom, utr5, cds, # required arguments ctag=None, utr3=None, # optional positional arguments mechanisms = {}, # custom mechanisms config_file = None, # parameter configuration information parameters = {}, # (overrides element defaults) assy_name = None, # component-specific arguments **keywords # parameter keywords (passed to elements) ): # Create a new sequence of DNA assy = DNAassembly( assy_name, mechanisms=mechanisms, config_file=config_file, parameters=parameters, **keywords) # Initialize the name string if nothing was given if assy_name == None: assy.name = "" # Parse and store the promoter sequence if isinstance(prom, str): name, length = parse_DNA_string(prom) # Get component name prom = load_model("prom", name, length) # Load from library if isinstance(prom, Promoter): assy.promoter = prom update_existing(prom.parameters, assy.parameters) assy.dnalength += prom.length if assy_name == None: assy.name += prom.name else: ValueError("invalid promoter specification") # Parse and store the 5' UTR if isinstance(utr5, str): name, length = parse_DNA_string(utr5) # Get component name utr5 = load_model("UTR5", name, length) # Load from library if isinstance(utr5, UTR5): assy.utr5 = utr5 update_existing(utr5.parameters, assy.parameters) assy.dnalength += utr5.length assy.rnalength += utr5.length if assy_name == None: assy.name += "--" + utr5.name else: ValueError("invalid UTR5 specification") # Parse and store the protein coding sequence if isinstance(cds, str): name, length = parse_DNA_string(cds) # Get component name cds = load_model("CDS", name, length) # Load from library if isinstance(cds, CDS): assy.cds = cds update_existing(cds.parameters, assy.parameters) assy.dnalength += cds.length 
assy.rnalength += cds.length assy.peplength += cds.length if assy_name == None: assy.name += "--" + cds.name else: ValueError("invalid CDS specification") # Parse and store the C-terminus tag if isinstance(ctag, str): name, length = parse_DNA_string(ctag) # Get component name ctag = load_model("ctag", name, length) # Load from library if isinstance(ctag, Ctag): assy.ctag = ctag update_existing(ctag.parameters, assy.parameters) assy.dnalength += ctag.length assy.rnalength += ctag.length assy.peplength += ctag.length if assy_name == None: assy.name += "--" + ctag.name else: ValueError("invalid Ctag specification") # Parse and store the 3' UTR if isinstance(utr3, str): name, length = parse_DNA_string(utr3) # Get component name utr3 = load_model("UTR3", utr3, length) # Load from library if isinstance(utr3, UTR3): assy.utr3 = utr3 update_existing(utr3.parameters, assy.parameters) assy.dnalength += utr3.length assy.rnalength += utr3.length if assy_name == None: assy.name += "--" + utr3.name else: ValueError("invalid UTR3 specification") return assy # Parse a DNA string (from the old MATLAB TX-TL modeling library) def parse_DNA_string(spec): # First check to see if we have a name(length) specification m = re.search("^(\w+)\((\d+)\)$", spec) if m == None: # If not, see if we just find a name m = re.search("^(\w+)$", spec) if m != None: name = m.group(1) length = None else: name = m.group(1) length = int(m.group(2)) # If we didn't get anything, generate an error if m == None: ValueError("Can't parse spec" + spec) # Return name and length as a tuple return name, length
nilq/baby-python
python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from services import svcProject
from utils.exceptionHandle import DefaultError


def _respond(action, call, *args):
    """Invoke *call* and wrap its result in the standard API envelope.

    On success returns ({'title': 'Succeed to <action>', 'detail': result}, 200);
    on any exception raises DefaultError titled 'Failed to <action>'.
    """
    try:
        return {
            'title': 'Succeed to ' + action,
            'detail': call(*args)
        }, 200
    except Exception as e:
        raise DefaultError(title='Failed to ' + action, detail=str(e))


def list_project():
    """GET /api/projects"""
    return _respond('List Project', svcProject.list_project)


def get_project(project_id):
    """GET /api/project/{project_id}

    :param project_id: id of the project to fetch
    """
    return _respond('Get Project', svcProject.get_project, project_id)


def add_project(body):
    """POST /api/projects

    :param body: project payload
    """
    return _respond('Create Project', svcProject.add_project, body)


def update_project(project_id, body):
    """PUT /api/project/{project_id}

    :param project_id: id of the project to update
    :param body: updated project payload
    """
    return _respond('Update Project', svcProject.update_project,
                    project_id, body)


def update_project_status(project_id, body):
    """PUT /api/project/{project_id}/status

    :param project_id: id of the project
    :param body: payload carrying the new status
    """
    return _respond('change Project Status', svcProject.set_project_status,
                    project_id, body)


def delete_project(project_id):
    """DELETE /api/project/{project_id}

    Soft-deletes by setting the project status to 'delete'.

    :param project_id: id of the project to delete
    """
    try:
        svcProject.set_project_status(project_id, {'status': 'delete'})
        return {
            'title': 'Delete Project Succeed',
        }, 204
    except Exception as e:
        raise DefaultError(title='Delete Project Failed', detail=str(e))


if __name__ == '__main__':
    print('This is API for project')
nilq/baby-python
python
import pytz
from cogs.Permissions import dm_commands, moderator_perms
from GompeiFunctions import load_json, save_json
from dateutil.parser import parse
from discord.ext import commands
from datetime import datetime
from config import Config

import asyncio
import discord
import os


class Voting(commands.Cog):
    """
    Create votes and let users vote on them.
    Currently only has support for handling one voting poll in a server
    """

    def __init__(self, bot):
        # Bot reference and persisted state; poll state is loaded lazily
        # in on_ready via load_voting().
        self.bot = bot
        self.settings = load_json(os.path.join("config", "settings.json"))
        self.votes = None           # dict persisted to config/votes.json
        self.vote_open = False      # True while a poll is accepting votes
        self.poll_message = None    # discord.Message carrying the poll embed

    @commands.Cog.listener()
    async def on_ready(self):
        # Restore any poll that was open before the bot restarted
        await self.load_voting()

    async def load_voting(self):
        """Reload poll state from disk and resume the close timer if needed."""
        self.votes = load_json(os.path.join("config", "votes.json"))

        # If the poll hasn't been created, nothing to load
        if self.votes["close"] is None:
            return
        else:
            closes = parse(self.votes["close"])

            # If the poll has been closed
            # NOTE(review): datetime.now() is naive while poll_timer uses
            # discord.utils.utcnow() (timezone-aware); if `closes` parses as
            # aware this comparison raises TypeError - confirm stored format.
            if datetime.now() > closes:
                return
            else:
                self.vote_open = True
                await self.load_poll_message()
                await self.poll_timer(closes)

    async def load_poll_message(self):
        """Re-fetch the poll message referenced by the saved state."""
        guild = self.bot.get_guild(self.settings["main_guild"])
        print(guild)
        channel = guild.get_channel(self.votes["channel_id"])
        print(channel)
        self.poll_message = await channel.fetch_message(self.votes["message_id"])
        print(self.poll_message)

    async def update_poll_message(self):
        """Re-sort options by vote count and redraw the poll embed."""
        self.votes["votes"] = sorted(self.votes["votes"], key=lambda i: len(i["voters"]), reverse=True)

        # Build the leaderboard; tied options share the same rank number
        last_votes = 0
        last_count = 1
        count = 1
        leaderboard = ""
        for option in self.votes["votes"]:
            if len(option["voters"]) == last_votes:
                leaderboard += "**" + str(last_count) + ". **" + option["name"] + " - " + str(len(option["voters"])) + "\n"
                count += 1
            else:
                leaderboard += "**" + str(count) + ". **" + option["name"] + " - " + str(len(option["voters"])) + "\n"
                last_votes = len(option["voters"])
                last_count = count
                count += 1

        embed = discord.Embed(title=self.votes["title"], color=0x43b581)
        embed.description = leaderboard

        await self.poll_message.edit(embed=embed)

    async def poll_timer(self, close_date):
        """Sleep until close_date, then close the poll."""
        self.vote_open = True
        # NOTE(review): close_date must be timezone-aware here since
        # discord.utils.utcnow() is aware - confirm for the decision path,
        # which validates with naive datetime.now().
        await asyncio.sleep((close_date - discord.utils.utcnow()).total_seconds())
        await self.close_poll(None)

    @commands.command(pass_context=True, aliases=["closePoll"])
    @commands.check(moderator_perms)
    async def close_poll(self, ctx):
        """
        Closes the poll
        Usage: .closePoll

        :param ctx: context object (None when invoked by poll_timer)
        """
        # Build the final leaderboard (same tie-aware ranking as
        # update_poll_message)
        last_votes = 0
        last_count = 1
        count = 1
        leaderboard = ""
        for option in self.votes["votes"]:
            if len(option["voters"]) == last_votes:
                leaderboard += "**" + str(last_count) + ". **" + option["name"] + " - " + str(
                    len(option["voters"])) + "\n"
                count += 1
            else:
                leaderboard += "**" + str(count) + ". **" + option["name"] + " - " + str(len(option["voters"])) + "\n"
                last_votes = len(option["voters"])
                last_count = count
                count += 1

        embed = discord.Embed(title=self.votes["title"], color=0x43b581)
        # Crown the winner (first option after sorting), if any
        if len(self.votes["votes"]) > 0:
            embed.description = ":star: " + self.votes["votes"][0]["name"] + " :star:\n" + leaderboard
        else:
            embed.description = ":star: Nothing! :star:\n" + leaderboard

        await self.poll_message.edit(embed=embed)

        # Reset persisted poll state so a new poll can be created
        self.vote_open = False
        self.votes["close"] = None
        self.votes["title"] = None
        self.votes["channel_id"] = None
        self.votes["message_id"] = None
        self.votes["votes"] = None
        save_json(os.path.join("config", "votes.json"), self.votes)

        if ctx is not None:
            await ctx.send("Closed poll")

        # NOTE(review): edit() with no arguments appears to be a no-op -
        # confirm whether this call is needed.
        await self.poll_message.edit()

    @commands.command(pass_context=True, aliases=['createOpenVote'])
    @commands.check(moderator_perms)
    @commands.guild_only()
    async def create_open_vote(self, ctx, channel: discord.TextChannel, title, close_timestamp, *, message):
        """
        Creates an open poll that users can add options to vote for
        Usage: .createOpenVote <channel> <title> <closeTime> <message>

        :param ctx: context object
        :param channel: channel for the poll
        :param title: embed title for the poll
        :param close_timestamp: closing time for the poll
        :param message: message to accompany the poll
        """
        # NOTE(review): self.votes is keyed by poll fields ("close",
        # "votes", ...), not guild ids - this membership test looks like it
        # never detects a running vote; verify intended behavior.
        if str(ctx.guild.id) in self.votes:
            await ctx.send("A vote is already running for this server")
        else:
            closes = parse(close_timestamp)
            if closes is None:
                await ctx.send("Not a valid close time")

            closes = closes.astimezone(pytz.utc)

            if (closes - discord.utils.utcnow()).total_seconds() < 0:
                await ctx.send("Close time cannot be before current time")
            else:
                # NOTE(review): `modifier` is computed but never used.
                modifier = 4
                for char in ctx.message.content[:ctx.message.content.find(close_timestamp)]:
                    if char == "\"":
                        modifier += 1

                embed = discord.Embed(title=title, color=0x43b581)
                self.poll_message = await channel.send(message + "```.addOption <option> - Create an option to vote "
                                                                 "for and cast your vote for it\n.vote <option> - "
                                                                 "Cast a vote for an option in the poll\n.removeVote "
                                                                 "<option> - Removes a vote you casted for an "
                                                                 "option\n.sendPoll - sends the poll embed (does not "
                                                                 "update live)```", embed=embed)

                self.votes = {
                    "type": "open",
                    "close": close_timestamp,
                    "title": title,
                    "channel_id": channel.id,
                    "message_id": self.poll_message.id,
                    "votes": []
                }

                save_json(os.path.join("config", "votes.json"), self.votes)

                # Create open thread
                voting_thread = await self.poll_message.create_thread(
                    name=title + " Voting",
                    auto_archive_duration=10080,
                )
                Config.add_command_channel(voting_thread)

                # Blocks here until the poll closes, then archives the thread
                await self.poll_timer(closes)

                await voting_thread.edit(archived=True)
                Config.remove_command_channel(voting_thread)

    @commands.command(pass_context=True, aliases=['createDecisionVote'])
    @commands.check(moderator_perms)
    @commands.guild_only()
    async def create_decision_vote(self, ctx, channel: discord.TextChannel, title, close_timestamp, *, message):
        """
        Creates a decision poll with a fixed option list (one vote per user)
        Usage: .createDecisionVote <channel> <title> <closeTime> <message>

        :param ctx: context object
        :param channel: channel for the poll
        :param title: embed title for the poll
        :param close_timestamp: closing time for the poll
        :param message: message to accompany the poll
        """
        # NOTE(review): same suspicious membership test as create_open_vote.
        if str(ctx.guild.id) in self.votes:
            await ctx.send("A vote is already running for this server")
        else:
            closes = parse(close_timestamp)
            if closes is None:
                await ctx.send("Not a valid close time")
            # NOTE(review): naive datetime.now() here vs aware utcnow() in
            # create_open_vote / poll_timer - confirm consistency.
            elif (closes - datetime.now()).total_seconds() < 0:
                await ctx.send("Close time cannot be before current time")
            else:
                # NOTE(review): `modifier` is computed but never used.
                modifier = 4
                for char in ctx.message.content[:ctx.message.content.find(close_timestamp)]:
                    if char == "\"":
                        modifier += 1

                def check_author(msg):
                    return msg.author.id == ctx.author.id

                self.votes = {
                    "type": "decision",
                    "close": close_timestamp,
                    "title": title,
                    "channel_id": channel.id,
                    "message_id": None,
                    "votes": []
                }

                # Collect the option list interactively from the moderator
                await ctx.send("What options would you like to add to this decision poll? (Put each option on a new "
                               "line)")
                response = await self.bot.wait_for('message', check=check_author)

                options = response.content.splitlines()
                for option in options:
                    self.votes["votes"].append({"name": option, "creator": None, "voters": []})

                embed = discord.Embed(title=title, color=0x43b581)

                if len(self.votes["votes"]) == 0:
                    await ctx.send("You need at least one option in your poll")
                    return

                self.poll_message = await channel.send(
                    message + "```.vote <option> - Cast a vote for an option in the poll"
                              "\n.removeVote <option> - Removes a vote you casted for an option"
                              "\n.sendPoll - sends the poll embed (does not update live)```",
                    embed=embed
                )

                self.votes["message_id"] = self.poll_message.id

                await self.update_poll_message()
                save_json(os.path.join("config", "votes.json"), self.votes)

                await self.poll_timer(closes)

    @commands.command(pass_context=True, aliases=["addOption"])
    @commands.check(dm_commands)
    async def add_option(self, ctx):
        """
        Adds an option to the poll
        Usage: .addOption <option>

        :param ctx: context object
        """
        if not self.vote_open:
            await ctx.send("There is no poll currently open")
            return
        if not self.votes["type"] == "open":
            await ctx.send("Cannot add options to this type of poll")
            return

        # Everything after the first space is the option text
        user_option = ctx.message.content[ctx.message.content.find(" ") + 1:]

        if len(user_option) > 88:
            await ctx.send("This option is too long")
            return

        # Options double as channel names, hence the channel-name rules below
        if not user_option.isalnum():
            if "-" in user_option:
                modified_string = user_option.replace("-", "")
                if not modified_string.isalnum():
                    await ctx.send("Channel names have to be alphanumeric")
                    return
                if not all(c.isdigit() or c.islower() or c == "-" for c in user_option):
                    await ctx.send("Channel names must be lowercase")
                    return
            elif " " in user_option or "\n" in user_option:
                await ctx.send("Channel names cannot contain spaces (try using a \"-\" instead)")
                return
        else:
            # Check if the user has an option already or if the option already exists
            for option in self.votes["votes"]:
                if option["creator"] == ctx.author.id:
                    await ctx.send("You already added an option to this poll")
                    return
                if user_option == option["name"]:
                    await ctx.send("This option already exists")
                    return

        # Creating an option also casts the creator's vote for it
        self.votes["votes"].append({"name": user_option, "creator": ctx.author.id, "voters": [ctx.author.id]})
        save_json(os.path.join("config", "votes.json"), self.votes)
        await self.update_poll_message()
        await ctx.send("Successfully added your option")

    @commands.command(pass_context=True)
    @commands.check(dm_commands)
    async def vote(self, ctx):
        """
        Votes for an option in the poll
        Usage: .vote <option>

        :param ctx: context object
        """
        if not self.vote_open:
            await ctx.send("There is no poll currently open")
            return

        user_option = ctx.message.content[ctx.message.content.find(" ") + 1:]

        if self.votes["type"] == "open":
            # Open polls: a user may vote for any number of options
            for option in self.votes["votes"]:
                if user_option == option["name"]:
                    if ctx.author.id in option["voters"]:
                        await ctx.send("You already voted for this option")
                        return

                    option["voters"].append(ctx.author.id)
                    save_json(os.path.join("config", "votes.json"), self.votes)
                    await self.update_poll_message()
                    await ctx.send("Successfully voted for " + user_option)
                    return
        elif self.votes["type"] == "decision":
            # Decision polls: one vote per user; offer to switch if they
            # already voted elsewhere
            print("got here")
            for option in self.votes["votes"]:
                if user_option == option["name"]:
                    if ctx.author.id in option["voters"]:
                        await ctx.send("You already voted for this option")
                        return
                    else:
                        for other_option in self.votes["votes"]:
                            if user_option != other_option["name"]:
                                if ctx.author.id in other_option["voters"]:
                                    def check_author(message):
                                        return message.author.id == ctx.author.id

                                    await ctx.send(
                                        "You already voted for an option (" + other_option["name"]
                                        + "). Would you like to switch your vote to " + option["name"] + "? (Y/N)"
                                    )

                                    response = await self.bot.wait_for('message', check=check_author)
                                    if response.content.lower() == "y" or response.content.lower() == "yes":
                                        other_option["voters"].remove(ctx.author.id)
                                        option["voters"].append(ctx.author.id)
                                        save_json(os.path.join("config", "votes.json"), self.votes)
                                        await self.update_poll_message()
                                        await ctx.send("Successfully voted for " + user_option)
                                    else:
                                        await ctx.send("Kept your vote for " + other_option["name"])
                                    return

                        option["voters"].append(ctx.author.id)
                        save_json(os.path.join("config", "votes.json"), self.votes)
                        await self.update_poll_message()
                        await ctx.send("Successfully voted for " + user_option)
                        return

        # No option matched
        if self.votes["type"] == "open":
            await ctx.send(
                "This option doesn't exist. If you'd like to add it do it with `" + self.settings["prefix"]
                + "addOption <option>`"
            )
        else:
            await ctx.send("This option doesn't exist.")

    @commands.command(pass_context=True, aliases=["removeVote"])
    @commands.check(dm_commands)
    async def remove_vote(self, ctx):
        """
        Removes your vote for an option in the poll
        Usage: .removeVote <option>

        :param ctx: context object
        """
        if not self.vote_open:
            await ctx.send("There is no poll currently open")
            return

        user_option = ctx.message.content[ctx.message.content.find(" ") + 1:]

        count = 0
        for option in self.votes["votes"]:
            if user_option == option["name"]:
                if ctx.author.id not in option["voters"]:
                    await ctx.send("You haven't voted for this option")
                    return

                option["voters"].remove(ctx.author.id)
                # Open-poll options disappear when their last vote is removed
                if len(option["voters"]) == 0 and self.votes["type"] == "open":
                    self.votes["votes"].pop(count)

                save_json(os.path.join("config", "votes.json"), self.votes)
                await self.update_poll_message()
                await ctx.send("Successfully removed vote for " + user_option)
                return
            count += 1

        await ctx.send("This option doesn't exist")

    @commands.command(pass_context=True, aliases=["removeOption"])
    @commands.check(moderator_perms)
    async def remove_option(self, ctx):
        """
        Removes an option from the poll entirely
        Usage: .removeOption <option>

        :param ctx: context object
        """
        user_option = ctx.message.content[ctx.message.content.find(" ") + 1:]

        count = 0
        for option in self.votes["votes"]:
            if user_option == option["name"]:
                self.votes["votes"].pop(count)
                save_json(os.path.join("config", "votes.json"), self.votes)
                await self.update_poll_message()
                await ctx.send("Successfully removed option " + user_option)
                return
            count += 1

    @commands.command(pass_context=True, aliases=["sendPoll"])
    @commands.check(dm_commands)
    async def send_poll(self, ctx):
        """
        Sends the poll
        Usage: .sendPoll

        :param ctx: context object
        """
        if not self.vote_open:
            await ctx.send("There is no poll currently open")
            return

        # Snapshot leaderboard (same tie-aware ranking as update_poll_message)
        last_votes = 0
        last_count = 1
        count = 1
        leaderboard = ""
        for option in self.votes["votes"]:
            if len(option["voters"]) == last_votes:
                leaderboard += "**" + str(last_count) + ". **" + option["name"] + " - " + str(
                    len(option["voters"])) + "\n"
                count += 1
            else:
                leaderboard += "**" + str(count) + ". **" + option["name"] + " - " + str(len(option["voters"])) + "\n"
                last_votes = len(option["voters"])
                last_count = count
                count += 1

        embed = discord.Embed(title=self.votes["title"], color=0x43b581)
        embed.description = leaderboard
        await ctx.send("This poll does not update live", embed=embed)


def setup(bot):
    # Standard discord.py extension entry point
    bot.add_cog(Voting(bot))
nilq/baby-python
python
from typing import List, Type
import warnings

import numpy as np
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from astropy import wcs
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.modeling import models
from astropy.utils.exceptions import AstropyWarning
from astropy.visualization import LogStretch
from matplotlib.offsetbox import AnchoredText

from .result import Result

__all__ = ["make_figure"]


def _normalise(image: np.ndarray):
    '''Function normalises an array s.t it is over a range [0., 1.]

    Parameters
    ----------

    image : np.ndarray
        Image to be normalised.

    Returns
    -------

    Normalised image: np.ndarray.
    '''

    # Linear rescale from [min, max] to [0, 1]
    m, M = np.min(image), np.max(image)
    return (image - m) / (M - m)


def _supressAxs(ax):
    '''Function that removes all labels and ticks from a figure

    Parameters
    ----------

    ax: matplotlib axis object

    Returns
    -------

    ax : matplotlib axis object
        Now with no ticks or labels
    '''

    ax.axes.get_xaxis().set_visible(False)
    ax.axes.get_yaxis().set_visible(False)
    ax.spines["left"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["top"].set_visible(False)
    ax.spines["bottom"].set_visible(False)

    return ax


def RADECtopixel(objList: List[List[float]], header) -> List[List[float]]:
    '''Function to convert RA DEC in objList to pixel coordinates using
       wcs in header of image

    Parameters
    ----------

    objList : List[List[float]]
        List of list of RA, DEC, object type and psfMag_r

    header :
        FITS header carrying the WCS solution.

    Returns
    -------

    occludingStars : List[List[float]]
        List of RA, DEC in pixel coordinates.
    '''

    occludingStars = []

    with warnings.catch_warnings():
        # ignore invalid card warnings
        warnings.simplefilter('ignore', category=AstropyWarning)
        w = wcs.WCS(header)

    RAS = [item[0] for item in objList]
    DECS = [item[1] for item in objList]

    for ra, dec in zip(RAS, DECS):
        skyCoordPos = SkyCoord(ra, dec, unit="deg")
        x, y = wcs.utils.skycoord_to_pixel(skyCoordPos, wcs=w)
        occludingStars.append([x, y])

    return occludingStars


def make_oneone(ax, img, result):
    '''Function plots the cleaned image

    Parameters
    ----------

    ax : matplotlib axis object

    img : np.ndarray
        image data to be plotted

    result : Result dataclass
        dataclass of calculated results for object

    Returns
    -------

    '''

    # Log-stretch for display so faint structure is visible
    log_stretch = LogStretch(10000.)

    ax.imshow(log_stretch(_normalise(img)), origin="lower", aspect="auto")
    ax.scatter(result.apix[0], result.apix[1], label="Asym. centre")
    ax.set_xlim([-0.5, img.shape[0]+0.5])
    ax.set_title("Cleaned Image")

    # Sky estimate annotation, bottom-left corner (loc=3)
    text = f"Sky={result.sky:.2f}\n" fr"Sky $\sigma$={result.sky_err:.2f}"
    textbox = AnchoredText(text, frameon=True, loc=3, pad=0.5)
    ax.add_artist(textbox)


def make_onetwo(ax, mask, result):
    '''Function plots the object map

    Parameters
    ----------

    ax : matplotlib axis object

    mask : np.ndarray
        object mask data to be plotted

    result : Result dataclass
        dataclass of calculated results for object

    Returns
    -------

    '''

    ax.imshow(mask, origin="lower", aspect="auto", cmap="gray")
    ax.scatter(result.apix[0], result.apix[1], label="Asym. centre")
    ax.set_xlim([-0.5, mask.shape[0]+0.5])
    ax.set_ylim([-0.5, mask.shape[1]+0.5])
    ax.set_title("Object mask")

    # Asymmetry measures, bottom-left (loc=3)
    text = f"A={result.A[0]:.3f}\nA_bgr={result.A[1]:.3f}\n" rf"$A_s$={result.As[0]:.3f}"
    text += "\n" fr"$A_s90$={result.As90[0]:.3f}"
    if len(result.objList) > 0:
        text += f"\nmaskedFraction={result.maskedPixelFraction*100.:.1f}"
    textbox = AnchoredText(text, frameon=True, loc=3, pad=0.5)
    ax.add_artist(textbox)

    # CAS / Gini / M20 measures, bottom-right (loc=4)
    text = f"C={result.C:.3f}\nS={result.S:.3f}\n" rf"Gini={result.gini:.3f}"
    text += "\n" fr"m20={result.m20:.3f}"
    textbox = AnchoredText(text, frameon=True, loc=4, pad=0.5)
    ax.add_artist(textbox)

    # Rmax aperture centred on the image centre
    circle = mpatches.Circle(((mask.shape[0]/2)+1, (mask.shape[1]/2)+1),
                             result.rmax, fill=False, label="Rmax", color="white")
    ax.add_patch(circle)


def make_twoone(ax, shape, result):
    '''Function plots the Sersic fit

    Parameters
    ----------

    ax : matplotlib axis object
        axis instance to plot to

    shape : Tuple[int]
        Shape of image

    result : Result dataclass
        dataclass of calculated results for object

    Returns
    -------

    modelimage : np.ndarray
        fitted model Sersic image
    '''

    log_stretch = LogStretch(10000.)

    # Evaluate the fitted Sersic model on the image grid
    ny, nx = shape
    y, x = np.mgrid[0:ny, 0:nx]
    modelimage = models.Sersic2D.evaluate(x, y, result.sersic_amplitude,
                                          result.sersic_r_eff, result.sersic_n,
                                          result.sersic_x_0, result.sersic_y_0,
                                          result.sersic_ellip, result.sersic_theta)
    # Add Gaussian sky noise to the model
    # NOTE(review): np.random.normal is unseeded, so the displayed model
    # (and the residual panel) differ between runs - confirm intended.
    modelimage += np.random.normal(result.sky, result.sky_err, size=shape)

    ax.imshow(log_stretch(_normalise(modelimage)), origin="lower", aspect="auto")
    ax.scatter(result.sersic_x_0, result.sersic_y_0, label="Sersic centre")
    ax.set_title("Sersic fit")

    text = f"Ellip.={result.sersic_ellip:.3f}\n"
    text += f"n={result.sersic_n:.3f}\n r_eff={result.sersic_r_eff:.3f}\n"
    text += f"Amplitude={result.sersic_amplitude:.3f}"
    textbox = AnchoredText(text, frameon=True, loc=3, pad=0.5)
    ax.add_artist(textbox)

    # Half-light ellipse from the fit parameters
    # NOTE(review): mpatches.Ellipse takes FULL width/height (diameters);
    # a = r_eff is passed directly, so the drawn ellipse is half the
    # expected size - verify intended.
    a = result.sersic_r_eff
    b = a * np.abs(1. - result.sersic_ellip)
    x0 = result.sersic_x_0
    y0 = result.sersic_y_0
    theta = result.sersic_theta * 180./np.pi
    ellipse = mpatches.Ellipse(xy=(x0, y0), width=a, height=b, angle=theta,
                               fill=False, label="Sersic half light",
                               color="red")
    ax.add_patch(ellipse)

    return modelimage


def make_twotwo(ax, img, modelImage, listofStarstoPlot, result):
    ''' function plots sersic fit residual

    Parameters
    ----------

    ax : matplotlib axis object
        axis instance to plot to

    img : np.ndarray
        image data to be plotted

    modelImage : np.ndarray
        model sersic image

    listofStarstoPlot : List[List[float]]
        list of stars to that occlude the main object. [RA, DEC, name, psfMag_r]

    result : Result dataclass
        dataclass of calculated results for object

    Returns
    -------

    '''

    if len(listofStarstoPlot) > 0:
        # Fill star-masked pixels from the 90-degree-rotated image before
        # computing the residual
        # NOTE(review): presumably this mirrors the masking used during the
        # fit - confirm against the analysis code.
        imageMask = np.where(result.starMask == 1, img, np.rot90(img))
        residual = (imageMask - modelImage)
        ax.imshow(residual, origin="lower", aspect="auto")
    else:
        residual = (img - modelImage)
        ax.imshow(residual, origin="lower", aspect="auto")

    text = f"Range={np.amin(residual):.3e} => {np.amax(residual):.3e}\n"
    textbox = AnchoredText(text, frameon=True, loc=3, pad=0.5)
    ax.add_artist(textbox)

    ax.set_title("Sersic fit residual")


def make_figure(result: Type[Result], folder: bool, save=False, show=False) -> None:
    '''Function plots results from image analysis. Plots two or four images.
       Top row: original image and object map with stars overplotted if any.
       bottom row: Sersic fit and residual with stars overplotted if any.

    Parameters
    ----------

    result : Type[Result]
        Data class container of calculated results. Must have clean image
        and pixelmap in order to run this function.

    folder : bool
        If True then adjusts path to read file from.

    save : bool, optional
        If true function saves generated figure.

    show: bool, optional
        If true open interactive matplotlib plot.

    Returns
    -------

    None
    '''

    with warnings.catch_warnings():
        # ignore invalid card warnings
        warnings.simplefilter('ignore', category=AstropyWarning)

        # Load the cleaned image; fall back to the raw data file when the
        # stored path is not directly readable
        try:
            img, header = fits.getdata(result.cleanImage, header=True)
        except ValueError:
            if folder:
                img, header = fits.getdata(result.outfolder.parent / ("data/" + result.file),
                                           header=True)
            else:
                img, header = fits.getdata(result.outfolder.parent / (result.file),
                                           header=True)
        # Load the object pixel map, with analogous fallback
        try:
            mask = fits.getdata(result.pixelMapFile)
        except ValueError:
            mask = fits.getdata(result.outfolder / ("pixelmap_" + result.file))

    # Four panels when a Sersic fit and sky estimate exist, otherwise two
    if result.sersic_r_eff != -99 and result.sky != -99:
        fig, axs = plt.subplots(2, 2)
        axs = axs.ravel()
        make_oneone(axs[0], img, result)
        make_onetwo(axs[1], mask, result)
        modelImage = make_twoone(axs[2], img.shape, result)
        make_twotwo(axs[3], img, modelImage, result.objList, result)
    else:
        fig, axs = plt.subplots(1, 2)
        make_oneone(axs[0], img, result)
        axs[0].set_ylim([-0.5, img.shape[1]+0.5])
        make_onetwo(axs[1], mask, result)
        axs[1].set_ylim([-0.5, mask.shape[1]+0.5])

    fig.set_figheight(11.25)
    fig.set_figwidth(20)

    if len(result.objList) > 0:
        occludingStars = RADECtopixel(result.objList, header)

    for i, ax in enumerate(axs):
        ax = _supressAxs(ax)
        # Overplot occluding stars on all panels except the model (index 2);
        # legends on all panels except the residual (index 3)
        if(len(result.objList) > 0):
            if i != 2:
                ax.scatter(*zip(*occludingStars), label="STAR", color="orange")
        if i != 3:
            ax.legend(loc=2)

    plt.subplots_adjust(top=0.975, bottom=0.005, left=0.003, right=0.997,
                        hspace=0.050, wspace=0.006)

    if save:
        plt.savefig("results/result_" + result.file[11:-11] + ".png", dpi=96)
    if show:
        plt.show()
    plt.close()
nilq/baby-python
python
# End-to-end echo test: build/push a Python echo server and client image,
# run the server container on SERVER and the client container on CLIENT
# (both reached over SSH), and let the client call the server.
import socket, time
from kubism.util.dkr import PyApp_Image
import docker
import kubism.util.dkr as dkr

# Raspberry Pi hosts reached via ssh (user `pi`)
SERVER = '172.24.12.161'
CLIENT = '172.24.12.160'

echo_port = 8080

# Echo Test

# Create Echo Server
print('Building and pushing images...')
# armv6 Alpine base for the server host's architecture
echo_srv = PyApp_Image('./examples/py/echo_server.py',
                       parent_image = 'arm32v6/python:3-alpine',
                       repo='echo', tag='server-v6',
                       automate=True)
echo_srv.docker = docker.DockerClient(base_url=f'ssh://pi@{SERVER}')

# armv7 Debian base for the client host's architecture
echo_cli = PyApp_Image('./examples/py/echo_client.py',
                       parent_image = 'arm32v7/python:3-buster',
                       repo='echo', tag='client-v7',
                       automate=True)
echo_cli.docker = docker.DockerClient(base_url=f'ssh://pi@{CLIENT}')

print(f'Run Server on server {SERVER} ...')
echo_srv.run(ports={f'{echo_port}/tcp':echo_port})

# Give the server a moment to start listening before the client connects
print('Waiting 3 seconds...')
time.sleep(3)

print(f'Run Client on client {CLIENT} ...')
print('Calling Server...')
echo_cli.run(ports={f'{echo_port}/tcp':echo_port})

#echo_srv.stop() # Not necessary
#echo_cli.stop()

print('DONE!')
nilq/baby-python
python
from flask_security_bundle import FlaskSecurityBundle


class SecurityBundle(FlaskSecurityBundle):
    """Application security bundle.

    Inherits all behavior unchanged from ``FlaskSecurityBundle``;
    presumably serves as the app-local extension point for security
    configuration (no overrides yet).
    """
    pass
nilq/baby-python
python
from .parse_html_index import parse_html_index from .parse_html_raceindex import parse_html_raceindex from .parse_html_racelist import parse_html_racelist from .parse_html_oddstf import parse_html_oddstf from .parse_html_oddsk import parse_html_oddsk from .parse_html_odds2tf import parse_html_odds2tf from .parse_html_odds3t import parse_html_odds3t from .parse_html_odds3f import parse_html_odds3f from .parse_html_beforeinfo import parse_html_beforeinfo from .parse_html_raceresult import parse_html_raceresult __all__ = [ 'parse_html_index', 'parse_html_raceindex', 'parse_html_racelist', 'parse_html_oddstf', 'parse_html_oddsk', 'parse_html_odds2tf', 'parse_html_odds3t', 'parse_html_odds3f', 'parse_html_beforeinfo', 'parse_html_raceresult', ]
nilq/baby-python
python
from .common import *  # NOQA
import pytest

# Credentials and settings come from the environment; an empty string means
# "not provided".
HUAWEI_CCE_ACCESS_KEY = os.environ.get('RANCHER_HUAWEI_CCE_ACCESS_KEY', "")
HUAWEI_CCE_SECRET_KEY = os.environ.get('RANCHER_HUAWEI_CCE_SECRET_KEY', "")
HUAWEI_CCE_PROJECT = os.environ.get('RANCHER_HUAWEI_CCE_PROJECT', "")
HUAWEI_CCE_AMI = os.environ.get('RANCHER_HUAWEI_CCE_AMI', "")

# Skip marker: the cluster test only runs when all mandatory credentials are set.
huaweiccecredential = pytest.mark.skipif(
    not (HUAWEI_CCE_ACCESS_KEY and HUAWEI_CCE_SECRET_KEY and HUAWEI_CCE_PROJECT),
    reason='HUAWEI CCE Credentials not provided, '
           'cannot create cluster')


@huaweiccecredential
def test_create_huaei_cce_cluster():
    """Create a Huawei CCE cluster through Rancher, validate it, then clean up.

    NOTE(review): the function name keeps the original 'huaei' typo so
    external test selection (``-k`` patterns, CI configs) keeps working.
    """
    client = get_admin_client()
    huawei_cceConfig = get_huawei_cce_config()
    print("Cluster creation")
    cluster = client.create_cluster(huawei_cceConfig)
    print(cluster)
    cluster = validate_cluster(client, cluster,
                               check_intermediate_state=True,
                               skipIngresscheck=True)
    print(cluster)
    cluster_cleanup(client, cluster)


def get_huawei_cce_config():
    """Build the cluster-creation payload for the huaweicontainercloudengine driver.

    Returns:
        dict with the top-level keys ``huaweiEngineConfig``, ``name``, ``type``.
    """
    name = random_test_name("tl-test-auto-huawei-cce")
    huawei_cceConfig = {
        "accessKey": HUAWEI_CCE_ACCESS_KEY,
        "apiServerElbId": "",
        # NOTE(review): 'authentiactionMode' looks misspelled, but it is a
        # wire-format key consumed by the driver — confirm against the driver
        # schema before renaming.
        "authentiactionMode": "rbac",
        "authenticatingProxyCa": None,
        "availableZone": "cn-north-1a",
        "billingMode": 0,
        "bmsIsAutoRenew": "false",
        "bmsPeriodNum": 1,
        "bmsPeriodType": "month",
        "clusterBillingMode": 0,
        "clusterEipId": "",
        "clusterFlavor": "cce.s2.small",
        "clusterType": "VirtualMachine",
        "containerNetworkCidr": "10.0.0.0/16",
        "containerNetworkMode": "overlay_l2",
        "dataVolumeSize": 100,
        "dataVolumeType": "SATA",
        "description": "",
        "displayName": "",
        "driverName": "huaweicontainercloudengine",
        "eipBandwidthSize": 100,
        "eipChargeMode": "traffic",
        "eipCount": 3,
        "eipShareType": "PER",
        "eipType": "5_bgp",
        "externalServerEnabled": False,
        "highwaySubnet": "",
        "masterVersion": "v1.15.6",
        "nodeCount": 3,
        "nodeFlavor": "c3.large.2",
        "nodeOperationSystem": "CentOS 7.6",
        "password": "",
        "projectId": HUAWEI_CCE_PROJECT,
        "region": "cn-north-1",
        "rootVolumeSize": 40,
        "rootVolumeType": "SATA",
        "secretKey": HUAWEI_CCE_SECRET_KEY,
        "sshKey": "tanglei",
        "subnetId": "c3a34386-5212-4484-be9c-1220807c4cfa",
        "userName": "root",
        "vipSubnetId": "09fb7641-3958-47d7-b5fb-dd92a19ef7ee",
        "vpcId": "d5842876-29a6-4751-87bd-7c4af4cf2f47",
        "type": "huaweiEngineConfig",
        "keypairs": "cn-north-1a",
    }
    # BUGFIX: HUAWEI_CCE_AMI defaults to "" (never None), so the previous
    # `is not None` check was always true and injected an empty "ami" entry.
    # Only pass the AMI through when one was actually provided.
    if HUAWEI_CCE_AMI:
        huawei_cceConfig.update({"ami": HUAWEI_CCE_AMI})
    # Generate the config for CCE cluster
    huawei_cceConfig = {
        "huaweiEngineConfig": huawei_cceConfig,
        "name": name,
        "type": "cluster"
    }
    print("\nHUAWEI CCE Configuration")
    print(huawei_cceConfig)
    return huawei_cceConfig
nilq/baby-python
python
from jewelry import Jewelry


class Necklace(Jewelry):
    """A polished piece of jewelry made of a metal and set with a gem."""

    DEFAULT_METAL: str = "gold"
    DEFAULT_GEM: str = "diamond"

    def __init__(self, metal: str = DEFAULT_METAL, gem: str = DEFAULT_GEM):
        # Necklaces are always created in a polished state.
        super().__init__(polished=True)
        self._metal = metal
        self._gem = gem

    @property
    def metal(self) -> str:
        """Metal the necklace is made of."""
        return self._metal

    @metal.setter
    def metal(self, value: str) -> None:
        self._metal = value

    @property
    def gem(self) -> str:
        """Gem set in the necklace."""
        return self._gem

    @gem.setter
    def gem(self, value: str) -> None:
        self._gem = value
nilq/baby-python
python
# Copyright (c) 2013 Shotgun Software Inc. # # CONFIDENTIAL AND PROPRIETARY # # This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit # Source Code License included in this distribution package. See LICENSE. # By accessing, using, copying or modifying this work you indicate your # agreement to the Shotgun Pipeline Toolkit Source Code License. All rights # not expressly granted therein are reserved by Shotgun Software Inc. """ This module contains all the implementations for the different folder types that can be created. """ from .errors import EntityLinkTypeMismatch from .static import Static from .listfield import ListField from .entity import Entity from .project import Project from .user import UserWorkspace from .step import ShotgunStep from .task import ShotgunTask
nilq/baby-python
python
import functools
from collections import OrderedDict, namedtuple
from http import HTTPStatus
from types import FunctionType
from typing import Callable, Iterable, Optional

from werkzeug.routing import Map, MethodNotAllowed, NotFound, RequestRedirect, Rule

from PythonPlug import Conn
from PythonPlug.plug import Plug

# A forward target: `to` is the child router/plug, `change_path` says whether
# conn.scope["path"] should be rewritten to the remaining (unconsumed) path.
Forward = namedtuple("Forward", ["to", "change_path"])


class RouterPlug(Plug):
    """Plug that routes a Conn to registered plugs via a werkzeug URL map.

    Requests that match no rule can be forwarded to nested routers by path
    prefix; consumed prefixes are tracked in ``conn.private``.
    """

    def __init__(self):
        super().__init__()
        self.url_map = Map()  # werkzeug rules; endpoint name is the lookup key
        self.endpoint_to_plug = {}  # endpoint name -> plug callable
        self.forwards = OrderedDict()  # path prefix -> Forward(to, change_path)

    def route(self, rule, methods=None, name=""):
        """Decorator that registers the decorated plug for `rule` (werkzeug syntax)."""
        methods = set(methods) if methods is not None else None
        # werkzeug expects OPTIONS to be allowed whenever methods are restricted.
        if methods and not "OPTIONS" in methods:
            methods.add("OPTIONS")

        def decorator(name: Optional[str], plug: Callable):
            self.add_route(rule_string=rule, plug=plug, methods=methods, name=name)
            return plug

        return functools.partial(decorator, name)

    async def call(self, conn: Conn):
        """Dispatch `conn` to the matching plug, a forwarded router, or return it unchanged."""
        try:
            rule, args = self.url_adapter(conn).match(
                return_rule=True, method=conn.scope.get("method")
            )
        except RequestRedirect as e:
            # werkzeug requests a canonical-URL redirect (e.g. trailing slash).
            return await conn.redirect(e.new_url, code=302)
        except MethodNotAllowed as e:
            return await conn.send_resp(b"", HTTPStatus.METHOD_NOT_ALLOWED, halt=True)
        except NotFound as e:
            # No rule matched: try prefix-based forwarding to nested routers.
            def prefix_matcher(prefix):
                return conn.private["remaining_path"].startswith(prefix)

            # NOTE(review): sorted(..., key=len) puts the SHORTEST matching
            # prefix first, so forward_matches[0] is the shortest; routers
            # conventionally prefer the LONGEST prefix — confirm intended.
            forward_matches = sorted(filter(prefix_matcher, self.forwards), key=len)
            if forward_matches:
                match = forward_matches[0]
                router, change_path = self.forwards[match]
                # Record the consumed prefix and strip it from the remaining path.
                conn.private.setdefault("consumed_path", []).append(match)
                conn.private["remaining_path"] = conn.private["remaining_path"][
                    len(match) :
                ]
                if change_path:
                    conn._scope["path"] = conn.private["remaining_path"]
                return await router(conn)
            # Nothing matched at all: hand the conn back untouched.
            return conn
        else:
            plug = self.endpoint_to_plug.get(rule.endpoint)
            # Expose the matched URL parameters to downstream plugs.
            conn.private.setdefault("router_args", {}).update(args)
            return await plug(conn)

    def url_adapter(self, conn: Conn):
        """Bind the URL map to this request, matching against the unconsumed path."""
        scope = conn.scope
        remaining_path = conn.private.get("remaining_path")
        if remaining_path is None:
            # First router in the chain: the whole request path is unconsumed.
            remaining_path = conn.private["remaining_path"] = scope.get("path")
        return self.url_map.bind(
            conn.req_headers.get("host"),
            path_info=remaining_path,
            script_name=scope.get("root_path", "") or None,
            url_scheme=scope.get("scheme"),
            query_args=scope.get("query_string", b""),
        )

    def add_route(
        self,
        *,
        rule_string: str,
        plug: Callable,
        name: Optional[str] = None,
        methods: Optional[Iterable[str]] = None,
    ):
        """Register `plug` under `rule_string`; derives `name` from the plug when empty."""
        if not name:
            if isinstance(plug, FunctionType):
                name = plug.__name__
            if isinstance(plug, Plug):
                name = type(plug).__name__
        assert name not in self.endpoint_to_plug, (
            "a plug is overwriting an existing plug: %s" % name
        )
        self.url_map.add(Rule(rule_string, endpoint=name, methods=methods))
        self.endpoint_to_plug[name] = plug

    def forward(self, prefix, router=None, change_path=False):
        """Forward requests whose remaining path starts with `prefix` to `router`."""
        assert prefix not in self.forwards, (
            "Cannot forward same prefix to different routers: %s" % prefix
        )
        self.forwards[prefix] = Forward(router, change_path)
        return router
nilq/baby-python
python
import unittest
from test_support import run_unittest, TESTFN
import glob
import os

def mkdirs(fname):
    # Recursively create fname and any missing parents (pre-os.makedirs style).
    if os.path.exists(fname) or fname == '':
        return
    base, file = os.path.split(fname)
    mkdirs(base)
    os.mkdir(fname)

def touchfile(fname):
    # Create an empty file, creating parent directories as needed.
    base, file = os.path.split(fname)
    mkdirs(base)
    f = open(fname, 'w')
    f.close()

def deltree(fname):
    # Best-effort recursive removal of a directory tree.
    for f in os.listdir(fname):
        fullname = os.path.join(fname, f)
        if os.path.isdir(fullname):
            deltree(fullname)
        else:
            try:
                os.unlink(fullname)
            # BUGFIX: was a bare `except:` that also swallowed
            # KeyboardInterrupt/SystemExit; only filesystem errors are expected.
            except OSError:
                pass
    try:
        os.rmdir(fname)
    except OSError:
        pass

class GlobTests(unittest.TestCase):
    """Exercise glob.glob() against a small temporary directory tree."""

    def norm(self, *parts):
        # Normalised path rooted at the temp directory.
        return os.path.normpath(os.path.join(self.tempdir, *parts))

    def mktemp(self, *parts):
        touchfile(self.norm(*parts))

    def setUp(self):
        self.tempdir = TESTFN+"_dir"
        self.mktemp('a', 'D')
        self.mktemp('aab', 'F')
        self.mktemp('aaa', 'zzzF')
        self.mktemp('ZZZ')
        self.mktemp('a', 'bcd', 'EF')
        self.mktemp('a', 'bcd', 'efg', 'ha')

    def tearDown(self):
        deltree(self.tempdir)

    def glob(self, *parts):
        # Run glob.glob on a pattern rooted at the temp directory.
        if len(parts) == 1:
            pattern = parts[0]
        else:
            pattern = os.path.join(*parts)
        p = os.path.join(self.tempdir, pattern)
        return glob.glob(p)

    def assertSequencesEqual_noorder(self, l1, l2):
        # Order-insensitive sequence comparison.
        l1 = list(l1)
        l2 = list(l2)
        l1.sort()
        l2.sort()
        self.assertEqual(l1, l2)

    # BUGFIX (all tests below): removed the unused `np = lambda *f: norm(...)`
    # helpers, which referenced an undefined global `norm` and would have
    # raised NameError if ever called.

    def test_glob_literal(self):
        eq = self.assertSequencesEqual_noorder
        eq(self.glob('a'), [self.norm('a')])
        eq(self.glob('a', 'D'), [self.norm('a', 'D')])
        eq(self.glob('aab'), [self.norm('aab')])
        eq(self.glob('zymurgy'), [])

    def test_glob_one_directory(self):
        eq = self.assertSequencesEqual_noorder
        eq(self.glob('a*'), map(self.norm, ['a', 'aab', 'aaa']))
        eq(self.glob('*a'), map(self.norm, ['a', 'aaa']))
        eq(self.glob('aa?'), map(self.norm, ['aaa', 'aab']))
        eq(self.glob('aa[ab]'), map(self.norm, ['aaa', 'aab']))
        eq(self.glob('*q'), [])

    def test_glob_nested_directory(self):
        eq = self.assertSequencesEqual_noorder
        if os.path.normcase("abCD") == "abCD":
            # case-sensitive filesystem
            eq(self.glob('a', 'bcd', 'E*'), [self.norm('a', 'bcd', 'EF')])
        else:
            # case insensitive filesystem
            eq(self.glob('a', 'bcd', 'E*'), [self.norm('a', 'bcd', 'EF'),
                                             self.norm('a', 'bcd', 'efg')])
        eq(self.glob('a', 'bcd', '*g'), [self.norm('a', 'bcd', 'efg')])

    def test_glob_directory_names(self):
        eq = self.assertSequencesEqual_noorder
        eq(self.glob('*', 'D'), [self.norm('a', 'D')])
        eq(self.glob('*', '*a'), [])
        eq(self.glob('a', '*', '*', '*a'),
           [self.norm('a', 'bcd', 'efg', 'ha')])
        eq(self.glob('?a?', '*F'), map(self.norm, [os.path.join('aaa', 'zzzF'),
                                                   os.path.join('aab', 'F')]))

def test_main():
    run_unittest(GlobTests)

if __name__ == "__main__":
    test_main()
nilq/baby-python
python
from selenium import webdriver
import os
import subprocess

# Render opcodes.html in Chrome, save a screenshot of the opcode map, and
# trim the surrounding margins with ImageMagick's `convert`.

driver = webdriver.Chrome(service_log_path=os.path.devnull)  # discard chromedriver logs
driver.set_window_size(1500, 900)
# file:// URL pointing at the opcode table in the current working directory.
fname = "file://" + os.getcwd() + "/opcodes.html"
driver.get(fname)
driver.save_screenshot("../images/opcode_map.png")
driver.quit()
# Trim whitespace borders in place (requires ImageMagick on PATH).
subprocess.check_output([
    "convert", "../images/opcode_map.png",
    "-trim",
    "../images/opcode_map.png"])
nilq/baby-python
python
# A class attribute counts Student instances: every time an instance is
# created, __init__ increments it by one.
class Student(object):
    count = 0  # number of Student instances created so far

    def __init__(self, name, score):
        # BUGFIX: the original assigned Student.name / Student.score — i.e. it
        # overwrote CLASS attributes shared by every instance, so each new
        # Student clobbered the previous one's name and score. Bind per-instance
        # state to `self` instead, and count unconditionally (the old
        # `if Student.name != []` guard was always true for a non-empty string).
        self.name = name
        self.score = score
        Student.count += 1

# Test:
if Student.count != 0:
    print('测试失败!')
else:
    bart = Student('Bart', 90)
    if Student.count != 1:
        print('测试失败!')
    else:
        lisa = Student('Bart', 80)
        if Student.count != 2:
            print('测试失败!')
        else:
            print('Students:', Student.count)
            print('测试通过!')

Michael = Student('Michael', 90)
Jane = Student('Jane', 90)
KangKang = Student('KangKang', 90)
# The counter lives on the class: `count = 0` runs once when the class body is
# executed, and __init__ then increments it for every instance created.
print(Student.count)
nilq/baby-python
python
#User function Template for python3

class Solution:
    # Function to find if there exists a triplet in the
    # array A[] which sums up to X.
    def find3Numbers(self, A, n, X):
        """Return 1 if any triplet in A[0:n] sums to X, else 0.

        Classic sort + two-pointer scan: O(n log n) sort, O(n^2) scan.
        Note: sorts A in place (same as the original implementation).

        BUGFIX: the original loop was guarded by `while(sum1 != X and ...)`
        with `sum1` initialised to 0, so for X == 0 the loop body never ran
        and valid triplets (e.g. [-1, 0, 1] summing to 0) were missed.
        """
        A.sort()
        for i in range(n - 2):
            start = i + 1
            end = n - 1
            while start < end:
                current = A[i] + A[start] + A[end]
                if current == X:
                    return 1
                if current > X:
                    end -= 1  # sum too big: shrink from the right
                else:
                    start += 1  # sum too small: grow from the left
        return 0


#{ 
#  Driver Code Starts
#Initial Template for Python 3

import atexit
import io
import sys

if __name__ == '__main__':
    # IMPROVEMENT: stdin capture and stdout buffering were previously done at
    # module level (side effects on import); they now only run as a script.
    _INPUT_LINES = sys.stdin.read().splitlines()
    input = iter(_INPUT_LINES).__next__
    _OUTPUT_BUFFER = io.StringIO()
    sys.stdout = _OUTPUT_BUFFER

    @atexit.register
    def write():
        # Flush the buffered output to the real stdout at interpreter exit.
        sys.__stdout__.write(_OUTPUT_BUFFER.getvalue())

    t = int(input())
    for i in range(t):
        n, X = map(int, input().strip().split())
        A = list(map(int, input().strip().split()))
        ob = Solution()
        if ob.find3Numbers(A, n, X):
            print(1)
        else:
            print(0)
# } Driver Code Ends
nilq/baby-python
python
# Created: 17.05.2019 # Copyright (c) 2019, Manfred Moitzi # License: MIT License from typing import TYPE_CHECKING, Iterable, List, Mapping, Set import json from ezdxf.sections.tables import TABLENAMES from ezdxf.lldxf.tags import Tags if TYPE_CHECKING: from ezdxf.eztypes import Insert, MText, LWPolyline, Polyline, Spline, Leader, Dimension, Image, Mesh, Hatch from ezdxf.eztypes import DXFEntity, Linetype, DXFTag, BlockLayout __all__ = ['entities_to_code', 'block_to_code', 'table_entries_to_code'] def entities_to_code(entities: Iterable['DXFEntity'], layout: str = 'layout', ignore: Iterable[str] = None) -> 'Code': """ Translates DXF entities into Python source code to recreate this entities by ezdxf. Args: entities: iterable of DXFEntity layout: variable name of the layout (model space or block) as string ignore: iterable of entities types to ignore as strings like ``['IMAGE', 'DIMENSION']`` Returns: :class:`Code` """ code = _SourceCodeGenerator(layout=layout) code.translate_entities(entities, ignore=ignore) return code.code def block_to_code(block: 'BlockLayout', drawing: str = 'doc', ignore: Iterable[str] = None) -> 'Code': """ Translates a BLOCK into Python source code to recreate the BLOCK by ezdxf. 
Args: block: block definition layout drawing: variable name of the drawing as string ignore: iterable of entities types to ignore as strings like ['IMAGE', 'DIMENSION'] Returns: :class:`Code` """ dxfattribs = _purge_handles(block.block.dxfattribs()) block_name = dxfattribs.pop('name') base_point = dxfattribs.pop('base_point') code = _SourceCodeGenerator(layout='b') prolog = 'b = {}.blocks.new("{}", base_point={}, dxfattribs={{'.format(drawing, block_name, str(base_point)) code.add_source_code_line(prolog) code.add_source_code_lines(_fmt_mapping(dxfattribs, indent=4)) code.add_source_code_line(' }') code.add_source_code_line(')') code.translate_entities(block, ignore=ignore) return code.code def table_entries_to_code(entities: Iterable['DXFEntity'], drawing='doc') -> 'Code': code = _SourceCodeGenerator(doc=drawing) code.translate_entities(entities) return code.code class Code: """ Source code container. """ def __init__(self): self.code = [] # type: List[str] self.imports = set() # type: Set[str] # global imports -> indention level 0 self.layers = set() # type: Set[str] # layer names as string self.styles = set() # type: Set[str] # text style name as string, requires a TABLE entry self.linetypes = set() # type: Set[str] # line type names as string, requires a TABLE entry self.dimstyles = set() # type: Set[str] # dimension style names as string, requires a TABLE entry self.blocks = set() # type: Set[str] # block names as string, requires a BLOCK definition def code_str(self, indent: int = 0) -> str: """ Returns the source code as a single string. Args: indent: source code indentation count by spaces """ lead_str = ' ' * indent return '\n'.join(lead_str + line for line in self.code) def __str__(self) -> str: """ Returns the source code as a single string. """ return self.code_str() def import_str(self, indent: int = 0) -> str: """ Returns required imports as a single string. 
Args: indent: source code indentation count by spaces """ lead_str = ' ' * indent return '\n'.join(lead_str + line for line in self.imports) def add_import(self, statement: str) -> None: """ Add import statement, identical import statements are merged together. """ self.imports.add(statement) def add_line(self, code: str, indent: int = 0) -> None: """ Add a single source code line without line ending ``\\n``. """ self.code.append(' ' * indent + code) def add_lines(self, code: Iterable[str], indent: int = 0) -> None: """ Add multiple source code lines without line ending ``\\n``. """ for line in code: self.add_line(line, indent=indent) def merge(self, code: 'Code', indent: int = 0) -> None: """ Add another :class:`Code` object. """ # merge used resources self.imports.update(code.imports) self.layers.update(code.layers) self.linetypes.update(code.linetypes) self.styles.update(code.styles) self.dimstyles.update(code.dimstyles) self.blocks.update(code.blocks) # append source code lines self.add_lines(self.code, indent=indent) _PURGE_DXF_ATTRIBUTES = {'handle', 'owner', 'paperspace', 'material_handle', 'visualstyle_handle', 'plotstyle_handle'} def _purge_handles(attribs: dict) -> dict: """ Purge handles from DXF attributes which will be invalid in a new document, or which will be set automatically by adding an entity to a layout (paperspace). 
Args: attribs: entity DXF attributes dictionary """ return {k: v for k, v in attribs.items() if k not in _PURGE_DXF_ATTRIBUTES} def _fmt_mapping(mapping: Mapping, indent: int = 0) -> Iterable[str]: # key is always a string fmt = ' ' * indent + "'{}': {}," for k, v in mapping.items(): assert isinstance(k, str) if isinstance(v, str): v = json.dumps(v) # for correct escaping of quotes else: v = str(v) # format uses repr() for Vectors yield fmt.format(k, v) def _fmt_list(l: Iterable, indent: int = 0) -> Iterable[str]: fmt = ' ' * indent + '{},' for v in l: yield fmt.format(str(v)) def _fmt_api_call(func_call: str, args: Iterable[str], dxfattribs: dict) -> List[str]: attributes = dict(dxfattribs) args = list(args) if args else [] def fmt_keywords() -> Iterable[str]: for arg in args: if arg not in attributes: continue value = attributes.pop(arg) if isinstance(value, str): valuestr = json.dumps(value) # quoted string! else: valuestr = str(value) yield " {}={},".format(arg, valuestr) s = [func_call] s.extend(fmt_keywords()) s.append(' dxfattribs={') s.extend(_fmt_mapping(attributes, indent=8)) s.extend([ " },", ")", ]) return s def _fmt_dxf_tags(tags: Iterable['DXFTag'], indent: int = 0): fmt = ' ' * indent + 'dxftag({}, {}),' for code, value in tags: assert isinstance(code, int) if isinstance(value, str): value = json.dumps(value) # for correct escaping of quotes else: value = str(value) # format uses repr() for Vectors yield fmt.format(code, value) class _SourceCodeGenerator: """ The :class:`_SourceCodeGenerator` translates DXF entities into Python source code for creating the same DXF entity in another model space or block definition. :ivar code: list of source code lines without line endings :ivar required_imports: list of import source code lines, which are required to create executable Python code. 
""" def __init__(self, layout: str = 'layout', doc: str = 'doc'): self.doc = doc self.layout = layout self.code = Code() def translate_entity(self, entity: 'DXFEntity') -> None: """ Translates one DXF entity into Python source code. The generated source code is appended to the attribute `source_code`. Args: entity: DXFEntity object """ dxftype = entity.dxftype() try: entity_translator = getattr(self, '_' + dxftype.lower()) except AttributeError: self.add_source_code_line('# unsupported DXF entity "{}"'.format(dxftype)) else: entity_translator(entity) def translate_entities(self, entities: Iterable['DXFEntity'], ignore: Iterable[str] = None) -> None: """ Translates multiple DXF entities into Python source code. The generated source code is appended to the attribute `source_code`. Args: entities: iterable of DXFEntity ignore: iterable of entities types to ignore as strings like ['IMAGE', 'DIMENSION'] """ ignore = set(ignore) if ignore else set() for entity in entities: if entity.dxftype() not in ignore: self.translate_entity(entity) def add_used_resources(self, dxfattribs: Mapping) -> None: """ Register used resources like layers, line types, text styles and dimension styles. 
Args: dxfattribs: DXF attributes dictionary """ if 'layer' in dxfattribs: self.code.layers.add(dxfattribs['layer']) if 'linetype' in dxfattribs: self.code.linetypes.add(dxfattribs['linetype']) if 'style' in dxfattribs: self.code.styles.add(dxfattribs['style']) if 'dimstyle' in dxfattribs: self.code.dimstyles.add(dxfattribs['dimstyle']) def add_import_statement(self, statement: str) -> None: self.code.add_import(statement) def add_source_code_line(self, code: str) -> None: self.code.add_line(code) def add_source_code_lines(self, code: Iterable[str]) -> None: self.code.add_lines(code) def add_list_source_code(self, values: Iterable, prolog: str = '[', epilog: str = ']', indent: int = 0) -> None: fmt_str = ' ' * indent + '{}' self.add_source_code_line(fmt_str.format(prolog)) self.add_source_code_lines(_fmt_list(values, indent=4 + indent)) self.add_source_code_line(fmt_str.format(epilog)) def add_dict_source_code(self, mapping: Mapping, prolog: str = '{', epilog: str = '}', indent: int = 0) -> None: fmt_str = ' ' * indent + '{}' self.add_source_code_line(fmt_str.format(prolog)) self.add_source_code_lines(_fmt_mapping(mapping, indent=4 + indent)) self.add_source_code_line(fmt_str.format(epilog)) def add_tags_source_code(self, tags: Tags, prolog='tags = Tags(', epilog=')', indent=4): fmt_str = ' ' * indent + '{}' self.add_source_code_line(fmt_str.format(prolog)) self.add_source_code_lines(_fmt_dxf_tags(tags, indent=4 + indent)) self.add_source_code_line(fmt_str.format(epilog)) def generic_api_call(self, dxftype: str, dxfattribs: dict, prefix: str = 'e = ') -> Iterable[str]: """ Returns the source code strings to create a DXF entity by a generic `new_entity()` call. 
Args: dxftype: DXF entity type as string, like 'LINE' dxfattribs: DXF attributes dictionary prefix: prefix string like variable assignment 'e = ' """ dxfattribs = _purge_handles(dxfattribs) self.add_used_resources(dxfattribs) s = [ "{}{}.new_entity(".format(prefix, self.layout), " '{}',".format(dxftype), " dxfattribs={", ] s.extend(_fmt_mapping(dxfattribs, indent=8)) s.extend([ " },", ")", ]) return s def api_call(self, api_call: str, args: Iterable[str], dxfattribs: dict, prefix: str = 'e = ') -> Iterable[str]: """ Returns the source code strings to create a DXF entity by the specialised API call. Args: api_call: API function call like 'add_line(' args: DXF attributes to pass as arguments dxfattribs: DXF attributes dictionary prefix: prefix string like variable assignment 'e = ' """ dxfattribs = _purge_handles(dxfattribs) func_call = '{}{}.{}'.format(prefix, self.layout, api_call) return _fmt_api_call(func_call, args, dxfattribs) def new_table_entry(self, dxftype: str, dxfattribs: dict) -> Iterable[str]: """ Returns the source code strings to create a new table entity by ezdxf. 
Args: dxftype: table entry type as string, like 'LAYER' dxfattribs: DXF attributes dictionary """ table = '{}.{}'.format(self.doc, TABLENAMES[dxftype]) dxfattribs = _purge_handles(dxfattribs) name = dxfattribs.pop('name') s = [ "if '{}' not in {}:".format(name, table), " t = {}.new(".format(table), " '{}',".format(name), " dxfattribs={", ] s.extend(_fmt_mapping(dxfattribs, indent=12)) s.extend([ " },", " )", ]) return s # simple graphical types def _line(self, entity: 'DXFEntity') -> None: self.add_source_code_lines(self.api_call('add_line(', ['start', 'end'], entity.dxfattribs())) def _point(self, entity: 'DXFEntity') -> None: self.add_source_code_lines(self.api_call('add_point(', ['location'], entity.dxfattribs())) def _circle(self, entity: 'DXFEntity') -> None: self.add_source_code_lines(self.api_call('add_circle(', ['center', 'radius'], entity.dxfattribs())) def _arc(self, entity: 'DXFEntity') -> None: self.add_source_code_lines( self.api_call('add_arc(', ['center', 'radius', 'start_angle', 'end_angle'], entity.dxfattribs())) def _text(self, entity: 'DXFEntity') -> None: self.add_source_code_lines(self.api_call('add_text(', ['text'], entity.dxfattribs())) def _solid(self, entity: 'DXFEntity') -> None: self.add_source_code_lines(self.generic_api_call('SOLID', entity.dxfattribs())) def _trace(self, entity: 'DXFEntity') -> None: self.add_source_code_lines(self.generic_api_call('TRACE', entity.dxfattribs())) def _3dface(self, entity: 'DXFEntity') -> None: self.add_source_code_lines(self.generic_api_call('3DFACE', entity.dxfattribs())) def _shape(self, entity: 'DXFEntity') -> None: self.add_source_code_lines(self.api_call('add_shape(', ['name', 'insert', 'size'], entity.dxfattribs())) def _attrib(self, entity: 'DXFEntity') -> None: self.add_source_code_lines(self.api_call('add_attrib(', ['tag', 'text', 'insert'], entity.dxfattribs())) def _attdef(self, entity: 'DXFEntity') -> None: self.add_source_code_lines(self.generic_api_call('ATTDEF', entity.dxfattribs())) def 
_ellipse(self, entity: 'DXFEntity') -> None: self.add_source_code_lines( self.api_call('add_ellipse(', ['center', 'major_axis', 'ratio', 'start_param', 'end_param'], entity.dxfattribs())) def _viewport(self, entity: 'DXFEntity') -> None: self.add_source_code_lines(self.generic_api_call('VIEWPORT', entity.dxfattribs())) self.add_source_code_line('# Set valid handles or remove attributes ending with "_handle", otherwise the DXF ' 'file is invalid for AutoCAD') # complex graphical types def _insert(self, entity: 'Insert') -> None: self.code.blocks.add(entity.dxf.name) self.add_source_code_lines(self.api_call('add_blockref(', ['name', 'insert'], entity.dxfattribs())) if len(entity.attribs): for attrib in entity.attribs: dxfattribs = attrib.dxfattribs() dxfattribs['layer'] = entity.dxf.layer # set ATTRIB layer to same as INSERT self.add_source_code_lines(self.generic_api_call('ATTRIB', attrib.dxfattribs(), prefix='a = ')) self.add_source_code_lines('e.attribs.append(a)') def _mtext(self, entity: 'MText') -> None: self.add_source_code_lines(self.generic_api_call('MTEXT', entity.dxfattribs())) # mtext content 'text' is not a single DXF tag and therefore not a DXF attribute self.add_source_code_line('e.text = {}'.format(json.dumps(entity.text))) def _lwpolyline(self, entity: 'LWPolyline') -> None: self.add_source_code_lines(self.generic_api_call('LWPOLYLINE', entity.dxfattribs())) # lwpolyline points are not DXF attributes self.add_list_source_code(entity.get_points(), prolog='e.set_points([', epilog='])') def _spline(self, entity: 'Spline') -> None: self.add_source_code_lines(self.api_call('add_spline(', ['degree'], entity.dxfattribs())) # spline points, knots and weights are not DXF attributes if len(entity.fit_points): self.add_list_source_code(entity.fit_points, prolog='e.fit_points = [', epilog=']') if len(entity.control_points): self.add_list_source_code(entity.control_points, prolog='e.control_points = [', epilog=']') if len(entity.knots): 
self.add_list_source_code(entity.knots, prolog='e.knots = [', epilog=']') if len(entity.weights): self.add_list_source_code(entity.weights, prolog='e.weights = [', epilog=']') def _polyline(self, entity: 'Polyline') -> None: self.add_source_code_lines(self.generic_api_call('POLYLINE', entity.dxfattribs())) # polyline vertices are separate DXF entities and therefore not DXF attributes for v in entity.vertices: attribs = _purge_handles(v.dxfattribs()) location = attribs.pop('location') if 'layer' in attribs: del attribs['layer'] # layer is automatically set to the POLYLINE layer # each VERTEX can have different DXF attributes: bulge, start_width, end_width ... self.add_source_code_line('e.append_vertex({}, dxfattribs={})'.format( str(location), attribs, )) def _leader(self, entity: 'Leader'): self.add_source_code_line('# Dimension style attribute overriding is not supported!') self.add_source_code_lines(self.generic_api_call('LEADER', entity.dxfattribs())) self.add_list_source_code(entity.vertices, prolog='e.set_vertices([', epilog='])') def _dimension(self, entity: 'Dimension'): self.add_import_statement('from ezdxf.dimstyleoverride import DimStyleOverride') self.add_source_code_line('# Dimension style attribute overriding is not supported!') self.add_source_code_lines(self.generic_api_call('DIMENSION', entity.dxfattribs())) self.add_source_code_lines([ '# You have to create the required graphical representation for the DIMENSION entity as anonymous block, ', '# otherwise the DXF file is invalid for AutoCAD (but not for BricsCAD):', '# DimStyleOverride(e).render()', '' ]) def _image(self, entity: 'Image'): self.add_source_code_line('# Image requires IMAGEDEF and IMAGEDEFREACTOR objects in the OBJECTS section!') self.add_source_code_lines(self.generic_api_call('IMAGE', entity.dxfattribs())) if len(entity.boundary_path): self.add_list_source_code( (v[:2] for v in entity.boundary_path), # just x, y axis prolog='e.set_boundary_path([', epilog='])', ) 
self.add_source_code_line('# Set valid image_def_handle and image_def_reactor_handle, otherwise the DXF file' ' is invalid for AutoCAD') def _mesh(self, entity: 'Mesh'): self.add_source_code_lines(self.api_call('add_mesh(', [], entity.dxfattribs())) if len(entity.vertices): self.add_list_source_code(entity.vertices, prolog='e.vertices = [', epilog=']') if len(entity.edges): # array.array -> tuple self.add_list_source_code((tuple(e) for e in entity.edges), prolog='e.edges = [', epilog=']') if len(entity.faces): # array.array -> tuple self.add_list_source_code((tuple(f) for f in entity.faces), prolog='e.faces = [', epilog=']') if len(entity.creases): self.add_list_source_code(entity.creases, prolog='e.creases = [', epilog=']') def _hatch(self, entity: 'Hatch'): add_line = self.add_source_code_line dxfattribs = entity.dxfattribs() dxfattribs['associative'] = 0 # associative hatch not supported self.add_source_code_lines(self.api_call('add_hatch(', ['color'], dxfattribs)) if len(entity.seeds): add_line("e.set_seed_points({})".format(str(entity.seeds))) if entity.pattern: self.add_list_source_code(entity.pattern.lines, prolog='e.set_pattern_definition([', epilog='])') arg = " {}={}," if entity.has_gradient_data: g = entity.gradient add_line('e.set_gradient(') add_line(arg.format('color1', str(g.color1))) add_line(arg.format('color2', str(g.color2))) add_line(arg.format('rotation', g.rotation)) add_line(arg.format('centered', g.centered)) add_line(arg.format('one_color', g.one_color)) add_line(arg.format('name', json.dumps(g.name))) add_line(')') for count, path in enumerate(entity.paths, start=1): if path.PATH_TYPE == 'PolylinePath': add_line('# {}. polyline path'.format(count)) self.add_list_source_code(path.vertices, prolog='e.paths.add_polyline_path([', epilog=' ],') add_line(arg.format('is_closed', str(path.is_closed))) add_line(arg.format('flags', str(path.path_type_flags))) add_line(')') else: # EdgePath add_line('# {}. 
edge path: associative hatch not supported'.format(count)) add_line('ep = e.paths.add_edge_path(flags={})'.format(path.path_type_flags)) for edge in path.edges: if edge.EDGE_TYPE == 'LineEdge': add_line('ep.add_line({}, {})'.format(str(edge.start[:2]), str(edge.end[:2]))) elif edge.EDGE_TYPE == 'ArcEdge': add_line('ep.add_arc(') add_line(arg.format('center', str(edge.center[:2]))) add_line(arg.format('radius', edge.radius)) add_line(arg.format('start_angle', edge.start_angle)) add_line(arg.format('end_angle', edge.end_angle)) add_line(arg.format('is_counter_clockwise', edge.is_counter_clockwise)) add_line(')') elif edge.EDGE_TYPE == 'EllipseEdge': add_line('ep.add_ellipse(') add_line(arg.format('center', str(edge.center[:2]))) add_line(arg.format('major_axis', str(edge.major_axis[:2]))) add_line(arg.format('ratio', edge.ratio)) add_line(arg.format('start_angle', edge.start_angle)) add_line(arg.format('end_angle', edge.end_angle)) add_line(arg.format('is_counter_clockwise', edge.is_counter_clockwise)) add_line(')') elif edge.EDGE_TYPE == 'SplineEdge': add_line('ep.add_spline(') if edge.fit_points: add_line(arg.format('fit_points', str([fp[:2] for fp in edge.fit_points]))) if edge.control_points: add_line( arg.format('control_points', str([cp[:2] for cp in edge.control_points]))) if edge.knot_values: add_line(arg.format('knot_values', str(edge.knot_values))) if edge.weights: add_line(arg.format('weights', str(edge.weights))) add_line(arg.format('degree', edge.degree)) add_line(arg.format('rational', edge.rational)) add_line(arg.format('periodic', edge.periodic)) if edge.start_tangent is not None: add_line(arg.format('start_tangent', str(edge.start_tangent))) if edge.end_tangent is not None: add_line(arg.format('end_tangent', str(edge.end_tangent))) add_line(')') # simple table entries def _layer(self, layer: 'DXFEntity'): self.add_source_code_lines(self.new_table_entry('LAYER', layer.dxfattribs())) def _ltype(self, ltype: 'Linetype'): self.add_import_statement('from 
ezdxf.lldxf.tags import Tags') self.add_import_statement('from ezdxf.lldxf.types import dxftag') self.add_import_statement('from ezdxf.entities.ltype import LinetypePattern') self.add_source_code_lines(self.new_table_entry('LTYPE', ltype.dxfattribs())) self.add_tags_source_code(ltype.pattern_tags.tags, prolog='tags = Tags([', epilog='])', indent=4) self.add_source_code_line(' t.pattern_tags = LinetypePattern(tags)') def _style(self, style: 'DXFEntity'): self.add_source_code_lines(self.new_table_entry('STYLE', style.dxfattribs())) def _dimstyle(self, dimstyle: 'DXFEntity'): self.add_source_code_lines(self.new_table_entry('DIMSTYLE', dimstyle.dxfattribs())) def _appid(self, appid: 'DXFEntity'): self.add_source_code_lines(self.new_table_entry('APPID', appid.dxfattribs()))
nilq/baby-python
python
from astropy.coordinates import Angle

from neclib.parameters import PointingErrorData


class TestPointingErrorData:
    """Regression tests: pointing-error parameter files parse to known values."""

    # Reference values the example parameter files must parse to.
    # Angular terms are exact ``astropy`` Angle quantities; the gravity-term
    # coefficients (g, gg, ...) are dimensionless floats.
    expected = {
        "dAz": Angle("5314.2466754691195arcsec"),
        "de": Angle("382arcsec"),
        "chi_Az": Angle("-27.743114809726713arcsec"),
        "omega_Az": Angle("-10.004233550100272deg"),
        "eps": Angle("-13.562343977659976arcsec"),
        "chi2_Az": Angle("-3.2283345930067489arcsec"),
        "omega2_Az": Angle("-34.73486665318979deg"),
        "chi_El": Angle("-30.046387189617871arcsec"),
        "omega_El": Angle("-16.233694100299584deg"),
        "chi2_El": Angle("-1.1446000035021269arcsec"),
        "omega2_El": Angle("-41.474874481601418deg"),
        "g": -0.17220574801726421,
        "gg": 0.0,
        "ggg": 0.0,
        "gggg": 0.0,
        "dEl": Angle("6520.2376117807198arcsec"),
        "de_radio": Angle("-394.46arcsec"),
        "del_radio": Angle("210.7228arcsec"),
        "cor_v": Angle("27.434arcsec"),
        "cor_p": Angle("-31.6497deg"),
        "g_radio": -0.454659,
        "gg_radio": 0.0128757,
        "ggg_radio": 0.000000,
        "gggg_radio": 0.000000,
    }

    def test_from_file(self, data_dir):
        """TOML loader: every parameter is exposed via attribute AND item access."""
        actual = PointingErrorData.from_file(data_dir / "example_pointing_param.toml")
        for name, value in self.expected.items():
            # Both access styles must agree with the reference value.
            assert getattr(actual, name) == value
            assert actual[name] == value

    def test_from_text_file(self, data_dir):
        """Plain-text loader yields the same parameters as the TOML loader."""
        actual = PointingErrorData.from_text_file(
            data_dir / "example_pointing_param.txt"
        )
        for name, value in self.expected.items():
            assert getattr(actual, name) == value
            assert actual[name] == value
nilq/baby-python
python
import pyglet.resource
import pyglet.sprite
import pyglet.graphics
import pyglet.text  # Label is used below; don't rely on lazy submodule loading.


def get_room_wall_image(room):
    """Load the wall background image matching *room*'s wall variant."""
    filename = 'res/rooms/walls/{}.jpg'.format(room.wall_variant)
    return pyglet.resource.image(filename)


def get_forniture_image(forniture):
    """Load the sprite image for a piece of forniture by its name."""
    filename = 'res/forniture/{}.png'.format(forniture.name)
    return pyglet.resource.image(filename)


class RoomRender:
    """Renders a room: wall background, forniture sprites and a name label.

    All drawables share a single batch; z-order is controlled by three
    ordered groups (background < foreground < info).
    """

    def __init__(self, room):
        self.room = room
        self.background_group = pyglet.graphics.OrderedGroup(0)
        self.foreground_group = pyglet.graphics.OrderedGroup(1)
        self.info_group = pyglet.graphics.OrderedGroup(2)
        self.batch = pyglet.graphics.Batch()

        wall_bg = get_room_wall_image(room)
        self.wall_sprite = pyglet.sprite.Sprite(
            wall_bg, x=0, y=0,
            batch=self.batch, group=self.background_group)

        # Keep references to the sprites: a Batch does not own its members,
        # so an unreferenced sprite would be garbage-collected and vanish.
        self.forniture_placements = [
            pyglet.sprite.Sprite(
                get_forniture_image(e.forniture), x=e.x, y=e.y,
                batch=self.batch, group=self.foreground_group)
            for e in room.elements
        ]

        # BUG FIX: the label was previously created without storing a
        # reference, so it could be garbage-collected (and removed from the
        # batch) before ever being drawn.  Keep it on the instance.
        self.name_label = pyglet.text.Label(
            room.name,
            font_name='Times New Roman',
            font_size=16,
            x=100, y=350,
            batch=self.batch, group=self.info_group)

    def render(self):
        """Draw the whole room in one batched call."""
        self.batch.draw()
nilq/baby-python
python
# UNDER CONSTRUCTION ! light_metadata = { "name": { "type": "string" }, "version": { "type": "string" }, "data_preparation": { "type": "object", "properties": { "accepted_margin_of_error": { "type": "number" }, "total_row_count": { "type": "number" }, "used_row_count": { "type": "number" }, "test_row_count": { "type": "number" }, "train_row_count": { "type": "number" }, "validation_row_count": { "type": "number" } } }, "data_analysis": { "type": "object", "properties": { "target_columns_metadata": { "type": "array", "items": { "type": "object", "properties": { "column_name": { "type": "string" } } } } } } } scores = ['duplicates_score','empty_cells_score','data_type_distribution_score', 'similarity_score','z_test_based_outlier_score','value_distribution_score' ,'variability_score','redundancy_score','consistency_score','consistency_score','quality_score'] def gen_score(score_name): return [ score_name: { "type": "object", "properties": { "score": { "type": "number" }, "description": { "type": "string" } } } ] "data_analysis": { "target_columns_metadata": [ { "column_name": "string", "importance_score": 0, "data_type": "categorical", "data_type_distribution": { "type": "categorical", "x": [ "string" ], "y": [ 0 ] }, "data_distribution": { "data_histogram": { "type": "categorical", "x": [ "string" ], "y": [ 0 ] }, "clusters": [ { "group": "string", "members": [ "string" ] } ], "mean": "string" }, "consistency": { "score": "string", "metrics": [ { "type": "error", "score": 0, "description": "string" } ], "description": "string" }, "completeness": { "score": "string", "metrics": [ { "type": "error", "score": 0, "description": "string" } ], "description": "string" }, "variability": { "score": "string", "metrics": [ { "type": "error", "score": 0, "description": "string" } ], "description": "string" } } ], "input_columns_metadata": [ { "column_name": "string", "importance_score": 0, "data_type": "categorical", "data_type_distribution": { "type": "categorical", "x": [ 
"string" ], "y": [ 0 ] }, "data_distribution": { "data_histogram": { "type": "categorical", "x": [ "string" ], "y": [ 0 ] }, "clusters": [ { "group": "string", "members": [ "string" ] } ], "mean": "string" }, "consistency": { "score": "string", "metrics": [ { "type": "error", "score": 0, "description": "string" } ], "description": "string" }, "completeness": { "score": "string", "metrics": [ { "type": "error", "score": 0, "description": "string" } ], "description": "string" }, "variability": { "score": "string", "metrics": [ { "type": "error", "score": 0, "description": "string" } ], "description": "string" } } ] }, "model_analysis": [ { "column_name": "string", "overall_input_importance": { "type": "categorical", "x": [ "string" ], "y": [ 0 ] }, "train_accuracy_over_time": { "type": "categorical", "x": [ "string" ], "y": [ 0 ] }, "test_accuracy_over_time": { "type": "categorical", "x": [ "string" ], "y": [ 0 ] }, "accuracy_histogram": { "x": [ "string" ], "y": [ 0 ], "x_explained": [ [ { "column_name": "string", "importance_score": 0, "data_type": "categorical", "data_type_distribution": { "type": "categorical", "x": [ "string" ], "y": [ 0 ] }, "data_distribution": { "data_histogram": { "type": "categorical", "x": [ "string" ], "y": [ 0 ] }, "clusters": [ { "group": "string", "members": [ "string" ] } ], "mean": "string" }, "consistency": { "score": "string", "metrics": [ { "type": "error", "score": 0, "description": "string" } ], "description": "string" }, "completeness": { "score": "string", "metrics": [ { "type": "error", "score": 0, "description": "string" } ], "description": "string" }, "variability": { "score": "string", "metrics": [ { "type": "error", "score": 0, "description": "string" } ], "description": "string" } } ] ] } } ] })
nilq/baby-python
python
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 20 22:56:12 2019

@author: Suman

JaipurRentals

Jaipur’s Real Estate Market is experiencing an incredible resurgence, with
property prices soaring by double-digits on an yearly basis since 2013. While
home owners have a lot of reasons to laugh about, the same cannot be said of
people looking for a home to buy or rent. In Jaipur, property rental market is
said to be as crazy as property purchasing market. You are new to Jaipur and
want to rent a decent apartment. Since you have the knowledge of Machine
Learning you decided to build a model, that could help you out to get a nice
apartment at best price. Get Your data from various apartment rental sites and
move towards the following observation points like:
· How does the general rental prices distribution looks like? (Graphical representation is appreciated)
· Which are the hottest areas?
· Which area would be more interesting to start hunting?
· Are you able to predict rental price of an apartment?
"""

import operator
import pickle
from collections import Counter

import pandas as pd

data = pd.read_csv('processed_data.csv')

# Listings per location: the "hotness" measure and the denominator for
# per-location average prices.
top = Counter(data.location)

data.index = range(data.shape[0])

property_type = data.PropertyType.unique()

# Total advertised rent per location.  A vectorised groupby replaces the
# original O(n) row-by-row ``.iloc`` accumulation loop.
loc_price = data.groupby('location')['price'].sum().to_dict()

# Average rent per location = total rent / number of listings there.
avg_price = {loc: total / top[loc] for loc, total in loc_price.items()}

location = loc_price.keys()

# Which are the hottest areas?  Top 10 locations by average rent.
a = dict(sorted(avg_price.items(), key=operator.itemgetter(1), reverse=True)[:10])

# Which area would be more interesting to start hunting?
# For the 10 most-listed locations: average rent for 1..10 BHK flats.
hunt = pd.DataFrame()
for loc, num in top.most_common(10):
    temp = []
    for i in range(1, 11):
        mean_rate = data['price'][(data.location == loc) & (data.BHK == i)].mean()
        # Explicit NaN check replaces the original bare try/except around
        # ``int(...)`` (mean() of an empty selection is NaN, not an error).
        if pd.notna(mean_rate):
            temp.append('{} BHK Average rate: {}'.format(i, int(mean_rate)))
        else:
            temp.append('{} BHK Not Available'.format(i))
    hunt[loc] = temp

# Pre-trained rental-price model (pickled during training); loaded so rents
# of new apartments can be predicted.
with open('model.pkl', 'rb') as f1:
    model = pickle.load(f1)
nilq/baby-python
python
import data as tours_data


def data_html():
    """Render an HTML listing of all tours, each linking to its detail page."""
    entries = [
        '<p>{}: <a href="/data/tours/{}/">{}</a></p>'.format(
            tour["country"], tour_id, tour["title"]
        )
        for tour_id, tour in tours_data.tours.items()
    ]
    return "<h1>Все туры:</h1>\n" + "".join(entries)
nilq/baby-python
python
# The MIT License (MIT)
#
# Copyright (c) 2019 Melissa LeBlanc-Williams for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_rgb_display.hx8357`
====================================================
A simple driver for the HX8357-based displays.

* Author(s): Melissa LeBlanc-Williams
"""
from micropython import const
from adafruit_rgb_display.rgb import DisplaySPI

__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_RGB_Display.git"

# HX8357 command opcodes (datasheet register names).
_SWRESET = const(0x01)
_SLPOUT = const(0x11)
_NORON = const(0x13)
_INVOFF = const(0x20)
_INVON = const(0x21)
_DISPOFF = const(0x28)
_DISPON = const(0x29)
_CASET = const(0x2a)
_PASET = const(0x2b)
_RAMWR = const(0x2c)
_RAMRD = const(0x2e)
_TEON = const(0x35)
_MADCTL = const(0x36)
_COLMOD = const(0x3a)
_TEARLINE = const(0x44)
_SETOSC = const(0xb0)
_SETPWR1 = const(0xb1)
_SETRGB = const(0xb3)
_SETCYC = const(0xb4)
_SETCOM = const(0xb6)
_SETC = const(0xb9)
_SETSTBA = const(0xc0)
_SETPANEL = const(0xcc)
_SETGAMMA = const(0xe0)


class HX8357(DisplaySPI):
    """
    A simple driver for the HX8357-based displays.

    >>> import busio
    >>> import digitalio
    >>> import board
    >>> from adafruit_rgb_display import color565
    >>> import adafruit_rgb_display.hx8357 as hx8357
    >>> spi = busio.SPI(clock=board.SCK, MOSI=board.MOSI, MISO=board.MISO)
    >>> display = hx8357.HX8357(spi, cs=digitalio.DigitalInOut(board.GPIO0),
    ...    dc=digitalio.DigitalInOut(board.GPIO15))
    >>> display.fill(0x7521)
    >>> display.pixel(64, 64, 0)
    """

    # Opcodes DisplaySPI uses for windowed pixel access.
    _COLUMN_SET = _CASET
    _PAGE_SET = _PASET
    _RAM_WRITE = _RAMWR
    _RAM_READ = _RAMRD
    # Power-on initialisation sequence: (command, payload-or-None) pairs sent
    # in order by the base class.  Values follow the vendor init tables; do
    # not reorder — several registers must be set before sleep-out.
    _INIT = (
        (_SWRESET, None),
        (_SETC, b'\xFF\x83\x57'),
        (_SETRGB, b'\x80\x00\x06\x06'),  # 0x80 enables SDO pin (0x00 disables)
        (_SETCOM, b'\x25'),  # -1.52V
        (_SETOSC, b'\x68'),  # Normal mode 70Hz, Idle mode 55 Hz
        (_SETPANEL, b'\x05'),  # BGR, Gate direction swapped
        (_SETPWR1, b'\x00\x15\x1C\x1C\x83\xAA'),  # Not deep standby BT VSPR VSNR AP
        (_SETSTBA, b'\x50\x50\x01\x3C\x1E\x08'),  # OPON normal OPON idle STBA GEN
        (_SETCYC, b'\x02\x40\x00\x2A\x2A\x0D\x78'),  # NW 0x02 RTN DIV DUM DUM GDON GDOFF
        (_SETGAMMA, b'\x02\x0A\x11\x1d\x23\x35\x41\x4b\x4b\x42\x3A\x27\x1B\x08\x09\x03\x02'
                    b'\x0A\x11\x1d\x23\x35\x41\x4b\x4b\x42\x3A\x27\x1B\x08\x09\x03\x00\x01'),
        (_COLMOD, b'\x55'),  # 16 bit
        (_MADCTL, b'\xc0'),
        (_TEON, b'\x00'),
        (_TEARLINE, b'\x00\x02'),  # TW off
        (_SLPOUT, None),
        (_MADCTL, b'\xa0'),
        (_DISPON, None),
    )
    # struct formats for one RGB565 pixel and a (start, end) window coordinate.
    _ENCODE_PIXEL = ">H"
    _ENCODE_POS = ">HH"

    #pylint: disable-msg=useless-super-delegation, too-many-arguments
    def __init__(self, spi, dc, cs, rst=None, width=480, height=320,
                 baudrate=16000000, polarity=0, phase=0, rotation=0):
        # Only supplies HX8357-specific defaults (480x320, 16 MHz); all work
        # happens in DisplaySPI.__init__.
        super().__init__(spi, dc, cs, rst, width, height,
                         baudrate=baudrate, polarity=polarity, phase=phase,
                         rotation=rotation)
nilq/baby-python
python
__author__ = 'yinjun'

#@see http://www.jiuzhang.com/solutions/longest-common-subsequence/

class Solution:
    """
    @param A, B: Two strings.
    @return: The length of longest common subsequence of A and B.
    """
    def longestCommonSubsequence(self, A, B):
        # Classic LCS dynamic programme kept to two rolling rows:
        # prev[j] holds LCS(A[:i-1], B[:j]), curr[j] holds LCS(A[:i], B[:j]).
        width = len(B)
        prev = [0] * (width + 1)
        for ch_a in A:
            curr = [0]
            for j, ch_b in enumerate(B, 1):
                if ch_a == ch_b:
                    curr.append(prev[j - 1] + 1)
                else:
                    curr.append(max(prev[j], curr[j - 1]))
            prev = curr
        return prev[width]
nilq/baby-python
python
import sys
from typing import Any, Collection, Dict, List, Optional, Union

from pydantic import BaseModel

from rest_api.config import DEFAULT_TOP_K_READER, DEFAULT_TOP_K_RETRIEVER

# One frame below the interpreter's limit so the DSL walkers can raise a
# descriptive RecursionError before CPython overflows the stack itself.
MAX_RECURSION_DEPTH = sys.getrecursionlimit() - 1


class Question(BaseModel):
    """QA request payload: question strings plus optional filters and top-k knobs."""

    questions: List[str]
    filters: Optional[Dict[str, Optional[Union[str, List[str]]]]] = None
    top_k_reader: int = DEFAULT_TOP_K_READER
    top_k_retriever: int = DEFAULT_TOP_K_RETRIEVER

    @classmethod
    def from_elastic_query_dsl(cls, query_request: Dict[str, Any], top_k_reader: int = DEFAULT_TOP_K_READER) -> "Question":
        """Build a Question from an Elasticsearch Query-DSL request body.

        ``size`` (if present) maps to ``top_k_retriever``; ``term``/``terms``
        clauses map to ``filters``.  Exactly one ``query`` string must be
        present in the request, otherwise ``SyntaxError`` is raised.
        """
        # Refer Query DSL
        # https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-bool-query.html
        # Currently do not support query matching with field parameter
        query_strings: List[str] = []
        filters: Dict[str, str] = {}
        top_k_retriever: int = DEFAULT_TOP_K_RETRIEVER if "size" not in query_request else query_request["size"]

        # Walk the DSL tree; query_strings and filters are filled in place.
        cls._iterate_dsl_request(query_request, query_strings, filters)

        if len(query_strings) != 1:
            raise SyntaxError('Only one valid `query` field required expected, '
                              'refer https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-match-query.html')

        return cls(questions=query_strings, filters=filters if len(filters) else None,
                   top_k_retriever=top_k_retriever, top_k_reader=top_k_reader)

    @classmethod
    def _iterate_dsl_request(cls, query_dsl: Any, query_strings: List[str], filters: Dict[str, str], depth: int = 0) -> None:
        """Recursively collect ``query`` strings and filter clauses from the DSL.

        Mutates *query_strings* and *filters* in place; *depth* guards against
        pathologically deep request bodies.
        """
        if depth == MAX_RECURSION_DEPTH:
            raise RecursionError('Parsing incoming DSL reaching current value of the recursion limit')

        # For question: Only consider values of "query" key for "match" and "multi_match" request.
        # For filter: Only consider Dict[str, str] value of "term" or "terms" key
        if isinstance(query_dsl, List):
            for item in query_dsl:
                cls._iterate_dsl_request(item, query_strings, filters, depth + 1)
        elif isinstance(query_dsl, Dict):
            for key, value in query_dsl.items():
                # "query" value should be "str" type
                if key == 'query' and isinstance(value, str):
                    query_strings.append(value)
                elif key in ["filter", "filters"]:
                    # Dedicated walker: filter subtrees have their own shape.
                    cls._iterate_filters(value, filters, depth + 1)
                elif isinstance(value, Collection):
                    cls._iterate_dsl_request(value, query_strings, filters, depth + 1)

    @classmethod
    def _iterate_filters(cls, filter_dsl: Any, filters: Dict[str, str], depth: int = 0) -> None:
        """Collect ``term``/``terms`` clauses (string values only) into *filters*."""
        if depth == MAX_RECURSION_DEPTH:
            raise RecursionError('Parsing incoming DSL reaching current value of the recursion limit')

        if isinstance(filter_dsl, List):
            for item in filter_dsl:
                cls._iterate_filters(item, filters, depth + 1)
        elif isinstance(filter_dsl, Dict):
            for key, value in filter_dsl.items():
                if key in ["term", "terms"]:
                    if isinstance(value, Dict):
                        for filter_key, filter_value in value.items():
                            # Currently only accepting Dict[str, str]
                            if isinstance(filter_value, str):
                                filters[filter_key] = filter_value
                elif isinstance(value, Collection):
                    cls._iterate_filters(value, filters, depth + 1)
nilq/baby-python
python
from hyperadmin.links import LinkPrototype


class FormStepLinkPrototype(LinkPrototype):
    """Link prototype for submitting one wizard form step.

    Step routing (skip/desired step) is taken from the endpoint itself,
    not from the submitted form.
    """

    def get_link_kwargs(self, **kwargs):
        # Defaults for a POST "step" link; caller-supplied kwargs take
        # precedence via the update() below.
        link_kwargs = {'on_submit': self.handle_submission,
                       'method': 'POST',
                       'url': self.get_url(),
                       'form_class': self.get_form_class(),
                       'prompt': 'step',
                       'rel': 'step',}
        link_kwargs.update(kwargs)
        return super(FormStepLinkPrototype, self).get_link_kwargs(**link_kwargs)

    def handle_submission(self, link, submit_kwargs):
        """
        Called when the link is submitted. Returns a link representing the response.

        :rtype: Link
        """
        form = link.get_form(**submit_kwargs)
        if form.is_valid():
            self.endpoint.form_valid(form)
            return self.on_success()
        # Invalid: notify the endpoint and re-present the link with the
        # bound form so errors can be rendered.
        self.endpoint.form_invalid(form)
        return link.clone(form=form)

    def get_next_step_kwargs(self):
        # Routing comes from the endpoint (cf. ControlStepLinkPrototype,
        # which reads it from the submitted form instead).
        return {
            'skip_steps': self.endpoint.get_skip_steps(),
            'desired_step': self.endpoint.get_desired_step(),
        }

    def on_success(self, item=None):
        # ``item`` is unused here; kept for signature compatibility with
        # other LinkPrototype.on_success implementations.
        params = self.get_next_step_kwargs()
        return self.endpoint.wizard.next_step(**params)


class ControlStepLinkPrototype(LinkPrototype):
    """Link prototype for a wizard control step.

    Unlike FormStepLinkPrototype, the next-step routing is read from the
    submitted form's cleaned data.
    """

    def get_link_kwargs(self, **kwargs):
        # Same POST "step" defaults as FormStepLinkPrototype.
        link_kwargs = {'on_submit': self.handle_submission,
                       'method': 'POST',
                       'url': self.get_url(),
                       'form_class': self.get_form_class(),
                       'prompt': 'step',
                       'rel': 'step',}
        link_kwargs.update(kwargs)
        return super(ControlStepLinkPrototype, self).get_link_kwargs(**link_kwargs)

    def handle_submission(self, link, submit_kwargs):
        """
        Called when the link is submitted. Returns a link representing the response.

        :rtype: Link
        """
        form = link.get_form(**submit_kwargs)
        if form.is_valid():
            # NOTE: no endpoint.form_valid() here — the form only carries
            # routing information, not step data.
            return self.on_success(form)
        return link.clone(form=form)

    def get_next_step_kwargs(self, form):
        # Routing is taken from the validated control form.
        return {
            'skip_steps': form.cleaned_data.get('skip_steps', []),
            'desired_step': form.cleaned_data.get('desired_step', None),
        }

    def on_success(self, form):
        params = self.get_next_step_kwargs(form)
        return self.endpoint.wizard.next_step(**params)
nilq/baby-python
python
from tests import PMGLiveServerTestCase
from mock import patch  # NOTE(review): appears unused here — candidate for removal.
import unittest  # NOTE(review): appears unused here — candidate for removal.
from pmg.models import db, User
from tests.fixtures import dbfixture, UserData, RoleData, OrganisationData


class TestAdminUsersPage(PMGLiveServerTestCase):
    """Live-server tests for the admin user list/create pages (/admin/user/)."""

    def setUp(self):
        super(TestAdminUsersPage, self).setUp()

        # Load user/role/organisation fixtures and act as the admin user.
        self.fx = dbfixture.data(UserData, RoleData, OrganisationData)
        self.fx.setup()
        self.user = self.fx.UserData.admin
        # Form payload used by the "create user" POST test below.
        self.create_user_data = {
            "email": "test@example.com",
            "name": "Test user",
            "active": "y",
            "roles": self.fx.RoleData.admin.id,
            "organisation": self.fx.OrganisationData.pmg.id,
            "expiry": "2065-02-06",
        }

    def tearDown(self):
        # Remove objects created by tests before tearing down fixtures.
        self.delete_created_objects()
        self.fx.teardown()
        super(TestAdminUsersPage, self).tearDown()

    def test_admin_users_page(self):
        """
        Test admin users page (/admin/user/)
        """
        self.make_request("/admin/user/", self.user, follow_redirects=True)
        self.assertIn("Users", self.html)
        # All fixture users (including inactive) are listed.
        self.assertIn(self.fx.UserData.admin.email, self.html)
        self.assertIn(self.fx.UserData.editor.email, self.html)
        self.assertIn(self.fx.UserData.inactive.email, self.html)

    def test_admin_user_new_page(self):
        """
        Test admin get new user page (/admin/user/new)
        """
        url = "/admin/user/new"
        self.make_request(
            url,
            self.user,
            follow_redirects=True,
        )
        # Spot-check key form fields of the create-user page.
        self.assertIn("Email", self.html)
        self.assertIn("Email address confirmed at", self.html)
        self.assertIn("Subscribe Daily Schedule", self.html)

    def test_post_admin_users_new_page(self):
        """
        Test admin new users page (/admin/user/new)
        """
        before_count = len(User.query.all())
        url = "/admin/user/new/?url=%2Fadmin%2Fuser%2F"
        response = self.make_request(
            url,
            self.user,
            data=self.create_user_data,
            method="POST",
            follow_redirects=True,
        )
        self.assertEqual(200, response.status_code)
        # The POST must have created exactly the new user.
        after_count = len(User.query.all())
        self.assertLess(before_count, after_count)

        created_user = User.query.filter(
            User.email == self.create_user_data["email"]
        ).scalar()
        self.assertTrue(created_user)
        # Register for cleanup in tearDown().
        self.created_objects.append(created_user)
nilq/baby-python
python
from model import db, Product, Accounts


def deleteUser(rowid):
    """Delete the account with primary key *rowid*; no-op if absent.

    BUG FIX: the original was missing the ``:`` after the signature
    (SyntaxError) and referenced an undefined ``User`` model and an undefined
    ``user_id`` variable instead of the ``rowid`` parameter.
    """
    # NOTE(review): ``Accounts`` is assumed to be the user model — it is the
    # only user-like model imported from ``model``; confirm against schema.
    user = db.session.query(Accounts).filter(Accounts.id == rowid).first()
    if user:
        # Delete the already-fetched row instead of issuing a second query.
        db.session.delete(user)
        db.session.commit()


def deleteProduct(rowid):
    """Delete the product with primary key *rowid*; no-op if absent."""
    product = db.session.query(Product).filter(Product.id == rowid).first()
    if product:
        db.session.delete(product)
        db.session.commit()
nilq/baby-python
python
"""Hata Yönetimi - Raise Deyimi.""" # Python dili kırmızı yazılar ile kendine has hata mesajları yayınlamaktadır # Bizde bir hata meydana geldiğinde bu şekilde mesajlar yayınlayabiliriz. # Bunun için Raise deyimi kullanılır. sayi = 5 try: if sayi == 5: raise Exception('Sayı 5\'e eşit olamaz!') else: print(sayi) except Exception as e: print('ERROR! =>', e)
nilq/baby-python
python
# -*- coding: utf-8 -*-
import copy

__author__ = "Grant Hulegaard"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__license__ = ""
__maintainer__ = "Grant Hulegaard"
__email__ = "grant.hulegaard@nginx.com"


def collect_cache_size(collector, data, stamp):
    """Report the cache's current size and, when configured, its maximum."""
    collector.object.statsd.gauge('plus.cache.size', data['size'], stamp=stamp)

    if 'max_size' in data:
        collector.object.statsd.gauge('plus.cache.max_size', data['max_size'], stamp=stamp)


def collect_cache_metrics(collector, data, stamp):
    """Aggregate per-outcome cache counters under 'plus.cache.<outcome>'."""
    skipped = ('responses_written', 'bytes_written')
    outcomes = (
        'bypass',
        'expired',
        'hit',
        'miss',
        'revalidated',
        'stale',
        'updating',
    )

    for outcome in outcomes:
        bucket = data[outcome]
        base = 'plus.cache.%s' % outcome

        # Every counter in the bucket except the *_written ones is reported
        # under its own dotted name ...
        counters = {
            '%s.%s' % (base, key): value
            for key, value in bucket.items()
            if key not in skipped
        }
        # ... and the bare base name carries the total response count.
        counters[base] = bucket['responses']

        collector.aggregate_counters(copy.deepcopy(counters), stamp=stamp)


CACHE_COLLECT_INDEX = [
    collect_cache_size,
    collect_cache_metrics,
]
nilq/baby-python
python
from abc import ABCMeta

import numpy as np

from falconcv.decor import typeassert
import logging

logger = logging.getLogger(__name__)


class ApiModel(metaclass=ABCMeta):
    """Common interface for FalconCV model back-ends.

    The lifecycle methods are fluent no-ops by default: subclasses override
    them and should keep returning ``self`` so calls can be chained,
    e.g. ``model.train().freeze().eval()``.
    """

    def train(self, *args, **kwargs):
        # Default: nothing to train; return self for chaining.
        return self

    def freeze(self, *args, **kwargs):
        # Default: nothing to freeze/export; return self for chaining.
        return self

    def eval(self, *args, **kwargs):
        # Default: nothing to evaluate; return self for chaining.
        return self

    @typeassert(input_image=[str, np.ndarray], size=tuple, threshold=float, top_k=int)
    def __call__(self, input_image, size=None, threshold=0.5, top_k=10):
        """Run inference on *input_image* (a path or a decoded ndarray).

        :param input_image: image file path or image array.
        :param size: optional target size tuple applied before inference.
        :param threshold: minimum confidence for a detection to be kept.
        :param top_k: maximum number of detections to return.

        Base implementation is a no-op; concrete models override it.
        """
        pass
nilq/baby-python
python
import sqlite3

# Single on-disk database used by all helpers in this module.
DB_PATH = 'sqlite.db'

CREATE_QUERY = """
CREATE TABLE IF NOT EXISTS chat_table (
chat_id INT PRIMARY KEY
)
"""

SELECT_ALL_QUERY = """
SELECT chat_id from chat_table
"""

# BUG FIX: was a ``%s`` string-interpolation placeholder, which is vulnerable
# to SQL injection and breaks on quoting; values are now bound with ``?``.
INSERT_ONE_QUERY = """
INSERT INTO chat_table (chat_id)
VALUES (?)
"""


def execute_query(query, params=()):
    """Run *query* against the local SQLite database and return fetched rows.

    :param query: SQL statement, optionally containing ``?`` placeholders.
    :param params: values bound to the placeholders (default: none) —
        backward compatible with the old single-argument call form.
    :return: list of result rows, or ``None`` if the database raised an error
        (the original best-effort contract: log and continue).
    """
    connection = None  # BUG FIX: was unbound in ``finally`` if connect() failed.
    try:
        connection = sqlite3.connect(DB_PATH)
        cursor = connection.cursor()
        cursor.execute(query, params)
        result = cursor.fetchall()
        connection.commit()
        cursor.close()
        return result
    except sqlite3.Error as error:
        print("Error while connecting:", error)
        return None
    finally:
        if connection is not None:
            connection.close()


def add_id(chat_id):
    """Insert *chat_id* into chat_table (errors, e.g. duplicates, are logged)."""
    execute_query(INSERT_ONE_QUERY, (chat_id,))


def get_all_ids():
    """Return all stored chat ids as a list of 1-tuples."""
    return execute_query(SELECT_ALL_QUERY)


# Ensure the table exists as soon as the module is imported.
execute_query(CREATE_QUERY)
nilq/baby-python
python
import pandas
from openpyxl import load_workbook
from openpyxl.utils.dataframe import dataframe_to_rows

# Target workbook: shift totals are written into its active sheet.
wb = load_workbook('data/regions.xlsx')
ws = wb.active

df = pandas.read_excel('data/all_shifts.xlsx')
# BUG FIX: ``.copy()`` so the Total column is added to an independent frame
# rather than a view of ``df`` (avoids pandas' SettingWithCopyWarning and the
# possibility of the assignment silently not sticking).
df1 = df[['Sales Rep', 'Cost per', 'Units Sold']].copy()
df1['Total'] = df1['Cost per'] * df1['Units Sold']

rows = dataframe_to_rows(df1, index=False)

# Write the frame (header row included) starting at cell F1
# (row offset 1, column offset 6).
for row_index, row in enumerate(rows, 1):
    for column_index, col in enumerate(row, 6):
        ws.cell(row=row_index, column=column_index, value=col)

# NOTE(review): "combinded" looks like a typo for "combined"; kept as-is so
# existing consumers of this output path do not break.
wb.save('output/combinded.xlsx')
nilq/baby-python
python
from __future__ import unicode_literals, print_function, division import os import matplotlib.pyplot as plt from compute_scores import compute_scores # Draw figures in Figure 2. for rel in ['mirgene', 'ppi', 'ploc']: # Heuristic of trigger words. scores = compute_scores(rel, 'h2') precisions = [s[2] for s in scores] recalls = [s[3] for s in scores] fscores = [s[4] for s in scores] prec_color, recall_color, fscore_color = 'black', 'black', 'black' x_axis = [ 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, ] fig = plt.figure() ax1 = fig.add_subplot(111) ax1.set_xlabel('# of trigger stems', fontsize=24) ax1.plot(x_axis, precisions[:10], marker='o', color=prec_color, markersize=15, lw=2, linestyle='-', fillstyle='none') ax1.plot(x_axis, recalls[:10], marker='s', color=recall_color, markersize=15, lw=2, linestyle='-', fillstyle='none') ax1.plot(x_axis, fscores[:10], marker='^', color=fscore_color, markersize=15, lw=2, linestyle='-', fillstyle='none') ax1.plot(x_axis, precisions[10:], color=prec_color, markersize=15, lw=2, marker='o', linestyle='--', fillstyle='none') ax1.plot(x_axis, recalls[10:], color=recall_color, markersize=15, lw=2, marker='s', linestyle='--', fillstyle='none') ax1.plot(x_axis, fscores[10:], color=fscore_color, markersize=15, lw=2, marker='^', linestyle='--', fillstyle='none') plt.tick_params(labelsize=20) # Fig 2. a-c. figfile = os.path.join('eval/figures/{}_trigger.png'.format(rel)) fig.savefig(figfile, dpi=300, pad_inches=0 ,bbox_inches='tight') plt.show() # High-confidence patterns. 
scores = compute_scores(rel, 'h3') precisions = [s[2] for s in scores] recalls = [s[3] for s in scores] fscores = [s[4] for s in scores] x_axis = [ 20, 40, 60, 80, 100, 120, 140, 160, 180, 200 ] fig = plt.figure() ax1 = fig.add_subplot(111) ax1.set_xlabel('# of patterns', fontsize=24) ax1.plot(x_axis, precisions[:10], marker='o', color=prec_color, markersize=15, lw=2, linestyle='-', fillstyle='none') ax1.plot(x_axis, recalls[:10], marker='s', color=recall_color, markersize=15, lw=2, linestyle='-', fillstyle='none') ax1.plot(x_axis, fscores[:10], marker='^', color=fscore_color, markersize=15, lw=2, linestyle='-', fillstyle='none') ax1.plot(x_axis, precisions[10:], color=prec_color, markersize=15, lw=2, marker='o', linestyle='--', fillstyle='none') ax1.plot(x_axis, recalls[10:], color=recall_color, markersize=15, lw=2, marker='s', linestyle='--', fillstyle='none') ax1.plot(x_axis, fscores[10:], color=fscore_color, markersize=15, lw=2, marker='^', linestyle='--', fillstyle='none') plt.tick_params(labelsize=20) # Fig. 2 d-f. figfile = os.path.join('eval/figures/{}_pattern.png'.format(rel)) fig.savefig(figfile, dpi=300, pad_inches=0 ,bbox_inches='tight') plt.show()
nilq/baby-python
python
# You are given a data structure of employee information, which includes the
# employee's unique id, his importance value and his direct subordinates' ids.
# For example, employee 1 is the leader of employee 2, and employee 2 is the
# leader of employee 3. They have importance value 15, 10 and 5, respectively.
# Then employee 1 has a data structure like [1, 15, [2]], employee 2 has
# [2, 10, [3]], and employee 3 has [3, 5, []]. Note that although employee 3
# is also a subordinate of employee 1, the relationship is not direct.
# Now given the employee information of a company, and an employee id, return
# the total importance value of this employee and all his subordinates.

class Solution(object):
    def getImportance(self, employees, id):
        """
        :type employees: Employee
        :type id: int
        :rtype: int
        """
        # Time: O(n)
        # Space: O(n)
        # Iterative depth-first traversal with an explicit stack of ids
        # (instead of recursion), summing importance over the whole subtree.
        by_id = {employee.id: employee for employee in employees}
        total = 0
        pending = [id]
        while pending:
            employee = by_id[pending.pop()]
            total += employee.importance
            pending.extend(employee.subordinates)
        return total
nilq/baby-python
python
# Dataset utilities for scene-text detection training data.
# NOTE(review): the Fourier ground-truth generation (sample/fourier_transform/
# clockwise) looks like FCENet-style Fourier contour embedding — confirm
# against the paper/repo this was ported from.
import copy
import cv2
# import torch
from mindspore import Tensor
import numpy as np
from PIL import Image

from util.config import config as cfg
from util.misc import find_bottom, find_long_edges, split_edge_seqence, \
    norm2, vector_sin, split_edge_seqence_by_step, sample, fourier_transform, \
    clockwise, find_start_point


def pil_load_img(path):
    """Load an image from `path` via PIL and return it as a numpy array."""
    image = Image.open(path)
    image = np.array(image)
    return image


class TextInstance(object):
    """One annotated text polygon.

    :param points: sequence of (x, y) vertices of the polygon
    :param orient: orientation tag (stored, not interpreted here)
    :param text: transcription; '#' marks an ignore region (label -1)
    """
    def __init__(self, points, orient, text):
        self.orient = orient
        self.text = text
        self.bottoms = None
        self.e1 = None
        self.e2 = None
        if self.text != "#":
            self.label = 1
        else:
            self.label = -1

        remove_points = []
        self.points = np.array(points)
        if len(points) > 4:
            # remove point if area is almost unchanged after removing it
            # (simplifies the polygon while keeping at least 4 vertices)
            ori_area = cv2.contourArea(points)
            for p in range(len(points)):
                # attempt to remove p
                index = list(range(len(points)))
                index.remove(p)
                area = cv2.contourArea(points[index])
                if np.abs(ori_area - area)/ori_area < 0.0017 and len(points) - len(remove_points) > 4:
                    remove_points.append(p)
            self.points = np.array([point for i, point in enumerate(points) if i not in remove_points])
        else:
            self.points = np.array(points)

    def find_bottom_and_sideline(self):
        # Populates self.bottoms / self.e1 / self.e2 as a side effect.
        self.bottoms = find_bottom(self.points)  # find two bottoms of this Text
        self.e1, self.e2 = find_long_edges(self.points, self.bottoms)  # find two long edge sequence

    def disk_cover(self, n_disk=15):
        """
        cover text region with several disks
        :param n_disk: number of disks
        :return: (edge1 points, edge2 points, disk centers, disk radii)
        """
        inner_points1 = split_edge_seqence(self.points, self.e1, n_disk)
        inner_points2 = split_edge_seqence(self.points, self.e2, n_disk)
        inner_points2 = inner_points2[::-1]  # innverse one of long edge

        center_points = (inner_points1 + inner_points2) / 2  # disk center
        radii = norm2(inner_points1 - center_points, axis=1)  # disk radius

        return inner_points1, inner_points2, center_points, radii

    def equal_width_bbox_cover(self, step=16.0):
        """Sample the two long edges every `step` pixels and return both
        edge samplings plus their midpoints (the center line).
        Requires find_bottom_and_sideline() to have been called first."""
        inner_points1, inner_points2 = split_edge_seqence_by_step(self.points, self.e1, self.e2, step=step)
        inner_points2 = inner_points2[::-1]  # innverse one of long edge
        center_points = (inner_points1 + inner_points2) / 2  # disk center

        return inner_points1, inner_points2, center_points

    def __repr__(self):
        return str(self.__dict__)

    def __getitem__(self, item):
        # allow dict-style access to attributes, e.g. instance['points']
        return getattr(self, item)


class TextDataset(object):
    """Base dataset that turns polygon annotations into multi-scale
    (1/8, 1/16, 1/32) mask + Fourier-coefficient training targets.
    Subclasses must implement __len__ (and supply images/polygons)."""
    def __init__(self, transform, is_training=False):
        super().__init__()
        self.transform = transform
        self.is_training = is_training

    @staticmethod
    def fill_polygon(mask, pts, value):
        """Rasterize polygon `pts` into `mask` with the given fill value."""
        cv2.fillPoly(mask, [pts.astype(np.int32)], color=(value,))

    def make_text_region(self, img, polygon, tr_mask, train_mask, x_map, y_map, k, scale=1/2):
        """Fill the text-region / train masks for one polygon and write its
        2k+1 Fourier coefficients (real part into x_map, imaginary into
        y_map) over the polygon's pixels at the given `scale`.
        NOTE(review): for the DC term (i == 0) the per-pixel offset
        (vector - pixel coordinate) is stored instead of the raw
        coefficient — presumably to make the target translation-aware;
        confirm against the reference implementation."""
        [h, w] = img.shape[:2]
        h = int(h * scale)
        w = int(w * scale)
        deal_mask = np.zeros((h, w), np.uint8)
        points = (polygon.points * scale).astype(np.int32)
        cv2.fillPoly(tr_mask, [points], color=(1,))
        cv2.fillPoly(deal_mask, [points], color=(1,))
        if polygon.text == '#':
            # '#' marks an ignore region: exclude it from the training mask
            cv2.fillPoly(train_mask, [points], color=(0,))
        pts = sample(polygon.points * scale)
        pts = find_start_point(pts)
        c = fourier_transform(pts, k)
        c = clockwise(c, k)
        vector_x = np.real(c)
        vector_y = np.imag(c)
        for i in range(-k, k+1):
            if i != 0:
                x_map[:, :, i + k] = deal_mask * vector_x[i + k] + (1 - deal_mask) * x_map[:, :, i + k]
                y_map[:, :, i + k] = deal_mask * vector_y[i + k] + (1 - deal_mask) * y_map[:, :, i + k]
            else:
                for y, x in np.argwhere(deal_mask > 0.5):
                    x_map[y, x, k] = vector_x[k] - x
                    y_map[y, x, k] = vector_y[k] - y

    def make_text_center_line(self, sideline1, sideline2, center_line, tcl_msk1, expand=0.3, shrink=1):
        """Rasterize the text center line (TCL) mask: for each segment of
        the center line, fill a quad expanded `expand` of the way towards
        the two side lines.  The first/last `shrink` segments are skipped
        (unless the line is short, < 5 points)."""
        p1 = np.mean(sideline1, axis=0)
        p2 = np.mean(sideline2, axis=0)
        vpp = vector_sin(p1 - p2)
        if vpp >= 0:
            top_line = sideline2
            bot_line = sideline1
        else:
            top_line = sideline1
            bot_line = sideline2
        if len(center_line) < 5:
            shrink = 0
        for i in range(shrink, len(center_line) - 1 - shrink):
            c1 = center_line[i]
            c2 = center_line[i + 1]
            top1 = top_line[i]
            top2 = top_line[i + 1]
            bottom1 = bot_line[i]
            bottom2 = bot_line[i + 1]
            p1 = c1 + (top1 - c1) * expand
            p2 = c1 + (bottom1 - c1) * expand
            p3 = c2 + (bottom2 - c2) * expand
            p4 = c2 + (top2 - c2) * expand
            ploy1 = np.stack([p1, p2, p3, p4])
            self.fill_polygon(tcl_msk1, ploy1, value=1)

    def get_training_data(self, image, polygons, k, image_id, image_path):
        """Build training targets for one image.

        Each polygon is assigned to one or two of the three scales
        (1/8, 1/16, 1/32) based on its size relative to the image
        (`criterion`); overlapping thresholds (0.3/0.4, 0.6/0.7) let
        borderline polygons contribute to two scales.
        Returns (image CHW, label_3, label_4, label_5[, meta]) where each
        label stacks [tr_mask, train_mask, x_map, y_map, tcl_mask] and
        `meta` is only returned in evaluation mode.
        """
        H, W, _ = image.shape
        if self.transform:
            image, polygons = self.transform(image, copy.copy(polygons))
        h, w, _ = image.shape
        tr_mask_3 = np.zeros((int(h/8), int(w/8), 1), np.uint8)
        train_mask_3 = np.ones((int(h/8), int(w/8), 1), np.uint8)
        tcl_mask_3 = np.zeros((int(h / 8), int(w / 8), 1), np.uint8)
        x_map_3 = np.zeros((int(h/8), int(w/8), 2 * k + 1), np.float32)
        y_map_3 = np.zeros((int(h/8), int(w/8), 2 * k + 1), np.float32)

        tr_mask_4 = np.zeros((int(h/16), int(w/16), 1), np.uint8)
        train_mask_4 = np.ones((int(h/16), int(w/16), 1), np.uint8)
        tcl_mask_4 = np.zeros((int(h/16), int(w/16), 1), np.uint8)
        x_map_4 = np.zeros((int(h/16), int(w/16), 2 * k + 1), np.float32)
        y_map_4 = np.zeros((int(h/16), int(w/16), 2 * k + 1), np.float32)

        tr_mask_5 = np.zeros((int(h/32), int(w/32), 1), np.uint8)
        train_mask_5 = np.ones((int(h/32), int(w/32), 1), np.uint8)
        tcl_mask_5 = np.zeros((int(h/32), int(w/32), 1), np.uint8)
        x_map_5 = np.zeros((int(h/32), int(w/32), 2 * k + 1), np.float32)
        y_map_5 = np.zeros((int(h/32), int(w/32), 2 * k + 1), np.float32)

        if polygons is not None:
            for polygon in polygons:
                x_max = polygon.points[:, 0].max()
                x_min = polygon.points[:, 0].min()
                y_max = polygon.points[:, 1].max()
                y_min = polygon.points[:, 1].min()
                dx = x_max - x_min
                dy = y_max - y_min
                # relative size of the polygon w.r.t. the image height
                criterion = max(dx, dy) / (h + 1e-5)
                polygon.find_bottom_and_sideline()
                sideline1, sideline2, center_points = polygon.equal_width_bbox_cover(step=4.0)
                if criterion < 0.4:
                    self.make_text_region(image, polygon, tr_mask_3, train_mask_3, x_map_3, y_map_3, k, scale=1 / 8)
                    self.make_text_center_line(sideline1/8, sideline2/8, center_points/8, tcl_mask_3)
                if criterion > 0.3 and criterion < 0.7:
                    self.make_text_region(image, polygon, tr_mask_4, train_mask_4, x_map_4, y_map_4, k, scale=1 / 16)
                    self.make_text_center_line(sideline1/16, sideline2/16, center_points/16, tcl_mask_4)
                if criterion > 0.6:
                    self.make_text_region(image, polygon, tr_mask_5, train_mask_5, x_map_5, y_map_5, k, scale=1 / 32)
                    self.make_text_center_line(sideline1/32, sideline2/32, center_points/32, tcl_mask_5)

        # clip value (0, 1)
        tr_mask_3 = np.clip(tr_mask_3, 0, 1)
        train_mask_3 = np.clip(train_mask_3, 0, 1)
        tcl_mask_3 = np.clip(tcl_mask_3, 0, 1)
        tr_mask_4 = np.clip(tr_mask_4, 0, 1)
        train_mask_4 = np.clip(train_mask_4, 0, 1)
        tcl_mask_4 = np.clip(tcl_mask_4, 0, 1)
        tr_mask_5 = np.clip(tr_mask_5, 0, 1)
        train_mask_5 = np.clip(train_mask_5, 0, 1)
        tcl_mask_5 = np.clip(tcl_mask_5, 0, 1)

        label_3 = np.concatenate([tr_mask_3, train_mask_3, x_map_3, y_map_3, tcl_mask_3], axis=2)
        label_4 = np.concatenate([tr_mask_4, train_mask_4, x_map_4, y_map_4, tcl_mask_4], axis=2)
        label_5 = np.concatenate([tr_mask_5, train_mask_5, x_map_5, y_map_5, tcl_mask_5], axis=2)

        # # to pytorch channel sequence (HWC -> CHW)
        image = image.transpose(2, 0, 1)

        # image = Tensor.from_numpy(image).astype("float32")
        # label_3 = Tensor.from_numpy(label_3).astype("float32")
        # label_4 = Tensor.from_numpy(label_4).astype("float32")
        # label_5 = Tensor.from_numpy(label_5).astype("float32")

        if not self.is_training:
            # evaluation mode: also return padded annotations for scoring
            points = np.zeros((cfg.max_annotation, cfg.max_points, 2))
            length = np.zeros(cfg.max_annotation, dtype=int)
            if polygons is not None:
                for i, polygon in enumerate(polygons):
                    pts = polygon.points
                    points[i, :pts.shape[0]] = polygon.points
                    length[i] = pts.shape[0]
            meta = {
                'image_id': image_id,
                'image_path': image_path,
                'annotation': points,
                'n_annotation': length,
                'Height': H,
                'Width': W
            }
            # meta = [image_id, image_path, points, length, H, W]
            # meta = np.array(meta)
            return image, label_3, label_4, label_5, meta
        return image, label_3, label_4, label_5

    def get_test_data(self, image, image_id, image_path):
        """Transform a test image and return it in CHW order plus metadata."""
        H, W, _ = image.shape
        if self.transform:
            image, polygons = self.transform(image)
        # to pytorch channel sequence
        image = image.transpose(2, 0, 1)
        meta = {
            'image_id': image_id,
            'image_path': image_path,
            'Height': H,
            'Width': W
        }
        return image, meta

    def __len__(self):
        # concrete datasets must implement this
        raise NotImplementedError()
nilq/baby-python
python
#!/usr/bin/python import broker import select ## demo receiver that is subscribed to the topic "demo/select_fd" ep = broker.Endpoint() subscriber = ep.make_subscriber("demo/select_fd") ep.listen("127.0.0.1", 9999) while(True): ## this will block until we have read-readiness on the file descriptor # print("wait ...") fd_sets = select.select([subscriber.fd()], [], []) # print ("go on...") if not fd_sets[0]: print("boom. this is the end.") (topic, data) = subscriber.get() #// we could also subscriber.poll() and handle array of messages received_event = broker.bro.Event(data) print("received on topic: {} event name: {} content: {}".format(topic, received_event.name(), received_event.args())) ## in fact, with a blocking select this is pretty similar to the "simple" example. The call to `subscriber.get()` blocks as well. To handle this nicely, we have to wrap it into a thread.
nilq/baby-python
python
#
# @lc app=leetcode.cn id=275 lang=python3
#
# [275] H-Index II
#
# https://leetcode-cn.com/problems/h-index-ii/description/
#
# Given an array of citation counts sorted in ASCENDING order, compute the
# researcher's h-index: the largest h such that h of the papers have at
# least h citations each (the remaining n - h papers have no more than h
# citations each).  If several values of h are possible, return the largest.
#
# Example: citations = [0,1,3,5,6] -> 3
# (three papers were cited at least 3 times; the other two at most 3 times)
#
# Follow-up: achieve logarithmic time complexity.
#

# @lc code=start
class Solution:
    def hIndex(self, citations: List[int]) -> int:
        """Return the h-index of an ascending-sorted citation list.

        Binary-search for the leftmost index l with citations[l] >= n - l;
        then n - l papers each have at least n - l citations, which is the
        h-index.  O(log n) time, O(1) space.
        """
        # FIX: guard the empty list — the original indexed citations[l]
        # unconditionally after the loop and raised IndexError for [].
        if not citations:
            return 0
        n = len(citations)
        l, r = 0, n - 1
        while l < r:
            m = l + (r - l) // 2
            if citations[m] >= n - m:
                r = m
            else:
                l = m + 1
        return n - l if citations[l] >= n - l else 0
# @lc code=end


# NOTE(review): the two functions below are leftover alternative
# implementations (a linear scan and a second binary search).  They are
# defined at module level yet take `self`, so they are effectively dead
# code; kept unchanged for reference.
def hIndex(self, citations: List[int]) -> int:
    h = 0
    n = len(citations)
    i = 0
    citations.sort(reverse=True)
    while i < n and citations[i] > h:
        h += 1
        i += 1
    return h


def hIndex(self, citations: List[int]) -> int:
    if len(citations) == 1:
        return citations[0] if citations[0] <= 1 else 1
    l, r = 0, len(citations) - 1
    n = len(citations)
    while l <= r:
        m = l + (r - l) // 2
        if citations[m] >= n - m:
            r = m - 1
        else:
            l = m + 1
    return n - l
nilq/baby-python
python
from twotest.fixtures import client, django_client from wheelcms_axle.tests.fixtures import localtyperegistry, localtemplateregistry, root
nilq/baby-python
python
# Read integers until the user answers something other than 'S', then
# report the average and the largest/smallest values entered.
resposta = 'S'
soma = count = maior = menor = 0
while resposta in 'Ss':
    num = int(input('Digite um numero: '))
    soma += num
    count += 1
    if count == 1:
        # first number initialises both extremes
        maior = menor = num
    elif num > maior:
        maior = num
    elif num < menor:
        # BUG FIX: the original used a bare `else: menor = num`, which
        # overwrote the minimum with ANY number that was not a new maximum
        # (e.g. 5, 7, 6 would report menor = 6).  Only update `menor`
        # when the number really is a new minimum.
        menor = num
    resposta = str(input('Quer continuar ? S/N ')).upper().strip()[0]
media = soma / count
print('A media foi: {}'.format(media))
print('O maior e menor número foram:{} {} '.format(maior, menor))
nilq/baby-python
python
#!/usr/bin/env python # to be used with 'rps_pico_client.py' # Example to illustrate RPS feature to run tiny (pico) services, such as reading sensor data or # controlling a device; here, time at server is requested. In this example a new service task is # created to serve a request. Compare this to 'task_pico_service.py' where requests are processed # by the same server task. import sys import time import random import pycos # import netpycos to add networking to pycos import pycos.netpycos # PyPI / pip packaging adjusts assertion below for Python 3.7+ if sys.version_info.major == 3: assert sys.version_info.minor < 7, \ ('"%s" is not suitable for Python version %s.%s; use file installed by pip instead' % (__file__, sys.version_info.major, sys.version_info.minor)) def pico_service(req, task=None): if not isinstance(req, dict): raise Exception('request must be a dictionary') client = req.get('client', None) if req.get('name', None) != 'time': raise Exception('request should have "name" set to "time"') if not isinstance(client, pycos.Task): raise Exception('request should have "client" set to task of requester') delay = random.uniform(0.5, 2) # simulate delay in getting result (e.g., reading a sensor or computation) yield task.sleep(delay) raise StopIteration({'result': time.asctime(), 'server': task}) if __name__ == '__main__': # pycos.logger.setLevel(pycos.Logger.DEBUG) # 'secret' is set so only peers that use same secret can communicate; # SSL can be used for encryption if required; see 'rps_node_*.py' for authentication of peers scheduler = pycos.Pycos(name='pico_server', secret='PycOS') # create RPS and register it so remote clients can request execution pycos.RPS(pico_service).register() if sys.version_info.major > 2: read_input = input else: read_input = raw_input while 1: try: line = read_input('Enter "quit" or "exit" to terminate pico_service: ').strip().lower() if line in ('quit', 'exit'): break except Exception: break
nilq/baby-python
python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals

__doc__ = """
hgvs.edit -- representation of edit operations in HGVS variants

NARefAlt and AARefAlt are abstractions of several major variant types.
They are distinguished by whether the ref and alt elements of the structure.
The HGVS grammar for NA and AA are subtly different (e.g., the ref AA in a
protein substitution is part of the location).

"""

import recordtype

from bioutils.sequences import aa_to_aa1, aa1_to_aa3

from hgvs.exceptions import HGVSError


class Edit(object):
    # Abstract marker base class for all edit types in this module.
    pass


class NARefAlt( Edit, recordtype.recordtype('NARefAlt', [('ref',None),('alt',None),('uncertain',False)]) ):
    """
    represents substitutions, deletions, insertions, and indels.

    :ivar ref: reference sequence or length
    :ivar alt: alternate sequence
    :ivar uncertain: boolean indicating whether the variant is uncertain/undetermined
    """

    @property
    def ref_s(self):
        """
        returns a string representing the ref sequence, if it is not None and smells like a sequence

        >>> NARefAlt('ACGT').ref_s
        u'ACGT'
        >>> NARefAlt('7').ref_s
        >>> NARefAlt(7).ref_s

        """
        # NOTE(review): `basestring` exists only on Python 2 (the u'' doctest
        # output above also suggests a Py2 target) — confirm before using
        # this module under Python 3.
        return self.ref if (isinstance(self.ref,basestring) and self.ref and self.ref[0] in 'ACGTUN') else None

    @property
    def ref_n(self):
        """
        returns an integer, either from the `ref` instance variable if it's a number,
        or the length of ref if it's a string, or None otherwise

        >>> NARefAlt('ACGT').ref_n
        4
        >>> NARefAlt('7').ref_n
        7
        >>> NARefAlt(7).ref_n
        7

        """
        try:
            return int(self.ref)
        except ValueError:
            return len(self.ref) if self.ref else None

    def __str__(self):
        if self.ref is None and self.alt is None:
            raise HGVSError('RefAlt: ref and alt sequences are both undefined')

        # subst and delins
        if self.ref is not None and self.alt is not None:
            if self.ref == self.alt:
                s = '='
            elif len(self.alt) == 1 and len(self.ref) == 1 and not self.ref.isdigit():   # don't turn del5insT into 5>T
                s = '{self.ref}>{self.alt}'.format(self=self)
            else:
                s = 'del{self.ref}ins{self.alt}'.format(self=self)
        # del case
        elif self.ref is not None:
            s = 'del{self.ref}'.format(self=self)
        # ins case
        else:           # self.alt is not None
            s = 'ins{self.alt}'.format(self=self)

        # uncertain edits are parenthesized per HGVS convention
        return '('+s+')' if self.uncertain else s

    def _set_uncertain(self):
        """sets the uncertain flag to True; used primarily by the HGVS grammar

        :returns: self
        """
        self.uncertain = True
        return self

    @property
    def type(self):
        """return the type of this Edit

        :returns: edit type (str)
        """
        if self.ref is not None and self.alt is not None:
            if self.ref == self.alt:
                edit_type = 'identity'
            elif len(self.alt) == 1 and len(self.ref) == 1 and not self.ref.isdigit():
                edit_type = 'sub'
            else:
                edit_type = 'delins'
        elif self.ref is not None:
            edit_type = 'del'
        else:
            edit_type = 'ins'
        return edit_type


class AARefAlt( Edit, recordtype.recordtype('AARefAlt', [('ref',None),('alt',None), ('uncertain',False)]) ):
    # Amino-acid ref/alt edit; sequences are normalized to 1-letter codes on
    # construction and rendered as 3-letter codes by __str__.
    def __init__(self,ref, alt, uncertain=False):
        super(AARefAlt, self).__init__(ref=aa_to_aa1(ref), alt=aa_to_aa1(alt), uncertain=uncertain)

    def __str__(self):
        if self.ref is None and self.alt is None:
            #raise HGVSError('RefAlt: ref and alt sequences are both undefined')
            return '='

        # subst and delins
        if self.ref is not None and self.alt is not None:
            if self.ref == self.alt:
                s = '='
            elif len(self.ref) == 1 and len(self.alt) == 1:
                s = aa1_to_aa3(self.alt)
            else:
                s = 'delins{alt}'.format(alt = aa1_to_aa3(self.alt))

        # del case
        elif self.ref is not None and self.alt is None:
            s = 'del'

        # ins case
        elif self.ref is None and self.alt is not None:
            s = 'ins{alt}'.format(alt=aa1_to_aa3(self.alt))

        else:
            raise RuntimeError("Should not be here")

        return '('+s+')' if self.uncertain else s

    def _set_uncertain(self):
        """sets the uncertain flag to True; used primarily by the HGVS grammar

        :returns: self
        """
        self.uncertain = True
        return self

    @property
    def type(self):
        """return the type of this Edit

        :returns: edit type (str)
        """
        if self.ref is not None and self.alt is not None:
            if self.ref == self.alt:
                edit_type = 'identity'
            elif len(self.ref) == 1 and len(self.alt) == 1:
                edit_type = 'sub'
            else:
                edit_type = 'delins'
        elif self.ref is not None and self.alt is None:
            edit_type = 'del'
        elif self.ref is None and self.alt is not None:
            edit_type = 'ins'
        return edit_type


class AASub( AARefAlt ):
    # Amino-acid substitution; only the rendering differs from AARefAlt
    # ('?' is passed through for an unknown alternate residue).
    def __str__(self):
        s = aa1_to_aa3(self.alt) if self.alt != '?' else self.alt
        return '('+s+')' if self.uncertain else s

    @property
    def type(self):
        """return the type of this Edit

        :returns: edit type (str)
        """
        return 'sub'


class AAFs(Edit, recordtype.recordtype('AAFs', [('ref',None),('alt',None),('length',None),('uncertain',False)])):
    # Amino-acid frameshift ("...fsTer<length>").
    def __init__(self,ref,alt,length=None,uncertain=False):
        super(AAFs, self).__init__(ref=aa_to_aa1(ref), alt=aa_to_aa1(alt), length=length, uncertain=uncertain)

    def __str__(self):
        st_length = self.length or ''
        s = "{alt}fsTer{length}".format(alt=aa1_to_aa3(self.alt), length=st_length)
        return '('+s+')' if self.uncertain else s

    def _set_uncertain(self):
        """sets the uncertain flag to True; used primarily by the HGVS grammar

        :returns: self
        """
        self.uncertain = True
        return self

    @property
    def type(self):
        """return the type of this Edit

        :returns: edit type (str)
        """
        return 'fs'


class AAExt(Edit, recordtype.recordtype('AAExt', [('ref',None),('alt',None), ('aaterm', None), ('length',None), ('uncertain',False)])):
    # Amino-acid extension ("...ext<term><length>").
    def __init__(self,ref,alt,aaterm=None, length=None,uncertain=False):
        super(AAExt, self).__init__(ref=aa_to_aa1(ref), alt=aa_to_aa1(alt), aaterm=aa_to_aa1(aaterm), length=length, uncertain=uncertain)

    def __str__(self):
        st_alt = self.alt or ''
        st_aaterm = self.aaterm or ''
        st_length = self.length or ''
        s = "{alt}ext{term}{length}".format(alt=aa1_to_aa3(st_alt), term=aa1_to_aa3(st_aaterm), length=st_length)
        return '('+s+')' if self.uncertain else s

    def _set_uncertain(self):
        """sets the uncertain flag to True; used primarily by the HGVS grammar

        :returns: self
        """
        self.uncertain = True
        return self

    @property
    def type(self):
        """return the type of this Edit

        :returns: edit type (str)
        """
        return 'ext'


class Dup( Edit, recordtype.recordtype('Dup', [('seq',None),('uncertain',False)]) ):
    # Duplication edit.
    def __str__(self):
        # NOTE(review): unlike the other edit types, this does not wrap the
        # string in parentheses when `uncertain` is set — confirm intended.
        return 'dup' + (self.seq or '')

    def _set_uncertain(self):
        """sets the uncertain flag to True; used primarily by the HGVS grammar

        :returns: self
        """
        self.uncertain = True
        return self

    @property
    def type(self):
        """return the type of this Edit

        :returns: edit type (str)
        """
        return 'dup'


class Repeat( Edit, recordtype.recordtype('Repeat', [('seq',None),('min',None),('max',None),('uncertain',False)]) ):
    # Repeat edit with a count range [min, max].
    def __str__(self):
        if self.min > self.max:
            raise HGVSError('Repeat min count must be less than or equal to max count')
        if self.min == self.max:
            return '{self.seq}[{self.min}]'.format(self=self)
        return '{self.seq}({self.min}_{self.max})'.format(self=self)

    def _set_uncertain(self):
        """sets the uncertain flag to True; used primarily by the HGVS grammar

        :returns: self
        """
        self.uncertain = True
        return self

    @property
    def type(self):
        """return the type of this Edit

        :returns: edit type (str)
        """
        return 'repeat'


class NACopy(Edit, recordtype.recordtype('NACopy', ['copy', ('uncertain', False)])):
    # Copy-number edit ("copy<n>").
    def __str__(self):
        s = 'copy{}'.format(self.copy)
        return '('+s+')' if self.uncertain else s

    def _set_uncertain(self):
        """sets the uncertain flag to True; used primarily by the HGVS grammar

        :returns: self
        """
        self.uncertain = True
        return self

    @property
    def type(self):
        """return the type of this Edit

        :returns: edit type (str)
        """
        return 'copy'


class NADupN(Edit, recordtype.recordtype('NADupN', ['n', ('uncertain', False)])):
    # Duplication with an explicit count ("dup<n>").
    def __str__(self):
        s = 'dup{}'.format(self.n)
        return '('+s+')' if self.uncertain else s

    def _set_uncertain(self):
        """sets the uncertain flag to True; used primarily by the HGVS grammar

        :returns: self
        """
        self.uncertain = True
        return self

    @property
    def type(self):
        """return the type of this Edit

        :returns: edit type (str)
        """
        return 'dup'


if __name__ == "__main__":
    import doctest
    doctest.testmod()


# class Inv( Edit, recordtype.recordtype('Inv', [], default=None) ):
#     def __str__(self):
#         return ''
#
# class Con( Edit, recordtype.recordtype('Con', ['con'], default=None) ):
#     def __str__(self):
#         return self.con
#
# class ComplexVariant( Edit, recordtype.recordtype('ComplexVariant', ['edits','rel'], default=None) ):
#     def __str__(self):
#         return '[' + self.rel.join( self.edits ) + ']'
#
# class CompoundVariant( Edit, recordtype.recordtype('CompoundVariant', ['edits'], default=None) ):
#     def __str__(self):
#         return ';'.join( [ '['+e+']' for e in self.edits ] )
#
# class MosaicVariant( Edit, recordtype.recordtype('Edit', ['edit'], default=None) ):
#     def __str__(self):
#         return '[=/{self.edit}]'.format(self=self)
#
# class ChimericVariant( Edit, recordtype.recordtype('Edit', ['edit'], default=None) ):
#     def __str__(self):
#         return '[=//{self.edit}]'.format(self=self)

## <LICENSE>
## Copyright 2014 HGVS Contributors (https://bitbucket.org/hgvs/hgvs)
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
##     http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
## </LICENSE>
python
#!/usr/bin/env python
"""Downloads and unzips the KITTI tracking data.

Warning: This can take a while, and use up >100Gb of disk space."""
# FIX: the original docstring opened with four quotes (`""""Downloads...`),
# leaving a stray quote character inside the docstring text.
# ref from https://github.com/utiasSTARS/pykitti/blob/master/pykitti/downloader/tracking.py

from __future__ import print_function

import argparse
import os
import sys
from subprocess import call
import glob

# Object tracking 2012
# http://www.cvlibs.net/datasets/kitti/eval_tracking.php
URL_BASE = "https://s3.eu-central-1.amazonaws.com/avg-kitti/"

tracking_dir_names = ['image_02', 'image_03', 'velodyne', 'calib', 'oxts', 'label_02', 'det_02']  # folder name under tracking/training
tracking_dir_zip_tags = ['image_2', 'image_3', 'velodyne', 'calib', 'oxts', 'label_2', 'det_2_lsvm']  # original zip file name
# lsvm is L-SVM reference detections for training and test set (L-SVM), 108 MB
# data_tracking_oxts.zip is GPS/IMU data, if you want to use map information (8 MB)


def parse_args():
    """Parse command-line args; only --kitti_root, the dataset root folder."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--kitti_root', type=str,
                        default=os.path.join('/mnt/DATA5T', 'Kitti'))  # /mnt/DATA5T/Kitti, /DATA5T/Datasets
    return parser.parse_args(sys.argv[1:])


## Need to clean up lsvm as their files have trailing whitespaces
def clean_file(filename):
    """Strip trailing whitespace from every line of `filename`, in place.

    FIX: the original opened the file twice without context managers, so
    the handles leaked if reading or writing raised an exception.
    """
    with open(filename, 'r') as f:
        new_lines = [line.rstrip() for line in f]
    with open(filename, 'w') as f:
        for line in new_lines:
            f.write(line + '\n')


def clean_lsvm(lsvm_dir):
    """Run clean_file on every .txt label file in `lsvm_dir`."""
    for filename in glob.glob(lsvm_dir + '/*.txt'):
        print('Cleaning ', filename)
        clean_file(filename)


def main():
    """Download and unpack each KITTI tracking archive that is not already
    present, cleaning the L-SVM detection files after extraction.

    :returns: 0 on success, 1 when run from the wrong directory with a
        relative --kitti_root.
    """
    args = parse_args()
    kitti_dir = args.kitti_root

    # Perform a few sanity checks to make sure we're operating in the right dir
    # when left with the default args.
    if not os.path.isabs(kitti_dir):
        if not os.path.isdir('src'):
            os.chdir('..')
        if not os.path.isdir('src'):
            print("Please make sure to run this tool from the DynSLAM "
                  "project root when using relative paths.")
            return 1

    tracking_dir = os.path.join(kitti_dir, 'tracking')
    os.makedirs(tracking_dir, exist_ok=True)
    os.chdir(tracking_dir)

    tracking_zip_names = ["data_tracking_" + name + ".zip" for name in tracking_dir_zip_tags]
    for dir_name, zip_name in zip(tracking_dir_names, tracking_zip_names):
        # The "canary" directory tells us whether this archive was already
        # extracted on a previous run.
        canary_dir = os.path.join('training', dir_name)
        if os.path.isdir(canary_dir):
            print("Directory {} canary dir seems to exist, so I will assume the data is there.".format(canary_dir))
        else:
            if os.path.exists(zip_name):
                print("File {} exists. Not re-downloading.".format(zip_name))
            else:
                url = URL_BASE + zip_name
                print("Downloading file {} to folder {}.".format(zip_name, kitti_dir))
                call(['wget', url])
            call(['unzip', '-o', zip_name])
            if str(canary_dir) == 'training/det_02':
                print("Need to trim whitespaces for lsvm label files")
                clean_lsvm('training/det_02')
    return 0


if __name__ == '__main__':
    sys.exit(main())
nilq/baby-python
python
from collections.abc import Iterable
from enum import Enum
import json
import logging
import subprocess
from typing import Union, Any, Optional
from pathlib import Path

from scipy import sparse
import scipy

try:
    # `NoneType` is never referenced in this module; guard the import so the
    # module still works when the optional `typer` dependency is absent.
    from typer.models import NoneType  # noqa: F401
except ImportError:
    pass

logger = logging.getLogger(__name__)


def expand_paths(path_or_pattern):
    """
    Make a list of paths from a glob pattern

    From https://stackoverflow.com/a/51108375
    """
    path = Path(path_or_pattern).expanduser()
    # Glob from the filesystem root for absolute paths, so patterns like
    # `/data/*.txt` work the same as relative ones.
    parts = path.parts[1:] if path.is_absolute() else path.parts
    return list(Path(path.root).glob(str(Path("").joinpath(*parts))))


def get_total_lines(paths: list[Union[Path, str]], encoding: str = "utf-8") -> int:
    """
    Get the total number of lines (read: documents) to process
    """
    logger.info("Calculating total number of documents...")
    try:
        # `wc` is much faster than counting lines in Python. Pass the path as
        # a list argument (no shell) so paths containing spaces or shell
        # metacharacters are handled safely.
        return sum(
            int(subprocess.check_output(["wc", "-l", str(p)]).split()[0])
            for p in paths
        )
    except (subprocess.CalledProcessError, OSError):
        # Fallback: pure-Python count (e.g. `wc` unavailable on this system).
        return sum(1 for p in paths for _ in open(p, encoding=encoding))


def read_lines(path: Union[Path, str], encoding: str = "utf-8") -> list[str]:
    """
    Read the lines in a file, skipping blank/whitespace-only lines.

    Note: line terminators are preserved on the returned strings.
    """
    with open(path, encoding=encoding) as infile:
        return [line for line in infile if line.strip()]


def save_lines(obj: Iterable, fpath: Union[str, Path]):
    """Write each element of `obj` on its own line (no trailing newline)."""
    with open(fpath, "w") as outfile:
        outfile.write("\n".join(obj))


def save_json(obj: Any, fpath: Union[str, Path], indent: Optional[int] = None):
    """Serialize `obj` as JSON to `fpath`."""
    with open(fpath, "w") as outfile:
        json.dump(obj, outfile, indent=indent)


def save_params(params: dict[str, Any], fpath: Union[str, Path]):
    """
    Save a parameter dictionary as JSON, coercing non-serializable values.

    Enum members are stored by value; anything that is not a JSON-safe
    scalar/sequence is stringified.
    """
    safe_params = {}
    safe_types = (float, int, str, bool, type(None))
    for k, v in params.items():
        if isinstance(v, Enum):
            v = v.value
        if not isinstance(v, (tuple, list) + safe_types):
            v = str(v)
        # Sequences may still hold non-serializable elements: stringify each.
        if isinstance(v, (tuple, list)) and any(
            (not isinstance(item, safe_types)) for item in v
        ):
            v = [str(item) for item in v]
        safe_params[k] = v
    save_json(safe_params, fpath, indent=2)


def save_dtm_as_jsonl(
    dtm: sparse.csr_matrix,
    vocab: dict[str, int],
    ids: list[str],
    outpath: Union[str, Path],
):
    """
    Save document-term matrix as a dictionary in the following format,
    where each row is a document:
    {
        "id": <doc_1>,
        "counts": {
            <word_2>: <count_of_word_2_in_doc_1>,
            <word_6>: <count_of_word_6_in_doc_1>,
            ...
        },
    }
    """
    # Invert word -> index into index -> word for row decoding.
    inv_vocab = dict(zip(vocab.values(), vocab.keys()))
    with open(outpath, mode="w") as outfile:
        for i, (row, doc_id) in enumerate(zip(dtm, ids)):
            words_in_doc = [inv_vocab[idx] for idx in row.indices]
            counts = [int(v) for v in row.data]  # int64 not serializable
            word_counts = dict(zip(words_in_doc, counts))
            row_json = json.dumps({"id": doc_id, "counts": word_counts})
            # Write newline separators between rows, not after the last row.
            if i == 0:
                outfile.write(row_json)
            else:
                outfile.write(f"\n{row_json}")


def gen_ngrams(tokens: list[str], min_n: int, max_n: int) -> list[str]:
    """
    Create all ngrams from `tokens` where n is between `min_n`, `max_n`,
    inclusive. Ngram tokens are joined with underscores.
    """
    return [
        "_".join(tokens[i : i + n])
        for n in range(min_n, max_n + 1)
        for i in range(len(tokens) - n + 1)
    ]
nilq/baby-python
python
#----------------------------------------------------------------------
# This programme provides a simple example of how to use "approach control"
# for colour light signals (and to improve the automation of semaphore
# signals).
#
# For colour light signals - this can be used where it is necessary to slow
# a train down to take a diverging route. For "approach on red", the junction
# signal is set to DANGER (and all the signals behind show increasingly
# restrictive aspects as appropriate). When the signal is approached, it
# automatically changes to PROCEED, enabling the train to continue along
# the divergent route. "Approach on yellow" is used when the speed
# restriction on the divergent route is less restrictive but still slower
# than that of the main route. In this case the junction signal shows a
# CAUTION aspect, and the signals behind show flashing yellow and double
# flashing yellow to indicate the divergent route.
#
# For semaphore signals, this can be used for simulating/automating the
# series of signals within a block section (e.g. outer home, inner home,
# starter, advanced starter etc). A home signal should show a PROCEED aspect
# for an approaching train if a subsequent home signal (in the same 'block
# section') is set to DANGER. In this case all preceding home signals (and
# the distant for the block section) remain ON to slow down the train on
# the approach to the first home signal. As each signal is approached, it
# is then cleared to enable the train to proceed (at low speed) towards the
# next home signal (which would be ON) - and so on.
#
# This programme also provides an example of a multiple-window application,
# showing how all callbacks (from external sensor events) are injected back
# into the main Tkinter thread (via the Tkinter event queue).
# ---------------------------------------------------------------------

from tkinter import *
from model_railway_signals import *
import logging
import threading

#----------------------------------------------------------------------
# Configure the log level. If no 'level' is specified only warnings and errors
# will be generated. A level of 'INFO' will tell you what the various functions
# are doing 'under the hood' - useful when developing/debugging a layout
# signalling scheme. A level of 'DEBUG' will additionally report the DCC Bus
# commands being sent to the Pi-SPROG
#----------------------------------------------------------------------
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)

#----------------------------------------------------------------------
# WINDOW 1 - Main Callback for 'Release on Red' Approach Control
# (Semaphore Signals)
#----------------------------------------------------------------------

def window1_callback_function(item_id, callback_type):
    """Handle all point/signal/section events for the Window 1 schematic
    (semaphore signals 21-26, point 21, sections 21-25)."""
    print("Window 1: Callback into main program - Item: "+str(item_id)+" - Callback Type: "+str(callback_type))
    print("Window 1: Receiving callback event in thread " + str(threading.get_ident()))

    # Deal with changes to the Track Occupancy (based on "signal passed" events).
    # We use the label of the cleared section (returned by 'clear_section_occupied')
    # to set the label for the next section (i.e. 'pass' the current train along)
    if callback_type == sig_callback_type.sig_passed:
        if item_id == 21:
            set_section_occupied(21)
        elif item_id == 22:
            clear_section_occupied(21)
            set_section_occupied(22)
        elif item_id == 23:
            clear_section_occupied(22)
            set_section_occupied(23)
        elif item_id == 24 and point_switched(21):
            set_section_occupied(25, clear_section_occupied(23))
        # Fixed: this branch previously tested point 1 (a Window 2 object)
        # instead of point 21, so the main-line move was never handled.
        elif item_id == 24 and not point_switched(21):
            set_section_occupied(24, clear_section_occupied(23))
        elif item_id == 25:
            trigger_timed_signal(25, 0, 5)
            clear_section_occupied(25)
        elif item_id == 26:
            trigger_timed_signal(26, 0, 5)
            clear_section_occupied(24)

    # Override signals based on track occupancy - we could use signal passed
    # events but we also need to allow for manual setting/resetting of the
    # track occupancy sections. Note that we leave the distant (signal 21)
    # to deal with later
    if section_occupied(22):
        set_signal_override(22)
    else:
        clear_signal_override(22)
    if section_occupied(23):
        set_signal_override(23)
    else:
        clear_signal_override(23)
    if ((point_switched(21) and section_occupied(25)) or
            (not point_switched(21) and section_occupied(24))):
        set_signal_override(24)
    else:
        clear_signal_override(24)

    # Refresh the route settings
    if point_switched(21):
        set_route(24, route=route_type.LH1)
    else:
        set_route(24, route=route_type.MAIN)

    # Process the signal/point interlocking
    if not fpl_active(21):
        lock_signal(24)
    else:
        unlock_signal(24)
    if signal_clear(24):
        lock_point(21)
    else:
        unlock_point(21)

    # Here is the approach control code - each home signal is held at DANGER
    # while the next signal along the route is not showing PROCEED
    if callback_type not in (sig_callback_type.sig_released,):
        if point_switched(21) and signal_state(25) != signal_state_type.PROCEED:
            set_approach_control(24)
        elif not point_switched(21) and signal_state(26) != signal_state_type.PROCEED:
            set_approach_control(24)
        else:
            clear_approach_control(24)
        if signal_state(24) != signal_state_type.PROCEED:
            set_approach_control(23)
        else:
            clear_approach_control(23)
        if signal_state(23) != signal_state_type.PROCEED:
            set_approach_control(22)
        else:
            clear_approach_control(22)

    # Finally - Override the distant signal if any of the home signals ahead
    # are set to DANGER or if the train has just entered the section
    # immediately beyond the signal. In this case, we only need to check
    # the state of the signal ahead
    if section_occupied(21) or signal_state(22) != signal_state_type.PROCEED:
        set_signal_override(21)
    else:
        clear_signal_override(21)
    return

#----------------------------------------------------------------------------------------------
# WINDOW 2 - Main Callback for 'Release on Yellow' Approach Control (Colour Light Signals)
#----------------------------------------------------------------------------------------------

def window2_callback_function(item_id, callback_type):
    """Handle all point/signal/section events for the Window 2 schematic
    (colour light signals 1-6, point 1, sections 1-5)."""
    print("Window 2: Callback into main program - Item: "+str(item_id)+" - Callback Type: "+str(callback_type))
    print("Window 2: Receiving callback event in thread " + str(threading.get_ident()))

    # Deal with changes to the Track Occupancy (based on "signal passed" events).
    # We use the label of the cleared section (returned by 'clear_section_occupied')
    # to set the label for the next section (i.e. 'pass' the current train along)
    if callback_type == sig_callback_type.sig_passed:
        if item_id == 1:
            set_section_occupied(1)
        elif item_id == 2:
            set_section_occupied(2, clear_section_occupied(1))
        elif item_id == 3:
            set_section_occupied(3, clear_section_occupied(2))
        elif item_id == 4 and point_switched(1):
            set_section_occupied(5, clear_section_occupied(3))
        elif item_id == 4 and not point_switched(1):
            set_section_occupied(4, clear_section_occupied(3))
        elif item_id == 5:
            trigger_timed_signal(5, 0, 5)
            clear_section_occupied(5)
        elif item_id == 6:
            trigger_timed_signal(6, 0, 5)
            clear_section_occupied(4)

    # Override signals based on track occupancy - we could use signal passed
    # events but we also need to allow for manual setting/resetting of the
    # track occupancy sections
    if section_occupied(1):
        set_signal_override(1)
    else:
        clear_signal_override(1)
    if section_occupied(2):
        set_signal_override(2)
    else:
        clear_signal_override(2)
    if section_occupied(3):
        set_signal_override(3)
    else:
        clear_signal_override(3)
    if ((point_switched(1) and section_occupied(5)) or
            (not point_switched(1) and section_occupied(4))):
        set_signal_override(4)
    else:
        clear_signal_override(4)

    # Refresh the signal aspects based on the route settings - need to work
    # back along the route that is set to ensure we are updating each signal
    # based on the signal ahead
    if point_switched(1):
        set_route(4, route=route_type.LH1)
        update_signal(4, sig_ahead_id=5)
    else:
        set_route(4, route=route_type.MAIN)
        update_signal(4, sig_ahead_id=6)
    update_signal(3, sig_ahead_id=4)
    update_signal(2, sig_ahead_id=3)
    update_signal(1, sig_ahead_id=2)

    # Process the signal/point interlocking
    if not fpl_active(1):
        lock_signal(4)
    else:
        unlock_signal(4)
    if signal_clear(4):
        lock_point(1)
    else:
        unlock_point(1)

    # Here is the approach control code - we only want to SET the approach
    # control when the route is first set up for the diverging route or when
    # the signal is passed. This is so we don't inadvertently SET the approach
    # control on other events received between the train releasing the
    # approach control and the train passing the signal. We also need to
    # CLEAR the approach control if the route is switched back to main
    if ((callback_type == point_callback_type.point_switched and item_id == 1 and point_switched(1)) or
            (callback_type == sig_callback_type.sig_passed and item_id == 4 and point_switched(1))):
        set_approach_control(4, release_on_yellow=True)
    if callback_type == point_callback_type.point_switched and item_id == 1 and not point_switched(1):
        clear_approach_control(4)
    return

#----------------------------------------------------------------------------------------------
# WINDOW 3 - Main Callback for 'Release on Red' Approach Control (Colour Light Signals)
#----------------------------------------------------------------------------------------------

def window3_callback_function(item_id, callback_type):
    """Handle all point/signal/section events for the Window 3 schematic
    (colour light signals 11-16, point 11, sections 11-15)."""
    print("Window 3: Callback into main program - Item: "+str(item_id)+" - Callback Type: "+str(callback_type))
    print("Window 3: Receiving callback event in thread " + str(threading.get_ident()))

    # Deal with changes to the Track Occupancy (based on "signal passed" events).
    # We use the label of the cleared section (returned by 'clear_section_occupied')
    # to set the label for the next section (i.e. 'pass' the current train along)
    if callback_type == sig_callback_type.sig_passed:
        if item_id == 11:
            set_section_occupied(11)
        elif item_id == 12:
            set_section_occupied(12, clear_section_occupied(11))
        elif item_id == 13:
            set_section_occupied(13, clear_section_occupied(12))
        elif item_id == 14 and point_switched(11):
            set_section_occupied(15, clear_section_occupied(13))
        elif item_id == 14 and not point_switched(11):
            set_section_occupied(14, clear_section_occupied(13))
        elif item_id == 15:
            trigger_timed_signal(15, 0, 3)
            clear_section_occupied(15)
        elif item_id == 16:
            trigger_timed_signal(16, 0, 3)
            clear_section_occupied(14)

    # Override signals based on track occupancy - we could use signal passed
    # events but we also need to allow for manual setting/resetting of the
    # track occupancy sections
    if section_occupied(11):
        set_signal_override(11)
    else:
        clear_signal_override(11)
    if section_occupied(12):
        set_signal_override(12)
    else:
        clear_signal_override(12)
    if section_occupied(13):
        set_signal_override(13)
    else:
        clear_signal_override(13)
    if ((point_switched(11) and section_occupied(15)) or
            (not point_switched(11) and section_occupied(14))):
        set_signal_override(14)
    else:
        clear_signal_override(14)

    # Refresh the signal aspects based on the route settings - need to work
    # back along the route that is set to ensure we are updating each signal
    # based on the signal ahead
    if point_switched(11):
        set_route(14, theatre_text="1")
        update_signal(14, sig_ahead_id=15)
    else:
        set_route(14, theatre_text="2")
        update_signal(14, sig_ahead_id=16)
    update_signal(13, sig_ahead_id=14)
    update_signal(12, sig_ahead_id=13)
    update_signal(11, sig_ahead_id=12)

    # Process the signal/point interlocking
    if not fpl_active(11):
        lock_signal(14)
    else:
        unlock_signal(14)
    if signal_clear(14):
        lock_point(11)
    else:
        unlock_point(11)

    # Here is the approach control code - we only want to SET the approach
    # control when the route is first set up for the diverging route or when
    # the signal is passed. This is so we don't inadvertently SET the approach
    # control on other events received between the train releasing the
    # approach control and the train passing the signal. We also need to
    # CLEAR the approach control if the route is switched back to main.
    # (Fixed: a copy-pasted stanza operating on Window 2's point 1 / signal 4
    # has been removed from this callback - it belonged to Window 2 only.)
    if ((callback_type == point_callback_type.point_switched and item_id == 11 and point_switched(11)) or
            (callback_type == sig_callback_type.sig_passed and item_id == 14 and point_switched(11))):
        set_approach_control(14, release_on_yellow=False)
    if callback_type == point_callback_type.point_switched and item_id == 11 and not point_switched(11):
        clear_approach_control(14)
    return

#------------------------------------------------------------------------------------
# This is where the code begins
#------------------------------------------------------------------------------------

print ("Creating Windows and Canvases")
window1 = Tk()
window1.title("Root Window 1: An example of using 'Release on Red' Approach Control for Semaphore Signals")
canvas1 = Canvas(window1, height=300, width=1100, bg="grey85")
canvas1.pack()
window2 = Toplevel(window1)
window2.title("Window 2: An example of using 'Release on Yellow' Approach Control for Colour Light Signals")
canvas2 = Canvas(window2, height=300, width=1100, bg="grey85")
canvas2.pack()
window3 = Toplevel(window2)
window3.title("Window 3: An example of using 'Release on Red' Approach Control for Colour Light Signals")
canvas3 = Canvas(window3, height=300, width=1100, bg="grey85")
canvas3.pack()

print ("Initialising Pi Sprog")
initialise_pi_sprog ()
request_dcc_power_on()

#----------------------------------------------------------------------------------------------
# WINDOW 3 - An example of using 'Release on Red' Approach Control for Colour Light Signals
#----------------------------------------------------------------------------------------------

print ("Window 3: Drawing Schematic and creating points")
# Draw the the bottom line (up to the first point)
canvas3.create_line(0, 150, 810, 150, fill="black", width=3)
# Create (and draw) the first point - a left hand point with a Facing Point Lock
create_point(canvas3, 11, point_type.LH, 835, 150, "black",
             point_callback=window3_callback_function, fpl=True)
# Draw the Main Line and Loop Line
canvas3.create_line(835, 125, 860, 100, fill="black", width=3)  # 45 degree line from point to start of loop
canvas3.create_line(860, 100, 1100, 100, fill="black", width=3)  # Loop line
canvas3.create_line(860, 150, 1100, 150, fill="black", width=3)  # Main Line

print ("Window 3: Creating the track Occupancy Sections")
create_section(canvas3, 11, 175, 150, section_callback=window3_callback_function)
create_section(canvas3, 12, 400, 150, section_callback=window3_callback_function)
create_section(canvas3, 13, 625, 150, section_callback=window3_callback_function)
create_section(canvas3, 14, 925, 150, section_callback=window3_callback_function)
create_section(canvas3, 15, 925, 100, section_callback=window3_callback_function)

# Fixed: this progress message previously said "Window 2"
print ("Window 3: Creating Signals")
create_colour_light_signal (canvas3, 11, 50, 150,
                            signal_subtype=signal_sub_type.four_aspect,
                            sig_callback=window3_callback_function,
                            sig_passed_button=True,
                            fully_automatic=True,
                            refresh_immediately=False)
create_colour_light_signal (canvas3, 12, 275, 150,
                            signal_subtype=signal_sub_type.four_aspect,
                            sig_callback=window3_callback_function,
                            sig_passed_button=True,
                            fully_automatic=True,
                            refresh_immediately=False)
create_colour_light_signal (canvas3, 13, 500, 150,
                            signal_subtype=signal_sub_type.four_aspect,
                            sig_callback=window3_callback_function,
                            sig_passed_button=True,
                            fully_automatic=True,
                            refresh_immediately=False)
create_colour_light_signal (canvas3, 14, 725, 150,
                            signal_subtype=signal_sub_type.four_aspect,
                            sig_callback=window3_callback_function,
                            sig_passed_button=True,
                            refresh_immediately=False,
                            approach_release_button=True,
                            theatre_route_indicator=True)
create_colour_light_signal (canvas3, 15, 1000, 100,
                            signal_subtype=signal_sub_type.four_aspect,
                            sig_callback=window3_callback_function,
                            fully_automatic=True,
                            sig_passed_button=True)
create_colour_light_signal (canvas3, 16, 1000, 150,
                            signal_subtype=signal_sub_type.four_aspect,
                            sig_callback=window3_callback_function,
                            fully_automatic=True,
                            sig_passed_button=True)

print ("Window 3: Setting Initial Route and Interlocking")
# Set the initial interlocking conditions by running the main callback function
window3_callback_function(None, None)

#----------------------------------------------------------------------------------------------
# WINDOW 2 - An example of using 'Release on Yellow' Approach Control for Colour Light Signals
#----------------------------------------------------------------------------------------------

print ("Window 2: Creating DCC Mappings")
# Define the DCC mappings for the signals. In this instance, we're only going to generate
# mappings for the signals that support flashing aspects (i.e. Traintech 4 aspects with
# flashing aspects)
# Signal 2 (addresses 13,14,15,16) - uses the simplified Train_Tech signal mapping function
# Fixed: this mapping previously (and incorrectly) specified sig_id = 3
map_traintech_signal (sig_id = 2, base_address = 13)
# Signal 3 (addresses 17,18,19,20) - uses the simplified Train_Tech signal mapping function
map_traintech_signal (sig_id = 3, base_address = 17)

print ("Window 2: Drawing Schematic and creating points")
# Draw the the top line (up to the first point)
canvas2.create_line(0, 150, 800, 150, fill="black", width=3)
# Create (and draw) the first point - a left hand point with a Facing Point Lock
create_point(canvas2, 1, point_type.LH, 825, 150, "black",
             point_callback=window2_callback_function, fpl=True)
# Draw the Main Line and Loop Line
canvas2.create_line(825, 125, 850, 100, fill="black", width=3)  # 45 degree line from point to start of loop
canvas2.create_line(850, 100, 1100, 100, fill="black", width=3)  # Loop line
canvas2.create_line(850, 150, 1100, 150, fill="black", width=3)  # Main Line

print ("Window 2: Creating the track Occupancy Sections")
create_section(canvas2, 1, 175, 150, section_callback=window2_callback_function)
create_section(canvas2, 2, 400, 150, section_callback=window2_callback_function)
create_section(canvas2, 3, 625, 150, section_callback=window2_callback_function)
create_section(canvas2, 4, 925, 150, section_callback=window2_callback_function)
create_section(canvas2, 5, 925, 100, section_callback=window2_callback_function)

print ("Window 2: Creating Signals")
create_colour_light_signal (canvas2, 1, 50, 150,
                            signal_subtype=signal_sub_type.four_aspect,
                            sig_callback=window2_callback_function,
                            sig_passed_button=True,
                            fully_automatic=True,
                            refresh_immediately=False)
create_colour_light_signal (canvas2, 2, 275, 150,
                            signal_subtype=signal_sub_type.four_aspect,
                            sig_callback=window2_callback_function,
                            sig_passed_button=True,
                            fully_automatic=True,
                            refresh_immediately=False)
create_colour_light_signal (canvas2, 3, 500, 150,
                            signal_subtype=signal_sub_type.four_aspect,
                            sig_callback=window2_callback_function,
                            sig_passed_button=True,
                            fully_automatic=True,
                            refresh_immediately=False)
create_colour_light_signal (canvas2, 4, 725, 150,
                            signal_subtype=signal_sub_type.four_aspect,
                            sig_callback=window2_callback_function,
                            sig_passed_button=True,
                            refresh_immediately=False,
                            approach_release_button=True,
                            lhfeather45=True)
create_colour_light_signal (canvas2, 5, 1000, 100,
                            signal_subtype=signal_sub_type.four_aspect,
                            sig_callback=window2_callback_function,
                            fully_automatic=True,
                            sig_passed_button=True)
create_colour_light_signal (canvas2, 6, 1000, 150,
                            signal_subtype=signal_sub_type.four_aspect,
                            sig_callback=window2_callback_function,
                            fully_automatic=True,
                            sig_passed_button=True)

print ("Window 2: Setting Initial Route and Interlocking")
# Set the initial interlocking conditions by running the main callback function
window2_callback_function(None, None)

#----------------------------------------------------------------------------------------------
# WINDOW 1 - An example of using 'Release on Red' Approach Control for Semaphore Signals
#----------------------------------------------------------------------------------------------

print ("Window 1: Drawing Schematic and creating points")
# Draw the the top line (up to the first point)
canvas1.create_line(0, 150, 800, 150, fill="black", width=3)
# Create (and draw) the first point - a left hand point with a Facing Point Lock
create_point(canvas1, 21, point_type.LH, 825, 150, "black",
             point_callback=window1_callback_function, fpl=True)
# Draw the Main Line and Loop Line
canvas1.create_line(825, 125, 850, 100, fill="black", width=3)  # 45 degree line from point to start of loop
canvas1.create_line(850, 100, 1100, 100, fill="black", width=3)  # Loop line
canvas1.create_line(850, 150, 1100, 150, fill="black", width=3)  # Main Line

print ("Window 1: Creating the track Occupancy Sections")
create_section(canvas1, 21, 175, 150, section_callback=window1_callback_function)
create_section(canvas1, 22, 400, 150, section_callback=window1_callback_function)
create_section(canvas1, 23, 625, 150, section_callback=window1_callback_function)
create_section(canvas1, 24, 925, 150, section_callback=window1_callback_function)
create_section(canvas1, 25, 925, 100, section_callback=window1_callback_function)

print ("Window 1: Creating Signals")
create_semaphore_signal (canvas1, 21, 50, 150,
                         distant=True,
                         sig_callback=window1_callback_function,
                         sig_passed_button=True)
create_semaphore_signal (canvas1, 22, 275, 150,
                         sig_callback=window1_callback_function,
                         approach_release_button=True,
                         sig_passed_button=True)
create_semaphore_signal (canvas1, 23, 500, 150,
                         sig_callback=window1_callback_function,
                         approach_release_button=True,
                         sig_passed_button=True)
create_semaphore_signal (canvas1, 24, 725, 150,
                         sig_callback=window1_callback_function,
                         sig_passed_button=True,
                         approach_release_button=True,
                         lh1_signal=True)
create_semaphore_signal (canvas1, 25, 1000, 100,
                         sig_callback=window1_callback_function,
                         sig_passed_button=True)
create_semaphore_signal (canvas1, 26, 1000, 150,
                         sig_callback=window1_callback_function,
                         sig_passed_button=True)

print ("Window 1: Setting Initial Route and Interlocking")
# Set the initial interlocking conditions by running the main callback function
window1_callback_function(None, None)

#----------------------------------------------------------------------------------------

print("Entering Main Event Loop")
print("Main Thread is: " + str(threading.get_ident()))
# Now enter the main event loop and wait for a button press (which will trigger a callback)
window1.mainloop()
nilq/baby-python
python
import subprocess
import json
from datetime import datetime

from pydruid.client import PyDruid
from pydruid.utils.aggregators import (longmax, doublemax)
from pydruid.utils.filters import Dimension
from kafka import KafkaProducer
from iso8601utils import validators


class KafkaAccessLayer(object):
    """Thin wrapper around a KafkaProducer that serialises values as JSON."""

    def __init__(self):
        # Connection is established lazily via connect()
        self.connection = None

    def connect(self, uri):
        """Connect to the Kafka broker(s) at *uri* (bootstrap servers)."""
        try:
            def serializer(v):
                return json.dumps(v).encode('utf-8')
            self.connection = KafkaProducer(bootstrap_servers=uri,
                                            value_serializer=serializer)
        except Exception as err:
            # Re-raise with a uniform message, preserving the original cause
            raise Exception('Kafka connection error: {0}'.format(uri)) from err

    def write_stats(self, id, name, stats, **kwargs):
        """Publish (timestamp, cpu, mem) samples for a process to the
        'supervisor' topic and flush.

        *stats* is an iterable of (unix_timestamp, cpu, mem) tuples.
        """
        for stat in stats:
            msg = {'agent_id': id,
                   'process_name': name,
                   'timestamp': datetime.utcfromtimestamp(stat[0])
                   .strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
                   'cpu': stat[1],
                   'mem': stat[2]}
            self.connection.send('supervisor', msg)
        self.connection.flush()


kafka = KafkaAccessLayer()


class PlyQLError(Exception):
    """Raised when a plyql query fails to execute."""

    def __init__(self, expr, msg):
        self.expr = expr
        self.message = msg


class PlyQLConnectionError(PlyQLError):
    """Raised when plyql cannot reach the Druid broker."""

    def __init__(self, expr, msg, uri):
        super(PlyQLConnectionError, self).__init__(expr, msg)
        self.uri = uri


class PlyQL(object):
    """Runs SQL-like queries against Druid via the `plyql` CLI."""

    def __init__(self, uri):
        self.uri = uri

    def query(self, q, interval=None):
        """Execute query *q* (optionally over ISO-8601 *interval*) and return
        the parsed JSON result.

        Raises PlyQLConnectionError / PlyQLError on failure.
        """
        command = ['plyql', '-h', str(self.uri), '-q', str(q), '-o', 'json']
        if interval:
            command.extend(['-i', interval])
        process = subprocess.Popen(command, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        out, err = process.communicate()
        if err:
            # communicate() yields bytes; decode before parsing the message
            # (splitting bytes with a str separator would raise TypeError).
            err = err.decode('utf-8', errors='replace')
            try:
                # Connection failures look like "... <uri>"; anything that
                # doesn't split into exactly three tokens is a query error.
                (_, _, uri) = err.split(' ')
                raise PlyQLConnectionError(err, 'Could not connect to Druid.',
                                           uri)
            except ValueError:
                raise PlyQLError(err, 'Error executing query.')
        else:
            return json.loads(out)


class DruidAccessLayer(object):
    """Query helper for the 'supervisor' datasource in Druid."""

    # Granularities accepted by timeseries queries
    timeseries_granularities = ['none', 'second', 'minute', 'fifteen_minute',
                                'thirty_minute', 'hour', 'day', 'week',
                                'month', 'quarter', 'year']

    # Granularities accepted by select queries ('all' instead of 'none')
    select_granularities = ['all', 'second', 'minute', 'fifteen_minute',
                            'thirty_minute', 'hour', 'day', 'week',
                            'month', 'quarter', 'year']

    def __init__(self):
        self.connection = None
        self.plyql = None

    def connect(self, uri):
        """Connect to the Druid broker at *uri* and verify that the
        'supervisor' table exists."""
        self.connection = PyDruid('http://{0}'.format(uri), 'druid/v2/')
        self.plyql = PlyQL(uri)
        try:
            tables = self.tables()
        except Exception as err:
            raise Exception('Druid connection error: {0}'.format(uri)) from err
        # Check outside the try block so this specific error is not swallowed
        # and re-reported as a generic connection failure.
        if {'Tables_in_database': 'supervisor'} not in tables:
            raise Exception('Druid connection error: missing '
                            '"supervisor" table')

    def __validate_granularity__(self, granularity, supported_granularities):
        """Return a Druid granularity spec for *granularity*.

        Accepts either a name from *supported_granularities* or an ISO-8601
        duration (mapped to a 'period' granularity); raises ValueError
        otherwise.
        """
        # Fixed: previously this always checked `timeseries_granularities`,
        # ignoring the supplied list, so select()'s default 'all' was rejected.
        if granularity in supported_granularities:
            query_granularity = granularity
        elif validators.duration(granularity):
            query_granularity = {'type': 'period', 'period': granularity}
        else:
            raise ValueError(
                'Unsupported granularity "{0}"'.format(granularity))
        return query_granularity

    def __validate_intervals__(self, intervals):
        """Validate *intervals* as an ISO-8601 interval; raise ValueError."""
        if not validators.interval(intervals):
            raise ValueError('Unsupported interval "{0}"'.format(intervals))
        return intervals

    def tables(self):
        """List the tables available in the Druid database."""
        return self.plyql.query('SHOW TABLES')

    def processes(self, agent_id, period='P6W'):
        """Return per-process sample counts and last-seen times for an agent."""
        return self.plyql.query('SELECT process_name AS process, '
                                'COUNT() AS count, MAX(__time) AS time '
                                'FROM supervisor WHERE agent_id = "{0}" '
                                'GROUP BY process_name;'
                                .format(agent_id), period)

    def timeseries(self, agent_id, process_name, granularity='none',
                   intervals='P6W', descending=False):
        """Max cpu/mem per bucket for one process on one agent."""
        query_granularity = self.__validate_granularity__(
            granularity, self.timeseries_granularities)
        intervals = self.__validate_intervals__(intervals)
        return self.connection.timeseries(
            datasource='supervisor',
            granularity=query_granularity,
            descending=descending,
            intervals=intervals,
            aggregations={'cpu': doublemax('cpu'), 'mem': longmax('mem')},
            context={'skipEmptyBuckets': 'true'},
            filter=(Dimension('agent_id') == agent_id) &
                   (Dimension('process_name') == process_name))

    def select(self, agent_id, process_name, granularity='all',
               intervals='P6W', descending=True):
        """Raw cpu/mem rows (one per bucket) for one process on one agent."""
        query_granularity = self.__validate_granularity__(
            granularity, self.select_granularities)
        intervals = self.__validate_intervals__(intervals)
        return self.connection.select(
            datasource='supervisor',
            granularity=query_granularity,
            intervals=intervals,
            descending=descending,
            dimensions=['process_name'],
            metrics=['cpu', 'mem'],
            filter=(Dimension('agent_id') == agent_id) &
                   (Dimension('process_name') == process_name),
            paging_spec={'pagingIdentifiers': {}, "threshold": 1}
        )


druid = DruidAccessLayer()
nilq/baby-python
python
import getpass
import smtplib
from email.mime.text import MIMEText
from email.utils import formataddr
import urllib.request, urllib.parse, urllib.error
import ssl
import json
import time
import re
import os
import sys


# Email setting for notification
def Email(sender, password, recipient, emailsub, emailmsg, smtpsever, smtpport):
    """Send a plain-text notification mail over SMTP-SSL.

    Returns True on success, False on failure (the original returned an
    implicit None on failure, which happened to compare unequal to True).
    """
    try:
        msg = MIMEText(emailmsg, 'plain', 'utf-8')
        msg['From'] = formataddr(('Catfood Reminder', sender))
        msg['To'] = formataddr((recipient, recipient))
        msg['Subject'] = emailsub

        server = smtplib.SMTP_SSL(smtpsever, smtpport)
        server.login(sender, password)
        server.sendmail(sender, [recipient], msg.as_string())
        server.quit()
        print('Succeed to send e-mail')
        return True
    except (smtplib.SMTPException, OSError):
        # Was a bare "except:"; narrowed so KeyboardInterrupt etc. propagate.
        print('Failed to send e-mail')
        return False


def MacOsNotification(ostitle, osmsg):
    """Pop a desktop notification on macOS; no-op on other platforms."""
    if sys.platform == 'darwin':
        os.system('osascript -e \'display notification "' + osmsg +
                  '" sound name "default" with title "' + ostitle + '"\'')


def GetDobanTopic(keywords):
    """Poll the Douban group for topics matching ``keywords`` (regex).

    Seen-topic ids and the last poll time persist in record.json so that
    repeated runs only report topics that are new since the previous poll.
    """
    # Load saved topic data; fall back to an empty record on first run or
    # if the file is missing/corrupt.
    try:
        with open('record.json', 'r') as record_file:
            record = json.load(record_file)
        record_topics = record['topics']
        lasttime = record['time']
    except (OSError, ValueError, KeyError):
        record = dict()
        record_topics = dict()
        lasttime = "2020-01-01 00:00:00"

    info = []
    nowtime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    # Request up to 1000 topics from Douban, 100 per page.
    for i in range(0, 10):
        # Ignore SSL certificate errors
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE

        # Request data in JSON format
        count = 100
        start = i * count
        url = ('https://api.douban.com/v2/group/656297/topics?start=' +
               str(start) + '&count=' + str(count))
        header = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36 Edg/79.0.309.56'}
        req = urllib.request.Request(url=url, headers=header)
        nowtime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        try:
            data = json.loads(urllib.request.urlopen(req, context=ctx).read())
        except (urllib.error.URLError, ValueError):
            continue

        # Filtrate concerned topics
        topics = data.get('topics', [])
        number = count - 1
        for number in range(0, count):
            if number >= len(topics):
                break  # short page: fewer topics than requested
            topic = topics[number]
            content = topic['title'] + topic['content']
            if topic['updated'] <= lasttime:
                break
            if re.search(keywords, content, re.I | re.M | re.S) is not None:
                if topic['id'] not in record_topics:
                    info.append(topic['updated'] + '\r\n' + topic['title'] +
                                '\r\n' + topic['share_url'] + '\r\n' + '-' * 50)
                    print(topic['updated'] + '\n' + topic['title'] + '\n' +
                          topic['share_url'] + '\n' + '-' * 50)
                    record_topics[topic['id']] = {
                        'updated': topic['updated'],
                        'title': topic['title'],
                        'link': topic['share_url'],
                    }
        # An early inner break means we reached already-seen topics.
        if number < (count - 1):
            break

    record['time'] = nowtime
    record['topics'] = record_topics
    # Persist only after polling so a crash mid-request cannot truncate
    # record.json (the old code opened it for writing before the requests).
    with open('record.json', 'w') as record_file:
        json.dump(record, record_file, ensure_ascii=False)

    if len(info) == 0:
        print('No new message ' + nowtime)
    else:
        message = str(len(info)) + ' new message(s) ' + nowtime
        print(message)
        MacOsNotification('Catfood Reminder', message)
        Email(SenderAddress, Password, RecipientAddress, message,
              "\r\n".join(info), SMTPSever, SMTPPort)
    return


# Setup e-mail: prompt until a test message can be delivered.
while True:
    # Login in E-mail
    SenderAddress = input('Please input the sender\'s e-mail address: ')
    Password = getpass.getpass('Please input the sender\'s e-mail password: ')
    SMTPSever = input('Please input the sender\'s e-mail SMTP Sever address: ')
    SMTPPort = input('Please input the sender\'s e-mail SMTP Port: ')
    RecipientAddress = input('Please input the recipient\'s e-mail address: ')
    # Test E-mail
    testemail = Email(SenderAddress, Password, RecipientAddress,
                      'TEST MESSAGE', 'THIS IS TEST TEXT', SMTPSever, SMTPPort)
    if testemail:
        print('Valid e-mail setting, start searching...')
        break
    else:
        # Fix: message previously read "Invalid e-mail setting is invalid".
        print('Invalid e-mail setting, please retry')

# Search new topic every 10 min
while True:
    GetDobanTopic(r'(开车).*?(go)')  # change into your target keywords
    print('Next search will start in 10 min')
    time.sleep(600)
nilq/baby-python
python
import unittest

from buscasrc.core.analyzer import Analyzer


class TestAnalyzer(unittest.TestCase):
    """Unit tests for Analyzer tokenization, filtering and positions."""

    def setUp(self):
        self.analyzer = Analyzer()

    def test_prepare_text(self):
        text = "Conan, the barbarian is a great HQ. Conan, #MustRead!"
        self.assertListEqual(self.analyzer.prepare_text(text), [
            ("conan", [0, 4]),
            ("barbarian", [1]),
            ("great", [2]),
            ("hq", [3]),
            ("mustread", [5])])

    def test_execute_before_filters(self):
        text = "Hello! Can i help you? Some things we have: rice, beans," \
               " chicken, ..."
        result = self.analyzer._execute_before_filters(text)
        # Fix: assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEqual(result, "Hello Can i help you Some things we have rice"
                                 " beans chicken ")

    def test_execute_after_filters(self):
        tokens_list = ["After", "all", "we", "will", "resist", "-", "JOHN"]
        result = self.analyzer._execute_after_filters(tokens_list)
        self.assertEqual(result, ["will", "resist", "john"])

    def test_generate_tokens_with_positions(self):
        tokens_list = ["john", "will", "resist", "john"]
        result = self.analyzer._generate_tokens_with_positions(tokens_list)
        self.assertEqual(result, [
            ("john", [0, 3]),
            ("will", [1]),
            ("resist", [2])])

    def test_get_token_positions(self):
        token = "conan"
        tokens_list = ["conan", "barbarian", "axe", "conan", "sword"]
        self.assertEqual(
            self.analyzer._get_token_positions(token, tokens_list), [0, 3])
nilq/baby-python
python
import yaml
from functools import lru_cache

from flask import current_app as app

from atst.utils import getattr_path


class LocalizationInvalidKeyError(Exception):
    """Raised when ``translate`` is asked for a key that does not resolve."""

    def __init__(self, key, variables):
        self.key = key
        self.variables = variables

    def __str__(self):
        # Fix: message previously read "... and variables {variables} with
        # but an error occured" (garbled wording plus a typo).
        return "Requested key {key} with variables {variables} but an error occurred".format(
            key=self.key, variables=self.variables
        )


@lru_cache(maxsize=None)
def _translations_file():
    """Load the YAML translations file once and cache the parsed mapping."""
    file_name = "translations.yaml"

    if app:
        file_name = app.config.get("DEFAULT_TRANSLATIONS_FILE", file_name)

    # Fix: use a context manager — the previous code never closed the file.
    with open(file_name) as f:
        return yaml.safe_load(f)


def all_keys():
    """Return every dotted key path that resolves to a string translation."""
    translations = _translations_file()
    keys = []

    def _recursive_key_lookup(chain):
        results = getattr_path(translations, chain)
        if isinstance(results, str):
            keys.append(chain)
        else:
            # Plain loops instead of list comprehensions used for side effects.
            for result in results:
                _recursive_key_lookup(".".join([chain, result]))

    for key in translations:
        _recursive_key_lookup(key)

    return keys


def translate(key, variables=None):
    """Resolve ``key`` and interpolate ``variables`` into the translation.

    Raises:
        LocalizationInvalidKeyError: if the key does not exist.
    """
    translations = _translations_file()
    value = getattr_path(translations, key)

    if variables is None:
        variables = {}

    if value is None:
        raise LocalizationInvalidKeyError(key, variables)

    return value.format(**variables).replace("\n", "")
nilq/baby-python
python
from .validate import Validate

# Fix: __all__ previously listed the misspelled name "Valdate", which meant
# "from package import *" did not re-export Validate.
__all__ = ["Validate"]
nilq/baby-python
python
"""Application tests."""
nilq/baby-python
python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys

import inselect

REQUIREMENTS = [
    # TODO How to specify OpenCV?
    'cv2>=3.1.0',
    'numpy>=1.11.1,<1.12',
    'Pillow>=3.4.2,<3.5',
    'python-dateutil>=2.6.0,<2.7',
    'pytz>=2016.7',
    # NOTE(review): '>=3.12,<3.2' is self-contradictory under version
    # ordering (3.12 > 3.2); '<3.13' or '<4' was probably intended — confirm.
    'PyYAML>=3.12,<3.2',
    'schematics>=1.1.1,<1.2',
    'scikit-learn>=0.18.1,<0.19',
    'scipy>=0.18.1,<0.19',
    'unicodecsv>=0.14.1,<0.15',
]

SCRIPTS = ('export_metadata', 'ingest', 'read_barcodes', 'save_crops', 'segment')


setup_data = {
    'name': 'inselect',
    'version': inselect.__version__,
    'author': (u'Lawrence Hudson, Alice Heaton, Pieter Holtzhausen, '
               u'Stéfan van der Walt'),
    'author_email': 'l.hudson@nhm.ac.uk',
    'maintainer': 'Lawrence Hudson',
    'maintainer_email': 'l.hudson@nhm.ac.uk',
    'url': 'https://github.com/NaturalHistoryMuseum/inselect/',
    'license': 'Modified BSD',
    'description': inselect.__doc__,
    'long_description': inselect.__doc__,
    'packages': [
        'inselect', 'inselect.gui', 'inselect.gui.plugins',
        'inselect.gui.views', 'inselect.gui.views.boxes',
        'inselect.lib', 'inselect.lib.templates', 'inselect.scripts',
    ],
    'include_package_data': True,
    'test_suite': 'inselect.tests',
    'scripts': ['inselect/scripts/{0}.py'.format(script) for script in SCRIPTS],
    'install_requires': REQUIREMENTS,
    'extras_require': {
        'gui': [
            'ExifRead>=2.1.2', 'humanize>=0.5.1', 'psutil>=5.0.0',
            'PyQt5>=5.6.0'
        ],
        'barcodes': ['gouda>=0.1.13', 'pylibdmtx>=0.1.6', 'pyzbar>=0.1.3'],
        'windows': ['pywin32>=220'],
        'development': ['coveralls>=1.1', 'mock>=2.0.0', 'nose>=1.3.7'],
    },
    'entry_points': {
        'gui_scripts': ['inselect = inselect.gui.app:main'],
        'console_scripts': ['{0} = inselect.scripts.{0}:main'.format(script)
                            for script in SCRIPTS],
    },
    'classifiers': [
        'Development Status :: 4 - Beta',
        'Topic :: Utilities',
        # Fix: a missing trailing comma previously concatenated the two
        # adjacent string literals below into one invalid classifier.
        'Topic :: Scientific/Engineering :: Bio-Informatics',
        'Programming Language :: Python :: 3.5',
    ],
}


def setuptools_setup():
    """setuptools setup"""
    from setuptools import setup
    setup(**setup_data)


def _qt_files(site_packages):
    """Returns a list of tuples (src, dest) of Qt dependencies to be installed.
    Elements are instances of Path. site_packages should be an instance of
    Path to the site-packages directory.

    If we leave cx_Freeze to do its thing then the entirety of PyQt5, Qt5 and
    uic are included in the installer. The only way to avoid bloat is to
    hand-tune which files we include.
    """
    from pathlib import Path
    return [
        # Qt DLLs
        (
            site_packages.joinpath('PyQt5/Qt/bin').joinpath(dep),
            dep
        )
        for dep in ('Qt5Core.dll', 'Qt5Gui.dll', 'Qt5Widgets.dll')
    ] + [
        # Qt plugins
        (
            site_packages.joinpath('PyQt5/Qt/plugins/platforms').joinpath(dep),
            Path('platforms').joinpath(dep)
        )
        for dep in ('qwindows.dll',)
    ] + [
        # PyQt extension modules
        (
            site_packages.joinpath('PyQt5').joinpath(dep),
            Path('PyQt5').joinpath(dep)
        )
        for dep in ('__init__.py', 'Qt.pyd', 'QtCore.pyd', 'QtGui.pyd',
                    'QtWidgets.pyd')
    ]


def cx_setup():
    """cx_Freeze setup. Used for building Windows installers"""
    import scipy
    from pathlib import Path
    from distutils.sysconfig import get_python_lib
    from cx_Freeze import setup, Executable
    from pylibdmtx import pylibdmtx
    from pyzbar import pyzbar

    # Useful paths
    environment_root = Path(sys.executable).parent
    site_packages = Path(get_python_lib())
    project_root = Path(__file__).parent

    # Files as tuples (source, dest)
    include_files = [
        # cx_Freeze breaks pywintypes and pythoncom on Python 3.5
        # https://bitbucket.org/anthony_tuininga/cx_freeze/issues/194/error-with-frozen-executable-using-35-and
        (site_packages.joinpath('win32/lib/pywintypes.py'), 'pywintypes.py'),
        (site_packages.joinpath('pythoncom.py'), 'pythoncom.py'),
        # Binary dependencies that are not detected
        (environment_root.joinpath('Library/bin/mkl_core.dll'), 'mkl_core.dll'),
        (environment_root.joinpath('Library/bin/mkl_intel_thread.dll'),
         'mkl_intel_thread.dll'),
        (environment_root.joinpath('Library/bin/libiomp5md.dll'),
         'libiomp5md.dll'),
        # Stylesheet
        (project_root.joinpath('inselect/gui/inselect.qss'), 'inselect.qss'),
    ] + [
        # DLLs that are not detected because they are loaded by ctypes
        (dep._name, Path(dep._name).name)
        for dep in pylibdmtx.EXTERNAL_DEPENDENCIES + pyzbar.EXTERNAL_DEPENDENCIES
    ] + _qt_files(site_packages)

    # Convert instances of Path to strs
    include_files = [(str(source), str(dest)) for source, dest in include_files]

    # Directories as strings
    include_files += [
        # Fixes scipy freeze
        # http://stackoverflow.com/a/32822431/1773758
        str(Path(scipy.__file__).parent),
    ]

    # Packages to exclude.
    exclude_packages = [
        str(p.relative_to(site_packages)).replace('\\', '.')
        for p in site_packages.rglob('*/tests')
    ]

    setup(
        name=setup_data['name'],
        version=setup_data['version'],
        options={
            'build_exe': {
                'packages': setup_data.get('packages', []) + [
                    'urllib', 'sklearn.neighbors', 'win32com.gen_py',
                    'win32timezone',
                ],
                'excludes': [
                    # '_bz2',  # Required by sklearn
                    '_decimal', '_elementtree', '_hashlib', '_lzma',
                    '_ssl', 'curses', 'distutils', 'email', 'http',
                    'lib2to3', 'mock', 'nose', 'PyQt5',
                    # 'pydoc',  # Required by sklearn
                    'tcl', 'Tkinter', 'ttk', 'Tkconstants',
                    # 'unittest',  # Required by numpy.core.multiarray
                    'win32com.HTML', 'win32com.test', 'win32evtlog',
                    'win32pdh', 'win32trace', 'win32ui', 'win32wnet',
                    'xml', 'xmlrpc', 'inselect.tests',
                ] + exclude_packages,
                'includes': [],
                'include_files': include_files,
                'include_msvcr': True,
                'optimize': 2,
            },
            'bdist_msi': {
                'upgrade_code': '{fe2ed61d-cd5e-45bb-9d16-146f725e522f}'
            }
        },
        executables=[
            Executable(
                script='inselect/scripts/inselect.py',
                targetName='inselect.exe',
                icon='icons/inselect.ico',
                base='Win32GUI',
                shortcutName='Inselect',
                # See http://stackoverflow.com/a/15736406
                shortcutDir='ProgramMenuFolder'
            )
        ] + [
            Executable(
                script='inselect/scripts/{0}.py'.format(script),
                targetName='{0}.exe'.format(script),
                icon='icons/inselect.ico',
                base='Console'
            )
            for script in SCRIPTS
        ],
    )


if (3, 5) <= sys.version_info:
    if 'bdist_msi' in sys.argv:
        cx_setup()
    else:
        setuptools_setup()
else:
    sys.exit('Only Python >= 3.5 is supported')
nilq/baby-python
python
# get camera list
import logging
from datetime import datetime
from typing import Any
from typing import List

import requests

from protect_archiver.dataclasses import Camera


def get_camera_list(session: Any, connected: bool = True) -> List[Camera]:
    """Return the cameras known to the Protect controller behind ``session``.

    ``connected`` is accepted for interface compatibility but is currently
    unused — the full camera list is always returned.
    Returns an empty list on a non-200 API response.
    """
    cameras_uri = f"{session.authority}{session.base_path}/cameras"
    response = requests.get(
        cameras_uri,
        cookies={"TOKEN": session.get_api_token()},
        verify=session.verify_ssl,
    )
    if response.status_code != 200:
        # Fix: report failures through logging (like the rest of this module)
        # instead of print().
        logging.error(f"Error while loading camera list: {response.status_code}")
        return []

    logging.info(f"Successfully retrieved data from {cameras_uri}")
    cameras = response.json()

    camera_list = []
    for camera in cameras:
        camera_data = Camera(id=camera["id"], name=camera["name"], recording_start=0)
        if camera["stats"]["video"]["recordingStart"]:
            # NOTE(review): utcfromtimestamp returns a naive datetime and is
            # deprecated since Python 3.12 — consider fromtimestamp(..., tz=UTC).
            camera_data.recording_start = datetime.utcfromtimestamp(
                camera["stats"]["video"]["recordingStart"] / 1000
            )
        camera_list.append(camera_data)

    logging.info(
        "Cameras found:\n{}".format(
            "\n".join(f"- {camera.name} ({camera.id})" for camera in camera_list)
        )
    )

    return camera_list
nilq/baby-python
python
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 26 15:15:55 2018

@author: Madhur Kashyap 2016EEZ8350
"""
import os
import sys
import logging
import numpy as np
from functools import partial
from keras.optimizers import Adadelta
from sklearn.metrics import confusion_matrix

# Make the sibling "code" directory importable before loading project modules.
prog = os.path.basename(__file__)
codedir = os.path.join(os.path.dirname(__file__), "..", "code")
sys.path.append(codedir)

from Utils import *
from PlotUtils import *
from SpeechCorpus import Timit
from AcousticModels import *
from TrainUtils import train_model, weighted_categorical_crossentropy
from AcousticDataGenerator import AcousticDataGenerator

# Corpus root comes from argv[1] when given, otherwise a local default path.
if len(sys.argv) > 1:
    corpus = Timit(root=sys.argv[1])
else:
    corpus = Timit(root='C:/Users/nxa17016/ML/pyml/RNN/assignment3/dataset')
corpus.split_validation()

# Mini-batch generator over MFCC features with phoneme targets.
adg = AcousticDataGenerator(
    corpus=corpus,
    mbatch_size=32,
    mfcc_win=0.0125,
    mfcc_step=0.005,
    ce_encoding_mode='best',
    mode='phoneme',
    model_silence=True,
)
adg.fit_train(n_samples=1000)

# Bidirectional LSTM acoustic model, trained for a single epoch.
model = bidi_lstm(
    input_dim=adg.feature_dim,
    units=20,
    output_dim=adg.n_classes,
    batchnorm=True,
    after_dropout=0.0,
)
train_model(
    model,
    adg.train_generator(),
    adg.valid_generator(),
    'bidi_gru_20',
    epochs=1,
    steps_per_epoch=adg.nb_train - 100,
    validation_steps=adg.nb_valid - 10,
    verbose=1,
    save_period=0,
    optimizer=Adadelta(),
    report_stats=True,
    class_names=list(adg.outmap[0].keys()),
)
python
from connect4 import Connect4Board
from connect4 import GameState
from connect4 import Player


def _run_case(coords, label):
    """Fill ``coords`` for each player in turn and verify check_win.

    Returns (ok, error_message, board); ok is True when both players'
    placements are detected as wins.
    """
    for player, tag in ((Player.PLAYER_1, "player 1"),
                        (Player.PLAYER_2, "player 2")):
        game = Connect4Board()
        for r, c in coords:
            game.board[r][c] = player
        if not game.check_win(player):
            return (False,
                    "Failed {0} win condition for {1}".format(label, tag),
                    game)
    return True, None, None


def test_win_condition():
    """Exercise check_win for every possible 4-in-a-row placement.

    Returns (ok, error_message, board) like the original implementation.
    """
    # check vertical
    for row in range(6 - 3):
        for col in range(7):
            ok, err, game = _run_case(
                [(row + i, col) for i in range(4)], "vertical")
            if not ok:
                return ok, err, game

    # check horizontal
    for row in range(6):
        for col in range(7 - 3):
            ok, err, game = _run_case(
                [(row, col + i) for i in range(4)], "horizontal")
            if not ok:
                return ok, err, game

    # check diagonal (down-right)
    for row in range(6 - 3):
        for col in range(7 - 3):
            ok, err, game = _run_case(
                [(row + i, col + i) for i in range(4)], "diagonal /")
            if not ok:
                return ok, err, game

    # check diagonal (down-left).  Fix: the original message used "\ " which
    # is an invalid escape sequence (DeprecationWarning); "\\" yields the
    # same backslash explicitly.
    for row in range(6 - 3):
        for col in range(3, 7):
            ok, err, game = _run_case(
                [(row + i, col - i) for i in range(4)], "diagonal \\")
            if not ok:
                return ok, err, game

    return True, None, None


if __name__ == "__main__":
    all_tests_pass = True

    test_win_condition_pass, error, game = test_win_condition()
    if not test_win_condition_pass:
        print("Failed win condition test! Error:", error)
        game.print_board()
        all_tests_pass = False

    if all_tests_pass:
        print("All tests pass!")
nilq/baby-python
python
import urllib import os import threading import time import errno from functools import partial import weakref import base64 import json import socket from socketserver import ThreadingMixIn from http.server import SimpleHTTPRequestHandler, HTTPServer from urllib.parse import unquote from urllib.parse import urlparse from urllib.parse import parse_qs """ HTTP Server interface """ class LVRequestHandler(SimpleHTTPRequestHandler, object): def __init__(self, viewer_weakref, *args, **kwargs): #Used with partial() to provide the viewer object try: self._lv = viewer_weakref super(LVRequestHandler, self).__init__(*args, **kwargs) except (IOError) as e: pass #Just ignore IO errors on server if e.errno == errno.EPIPE: # EPIPE error, ignore pass elif e.errno == errno.EPROTOTYPE: # MacOS "Protocol wrong type for socket" error, ignore pass else: raise e def serveResponse(self, data, datatype): try: #Serve provided data, with error check for SIGPIPE (broken connection) self.send_response(200) self.send_header('Content-type', datatype) self.send_header('Access-Control-Allow-Origin', '*') self.send_header('x-colab-notebook-cache-control', 'no-cache') #Colab: disable offline access cache self.end_headers() if data: self.wfile.write(data) #This specific error sometimes occurs on windows, ConnectionError is the base class and covers a few more #except (IOError,ConnectionAbortedError) as e: # if isinstance(e,ConnectionAbortedError): except (IOError,ConnectionError) as e: if isinstance(e,ConnectionError): pass elif e.errno == errno.EPIPE: # EPIPE error, ignore pass else: raise e def do_HEAD(self): self.serveResponse(None, 'text/html') def do_POST(self): #Always interpret post data as commands #(can perform other actions too based on self.path later if we want) data_string = self.rfile.read(int(self.headers['Content-Length'])) self.serveResponse(b'', 'text/plain') #cmds = str(data_string, 'utf-8') #python3 only try: #Python3 from urllib.parse import unquote data_string = 
unquote(data_string) except: #Python2 from urllib import unquote data_string = unquote(data_string).decode('utf8') cmds = str(data_string.decode('utf-8')) #Run viewer commands self._execute(cmds) def do_GET(self): lv = self._get_viewer() parsed = urlparse(self.path) query = parse_qs(parsed.query) def img_response(): resp = None if 'width' in query and 'height' in query: resp = lv.jpeg(resolution=(int(query['width'][0]), int(query['height'][0]))) elif 'width' in query: resp = lv.jpeg(resolution=(int(query['width'][0]), 0)) else: resp = lv.jpeg() #Ensure the response is valid before serving if resp is not None: self.serveResponse(resp, 'image/jpeg') if self.path.find('image') > 0: img_response() elif self.path.find('command=') > 0: pos1 = self.path.find('=') pos2 = self.path.find('?') if pos2 < 0: pos2 = len(self.path) cmds = unquote(self.path[pos1+1:pos2]) #Run viewer commands self._execute(cmds) #Serve image or just respond 200 if self.path.find('icommand=') > 0: img_response() else: self.serveResponse(b'', 'text/plain') elif self.path.find('getstate') > 0: state = lv.app.getState() self.serveResponse(bytearray(state, 'utf-8'), 'text/plain; charset=utf-8') #self.serveResponse(bytearray(state, 'utf-8'), 'text/plain') elif self.path.find('connect') > 0: if 'url' in query: #Save first valid connection URL on the viewer url = query['url'][0] if len(lv._url) == 0: lv._url = url uid = id(lv) self.serveResponse(bytearray(str(uid), 'utf-8'), 'text/plain; charset=utf-8') elif self.path.find('key=') > 0: pos2 = self.path.find('&') cmds = unquote(self.path[1:pos2]) lv.commands('key ' + cmds, True) self.serveResponse(b'', 'text/plain') elif self.path.find('mouse=') > 0: pos2 = self.path.find('&') cmds = unquote(self.path[1:pos2]) lv.commands('mouse ' + cmds, True) self.serveResponse(b'', 'text/plain') elif len(self.path) <= 1: #Root requested, returns interactive view w = lv.control.Window(align=None, wrapper=None) code = lv.control.show(True, filename="") 
self.serveResponse(bytearray(code, 'utf-8'), 'text/html; charset=utf-8') else: return SimpleHTTPRequestHandler.do_GET(self) #Serve files from lavavu html dir def translate_path(self, path): lv = self._get_viewer() if not os.path.exists(path): #print(' - not found in cwd') if path[0] == '/': path = path[1:] path = os.path.join(lv.htmlpath, path) if os.path.exists(path) and os.path.isfile(path): #print(' - found in htmlpath') return path else: #print(' - not found in htmlpath') return SimpleHTTPRequestHandler.translate_path(self, self.path) else: return SimpleHTTPRequestHandler.translate_path(self, path) #Stifle log output def log_message(self, format, *args): return def _get_viewer(self): #Get from weak reference, if deleted raise exception lv = self._lv() if not lv: self._closing = True raise(Exception("Viewer not found")) return lv def _execute(self, cmds): lv = self._get_viewer() if len(cmds) and cmds[0] == '_': #base64 encoded commands or JSON state cmds = str(base64.b64decode(cmds).decode('utf-8')) #cmds = str(base64.b64decode(cmds), 'utf-8') #Object to select can be provided in preceding angle brackets selobj = None if cmds[0] == '<': pos = cmds.find('>') selobj = lv.objects[cmds[1:pos]] cmds = cmds[pos+1:] #Execute commands via python API by preceding with '.' 
done = False if cmds[0] == '.': attr = cmds.split()[0][1:] pos = cmds.find(' ') params = cmds[pos+1:] if selobj: #Call on Object func = getattr(selobj, attr) if func and callable(func): func(params) done = True else: #Call on Viewer func = getattr(lv, attr) if func and callable(func): func(params) done = True elif cmds[0] == '$': #Requests prefixed by '$' are sent #from property collection controls #format is $ID KEY VALUE # - ID is the python id() of the properties object # All properties collections are stored on their parent # object using this id in the _collections dict # - KEY is the property name key to set # - VALUE is a json string containing the value to set S = cmds.split() target = S[0][1:] if target in lv._collections: #Get from _collections by id (weakref) props = lv._collections[target]() props[S[1]] = json.loads(S[2]) #Check for callback - if provided, call with updated props func = getattr(props, 'callback') if func and callable(func): func(props) #Default, call via lv.commands() scripting API if not done: if selobj: selobj.select() lv.commands(cmds) #Optional thread per request version: class ThreadingHTTPServer(ThreadingMixIn, HTTPServer): pass """ HTTP Server manager class """ class Server(threading.Thread): def __init__(self, viewer, port=None, ipv6=False, retries=100): self.host = 0 if port is None: port = 8080 self._closing = False #Allow viewer to be garbage collected self.viewer = weakref.ref(viewer) self.port = port self.retries = retries self.maxretries = retries self.ipv6 = ipv6 super(Server, self).__init__() self.daemon = True #Place in background so will be closed on program exit self._cv = threading.Condition() def handle(self): try: httpd.handle_request() except (socket.exception) as e: #print(str(e)) pass def run(self): httpd = None HTTPServer.allow_reuse_address = False try: # We "partially apply" our first argument to get the viewer object into LVRequestHandler handler = partial(LVRequestHandler, self.viewer) if self.ipv6: 
HTTPServer.address_family = socket.AF_INET6 hosts = ['::', 'localhost', '::1'] host = hosts[self.host] #httpd = HTTPServer((host, self.port), handler) httpd = ThreadingHTTPServer((host, self.port), handler) else: HTTPServer.address_family = socket.AF_INET hosts = ['0.0.0.0', 'localhost', '127.0.0.1'] host = hosts[self.host] #httpd = HTTPServer((host, self.port), handler) httpd = ThreadingHTTPServer(('0.0.0.0', self.port), handler) #print("Server running on host %s port %s" % (host, self.port)) #Sync with starting thread here to ensure server thread has initialised before it continues with self._cv: self._cv.notifyAll() # Handle requests #print("Using port: ", self.port) # A timeout is needed for server to check periodically if closing httpd.timeout = 0.05 #50 millisecond timeout while self.viewer() is not None and not self._closing: httpd.handle_request() except (Exception) as e: self.retries -= 1 if self.retries < 1: print("Failed to start server, max retries reached") #Try another port if e.errno == errno.EADDRINUSE: #98 self.port += 1 #Try again self.run() elif e.errno == errno.EAFNOSUPPORT: #97 : Address family not supported by protocol #Try next host name/address self.host += 1 if self.host > 2: #Try again without ipv6? 
if self.ipv6: self.ipv6 = False else: self.ipv6 = True self.host = 0 #Try again self.run() else: print("Server start failed: ",e, e.errno, self.port) def serve(viewer, port=None, ipv6=False, retries=100): s = Server(viewer, port, ipv6, retries) #Start the thread and wait for it to finish initialising with s._cv: s.start() s._cv.wait() return s #Ignore SIGPIPE altogether (does not apply on windows) import sys if sys.platform != 'win32': from signal import signal, SIGPIPE, SIG_IGN signal(SIGPIPE, SIG_IGN) """ Main entry point - run server and open browser interface """ if __name__ == '__main__': import lavavu lv = lavavu.Viewer() #lv.animate(1) #Required to show viewer window and handle mouse/keyboard events there too lv.browser() lv._thread.join() #Wait for server to quit
nilq/baby-python
python
from twisted.trial import unittest
from twisted.internet import defer

from nodeset.core import config
from nodeset.common.twistedapi import NodeSetAppOptions


class ConfigurationTest(unittest.TestCase):
    """Verifies that config.Configurator acts as a process-wide singleton
    populated from parsed command-line options."""

    def setUp(self):
        cfg = NodeSetAppOptions()
        cfg.parseOptions(['-n', '--listen', 'localhost:4333',
                          '--dispatcher-url', 'pbu://localhost:5333/dispatcher'])
        self.config = config.Configurator()
        self.config._config = cfg

    # Fix throughout: assertEqual instead of assertTrue(a == b) so failures
    # report both values instead of just "False is not true".
    def testListenParam(self):
        self.assertEqual(self.config['listen'], 'localhost:4333')

    def testDispatcherParam(self):
        self.assertEqual(self.config['dispatcher-url'],
                         'pbu://localhost:5333/dispatcher')

    def testAnotherInstance(self):
        # A second Configurator() must see the same state (singleton).
        c = config.Configurator()
        self.assertEqual(c['listen'], 'localhost:4333')

    def testUpdate(self):
        self.config['new_option'] = 'value'
        self.assertEqual(self.config['new_option'], 'value')

    def testAnotherRoutine(self):
        def anotherRoutine(d):
            # Changes made before the callback fired are visible here.
            c = config.Configurator()
            self.assertEqual(c['listen'], 'host.name.com:4111')

        self.config['listen'] = 'host.name.com:4111'

        d = defer.Deferred()
        d.addCallback(anotherRoutine)
        d.callback(None)

    def testPassingAsArgument(self):
        def routine(conf):
            c = config.Configurator()
            self.assertEqual(c, conf)

        d = defer.Deferred()
        d.addCallback(routine)
        d.callback(config.Configurator())

    def tearDown(self):
        del self.config
python
# Allow-list of client IP addresses. Consumers of this constant are not
# visible in this file — presumably used to filter inbound connections;
# confirm against the importing module before extending.
acceptable_addrs = ["192.168.0.16"]
nilq/baby-python
python
# Generated by Django 2.1.5 on 2019-01-27 00:11 from django.db import migrations, models import django.db.models.deletion import simplemde.fields import uuid class Migration(migrations.Migration): initial = True dependencies = [] operations = [ migrations.CreateModel( name="Event", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ( "uuid", models.UUIDField(default=uuid.uuid4, editable=False, unique=True), ), ("title", models.CharField(max_length=200)), ( "description", simplemde.fields.SimpleMDEField( blank=True, max_length=2000, null=True ), ), ("invitee_capacity", models.PositiveIntegerField(default=0)), ("event_day", models.DateField()), ("initial_hour", models.TimeField()), ("end_hour", models.TimeField()), ("place_name", models.CharField(max_length=200)), ("open_street_map_url", models.URLField()), ], ), migrations.CreateModel( name="Invitee", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ( "uuid", models.UUIDField(default=uuid.uuid4, editable=False, unique=True), ), ("enrolled_at", models.DateTimeField(auto_now_add=True)), ("cancelled", models.BooleanField(default=False)), ( "event", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="events.Event" ), ), ], ), ]
nilq/baby-python
python
from .operator_bot import main

# Guard so that merely importing this module (e.g. by tooling or tests) does
# not start the bot; "python -m <package>" still executes main().
if __name__ == "__main__":
    main()
nilq/baby-python
python
from episerver.vanir.configuration import Configuration
from azure.common.client_factory import get_client_from_json_dict
from azure.mgmt.resource import ResourceManagementClient


class AddEnvironmentCommand:
    """Creates (or updates) an Azure resource group for a new environment."""

    def __init__(self):
        # Build an ARM client from the locally stored JSON configuration.
        settings = Configuration().get_configuration()
        self.resource_client = get_client_from_json_dict(
            ResourceManagementClient, settings)

    def execute(self, args):
        """Provision the resource group ``args.name`` in ``args.location``."""
        parameters = {"location": f"{args.location}"}
        group = self.resource_client.resource_groups.create_or_update(
            args.name, parameters)
        print(f"Provisioned resource group {group.name} "
              f"in the {group.location} region")
nilq/baby-python
python
from tensorize import *

# Network descriptions for Inception-v4 / Inception-ResNet (Szegedy et al.,
# 2016) expressed in the tensorize DSL. NOTE: ``xrange`` pins this file to
# Python 2.


class InceptionResnetV1(Model):
    """Inception-ResNet-v1 graph: stem, 4xA, Reduction-A, 7xB, Reduction-B,
    3xC, average pool, dropout, softmax head."""

    def inference(self, inputs, output):
        # BUG FIX: original called ``stem(inputs, outputs)`` — ``outputs`` is
        # undefined in this scope (the parameter is named ``output``).
        stem(inputs, output)
        for x in xrange(4):
            inceptionA()
        reductionA()
        for x in xrange(7):
            inceptionB()
        reductionB()
        for x in xrange(3):
            inceptionC()
        AveragePooling()
        Dropout(0.8)
        CategoricalPredictionOutput(output)

    def train(self, outputs):
        CategoricalCrossEntropy()
        CategoricalAccuracy(outputs)
        GradientDescentOptimizer()


class InceptionResnetV2(Model):
    """Inception-ResNet-v2 graph; same block schedule as v1 (the per-block
    filter counts would differ in a full implementation)."""

    def inference(self, inputs, output):
        # BUG FIX: same undefined-name fix as InceptionResnetV1.inference.
        stem(inputs, output)
        for x in xrange(4):
            inceptionA()
        reductionA()
        for x in xrange(7):
            inceptionB()
        reductionB()
        for x in xrange(3):
            inceptionC()
        AveragePooling()
        Dropout(0.8)
        CategoricalPredictionOutput(output)

    def train(self, outputs):
        CategoricalCrossEntropy()
        CategoricalAccuracy(outputs)
        GradientDescentOptimizer()


def stem(inputs, outputs):
    """Input stem: 3 plain convolutions followed by three parallel
    split/concat stages. ``outputs`` is accepted but unused (kept for
    backward compatibility with existing callers)."""
    BatchImageInput(inputs)
    Convolution3x3(filters=32)
    Convolution3x3(filters=32)
    Convolution3x3(filters=64)
    with ParallelBlock() as parallel:
        with parallel:
            MaxPooling2D()
        with parallel:
            Convolution3x3(filters=64)
    FilterConcat()
    with ParallelBlock() as parallel:
        with parallel:
            Convolution1x1(filters=64)
            Convolution3x3(filters=96)
        with parallel:
            Convolution1x1(filters=64)
            Convolution2D([7, 1], filters=64)
            Convolution2D([1, 7], filters=64)
            Convolution3x3(filters=96)
    FilterConcat()
    with ParallelBlock() as block:
        with block:
            MaxPooling2D()
        with block:
            Convolution3x3(filters=64)
    FilterConcat()


def inceptionA():
    """Inception-A block (35x35 grid): pool + three conv branches."""
    with ParallelBlock() as parallel:
        with parallel:
            AveragePooling()
            Convolution1x1(filters=96)
        with parallel:
            Convolution1x1(filters=96)
        with parallel:
            Convolution1x1(filters=64)
            Convolution3x3(filters=96)
        with parallel:
            Convolution1x1(filters=64)
            Convolution3x3(filters=96)
            Convolution3x3(filters=96)
    FilterConcat()


def inceptionB():
    """Inception-B block (17x17 grid) with factorized 1x7/7x1 convolutions."""
    with ParallelBlock() as parallel:
        with parallel:
            AveragePooling()
            Convolution1x1(filters=128)
        with parallel:
            Convolution1x1(filters=384)
        with parallel:
            Convolution1x1(filters=192)
            Convolution2D([1, 7], filters=224)
            # NOTE(review): the Inception-v4 paper uses a 7x1 convolution for
            # this 256-filter layer; [1, 7] here may be a transcription typo.
            Convolution2D([1, 7], filters=256)
        with parallel:
            Convolution1x1(filters=192)
            Convolution2D([1, 7], filters=192)
            Convolution2D([7, 1], filters=224)
            Convolution2D([1, 7], filters=224)
            Convolution2D([7, 1], filters=256)
    FilterConcat()


def inceptionC():
    """Inception-C block (8x8 grid) with 1x3/3x1 splits."""
    with ParallelBlock() as parallel:
        with parallel:
            AveragePooling()
            Convolution1x1(filters=256)
        with parallel:
            Convolution1x1(filters=256)
        with parallel:
            Convolution1x1(filters=384)
            with ParallelBlock() as parallel_inner:
                with parallel_inner:
                    Convolution2D([1, 3], filters=256)
                with parallel_inner:
                    Convolution2D([3, 1], filters=256)
        with parallel:
            # NOTE(review): the paper's fourth branch is 1x1(384) -> 1x3(448)
            # -> 3x1(512) followed by a 1x3/3x1 split; this simplified branch
            # deviates from that — confirm it is intentional.
            Convolution1x1(filters=384)
            Convolution2D([1, 3], filters=384)
            Convolution2D([3, 1], filters=512)
    FilterConcat()


def reduceA(n, l, k, m):
    """Reduction-A (35x35 -> 17x17); n/l/k/m are the variant-specific
    filter counts from Table 1 of the Inception-v4 paper."""
    with ParallelBlock() as parallel:
        with parallel:
            MaxPooling2D([3, 3])
        with parallel:
            Convolution3x3(n)
        with parallel:
            Convolution1x1(filters=k)
            Convolution3x3(filters=l)
            Convolution3x3(filters=m)
    FilterConcat()


def reduceB():
    """Reduction-B (17x17 -> 8x8)."""
    with ParallelBlock() as parallel:
        with parallel:
            MaxPooling2D([3, 3], stride=2)
        with parallel:
            Convolution1x1(192)
            Convolution3x3(192)
        with parallel:
            Convolution1x1(filters=256)
            Convolution2D([1, 7], filters=256)
            Convolution2D([7, 1], filters=320)
            Convolution3x3(filters=320, stride=2)
    FilterConcat()


def reductionA(n=384, l=224, k=192, m=256):
    """BUG FIX: the inference graphs call ``reductionA()``, which was never
    defined (only ``reduceA`` with required args exists), so building either
    model raised NameError. Defaults are the Inception-v4 values (k=192,
    l=224, m=256, n=384); the ResNet variants use different counts —
    override as needed."""
    reduceA(n, l, k, m)


def reductionB():
    """BUG FIX: companion wrapper for the undefined ``reductionB()`` call;
    delegates to ``reduceB``."""
    reduceB()


def inceptionResnetA():
    """Inception-ResNet-A: residual block with three parallel conv paths,
    a linear 1x1 projection, and an additive shortcut."""
    RectifiedLinearUnit()
    with ParallelBlock() as parallel:
        with parallel:
            with ParallelBlock() as parallel_inner:
                with parallel_inner:
                    Convolution1x1(32)
                with parallel_inner:
                    Convolution1x1(32)
                    Convolution3x3(32)
                with parallel_inner:
                    Convolution1x1(32)
                    Convolution3x3(32)
                    Convolution3x3(32)
            Convolution1x1(filters=256)
    Sum()


def inceptionResnetB():
    """Inception-ResNet-B: residual block with factorized 1x7/7x1 path and
    an additive shortcut."""
    RectifiedLinearUnit()
    with ParallelBlock() as parallel:
        with parallel:
            with ParallelBlock() as parallel_inner:
                with parallel_inner:
                    Convolution1x1(128)
                with parallel_inner:
                    Convolution1x1(128)
                    Convolution2D([1, 7], filters=128)
                    Convolution2D([7, 1], filters=128)
            Convolution1x1(filters=896)
    Sum()
nilq/baby-python
python
# -*- coding: utf-8 -*- import trello.checklist as checklist class Checklist(checklist.Checklist): pass
nilq/baby-python
python
import os
import unittest

from skidl import *


class UnitTestsElectronicDesignAutomationSkidlExamples(unittest.TestCase):
    """Smoke tests for SKiDL examples.

    Most tests generate a small throwaway Python script with ``file.write``
    and execute it via ``os.system`` so each example runs in a fresh
    interpreter. The scripts are left on disk for inspection.
    """

    def test_introduction(self):
        # Classic two-resistor voltage divider, then netlist generation.
        print("test_introduction")

        # Create input & output voltages and ground reference.
        vin, vout, gnd = Net('VI'), Net('VO'), Net('GND')

        # Create two resistors.
        r1, r2 = 2 * Part("Device", 'R', TEMPLATE, footprint='Resistor_SMD.pretty:R_0805_2012Metric')
        r1.value = '1K'  # Set upper resistor value.
        r2.value = '500'  # Set lower resistor value.

        # Connect the nets and resistors.
        vin += r1[1]  # Connect the input to the upper resistor.
        gnd += r2[2]  # Connect the lower resistor to ground.
        vout += r1[2], r2[1]  # Output comes from the connection of the two resistors.

        # Or you could do it with a single line of code:
        # vin && r1 && vout && r2 && gnd

        # Output the netlist to a file.
        generate_netlist()

    def test_finding_parts(self):
        # Demonstrates part/footprint search queries (plain, regex, quoted).
        print("test_finding_parts")
        with open("finding_parts.py", "w") as file:
            file.write("from skidl import * \n\n")
            file.write("search('opamp') \n")
            file.write("search('^lm386$') \n")
            file.write("search('opamp low-noise dip-8') \n")
            file.write("search('opamp (low-noise|dip-8)') \n")
            # NOTE(review): the generated line opens a single quote that is
            # never closed — the emitted script looks syntactically invalid.
            file.write("search('opamp " + '"' + "high performance" + '"' + ") \n")
            file.write("show('Amplifier_Audio', 'lm386') \n")
            file.write("show('Amplifier_Audio', 'lm38') \n")
            file.write("search_footprints('QFN-48') \n")
        os.system("python finding_parts.py")

    def test_instantiating_parts(self):
        # Shows part creation, value/ref attributes, and auto ref assignment.
        print("test_instantiating_parts")
        with open('instantiating_parts.py', 'w') as file:
            file.write("from skidl import * \n\n")
            file.write("resistor = Part('Device','R') \n")
            file.write("resistor.value = '1K' \n")
            file.write("print('resistor.value : ' + resistor.value) \n")
            file.write("resistor = Part('Device','R', value='2K') \n")
            file.write("print('resistor.value : ' + resistor.value) \n")
            file.write("print('resistor.value : ' + resistor.value) \n")
            file.write("print('resistor.ref : ' + resistor.ref) \n")
            file.write("resistor.ref = 'R5' \n")
            file.write("print('resistor.ref : ' + resistor.ref) \n")
            file.write("another_res = Part('Device','R') \n")
            file.write("print('another_res.ref : ' + another_res.ref) \n")
            file.write("resistor.ref = 'R1' \n")
            file.write("print('resistor.ref : ' + resistor.ref) \n")
        os.system("python instantiating_parts.py")

    def test_connecting_pins(self):
        # Builds the divider pin-by-pin, runs ERC twice, then exports.
        print("test_connecting_pins")
        with open('connecting_pins.py', 'w') as file:
            file.write("from skidl import * \n\n")
            file.write("rup = Part('Device', 'R', value='1K', footprint='Resistor_SMD.pretty:R_0805_2012Metric') \n")
            file.write("rlow = Part('Device', 'R', value='500', footprint='Resistor_SMD.pretty:R_0805_2012Metric') \n")
            file.write("print('rup.ref : ' + rup.ref) \n")
            file.write("print('rlow.ref : ' + rlow.ref) \n")
            file.write("print('rup.value : ' + rup.value) \n")
            # NOTE(review): label says rup but the value printed is rlow's.
            file.write("print('rup.value : ' + rlow.value) \n")
            file.write("v_in = Net('VIN') \n")
            file.write("print('v_in.name : ' + str(v_in.name)) \n")
            file.write("rup[1] += v_in \n")
            file.write("print('rup[1].net : ' + str(rup[1].net)) \n")
            file.write("gnd = Net('GND') \n")
            file.write("rlow[1] += gnd \n")
            file.write("print('rlow[1].net : ' + str(rlow[1].net)) \n")
            file.write("v_out = Net('VO') \n")
            file.write("v_out += rup[2], rlow[2] \n")
            file.write("print('rup[2].net : ' + str(rup[2].net)) \n")
            file.write("print('rlow[2].net : ' + str(rlow[2].net)) \n")
            file.write("rup[2] += rlow[2] \n")
            file.write("v_out = Net('VO') \n")
            file.write("v_out += rlow[2] \n")
            file.write("print('rup[2].net : ' + str(rup[2].net)) \n")
            file.write("print('rlow[2].net : ' + str(rlow[2].net)) \n")
            file.write("ERC() \n")
            # Second ERC run with the dangling nets excluded from checking.
            file.write("v_in.do_erc = False \n")
            file.write("gnd.do_erc = False \n")
            file.write("ERC() \n")
            file.write("generate_netlist() \n")
            file.write("generate_xml() \n")
        os.system("python connecting_pins.py")

    def test_searching_transistor_npn(self):
        print("test_searching_transistor_npn")
        with open("searching_transistor_npn.py", "w") as file:
            file.write("from skidl import * \n\n")
            file.write("search('transistor (npn)') \n")
        os.system("python searching_transistor_npn.py")

    def test_searching_bridge_rectifier(self):
        print("test_searching_bridge_rectifier")
        with open("test_searching_bridge_rectifier.py", "w") as file:
            file.write("from skidl import * \n\n")
            file.write("search('bridge rectifier') \n")
        os.system("python test_searching_bridge_rectifier.py")

    def test_searching_optocoupler(self):
        print("test_searching_optocoupler")
        with open("test_searching_optocoupler.py", "w") as file:
            file.write("from skidl import * \n\n")
            file.write("search('optocoupler') \n")
        os.system("python test_searching_optocoupler.py")

    def test_searching_resistor(self):
        print("test_searching_resistor")
        with open("test_searching_resistor.py", "w") as file:
            file.write("from skidl import * \n\n")
            file.write("search('resistor') \n")
        os.system("python test_searching_resistor.py")

    def test_searching_terminal_block(self):
        # NOTE(review): method name says "terminal block" but the query is
        # 'analog' — likely copy-paste drift; confirm intent.
        print("test_searching_terminal_block")
        with open("test_searching_terminal_block.py", "w") as file:
            file.write("from skidl import * \n\n")
            file.write("search('analog') \n")
        os.system("python test_searching_terminal_block.py")

    def test_searching_footprint(self):
        print("test_searching_footprint")
        with open("test_searching_footprint.py", "w") as file:
            file.write("from skidl import * \n\n")
            file.write("search('footprint') \n")
        os.system("python test_searching_footprint.py")

    def test_searching_footprints_of_one_resistor(self):
        print("test_searching_footprints_of_one_resistor")
        with open("test_searching_footprints_of_one_resistor.py", "w") as file:
            file.write("from skidl import * \n\n")
            file.write("search_footprints('R') \n")
        os.system("python test_searching_footprints_of_one_resistor.py")

    def test_searching_footprints_of_one_transistor(self):
        print("test_searching_footprints_of_one_transistor")
        with open("test_searching_footprints_of_one_transistor.py", "w") as file:
            file.write("from skidl import * \n\n")
            file.write("search_footprints('transistor') \n")
        os.system("python test_searching_footprints_of_one_transistor.py")

    def test_searching_footprints_of_one_optocoupler(self):
        print("test_searching_footprints_of_one_optocoupler")
        with open("test_searching_footprints_of_one_optocoupler.py", "w") as file:
            file.write("from skidl import * \n\n")
            # file.write("search_footprints('optocoupler') \n")
            file.write("search_footprints('Relay_SolidState') \n")
        os.system("python test_searching_footprints_of_one_optocoupler.py")

    def test_searching_footprints_of_one_diode_bridge_rectifier(self):
        print("test_searching_footprints_of_one_diode_bridge_rectifier")
        with open("test_searching_footprints_of_one_diode_bridge_rectifier.py", "w") as file:
            file.write("from skidl import * \n\n")
            # file.write("search_footprints('bridge rectifier') \n")
            # file.write("search_footprints('GUO40-08NO1') \n")
            file.write("search_footprints('Diode_Bridge') \n")
        os.system("python test_searching_footprints_of_one_diode_bridge_rectifier.py")


if __name__ == '__main__':
    unittest.main()
nilq/baby-python
python
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0

"""Shared test utilities: xfail helpers, backend-aware random constructors,
and recursive ``assert_close`` comparison for funsor/tensor types."""

import contextlib
import importlib
import itertools
import numbers
import operator
from collections import OrderedDict, namedtuple
from functools import reduce

import numpy as np
import opt_einsum
from multipledispatch import dispatch
from multipledispatch.variadic import Variadic

import funsor.ops as ops
from funsor.cnf import Contraction
from funsor.delta import Delta
from funsor.domains import Bint, Domain, Real
from funsor.gaussian import Gaussian
from funsor.tensor import Tensor
from funsor.terms import Funsor, Number
from funsor.util import get_backend


@contextlib.contextmanager
def xfail_if_not_implemented(msg="Not implemented", *, match=None):
    """Context manager turning NotImplementedError into a pytest xfail.

    If ``match`` is given, only errors whose message contains it are xfailed;
    others are re-raised.
    """
    try:
        yield
    except NotImplementedError as e:
        if match is not None and match not in str(e):
            raise e from None
        import pytest

        pytest.xfail(reason="{}:\n{}".format(msg, e))


@contextlib.contextmanager
def xfail_if_not_found(msg="Not implemented"):
    """Context manager turning AttributeError into a pytest xfail."""
    try:
        yield
    except AttributeError as e:
        import pytest

        pytest.xfail(reason="{}:\n{}".format(msg, e))


def requires_backend(*backends, reason=None):
    """Skip-mark for tests that only run on the listed backend(s)."""
    import pytest

    if reason is None:
        reason = "Test requires backend {}".format(" or ".join(backends))
    return pytest.mark.skipif(get_backend() not in backends, reason=reason)


def excludes_backend(*backends, reason=None):
    """Skip-mark for tests that must not run on the listed backend(s)."""
    import pytest

    if reason is None:
        reason = "Test excludes backend {}".format(" and ".join(backends))
    return pytest.mark.skipif(get_backend() in backends, reason=reason)


class ActualExpected(namedtuple("LazyComparison", ["actual", "expected"])):
    """
    Lazy string formatter for test assertions.
    """

    def __repr__(self):
        return "\n".join(["Expected:", str(self.expected), "Actual:", str(self.actual)])


def id_from_inputs(inputs):
    """Build a short pytest id string from an inputs dict/items sequence."""
    if isinstance(inputs, (dict, OrderedDict)):
        inputs = inputs.items()
    if not inputs:
        return "()"
    return ",".join(k + "".join(map(str, d.shape)) for k, d in inputs)


# Multiple-dispatch ``allclose``: generic fallback, ndarray, and Tensor cases.
@dispatch(object, object, Variadic[float])
def allclose(a, b, rtol=1e-05, atol=1e-08):
    if type(a) != type(b):
        return False
    return ops.abs(a - b) < rtol + atol * ops.abs(b)


# Register numpy's own allclose for ndarray pairs.
dispatch(np.ndarray, np.ndarray, Variadic[float])(np.allclose)


@dispatch(Tensor, Tensor, Variadic[float])
def allclose(a, b, rtol=1e-05, atol=1e-08):
    if a.inputs != b.inputs or a.output != b.output:
        return False
    return allclose(a.data, b.data, rtol=rtol, atol=atol)


def is_array(x):
    """True if ``x`` is a backend numeric array (never for Funsors or torch)."""
    if isinstance(x, Funsor):
        return False
    if get_backend() == "torch":
        return False
    return ops.is_numeric_array(x)


def assert_close(actual, expected, atol=1e-6, rtol=1e-6):
    """Recursively assert that two values are elementwise close.

    Handles Funsors (Tensor, Delta, Gaussian, Contraction), backend arrays,
    torch tensors, plain numbers, dicts, and tuples. ``msg`` is a lazy
    formatter so the (possibly large) operands are only rendered on failure.
    """
    msg = ActualExpected(actual, expected)
    # First check that the two values have compatible types.
    if is_array(actual):
        assert is_array(expected), msg
    elif isinstance(actual, Tensor) and is_array(actual.data):
        assert isinstance(expected, Tensor) and is_array(expected.data), msg
    elif (
        isinstance(actual, Contraction)
        and isinstance(actual.terms[0], Tensor)
        and is_array(actual.terms[0].data)
    ):
        assert isinstance(expected, Contraction) and is_array(
            expected.terms[0].data
        ), msg
    elif isinstance(actual, Contraction) and isinstance(actual.terms[0], Delta):
        assert isinstance(expected, Contraction) and isinstance(
            expected.terms[0], Delta
        ), msg
    elif isinstance(actual, Gaussian):
        assert isinstance(expected, Gaussian)
    else:
        assert type(actual) == type(expected), msg

    if isinstance(actual, Funsor):
        assert isinstance(expected, Funsor), msg
        assert actual.inputs == expected.inputs, (actual.inputs, expected.inputs)
        assert actual.output == expected.output, (actual.output, expected.output)

    # Then compare contents, recursing per type.
    if isinstance(actual, (Number, Tensor)):
        assert_close(actual.data, expected.data, atol=atol, rtol=rtol)
    elif isinstance(actual, Delta):
        assert frozenset(n for n, p in actual.terms) == frozenset(
            n for n, p in expected.terms
        )
        actual = actual.align(tuple(n for n, p in expected.terms))
        for (actual_name, (actual_point, actual_log_density)), (
            expected_name,
            (expected_point, expected_log_density),
        ) in zip(actual.terms, expected.terms):
            assert actual_name == expected_name
            assert_close(actual_point, expected_point, atol=atol, rtol=rtol)
            assert_close(actual_log_density, expected_log_density, atol=atol, rtol=rtol)
    elif isinstance(actual, Gaussian):
        # Note white_vec and prec_sqrt are expected to agree only up to an
        # orthogonal factor, but precision and info_vec should agree exactly.
        assert_close(actual._info_vec, expected._info_vec, atol=atol, rtol=rtol)
        assert_close(actual._precision, expected._precision, atol=atol, rtol=rtol)
    elif isinstance(actual, Contraction):
        assert actual.red_op == expected.red_op
        assert actual.bin_op == expected.bin_op
        assert actual.reduced_vars == expected.reduced_vars
        assert len(actual.terms) == len(expected.terms)
        for ta, te in zip(actual.terms, expected.terms):
            assert_close(ta, te, atol, rtol)
    elif type(actual).__name__ == "Tensor":
        # torch.Tensor branch (name check avoids importing torch eagerly).
        assert get_backend() == "torch"
        import torch

        assert actual.dtype == expected.dtype, msg
        assert actual.shape == expected.shape, msg
        if actual.dtype in (torch.long, torch.uint8, torch.bool):
            assert (actual == expected).all(), msg
        else:
            eq = actual == expected
            if eq.all():
                return
            if eq.any():
                # Restrict the tolerance check to the differing entries.
                actual = actual[~eq]
                expected = expected[~eq]
            diff = (actual.detach() - expected.detach()).abs()
            if rtol is not None:
                assert (diff / (atol + expected.detach().abs())).max() < rtol, msg
            elif atol is not None:
                assert diff.max() < atol, msg
    elif is_array(actual):
        if get_backend() == "jax":
            import jax

            assert jax.numpy.result_type(actual.dtype) == jax.numpy.result_type(
                expected.dtype
            ), msg
        else:
            assert actual.dtype == expected.dtype, msg
        assert actual.shape == expected.shape, msg
        if actual.dtype in (np.int32, np.int64, np.uint8, bool):
            assert (actual == expected).all(), msg
        else:
            actual, expected = np.asarray(actual), np.asarray(expected)
            eq = actual == expected
            if eq.all():
                return
            if eq.any():
                actual = actual[~eq]
                expected = expected[~eq]
            diff = abs(actual - expected)
            if rtol is not None:
                assert (diff / (atol + abs(expected))).max() < rtol, msg
            elif atol is not None:
                assert diff.max() < atol, msg
    elif isinstance(actual, numbers.Number):
        diff = abs(actual - expected)
        if rtol is not None:
            assert diff < (atol + abs(expected)) * rtol, msg
        elif atol is not None:
            assert diff < atol, msg
    elif isinstance(actual, dict):
        assert isinstance(expected, dict)
        assert set(actual) == set(expected)
        for k, actual_v in actual.items():
            assert_close(actual_v, expected[k], atol=atol, rtol=rtol)
    elif isinstance(actual, tuple):
        assert isinstance(expected, tuple)
        assert len(actual) == len(expected)
        for actual_v, expected_v in zip(actual, expected):
            assert_close(actual_v, expected_v, atol=atol, rtol=rtol)
    else:
        raise ValueError("cannot compare objects of type {}".format(type(actual)))


def check_funsor(x, inputs, output, data=None):
    """
    Check dims and shape modulo reordering.
    """
    assert isinstance(x, Funsor)
    assert dict(x.inputs) == dict(inputs)
    if output is not None:
        assert x.output == output
    if data is not None:
        if x.inputs == inputs:
            x_data = x.data
        else:
            # Align to the expected input order before comparing raw data.
            x_data = x.align(tuple(inputs)).data
        if inputs or output.shape:
            assert (x_data == data).all()
        else:
            assert x_data == data


def xfail_param(*args, **kwargs):
    """Wrap a pytest param with an xfail mark built from ``kwargs``."""
    import pytest

    return pytest.param(*args, marks=[pytest.mark.xfail(**kwargs)])


def make_einsum_example(equation, fill=None, sizes=(2, 3)):
    """Build random operands (raw and Funsor-wrapped) for an einsum equation.

    Dimension sizes cycle through ``sizes``; ``fill`` replaces random data
    with a constant when given.
    """
    symbols = sorted(set(equation) - set(",->"))
    sizes = {dim: size for dim, size in zip(symbols, itertools.cycle(sizes))}
    inputs, outputs = equation.split("->")
    inputs = inputs.split(",")
    outputs = outputs.split(",")
    operands = []
    for dims in inputs:
        shape = tuple(sizes[dim] for dim in dims)
        x = randn(shape)
        operand = x if fill is None else (x - x + fill)
        # no need to use pyro_dims for numpy backend
        if not isinstance(operand, np.ndarray):
            operand._pyro_dims = dims
        operands.append(operand)
    funsor_operands = [
        Tensor(operand, OrderedDict([(d, Bint[sizes[d]]) for d in inp]))
        for inp, operand in zip(inputs, operands)
    ]

    assert equation == ",".join(
        ["".join(operand.inputs.keys()) for operand in funsor_operands]
    ) + "->" + ",".join(outputs)
    return inputs, outputs, sizes, operands, funsor_operands


def assert_equiv(x, y):
    """
    Check that two funsors are equivalent up to permutation of inputs.
    """
    check_funsor(x, y.inputs, y.output, y.data)


def rand(*args):
    """Backend-aware uniform random array; accepts shape as tuple or *args."""
    if isinstance(args[0], tuple):
        assert len(args) == 1
        shape = args[0]
    else:
        shape = args
    backend = get_backend()
    if backend == "torch":
        import torch

        return torch.rand(shape)
    else:
        # work around numpy random returns float object instead of np.ndarray object when shape == ()
        return np.array(np.random.rand(*shape))


def randint(low, high, size):
    """Backend-aware random integer array in ``[low, high)``."""
    backend = get_backend()
    if backend == "torch":
        import torch

        return torch.randint(low, high, size=size)
    else:
        return np.random.randint(low, high, size=size)


def randn(*args):
    """Backend-aware standard-normal random array."""
    if isinstance(args[0], tuple):
        assert len(args) == 1
        shape = args[0]
    else:
        shape = args
    backend = get_backend()
    if backend == "torch":
        import torch

        return torch.randn(shape)
    else:
        # work around numpy random returns float object instead of np.ndarray object when shape == ()
        return np.array(np.random.randn(*shape))


def random_scale_tril(*args):
    """Random lower-Cholesky factor, built via the backend's distribution
    transforms."""
    if isinstance(args[0], tuple):
        assert len(args) == 1
        shape = args[0]
    else:
        shape = args
    from funsor.distribution import BACKEND_TO_DISTRIBUTIONS_BACKEND

    backend_dist = importlib.import_module(
        BACKEND_TO_DISTRIBUTIONS_BACKEND[get_backend()]
    ).dist

    if get_backend() == "torch":
        data = randn(shape)
        return backend_dist.transforms.transform_to(
            backend_dist.constraints.lower_cholesky
        )(data)
    else:
        # The bijector maps an unconstrained vector of n*(n+1)/2 entries to a
        # lower-triangular matrix.
        data = randn(shape[:-2] + (shape[-1] * (shape[-1] + 1) // 2,))
        return backend_dist.biject_to(backend_dist.constraints.lower_cholesky)(data)


def zeros(*args):
    """Backend-aware zeros array; accepts shape as tuple or *args."""
    if isinstance(args[0], tuple):
        assert len(args) == 1
        shape = args[0]
    else:
        shape = args
    backend = get_backend()
    if backend == "torch":
        import torch

        return torch.zeros(shape)
    else:
        return np.zeros(shape)


def ones(*args):
    """Backend-aware ones array; accepts shape as tuple or *args."""
    if isinstance(args[0], tuple):
        assert len(args) == 1
        shape = args[0]
    else:
        shape = args
    backend = get_backend()
    if backend == "torch":
        import torch

        return torch.ones(shape)
    else:
        return np.ones(shape)


def empty(*args):
    """Backend-aware uninitialized array; accepts shape as tuple or *args."""
    if isinstance(args[0], tuple):
        assert len(args) == 1
        shape = args[0]
    else:
        shape = args
    backend = get_backend()
    if backend == "torch":
        import torch

        return torch.empty(shape)
    else:
        return np.empty(shape)


def random_tensor(inputs, output=Real):
    """
    Creates a random :class:`funsor.tensor.Tensor` with given inputs and output.
    """
    backend = get_backend()
    assert isinstance(inputs, OrderedDict)
    assert isinstance(output, Domain)
    shape = tuple(d.dtype for d in inputs.values()) + output.shape
    if output.dtype == "real":
        data = randn(shape)
    else:
        # Discrete output: sample category indices uniformly.
        num_elements = reduce(operator.mul, shape, 1)
        if backend == "torch":
            import torch

            data = torch.multinomial(
                torch.ones(output.dtype), num_elements, replacement=True
            )
        else:
            data = np.random.choice(output.dtype, num_elements, replace=True)
        data = data.reshape(shape)
    return Tensor(data, inputs, output.dtype)


def random_gaussian(inputs):
    """
    Creates a random :class:`funsor.gaussian.Gaussian` with given inputs.
    """
    assert isinstance(inputs, OrderedDict)
    batch_shape = tuple(d.dtype for d in inputs.values() if d.dtype != "real")
    event_shape = (sum(d.num_elements for d in inputs.values() if d.dtype == "real"),)
    prec_sqrt = randn(batch_shape + event_shape + event_shape)
    # Symmetrize and regularize so the precision is positive definite.
    precision = ops.matmul(prec_sqrt, ops.transpose(prec_sqrt, -1, -2))
    precision = precision + 0.5 * ops.new_eye(precision, event_shape[:1])
    prec_sqrt = ops.cholesky(precision)
    loc = randn(batch_shape + event_shape)
    white_vec = ops.matmul(prec_sqrt, ops.unsqueeze(loc, -1)).squeeze(-1)
    return Gaussian(white_vec=white_vec, prec_sqrt=prec_sqrt, inputs=inputs)


def random_mvn(batch_shape, dim, diag=False):
    """
    Generate a random :class:`torch.distributions.MultivariateNormal` with given shape.
    """
    backend = get_backend()
    rank = dim + dim
    loc = randn(batch_shape + (dim,))
    # Rank-2*dim factor guarantees a full-rank covariance.
    cov = randn(batch_shape + (dim, rank))
    cov = cov @ ops.transpose(cov, -1, -2)
    if diag:
        cov = cov * ops.new_eye(cov, (dim,))
    if backend == "torch":
        import pyro

        return pyro.distributions.MultivariateNormal(loc, cov)
    elif backend == "jax":
        import numpyro

        return numpyro.distributions.MultivariateNormal(loc, cov)


def make_plated_hmm_einsum(num_steps, num_obs_plates=1, num_hidden_plates=0):
    """Build an einsum equation for a plated HMM; returns (equation, plates)."""
    assert num_obs_plates >= num_hidden_plates
    t0 = num_obs_plates + 1
    obs_plates = "".join(opt_einsum.get_symbol(i) for i in range(num_obs_plates))
    hidden_plates = "".join(opt_einsum.get_symbol(i) for i in range(num_hidden_plates))
    inputs = [str(opt_einsum.get_symbol(t0))]
    for t in range(t0, num_steps + t0):
        inputs.append(
            str(opt_einsum.get_symbol(t))
            + str(opt_einsum.get_symbol(t + 1))
            + hidden_plates
        )
        inputs.append(str(opt_einsum.get_symbol(t + 1)) + obs_plates)
    equation = ",".join(inputs) + "->"
    return (equation, "".join(sorted(tuple(set(obs_plates + hidden_plates)))))


def make_chain_einsum(num_steps):
    """Einsum equation for a Markov chain: init factor + transition factors."""
    inputs = [str(opt_einsum.get_symbol(0))]
    for t in range(num_steps):
        inputs.append(str(opt_einsum.get_symbol(t)) + str(opt_einsum.get_symbol(t + 1)))
    equation = ",".join(inputs) + "->"
    return equation


def make_hmm_einsum(num_steps):
    """Einsum equation for an HMM: chain factors plus per-step observation
    factors."""
    inputs = [str(opt_einsum.get_symbol(0))]
    for t in range(num_steps):
        inputs.append(str(opt_einsum.get_symbol(t)) + str(opt_einsum.get_symbol(t + 1)))
        inputs.append(str(opt_einsum.get_symbol(t + 1)))
    equation = ",".join(inputs) + "->"
    return equation


def iter_subsets(iterable, *, min_size=None, max_size=None):
    """Yield all subsets of ``iterable`` with sizes in [min_size, max_size]."""
    if min_size is None:
        min_size = 0
    if max_size is None:
        max_size = len(iterable)
    for size in range(min_size, max_size + 1):
        yield from itertools.combinations(iterable, size)


class DesugarGetitem:
    """
    Helper to desugar ``.__getitem__()`` syntax.

    Example::

        >>> desugar_getitem[1:3, ..., None]
        (slice(1, 3), Ellipsis, None)
    """

    def __getitem__(self, index):
        return index


# Singleton instance used by tests.
desugar_getitem = DesugarGetitem()
nilq/baby-python
python
import unittest

import torch.cuda as cuda
from inferno.utils.model_utils import MultiscaleModelTester


class TestUnetMultiscale(unittest.TestCase):
    """Shape checks for the multiscale U-Net models: each network must emit
    one output per scale with the expected spatial dimensions."""

    def test_unet_multiscale_2d(self):
        from neurofire.models import UNet2DMultiscale
        # Four outputs, halving the spatial resolution at every scale.
        input_shape = (1, 1, 512, 512)
        output_shape = ((1, 1, 512, 512), (1, 1, 256, 256),
                        (1, 1, 128, 128), (1, 1, 64, 64))
        tester = MultiscaleModelTester(input_shape, output_shape)
        if cuda.is_available():
            tester.cuda()
        tester(UNet2DMultiscale(1, 1,
                                initial_num_fmaps=12,
                                fmap_growth=3))

    # this may fail on travis due to insufficient ram
    @unittest.expectedFailure
    def test_unet_multiscale_3d(self):
        from neurofire.models import UNet3DMultiscale
        input_shape = (1, 1, 32, 128, 128)
        output_shape = ((1, 1, 32, 128, 128), (1, 1, 16, 64, 64),
                        (1, 1, 8, 32, 32), (1, 1, 4, 16, 16))
        tester = MultiscaleModelTester(input_shape, output_shape)
        if cuda.is_available():
            tester.cuda()
        # test default unet 3d
        tester(UNet3DMultiscale(1, 1,
                                initial_num_fmaps=12,
                                fmap_growth=3,
                                scale_factor=2))
        # test with residual block
        tester(UNet3DMultiscale(1, 1,
                                initial_num_fmaps=12,
                                fmap_growth=3,
                                scale_factor=2,
                                add_residual_connections=True))
        # test unet 3d with anisotropic sampling
        # (z-axis is never downsampled: scale factor (1, 2, 2) per level)
        output_shape = ((1, 1, 32, 128, 128), (1, 1, 32, 64, 64),
                        (1, 1, 32, 32, 32), (1, 1, 32, 16, 16))
        tester = MultiscaleModelTester(input_shape, output_shape)
        if cuda.is_available():
            tester.cuda()
        tester(UNet3DMultiscale(1, 1,
                                initial_num_fmaps=12,
                                fmap_growth=3,
                                scale_factor=[(1, 2, 2),
                                              (1, 2, 2),
                                              (1, 2, 2)]))


if __name__ == '__main__':
    unittest.main()
nilq/baby-python
python
from .linear_growth_class import CosmoLinearGrowth from .linear_growth_functions import a2z from .linear_growth_functions import z2a from .linear_growth_functions import get_Hz from .linear_growth_functions import get_Dz from .linear_growth_functions import get_r from .linear_growth_functions import get_omega_m_z from .linear_growth_functions import get_fz from .linear_growth_functions import get_fz_numerical from .linear_growth_functions import get_sigma_8 from .linear_growth_functions import get_z_array
nilq/baby-python
python
#!/usr/bin/python
#-*- coding: utf-8 -*-

# >.>.>.>.>.>.>.>.>.>.>.>.>.>.>.>.
# Licensed under the Apache License, Version 2.0 (the "License")
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# --- File Name: loss_uneven.py
# --- Creation Date: 19-04-2021
# --- Last Modified: Sat 24 Apr 2021 00:10:40 AEST
# --- Author: Xinqi Zhu
# .<.<.<.<.<.<.<.<.<.<.<.<.<.<.<.<
"""
Loss for Uneven Network. Code borrowed from Nvidia StyleGAN2-ada-pytorch.
"""
import numpy as np
import torch
from torch import nn
from torch_utils import training_stats
from torch_utils import misc
from torch_utils.ops import conv2d_gradfix

from training.loss import StyleGAN2Loss

#----------------------------------------------------------------------------

class UnevenLoss(StyleGAN2Loss):
    """StyleGAN2 loss extended with three extra regularizers:

    * ``Gw1reg``  - penalizes the first mapping layer's per-latent-dimension
      weight norms with an increasing (linear or exponential) schedule, so
      later z dimensions are encouraged to have smaller influence.
    * ``Gplzreg`` - path-length regularization computed w.r.t. z (instead of w).
    * ``Gplzsepreg`` - per-dimension path-length regularization w.r.t. z.
    """

    def __init__(self, device, G_mapping, G_synthesis, D, augment_pipe=None,
                 style_mixing_prob=0.9, r1_gamma=10, pl_batch_shrink=2,
                 pl_decay=0.01, pl_weight=2, w1reg_lambda=0.,
                 uneven_reg_maxval=1., reg_type='linear', plz_weight=0.,
                 plz_decay=0.01, plzsep_weight=0., plzsep_decay=0.01):
        """Store the extra regularizer weights/decays on top of the base loss.

        The *_weight values gate their phases entirely when 0; the *_decay
        values are EMA factors for the running path-length means.
        """
        super().__init__(device, G_mapping, G_synthesis, D, augment_pipe,
                         style_mixing_prob, r1_gamma, pl_batch_shrink,
                         pl_decay, pl_weight)
        self.w1reg_lambda = w1reg_lambda
        self.uneven_reg_maxval = uneven_reg_maxval
        self.reg_type = reg_type
        self.plz_weight = plz_weight
        self.plz_decay = plz_decay
        # Running mean of z-path lengths (scalar EMA buffer).
        self.plz_mean = torch.zeros([], device=device)
        self.plzsep_weight = plzsep_weight
        self.plzsep_decay = plzsep_decay
        # Per-z-dimension running mean; initialized to an increasing ramp
        # (squared linspace) over the z dimensions — presumably to bias early
        # dimensions toward shorter paths; TODO(review) confirm intent.
        self.plzsep_mean = torch.linspace(0., 1., G_mapping.module.z_dim, device=device)**2
        # if self.reg_type == 'cumax_ada' or self.reg_type == 'monoconst_ada':
        #     self.ada_logits = nn.Parameter(torch.ones(self.G_mapping.z_dim), requires_grad=True)

    def get_w1reg_scale(self, w1, cur_device):
        """Return the per-z-dimension penalty ramp for the w1 regularizer.

        ``w1`` is the first mapping-layer weight of shape (out, z_in); the
        returned tensor has shape (z_in,), increasing from 0 to
        ``uneven_reg_maxval`` (exponentiated when reg_type == 'exp').
        """
        # if self.reg_type == 'cumax_ada':
        #     # if self.use_cumax_adaptive:
        #     reg_softmax = nn.functional.softmax(self.ada_logits, dim=0)
        #     reg = torch.cumsum(reg_softmax, dim=0) * self.uneven_reg_maxval
        # elif self.reg_type == 'monoconst_ada':
        #     reg_softmax = nn.functional.softmax(self.ada_logits, dim=0)
        #     reg_cumax = torch.cumsum(reg_softmax, dim=0)
        #     reg = reg_cumax / torch.sum(reg_cumax, dim=0) * self.uneven_reg_maxval
        if self.reg_type == 'exp':
            reg = torch.linspace(0., self.uneven_reg_maxval, w1.size(1)).to(cur_device)
            reg = torch.exp(reg)
        else:
            # Default ('linear') ramp.
            reg = torch.linspace(0., self.uneven_reg_maxval, w1.size(1)).to(cur_device)
        return reg

    def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, sync, gain):
        """Accumulate G/D gradients for one training phase.

        Mirrors StyleGAN2Loss.accumulate_gradients, with the additional
        'Gw1reg', 'Gplzreg' and 'Gplzsepreg' phases. Each regularizer only
        runs when its weight is nonzero.
        """
        assert phase in ['Gmain', 'Greg', 'Gboth', 'Dmain', 'Dreg', 'Dboth',
                         'Gw1reg', 'Dw1reg', 'Gplzreg', 'Dplzreg', 'Gplzsepreg', 'Dplzsepreg']
        do_Gmain = (phase in ['Gmain', 'Gboth'])
        do_Dmain = (phase in ['Dmain', 'Dboth'])
        do_Gpl = (phase in ['Greg', 'Gboth']) and (self.pl_weight != 0)
        do_Dr1 = (phase in ['Dreg', 'Dboth']) and (self.r1_gamma != 0)
        do_Gw1reg = (phase in ['Gw1reg', 'Gboth']) and (self.w1reg_lambda != 0)
        do_Gplz = (phase in ['Gplzreg', 'Gboth']) and (self.plz_weight != 0)
        do_Gplzsep = (phase in ['Gplzsepreg', 'Gboth']) and (self.plzsep_weight != 0)

        # Gmain: Maximize logits for generated images.
        if do_Gmain:
            with torch.autograd.profiler.record_function('Gmain_forward'):
                # Only sync G here when no later G phase will sync instead.
                gen_img, _gen_ws = self.run_G(gen_z, gen_c, sync=(sync and not do_Gpl and not do_Gplz and not do_Gplzsep)) # May get synced by Gpl.
                gen_logits = self.run_D(gen_img, gen_c, sync=False)
                training_stats.report('Loss/scores/fake', gen_logits)
                training_stats.report('Loss/signs/fake', gen_logits.sign())
                loss_Gmain = torch.nn.functional.softplus(-gen_logits) # -log(sigmoid(gen_logits))
                training_stats.report('Loss/G/loss', loss_Gmain)
            with torch.autograd.profiler.record_function('Gmain_backward'):
                loss_Gmain.mean().mul(gain).backward()

        # Gpl: Apply path length regularization.
        if do_Gpl:
            with torch.autograd.profiler.record_function('Gpl_forward'):
                # Smaller batch to cut the cost of the double-backward pass.
                batch_size = gen_z.shape[0] // self.pl_batch_shrink
                gen_img, gen_ws = self.run_G(gen_z[:batch_size], gen_c[:batch_size], sync=sync)
                pl_noise = torch.randn_like(gen_img) / np.sqrt(gen_img.shape[2] * gen_img.shape[3])
                with torch.autograd.profiler.record_function('pl_grads'), conv2d_gradfix.no_weight_gradients():
                    pl_grads = torch.autograd.grad(outputs=[(gen_img * pl_noise).sum()], inputs=[gen_ws], create_graph=True, only_inputs=True)[0]
                pl_lengths = pl_grads.square().sum(2).mean(1).sqrt()
                # EMA of path lengths; penalty pulls lengths toward the mean.
                pl_mean = self.pl_mean.lerp(pl_lengths.mean(), self.pl_decay)
                self.pl_mean.copy_(pl_mean.detach())
                pl_penalty = (pl_lengths - pl_mean).square()
                training_stats.report('Loss/pl_penalty', pl_penalty)
                loss_Gpl = pl_penalty * self.pl_weight
                training_stats.report('Loss/G/reg', loss_Gpl)
            with torch.autograd.profiler.record_function('Gpl_backward'):
                # The "gen_img[...] * 0 +" trick keeps gen_img attached to the
                # backward graph (as in upstream StyleGAN2-ada).
                (gen_img[:, 0, 0, 0] * 0 + loss_Gpl).mean().mul(gain).backward()

        # Dmain: Minimize logits for generated images.
        loss_Dgen = 0
        if do_Dmain:
            with torch.autograd.profiler.record_function('Dgen_forward'):
                gen_img, _gen_ws = self.run_G(gen_z, gen_c, sync=False)
                gen_logits = self.run_D(gen_img, gen_c, sync=False) # Gets synced by loss_Dreal.
                training_stats.report('Loss/scores/fake', gen_logits)
                training_stats.report('Loss/signs/fake', gen_logits.sign())
                loss_Dgen = torch.nn.functional.softplus(gen_logits) # -log(1 - sigmoid(gen_logits))
            with torch.autograd.profiler.record_function('Dgen_backward'):
                loss_Dgen.mean().mul(gain).backward()

        # Dmain: Maximize logits for real images.
        # Dr1: Apply R1 regularization.
        if do_Dmain or do_Dr1:
            name = 'Dreal_Dr1' if do_Dmain and do_Dr1 else 'Dreal' if do_Dmain else 'Dr1'
            with torch.autograd.profiler.record_function(name + '_forward'):
                # Only require grad on real images when computing R1.
                real_img_tmp = real_img.detach().requires_grad_(do_Dr1)
                real_logits = self.run_D(real_img_tmp, real_c, sync=sync)
                training_stats.report('Loss/scores/real', real_logits)
                training_stats.report('Loss/signs/real', real_logits.sign())

                loss_Dreal = 0
                if do_Dmain:
                    loss_Dreal = torch.nn.functional.softplus(-real_logits) # -log(sigmoid(real_logits))
                    training_stats.report('Loss/D/loss', loss_Dgen + loss_Dreal)

                loss_Dr1 = 0
                if do_Dr1:
                    with torch.autograd.profiler.record_function('r1_grads'), conv2d_gradfix.no_weight_gradients():
                        r1_grads = torch.autograd.grad(outputs=[real_logits.sum()], inputs=[real_img_tmp], create_graph=True, only_inputs=True)[0]
                    r1_penalty = r1_grads.square().sum([1,2,3])
                    loss_Dr1 = r1_penalty * (self.r1_gamma / 2)
                    training_stats.report('Loss/r1_penalty', r1_penalty)
                    training_stats.report('Loss/D/reg', loss_Dr1)

            with torch.autograd.profiler.record_function(name + '_backward'):
                (real_logits * 0 + loss_Dreal + loss_Dr1).mean().mul(gain).backward()

        # Gplz: Apply path length regularization on z.
        if do_Gplz:
            with torch.autograd.profiler.record_function('Gplz_forward'):
                batch_size = gen_z.shape[0] // self.pl_batch_shrink
                gen_z_used = gen_z[:batch_size]
                # Enable grad w.r.t. z so path lengths can be taken in z-space.
                gen_z_used.requires_grad = True
                gen_img, gen_ws = self.run_G(gen_z_used, gen_c[:batch_size], sync=sync)
                plz_noise = torch.randn_like(gen_img) / np.sqrt(gen_img.shape[2] * gen_img.shape[3])
                with torch.autograd.profiler.record_function('plz_grads'), conv2d_gradfix.no_weight_gradients():
                    plz_grads = torch.autograd.grad(outputs=[(gen_img * plz_noise).sum()], inputs=[gen_z_used], create_graph=True, only_inputs=True)[0]
                gen_z_used.requires_grad = False
                plz_lengths = plz_grads.square().sum(-1).sqrt()
                plz_mean = self.plz_mean.lerp(plz_lengths.mean(), self.plz_decay)
                self.plz_mean.copy_(plz_mean.detach())
                plz_penalty = (plz_lengths - plz_mean).square()
                training_stats.report('Loss/plz_penalty', plz_penalty)
                loss_Gplz = plz_penalty * self.plz_weight
                training_stats.report('Loss/G/plz_reg', loss_Gplz)
            with torch.autograd.profiler.record_function('Gplz_backward'):
                (gen_img[:, 0, 0, 0] * 0 + loss_Gplz).mean().mul(gain).backward()

        # Gplzsep: Apply path length regularization on z each dimension.
        if do_Gplzsep:
            with torch.autograd.profiler.record_function('Gplzsep_forward'):
                batch_size = gen_z.shape[0] // self.pl_batch_shrink
                gen_z_used = gen_z[:batch_size]
                gen_z_used.requires_grad = True
                gen_img, gen_ws = self.run_G(gen_z_used, gen_c[:batch_size], sync=sync)
                plzsep_noise = torch.randn_like(gen_img) / np.sqrt(gen_img.shape[2] * gen_img.shape[3])
                with torch.autograd.profiler.record_function('plzsep_grads'), conv2d_gradfix.no_weight_gradients():
                    plzsep_grads = torch.autograd.grad(outputs=[(gen_img * plzsep_noise).sum()], inputs=[gen_z_used], create_graph=True, only_inputs=True)[0]
                gen_z_used.requires_grad = False
                # Per-dimension |grad| (no sum over z), so each z dimension
                # gets its own running mean and penalty.
                plzsep_lengths = plzsep_grads.square().sqrt()
                plzsep_mean = self.plzsep_mean.lerp(plzsep_lengths.mean(dim=0), self.plzsep_decay)
                self.plzsep_mean.copy_(plzsep_mean.detach())
                plzsep_penalty = (plzsep_lengths - plzsep_mean).square().sum()
                training_stats.report('Loss/plzsep_penalty', plzsep_penalty)
                loss_Gplzsep = plzsep_penalty * self.plzsep_weight
                training_stats.report('Loss/G/plzsep_reg', loss_Gplzsep)
            with torch.autograd.profiler.record_function('Gplzsep_backward'):
                (gen_img[:, 0, 0, 0] * 0 + loss_Gplzsep).mean().mul(gain).backward()

        # Gw1reg: Constrain first-layer w by different latent dimensions.
        if do_Gw1reg:
            with torch.autograd.profiler.record_function('Gw1reg_forward'):
                w1 = getattr(self.G_mapping.module, f'fc{0}').weight # (out, z_in)
                cur_device = w1.device
                reg = self.get_w1reg_scale(w1, cur_device)
                # Column norms of w1, weighted by the increasing ramp: later
                # z dimensions pay a larger penalty for large weights.
                w1_sq = torch.sum(w1 * w1, dim=0) # (z_in)
                loss_w1reg = torch.sum(w1_sq * reg, dim=0) * self.w1reg_lambda
                training_stats.report('Loss/G/loss_w1reg', loss_w1reg)
            with torch.autograd.profiler.record_function('Gw1reg_backward'):
                loss_w1reg.mean().mul(gain).backward()

#----------------------------------------------------------------------------
nilq/baby-python
python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from pathlib import Path

from wildfires.utils import handle_array_job_args

try:
    # This will only work after the path modification carried out in the job script.
    from specific import (
        CACHE_DIR,
        SimpleCache,
        get_model,
        data_split_cache,
        get_shap_values,
    )
except ImportError:
    """Not running as an HPC job yet."""


def func():
    """Compute (and cache) SHAP interaction values for one PBS array chunk.

    Each array job handles ``job_samples`` consecutive rows of the training
    set; the chunk is selected by the PBS_ARRAY_INDEX environment variable,
    optionally remapped through ``indices`` to re-run only failed chunks.
    """
    # Used to re-compute specific failed jobs, `None` otherwise.
    indices = [
        2, 3, 6, 7, 14, 15, 16, 17, 19, 21, 22, 28, 29, 35, 36,
        52, 53, 54, 55, 59, 60, 61, 62, 68, 69, 70, 71, 72, 73, 76,
        81, 82, 83, 84, 94, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
        108, 109, 111, 112, 113, 114, 115, 116, 117, 118, 119, 123, 124, 125, 129,
        130, 132, 133, 134, 135, 141, 142, 155, 156, 170, 171, 172, 173, 175, 176,
        177, 178, 183, 184, 185, 186, 188, 189, 196, 201, 202, 203, 204, 220, 221,
        222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 235, 236, 237, 238,
        239, 240, 241, 245, 246, 252, 254, 255, 257, 258, 261, 262, 263, 264, 266,
        267, 276, 277, 311, 312, 313, 314, 316, 319, 320, 321, 327, 328, 329, 330,
        331, 332, 338, 341, 342, 343, 344, 369, 372, 373, 374, 375, 376, 377, 378,
        379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 393, 395, 396, 401, 402,
        409, 410, 411, 412, 413, 414, 422, 423, 437, 438, 439, 440, 441, 444, 445,
        447, 453, 454, 455, 456, 457, 459, 462, 470, 471, 472, 473, 508, 509, 510,
        511, 512, 513, 514, 515, 516, 519, 520, 521, 522, 523, 524, 525, 526, 527,
        528, 535, 536, 537, 539, 540, 543, 544, 545, 546, 548, 549,
    ]

    index = int(os.environ["PBS_ARRAY_INDEX"])

    if indices is not None:
        # Remap the array index onto the original (failed) chunk number.
        index = indices[index]

    print("Index:", index)

    X_train, X_test, y_train, y_test = data_split_cache.load()
    rf = get_model()

    # Number of training rows processed per array job.
    job_samples = 50

    # Cache keyed by chunk index and chunk size, so re-runs are no-ops.
    tree_path_dependent_shap_interact_cache = SimpleCache(
        f"tree_path_dependent_shap_interact_{index}_{job_samples}",
        cache_dir=os.path.join(CACHE_DIR, "shap_interaction"),
    )

    @tree_path_dependent_shap_interact_cache
    def cached_get_interact_shap_values(model, X):
        return get_shap_values(model, X, interaction=True)

    cached_get_interact_shap_values(
        rf, X_train[index * job_samples : (index + 1) * job_samples]
    )


if __name__ == "__main__":
    # NOTE(review): when `indices` is set, max_index should equal
    # len(indices) - 1 — TODO confirm it matches the list above.
    handle_array_job_args(
        Path(__file__).resolve(),
        func,
        ncpus=1,
        mem="7gb",
        walltime="11:00:00",
        max_index=221,
    )
nilq/baby-python
python
import aiohttp
import asyncio
import json
import time

from pydigitalstrom.client import DSClient
from pydigitalstrom.log import DSLog


class DSWebsocketEventListener:
    """Listen on the digitalSTROM server websocket and dispatch events.

    Every received TEXT frame is decoded as JSON; events whose ``name``
    matches ``event_name`` are forwarded to all registered async callbacks.
    ``keepWebserviceAlive`` heartbeats update ``_last_keepalive``.
    """

    def __init__(self, client: DSClient, event_name: str):
        self._client = client
        self._event_name = event_name
        self._callbacks = []
        # Live ClientWebSocketResponse while start() is running, else None.
        self._ws = None
        # Epoch milliseconds of the last keepWebserviceAlive heartbeat.
        self._last_keepalive = None

    def register(self, callback: callable):
        """Register an async callback, invoked as ``callback(event=dict)``."""
        self._callbacks.append(callback)

    async def _get_cookie(self):
        """Build the auth cookie dict from a fresh session token."""
        return dict(token=await self._client.get_session_token())

    async def start(self):
        """Connect to the server websocket and process messages until closed."""
        session = await self._client.get_aiohttp_session(
            cookies=await self._get_cookie()
        )
        url = f"wss://{self._client.host}:{self._client.port}/websocket"
        # BUG FIX: ws_connect() returns a connect context manager, not the
        # websocket itself. The previous code stored that manager in
        # self._ws, so stop() called close() on the wrong object. Store the
        # actual ClientWebSocketResponse obtained from the context instead.
        async with session.ws_connect(url=url) as ws:
            self._ws = ws
            async for msg in ws:
                if msg.type == aiohttp.WSMsgType.TEXT:
                    await self._handle_event(event=json.loads(msg.data))
                else:
                    DSLog.logger.warn(f"DS websocket got unknown command: {msg}")

    async def stop(self):
        """Close the websocket connection, ending the start() loop."""
        if self._ws is not None:
            await self._ws.close()
            self._ws = None

    async def _handle_event(self, event: dict):
        """Dispatch a decoded event dict to heartbeat handling and callbacks."""
        if "name" not in event:
            return

        # Server heartbeat: record its arrival time (epoch milliseconds).
        if event["name"] == "keepWebserviceAlive":
            self._last_keepalive = time.time() * 1000.0

        # Subscribed event: notify every registered callback.
        if event["name"] == self._event_name:
            for callback in self._callbacks:
                await callback(event=event)
nilq/baby-python
python
from django.core.exceptions import ObjectDoesNotExist
from django.db import models


class OrderField(models.PositiveIntegerField):
    """Auto-incrementing ordering field.

    When no value is set on save, the field assigns ``last + 1`` where
    ``last`` is the highest existing value, optionally scoped to objects
    sharing the values of the fields named in ``for_fields``.
    """

    def __init__(self, for_fields=None, *args, **kwargs):
        # Field names that define the ordering scope (e.g. order within a
        # parent object), or None for a global ordering.
        self.for_fields = for_fields
        super().__init__(*args, **kwargs)

    def pre_save(self, model_instance, add):
        """Fill in the next order value when the field is unset."""
        if getattr(model_instance, self.attname) is not None:
            # A value was provided explicitly; defer to the default behavior.
            return super().pre_save(model_instance, add)

        try:
            qs = self.model.objects.all()
            if self.for_fields:
                # Restrict to objects sharing the scope fields' values.
                query = {field: getattr(model_instance, field)
                         for field in self.for_fields}
                qs = qs.filter(**query)
            last_item = qs.latest(self.attname)
            # BUG FIX: read the value through self.attname instead of the
            # hard-coded attribute name "order", so the field works under
            # any name (backward-compatible when the field IS named "order").
            value = getattr(last_item, self.attname) + 1
        except ObjectDoesNotExist:
            # No existing objects in scope: start counting at 0.
            value = 0
        setattr(model_instance, self.attname, value)
        return value
nilq/baby-python
python
# Copyright 2020 Lynn Root """Functional tests for interrogate/coverage.py.""" import os import sys import pytest from interrogate import config from interrogate import coverage HERE = os.path.abspath(os.path.join(os.path.abspath(__file__), os.path.pardir)) SAMPLE_DIR = os.path.join(HERE, "sample") FIXTURES = os.path.join(HERE, "fixtures") IS_WINDOWS = sys.platform in ("cygwin", "win32") @pytest.mark.parametrize( "paths,conf,exp_results", ( ([os.path.join(SAMPLE_DIR, "empty.py"),], {}, (1, 0, 1, "0.0")), ( [os.path.join(SAMPLE_DIR, "empty.py"),], {"ignore_module": True}, (0, 0, 0, "0.0"), ), ([SAMPLE_DIR,], {}, (56, 26, 30, "46.4")), ([os.path.join(SAMPLE_DIR, "partial.py")], {}, (22, 7, 15, "31.8")), ( [os.path.join(SAMPLE_DIR, "full.py"),], {"ignore_nested_functions": True}, (17, 17, 0, "100.0"), ), ( [os.path.join(SAMPLE_DIR, "partial.py"),], {"ignore_nested_functions": True}, (20, 6, 14, "30.0"), ), ), ) def test_coverage_simple(paths, conf, exp_results, mocker): """Happy path - get expected results given a file or directory""" conf = config.InterrogateConfig(**conf) interrogate_coverage = coverage.InterrogateCoverage(paths=paths, conf=conf) results = interrogate_coverage.get_coverage() assert exp_results[0] == results.total assert exp_results[1] == results.covered assert exp_results[2] == results.missing assert exp_results[3] == "{:.1f}".format(results.perc_covered) def test_coverage_errors(capsys): """Exit when no Python files are found.""" path = os.path.join(SAMPLE_DIR, "ignoreme.txt") interrogate_coverage = coverage.InterrogateCoverage(paths=[path]) with pytest.raises(SystemExit, match="1"): interrogate_coverage.get_coverage() captured = capsys.readouterr() assert "E: Invalid file" in captured.err interrogate_coverage = coverage.InterrogateCoverage(paths=[FIXTURES]) with pytest.raises(SystemExit, match="1"): interrogate_coverage.get_coverage() captured = capsys.readouterr() assert "E: No Python files found to interrogate in " in captured.err 
def _read_expected(fixture_name):
    """Load an expected-output fixture, using the Windows variant on Windows."""
    if IS_WINDOWS:
        fixture_path = os.path.join(FIXTURES, "windows", fixture_name)
    else:
        fixture_path = os.path.join(FIXTURES, fixture_name)
    with open(fixture_path, "r") as f:
        return f.read()


@pytest.mark.parametrize(
    "level,exp_fixture_file",
    ((2, "expected_detailed.txt"),),
)
def test_print_results(level, exp_fixture_file, capsys, monkeypatch):
    """Output of test results differ by verbosity."""
    monkeypatch.setattr(coverage.utils.OutputFormatter, "TERMINAL_WIDTH", 80)

    cov = coverage.InterrogateCoverage(paths=[SAMPLE_DIR])
    cov.print_results(results=cov.get_coverage(), output=None, verbosity=level)

    captured = capsys.readouterr()
    assert _read_expected(exp_fixture_file) in captured.out


@pytest.mark.parametrize(
    "ignore_module,level,exp_fixture_file",
    (
        (False, 2, "expected_detailed.txt"),
        (True, 2, "expected_detailed_no_module.txt"),
        (False, 1, "expected_summary.txt"),
        (True, 1, "expected_summary_no_module.txt"),
    ),
)
def test_print_results_ignore_module(
    ignore_module, level, exp_fixture_file, capsys, monkeypatch
):
    """Do not print module info if ignore_module is True."""
    monkeypatch.setattr(coverage.utils.OutputFormatter, "TERMINAL_WIDTH", 80)

    conf = config.InterrogateConfig(**{"ignore_module": ignore_module})
    cov = coverage.InterrogateCoverage(paths=[SAMPLE_DIR], conf=conf)
    cov.print_results(results=cov.get_coverage(), output=None, verbosity=level)

    captured = capsys.readouterr()
    assert _read_expected(exp_fixture_file) in captured.out


def test_print_results_single_file(capsys, monkeypatch):
    """Results for a single file should still list the filename."""
    monkeypatch.setattr(coverage.utils.OutputFormatter, "TERMINAL_WIDTH", 80)

    single_file = os.path.join(SAMPLE_DIR, "full.py")
    cov = coverage.InterrogateCoverage(paths=[single_file])
    cov.print_results(results=cov.get_coverage(), output=None, verbosity=2)

    captured = capsys.readouterr()
    assert _read_expected("expected_detailed_single_file.txt") in captured.out

    # I don't want to deal with path mocking out just to get tests to run
    # everywhere
    if not IS_WINDOWS:
        assert "tests/functional/sample/" in captured.out
        assert "tests/functional/sample/full.py" not in captured.out
    else:
        assert "tests\\functional\\sample\\" in captured.out
        assert "tests\\functional\\sample\\full.py" not in captured.out
nilq/baby-python
python