content
stringlengths
0
1.05M
origin
stringclasses
2 values
type
stringclasses
2 values
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import arg_scope
#from utils_fn import *
from ops import *
import time


class InpaintModel():
    """Coarse-to-fine inpainting network with edge/gradient guidance.

    Relies on the project-local ``ops`` module for ``gen_conv``,
    ``gen_deconv``, ``resnet_blocks``, ``attention``, ``normalize`` and
    ``inverse_transform`` (imported via ``from ops import *`` above).
    """

    def __init__(self, args):
        self.model_name = "InpaintModel"  # name for checkpoint
        self.img_size = args.IMG_SHAPES

    # yj
    def build_inpaint_net(self, x, edge, grad, mask, args=None, reuse=False,
                          training=True, padding='SAME', name='inpaint_net'):
        """Build the inpainting graph.

        Args:
            x: incomplete image in [-1, 1], shape (batch, h, w, c).
            edge: incomplete edge map {0, 1}, shape (batch, h, w).
            grad: incomplete gradient map, shape (batch, h, w, 6).
            mask: hole mask {0, 1} (1 marks the missing region).
            args: config object providing IMG_SHAPES.
            reuse: reuse variables in ``name`` scope.
            training: passed through to conv/deconv ops (e.g. batch norm).
            padding: conv padding mode.
            name: variable scope name.

        Returns:
            The final 256-scale RGB prediction in [-1, 1] (tanh output).

        NOTE(review): the intermediate multi-scale outputs (x_out_64,
        x_grad_out_64, x_out_128, x_grad_out_128, x_grad) and ``xin`` are
        computed but not returned; presumably earlier versions returned
        them for deep supervision — confirm before removing.
        """
        # Force static shapes so the concat below lines up channel-wise.
        x = tf.reshape(x, [-1, args.IMG_SHAPES[0], args.IMG_SHAPES[1], args.IMG_SHAPES[2]])
        mask = tf.reshape(mask, [-1, args.IMG_SHAPES[0], args.IMG_SHAPES[1], 1])
        edge = tf.reshape(edge, [-1, args.IMG_SHAPES[0], args.IMG_SHAPES[1], 1])
        # grad = tf.reshape(grad, [-1, args.IMG_SHAPES[0], args.IMG_SHAPES[1], 6])
        xin = x
        ones_x = tf.ones_like(x)[:, :, :, 0:1]
        # Stack image + edge + mask + gradient into one multi-channel input.
        x = tf.concat([x, ones_x * edge, ones_x * mask, grad], axis=3)

        # encoder-decoder network: channel 64-128-256-128-64
        cnum = 64  # initial channel

        # arg_scope([op1, op2, ...], **kw) makes the given keyword arguments
        # the defaults for every listed op inside the block.
        with tf.variable_scope(name, reuse=reuse), \
                arg_scope([gen_conv, gen_deconv], training=training, padding=padding):
            # Encoder
            # scale 256, activation: relu
            x = gen_conv(x, cnum, 7, stride=1, activation=tf.nn.relu, name='en_conv1')
            # scale 128
            x = gen_conv(x, 2 * cnum, 4, stride=2, activation=tf.nn.relu, name='en_conv2')
            # scale 64
            x = gen_conv(x, 4 * cnum, 4, stride=2, activation=tf.nn.relu, name='en_conv3')
            # dilated residual blocks at the bottleneck
            x = resnet_blocks(x, 4 * cnum, 3, stride=1, rate=2, block_num=8,
                              activation=tf.nn.relu, name='en_64_8')

            # Decoder
            # shared attention at scale 64
            x = attention(x, 4 * cnum, down_scale=2, pool_scale=2, name='attention_pooling_64')
            # side outputs at scale 64: gradient map + RGB
            x_64 = gen_conv(x, 4 * cnum, 5, stride=1, activation=tf.nn.relu, name='out64_grad_out')
            x_grad_out_64 = gen_conv(x_64, 6, 1, stride=1, activation=None, name='grad64')
            x_out_64 = gen_conv(x_64, 3, 1, stride=1, activation=tf.nn.tanh, name='out64')

            # scale 64 -> 128 (skip-concat with the scale-64 features)
            x = tf.concat([x, x_64], axis=3)
            x = gen_deconv(x, 2 * cnum, 4, method='deconv', activation=tf.nn.relu,
                           name='de128_conv4_upsample')
            x = attention(x, 2 * cnum, down_scale=2, pool_scale=2, name='attention_pooling_128')
            # side outputs at scale 128
            x_128 = gen_conv(x, 2 * cnum, 5, stride=1, activation=tf.nn.relu, name='out128_grad_out')
            x_grad_out_128 = gen_conv(x_128, 6, 1, stride=1, activation=None, name='grad128')
            x_out_128 = gen_conv(x_128, 3, 1, stride=1, activation=tf.nn.tanh, name='out128')

            # scale 128 -> 256
            x = tf.concat([x, x_128], axis=3)
            x = gen_deconv(x, cnum, 4, method='deconv', activation=tf.nn.relu,
                           name='de256_conv5_upsample')
            x = attention(x, cnum, down_scale=2, pool_scale=2, name='attention_pooling_256')
            # final outputs at scale 256
            x = gen_conv(x, cnum, 5, stride=1, activation=tf.nn.relu, name='out256_grad_out')
            x_grad = gen_conv(x, 6, 1, stride=1, activation=None, name='grad256')  # grad map, no activation
            x = gen_conv(x, 3, 1, stride=1, activation=tf.nn.tanh, name='out256')

            return x

    def evaluate(self, x, edge, mask, args, training=False, reuse=False):
        """Run inference on a single image and composite the result.

        Builds the incomplete inputs (image/edge/gradient masked by
        ``mask``), runs the inpainting net, and pastes the prediction back
        into the known region.

        Returns:
            The completed image in raw (inverse-transformed) value range.
        """
        # image in [-1, 1]; Sobel gives a 2-channel gradient per RGB channel -> 6.
        image = normalize(x)
        grad = tf.image.sobel_edges(image)  # normalization?
        grad = tf.reshape(grad, [1, args.IMG_SHAPES[0], args.IMG_SHAPES[1], 6])  # 6 channel
        # x for image
        x = tf.reshape(image, [1, args.IMG_SHAPES[0], args.IMG_SHAPES[1], args.IMG_SHAPES[2]])
        mask = tf.reshape(mask, [-1, args.IMG_SHAPES[0], args.IMG_SHAPES[1], 1])
        edge = tf.reshape(edge, [-1, args.IMG_SHAPES[0], args.IMG_SHAPES[1], 1])

        # incomplete image: zero out the hole region
        x_incomplete = x * (1. - mask)
        # incomplete edge at full scale; convention: 0 (black) marks an edge
        # on disk/input, 1 (white) marks non-edge, holes forced to 1.
        input_edge = 1 - edge
        edge_incomplete = input_edge * (1 - mask) + mask
        # incomplete gradient map
        grad_incomplete = (1. - mask) * grad

        out_256 = self.build_inpaint_net(x_incomplete, edge_incomplete, grad_incomplete,
                                         mask, args, reuse=reuse, training=training,
                                         padding=args.PADDING)

        # Composite: keep known pixels, fill the hole from the prediction.
        raw_x = inverse_transform(x)
        raw_x_incomplete = raw_x * (1 - mask)
        raw_x_complete = raw_x_incomplete + inverse_transform(out_256) * mask
        return raw_x_complete
nilq/baby-python
python
import os #determine n - the number of training observations nFile = open('CV_folds/training_set_0.txt','r') n = 0.0 for i in nFile: n += 1 #determine the number of folds #nFolds = sum(os.path.isdir(i) for i in os.listdir('CV_decomp')) nFolds = len(os.listdir('CV_decomp')) print nFolds #determine values of p inList = os.listdir('CV_result') numList = [] for i in inList: numList.append(float(i.split("_")[2])) print "p SSD MARE RMSPE" #for each value of p for j in set(numList): i = 0 SSDsum = MAREsum = RMSPEsum = 0 #for each fold while i < nFolds: #compute sum of errors inFile = open('CV_result/st_idw_' + str(j) + "_" + str(i) + ".txt", 'r') r = inFile.readline().split(",") SSD, MARE, RMSPE = float(r[0]),float(r[1]),float(r[2]) SSDsum += SSD MAREsum += MARE RMSPEsum += RMSPE i += 1 print j, ((SSDsum/n) ** 0.5) / 10.0, (MAREsum/n)/10.0, (((RMSPEsum/n) ** 0.5) * 100.0) / 10.0
nilq/baby-python
python
import re
import random
import os
import pandas as pd
try:
    import torch
except ImportError:
    pass
from tqdm import tqdm
import spacy
from spacy import displacy
from visuUtils import train2myVisu, build_color_scheme
from visuUtils import conll2sent_list, sent_list2spacy, myScores


class visualizer(object):
    ''' Integrate spacy visualization for conll and spacy formats.

    Loads a gold conll file and a prediction conll file into spaCy's
    training format, keeps displaCy-renderable copies of both, and offers
    scoring plus false-positive / false-negative browsing helpers.
    '''

    def __init__(self, data, predictions, verbose = False, column = -1):
        ''' Input:
        - data, conll file path,
        - predictions, conll file path for predictions
        - column, the column to be selected as annotation in the conll
          file (default is the last column)
        '''
        self.path2conll = data
        # convert to spaCy readable json format:
        # each item is (text, {'entities': [(start, end, label), ...]})
        self.data = sent_list2spacy(*conll2sent_list(data, column = column))
        unique_entities = []
        nb_sents = 0
        nb_tokens = 0
        for sent in self.data:
            for ent in sent[1]['entities']:
                if ent[2] not in unique_entities:
                    unique_entities.append(ent[2])
            nb_sents += 1
            # token count approximated by whitespace splitting
            nb_tokens += len(sent[0].split(' '))
        # Set summary statistics
        self.nb_sents = nb_sents
        self.nb_tokens = nb_tokens
        self.unique_ents = unique_entities
        # Create a separate container for visualizable (displaCy manual) data
        self.visu_gold = [train2myVisu(sent) for sent in self.data]
        # Build options (color scheme) for the displayer
        self.options = build_color_scheme([ent.upper() for ent in self.unique_ents])
        self.path2predicted = predictions
        self.pred_data = sent_list2spacy(*conll2sent_list(self.path2predicted))
        self.visu_pred = [train2myVisu(sent) for sent in self.pred_data]
        if verbose:
            print('Data contains {} sentences with an average of {:.2f} tokens per sentence totalizing {} tokens.'.format(
                self.nb_sents, self.nb_tokens/self.nb_sents, self.nb_tokens))
            print('There are {} entities plus the "O" label: {}'.format(len(self.unique_ents), self.unique_ents))
        return

    ''' Apply pre-annotation and get the predictions and scores'''
    def pre_annot_pred(self, column = 6):
        # Re-read the gold file, taking a different column as the
        # "prediction" (e.g. a pre-annotation column).
        pred_data = sent_list2spacy(*conll2sent_list(self.path2conll, column = column))
        self.pred_data = pred_data
        self.visu_pred = [train2myVisu(sent) for sent in self.pred_data]
        self.path2predicted = self.path2conll
        return

    def score_predictions(self, average = 'weighted', grouping = None, column = -1,
                          punct_ignore = False, col_sep = '\t'):
        ''' Score the model on the predicted data
        input:
        - average, [None, 'macro', 'micro', 'weighted'] see
          http://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_fscore_support.html
          for explanation
        - grouping, dictionary with entries of the form {'NAME':[FNAME, LNAME]}
          (one to many)
        - column, integer, column from the prediction file to be selected as
          the prediction (in case of multiple predictions for example)
        - punct_ignore, should the punctuation be removed from the evaluation
        - col_sep, separator between word and label in the conll file

        Also fills self.FP_dic / self.FN_dic with per-label false
        positives / negatives as (sentence_index, start, end, label).
        '''
        self.scores = myScores(self.path2conll, self.path2predicted, average = average,
                               grouping = grouping, column = column,
                               punct_ignore = punct_ignore, col_sep = col_sep)
        if column != -1:
            print('Scoring using the {} column of the test set'.format(column))
        # build false positive and false negative lists
        self.gold_per_label = {}
        self.pred_per_label = {}
        self.FP_dic = {}
        self.FN_dic = {}
        for lab in self.unique_ents:
            self.gold_per_label[lab] = []
            self.pred_per_label[lab] = []
            self.FP_dic[lab] = []
            self.FN_dic[lab] = []
        punct = ['_', '-', "'", ',', ';', '.', '(', ')', '/', '\\', ':']
        # fill gold entities dict
        # NOTE(review): ``&`` is bitwise-and of two bools here — works, but
        # ``and`` would be clearer; behavior kept as-is.
        for i, sents in enumerate(self.data):
            ents = sents[1]['entities']
            for ent in ents:
                if (sents[0][ent[0]:ent[1]] not in punct) & punct_ignore:
                    self.gold_per_label[ent[2]].append((i, *ent))
                elif not punct_ignore:
                    self.gold_per_label[ent[2]].append((i, *ent))
        # fill pred entities dict
        for i, sents in enumerate(self.pred_data):
            ents = sents[1]['entities']
            for ent in ents:
                if (sents[0][ent[0]:ent[1]] not in punct) & punct_ignore:
                    self.pred_per_label[ent[2]].append((i, *ent))
                elif not punct_ignore:
                    self.pred_per_label[ent[2]].append((i, *ent))
        # exact-match comparison: predicted-not-gold -> FP, gold-not-predicted -> FN
        for lab in self.unique_ents:
            for ent in self.pred_per_label[lab]:
                if ent not in self.gold_per_label[lab]:
                    self.FP_dic[lab].append(ent)
            for ent in self.gold_per_label[lab]:
                if ent not in self.pred_per_label[lab]:
                    self.FN_dic[lab].append(ent)
        return self.scores

    def scores2pd(self):
        ''' Wrapper to return scores as dataframe (index = entity labels).'''
        df_scores = pd.DataFrame(data = {'precision':self.scores[0], 'recall':self.scores[1],
                                         'f1':self.scores[2], 'count':self.scores[3]},
                                 index = self.unique_ents)
        df_scores.sort_index(inplace=True)
        return df_scores

    def visu_gold_sample(self, ix = None, verbose = True, context = 0):
        ''' Visualize a random gold sample (or sentence ``ix`` with
        ``context`` sentences on each side).

        NOTE(review): ``random.randint(0, len(...))`` is inclusive of the
        upper bound, so ix == len(self.data) is possible — confirm whether
        randrange was intended.
        '''
        if ix is None:
            ix = random.randint(0, len(self.data))
        #elif(ix >= len(self.data)):
        #    print('ix out of bound, please selecte an index smaller than {}'.format(len(self.data)))
        #    return
        if verbose:
            print('sentence {}/{}'.format(ix, len(self.data)))
        displacy.render(self.visu_gold[(ix-context):(ix+context+1)], style = 'ent',
                        jupyter = True, manual = True, options = self.options)
        return

    def visu_pred_sample(self, ix = None, verbose = True, context = 0):
        ''' Visualize a random pred sample'''
        if ix is None:
            ix = random.randint(0, len(self.data))
        elif(ix >= len(self.data)):
            print('ix out of bound, please selecte an index smaller than {}'.format(len(self.data)))
            return
        if verbose:
            print('sentence {}/{}'.format(ix, len(self.data)))
        displacy.render(self.visu_pred[(ix-context):(ix+context+1)], style = 'ent',
                        jupyter = True, manual = True, options = self.options)
        return

    def visu_compare(self, ix = None, context = 0):
        ''' Visualize the same sample from gold and pred'''
        if ix is None:
            ix = random.randint(0, len(self.data))
        print('Gold:')
        self.visu_gold_sample(ix, verbose = True, context = context)
        print('Predicted:')
        self.visu_pred_sample(ix, verbose = False, context = context)
        return

    def visu_FP_sample(self, lab = None, i = None, context = 0, verbose = True):
        ''' Visualize one False Positive for a given category
        (requires score_predictions() to have been called).'''
        if lab is None:
            lab = random.choice(self.unique_ents)
        nb_errors = len(self.FP_dic[lab])
        if i is None:
            i = random.randint(0, nb_errors)
        ix = self.FP_dic[lab][i][0]
        if verbose:
            print('There are {} FP for the {} category.'.format(nb_errors, lab))
            print('Displaying FP {}/{}'.format(i, nb_errors))
        self.visu_compare(ix, context = context)
        return

    def visu_FPs(self, lab = None, context = 0):
        ''' Visualize all FPs of a given category'''
        if lab is None:
            lab = random.choice(self.unique_ents)
        nb_errors = len(self.FP_dic[lab])
        print('There are {} FP for the {} category.'.format(nb_errors, lab))
        # little trick to avoid showing the same sentence several times
        ix_prec = -1
        for i in range(0, nb_errors):
            ix_tmp = self.FP_dic[lab][i][0]
            if ix_prec != ix_tmp:
                self.visu_compare(ix_tmp, context = context)
                ix_prec = ix_tmp
                print('----------------------------------------------\n')

    def visu_FN_sample(self, lab = None, i = None, context = 0, verbose = True):
        ''' Visualize one False Negative for a given category'''
        if lab is None:
            lab = random.choice(self.unique_ents)
        nb_errors = len(self.FN_dic[lab])
        if i is None:
            i = random.randint(0, nb_errors)
        ix = self.FN_dic[lab][i][0]
        if verbose:
            print('There are {} FN for the {} category.'.format(nb_errors, lab))
            print('Displaying FN {}/{}'.format(i, nb_errors))
        self.visu_compare(ix, context = context)
        return

    def visu_FNs(self, lab = None, context = 0):
        ''' Visualize all FNs of a given category'''
        if lab is None:
            lab = random.choice(self.unique_ents)
        nb_errors = len(self.FN_dic[lab])
        print('There are {} FN for the {} category.'.format(nb_errors, lab))
        # little trick to avoid showing the same sentence several times
        ix_prec = -1
        for i in range(0, nb_errors):
            ix_tmp = self.FN_dic[lab][i][0]
            if ix_prec != ix_tmp:
                self.visu_compare(ix_tmp, context = context)
                ix_prec = ix_tmp
                print('----------------------------------------------')

    def group_labs(self, grouping):
        ''' Given a grouping of categories, change the categories of the
        visu object to perform new visualization and scoring based on
        these new labels. Entities whose (possibly remapped) label is 'O'
        are dropped.'''
        # change the labels in gold data
        new_data = []
        for sent in self.data:
            ents = sent[1]['entities']
            new_ents = []
            for ent in ents:
                for k, v in grouping.items():
                    if ent[2] in v:
                        ent = (ent[0], ent[1], k)
                if ent[2]!='O':
                    new_ents.append(ent)
            new_sent = (sent[0], {'entities':new_ents})
            new_data.append(new_sent)
        self.data = new_data
        self.visu_gold = [train2myVisu(sent) for sent in self.data]
        # change the labels in pred data
        new_data = []
        for sent in self.pred_data:
            ents = sent[1]['entities']
            new_ents = []
            for ent in ents:
                for k, v in grouping.items():
                    if ent[2] in v:
                        ent = (ent[0], ent[1], k)
                if ent[2]!='O':
                    new_ents.append(ent)
            new_sent = (sent[0], {'entities':new_ents})
            new_data.append(new_sent)
        self.pred_data = new_data
        self.visu_pred = [train2myVisu(sent) for sent in self.pred_data]
        # recompute the unique entities
        unique_entities = []
        for sent in self.data:
            for ent in sent[1]['entities']:
                if ent[2] not in unique_entities:
                    unique_entities.append(ent[2])
        self.unique_ents = unique_entities
        print('New unique entities after grouping:', self.unique_ents)
        self.options = build_color_scheme(self.unique_ents)
        return


# Decoding functions for the different frameworks
# For now there are Yaset, NCRFpp and spaCy
try:
    from ncrfpp.utils.myUtils import evaluate, load_model_decode
    from ncrfpp.utils.data import Data
except ImportError:
    pass


def ncrf_decoding(decode_conf_dict, verbose = False):
    ''' Perform NCRF++ decoding from a config dict, then rewrite the
    decode output file to the same conll format as yaset
    (space-separated columns -> tab-separated).'''
    data = Data()
    data.read_config(decode_conf_dict)
    status = data.status.lower()
    data.HP_gpu = torch.cuda.is_available()
    print("MODEL: decode")
    data.load(data.dset_dir)
    ## needed after data.load(data.dset_dir)
    data.read_config(decode_conf_dict)
    print(data.raw_dir)
    # exit(0)
    data.show_data_summary()
    data.generate_instance('raw')
    print("nbest: %s"%(data.nbest))
    decode_results, pred_scores = load_model_decode(data, 'raw')
    if data.nbest:
        data.write_nbest_decoded_results(decode_results, pred_scores, 'raw')
    else:
        data.write_decoded_results(decode_results, 'raw')
    # convert to the same format as yaset
    with open(decode_conf_dict['decode_dir'], 'r') as f:
        predictions = f.read().splitlines()
    new_preds = []
    for l in predictions:
        if l != '':
            # drop comment lines starting with '#'
            if l[0] != '#':
                ll = '\t'.join(l.split(' ')) + '\n'
                new_preds.append(ll)
        elif l == '':
            new_preds.append('\n')
    with open(decode_conf_dict['decode_dir'], 'w') as f:
        f.writelines(new_preds)
    print('end')
    return


def yaset_pred(path2model, pathgold, path2save):
    ''' Apply a yaset model via its CLI and get the predictions.'''
    print('Applying yaset model...(1 to 2 mins)')
    apply_yaset = 'yaset APPLY --working-dir '+path2save+' --input-file '+pathgold+' --model-path '+ path2model
    os.system(apply_yaset)
    return
nilq/baby-python
python
from ._text import BufferText __all__ = [ "BufferText", ]
nilq/baby-python
python
#!/usr/bin/env python

import xml.etree.ElementTree as ET

import six

from leather import svg
from leather import theme


class Axis(object):
    """
    A horizontal or vertical chart axis.

    :param ticks:
        Instead of inferring tick values from the data, use exactly this
        sequence of ticks values. These will still be passed to the
        :code:`tick_formatter`.
    :param tick_formatter:
        An optional :func:`.tick_format_function`.
    :param name:
        An optional axis title, rendered alongside the ticks.
    """
    def __init__(self, ticks=None, tick_formatter=None, name=None):
        self._ticks = ticks
        self._tick_formatter = tick_formatter
        self._name = six.text_type(name) if name is not None else None

    def _estimate_left_tick_width(self, scale):
        """
        Estimate the y axis space used by tick labels, as the widest
        formatted label times the theme's per-character width.
        """
        tick_values = self._ticks or scale.ticks()
        tick_count = len(tick_values)
        tick_formatter = self._tick_formatter or scale.format_tick
        max_len = 0
        for i, value in enumerate(tick_values):
            max_len = max(max_len, len(tick_formatter(value, i, tick_count)))
        return max_len * theme.tick_font_char_width

    def estimate_label_margin(self, scale, orient):
        """
        Estimate the space needed for the tick labels (plus the axis
        title, if any). ``orient`` is "left" or "bottom".
        """
        margin = 0
        if orient == "left":
            margin += self._estimate_left_tick_width(scale) + (theme.tick_size * 2)
        elif orient == "bottom":
            margin += theme.tick_font_char_height + (theme.tick_size * 2)
        if self._name:
            margin += theme.axis_title_font_char_height + theme.axis_title_gap
        return margin

    def to_svg(self, width, height, scale, orient):
        """
        Render this axis to SVG elements.

        Returns a ``<g>`` element containing the (optional) title, one
        tick group per tick value, and places the zero tick last so it
        draws on top.
        """
        group = ET.Element("g")
        group.set("class", "axis " + orient)

        # Axis title
        if self._name is not None:
            if orient == "left":
                # Rotate the title 270 degrees alongside the tick labels.
                title_x = -(
                    self._estimate_left_tick_width(scale)
                    + theme.axis_title_gap
                )
                title_y = height / 2
                dy = ""
                transform = svg.rotate(270, title_x, title_y)
            elif orient == "bottom":
                title_x = width / 2
                title_y = (
                    height
                    + theme.tick_font_char_height
                    + (theme.tick_size * 2)
                    + theme.axis_title_gap
                )
                dy = "1em"
                transform = ""

            title = ET.Element(
                "text",
                x=six.text_type(title_x),
                y=six.text_type(title_y),
                dy=dy,
                fill=theme.axis_title_color,
                transform=transform,
            )
            title.set("text-anchor", "middle")
            title.set("font-family", theme.axis_title_font_family)
            title.text = self._name
            group.append(title)

        # Ticks: precompute the coordinates that do not vary per tick.
        # For "left", x1/x2 are fixed and y varies per tick; for "bottom"
        # it is the opposite. range_min/range_max define the projection
        # target range (inverted for "left" so values grow upward).
        if orient == "left":
            label_x = -(theme.tick_size * 2)
            x1 = -theme.tick_size
            x2 = width
            range_min = height
            range_max = 0
        elif orient == "bottom":
            label_y = height + (theme.tick_size * 2)
            y1 = 0
            y2 = height + theme.tick_size
            range_min = 0
            range_max = width

        tick_values = self._ticks or scale.ticks()
        tick_count = len(tick_values)
        tick_formatter = self._tick_formatter or scale.format_tick

        zero_tick_group = None

        for i, value in enumerate(tick_values):
            # Tick group (the zero tick is appended last, below)
            tick_group = ET.Element("g")
            tick_group.set("class", "tick")
            if value == 0:
                zero_tick_group = tick_group
            else:
                group.append(tick_group)

            # Tick line
            projected_value = scale.project(value, range_min, range_max)
            if value == 0:
                tick_color = theme.zero_color
            else:
                tick_color = theme.tick_color
            if orient == "left":
                y1 = projected_value
                y2 = projected_value
            elif orient == "bottom":
                x1 = projected_value
                x2 = projected_value
            tick = ET.Element(
                "line",
                x1=six.text_type(x1),
                y1=six.text_type(y1),
                x2=six.text_type(x2),
                y2=six.text_type(y2),
                stroke=tick_color,
            )
            tick.set("stroke-width", six.text_type(theme.tick_width))
            tick_group.append(tick)

            # Tick label
            if orient == "left":
                x = label_x
                y = projected_value
                dy = "0.32em"
                text_anchor = "end"
            elif orient == "bottom":
                x = projected_value
                y = label_y
                dy = "1em"
                text_anchor = "middle"
            label = ET.Element(
                "text",
                x=six.text_type(x),
                y=six.text_type(y),
                dy=dy,
                fill=theme.label_color,
            )
            label.set("text-anchor", text_anchor)
            label.set("font-family", theme.tick_font_family)
            value = tick_formatter(value, i, tick_count)
            label.text = six.text_type(value)
            tick_group.append(label)

        # Append the zero tick last so it renders above the others.
        if zero_tick_group is not None:
            group.append(zero_tick_group)

        return group


def tick_format_function(value, index, tick_count):
    """
    This example shows how to define a function to format tick values for
    display.

    :param value: The value to be formatted.
    :param index: The index of the tick.
    :param tick_count: The total number of ticks being displayed.
    :returns: A stringified tick value for display.
    """
    return six.text_type(value)
nilq/baby-python
python
import math

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F


class GaussianMixture(nn.Module):
    # Negative log-likelihood head: models each target dim as a 1-D
    # Gaussian mixture whose parameters are linear functions of the input.
    def __init__(self, n_mix, d_inp, learn_var=True, share_prior=False):
        super(GaussianMixture, self).__init__()
        """
            The current implementation is super simplified, treating each dim of
            the target as a one-dimensional Gaussian mixture with separate mixture
            weights if `share_prior == False` (default).

            When `share_prior == True`, all `d_tgt` target dims share the same
            mixture weights, which poses some inductive bias. However, neither is
            the optimal case, as correlations between the target dims are
            essentially ignored.

            Input:
                inp    : [... x d_inp]
                target : [... x d_tgt]

            Return:
                nll    : [... x d_tgt]
        """
        self.n_mix = n_mix
        # d_tgt is hard-coded to 1 in this simplified implementation.
        self.d_tgt = d_tgt = 1
        self.d_inp = d_inp
        self.learn_var = learn_var
        self.share_prior = share_prior
        # Per-component means (and optionally log-stds) as linear heads.
        self.mean = nn.Linear(d_inp, d_tgt * n_mix)
        if learn_var:
            self.var = nn.Linear(d_inp, d_tgt * n_mix, bias=False)
        if n_mix > 1:
            if share_prior:
                self.prior = nn.Linear(d_inp, n_mix)
            else:
                self.prior = nn.Linear(d_inp, d_tgt * n_mix)
        else:
            assert n_mix == 1, '`n_mix` must be positive integers'
        # log(1 / sqrt(2*pi)) — constant term of the Gaussian log-density.
        self.const = -0.5 * math.log(2 * math.pi)

    def log_likelihood(self, target, mean, log_std, log_prior=None):
        """
            Mixture log-likelihood per target dim.

            target    : [... x d_tgt]
            mean      : [... x d_tgt x n_mix]
            log_std   : [... x d_tgt x n_mix]
            log_prior : [... x d_tgt x n_mix] or None (None means n_mix == 1)
        """
        # Gaussian log-likelihood is not safe for half precision due to the
        # `log_std.exp()` operation, especially in the backward pass.
        # For simplicity, we use float32 for log-likelihood computation.
        tgt_ = target.unsqueeze(-1).float()
        mean = mean.float()

        # [... x d_tgt x n_mix] per-component Gaussian log-densities
        log_probs = self.const - log_std \
            - 0.5 * (((tgt_ - mean) / log_std.exp()) ** 2)

        if log_prior is None:
            # n_mix = 1: single component, just drop the mixture axis.
            log_prob = log_probs.squeeze(-1)
        else:
            log_prior = log_prior.float()
            # [... x d_tgt x n_mix] prior-weighted log-densities
            w_log_probs = log_prior + log_probs
            # Subtract the per-row max before logsumexp for extra numerical
            # headroom, then add it back.
            # [... x d_tgt x 1]
            max_w_log_prob = w_log_probs.max(-1, keepdim=True)[0]
            # [... x d_tgt]
            log_prob = torch.logsumexp(w_log_probs - max_w_log_prob, dim=-1) \
                + max_w_log_prob.squeeze(-1)

        return log_prob

    def forward(self, inp, target, return_mean=False):
        # Predict per-component means: [... x d_tgt x n_mix].
        mean = self.mean(inp)
        mean = mean.view(*mean.size()[:-1], self.d_tgt, self.n_mix)

        if self.learn_var:
            log_std = self.var(inp)
            log_std = log_std.view(*log_std.size()[:-1], self.d_tgt, self.n_mix)
        else:
            # Fixed unit variance (log_std = 0), broadcast over all dims.
            log_std = torch.zeros(1, dtype=inp.dtype, device=inp.device)

        if self.n_mix > 1:
            prior = self.prior(inp)
            if self.share_prior:
                # One weight vector shared across all target dims.
                prior = prior.view(*prior.size()[:-1], 1, self.n_mix)
            else:
                prior = prior.view(*prior.size()[:-1], self.d_tgt, self.n_mix)
            log_prior = F.log_softmax(prior, dim=-1)
        else:
            log_prior = None

        log_prob = self.log_likelihood(target, mean, log_std, log_prior)
        nll = - log_prob
        if return_mean:
            return nll, mean
        else:
            return nll
nilq/baby-python
python
#!/usr/bin/env python from iris_sdk.models.base_resource import BaseData from iris_sdk.models.data.rate_centers_list import RateCentersList from iris_sdk.models.maps.rate_centers import RateCentersMap class RateCentersData(RateCentersMap, BaseData): @property def total_count(self): return self.result_count @total_count.setter def total_count(self, total_count): self.result_count = total_count def __init__(self): self.rate_centers = RateCentersList()
nilq/baby-python
python
from rest_framework import permissions from rest_framework.generics import CreateAPIView from django.contrib.auth.models import User from rest_framework import viewsets from usuarios.serializers import UserSerializer class UserViewSet(viewsets.ModelViewSet): serializer_class = UserSerializer def get_queryset(self): queryset = User.objects.filter(username=self.request.user) return queryset class CreateUserView(CreateAPIView): model = User permission_classes = [ permissions.AllowAny ] serializer_class = UserSerializer
nilq/baby-python
python
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('lpp_test', '0003_auto_20191024_1701'), ] operations = [ migrations.AddField( model_name='uhost', name='name', field=models.CharField(default=b'', max_length=128, verbose_name='\u4e3b\u673a\u540d\u79f0'), ), ]
nilq/baby-python
python
import pglet from pglet import Icon def test_icon_add(): c = Icon(name="Mail", color="#FF7F50", size="tiny") assert isinstance(c, pglet.Control) assert isinstance(c, pglet.Icon) # raise Exception(s.get_cmd_str()) assert c.get_cmd_str() == ( 'icon color="#FF7F50" name="Mail" size="tiny"' ), "Test failed"
nilq/baby-python
python
# CyberHeist Lab - Beginner Level
# Good luck :D
#
# An intentionally silly CTF-style console "bank" with a password-guessing
# challenge. The quirky messages and logic are part of the game — do not
# "fix" them.

import hashlib
import binascii
import colorama
import cowsay

USERNAME = "Grubsy"
# SHA-256 hex digest of the real password (never stored in plaintext here).
PASSWORD = "4aa765fdbe4bf83f7a51a1af53b170ad9e2aab35a9b9f0b066fd069952cffe44"
# PASSWORD HINT: Tristan really likes noodles
# In order he likes:
# 1) udonnoodles (not the password)
# 2) ********noodles (probably the password)
# 3) ramannoodles (probably not the password)
# ASK HIM!

# This has nothing to do with completing this challenge :P
# The only purpose of this list is for text output.
# NOTE(review): '"outer" "limit"' is implicit string concatenation yielding
# "outerlimit" — possibly a missing comma, but left as-is (output only).
synonymsForBankButNotTheMoneyKindTheLandmassKindBecauseImHilariousAndLoveLongVariableNamesThatAreSuperDescriptive = [
    "edge", "side", "embankment", "levee", "border", "verge", "boundary",
    "margin", "rim", "fringe", "fringes", "flank", "brink", "perimeter",
    "circumference", "extremity", "periphery", "limit", "outer" "limit",
    "limits", "bound", "bounds", "literarymarge", "bourn", "skirt"
]


def handleSelection(selection):
    """Dispatch a validated main-menu choice (1-3); 4/quit is handled by run()."""
    # If the user selected "View our products"
    if selection == 1:
        print(colorama.Style.RESET_ALL)
        print("Wowee look at all these cool product names :D")
        synonymList = ""
        for synonym in synonymsForBankButNotTheMoneyKindTheLandmassKindBecauseImHilariousAndLoveLongVariableNamesThatAreSuperDescriptive:
            synonymList += synonym + "\n"
        cowsay.turtle(synonymList)
    # If the user selected "Talk to a professional banker"
    elif selection == 2:
        print(colorama.Style.RESET_ALL)
        cowsay.turtle("There's no such thing as a professional banker, hehe. :P")
    # If the user selected "Secret Login Portal (FOR ADMINS ONLY)"
    # This might be a place of interest...
    #############################################################################
    # !!!!!!!!!!!!!!!!!!!!!!!! SUPER SECURE CODE !!!!!!!!!!!!!!!!!!!!!!!!!!!!!! #
    #############################################################################
    elif selection == 3:
        print(colorama.Style.RESET_ALL)
        print(colorama.Back.BLUE + colorama.Fore.WHITE)
        try:
            username = input("Username: ")
            password = input("Password: ")
            # Hash the attempt so it can be compared against PASSWORD above.
            hashedPassword = hashlib.sha256(password.encode()).hexdigest()
        except ValueError:
            print("WRONG")
            print("Stop trying to rek my code ;__;")
        else:
            # NOTE: For future developer reference, USERNAME and PASSWORD are
            # displayed at the top of this file. :D
            print(colorama.Style.RESET_ALL)
            if username != USERNAME:
                print("WRONG USERNAME .____.")
            if hashedPassword != PASSWORD:
                print("WRONG PASSWORD ;__;")
            # Easter-egg responses for predictable wrong guesses:
            if password == PASSWORD:
                cowsay.turtle("You copy and pasted the PASSWORD variable as the password you silly goose :P" +
                              "\nThat variable is a SHA256 hash of the REAL password so it wont work :/ sry")
            if password == "ramannoodles":
                print("I told you it PROBABLY wasnt raman noodles but you tried it anyway, ya bum.")
            if password == "udonnoodles":
                cowsay.turtle("I love these noodles, their THIQness makes them superior to all other noodles.")
            # Success: both username and hashed password must match.
            if username == USERNAME and hashedPassword == PASSWORD:
                cowsay.dragon("CHALLENGE 1 COMPLETE \n Remember that username and password ;)")
                exit(1)
    #############################################################################
    #############################################################################
    #############################################################################


def run():
    """Main-menu loop: prompt until the user quits (choice 4)."""
    while True:
        print(colorama.Style.RESET_ALL)
        print(colorama.Back.YELLOW + colorama.Fore.RED)
        print("MAIN MENU")
        print("1) View our products")
        print("2) Talk to a professional banker")
        print("3) Secret Login Portal (FOR ADMINS ONLY)")
        print("4) Quit" + colorama.Style.RESET_ALL)
        print(colorama.Back.RED + colorama.Fore.WHITE)
        try:
            selection = int(input("Enter your choice: "))
        except ValueError:
            # Non-numeric input: complain and re-prompt.
            print(colorama.Style.RESET_ALL)
            cowsay.turtle("Plz be nice to my program ;__;")
            continue
        else:
            if selection not in set([1,2,3,4]):
                print(colorama.Style.RESET_ALL)
                cowsay.turtle(";__; computer sad, why u give invalid input? ;__;")
                continue
            if selection == 4:
                exit(1)
            handleSelection(selection)


if __name__ == "__main__":
    colorama.init()
    print(colorama.Back.WHITE + '\033[31m' + 'Welcome to Grubsy Banks Inc. - Your embankment needs are our business' + '\033[0m')
    print(colorama.Back.WHITE + '\033[31m' + "Banker - Someone who works on banks, not the money ones, the geographical ones :)" + '\033[0m')
    run()
nilq/baby-python
python
# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import absolute_import, division, print_function, unicode_literals import logging import click from ..data import EventList from ..maps import Map from ..cube import fill_map_counts log = logging.getLogger(__name__) @click.command("bin") @click.argument("event_file", type=str) @click.argument("reference_file", type=str) @click.argument("out_file", type=str) @click.option("--overwrite", is_flag=True, help="Overwrite existing files?") def cli_image_bin(event_file, reference_file, out_file, overwrite): """Bin events into an image. You have to give the event, reference and out FITS filename. """ log.info("Executing cli_image_bin") log.info("Reading {}".format(event_file)) events = EventList.read(event_file) log.info("Reading {}".format(reference_file)) m_ref = Map.read(reference_file) counts_map = Map.from_geom(m_ref.geom) fill_map_counts(counts_map, events) log.info("Writing {}".format(out_file)) counts_map.write(out_file, overwrite=overwrite)
nilq/baby-python
python
import json
from threading import Lock

from requests.exceptions import HTTPError

from py42.exceptions import Py42ChecksumNotFoundError
from py42.exceptions import Py42Error
from py42.exceptions import Py42HTTPError
from py42.exceptions import Py42SecurityPlanConnectionError
from py42.exceptions import raise_py42_error
from py42.sdk.queries.fileevents.file_event_query import FileEventQuery
from py42.sdk.queries.fileevents.filters.file_filter import MD5
from py42.sdk.queries.fileevents.filters.file_filter import SHA256
from py42.settings import debug


class SecurityModule(object):
    """Access point for security (file activity) data: plan storage lookup,
    legacy Endpoint Monitoring event retrieval, forensic file-event search,
    and streaming files by checksum."""

    def __init__(
        self, security_client, storage_client_factory, microservices_client_factory
    ):
        self._security_client = security_client
        self._storage_client_factory = storage_client_factory
        self._microservices_client_factory = microservices_client_factory
        # Cache of storage-node security clients keyed by node GUID, guarded
        # by a lock because callers may iterate plans from multiple threads.
        self._client_cache = {}
        self._client_cache_lock = Lock()

    @property
    def savedsearches(self):
        """A collection of methods related to retrieving forensic search data.

        Returns:
            :class: `py42._internal.clients.securitydata.SavedSearchClient`
        """
        return self._microservices_client_factory.get_saved_search_client()

    def get_security_plan_storage_info_list(self, user_uid):
        """Gets IDs (plan UID, node GUID, and destination GUID) for the storage nodes
        containing the file activity event data for the user with the given UID.
        `REST Documentation <https://console.us.code42.com/swagger/#/Feature/getStorageNode>`__

        Args:
            user_uid (str): The UID of the user to get plan storage information for.

        Returns:
            list[:class:`py42.modules.securitydata.PlanStorageInfo`]
        """
        locations = None
        try:
            response = self._security_client.get_security_event_locations(user_uid)
            locations = response[u"securityPlanLocationsByDestination"]
        except HTTPError as err:
            # 404 means the user simply has no security event locations;
            # any other HTTP failure is re-raised as a py42 error.
            if err.response.status_code == 404:
                pass
            else:
                raise_py42_error(err)
        if locations:
            plan_destination_map = _get_plan_destination_map(locations)
            selected_plan_infos = self._get_plan_storage_infos(plan_destination_map)
            if not selected_plan_infos:
                raise Py42SecurityPlanConnectionError(
                    u"Could not establish a connection to retrieve "
                    u"security events for user {}".format(user_uid)
                )
            return selected_plan_infos

    def get_all_plan_security_events(
        self,
        plan_storage_info,
        cursor=None,
        include_files=True,
        event_types=None,
        min_timestamp=None,
        max_timestamp=None,
    ):
        """Gets events for legacy Endpoint Monitoring file activity on removable media,
        in cloud sync folders, and browser uploads.
        `Support Article <https://support.code42.com/Administrator/6/Configuring/Endpoint_monitoring>`__

        Args:
            plan_storage_info (:class:`py42.sdk.modules.securitydata.PlanStorageInfo`):
                Information about storage nodes for a plan to get file event activity for.
            cursor (str, optional): A cursor position for only getting file events you did not
                previously get. Defaults to None.
            include_files (bool, optional): Whether to include the files related to the file
                events. Defaults to None.
            event_types: (str, optional): A comma-separated list of event types to filter by.
                Available options are:
                    - ``DEVICE_APPEARED``
                    - ``DEVICE_DISAPPEARED``
                    - ``DEVICE_FILE_ACTIVITY``
                    - ``PERSONAL_CLOUD_FILE_ACTIVITY``
                    - ``RESTORE_JOB``
                    - ``RESTORE_FILE``
                    - ``FILE_OPENED``
                    - ``RULE_MATCH``
                    - ``DEVICE_SCAN_RESULT``
                    - ``PERSONAL_CLOUD_SCAN_RESULT``
                Defaults to None.
            min_timestamp (float, optional): A POSIX timestamp representing the beginning of the
                date range of events to get. Defaults to None.
            max_timestamp (float, optional): A POSIX timestamp representing the end of the date
                range of events to get. Defaults to None.

        Returns:
            generator: An object that iterates over :class:`py42.response.Py42Response` objects
            that each contain a page of events.
        """
        return self._get_security_detection_events(
            plan_storage_info,
            cursor,
            include_files,
            event_types,
            min_timestamp,
            max_timestamp,
        )

    def get_all_user_security_events(
        self,
        user_uid,
        cursor=None,
        include_files=True,
        event_types=None,
        min_timestamp=None,
        max_timestamp=None,
    ):
        """Gets legacy Endpoint Monitoring file activity events for the user with
        the given UID.

        Args:
            user_uid (str): The UID of the user to get security events for.
            cursor (str, optional): A cursor position for only getting events you did not
                previously get. Defaults to None.
            include_files (bool, optional): Whether to include the files related to the file
                activity events. Defaults to None.
            event_types: (str, optional): A comma-separated list of event types to filter by.
                Available options are:
                    - ``DEVICE_APPEARED``
                    - ``DEVICE_DISAPPEARED``
                    - ``DEVICE_FILE_ACTIVITY``
                    - ``PERSONAL_CLOUD_FILE_ACTIVITY``
                    - ``RESTORE_JOB``
                    - ``RESTORE_FILE``
                    - ``FILE_OPENED``
                    - ``RULE_MATCH``
                    - ``DEVICE_SCAN_RESULT``
                    - ``PERSONAL_CLOUD_SCAN_RESULT``
                Defaults to None.
            min_timestamp (float, optional): A POSIX timestamp representing the beginning of the
                date range of events to get. Defaults to None.
            max_timestamp (float, optional): A POSIX timestamp representing the end of the date
                range of events to get. Defaults to None.

        Returns:
            generator: An object that iterates over :class:`py42.response.Py42Response` objects
            that each contain a page of events.
        """
        # Resolve every storage node holding this user's events, then page
        # through each of them with the shared generator.
        security_plan_storage_infos = self.get_security_plan_storage_info_list(user_uid)
        return self._get_security_detection_events(
            security_plan_storage_infos,
            cursor,
            include_files,
            event_types,
            min_timestamp,
            max_timestamp,
        )

    def search_file_events(self, query):
        """Searches for file events.
        `REST Documentation <https://support.code42.com/Administrator/Cloud/Monitoring_and_managing/Forensic_File_Search_API>`__

        Args:
            query (:class:`py42.sdk.queries.fileevents.file_event_query.FileEventQuery`): Also
                accepts a raw JSON str.

        Returns:
            :class:`py42.response.Py42Response`: A response containing the first 10,000 events.
        """
        file_event_client = self._microservices_client_factory.get_file_event_client()
        return file_event_client.search(query)

    def _search_by_hash(self, hash, type):
        # Run a forensic search filtered on a single checksum filter class
        # (`type` is the MD5 or SHA256 filter) and return the raw event list.
        query = FileEventQuery.all(type.eq(hash))
        response = self.search_file_events(query)
        return response[u"fileEvents"]

    def _find_file_versions(self, md5_hash, sha256_hash):
        # Yields preservation-data-service version lookups for every
        # device/path where the file with these checksums was seen.
        file_event_client = self._microservices_client_factory.get_file_event_client()
        pds_client = (
            self._microservices_client_factory.get_preservation_data_service_client()
        )
        response = file_event_client.get_file_location_detail_by_sha256(sha256_hash)
        # NOTE(review): `and` looks suspicious here — when u"locations" is
        # missing entirely, the right-hand side indexes the missing key; an
        # `or` may have been intended. Confirm against upstream py42.
        if u"locations" not in response and not len(response[u"locations"]):
            raise Py42Error(
                u"PDS service can't find requested file "
                u"with md5 hash {} and sha256 hash {}.".format(md5_hash, sha256_hash)
            )
        for device_id, paths in _parse_file_location_response(response):
            try:
                yield pds_client.find_file_versions(
                    md5_hash, sha256_hash, device_id, paths
                )
            except Py42HTTPError as err:
                # API searches multiple paths to find the file to be streamed, as returned by
                # 'get_file_location_detail_by_sha256', hence we keep looking until we find a stream
                # to return
                debug.logger.warning(
                    u"Failed to find any file version for md5 hash {} / sha256 hash {}. "
                    u"Error: ".format(md5_hash, sha256_hash),
                    err,
                )

    def _stream_file(self, file_generator, checksum):
        # Try each candidate version until one storage node serves the file;
        # 204 means "no content on this node", so move on to the next.
        for response in file_generator:
            if response.status_code == 204:
                continue
            try:
                storage_node_client = self._microservices_client_factory.create_storage_preservation_client(
                    response[u"storageNodeURL"]
                )
                token = storage_node_client.get_download_token(
                    response[u"archiveGuid"],
                    response[u"fileId"],
                    response[u"versionTimestamp"],
                )
                return storage_node_client.get_file(str(token))
            except Py42HTTPError:
                # API searches multiple paths to find the file to be streamed, as returned by
                # 'get_file_location_detail_by_sha256', hence we keep looking until we find a stream
                # to return
                debug.logger.warning(
                    u"Failed to stream file with hash {}, info: {}.".format(
                        checksum, response.text
                    )
                )
        raise Py42Error(
            u"No file with hash {} available for download on any storage node.".format(
                checksum
            )
        )

    def stream_file_by_sha256(self, checksum):
        """Stream file based on SHA256 checksum.

        Args:
            checksum (str): SHA256 hash of the file.

        Returns:
            Returns a stream of the requested file.
        """
        events = self._search_by_hash(checksum, SHA256)
        if not len(events):
            raise Py42ChecksumNotFoundError(u"SHA256", checksum)
        # The PDS lookup needs both hashes; take the MD5 from the first event.
        md5_hash = events[0][u"md5Checksum"]
        return self._stream_file(self._find_file_versions(md5_hash, checksum), checksum)

    def stream_file_by_md5(self, checksum):
        """Stream file based on MD5 checksum.

        Args:
            checksum (str): MD5 hash of the file.

        Returns:
            Returns a stream of the requested file.
        """
        events = self._search_by_hash(checksum, MD5)
        if not len(events):
            raise Py42ChecksumNotFoundError(u"MD5", checksum)
        # The PDS lookup needs both hashes; take the SHA256 from the first event.
        sha256_hash = events[0][u"sha256Checksum"]
        return self._stream_file(
            self._find_file_versions(checksum, sha256_hash), checksum
        )

    def _get_plan_storage_infos(self, plan_destination_map):
        # Collect one reachable PlanStorageInfo per plan UID (plans whose
        # destinations are all unreachable are silently dropped).
        plan_infos = []
        for plan_uid in plan_destination_map:
            destinations = plan_destination_map[plan_uid]
            storage_info = self._get_storage_info_for_plan(plan_uid, destinations)
            if storage_info:
                plan_infos.append(storage_info)
        return plan_infos

    def _get_storage_info_for_plan(self, plan_uid, destinations):
        for destination in destinations:
            # try to connect to every storage node for this plan until one works
            plan_storage_info = self._get_storage_info_for_plan_destination(
                plan_uid, destination
            )
            if plan_storage_info:
                return plan_storage_info

    def _get_storage_info_for_plan_destination(self, plan_uid, destination):
        try:
            destination_guid = destination[u"destinationGuid"]
            node_guid = destination[u"nodeGuid"]
            plan_storage_info = PlanStorageInfo(plan_uid, destination_guid, node_guid)
            self._try_get_security_detection_event_client(plan_storage_info)
            return plan_storage_info
        except HTTPError:
            # This function is called in a loop until we get a result that is not None.
            # If all return None, then the calling function raises Py42SecurityPlanConnectionError.
            pass

    def _try_get_security_detection_event_client(self, plan_storage_info):
        # check if we have already created and stored this client
        client = self._client_cache.get(plan_storage_info.node_guid)

        # otherwise, create it
        if client is None:
            client = self._storage_client_factory.from_plan_info(
                plan_storage_info.plan_uid, plan_storage_info.destination_guid
            ).securitydata

            # store this client via its guid so that we don't have to call StorageAuthToken
            # just to determine what storage client to use
            with self._client_cache_lock:
                self._client_cache.update({plan_storage_info.node_guid: client})

        return client

    def _get_security_detection_events(
        self,
        plan_storage_infos,
        cursor,
        include_files,
        event_types,
        min_timestamp,
        max_timestamp,
    ):
        # Generator yielding (response, cursor) pages across all given plans.
        if not isinstance(plan_storage_infos, (list, tuple)):
            plan_storage_infos = [plan_storage_infos]

        # get the storage node client for each plan
        for plan_storage_info in plan_storage_infos:
            client = self._try_get_security_detection_event_client(plan_storage_info)
            started = False

            # get all pages of events for this plan
            while cursor or not started:
                started = True
                response = client.get_plan_security_events(
                    plan_storage_info.plan_uid,
                    cursor=cursor,
                    include_files=include_files,
                    event_types=event_types,
                    min_timestamp=min_timestamp,
                    max_timestamp=max_timestamp,
                )

                if response.text:
                    # we use json.loads here because the cursor prop doesn't appear
                    # on responses that have no results
                    cursor = json.loads(response.text).get(u"cursor")
                    # if there are no results, we don't get a cursor and have reached the end
                    if cursor:
                        yield response, cursor


def _get_plan_destination_map(locations_list):
    # Flatten the nested location payload into {plan_uid: [destination dicts]}.
    plan_destination_map = {}
    for plans in _get_destinations_in_locations_list(locations_list):
        for plan_uid in plans:
            plan_destination_map[plan_uid] = plans[plan_uid]
    return plan_destination_map


def _get_destinations_in_locations_list(locations_list):
    for destination in locations_list:
        for node in destination[u"securityPlanLocationsByNode"]:
            yield _get_plans_in_node(destination, node)


def _get_plans_in_node(destination, node):
    # One-element destination/node list per plan UID found on this node.
    return {
        plan_uid: [
            {
                u"destinationGuid": destination[u"destinationGuid"],
                u"nodeGuid": node[u"nodeGuid"],
            }
        ]
        for plan_uid in node[u"securityPlanUids"]
    }


def _parse_file_location_response(response):
    # Yields (device_uid, [full_path]) pairs for each reported file location.
    for location in response[u"locations"]:
        paths = []
        file_name = location[u"fileName"]
        device_id = location[u"deviceUid"]
        paths.append(u"{}{}".format(location[u"filePath"], file_name))
        yield device_id, paths


class PlanStorageInfo(object):
    """Immutable triple identifying where a plan's security data is stored."""

    def __init__(self, plan_uid, destination_guid, node_guid):
        self._plan_uid = plan_uid
        self._destination_uid = destination_guid
        self._node_guid = node_guid

    @property
    def plan_uid(self):
        """The UID of the storage plan."""
        return self._plan_uid

    @property
    def destination_guid(self):
        """The GUID of the destination containing the storage archive."""
        return self._destination_uid

    @property
    def node_guid(self):
        """The GUID of the storage node containing the archive."""
        return self._node_guid
nilq/baby-python
python
from trame import state
from trame.html import vuetify, Element, simput

from ..engine.simput import KeyDatabase


def update_cycle_list(*args, **kwargs):
    """Rebuild the reactive ``cycleIds``/``subCycleIds`` state from the
    simput proxy manager (extra args accepted so it can be used as a
    change callback)."""
    pxm = KeyDatabase().pxm

    cycleIds = []
    subCycleIds = {}
    for cycle in pxm.get_instances_of_type("Cycle"):
        cycleIds.append(cycle.id)
        subCycleIds[cycle.id] = []
        for subCycleId in cycle.own:
            subCycleIds[cycle.id].append(subCycleId)

    state.cycleIds = cycleIds
    state.subCycleIds = subCycleIds


def create_cycle(proxy_type, owner_id=None, **kwargs):
    """Create a Cycle/SubCycle proxy, optionally attach it to an owner,
    refresh the UI state, and return the new proxy."""
    pxm = KeyDatabase().pxm
    proxy = pxm.create(proxy_type, **kwargs)
    if owner_id is not None:
        owner = pxm.get(owner_id)
        owner._own.add(proxy.id)
    update_cycle_list()
    return proxy


def delete_cycle(id, proxy_type, owner_id=None):
    """Delete a proxy by id, detaching it from its owner first.
    NOTE(review): ``proxy_type`` is unused here; it is kept because the UI
    click bindings pass it positionally."""
    pxm = KeyDatabase().pxm
    if owner_id is not None:
        owner = pxm.get(owner_id)
        owner._own.remove(id)
    pxm.delete(id)
    update_cycle_list()


def initialize():
    """Seed default cycles: a constant all-time cycle and a rain/recession pair."""
    state.update({
        "cycleIds": [],
        "subCycleIds": {},
    })
    cycle = create_cycle("Cycle", Name="constant", repeat=-1)
    create_cycle("SubCycle", cycle.id, Name="alltime", Length=1)
    cycle = create_cycle("Cycle", Name="rainrec", repeat=-1)
    create_cycle("SubCycle", cycle.id, Name="rain")
    create_cycle("SubCycle", cycle.id, Name="rec")


def create_ui():
    """Declare the Timing/Cycles editor layout (one editable row per cycle,
    nested rows per sub-cycle, plus add/delete buttons)."""
    Element("H1", "Timing")
    simput.SimputItem(itemId=("timingId",))
    Element("H1", "Cycles")
    # One container per cycle id in the reactive list.
    with vuetify.VContainer(v_for=("(cycleId, index) in cycleIds",), fluid=True):
        with vuetify.VContainer(style="display: flex;", fluid=True):
            simput.SimputItem(itemId=("cycleId",), style="flex-grow: 1;")
            with vuetify.VBtn(click=(delete_cycle, "[cycleId, 'Cycle']"), small=True, icon=True):
                vuetify.VIcon('mdi-delete')
        # Indented sub-cycle section for this cycle.
        with vuetify.VContainer(fluid=True, style="padding: 2rem;"):
            with vuetify.VContainer(v_for=("(subId, subI) in subCycleIds[cycleId]",), fluid=True, style="display: flex;"):
                simput.SimputItem(itemId=("subId",), style="flex-grow: 1;")
                with vuetify.VBtn(click=(delete_cycle, "[subId, 'SubCycle', cycleId]"), small=True, icon=True):
                    vuetify.VIcon('mdi-delete')
            with vuetify.VBtn(click=(create_cycle, "['SubCycle', cycleId]")):
                vuetify.VIcon('mdi-plus')
                Element("span", "Add Sub Cycle")
    with vuetify.VBtn(click=(create_cycle, "['Cycle']")):
        vuetify.VIcon('mdi-plus')
        Element("span", "Add Cycle")
nilq/baby-python
python
from typing import List


def pascal(N: int) -> List[int]:
    """Return the Nth (1-indexed) row of Pascal's triangle.

    Args:
        N: Row number; row 1 is ``[1]``, row 2 is ``[1, 1]``, etc.

    Returns:
        The Nth row as a list of ints; an empty list when ``N <= 0``
        (the original crashed with IndexError for negative N).
    """
    if N <= 0:
        return []
    row = [1]
    # Each successive row is 1, pairwise sums of the previous row, 1.
    # This replaces the original's redundant i == 3 special case and
    # avoids retaining every intermediate row.
    for _ in range(N - 1):
        row = [1] + [a + b for a, b in zip(row, row[1:])] + [1]
    return row
nilq/baby-python
python
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import paddle.fluid.core as core import paddle import numpy as np from paddle.fluid.framework import _test_eager_guard, EagerParamBase, _in_eager_mode from paddle.fluid.data_feeder import convert_dtype import unittest import copy import paddle.compat as cpt class EagerScaleTestCase(unittest.TestCase): def test_scale_base(self): with _test_eager_guard(): paddle.set_device("cpu") arr = np.ones([4, 16, 16, 32]).astype('float32') tensor = paddle.to_tensor(arr, 'float32', core.CPUPlace()) print(tensor) tensor = core.eager.scale(tensor, 2.0, 0.9, True, False) for i in range(0, 100): tensor = core.eager.scale(tensor, 2.0, 0.9, True, False) print(tensor) self.assertEqual(tensor.shape, [4, 16, 16, 32]) self.assertEqual(tensor.stop_gradient, True) def test_retain_grad_and_run_backward(self): with _test_eager_guard(): paddle.set_device("cpu") input_data = np.ones([4, 16, 16, 32]).astype('float32') data_eager = paddle.to_tensor(input_data, 'float32', core.CPUPlace(), False) grad_data = np.ones([4, 16, 16, 32]).astype('float32') grad_eager = paddle.to_tensor(grad_data, 'float32', core.CPUPlace()) data_eager.retain_grads() out_eager = core.eager.scale(data_eager, 1.0, 0.9, True, True) self.assertIsNone(data_eager.grad) out_eager.backward(grad_eager, False) self.assertIsNotNone(data_eager.grad) self.assertTrue(np.array_equal(data_eager.grad.numpy(), input_data)) def 
test_retain_grad_and_run_backward_raises(self): with _test_eager_guard(): paddle.set_device("cpu") input_data = np.ones([4, 16, 16, 32]).astype('float32') data_eager = paddle.to_tensor(input_data, 'float32', core.CPUPlace(), False) grad_data = np.ones([4, 16, 16, 32]).astype('float32') grad_data2 = np.ones([4, 16]).astype('float32') grad_eager = paddle.to_tensor(grad_data, 'float32', core.CPUPlace()) grad_eager2 = paddle.to_tensor(grad_data2, 'float32', core.CPUPlace()) data_eager.retain_grads() out_eager = core.eager.scale(data_eager, 1.0, 0.9, True, True) self.assertIsNone(data_eager.grad) with self.assertRaisesRegexp( AssertionError, "The type of grad_tensor must be paddle.Tensor"): out_eager.backward(grad_data, False) with self.assertRaisesRegexp( AssertionError, "Tensor shape not match, Tensor of grad_tensor /*"): out_eager.backward(grad_eager2, False) class EagerDtypeTestCase(unittest.TestCase): def check_to_tesnsor_and_numpy(self, dtype, proto_dtype): with _test_eager_guard(): arr = np.random.random([4, 16, 16, 32]).astype(dtype) tensor = paddle.to_tensor(arr, dtype) self.assertEqual(tensor.dtype, proto_dtype) self.assertTrue(np.array_equal(arr, tensor.numpy())) def test_dtype_base(self): print("Test_dtype") self.check_to_tesnsor_and_numpy('bool', core.VarDesc.VarType.BOOL) self.check_to_tesnsor_and_numpy('int8', core.VarDesc.VarType.INT8) self.check_to_tesnsor_and_numpy('uint8', core.VarDesc.VarType.UINT8) self.check_to_tesnsor_and_numpy('int16', core.VarDesc.VarType.INT16) self.check_to_tesnsor_and_numpy('int32', core.VarDesc.VarType.INT32) self.check_to_tesnsor_and_numpy('int64', core.VarDesc.VarType.INT64) self.check_to_tesnsor_and_numpy('float16', core.VarDesc.VarType.FP16) self.check_to_tesnsor_and_numpy('float32', core.VarDesc.VarType.FP32) self.check_to_tesnsor_and_numpy('float64', core.VarDesc.VarType.FP64) self.check_to_tesnsor_and_numpy('complex64', core.VarDesc.VarType.COMPLEX64) self.check_to_tesnsor_and_numpy('complex128', 
core.VarDesc.VarType.COMPLEX128) class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): def constructor(self, place): egr_tensor = core.eager.Tensor() self.assertEqual(egr_tensor.persistable, False) self.assertTrue("generated" in egr_tensor.name) self.assertEqual(egr_tensor.shape, []) self.assertEqual(egr_tensor.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor.stop_gradient, True) egr_tensor0 = core.eager.Tensor(core.VarDesc.VarType.FP32, [4, 16, 16, 32], "test_eager_tensor", core.VarDesc.VarType.LOD_TENSOR, True) self.assertEqual(egr_tensor0.persistable, True) self.assertEqual(egr_tensor0.name, "test_eager_tensor") self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32]) self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP32) arr0 = np.random.rand(4, 16, 16, 32).astype('float32') egr_tensor1 = core.eager.Tensor(arr0, place, True, False, "numpy_tensor1", False) self.assertEqual(egr_tensor1.persistable, True) self.assertEqual(egr_tensor1.name, "numpy_tensor1") self.assertEqual(egr_tensor1.shape, [4, 16, 16, 32]) self.assertEqual(egr_tensor1.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor1.stop_gradient, False) self.assertTrue(egr_tensor1.place._equals(place)) self.assertTrue(np.array_equal(egr_tensor1.numpy(), arr0)) arr1 = np.random.randint(100, size=(4, 16, 16, 32), dtype=np.int64) egr_tensor2 = core.eager.Tensor(arr1, place, False, True, "numpy_tensor2", True) self.assertEqual(egr_tensor2.persistable, False) self.assertEqual(egr_tensor2.name, "numpy_tensor2") self.assertEqual(egr_tensor2.shape, [4, 16, 16, 32]) self.assertEqual(egr_tensor2.dtype, core.VarDesc.VarType.INT64) self.assertEqual(egr_tensor2.stop_gradient, True) self.assertTrue(egr_tensor2.place._equals(place)) self.assertTrue(np.array_equal(egr_tensor2.numpy(), arr1)) arr2 = np.random.rand(4, 16, 16, 32, 64).astype('float32') egr_tensor3 = core.eager.Tensor(arr2) self.assertEqual(egr_tensor3.persistable, False) self.assertTrue("generated_tensor" in 
egr_tensor3.name) self.assertEqual(egr_tensor3.shape, [4, 16, 16, 32, 64]) self.assertEqual(egr_tensor3.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor3.stop_gradient, True) self.assertTrue( egr_tensor3.place._equals( paddle.fluid.framework._current_expected_place())) self.assertTrue(np.array_equal(egr_tensor3.numpy(), arr2)) egr_tensor3.stop_gradient = False egr_tensor4 = core.eager.Tensor(egr_tensor3) self.assertEqual(egr_tensor4.persistable, False) self.assertTrue("generated_tensor" in egr_tensor4.name) self.assertEqual(egr_tensor4.shape, egr_tensor3.shape) self.assertEqual(egr_tensor4.dtype, egr_tensor3.dtype) self.assertEqual(egr_tensor4.stop_gradient, True) self.assertTrue( egr_tensor4.place._equals( paddle.fluid.framework._current_expected_place())) self.assertTrue( np.array_equal(egr_tensor4.numpy(), egr_tensor3.numpy())) arr4 = np.random.rand(4, 16, 16, 32).astype('float32') egr_tensor5 = core.eager.Tensor(arr4, place) self.assertEqual(egr_tensor5.persistable, False) self.assertTrue("generated_tensor" in egr_tensor5.name) self.assertEqual(egr_tensor5.shape, [4, 16, 16, 32]) self.assertEqual(egr_tensor5.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor5.stop_gradient, True) self.assertTrue(egr_tensor5.place._equals(place)) self.assertTrue(np.array_equal(egr_tensor5.numpy(), arr4)) egr_tensor6 = core.eager.Tensor(egr_tensor5, core.CPUPlace()) self.assertEqual(egr_tensor6.persistable, False) self.assertTrue("generated_tensor" in egr_tensor6.name) self.assertEqual(egr_tensor6.shape, [4, 16, 16, 32]) self.assertEqual(egr_tensor6.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor6.stop_gradient, True) self.assertEqual(egr_tensor6.place.is_cpu_place(), True) self.assertTrue( np.array_equal(egr_tensor6.numpy(), egr_tensor5.numpy())) egr_tensor7 = core.eager.Tensor(arr4, place, True) self.assertEqual(egr_tensor7.persistable, True) self.assertTrue("generated_tensor" in egr_tensor7.name) self.assertEqual(egr_tensor7.shape, [4, 16, 
16, 32]) self.assertEqual(egr_tensor7.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor7.stop_gradient, True) self.assertTrue(egr_tensor7.place._equals(place)) self.assertTrue(np.array_equal(egr_tensor7.numpy(), arr4)) egr_tensor8 = core.eager.Tensor(egr_tensor6, place, "egr_tensor8") self.assertEqual(egr_tensor8.persistable, False) self.assertEqual(egr_tensor8.name, "egr_tensor8") self.assertEqual(egr_tensor8.shape, [4, 16, 16, 32]) self.assertEqual(egr_tensor8.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor8.stop_gradient, True) self.assertTrue(egr_tensor8.place._equals(place)) self.assertTrue( np.array_equal(egr_tensor8.numpy(), egr_tensor5.numpy())) egr_tensor9 = core.eager.Tensor(arr4, place, True, True) self.assertEqual(egr_tensor9.persistable, True) self.assertTrue("generated_tensor" in egr_tensor9.name) self.assertEqual(egr_tensor9.shape, [4, 16, 16, 32]) self.assertEqual(egr_tensor9.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor9.stop_gradient, True) self.assertTrue(egr_tensor9.place._equals(place)) self.assertTrue(np.array_equal(egr_tensor9.numpy(), arr4)) x = np.random.rand(3, 3).astype('float32') t = paddle.fluid.Tensor() t.set(x, paddle.fluid.CPUPlace()) egr_tensor10 = core.eager.Tensor(t, place) self.assertEqual(egr_tensor10.persistable, False) self.assertTrue("generated_tensor" in egr_tensor10.name) self.assertEqual(egr_tensor10.shape, [3, 3]) self.assertEqual(egr_tensor10.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor10.stop_gradient, True) self.assertTrue(egr_tensor10.place._equals(place)) self.assertTrue(np.array_equal(egr_tensor10.numpy(), x)) egr_tensor11 = core.eager.Tensor(t, place, "framework_constructed") self.assertEqual(egr_tensor11.persistable, False) self.assertTrue("framework_constructed" in egr_tensor11.name) self.assertEqual(egr_tensor11.shape, [3, 3]) self.assertEqual(egr_tensor11.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor11.stop_gradient, True) 
self.assertTrue(egr_tensor11.place._equals(place)) self.assertTrue(np.array_equal(egr_tensor11.numpy(), x)) egr_tensor12 = core.eager.Tensor(t) self.assertEqual(egr_tensor12.persistable, False) self.assertTrue("generated_tensor" in egr_tensor12.name) self.assertEqual(egr_tensor12.shape, [3, 3]) self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor12.stop_gradient, True) self.assertTrue(egr_tensor12.place._equals(paddle.fluid.CPUPlace())) self.assertTrue(np.array_equal(egr_tensor12.numpy(), x)) egr_tensor13 = paddle.randn([2, 2]) self.assertTrue("eager_tmp" in egr_tensor13.name) with self.assertRaisesRegexp( ValueError, "The shape of Parameter should not be None"): eager_param = EagerParamBase(shape=None, dtype="float32") with self.assertRaisesRegexp( ValueError, "The dtype of Parameter should not be None"): eager_param = EagerParamBase(shape=[1, 1], dtype=None) with self.assertRaisesRegexp( ValueError, "The dimensions of shape for Parameter must be greater than 0"): eager_param = EagerParamBase(shape=[], dtype="float32") with self.assertRaisesRegexp( ValueError, "Each dimension of shape for Parameter must be greater than 0, but received /*" ): eager_param = EagerParamBase(shape=[-1], dtype="float32") eager_param = EagerParamBase(shape=[1, 1], dtype="float32") self.assertTrue(eager_param.trainable) eager_param.trainable = False self.assertFalse(eager_param.trainable) with self.assertRaisesRegexp( ValueError, "The type of trainable MUST be bool, but the type is /*"): eager_param.trainable = "False" def test_constructor(self): print("Test_constructor") paddle.set_device("cpu") place_list = [core.CPUPlace()] if core.is_compiled_with_cuda(): place_list.append(core.CUDAPlace(0)) with _test_eager_guard(): for p in place_list: self.constructor(p) def constructor_with_kwargs(self, place): # init Tensor by Python array arr = np.random.rand(4, 16, 16, 32).astype('float32') egr_tensor0 = core.eager.Tensor(value=arr) 
self.assertEqual(egr_tensor0.persistable, False) self.assertTrue("generated" in egr_tensor0.name) self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32]) self.assertTrue( egr_tensor0.place._equals( paddle.fluid.framework._current_expected_place())) self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor0.stop_gradient, True) egr_tensor1 = core.eager.Tensor(value=arr, place=place) self.assertEqual(egr_tensor1.persistable, False) self.assertTrue("generated" in egr_tensor1.name) self.assertEqual(egr_tensor1.shape, [4, 16, 16, 32]) self.assertTrue(egr_tensor1.place._equals(place)) self.assertEqual(egr_tensor1.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor1.stop_gradient, True) egr_tensor2 = core.eager.Tensor(arr, place=place) self.assertEqual(egr_tensor2.persistable, False) self.assertTrue("generated" in egr_tensor2.name) self.assertEqual(egr_tensor2.shape, [4, 16, 16, 32]) self.assertTrue(egr_tensor2.place._equals(place)) self.assertEqual(egr_tensor2.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor2.stop_gradient, True) egr_tensor3 = core.eager.Tensor( arr, place=place, name="new_eager_tensor") self.assertEqual(egr_tensor3.persistable, False) self.assertTrue("new_eager_tensor" in egr_tensor3.name) self.assertEqual(egr_tensor3.shape, [4, 16, 16, 32]) self.assertTrue(egr_tensor3.place._equals(place)) self.assertEqual(egr_tensor3.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor3.stop_gradient, True) egr_tensor4 = core.eager.Tensor( arr, place=place, persistable=True, name="new_eager_tensor") self.assertEqual(egr_tensor4.persistable, True) self.assertTrue("new_eager_tensor" in egr_tensor4.name) self.assertEqual(egr_tensor4.shape, [4, 16, 16, 32]) self.assertTrue(egr_tensor4.place._equals(place)) self.assertEqual(egr_tensor4.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor4.stop_gradient, True) egr_tensor5 = core.eager.Tensor( arr, core.CPUPlace(), persistable=True, 
name="new_eager_tensor", zero_copy=True) self.assertEqual(egr_tensor5.persistable, True) self.assertTrue("new_eager_tensor" in egr_tensor5.name) self.assertEqual(egr_tensor5.shape, [4, 16, 16, 32]) self.assertTrue(egr_tensor5.place.is_cpu_place()) self.assertEqual(egr_tensor5.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor5.stop_gradient, True) egr_tensor6 = core.eager.Tensor( arr, place=core.CPUPlace(), persistable=True, name="new_eager_tensor", zero_copy=True) self.assertEqual(egr_tensor6.persistable, True) self.assertTrue("new_eager_tensor" in egr_tensor6.name) self.assertEqual(egr_tensor6.shape, [4, 16, 16, 32]) self.assertTrue(egr_tensor6.place.is_cpu_place()) self.assertEqual(egr_tensor6.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor6.stop_gradient, True) egr_tensor7 = core.eager.Tensor( arr, place=place, persistable=True, name="new_eager_tensor", zero_copy=True) self.assertEqual(egr_tensor7.persistable, True) self.assertTrue("new_eager_tensor" in egr_tensor7.name) self.assertEqual(egr_tensor7.shape, [4, 16, 16, 32]) self.assertTrue(egr_tensor7.place._equals(place)) self.assertEqual(egr_tensor7.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor7.stop_gradient, True) egr_tensor8 = core.eager.Tensor( arr, place=place, persistable=True, name="new_eager_tensor", zero_copy=True, stop_gradient=False) self.assertEqual(egr_tensor8.persistable, True) self.assertTrue("new_eager_tensor" in egr_tensor8.name) self.assertEqual(egr_tensor8.shape, [4, 16, 16, 32]) self.assertTrue(egr_tensor8.place._equals(place)) self.assertEqual(egr_tensor8.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor8.stop_gradient, False) egr_tensor9 = core.eager.Tensor( arr, place, True, True, "new_eager_tensor", stop_gradient=False) self.assertEqual(egr_tensor9.persistable, True) self.assertTrue("new_eager_tensor" in egr_tensor9.name) self.assertEqual(egr_tensor9.shape, [4, 16, 16, 32]) self.assertTrue(egr_tensor9.place._equals(place)) 
self.assertEqual(egr_tensor9.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor9.stop_gradient, False) egr_tensor10 = core.eager.Tensor( arr, place, True, True, name="new_eager_tensor", stop_gradient=False) self.assertEqual(egr_tensor10.persistable, True) self.assertTrue("new_eager_tensor" in egr_tensor10.name) self.assertEqual(egr_tensor10.shape, [4, 16, 16, 32]) self.assertTrue(egr_tensor10.place._equals(place)) self.assertEqual(egr_tensor10.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor10.stop_gradient, False) egr_tensor11 = core.eager.Tensor( arr, place, True, zero_copy=True, name="new_eager_tensor", stop_gradient=False) self.assertEqual(egr_tensor11.persistable, True) self.assertTrue("new_eager_tensor" in egr_tensor11.name) self.assertEqual(egr_tensor11.shape, [4, 16, 16, 32]) self.assertTrue(egr_tensor11.place._equals(place)) self.assertEqual(egr_tensor11.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor11.stop_gradient, False) egr_tensor12 = core.eager.Tensor( arr, place, persistable=True, zero_copy=True, name="new_eager_tensor", stop_gradient=False) self.assertEqual(egr_tensor12.persistable, True) self.assertTrue("new_eager_tensor" in egr_tensor12.name) self.assertEqual(egr_tensor12.shape, [4, 16, 16, 32]) self.assertTrue(egr_tensor12.place._equals(place)) self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor12.stop_gradient, False) egr_tensor13 = core.eager.Tensor( value=arr, place=place, persistable=True, zero_copy=True, name="new_eager_tensor", stop_gradient=False) self.assertEqual(egr_tensor13.persistable, True) self.assertTrue("new_eager_tensor" in egr_tensor13.name) self.assertEqual(egr_tensor13.shape, [4, 16, 16, 32]) self.assertTrue(egr_tensor13.place._equals(place)) self.assertEqual(egr_tensor13.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor13.stop_gradient, False) # special case egr_tensor14 = core.eager.Tensor( dtype=core.VarDesc.VarType.FP32, dims=[4, 16, 
16, 32], name="special_eager_tensor", type=core.VarDesc.VarType.LOD_TENSOR, persistable=True) self.assertEqual(egr_tensor14.persistable, True) self.assertEqual(egr_tensor14.name, "special_eager_tensor") self.assertEqual(egr_tensor14.shape, [4, 16, 16, 32]) self.assertEqual(egr_tensor14.dtype, core.VarDesc.VarType.FP32) # init Tensor by Tensor egr_tensor15 = core.eager.Tensor(value=egr_tensor4) self.assertEqual(egr_tensor15.persistable, True) self.assertTrue("generated" in egr_tensor15.name) self.assertEqual(egr_tensor15.shape, egr_tensor4.shape) self.assertEqual(egr_tensor15.dtype, egr_tensor4.dtype) self.assertEqual(egr_tensor15.stop_gradient, True) self.assertTrue( egr_tensor15.place._equals( paddle.fluid.framework._current_expected_place())) self.assertTrue( np.array_equal(egr_tensor15.numpy(), egr_tensor4.numpy())) egr_tensor16 = core.eager.Tensor( value=egr_tensor4, name="new_eager_tensor") self.assertEqual(egr_tensor16.persistable, True) self.assertTrue("new_eager_tensor" in egr_tensor16.name) self.assertEqual(egr_tensor16.shape, egr_tensor4.shape) self.assertEqual(egr_tensor16.dtype, egr_tensor4.dtype) self.assertEqual(egr_tensor16.stop_gradient, True) self.assertTrue( egr_tensor16.place._equals( paddle.fluid.framework._current_expected_place())) self.assertTrue( np.array_equal(egr_tensor16.numpy(), egr_tensor4.numpy())) egr_tensor17 = core.eager.Tensor( value=egr_tensor4, place=place, name="new_eager_tensor", ) self.assertEqual(egr_tensor17.persistable, True) self.assertTrue("new_eager_tensor" in egr_tensor17.name) self.assertEqual(egr_tensor17.shape, egr_tensor4.shape) self.assertEqual(egr_tensor17.dtype, egr_tensor4.dtype) self.assertEqual(egr_tensor17.stop_gradient, True) self.assertTrue(egr_tensor17.place._equals(place)) self.assertTrue( np.array_equal(egr_tensor17.numpy(), egr_tensor4.numpy())) egr_tensor18 = core.eager.Tensor( egr_tensor4, place=place, name="new_eager_tensor", ) self.assertEqual(egr_tensor18.persistable, True) 
self.assertTrue("new_eager_tensor" in egr_tensor18.name) self.assertEqual(egr_tensor18.shape, egr_tensor4.shape) self.assertEqual(egr_tensor18.dtype, egr_tensor4.dtype) self.assertEqual(egr_tensor18.stop_gradient, True) self.assertTrue(egr_tensor18.place._equals(place)) self.assertTrue( np.array_equal(egr_tensor18.numpy(), egr_tensor4.numpy())) egr_tensor19 = core.eager.Tensor( egr_tensor4, place, name="new_eager_tensor", ) self.assertEqual(egr_tensor19.persistable, True) self.assertTrue("new_eager_tensor" in egr_tensor19.name) self.assertEqual(egr_tensor19.shape, egr_tensor4.shape) self.assertEqual(egr_tensor19.dtype, egr_tensor4.dtype) self.assertEqual(egr_tensor19.stop_gradient, True) self.assertTrue(egr_tensor19.place._equals(place)) self.assertTrue( np.array_equal(egr_tensor19.numpy(), egr_tensor4.numpy())) # init eager tensor by framework tensor x = np.random.rand(3, 3).astype('float32') t = paddle.fluid.Tensor() t.set(x, paddle.fluid.CPUPlace()) egr_tensor20 = core.eager.Tensor(value=t) self.assertEqual(egr_tensor20.persistable, False) self.assertTrue("generated_tensor" in egr_tensor20.name) self.assertEqual(egr_tensor20.shape, [3, 3]) self.assertEqual(egr_tensor20.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor20.stop_gradient, True) self.assertTrue( egr_tensor20.place._equals( paddle.fluid.framework._current_expected_place())) self.assertTrue(np.array_equal(egr_tensor20.numpy(), x)) egr_tensor21 = core.eager.Tensor(value=t, place=place) self.assertEqual(egr_tensor21.persistable, False) self.assertTrue("generated_tensor" in egr_tensor21.name) self.assertEqual(egr_tensor21.shape, [3, 3]) self.assertEqual(egr_tensor21.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor21.stop_gradient, True) self.assertTrue(egr_tensor21.place._equals(place)) self.assertTrue(np.array_equal(egr_tensor21.numpy(), x)) egr_tensor22 = core.eager.Tensor(t, place=place) self.assertEqual(egr_tensor22.persistable, False) self.assertTrue("generated_tensor" in 
egr_tensor22.name) self.assertEqual(egr_tensor22.shape, [3, 3]) self.assertEqual(egr_tensor22.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor22.stop_gradient, True) self.assertTrue(egr_tensor22.place._equals(place)) self.assertTrue(np.array_equal(egr_tensor22.numpy(), x)) egr_tensor23 = core.eager.Tensor(t, place, name="from_framework_tensor") self.assertEqual(egr_tensor23.persistable, False) self.assertTrue("from_framework_tensor" in egr_tensor23.name) self.assertEqual(egr_tensor23.shape, [3, 3]) self.assertEqual(egr_tensor23.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor23.stop_gradient, True) self.assertTrue(egr_tensor23.place._equals(place)) self.assertTrue(np.array_equal(egr_tensor23.numpy(), x)) egr_tensor24 = core.eager.Tensor( value=t, place=place, name="from_framework_tensor") self.assertEqual(egr_tensor24.persistable, False) self.assertTrue("from_framework_tensor" in egr_tensor24.name) self.assertEqual(egr_tensor24.shape, [3, 3]) self.assertEqual(egr_tensor24.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor24.stop_gradient, True) self.assertTrue(egr_tensor24.place._equals(place)) self.assertTrue(np.array_equal(egr_tensor24.numpy(), x)) # Bad usage # SyntaxError: positional argument follows keyword argument # egr_tensor25 = core.eager.Tensor(value=t, place) def test_constructor_with_kwargs(self): print("Test_constructor_with_kwargs") paddle.set_device("cpu") place_list = [core.CPUPlace()] if core.is_compiled_with_cuda(): place_list.append(core.CUDAPlace(0)) with _test_eager_guard(): for p in place_list: self.constructor_with_kwargs(p) def test_copy_and_copy_to(self): print("Test_copy_and_copy_to") with _test_eager_guard(): paddle.set_device("cpu") arr = np.ones([4, 16, 16, 32]).astype('float32') arr1 = np.zeros([4, 16]).astype('float32') arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones( [4, 16, 16, 32]).astype('float32') tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32, core.CPUPlace()) 
self.assertEqual(tensor.stop_gradient, True) tensor.stop_gradient = False print("Set persistable") tensor.persistable = False tensor1 = paddle.to_tensor(arr1, core.VarDesc.VarType.FP32, core.CPUPlace()) tensor1.persistable = True self.assertEqual(tensor1.stop_gradient, True) self.assertTrue(np.array_equal(tensor.numpy(), arr)) print("Test copy_") tensor.copy_(tensor1, True) self.assertEqual(tensor.persistable, False) self.assertEqual(tensor.shape, [4, 16]) self.assertEqual(tensor.dtype, core.VarDesc.VarType.FP32) self.assertTrue(np.array_equal(tensor.numpy(), arr1)) print("Test _copy_to") tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32, core.CPUPlace()) self.assertTrue(np.array_equal(tensor2.numpy(), arr2)) self.assertTrue(tensor2.place.is_cpu_place()) tensor2.persistable = True tensor2.stop_gradient = False if core.is_compiled_with_cuda(): tensor3 = tensor2._copy_to(core.CUDAPlace(0), True) self.assertTrue(np.array_equal(tensor3.numpy(), arr2)) self.assertEqual(tensor3.persistable, True) self.assertEqual(tensor3.stop_gradient, True) self.assertTrue(tensor3.place.is_gpu_place()) tensor4 = tensor2.cuda(0, True) self.assertTrue(np.array_equal(tensor4.numpy(), arr2)) self.assertEqual(tensor4.persistable, True) self.assertEqual(tensor4.stop_gradient, False) self.assertTrue(tensor4.place.is_gpu_place()) tensor5 = tensor4.cpu() self.assertTrue(np.array_equal(tensor5.numpy(), arr2)) self.assertEqual(tensor5.persistable, True) self.assertEqual(tensor5.stop_gradient, False) self.assertTrue(tensor5.place.is_cpu_place()) tensor10 = paddle.to_tensor([1, 2, 3], place='gpu_pinned') tensor11 = tensor10._copy_to(core.CUDAPlace(0), True) self.assertTrue( np.array_equal(tensor10.numpy(), tensor11.numpy())) else: tensor3 = tensor2._copy_to(core.CPUPlace(), True) self.assertTrue(np.array_equal(tensor3.numpy(), arr2)) self.assertEqual(tensor3.persistable, True) self.assertEqual(tensor3.stop_gradient, True) self.assertTrue(tensor3.place.is_cpu_place()) tensor4 = tensor2.cpu() 
self.assertTrue(np.array_equal(tensor4.numpy(), arr2)) self.assertEqual(tensor4.persistable, True) self.assertEqual(tensor4.stop_gradient, False) self.assertTrue(tensor4.place.is_cpu_place()) def test_share_buffer_to(self): with _test_eager_guard(): arr = np.ones([4, 16, 16, 32]).astype('float32') arr1 = np.zeros([4, 16]).astype('float32') arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones( [4, 16, 16, 32]).astype('float32') tensor = None tensor2 = None tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32, core.CPUPlace()) tensor3 = core.eager.Tensor() if core.is_compiled_with_cuda(): tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32, core.CUDAPlace(0)) else: tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32, core.CPUPlace()) self.assertTrue(np.array_equal(tensor.numpy(), arr)) self.assertTrue(np.array_equal(tensor2.numpy(), arr2)) tensor2._share_buffer_to(tensor) self.assertTrue(np.array_equal(tensor.numpy(), arr2)) self.assertTrue(np.array_equal(tensor2.numpy(), arr2)) self.assertTrue(tensor._is_shared_buffer_with(tensor2)) self.assertTrue(tensor2._is_shared_buffer_with(tensor)) tensor._share_buffer_to(tensor3) self.assertTrue(np.array_equal(tensor3.numpy(), arr2)) self.assertTrue(tensor3._is_shared_buffer_with(tensor)) def test_share_underline_tensor_to(self): with _test_eager_guard(): arr = np.ones([4, 16, 16, 32]).astype('float32') arr1 = np.zeros([4, 16]).astype('float32') arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones( [4, 16, 16, 32]).astype('float32') tensor = None tensor2 = None tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32, core.CPUPlace()) tensor3 = core.eager.Tensor() if core.is_compiled_with_cuda(): tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32, core.CUDAPlace(0)) else: tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32, core.CPUPlace()) self.assertTrue(np.array_equal(tensor.numpy(), arr)) self.assertTrue(np.array_equal(tensor2.numpy(), arr2)) 
tensor2._share_underline_tensor_to(tensor) self.assertTrue(np.array_equal(tensor.numpy(), arr2)) self.assertTrue(np.array_equal(tensor2.numpy(), arr2)) self.assertTrue(tensor._is_shared_underline_tensor_with(tensor2)) self.assertTrue(tensor2._is_shared_underline_tensor_with(tensor)) tensor._share_underline_tensor_to(tensor3) self.assertTrue(np.array_equal(tensor3.numpy(), arr2)) self.assertTrue(tensor3._is_shared_underline_tensor_with(tensor)) def test_properties(self): print("Test_properties") with _test_eager_guard(): paddle.set_device("cpu") arr = np.ones([4, 16, 16, 32]).astype('float32') tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32, core.CPUPlace()) self.assertEqual(tensor.shape, [4, 16, 16, 32]) tensor.name = 'tensor_name_test' self.assertEqual(tensor.name, 'tensor_name_test') self.assertEqual(tensor.persistable, False) tensor.persistable = True self.assertEqual(tensor.persistable, True) tensor.persistable = False self.assertEqual(tensor.persistable, False) self.assertTrue(tensor.place.is_cpu_place()) self.assertEqual(tensor._place_str, 'Place(cpu)') self.assertEqual(tensor.stop_gradient, True) tensor.stop_gradient = False self.assertEqual(tensor.stop_gradient, False) tensor.stop_gradient = True self.assertEqual(tensor.stop_gradient, True) self.assertEqual(tensor.type, core.VarDesc.VarType.LOD_TENSOR) def test_global_properties(self): print("Test_global_properties") self.assertFalse(core._in_eager_mode()) with _test_eager_guard(): self.assertTrue(core._in_eager_mode()) self.assertFalse(core._in_eager_mode()) def test_place_guard(self): core._enable_eager_mode() if core.is_compiled_with_cuda(): paddle.set_device("gpu:0") with paddle.fluid.framework._dygraph_place_guard(core.CPUPlace()): self.assertTrue(core.eager._get_expected_place().is_cpu_place()) else: paddle.set_device("cpu") with paddle.fluid.framework._dygraph_place_guard(core.CPUPlace()): self.assertTrue(core.eager._get_expected_place().is_cpu_place()) core._disable_eager_mode() def 
test_value(self): with _test_eager_guard(): arr = np.random.rand(4, 16, 16, 32).astype('float64') egr_tensor0 = core.eager.Tensor(value=arr) self.assertEqual(egr_tensor0.persistable, False) self.assertTrue("generated" in egr_tensor0.name) self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32]) self.assertTrue( egr_tensor0.place._equals( paddle.fluid.framework._current_expected_place())) self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP64) self.assertEqual(egr_tensor0.stop_gradient, True) self.assertTrue(egr_tensor0.value().get_tensor()._dtype(), core.VarDesc.VarType.FP64) self.assertTrue(egr_tensor0.value().get_tensor()._place(), paddle.fluid.framework._current_expected_place()) self.assertTrue(egr_tensor0.value().get_tensor()._is_initialized()) def test_set_value(self): with _test_eager_guard(): ori_arr = np.random.rand(4, 16, 16, 32).astype('float32') egr_tensor = core.eager.Tensor(value=ori_arr) self.assertEqual(egr_tensor.stop_gradient, True) self.assertEqual(egr_tensor.shape, [4, 16, 16, 32]) self.assertTrue(np.array_equal(egr_tensor.numpy(), ori_arr)) ori_place = egr_tensor.place new_arr = np.random.rand(4, 16, 16, 32).astype('float32') self.assertFalse(np.array_equal(egr_tensor.numpy(), new_arr)) egr_tensor.set_value(new_arr) self.assertEqual(egr_tensor.stop_gradient, True) self.assertTrue(egr_tensor.place._equals(ori_place)) self.assertEqual(egr_tensor.shape, [4, 16, 16, 32]) self.assertTrue(np.array_equal(egr_tensor.numpy(), new_arr)) def test_sharding_related_api(self): with _test_eager_guard(): arr0 = np.random.rand(4, 16, 16, 32).astype('float32') egr_tensor1 = core.eager.Tensor(arr0, core.CPUPlace(), True, False, "numpy_tensor1", False) self.assertEqual(egr_tensor1._numel(), 32768) self.assertEqual(egr_tensor1._slice(0, 2)._numel(), 16384) def test_copy_gradient_from(self): with _test_eager_guard(): np_x = np.random.random((2, 2)) np_y = np.random.random((2, 2)) x = paddle.to_tensor(np_x, dtype="float64", stop_gradient=False) y = 
paddle.to_tensor(np_y, dtype="float64") out = x + x out.backward() x._copy_gradient_from(y) self.assertTrue(np.array_equal(x.grad.numpy(), np_y)) def test_clear(self): with _test_eager_guard(): np_x = np.random.random((3, 8, 8)) x = paddle.to_tensor(np_x, dtype="float64") self.assertTrue(x._is_initialized()) x._clear() self.assertFalse(x._is_initialized()) class EagerParamBaseUsageTestCase(unittest.TestCase): def test_print(self): with _test_eager_guard(): linear = paddle.nn.Linear(3, 3, bias_attr=False) print(linear.weight) def test_copy(self): with _test_eager_guard(): linear = paddle.nn.Linear(1, 3) linear_copy = copy.deepcopy(linear) linear_copy2 = linear.weight._copy_to(core.CPUPlace(), True) self.assertTrue( np.array_equal(linear.weight.numpy(), linear_copy.weight.numpy())) self.assertTrue( np.array_equal(linear.weight.numpy(), linear_copy2.numpy())) def func_fp16_initilaizer(self): paddle.set_default_dtype("float16") linear1 = paddle.nn.Linear(1, 3, bias_attr=False) linear2 = paddle.nn.Linear( 1, 3, bias_attr=False, weight_attr=paddle.fluid.initializer.Uniform()) linear3 = paddle.nn.Linear( 1, 3, bias_attr=False, weight_attr=paddle.fluid.initializer.TruncatedNormalInitializer()) linear4 = paddle.nn.Linear( 1, 3, bias_attr=False, weight_attr=paddle.fluid.initializer.MSRAInitializer()) res = [ linear1.weight.numpy(), linear2.weight.numpy(), linear3.weight.numpy(), linear4.weight.numpy() ] paddle.set_default_dtype("float32") return res def test_fp16_initializer(self): res1 = list() res2 = list() paddle.seed(102) paddle.framework.random._manual_program_seed(102) with _test_eager_guard(): res1 = self.func_fp16_initilaizer() res2 = self.func_fp16_initilaizer() for i in range(len(res1)): self.assertTrue(np.array_equal(res1[i], res2[i])) def func_layer_helper_base(self, value): base = paddle.fluid.layer_helper_base.LayerHelperBase("test_layer", "test_layer") return base.to_variable(value).numpy() def func_base_to_variable(self, value): 
paddle.fluid.dygraph.base.to_variable(value) def test_to_variable(self): value = np.random.rand(4, 16, 16, 32).astype('float32') res1 = None res3 = None with _test_eager_guard(): res1 = self.func_layer_helper_base(value) res3 = self.func_base_to_variable(value) res2 = self.func_layer_helper_base(value) res4 = self.func_base_to_variable(value) self.assertTrue(np.array_equal(res1, res2)) self.assertTrue(np.array_equal(res3, res4)) def test_backward_with_single_tensor(self): with _test_eager_guard(): arr4 = np.random.rand(4, 16, 16, 32).astype('float32') egr_tensor12 = core.eager.Tensor(arr4, core.CPUPlace()) egr_tensor12.retain_grads() arr = np.ones([4, 16, 16, 32]).astype('float32') self.assertEqual(egr_tensor12.persistable, False) self.assertTrue("generated_tensor" in egr_tensor12.name) self.assertEqual(egr_tensor12.shape, [4, 16, 16, 32]) self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor12.stop_gradient, True) self.assertTrue(egr_tensor12.place._equals(paddle.fluid.CPUPlace())) self.assertTrue(np.array_equal(egr_tensor12.numpy(), arr4)) self.assertTrue(np.array_equal(egr_tensor12.gradient(), None)) egr_tensor12.stop_gradient = False egr_tensor12.backward() self.assertTrue(np.array_equal(egr_tensor12.gradient(), arr)) def test_set_value(self): with _test_eager_guard(): linear = paddle.nn.Linear(1, 3) ori_place = linear.weight.place new_weight = np.ones([1, 3]).astype('float32') self.assertFalse(np.array_equal(linear.weight.numpy(), new_weight)) linear.weight.set_value(new_weight) self.assertTrue(np.array_equal(linear.weight.numpy(), new_weight)) self.assertTrue(linear.weight.place._equals(ori_place)) class EagerGuardTestCase(unittest.TestCase): def test__test_eager_guard(self): tracer = paddle.fluid.dygraph.tracer.Tracer() with _test_eager_guard(tracer): self.assertTrue(_in_eager_mode()) if __name__ == "__main__": unittest.main()
nilq/baby-python
python
import datetime as dt

import numpy as np
import pandas as pd
from pandas import DataFrame, timedelta_range
import pandas.testing as tm
from pandas.testing import assert_frame_equal


class TestTimedeltaIndex(object):
    """Resampling regression tests for frames indexed by a TimedeltaIndex.

    Fixed: the module previously imported from ``pandas.util.testing``
    (deprecated, removed in pandas 2.0) and from the private module
    ``pandas.core.indexes.timedeltas``; both now use the public API.
    """

    def test_asfreq_bug(self):
        # Upsampling a sparse timedelta-indexed frame with asfreq() must
        # insert NaN rows for the missing periods (and upcast to float).
        df = DataFrame(data=[1, 3],
                       index=[dt.timedelta(), dt.timedelta(minutes=3)])
        result = df.resample('1T').asfreq()
        expected = DataFrame(data=[1, np.nan, np.nan, 3],
                             index=timedelta_range('0 day', periods=4,
                                                   freq='1T'))
        assert_frame_equal(result, expected)

    def test_resample_with_nat(self):
        # GH 13223: resampling must tolerate NaT entries in the index.
        index = pd.to_timedelta(['0s', pd.NaT, '2s'])
        result = DataFrame({'value': [2, 3, 5]}, index).resample('1s').mean()
        expected = DataFrame({'value': [2.5, np.nan, 5.0]},
                             index=timedelta_range('0 day', periods=3,
                                                   freq='1S'))
        assert_frame_equal(result, expected)

    def test_resample_as_freq_with_subperiod(self):
        # GH 13022: downsample-frequency not aligned with the original
        # 5-minute grid; asfreq() keeps only the timestamps that coincide.
        index = timedelta_range('00:00:00', '00:10:00', freq='5T')
        df = DataFrame(data={'value': [1, 5, 10]}, index=index)
        result = df.resample('2T').asfreq()
        expected_data = {'value': [1, np.nan, np.nan, np.nan, np.nan, 10]}
        expected = DataFrame(data=expected_data,
                             index=timedelta_range('00:00:00', '00:10:00',
                                                   freq='2T'))
        tm.assert_frame_equal(result, expected)
nilq/baby-python
python
# -*- coding: utf-8 -*- import os import sys package_path = '/user/specified/path/to/matsdp/' sys.path.insert(0, os.path.abspath(package_path)) def test_plot_proxigram_csv(): from matsdp.apt import apt_plot retn_val = apt_plot.plot_proxigram_csv( proxigram_csv_file_path = './apt/profile-interface0.csv', sysname = 'M2', visible_elmt_list = ['Ni','Al'], interplation_on = False, fig_width = 6, fig_height = 5, fig_dpi = 600, fig_format = 'png', ) assert retn_val == 0
nilq/baby-python
python
"""Train a Word2Vec embedding on joke questions and answers from jokes.csv."""
import os

import pandas as pd
import nltk
import gensim
from gensim import corpora, models, similarities

# NOTE(review): machine-specific path. Raw string fixes the invalid "\s"
# escape sequence of the original literal (same bytes, no DeprecationWarning).
os.chdir(r"D:\semicolon\Deep Learning")

df = pd.read_csv('jokes.csv')
x = df['Question'].values.tolist()
y = df['Answer'].values.tolist()
corpus = x + y

# BUG FIX: the original called sent.decode('utf-8') unconditionally, which is
# Python 2 code -- pandas yields `str` on Python 3, and `str` has no .decode().
# Decode only genuine byte strings.
tok_corp = [nltk.word_tokenize(sent.decode('utf-8') if isinstance(sent, bytes) else sent)
            for sent in corpus]

# NOTE(review): gensim >= 4.0 renamed `size` to `vector_size`; confirm the
# installed gensim version before changing this keyword.
model = gensim.models.Word2Vec(tok_corp, min_count=1, size=32)

# model.save('testmodel')
# model = gensim.models.Word2Vec.load('test_model')
# model.most_similar('word')
# model.most_similar([vector])
nilq/baby-python
python
#!/usr/bin/env python
"""Benchmark driver: run mapreduce_hand for 2**1 .. 2**8 workers and append
the reported value for each run to the 'avg' result file."""
import subprocess

exponents = list(range(1, 9))
print(exponents)

# Fixed: the result file was previously opened without a context manager
# (leaked on any subprocess failure) and an unused `y = []` was declared;
# the loop also reassigned its own index variable `i`.
with open('avg', 'a') as resultf:
    for exp in exponents:
        n = 2 ** exp
        print(n)
        out_bytes = subprocess.check_output(
            ['../../build/bin/mapreduce_hand', '131072', str(n)])
        out_text = out_bytes.decode('ascii')
        # The benchmark prints its result on the second-to-last line in the
        # form "<label>\t<value>".
        value = out_text.split('\n')[-2].split('\t')[1]
        resultf.write(str(n) + '\t' + str(value) + '\n')
nilq/baby-python
python
# coding: utf-8

"""
InfluxDB OSS API Service.

The InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB. Access the InfluxDB API using the `/api/v2/` endpoint.   # noqa: E501

OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""


import pprint
import re  # noqa: F401

# Python-3 cleanup: the `six` compatibility shim is no longer imported;
# `six.iteritems(d)` is behaviorally identical to `d.items()` on Python 3.


class TelegrafPlugins(object):
    """NOTE: This class is auto generated by OpenAPI Generator.

    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'version': 'str',
        'os': 'str',
        'plugins': 'list[TelegrafPlugin]'
    }

    attribute_map = {
        'version': 'version',
        'os': 'os',
        'plugins': 'plugins'
    }

    def __init__(self, version=None, os=None, plugins=None):  # noqa: E501,D401,D403
        """TelegrafPlugins - a model defined in OpenAPI."""  # noqa: E501
        self._version = None
        self._os = None
        self._plugins = None
        self.discriminator = None

        # Route every supplied value through its property setter.
        if version is not None:
            self.version = version
        if os is not None:
            self.os = os
        if plugins is not None:
            self.plugins = plugins

    @property
    def version(self):
        """Get the version of this TelegrafPlugins.

        :return: The version of this TelegrafPlugins.
        :rtype: str
        """  # noqa: E501
        return self._version

    @version.setter
    def version(self, version):
        """Set the version of this TelegrafPlugins.

        :param version: The version of this TelegrafPlugins.
        :type: str
        """  # noqa: E501
        self._version = version

    @property
    def os(self):
        """Get the os of this TelegrafPlugins.

        :return: The os of this TelegrafPlugins.
        :rtype: str
        """  # noqa: E501
        return self._os

    @os.setter
    def os(self, os):
        """Set the os of this TelegrafPlugins.

        :param os: The os of this TelegrafPlugins.
        :type: str
        """  # noqa: E501
        self._os = os

    @property
    def plugins(self):
        """Get the plugins of this TelegrafPlugins.

        :return: The plugins of this TelegrafPlugins.
        :rtype: list[TelegrafPlugin]
        """  # noqa: E501
        return self._plugins

    @plugins.setter
    def plugins(self, plugins):
        """Set the plugins of this TelegrafPlugins.

        :param plugins: The plugins of this TelegrafPlugins.
        :type: list[TelegrafPlugin]
        """  # noqa: E501
        self._plugins = plugins

    def to_dict(self):
        """Return the model properties as a dict."""
        result = {}

        # Fixed: `six.iteritems(...)` replaced with the plain Python 3
        # `dict.items()`, dropping the third-party `six` dependency.
        for attr, _ in self.openapi_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return true if both objects are equal."""
        if not isinstance(other, TelegrafPlugins):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return true if both objects are not equal."""
        return not self == other
nilq/baby-python
python
# flake8: noqa
"""
This is the local_settings file for Mezzanine's docs.
"""

from random import choice

from mezzanine.project_template.project_name.settings import *

# The docs build never serves real traffic.
DEBUG = False
ROOT_URLCONF = "mezzanine.project_template.project_name.urls"

# Alphabet for the throwaway, per-build secret key.
characters = "abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)"
# Generate a fresh SECRET_KEY for this build.
SECRET_KEY = "".join(choice(characters) for _ in range(50))

# Make sure the accounts app is present so its documentation gets built.
if "mezzanine.accounts" not in INSTALLED_APPS:
    INSTALLED_APPS = tuple(INSTALLED_APPS) + ("mezzanine.accounts",)
nilq/baby-python
python
import json
import datetime as dt
import dateutil.parser

import backends.entities.Players as Players
import backends.database as db
import backends.trueskillWrapper as ts

## Why the login-offset is necessary ##
## - losing teams tend to have players leaving and joining more rapidly
## - every time a new player joins he has to set up
## - new players are unfamiliar with positions of the enemy team
## - new players have to run from spawn
## --> their impact factor (which is calculated from their active time) must account for that
loginoffset = dt.timedelta(seconds=60)


class Round:
    """A single finished round: both teams, map, duration and outcome.

    Construction syncs every player against the database (to pick up his
    current rating) and computes the pre-game outcome prediction.
    """

    def __init__(self, winnerTeam, loserTeam, _map, duration, startTime, winnerSide):
        if duration <= dt.timedelta(0):
            raise ValueError("Duration cannot be zero")
        if duration <= dt.timedelta(seconds=120):
            raise ValueError("Games was too short")

        self.winners = winnerTeam
        self.losers = loserTeam
        self.winnerSide = winnerSide
        self.map = _map
        self.duration = duration
        self.start = startTime

        ### Sync players from database ###
        for p in self.winners + self.losers:
            playerInDB = db.getOrCreatePlayer(p, startTime)
            p.rating = playerInDB.rating

        self.prediction, self.confidence = ts.predictOutcome(self.winners, self.losers)

    def _normalized_time(self, p, strict=False):
        """Fraction of the round `p` was active, clamped to [0, 1].

        The login offset is subtracted first (see module comment). With
        strict=True a fraction below -1 raises, mirroring the original
        sanity check that was applied to the winning team.
        """
        if self.duration is None:
            return 1.0
        d = (p.activeTime - loginoffset) / self.duration
        if strict and d < -1:
            raise AssertionError("Normalized Playtime was less than -1 ??")
        return min(max(d, 0.0), 1.0)

    def normalized_playtimes(self):
        '''returns a dict {(teamid, player): player_time_played / total_time_of_round}'''
        np = dict()
        for p in self.winners:
            np[(0, p)] = self._normalized_time(p, strict=True)
        for p in self.losers:
            np[(1, p)] = self._normalized_time(p)
        return np

    def pt_difference(self):
        '''Ratio between the teams' summed normalized playtimes.

        Used to check the playtime imbalance between the two teams.
        Returns 1 when the duration is unknown and 0 when a team's total
        weight is non-positive (to avoid division by zero).
        '''
        if self.duration is None:
            return 1

        def team_weight(team):
            w = 0.0
            for p in team:
                # Fixed inconsistency: fake players are now skipped *before*
                # their activeTime is touched on both teams (the losers loop
                # previously computed the fraction first).
                if p.is_fake:
                    w += 1.0  # fake players count as fully present
                    continue
                w += self._normalized_time(p)
            return w

        w1 = team_weight(self.winners)
        w2 = team_weight(self.losers)

        # no div0 plox
        if min(w1, w2) <= 0:
            return 0
        return max(w1, w2) / min(w1, w2)

    @staticmethod
    def _player_entry(p):
        """JSON-serializable dict for one player (activeTime in seconds)."""
        return {
            "playerId": p.id,
            "playerName": p.name,
            "isFake": p.is_fake,
            "activeTime": p.activeTime.total_seconds()
        }

    def toJson(self):
        """Serialize this round to a JSON string (inverse of fromJson)."""
        retDict = {
            "winners": [self._player_entry(p) for p in self.winners],
            "losers": [self._player_entry(p) for p in self.losers],
            "startTime": self.start.isoformat(),
            "duration": self.duration.total_seconds(),
            "map": self.map,
            "winner-side": self.winnerSide
        }
        return json.dumps(retDict)


def fromJson(jsonDict):
    """Rebuild a Round from a parsed JSON dict (inverse of Round.toJson)."""
    timestamp = dateutil.parser.isoparse(jsonDict["startTime"])

    winnerTeam = jsonDict.get("winner-side")
    # NOTE(review): a falsy side id (0 or None/missing) is treated as
    # "unknown" -- confirm side ids are never 0 with the JSON producer.
    if not winnerTeam:
        winnerTeam = -1
        loserTeam = -2
    else:
        loserTeam = (winnerTeam % 2) + 2

    def build_players(entries, team):
        players = []
        for p in entries:
            pObj = Players.PlayerInRound(p["playerId"], p["playerName"], team,
                                         timestamp)
            # BUG FIX: activeTime is serialized in *seconds* (see toJson),
            # but was deserialized as dt.timedelta(int(...)) -- the first
            # positional argument of timedelta is *days*, inflating every
            # playtime by a factor of 86400.
            pObj.activeTime = dt.timedelta(seconds=int(p["activeTime"]))
            players.append(pObj)
        return players

    winnersList = build_players(jsonDict["winners"], winnerTeam)
    losersList = build_players(jsonDict["losers"], loserTeam)

    duration = dt.timedelta(seconds=int(jsonDict["duration"]))
    return Round(winnersList, losersList, jsonDict["map"], duration,
                 timestamp, winnerTeam)
nilq/baby-python
python
# SQLAlchemy ORM session walkthrough: add/query/dirty/new/commit/rollback.
# NOTE(review): relative imports cannot work when this file is executed
# directly via the __main__ guard below -- it must be run as a package
# module (python -m package.module). Verify the intended invocation.
from .declare_a_mapping import User, create_table
from .connection import Session

# Module-level session shared by all operations below.
session = Session()


def user_operator():
    """Demonstrate basic session operations on the User mapping."""
    ed_user = User(name='ed', fullname='Ed Jones', nickname='edsnickname')
    session.add(ed_user)
    # Querying flushes the pending add, so the identity map returns the
    # very same object -- this prints True.
    our_user = session.query(User).filter_by(name='ed').first()
    print(ed_user is our_user)

    # Add more users.
    session.add_all([
        User(name='wendy', fullname='Wendy Williams', nickname='windy'),
        User(name='mary', fullname='Mary Contrary', nickname='mary'),
        User(name='fred', fullname='Fred Flintstone', nickname='freddy')])

    # Dirty data: modifying a persistent object marks it dirty.
    ed_user.nickname = 'eddie'
    dirty = session.dirty
    print(f'session dirty data: {dirty}')

    # New data: objects added but not yet flushed/committed.
    new_data = session.new
    print(f'session new data: {new_data}')
    session.commit()

    # Rolling back: discard the rename and the fake user added below.
    ed_user.name = 'Edwardo'
    fake_user = User(name='fakeuser', fullname='Invalid', nickname='12345')
    session.add(fake_user)
    users = session.query(User).filter(User.name.in_(['Edwardo', 'fakeuser'])).all()
    print(f'users: {users}')
    session.rollback()
    # After rollback the committed name 'ed' is restored.
    print(f'ed_user.name: {ed_user.name}')
    # ...and the fake user is expunged from the session -- prints False.
    is_fake_user_exist = fake_user in session
    print(f'is fake user in session: {is_fake_user_exist}')


def main():
    """Create the table, then run the demo operations."""
    create_table()
    user_operator()


if __name__ == '__main__':
    main()
nilq/baby-python
python
import enchant
import re
import itertools

# Letter layout of an "old fashioned" telephone keypad
# (derived from a google image search).
# BUG FIX: '9' previously mapped to ['W', 'X', 'YZ'] because of a missing
# comma ('Y' 'Z' concatenated into one string); 'Y' and 'Z' are now separate.
letters_from_numbers_lookup = {'2': ['A', 'B', 'C'],
                               '3': ['D', 'E', 'F'],
                               '4': ['G', 'H', 'I'],
                               '5': ['J', 'K', 'L'],
                               '6': ['M', 'N', 'O'],
                               '7': ['P', 'Q', 'R', 'S'],
                               '8': ['T', 'U', 'V'],
                               '9': ['W', 'X', 'Y', 'Z']}

numbers_from_letters_lookup = {'A': '2', 'B': '2', 'C': '2',
                               'D': '3', 'E': '3', 'F': '3',
                               'G': '4', 'H': '4', 'I': '4',
                               'J': '5', 'K': '5', 'L': '5',
                               'M': '6', 'N': '6', 'O': '6',
                               'P': '7', 'Q': '7', 'R': '7', 'S': '7',
                               'T': '8', 'U': '8', 'V': '8',
                               'W': '9', 'X': '9', 'Y': '9', 'Z': '9'}

english_word_lookup = enchant.Dict("en_US")


# TODO: it might make sense to allow 'I' and 'a' with the stipulation that
# they be followed by a valid word...
def is_valid_word(word_to_check: str, min_length: int = 2, exceptions=()) -> bool:
    """Return True if *word_to_check* is a recognized English word.

    Words shorter than *min_length* are rejected unless listed in
    *exceptions*. Raises ValueError on non-string input.

    BUG FIX: the default for *exceptions* was a mutable ``list()``; an empty
    tuple is used instead (membership testing is unchanged).
    """
    if type(word_to_check) is not str:
        raise ValueError("Non-string entered")
    if (len(word_to_check) < min_length) and (word_to_check not in exceptions):
        return False
    return english_word_lookup.check(word_to_check)


def format_phone_number(phone_digit_list: list) -> str:
    """Format a 10- or 11-element digit list as e.g. '1-800-555-0199'.

    Raises ValueError when the input is not a list of 10 or 11 items.

    BUG FIXES: the type check now runs before len() so non-lists raise
    ValueError (not TypeError), and the input list is no longer mutated
    (the original popped the country code off the caller's list).
    """
    if (type(phone_digit_list) is not list) or (len(phone_digit_list) not in [10, 11]):
        raise ValueError("not a valid phone number")
    digits = [str(d) for d in phone_digit_list]
    out_str = ''
    # Optional country code.
    if len(digits) == 11:
        out_str = digits[0] + '-'
        digits = digits[1:]
    # Area code, exchange, and the last four (subscriber number).
    out_str += ''.join(digits[:3]) + '-'
    out_str += ''.join(digits[3:6]) + '-'
    out_str += ''.join(digits[6:])
    return out_str


def get_character_list(phone_words: str) -> list:
    """Strip non-alphanumeric characters and return the remainder as a list."""
    if type(phone_words) is not str:
        raise ValueError("Not a Valid Input")
    return list(re.sub(r'\W+', '', phone_words))


def all_values_from_number(num: str) -> list:
    """Return the keypad letters for digit *num*, plus the digit itself.

    BUG FIX: the original appended *num* directly onto the list object stored
    inside letters_from_numbers_lookup, permanently polluting the shared
    table; a copy is taken first.
    """
    letters = list(letters_from_numbers_lookup.get(num, []))
    if num not in letters:
        letters.append(num)
    return letters


def all_combinations(number_list: list) -> list:
    """
    :param number_list: array of strings representing digits between 0 and 9
    :return: all possible number-letter combinations
    """
    all_chars = [all_values_from_number(x) for x in number_list]
    # note: I broke this out for ease of testing,
    # but really we'd want this to return the iterable for efficiency
    return list(itertools.product(*all_chars))


def has_valid_word(char_list: list) -> bool:
    """
    :param char_list: array of strings, can be combination of digits and letters
    :return: whether there is a valid English word in this array, based on the
        letters in order. Note that each letter run must be surrounded on both
        sides by numbers (1800-PAINTX is not a valid word), and a leading
        'A'/'AN'/'I' is only allowed when followed by another word.
    """
    phone_number = ''.join(char_list)
    # Replace digit runs by spaces so each letter run becomes one token.
    only_letters = re.sub(r"\d", " ", phone_number).strip()
    letters_split = only_letters.split(' ')
    n_valid = 0
    has_preposition = False
    for i, sub_word in enumerate(letters_split):
        if sub_word == '':
            continue
        if i == 0 and sub_word in ['A', 'AN', 'I']:
            # Leading one/two-letter words are tolerated, but only as a prefix.
            n_valid += 1
            has_preposition = True
        elif (len(sub_word) < 3) or (is_valid_word(sub_word) is False):
            # Any other run must be a dictionary word of 3+ letters.
            return False
        else:
            n_valid += 1
    if has_preposition and len(letters_split) <= 1:
        # A bare preposition with nothing after it is not a wordification.
        return False
    return n_valid > 0


def format_wordification(char_list: list) -> str:
    """
    :param char_list: letter-number combination in an array (all strings)
    :return: valid wordification with dashes between any letter/number chunks
    """
    out = ''
    n = len(char_list)
    char_str = ''.join(char_list)
    num_letter_list = re.split(r'(\d+)', char_str)
    if len(num_letter_list) == 3:
        # A single digit run: fall back to plain phone-number formatting.
        # NOTE(review): a string like 'PAINT80099' also splits into exactly 3
        # chunks and takes this branch — confirm that is intended.
        out = format_phone_number(list(char_list))
    else:
        for chunk in num_letter_list:
            if chunk in ['', ' ']:
                continue
            out += chunk + '-'
        out = out[:-1]
        # Re-insert the conventional dashes around the country/area code.
        if n == 11:
            if (char_list[0] == '1') and (out[1] != '-'):
                out = '1-' + out[1:]
            if out[2:5].isdigit():
                out = out[:5] + "-" + out[5:]
        if (n == 10) and (out[:3].isdigit()):
            out = out[:3] + "-" + out[3:]
        # Letter runs split by the chunking get re-joined.
        out = re.sub(r'([A-Z])-([A-Z])', r'\1\2', out)
    return out.replace('--', '-')
nilq/baby-python
python
# --depends-on config
# --depends-on format_activity

from src import ModuleManager, utils
from src.Logging import Logger as log


@utils.export("botset", utils.BoolSetting("print-motd", "Set whether I print /motd"))
@utils.export(
    "botset",
    utils.BoolSetting("pretty-activity", "Whether or not to pretty print activity"),
)
# Used to migrate word stats from prior to v1.19.0
@utils.export(
    "channelset",
    utils.BoolSetting("print", "Whether or not to print activity a channel to logs"),
)
class Module(ModuleManager.BaseModule):
    def _print(self, event):
        """Write one formatted activity line to the log, honouring settings."""
        channel = event["channel"]
        if channel and not channel.get_setting("print", True):
            # This channel has opted out of activity printing.
            return

        # Prefer the pretty-printed form when available and enabled bot-wide.
        if event["pretty"] and self.bot.get_setting("pretty-activity", False):
            text = event["pretty"]
        else:
            text = event["line"]

        server = event["server"]
        if hasattr(server, "alias"):
            server = server.alias

        raw_context = event["context"]
        if raw_context is None or raw_context in ["*", ""]:
            context = "Server"
        else:
            context = raw_context

        log.info(text, server=server, context=context, formatting=True)

    @utils.hook("formatted.message.channel")
    @utils.hook("formatted.notice.channel")
    @utils.hook("formatted.notice.private")
    @utils.hook("formatted.join")
    @utils.hook("formatted.part")
    @utils.hook("formatted.nick")
    @utils.hook("formatted.invite")
    @utils.hook("formatted.mode.channel")
    @utils.hook("formatted.topic")
    @utils.hook("formatted.topic-timestamp")
    @utils.hook("formatted.kick")
    @utils.hook("formatted.quit")
    @utils.hook("formatted.rename")
    @utils.hook("formatted.chghost")
    @utils.hook("formatted.account")
    @utils.hook("formatted.delete")
    def formatted(self, event):
        """All ordinary formatted activity events go straight to the printer."""
        self._print(event)

    @utils.hook("formatted.motd")
    def motd(self, event):
        """MOTD lines print only when the bot-wide setting allows it."""
        if self.bot.get_setting("print-motd", True):
            self._print(event)
nilq/baby-python
python
import numpy as np
import dask.array as da

from napari.components import ViewerModel
from napari.util import colormaps

base_colormaps = colormaps.CYMRGB
two_colormaps = colormaps.MAGENTA_GREEN


def test_multichannel():
    """Test adding multichannel image."""
    viewer = ViewerModel()
    np.random.seed(0)
    data = np.random.random((15, 10, 5))
    viewer.add_image(data, channel_axis=-1)
    assert len(viewer.layers) == data.shape[-1]
    for i in range(data.shape[-1]):
        assert np.all(viewer.layers[i].data == data.take(i, axis=-1))
        assert viewer.layers[i].colormap[0] == base_colormaps[i]


def test_two_channel():
    """Test adding multichannel image with two channels."""
    viewer = ViewerModel()
    np.random.seed(0)
    data = np.random.random((15, 10, 2))
    viewer.add_image(data, channel_axis=-1)
    assert len(viewer.layers) == data.shape[-1]
    for i in range(data.shape[-1]):
        assert np.all(viewer.layers[i].data == data.take(i, axis=-1))
        # Two channels get the magenta/green pair rather than CYMRGB.
        assert viewer.layers[i].colormap[0] == two_colormaps[i]


def test_one_channel():
    """Test adding multichannel image with one channel."""
    viewer = ViewerModel()
    np.random.seed(0)
    data = np.random.random((15, 10, 1))
    viewer.add_image(data, channel_axis=-1)
    assert len(viewer.layers) == data.shape[-1]
    for i in range(data.shape[-1]):
        assert np.all(viewer.layers[i].data == data.take(i, axis=-1))
        assert viewer.layers[i].colormap[0] == two_colormaps[i]


def test_specified_multichannel():
    """Test adding multichannel image with color channel set."""
    viewer = ViewerModel()
    np.random.seed(0)
    data = np.random.random((5, 10, 15))
    viewer.add_image(data, channel_axis=0)
    assert len(viewer.layers) == data.shape[0]
    for i in range(data.shape[0]):
        assert np.all(viewer.layers[i].data == data.take(i, axis=0))


def test_names():
    """Test adding multichannel image with custom names."""
    viewer = ViewerModel()
    np.random.seed(0)
    data = np.random.random((15, 10, 5))
    names = ['multi ' + str(i + 3) for i in range(data.shape[-1])]
    viewer.add_image(data, name=names, channel_axis=-1)
    assert len(viewer.layers) == data.shape[-1]
    for i in range(data.shape[-1]):
        assert viewer.layers[i].name == names[i]

    # A single name gets ' [i]' suffixes appended per channel.
    viewer = ViewerModel()
    name = 'example'
    names = [name] + [name + f' [{i + 1}]' for i in range(data.shape[-1] - 1)]
    viewer.add_image(data, name=name, channel_axis=-1)
    assert len(viewer.layers) == data.shape[-1]
    for i in range(data.shape[-1]):
        assert viewer.layers[i].name == names[i]


def test_colormaps():
    """Test adding multichannel image with custom colormaps."""
    viewer = ViewerModel()
    np.random.seed(0)
    data = np.random.random((15, 10, 5))
    colormap = 'gray'
    viewer.add_image(data, colormap=colormap, channel_axis=-1)
    assert len(viewer.layers) == data.shape[-1]
    for i in range(data.shape[-1]):
        assert viewer.layers[i].colormap[0] == colormap

    viewer = ViewerModel()
    # Renamed local (was `colormaps`) to stop shadowing the imported module.
    colormap_list = ['gray', 'blue', 'red', 'green', 'yellow']
    viewer.add_image(data, colormap=colormap_list, channel_axis=-1)
    assert len(viewer.layers) == data.shape[-1]
    for i in range(data.shape[-1]):
        assert viewer.layers[i].colormap[0] == colormap_list[i]


def test_split_rgb_image():
    """Test splitting an rgb image into per-channel layers with colormaps."""
    viewer = ViewerModel()
    np.random.seed(0)
    data = np.random.random((15, 10, 3))
    colormap_list = ['red', 'green', 'blue']
    viewer.add_image(data, colormap=colormap_list, channel_axis=-1)
    assert len(viewer.layers) == data.shape[-1]
    for i in range(data.shape[-1]):
        assert viewer.layers[i].colormap[0] == colormap_list[i]


def test_contrast_limits():
    """Test adding multichannel image with custom contrast limits."""
    viewer = ViewerModel()
    np.random.seed(0)
    data = np.random.random((15, 10, 5))
    clims = [0.3, 0.7]
    viewer.add_image(data, contrast_limits=clims, channel_axis=-1)
    assert len(viewer.layers) == data.shape[-1]
    for i in range(data.shape[-1]):
        assert viewer.layers[i].contrast_limits == clims

    viewer = ViewerModel()
    clims = [[0.3, 0.7], [0.1, 0.9], [0.3, 0.9], [0.4, 0.9], [0.2, 0.9]]
    viewer.add_image(data, contrast_limits=clims, channel_axis=-1)
    assert len(viewer.layers) == data.shape[-1]
    for i in range(data.shape[-1]):
        assert viewer.layers[i].contrast_limits == clims[i]


def test_gamma():
    """Test adding multichannel image with custom gamma."""
    viewer = ViewerModel()
    np.random.seed(0)
    data = np.random.random((15, 10, 5))
    gamma = 0.7
    viewer.add_image(data, gamma=gamma, channel_axis=-1)
    assert len(viewer.layers) == data.shape[-1]
    for i in range(data.shape[-1]):
        assert viewer.layers[i].gamma == gamma

    viewer = ViewerModel()
    gammas = [0.3, 0.4, 0.5, 0.6, 0.7]
    viewer.add_image(data, gamma=gammas, channel_axis=-1)
    assert len(viewer.layers) == data.shape[-1]
    for i in range(data.shape[-1]):
        assert viewer.layers[i].gamma == gammas[i]


def test_multichannel_pyramid():
    """Test adding multichannel pyramid."""
    viewer = ViewerModel()
    np.random.seed(0)  # was called twice; once is enough
    shapes = [(40, 20, 4), (20, 10, 4), (10, 5, 4)]
    data = [np.random.random(s) for s in shapes]
    viewer.add_image(data, channel_axis=-1, is_pyramid=True)
    assert len(viewer.layers) == data[0].shape[-1]
    for i in range(data[0].shape[-1]):
        assert np.all(
            [
                np.all(l_d == d)
                for l_d, d in zip(
                    viewer.layers[i].data,
                    [data[j].take(i, axis=-1) for j in range(len(data))],
                )
            ]
        )
        assert viewer.layers[i].colormap[0] == base_colormaps[i]


def test_rgb_images():
    """Test adding multiple rgb images."""
    viewer = ViewerModel()
    np.random.seed(0)
    data = np.random.random((15, 10, 5, 3))
    viewer.add_image(data, channel_axis=2, rgb=True)
    assert len(viewer.layers) == data.shape[2]
    # BUG FIX: the loop previously ran over range(data.shape[-1]) — the rgb
    # axis (3) — instead of the channel axis (5), leaving layers 3-4 unchecked.
    for i in range(data.shape[2]):
        assert viewer.layers[i].rgb is True
        assert viewer.layers[i]._data_view.ndim == 3


def test_dask_array():
    """Test adding multichannel dask array."""
    viewer = ViewerModel()
    np.random.seed(0)
    data = da.random.random((2, 10, 10, 5))
    viewer.add_image(data, channel_axis=0)
    assert len(viewer.layers) == data.shape[0]
    for i in range(data.shape[0]):
        assert viewer.layers[i].data.shape == data.shape[1:]
        # Splitting must keep the data lazy (still a dask array).
        assert isinstance(viewer.layers[i].data, da.Array)
nilq/baby-python
python
import argparse
import collections
import datetime
import os
import shutil
import time

import numpy as np
import torch
from tqdm import tqdm

import dataset
import madrys
import mlconfig
import toolbox
import util
from evaluator import Evaluator
from trainer import Trainer

mlconfig.register(madrys.MadrysLoss)

# General Options
parser = argparse.ArgumentParser(description='ClasswiseNoise')
parser.add_argument('--seed', type=int, default=0, help='seed')
parser.add_argument('--version', type=str, default="resnet18")
parser.add_argument('--exp_name', type=str, default="test_exp")
parser.add_argument('--config_path', type=str, default='configs/cifar10')
parser.add_argument('--load_model', action='store_true', default=False)
parser.add_argument('--data_parallel', action='store_true', default=False)
# Datasets Options
parser.add_argument('--train_batch_size', default=512, type=int, help='perturb step size')
parser.add_argument('--eval_batch_size', default=512, type=int, help='perturb step size')
parser.add_argument('--num_of_workers', default=8, type=int, help='workers for loader')
parser.add_argument('--train_data_type', type=str, default='CIFAR10')
parser.add_argument('--train_data_path', type=str, default='../datasets')
parser.add_argument('--test_data_type', type=str, default='CIFAR10')
parser.add_argument('--test_data_path', type=str, default='../datasets')
# Perturbation Options
parser.add_argument('--universal_train_portion', default=0.2, type=float)
parser.add_argument('--universal_stop_error', default=0.5, type=float)
parser.add_argument('--universal_train_target', default='train_subset', type=str)
parser.add_argument('--train_step', default=10, type=int)
parser.add_argument('--use_subset', action='store_true', default=False)
parser.add_argument('--attack_type', default='min-min', type=str,
                    choices=['min-min', 'min-max', 'random'], help='Attack type')
parser.add_argument('--perturb_type', default='classwise', type=str,
                    choices=['classwise', 'samplewise'], help='Perturb type')
parser.add_argument('--patch_location', default='center', type=str,
                    choices=['center', 'random'], help='Location of the noise')
parser.add_argument('--noise_shape', default=[10, 3, 32, 32], nargs='+', type=int, help='noise shape')
parser.add_argument('--epsilon', default=8, type=float, help='perturbation')
parser.add_argument('--num_steps', default=1, type=int, help='perturb number of steps')
parser.add_argument('--step_size', default=0.8, type=float, help='perturb step size')
parser.add_argument('--random_start', action='store_true', default=False)
args = parser.parse_args()

# Convert eps/step size from pixel units [0, 255] to [0, 1].
args.epsilon = args.epsilon / 255
args.step_size = args.step_size / 255

# Set up experiment directories and logging.
if args.exp_name == '':
    # BUG FIX: the original concatenated str + datetime (TypeError);
    # format the timestamp explicitly.
    args.exp_name = 'exp_' + datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
exp_path = os.path.join(args.exp_name, args.version)
log_file_path = os.path.join(exp_path, args.version)
checkpoint_path = os.path.join(exp_path, 'checkpoints')
checkpoint_path_file = os.path.join(checkpoint_path, args.version)
util.build_dirs(exp_path)
util.build_dirs(checkpoint_path)
logger = util.setup_logger(name=args.version, log_file=log_file_path + ".log")

# CUDA Options
logger.info("PyTorch Version: %s" % (torch.__version__))
if torch.cuda.is_available():
    torch.cuda.manual_seed(args.seed)
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    device = torch.device('cuda')
    device_list = [torch.cuda.get_device_name(i) for i in range(0, torch.cuda.device_count())]
    logger.info("GPU List: %s" % (device_list))
else:
    device = torch.device('cpu')

# Load Exp Configs
config_file = os.path.join(args.config_path, args.version) + '.yaml'
config = mlconfig.load(config_file)
config.set_immutable()
for key in config:
    logger.info("%s: %s" % (key, config[key]))
shutil.copyfile(config_file, os.path.join(exp_path, args.version + '.yaml'))


def train(starting_epoch, model, optimizer, scheduler, criterion, trainer, evaluator, ENV, data_loader):
    """Standard train/eval loop; updates ENV history and checkpoints every epoch."""
    for epoch in range(starting_epoch, config.epochs):
        logger.info("")
        logger.info("=" * 20 + "Training Epoch %d" % (epoch) + "=" * 20)
        # Train
        ENV['global_step'] = trainer.train(epoch, model, criterion, optimizer)
        ENV['train_history'].append(trainer.acc_meters.avg * 100)
        scheduler.step()
        # Eval
        logger.info("=" * 20 + "Eval Epoch %d" % (epoch) + "=" * 20)
        evaluator.eval(epoch, model)
        payload = ('Eval Loss:%.4f\tEval acc: %.2f' % (evaluator.loss_meters.avg,
                                                       evaluator.acc_meters.avg * 100))
        logger.info(payload)
        ENV['eval_history'].append(evaluator.acc_meters.avg * 100)
        ENV['curren_acc'] = evaluator.acc_meters.avg * 100
        # Reset Stats
        trainer._reset_stats()
        evaluator._reset_stats()
        # Save Model (unwrap DataParallel so the checkpoint is portable).
        target_model = model.module if args.data_parallel else model
        util.save_model(ENV=ENV, epoch=epoch, model=target_model, optimizer=optimizer,
                        scheduler=scheduler, filename=checkpoint_path_file)
        logger.info('Model Saved at %s', checkpoint_path_file)
    return


def universal_perturbation_eval(noise_generator, random_noise, data_loader, model,
                                eval_target=args.universal_train_target):
    """Evaluate class-wise noise: returns (avg loss, error rate) on eval_target.

    Note: the default for eval_target is captured from args at import time.
    """
    loss_meter = util.AverageMeter()
    err_meter = util.AverageMeter()
    random_noise = random_noise.to(device)
    model = model.to(device)
    for images, labels in data_loader[eval_target]:
        images, labels = images.to(device, non_blocking=True), labels.to(device, non_blocking=True)
        if random_noise is not None:
            # Add each sample's class noise (renamed loop var: the original
            # inner `i` shadowed the outer batch index).
            for j in range(len(labels)):
                class_index = labels[j].item()
                noise = random_noise[class_index]
                mask_cord, class_noise = noise_generator._patch_noise_extend_to_img(
                    noise, image_size=images[j].shape, patch_location=args.patch_location)
                images[j] += class_noise
        pred = model(images)
        err = (pred.data.max(1)[1] != labels.data).float().sum()
        loss = torch.nn.CrossEntropyLoss()(pred, labels)
        loss_meter.update(loss.item(), len(labels))
        err_meter.update(err / len(labels))
    return loss_meter.avg, err_meter.avg


def universal_perturbation(noise_generator, trainer, evaluator, model, criterion,
                           optimizer, scheduler, random_noise, ENV):
    """Search for a class-wise (universal) perturbation.

    Alternates a few steps of model training (min-min only) with PGD-style
    noise updates, until the error rate crosses universal_stop_error.
    """
    # Class-Wise perturbation: generate data loader.
    datasets_generator = dataset.DatasetGenerator(train_batch_size=args.train_batch_size,
                                                  eval_batch_size=args.eval_batch_size,
                                                  train_data_type=args.train_data_type,
                                                  train_data_path=args.train_data_path,
                                                  test_data_type=args.test_data_type,
                                                  test_data_path=args.test_data_path,
                                                  num_of_workers=args.num_of_workers,
                                                  seed=args.seed, no_train_augments=True)
    if args.use_subset:
        data_loader = datasets_generator._split_validation_set(
            train_portion=args.universal_train_portion, train_shuffle=True, train_drop_last=True)
    else:
        data_loader = datasets_generator.getDataLoader(train_shuffle=True, train_drop_last=True)
    condition = True
    data_iter = iter(data_loader['train_dataset'])
    logger.info('=' * 20 + 'Searching Universal Perturbation' + '=' * 20)
    if hasattr(model, 'classify'):
        model.classify = True
    while condition:
        if args.attack_type == 'min-min' and not args.load_model:
            # Train batches for min-min noise.
            for j in range(0, args.train_step):
                try:
                    (images, labels) = next(data_iter)
                except StopIteration:  # BUG FIX: was a bare except
                    data_iter = iter(data_loader['train_dataset'])
                    (images, labels) = next(data_iter)
                images, labels = images.to(device), labels.to(device)
                # Add class-wise noise to each sample.
                train_imgs = []
                for i, (image, label) in enumerate(zip(images, labels)):
                    noise = random_noise[label.item()]
                    mask_cord, class_noise = noise_generator._patch_noise_extend_to_img(
                        noise, image_size=image.shape, patch_location=args.patch_location)
                    train_imgs.append(images[i] + class_noise)
                # Train
                model.train()
                for param in model.parameters():
                    param.requires_grad = True
                trainer.train_batch(torch.stack(train_imgs).to(device), labels, model, optimizer)

        for images, labels in tqdm(data_loader[args.universal_train_target],
                                   total=len(data_loader[args.universal_train_target])):
            images, labels, model = images.to(device), labels.to(device), model.to(device)
            # Add class-wise noise to each sample.
            batch_noise, mask_cord_list = [], []
            for image, label in zip(images, labels):
                noise = random_noise[label.item()]
                mask_cord, class_noise = noise_generator._patch_noise_extend_to_img(
                    noise, image_size=image.shape, patch_location=args.patch_location)
                batch_noise.append(class_noise)
                mask_cord_list.append(mask_cord)
            # Update universal perturbation (model frozen).
            model.eval()
            for param in model.parameters():
                param.requires_grad = False
            batch_noise = torch.stack(batch_noise).to(device)
            if args.attack_type == 'min-min':
                _, eta = noise_generator.min_min_attack(images, labels, model, optimizer,
                                                        criterion, random_noise=batch_noise)
            elif args.attack_type == 'min-max':
                _, eta = noise_generator.min_max_attack(images, labels, model, optimizer,
                                                        criterion, random_noise=batch_noise)
            else:
                # BUG FIX: raising a plain string is itself a TypeError.
                raise ValueError('Invalid attack')
            # Average per-class deltas and clamp to the epsilon ball.
            class_noise_eta = collections.defaultdict(list)
            for i in range(len(eta)):
                x1, x2, y1, y2 = mask_cord_list[i]
                delta = eta[i][:, x1: x2, y1: y2]
                class_noise_eta[labels[i].item()].append(delta.detach().cpu())
            for key in class_noise_eta:
                delta = torch.stack(class_noise_eta[key]).mean(dim=0) - random_noise[key]
                class_noise = random_noise[key]
                class_noise += delta
                random_noise[key] = torch.clamp(class_noise, -args.epsilon, args.epsilon)

        # Eval termination conditions.
        loss_avg, error_rate = universal_perturbation_eval(
            noise_generator, random_noise, data_loader, model,
            eval_target=args.universal_train_target)
        logger.info('Loss: {:.4f} Acc: {:.2f}%'.format(loss_avg, 100 - error_rate * 100))
        random_noise = random_noise.detach()
        ENV['random_noise'] = random_noise
        if args.attack_type == 'min-min':
            condition = error_rate > args.universal_stop_error
        elif args.attack_type == 'min-max':
            condition = error_rate < args.universal_stop_error
    return random_noise


def samplewise_perturbation_eval(random_noise, data_loader, model,
                                 eval_target='train_dataset', mask_cord_list=None):
    """Evaluate sample-wise noise: returns (avg loss, error rate) on eval_target."""
    # BUG FIX: mask_cord_list defaulted to a mutable list literal.
    if mask_cord_list is None:
        mask_cord_list = []
    loss_meter = util.AverageMeter()
    err_meter = util.AverageMeter()
    # random_noise = random_noise.to(device)
    model = model.to(device)
    idx = 0
    for images, labels in data_loader[eval_target]:
        images, labels = images.to(device, non_blocking=True), labels.to(device, non_blocking=True)
        if random_noise is not None:
            for j, (image, label) in enumerate(zip(images, labels)):
                if not torch.is_tensor(random_noise):
                    sample_noise = torch.tensor(random_noise[idx]).to(device)
                else:
                    sample_noise = random_noise[idx].to(device)
                # Paste the patch noise into a full-image mask.
                c, h, w = image.shape[0], image.shape[1], image.shape[2]
                mask = np.zeros((c, h, w), np.float32)
                x1, x2, y1, y2 = mask_cord_list[idx]
                mask[:, x1: x2, y1: y2] = sample_noise.cpu().numpy()
                sample_noise = torch.from_numpy(mask).to(device)
                images[j] = images[j] + sample_noise
                idx += 1
        pred = model(images)
        err = (pred.data.max(1)[1] != labels.data).float().sum()
        loss = torch.nn.CrossEntropyLoss()(pred, labels)
        loss_meter.update(loss.item(), len(labels))
        err_meter.update(err / len(labels))
    return loss_meter.avg, err_meter.avg


def sample_wise_perturbation(noise_generator, trainer, evaluator, model, criterion,
                             optimizer, scheduler, random_noise, ENV):
    """Search for per-sample perturbations; mirrors universal_perturbation."""
    datasets_generator = dataset.DatasetGenerator(train_batch_size=args.train_batch_size,
                                                  eval_batch_size=args.eval_batch_size,
                                                  train_data_type=args.train_data_type,
                                                  train_data_path=args.train_data_path,
                                                  test_data_type=args.test_data_type,
                                                  test_data_path=args.test_data_path,
                                                  num_of_workers=args.num_of_workers,
                                                  seed=args.seed, no_train_augments=True)
    if args.train_data_type == 'ImageNetMini' and args.perturb_type == 'samplewise':
        data_loader = datasets_generator._split_validation_set(0.2, train_shuffle=False,
                                                               train_drop_last=False)
        data_loader['train_dataset'] = data_loader['train_subset']
    else:
        data_loader = datasets_generator.getDataLoader(train_shuffle=False, train_drop_last=False)

    # Precompute patch coordinates for every training sample (loader must not
    # shuffle, so sample order — and hence idx — is stable across passes).
    mask_cord_list = []
    idx = 0
    for images, labels in data_loader['train_dataset']:
        for i, (image, label) in enumerate(zip(images, labels)):
            noise = random_noise[idx]
            mask_cord, _ = noise_generator._patch_noise_extend_to_img(
                noise, image_size=image.shape, patch_location=args.patch_location)
            mask_cord_list.append(mask_cord)
            idx += 1

    condition = True
    train_idx = 0
    data_iter = iter(data_loader['train_dataset'])
    logger.info('=' * 20 + 'Searching Samplewise Perturbation' + '=' * 20)
    while condition:
        if args.attack_type == 'min-min' and not args.load_model:
            # Train batches for min-min noise.
            for j in tqdm(range(0, args.train_step), total=args.train_step):
                try:
                    (images, labels) = next(data_iter)
                except StopIteration:  # BUG FIX: was a bare except
                    train_idx = 0
                    data_iter = iter(data_loader['train_dataset'])
                    (images, labels) = next(data_iter)
                images, labels = images.to(device), labels.to(device)
                # Add sample-wise noise to each sample.
                for i, (image, label) in enumerate(zip(images, labels)):
                    sample_noise = random_noise[train_idx]
                    c, h, w = image.shape[0], image.shape[1], image.shape[2]
                    mask = np.zeros((c, h, w), np.float32)
                    x1, x2, y1, y2 = mask_cord_list[train_idx]
                    if type(sample_noise) is np.ndarray:
                        mask[:, x1: x2, y1: y2] = sample_noise
                    else:
                        mask[:, x1: x2, y1: y2] = sample_noise.cpu().numpy()
                    sample_noise = torch.from_numpy(mask).to(device)
                    images[i] = images[i] + sample_noise
                    train_idx += 1
                model.train()
                for param in model.parameters():
                    param.requires_grad = True
                trainer.train_batch(images, labels, model, optimizer)

        # Search for noise.
        idx = 0
        for images, labels in tqdm(data_loader['train_dataset'],
                                   total=len(data_loader['train_dataset'])):
            images, labels, model = images.to(device), labels.to(device), model.to(device)
            # Add sample-wise noise to each sample.
            batch_noise, batch_start_idx = [], idx
            for image, label in zip(images, labels):
                sample_noise = random_noise[idx]
                c, h, w = image.shape[0], image.shape[1], image.shape[2]
                mask = np.zeros((c, h, w), np.float32)
                x1, x2, y1, y2 = mask_cord_list[idx]
                if type(sample_noise) is np.ndarray:
                    mask[:, x1: x2, y1: y2] = sample_noise
                else:
                    mask[:, x1: x2, y1: y2] = sample_noise.cpu().numpy()
                sample_noise = torch.from_numpy(mask).to(device)
                batch_noise.append(sample_noise)
                idx += 1

            # Update sample-wise perturbation (model frozen).
            model.eval()
            for param in model.parameters():
                param.requires_grad = False
            batch_noise = torch.stack(batch_noise).to(device)
            if args.attack_type == 'min-min':
                _, eta = noise_generator.min_min_attack(images, labels, model, optimizer,
                                                        criterion, random_noise=batch_noise)
            elif args.attack_type == 'min-max':
                _, eta = noise_generator.min_max_attack(images, labels, model, optimizer,
                                                        criterion, random_noise=batch_noise)
            else:
                # BUG FIX: raising a plain string is itself a TypeError.
                raise ValueError('Invalid attack')
            for i, delta in enumerate(eta):
                x1, x2, y1, y2 = mask_cord_list[batch_start_idx + i]
                delta = delta[:, x1: x2, y1: y2]
                if torch.is_tensor(random_noise):
                    random_noise[batch_start_idx + i] = delta.detach().cpu().clone()
                else:
                    random_noise[batch_start_idx + i] = delta.detach().cpu().numpy()

        # Eval termination conditions.
        loss_avg, error_rate = samplewise_perturbation_eval(
            random_noise, data_loader, model, eval_target='train_dataset',
            mask_cord_list=mask_cord_list)
        logger.info('Loss: {:.4f} Acc: {:.2f}%'.format(loss_avg, 100 - error_rate * 100))
        if torch.is_tensor(random_noise):
            random_noise = random_noise.detach()
        ENV['random_noise'] = random_noise
        if args.attack_type == 'min-min':
            condition = error_rate > args.universal_stop_error
        elif args.attack_type == 'min-max':
            condition = error_rate < args.universal_stop_error

    # Expand patch noises back to full image shape.
    if torch.is_tensor(random_noise):
        new_random_noise = []
        for idx in range(len(random_noise)):
            sample_noise = random_noise[idx]
            # NOTE(review): `image` leaks from the loops above; all samples
            # are assumed to share one image shape — confirm.
            c, h, w = image.shape[0], image.shape[1], image.shape[2]
            mask = np.zeros((c, h, w), np.float32)
            x1, x2, y1, y2 = mask_cord_list[idx]
            mask[:, x1: x2, y1: y2] = sample_noise.cpu().numpy()
            new_random_noise.append(torch.from_numpy(mask))
        new_random_noise = torch.stack(new_random_noise)
        return new_random_noise
    else:
        return random_noise


def main():
    """Build model/loaders from config, then generate and save the noise."""
    # Setup ENV
    datasets_generator = dataset.DatasetGenerator(train_batch_size=args.train_batch_size,
                                                  eval_batch_size=args.eval_batch_size,
                                                  train_data_type=args.train_data_type,
                                                  train_data_path=args.train_data_path,
                                                  test_data_type=args.test_data_type,
                                                  test_data_path=args.test_data_path,
                                                  num_of_workers=args.num_of_workers,
                                                  seed=args.seed)
    data_loader = datasets_generator.getDataLoader()
    model = config.model().to(device)
    logger.info("param size = %fMB", util.count_parameters_in_MB(model))
    optimizer = config.optimizer(model.parameters())
    scheduler = config.scheduler(optimizer)
    criterion = config.criterion()
    if args.perturb_type == 'samplewise':
        train_target = 'train_dataset'
    else:
        if args.use_subset:
            data_loader = datasets_generator._split_validation_set(
                train_portion=args.universal_train_portion,
                train_shuffle=True, train_drop_last=True)
            train_target = 'train_subset'
        else:
            data_loader = datasets_generator.getDataLoader(train_shuffle=True, train_drop_last=True)
            train_target = 'train_dataset'

    trainer = Trainer(criterion, data_loader, logger, config, target=train_target)
    evaluator = Evaluator(data_loader, logger, config)
    ENV = {'global_step': 0,
           'best_acc': 0.0,
           'curren_acc': 0.0,
           'best_pgd_acc': 0.0,
           'train_history': [],
           'eval_history': [],
           'pgd_eval_history': [],
           'genotype_list': []}

    if args.data_parallel:
        model = torch.nn.DataParallel(model)

    if args.load_model:
        checkpoint = util.load_model(filename=checkpoint_path_file, model=model,
                                     optimizer=optimizer, alpha_optimizer=None,
                                     scheduler=scheduler)
        ENV = checkpoint['ENV']
        trainer.global_step = ENV['global_step']
        logger.info("File %s loaded!" % (checkpoint_path_file))

    noise_generator = toolbox.PerturbationTool(epsilon=args.epsilon,
                                               num_steps=args.num_steps,
                                               step_size=args.step_size)
    if args.attack_type == 'random':
        noise = noise_generator.random_noise(noise_shape=args.noise_shape)
        torch.save(noise, os.path.join(args.exp_name, 'perturbation.pt'))
        logger.info(noise)
        logger.info(noise.shape)
        logger.info('Noise saved at %s' % (os.path.join(args.exp_name, 'perturbation.pt')))
    elif args.attack_type == 'min-min' or args.attack_type == 'min-max':
        if args.attack_type == 'min-max':
            # min-max noise needs the model to converge first.
            train(0, model, optimizer, scheduler, criterion, trainer, evaluator, ENV, data_loader)
        if args.random_start:
            random_noise = noise_generator.random_noise(noise_shape=args.noise_shape)
        else:
            random_noise = torch.zeros(*args.noise_shape)
        if args.perturb_type == 'samplewise':
            noise = sample_wise_perturbation(noise_generator, trainer, evaluator, model,
                                             criterion, optimizer, scheduler, random_noise, ENV)
        elif args.perturb_type == 'classwise':
            noise = universal_perturbation(noise_generator, trainer, evaluator, model,
                                           criterion, optimizer, scheduler, random_noise, ENV)
        torch.save(noise, os.path.join(args.exp_name, 'perturbation.pt'))
        logger.info(noise)
        logger.info(noise.shape)
        logger.info('Noise saved at %s' % (os.path.join(args.exp_name, 'perturbation.pt')))
    else:
        # BUG FIX: raising a plain string is itself a TypeError.
        raise ValueError('Not implemented yet')
    return


if __name__ == '__main__':
    for arg in vars(args):
        logger.info("%s: %s" % (arg, getattr(args, arg)))
    start = time.time()
    main()
    end = time.time()
    cost = (end - start) / 86400
    payload = "Running Cost %.2f Days \n" % cost
    logger.info(payload)
nilq/baby-python
python
""" """ from jax import numpy as jnp from jax import jit as jjit @jjit def _calc_weights(x, x_table): n_table = x_table.size lgt_interp = jnp.interp(x, x_table, jnp.arange(0, n_table)) it_lo = jnp.floor(lgt_interp).astype("i4") it_hi = it_lo + 1 weight_hi = lgt_interp - it_lo weight_lo = 1 - weight_hi it_hi = jnp.where(it_hi > n_table - 1, n_table - 1, it_hi) return (it_lo, weight_lo), (it_hi, weight_hi) @jjit def _calc_weighted_table(x, x_table, y_table): (it_lo, weight_lo), (it_hi, weight_hi) = _calc_weights(x, x_table) return weight_lo * y_table[it_lo] + weight_hi * y_table[it_hi] @jjit def _calc_2d_weighted_table(x, y, x_table, y_table, z_table): (it_xlo, weight_xlo), (it_xhi, weight_xhi) = _calc_weights(x, x_table) (it_ylo, weight_ylo), (it_yhi, weight_yhi) = _calc_weights(y, y_table) z_xlo_ylo = z_table[it_xlo, it_ylo, :] * weight_xlo * weight_ylo z_xlo_yhi = z_table[it_xlo, it_yhi, :] * weight_xlo * weight_yhi z_xhi_ylo = z_table[it_xhi, it_ylo, :] * weight_xhi * weight_ylo z_xhi_yhi = z_table[it_xhi, it_yhi, :] * weight_xhi * weight_yhi return z_xlo_ylo + z_xlo_yhi + z_xhi_ylo * z_xhi_yhi
nilq/baby-python
python
import os

# Application settings. All secrets are taken from the environment so this
# file can be committed safely; values are None when the variable is unset.

DEBUG = True  # NOTE(review): should be disabled in production deployments

SECRET_KEY = os.getenv("APP_SECRET_KEY")

# MySQL connection settings (host presumably configured elsewhere).
MYSQL_USERNAME = os.getenv("MYSQL_USERNAME")
MYSQL_PASSWORD = os.getenv("MYSQL_PASSWORD")
MYSQL_PORT = 3306
MYSQL_DB = os.getenv("MYSQL_DB")

# Rotating-file logging configuration.
LOGGING_LEVEL = "DEBUG"
LOGGING_FILE = "activity.log"
# FIX: the original name was misspelled; kept as an alias so existing
# callers importing LOGGIN_FILE keep working.
LOGGIN_FILE = LOGGING_FILE
LOGGING_BACKUPS = 2
LOGGING_MAXBYTES = 1024

TIMEZONE = "America/Montreal"

# Public base URI for stored screenshot assets.
STORE_SCREENSHOT_URI = "https://marketvault-bucket.s3.ca-central-1.amazonaws.com/screenshots/"
nilq/baby-python
python
#
# PySNMP MIB module HP-ICF-OOBM-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HP-ICF-OOBM-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:34:51 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): machine-generated by pysmi; regenerate from the ASN.1 source
# rather than hand-editing. Comments below were added for review only.
# NOTE(review): long description literals were re-joined from a line-wrapped
# dump with single spaces — confirm exact spacing against the generated file.

# Pull SMI base types and helper symbols from the already-loaded core MIBs.
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint")
hpSwitch, = mibBuilder.importSymbols("HP-ICF-OID", "hpSwitch")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
InetAddress, InetAddressType = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressType")
snmpTargetAddrEntry, = mibBuilder.importSymbols("SNMP-TARGET-MIB", "snmpTargetAddrEntry")
ObjectGroup, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "ModuleCompliance", "NotificationGroup")
Gauge32, Counter32, Bits, NotificationType, ModuleIdentity, Integer32, TimeTicks, ObjectIdentity, iso, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, IpAddress, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "Counter32", "Bits", "NotificationType", "ModuleIdentity", "Integer32", "TimeTicks", "ObjectIdentity", "iso", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "IpAddress", "Counter64")
DisplayString, TruthValue, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TruthValue", "TextualConvention")

# Module identity rooted under the HP switch enterprise subtree:
# 1.3.6.1.4.1.11.2.14.11.5.1.58
hpicfOobmMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58))
hpicfOobmMIB.setRevisions(('2010-03-26 00:00', '2009-02-13 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: hpicfOobmMIB.setRevisionsDescriptions(('Added oobm member tables', 'Initial Revision',))
if mibBuilder.loadTexts: hpicfOobmMIB.setLastUpdated('201003260000Z')
if mibBuilder.loadTexts: hpicfOobmMIB.setOrganization('HP Networking')
if mibBuilder.loadTexts: hpicfOobmMIB.setContactInfo('Hewlett-Packard Company 8000 Foothills Blvd. Roseville, CA 95747')
if mibBuilder.loadTexts: hpicfOobmMIB.setDescription('The MIB module is for representing Oobm entity')

# Top-level layout: .0 notifications, .1 objects, .3 conformance.
hpicfOobmNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 0))
hpicfOobmObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1))
hpicfOobmConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 3))

# --- Textual conventions --------------------------------------------------

class HpicfOobmServerIndex(TextualConvention, Integer32):
    # Index TC: identifies which server application a table row describes.
    description = 'An enumerated value that indications the Server application type. Server application type is index for this table.'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
    namedValues = NamedValues(("telnet", 1), ("ssh", 2), ("tftp", 3), ("http", 4), ("snmp", 5))

class HpicfOobmServerState(TextualConvention, Integer32):
    # State TC: which interface(s) a server application listens on.
    description = "An enumerated value which provides an indication of the Application server's presence. Default value is oobm only. Application server can be run for oobm only, data only, or for both."
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
    namedValues = NamedValues(("oobm", 1), ("data", 2), ("both", 3))

# --- Scalars --------------------------------------------------------------

hpicfOobmScalars = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 1))
hpicfOobmStatus = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfOobmStatus.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmStatus.setDescription('Global Oobm (Out Of Band Management) status. By default oobm is globally enabled. On the stackable device, when stacking is enabled, this enables oobm on all the member switches.')

# --- Server application table ---------------------------------------------

hpicfOobmServers = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 2))
hpicfOobmServerTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 2, 1), )
if mibBuilder.loadTexts: hpicfOobmServerTable.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmServerTable.setDescription('This table contains one row for every server application in the switch entity. On a stackable device, when stacking is enabled, the server entry is created on all the member switches.')
hpicfOobmServerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 2, 1, 1), ).setIndexNames((0, "HP-ICF-OOBM-MIB", "hpicfOobmServerType"))
if mibBuilder.loadTexts: hpicfOobmServerEntry.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmServerEntry.setDescription('Information about Server Application table.')
hpicfOobmServerType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 2, 1, 1, 1), HpicfOobmServerIndex())
if mibBuilder.loadTexts: hpicfOobmServerType.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmServerType.setDescription('The index that is used to access the switch server application table.')
hpicfOobmServerListenMode = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 2, 1, 1, 2), HpicfOobmServerState()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfOobmServerListenMode.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmServerListenMode.setDescription('The current state of the server application. Default value is Oobm. Depending on the interface on which server application is running, incoming queries will be listened by the switch.')

# --- snmpTargetAddrTable augmentation --------------------------------------

hpicfOobmSnmpTargetAddrIsOobm = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 3))
hpicfSnmpTargetAddrIsOobmTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 3, 1), )
if mibBuilder.loadTexts: hpicfSnmpTargetAddrIsOobmTable.setStatus('current')
if mibBuilder.loadTexts: hpicfSnmpTargetAddrIsOobmTable.setDescription('Adds an HpicfSnmpTargetAddrIsOobmEntry to snmpTargetAddrTable.')
# The entry AUGMENTS snmpTargetAddrEntry: register the augmentation and
# inherit its index columns.
hpicfSnmpTargetAddrIsOobmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 3, 1, 1), )
snmpTargetAddrEntry.registerAugmentions(("HP-ICF-OOBM-MIB", "hpicfSnmpTargetAddrIsOobmEntry"))
hpicfSnmpTargetAddrIsOobmEntry.setIndexNames(*snmpTargetAddrEntry.getIndexNames())
if mibBuilder.loadTexts: hpicfSnmpTargetAddrIsOobmEntry.setStatus('current')
if mibBuilder.loadTexts: hpicfSnmpTargetAddrIsOobmEntry.setDescription('Adds an HpicfSnmpTargetAddrIsOobmEntry to snmpTargetAddrTable.')
hpicfSnmpTargetAddrIsOobm = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 3, 1, 1, 1), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpicfSnmpTargetAddrIsOobm.setStatus('current')
if mibBuilder.loadTexts: hpicfSnmpTargetAddrIsOobm.setDescription('This object indicates if the target is reachable over OOBM (Out OF Band Management) interface or not. This mib object will be applicable only if there is a physical OOBM port on the device.')

# --- Default gateway table --------------------------------------------------

hpicfOobmDefGateway = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 4))
hpicfOobmDefGatewayTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 4, 1), )
if mibBuilder.loadTexts: hpicfOobmDefGatewayTable.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmDefGatewayTable.setDescription('This table contains one row for every default gateway configured for OOBM Interface.')
hpicfOobmDefGatewayEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 4, 1, 1), ).setIndexNames((0, "HP-ICF-OOBM-MIB", "hpicfOobmDefGatewayType"))
if mibBuilder.loadTexts: hpicfOobmDefGatewayEntry.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmDefGatewayEntry.setDescription('Information about Default Gateway table.')
hpicfOobmDefGatewayType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 4, 1, 1, 1), InetAddressType())
if mibBuilder.loadTexts: hpicfOobmDefGatewayType.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmDefGatewayType.setDescription('Address type of default gateway configured for OOBM Interface.')
hpicfOobmDefGatewayAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 4, 1, 1, 2), InetAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfOobmDefGatewayAddr.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmDefGatewayAddr.setDescription('The IP Address of the default gateway configured for OOBM interface.')

# --- Per-stack-member default gateway table ---------------------------------

hpicfOobmStackMembers = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 5))
hpicfOobmMemberDefGatewayTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 5, 3), )
if mibBuilder.loadTexts: hpicfOobmMemberDefGatewayTable.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmMemberDefGatewayTable.setDescription('This table contains one row for every default gateway configured for OOBM Interface and for each member of the stack.')
hpicfOobmMemberDefGatewayEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 5, 3, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "HP-ICF-OOBM-MIB", "hpicfOobmMemberDefGatewayType"))
if mibBuilder.loadTexts: hpicfOobmMemberDefGatewayEntry.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmMemberDefGatewayEntry.setDescription('Information about Default Gateway table.')
hpicfOobmMemberDefGatewayType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 5, 3, 1, 1), InetAddressType())
if mibBuilder.loadTexts: hpicfOobmMemberDefGatewayType.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmMemberDefGatewayType.setDescription('Address type of default gateway configured for OOBM Interface.')
hpicfOobmMemberDefGatewayAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 1, 5, 3, 1, 2), InetAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfOobmMemberDefGatewayAddr.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmMemberDefGatewayAddr.setDescription('The IP Address of the default gateway configured for OOBM interface.')

# --- Conformance ------------------------------------------------------------

hpicfOobmCompliance = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 3, 1))
hpicfOobmGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 3, 2))
hpicfOobmMibCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 3, 1, 1)).setObjects(("HP-ICF-OOBM-MIB", "hpicfOobmScalarsGroup"), ("HP-ICF-OOBM-MIB", "hpicfOobmServersGroup"), ("HP-ICF-OOBM-MIB", "hpicfSnmpTargetAddrIsOobmGroup"), ("HP-ICF-OOBM-MIB", "hpicfOobmDefGatewayGroup"), ("HP-ICF-OOBM-MIB", "hpicfOobmMemberGroup"), ("HP-ICF-OOBM-MIB", "hpicfOobmGroups"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hpicfOobmMibCompliance = hpicfOobmMibCompliance.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmMibCompliance.setDescription('The compliance statement for entries which implement the Oobm application servers MIB.')
hpicfOobmScalarsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 3, 2, 1)).setObjects(("HP-ICF-OOBM-MIB", "hpicfOobmStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hpicfOobmScalarsGroup = hpicfOobmScalarsGroup.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmScalarsGroup.setDescription('Basic Scalars required in Oobm MIB implementation.')
hpicfOobmServersGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 3, 2, 2)).setObjects(("HP-ICF-OOBM-MIB", "hpicfOobmServerListenMode"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hpicfOobmServersGroup = hpicfOobmServersGroup.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmServersGroup.setDescription('Oobm Server MIB parameters.')
hpicfSnmpTargetAddrIsOobmGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 3, 2, 3)).setObjects(("HP-ICF-OOBM-MIB", "hpicfSnmpTargetAddrIsOobm"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hpicfSnmpTargetAddrIsOobmGroup = hpicfSnmpTargetAddrIsOobmGroup.setStatus('current')
if mibBuilder.loadTexts: hpicfSnmpTargetAddrIsOobmGroup.setDescription('A group of objects to add an HpicfSnmpTargetAddrIsOobmEntry to snmpTargetAddrTable.')
hpicfOobmDefGatewayGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 3, 2, 4)).setObjects(("HP-ICF-OOBM-MIB", "hpicfOobmDefGatewayAddr"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hpicfOobmDefGatewayGroup = hpicfOobmDefGatewayGroup.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmDefGatewayGroup.setDescription('OOBM Default Gateway MIB parameters')
hpicfOobmMemberGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 58, 3, 2, 5)).setObjects(("HP-ICF-OOBM-MIB", "hpicfOobmMemberDefGatewayAddr"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hpicfOobmMemberGroup = hpicfOobmMemberGroup.setStatus('current')
if mibBuilder.loadTexts: hpicfOobmMemberGroup.setDescription('OOBM stack member parameters')

# Export every public symbol so dependent MIB modules can import them.
mibBuilder.exportSymbols("HP-ICF-OOBM-MIB", HpicfOobmServerState=HpicfOobmServerState, hpicfOobmDefGatewayGroup=hpicfOobmDefGatewayGroup, hpicfOobmServerEntry=hpicfOobmServerEntry, hpicfOobmDefGateway=hpicfOobmDefGateway, hpicfOobmStatus=hpicfOobmStatus, hpicfOobmDefGatewayEntry=hpicfOobmDefGatewayEntry, hpicfOobmServerTable=hpicfOobmServerTable, hpicfOobmCompliance=hpicfOobmCompliance, hpicfOobmObjects=hpicfOobmObjects, hpicfOobmDefGatewayType=hpicfOobmDefGatewayType, hpicfOobmMemberDefGatewayAddr=hpicfOobmMemberDefGatewayAddr, hpicfOobmMemberGroup=hpicfOobmMemberGroup, hpicfOobmDefGatewayAddr=hpicfOobmDefGatewayAddr, hpicfOobmSnmpTargetAddrIsOobm=hpicfOobmSnmpTargetAddrIsOobm, hpicfOobmServerType=hpicfOobmServerType, hpicfOobmServers=hpicfOobmServers, hpicfSnmpTargetAddrIsOobmEntry=hpicfSnmpTargetAddrIsOobmEntry, hpicfOobmMemberDefGatewayEntry=hpicfOobmMemberDefGatewayEntry, hpicfSnmpTargetAddrIsOobm=hpicfSnmpTargetAddrIsOobm, hpicfOobmMemberDefGatewayType=hpicfOobmMemberDefGatewayType, hpicfOobmServerListenMode=hpicfOobmServerListenMode, PYSNMP_MODULE_ID=hpicfOobmMIB, hpicfOobmGroups=hpicfOobmGroups, hpicfOobmScalars=hpicfOobmScalars, hpicfOobmConformance=hpicfOobmConformance, HpicfOobmServerIndex=HpicfOobmServerIndex, hpicfSnmpTargetAddrIsOobmTable=hpicfSnmpTargetAddrIsOobmTable, hpicfOobmNotifications=hpicfOobmNotifications, hpicfSnmpTargetAddrIsOobmGroup=hpicfSnmpTargetAddrIsOobmGroup, hpicfOobmScalarsGroup=hpicfOobmScalarsGroup, hpicfOobmMemberDefGatewayTable=hpicfOobmMemberDefGatewayTable, hpicfOobmMIB=hpicfOobmMIB, hpicfOobmMibCompliance=hpicfOobmMibCompliance, hpicfOobmDefGatewayTable=hpicfOobmDefGatewayTable, hpicfOobmServersGroup=hpicfOobmServersGroup, hpicfOobmStackMembers=hpicfOobmStackMembers)
nilq/baby-python
python
""" MARL environment for google football """ import numpy as np import gym import gfootball.env.football_env as football_env from gfootball.env import _process_representation_wrappers from gfootball.env import _process_reward_wrappers from gfootball.env import config from gfootball.env import wrappers class GoogleFootballEnv(object): def __init__(self, num_of_left_agents, num_of_right_agents=0, env_name="test_example_multiagent", stacked=False, representation='extracted', rewards='scoring', write_goal_dumps=False, write_full_episode_dumps=False, render=False, write_video=False, dump_frequency=1, extra_players=None, channel_dimensions=(96, 72), other_config_options={}) -> None: assert num_of_left_agents >= 0 assert num_of_right_agents >= 0 assert num_of_left_agents + num_of_right_agents != 0 # config the environment scenario_config = config.Config({'level': env_name}).ScenarioConfig() players = [('agent:left_players=%d,right_players=%d' % (num_of_left_agents, num_of_right_agents))] if extra_players is not None: players.extend(extra_players) config_values = { 'dump_full_episodes': write_full_episode_dumps, 'dump_scores': write_goal_dumps, 'players': players, 'level': env_name, 'tracesdir': "/tmp/gfootball_log", 'write_video': write_video, } config_values.update(other_config_options) c = config.Config(config_values) self._env = football_env.FootballEnv(c) if dump_frequency > 1: self._env = wrappers.PeriodicDumpWriter(self._env, dump_frequency, render) elif render: self._env.render() # _apply_output_wrappers 在只有一个agent时非要加 wrapper self._env = _process_reward_wrappers(self._env, rewards) self._env = _process_representation_wrappers(self._env, representation, channel_dimensions) if stacked: self._env = wrappers.FrameStack(self._env, 4) self._env = wrappers.GetStateWrapper(self._env) self._action_space = gym.spaces.Discrete( self._env.action_space.nvec[0]) self._observation_space = None if representation == "raw" else gym.spaces.Box( 
low=self._env.observation_space.low[0], high=self._env.observation_space.high[0], dtype=self._env.observation_space.dtype) self._num_left = num_of_left_agents self._num_right = num_of_right_agents self._share_observation_space = gym.spaces.Box( low=np.concatenate([ self._observation_space.low for i in range(self._num_left + self._num_right) ], axis=-1), high=np.concatenate([ self._observation_space.high for i in range(self._num_left + self._num_right) ], axis=-1), dtype=self._observation_space.dtype) @property def action_space(self): return [ self._action_space for i in range(self._num_left + self._num_right) ] @property def observation_space(self): return [ self._observation_space for i in range(self._num_left + self._num_right) ] @property def share_observation_space(self): return [ self._share_observation_space for i in range(self._num_left + self._num_right) ] def seed(self, seed=None): return self._env.seed(seed) def reset(self): return self._env.reset() def step(self, actions): return self._env.step(actions) @property def num_of_left_agents(self): return self._num_left @property def num_of_right_agents(self): return self._num_right def random_step(self): return self._env.step([ self._action_space.sample() for i in range(self._num_left + self._num_right) ]) if __name__ == "__main__": e = GoogleFootballEnv(num_of_left_agents=2, num_of_right_agents=2, env_name='5_vs_5', representation="simple115v2") print(e.share_observation_space) print(e.action_space)
nilq/baby-python
python
# ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- """This module provides factory functions for creating authentication providers""" from .sk_authentication_provider import SymmetricKeyAuthenticationProvider from .sas_authentication_provider import SharedAccessSignatureAuthenticationProvider from .iotedge_authentication_provider import IotEdgeAuthenticationProvider def from_connection_string(connection_string): """Provides an AuthenticationProvider object that can be created simply with a connection string. :param connection_string: The connecting string. :return: a Symmetric Key AuthenticationProvider. """ return SymmetricKeyAuthenticationProvider.parse(connection_string) def from_shared_access_signature(sas_token_str): """Provides an `AuthenticationProvider` object that can be created simply with a shared access signature. :param sas_token_str: The shared access signature. :return: Shared Access Signature AuthenticationProvider. """ return SharedAccessSignatureAuthenticationProvider.parse(sas_token_str) def from_environment(): """Provides an `AuthenticationProvider` object that can be used inside of an Azure IoT Edge module. This method does not need any parameters because all of the information necessary to connect to Azure IoT Edge comes from the operating system of the module container and also from the IoTEdge service. :return: iotedge AuthenticationProvider. """ return IotEdgeAuthenticationProvider()
nilq/baby-python
python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.

Homepage and documentation: http://bottlepy.org/

Copyright (c) 2011, Marcel Hellkamp.
License: MIT (see LICENSE.txt for details)
"""

from __future__ import with_statement

__author__ = 'Marcel Hellkamp'
__version__ = '0.9.dev'
__license__ = 'MIT'

import base64
import cgi
import email.utils
import functools
import hmac
import httplib
import itertools
import mimetypes
import os
import re
import subprocess
import sys
import tempfile
import thread
import threading
import time
import warnings

from Cookie import SimpleCookie
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from urllib import urlencode
from urlparse import urlunsplit, urljoin
from Queue import Empty
from multiprocessing import Process, Queue, Event
from multiprocessing import active_children

# Optional/compat imports: fall back gracefully across Python versions.
try: from collections import MutableMapping as DictMixin
except ImportError: # pragma: no cover
    from UserDict import DictMixin

try: from urlparse import parse_qs
except ImportError: # pragma: no cover
    from cgi import parse_qs

try: import cPickle as pickle
except ImportError: # pragma: no cover
    import pickle

try: from json import dumps as json_dumps
except ImportError: # pragma: no cover
    try: from simplejson import dumps as json_dumps
    except ImportError: # pragma: no cover
        try: from django.utils.simplejson import dumps as json_dumps
        except ImportError: # pragma: no cover
            json_dumps = None

# Python 2/3 string compatibility shims.
NCTextIOWrapper = None
if sys.version_info >= (3,0,0): # pragma: no cover
    # See Request.POST
    from io import BytesIO
    def touni(x, enc='utf8', err='strict'):
        """ Convert anything to unicode """
        return str(x, enc, err) if isinstance(x, bytes) else str(x)
    if sys.version_info < (3,2,0):
        from io import TextIOWrapper
        class NCTextIOWrapper(TextIOWrapper):
            ''' Garbage collecting an io.TextIOWrapper(buffer) instance closes
                the wrapped buffer. This subclass keeps it open. '''
            def close(self): pass
else:
    from StringIO import StringIO as BytesIO
    bytes = str
    def touni(x, enc='utf8', err='strict'):
        """ Convert anything to unicode """
        return x if isinstance(x, unicode) else unicode(str(x), enc, err)

def tob(data, enc='utf8'):
    """ Convert anything to bytes """
    return data.encode(enc) if isinstance(data, unicode) else bytes(data)

# Convert strings and unicode to native strings
if sys.version_info >= (3,0,0):
    tonat = touni
else:
    tonat = tob
tonat.__doc__ = """ Convert anything to native strings """


class classinstancemethod(object):
    """ Acts like a class method when called from a class, like an
        instance method when called by an instance. The method should
        take two arguments, 'self' and 'cls'; one of these will be None
        depending on how the method was called.
    """
    def __init__(self, func):
        self.func = func
        self.__doc__ = func.__doc__
    def __get__(self, obj, type=None):
        return _methodwrapper(self.func, obj=obj, type=type)


class _methodwrapper(object):
    # Bound-callable helper for classinstancemethod: remembers both the
    # instance (may be None) and the class at descriptor-access time.
    def __init__(self, func, obj, type):
        self.func = func
        self.obj = obj
        self.type = type
    def __call__(self, *args, **kw):
        assert not kw.has_key('self') and not kw.has_key('cls'), (
            "You cannot use 'self' or 'cls' arguments to a "
            "classinstancemethod")
        return self.func(*((self.obj, self.type) + args), **kw)
    def __repr__(self):
        if self.obj is None:
            return ('<bound class method %s.%s>'
                    % (self.type.__name__, self.func.func_name))
        else:
            return ('<bound method %s.%s of %r>'
                    % (self.type.__name__, self.func.func_name, self.obj))


# Backward compatibility
def depr(message, critical=False):
    # Emit (or escalate) a deprecation warning on behalf of the caller's caller.
    if critical: raise DeprecationWarning(message)
    warnings.warn(message, DeprecationWarning, stacklevel=3)


# Small helpers
def makelist(data):
    # Normalize any value to a list: containers are copied, scalars wrapped,
    # falsy values become [].
    if isinstance(data, (tuple, list, set, dict)): return list(data)
    elif data: return [data]
    else: return []


class DictProperty(object):
    ''' Property that maps to a key in a local dict-like attribute. '''
    def __init__(self, attr, key=None, read_only=False):
        self.attr, self.key, self.read_only = attr, key, read_only

    def __call__(self, func):
        functools.update_wrapper(self, func, updated=[])
        self.getter, self.key = func, self.key or func.__name__
        return self

    def __get__(self, obj, cls):
        if not obj: return self
        key, storage = self.key, getattr(obj, self.attr)
        # Lazily compute and memoize the value inside the target dict.
        if key not in storage: storage[key] = self.getter(obj)
        return storage[key]

    def __set__(self, obj, value):
        if self.read_only: raise AttributeError("Read-Only property.")
        getattr(obj, self.attr)[self.key] = value

    def __delete__(self, obj):
        if self.read_only: raise AttributeError("Read-Only property.")
        del getattr(obj, self.attr)[self.key]


def cached_property(func):
    ''' A property that, if accessed, replaces itself with the computed
        value. Subsequent accesses won't call the getter again.
    '''
    return DictProperty('__dict__')(func)


class lazy_attribute(object): # Does not need configuration -> lower-case name
    ''' A property that caches itself to the class object. '''
    def __init__(self, func):
        functools.update_wrapper(self, func, updated=[])
        self.getter = func

    def __get__(self, obj, cls):
        # Computed once per class, then shadowed by a plain class attribute.
        value = self.getter(cls)
        setattr(cls, self.__name__, value)
        return value


###############################################################################
# Exceptions and Events ########################################################
###############################################################################

class BottleException(Exception):
    """ A base class for exceptions used by bottle. """
    pass

class HTTPResponse(BottleException):
    """ Used to break execution and immediately finish the response """
    def __init__(self, output='', status=200, header=None):
        super(BottleException, self).__init__("HTTP Response %d" % status)
        self.status = int(status)
        self.output = output
        self.headers = HeaderDict(header) if header else None

    def apply(self, response):
        # Copy this object's status and headers onto a live response object.
        if self.headers:
            for key, value in self.headers.iterallitems():
                response.headers[key] = value
        response.status = self.status

class HTTPError(HTTPResponse):
    """ Used to generate an error page """
    def __init__(self, code=500, output='Unknown Error', exception=None,
                 traceback=None, header=None):
        super(HTTPError, self).__init__(output, code, header)
        self.exception = exception
        self.traceback = traceback

    def __repr__(self):
        return template(ERROR_PAGE_TEMPLATE, e=self)


###############################################################################
# Routing ######################################################################
###############################################################################

class RouteError(BottleException):
    """ This is a base class for all routing related exceptions """

class RouteReset(BottleException):
    """ If raised by a plugin or request handler, the route is reset and all plugins are re-applied. """

class RouteSyntaxError(RouteError):
    """ The route parser found something not supported by this router """

class RouteBuildError(RouteError):
    """ The route could not been built """

class Router(object):
    ''' A Router is an ordered collection of route->target pairs. It is used to
        efficiently match WSGI requests against a number of routes and return
        the first target that satisfies the request. The target may be anything,
        usually a string, ID or callable object. A route consists of a path-rule
        and a HTTP method.

        The path-rule is either a static path (e.g. `/contact`) or a dynamic
        path that contains wildcards (e.g. `/wiki/:page`). By default, wildcards
        consume characters up to the next slash (`/`). To change that, you may
        add a regular expression pattern (e.g. `/wiki/:page#[a-z]+#`). For
        performance reasons, static routes (rules without wildcards) are checked
        first. Dynamic routes are searched in order. Try to avoid ambiguous or
        overlapping rules.

        The HTTP method string matches only on equality, with two exceptions:
          * ´GET´ routes also match ´HEAD´ requests if there is no appropriate
            ´HEAD´ route installed.
          * ´ANY´ routes do match if there is no other suitable route installed.

        An optional ``name`` parameter is used by :meth:`build` to identify
        routes.
    '''

    default = '[^/]+'

    @lazy_attribute
    def syntax(cls):
        # Wildcard syntax: ':name' optionally followed by '#regex#';
        # a backslash before ':' escapes it. Compiled once per class.
        return re.compile(r'(?<!\\):([a-zA-Z_][a-zA-Z_0-9]*)?(?:#(.*?)#)?')

    def __init__(self):
        self.routes = {}   # A {rule: {method: target}} mapping
        self.rules = []    # An ordered list of rules
        self.named = {}    # A name->(rule, build_info) mapping
        self.static = {}   # Cache for static routes: {path: {method: target}}
        self.dynamic = []  # Cache for dynamic routes. See _compile()

    def add(self, rule, method, target, name=None, static=False):
        ''' Add a new route or replace the target for an existing route. '''
        if static:
            depr("Use a backslash to escape ':' in routes.") # 0.9
            rule = rule.replace(':','\\:')
        if rule in self.routes:
            self.routes[rule][method.upper()] = target
        else:
            self.routes[rule] = {method.upper(): target}
            self.rules.append(rule)
        # Adding a rule invalidates the precompiled matcher caches.
        if self.static or self.dynamic: # Clear precompiler cache.
            self.static, self.dynamic = {}, {}
        if name:
            self.named[name] = (rule, None)

    def build(self, _name, *anon, **args):
        ''' Return a string that matches a named route. Use keyword arguments
            to fill out named wildcards. Remaining arguments are appended as a
            query string. Raises RouteBuildError or KeyError.'''
        if _name not in self.named:
            raise RouteBuildError("No route with that name.", _name)
        rule, pairs = self.named[_name]
        if not pairs:
            # First use: split the rule into (static-part, wildcard-name)
            # pairs and cache them for subsequent builds.
            token = self.syntax.split(rule)
            parts = [p.replace('\\:',':') for p in token[::3]]
            names = token[1::3]
            if len(parts) > len(names): names.append(None)
            pairs = zip(parts, names)
            self.named[_name] = (rule, pairs)
        try:
            anon = list(anon)
            # Named wildcards consume keyword args; anonymous ones pop
            # positional args; leftovers become the query string below.
            url = [s if k is None
                   else s+str(args.pop(k)) if k else s+str(anon.pop())
                   for s, k in pairs]
        except IndexError:
            msg = "Not enough arguments to fill out anonymous wildcards."
            raise RouteBuildError(msg)
        except KeyError, e:
            raise RouteBuildError(*e.args)
        if args: url += ['?', urlencode(args)]
        return ''.join(url)

    def match(self, environ):
        ''' Return a (target, url_agrs) tuple or raise HTTPError(404/405). '''
        targets, urlargs = self._match_path(environ)
        if not targets:
            raise HTTPError(404, "Not found: " + repr(environ['PATH_INFO']))
        method = environ['REQUEST_METHOD'].upper()
        if method in targets:
            return targets[method], urlargs
        # Fallbacks: GET serves HEAD; ANY serves everything else.
        if method == 'HEAD' and 'GET' in targets:
            return targets['GET'], urlargs
        if 'ANY' in targets:
            return targets['ANY'], urlargs
        allowed = [verb for verb in targets if verb != 'ANY']
        if 'GET' in allowed and 'HEAD' not in allowed:
            allowed.append('HEAD')
        raise HTTPError(405, "Method not allowed.",
                        header=[('Allow',",".join(allowed))])

    def _match_path(self, environ):
        ''' Optimized PATH_INFO matcher. '''
        path = environ['PATH_INFO'] or '/'
        # Assume we are in a warm state. Search compiled rules first.
        match = self.static.get(path)
        if match: return match, {}
        for combined, rules in self.dynamic:
            match = combined.match(path)
            if not match: continue
            # The combined regex tells us which sub-rule matched; re-match
            # with its grouped pattern to extract the wildcard values.
            gpat, match = rules[match.lastindex - 1]
            return match, gpat.match(path).groupdict() if gpat else {}
        # Lazy-check if we are really in a warm state. If yes, stop here.
        if self.static or self.dynamic or not self.routes: return None, {}
        # Cold state: We have not compiled any rules yet. Do so and try again.
        if not environ.get('wsgi.run_once'):
            self._compile()
            return self._match_path(environ)
        # For run_once (CGI) environments, don't compile. Just check one by one.
        epath = path.replace(':','\\:') # Turn path into its own static rule.
        match = self.routes.get(epath) # This returns static rule only.
        if match: return match, {}
        for rule in self.rules:
            #: Skip static routes to reduce re.compile() calls.
            if rule.count(':') < rule.count('\\:'): continue
            match = self._compile_pattern(rule).match(path)
            if match: return self.routes[rule], match.groupdict()
        return None, {}

    def _compile(self):
        ''' Prepare static and dynamic search structures. '''
        self.static = {}
        self.dynamic = []
        def fpat_sub(m):
            # Turn capturing groups into non-capturing ones, respecting
            # backslash escapes (odd backslash count => literal paren).
            return m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:'
        for rule in self.rules:
            target = self.routes[rule]
            if not self.syntax.search(rule):
                self.static[rule.replace('\\:',':')] = target
                continue
            gpat = self._compile_pattern(rule)
            fpat = re.sub(r'(\\*)(\(\?P<[^>]*>|\((?!\?))', fpat_sub, gpat.pattern)
            gpat = gpat if gpat.groupindex else None
            try:
                # Try to OR this rule into the last combined regex; a new
                # bucket is started when the group limit is exceeded.
                combined = '%s|(%s)' % (self.dynamic[-1][0].pattern, fpat)
                self.dynamic[-1] = (re.compile(combined), self.dynamic[-1][1])
                self.dynamic[-1][1].append((gpat, target))
            except (AssertionError, IndexError), e: # AssertionError: Too many groups
                self.dynamic.append((re.compile('(^%s$)'%fpat),
                                    [(gpat, target)]))
            except re.error, e:
                raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, e))

    def _compile_pattern(self, rule):
        ''' Return a regular expression with named groups for each wildcard. '''
        out = ''
        # syntax.split yields triples: (static text, wildcard name, wildcard regex).
        for i, part in enumerate(self.syntax.split(rule)):
            if i%3 == 0:   out += re.escape(part.replace('\\:',':'))
            elif i%3 == 1: out += '(?P<%s>' % part if part else '(?:'
            else:          out += '%s)' % (part or '[^/]+')
        return re.compile('^%s$'%out)


###############################################################################
# Application Object ###########################################################
###############################################################################

class Bottle(object):
    """ WSGI application """

    def __init__(self, catchall=True, autojson=True, config=None):
        """ Create a new bottle instance.
            You usually don't do that. Use `bottle.app.push()` instead.
        """
        self.routes = []   # List of installed routes including metadata.
        self.router = Router() # Maps requests to self.route indices.
        self.ccache = {}   # Cache for callbacks with plugins applied.
        self.plugins = []  # List of installed plugins.
        self.mounts = {} # Maps URL prefixes to mounted sub-applications.
        self.error_handler = {} # Maps HTTP status codes to handler callables.
        #: If true, most exceptions are catched and returned as :exc:`HTTPError`
        self.catchall = catchall
        self.config = config or {}
        self.serve = True # If False, handle() answers every request with 503.
        # Default plugins
        self.hooks = self.install(HooksPlugin())
        self.typefilter = self.install(TypeFilterPlugin())
        if autojson:
            self.install(JSONPlugin())

    def optimize(self, *a, **ka):
        depr("Bottle.optimize() is obsolete.")

    def mount(self, app, prefix, **options):
        ''' Mount an application to a specific URL prefix. The prefix is added
            to SCIPT_PATH and removed from PATH_INFO before the sub-application
            is called.

            :param app: an instance of :class:`Bottle`.
            :param prefix: path prefix used as a mount-point.

            All other parameters are passed to the underlying :meth:`route`
            call.
        '''
        if not isinstance(app, Bottle):
            raise TypeError('Only Bottle instances are supported for now.')
        prefix = '/'.join(filter(None, prefix.split('/')))
        if not prefix:
            raise TypeError('Empty prefix. Perhaps you want a merge()?')
        for other in self.mounts:
            if other.startswith(prefix):
                raise TypeError('Conflict with existing mount: %s' % other)
        path_depth = prefix.count('/') + 1
        options.setdefault('method', 'ANY')
        self.mounts[prefix] = app
        @self.route('/%s/:#.*#' % prefix, **options)
        def mountpoint():
            # Move the prefix from PATH_INFO to SCRIPT_NAME, then delegate.
            request.path_shift(path_depth)
            return app.handle(request.environ)

    def add_filter(self, ftype, func):
        depr("Filters are deprecated. Replace any filters with plugins.") #0.9
        self.typefilter.add(ftype, func)

    def install(self, plugin):
        ''' Add a plugin to the list of plugins and prepare it for beeing
            applied to all routes of this application. A plugin may be a simple
            decorator or an object that implements the :class:`Plugin` API.
        '''
        if hasattr(plugin, 'setup'): plugin.setup(self)
        if not callable(plugin) and not hasattr(plugin, 'apply'):
            raise TypeError("Plugins must be callable or implement .apply()")
        self.plugins.append(plugin)
        self.reset() # Invalidate cached callbacks so the plugin takes effect.
        return plugin

    def uninstall(self, plugin):
        ''' Uninstall plugins. Pass an instance to remove a specific plugin.
            Pass a type object to remove all plugins that match that type.
            Subclasses are not removed. Pass a string to remove all plugins
            with a matching ``name`` attribute. Pass ``True`` to remove all
            plugins. The list of affected plugins is returned. '''
        removed, remove = [], plugin
        # Iterate in reverse so deleting by index stays valid.
        for i, plugin in list(enumerate(self.plugins))[::-1]:
            if remove is True or remove is plugin or remove is type(plugin) \
            or getattr(plugin, 'name', True) == remove:
                removed.append(plugin)
                del self.plugins[i]
                if hasattr(plugin, 'close'): plugin.close()
        if removed: self.reset()
        return removed

    def reset(self, id=None):
        ''' Reset all routes (re-apply plugins) and clear all caches. If an ID
            is given, only that specific route is affected. '''
        if id is None: self.ccache.clear()
        else: self.ccache.pop(id, None)

    def close(self):
        ''' Close the application and all installed plugins. '''
        for plugin in self.plugins:
            if hasattr(plugin, 'close'): plugin.close()
        self.stopped = True

    def match(self, environ):
        """ Search for a matching route and return a (callback, urlargs) tuple.
            The first element is the associated route callback with plugins
            applied. The second value is a dictionary with parameters extracted
            from the URL. The :class:`Router` raises :exc:`HTTPError` (404/405)
            on a non-match."""
        handle, args = self.router.match(environ)
        environ['route.handle'] = handle # TODO move to router?
        environ['route.url_args'] = args
        try:
            return self.ccache[handle], args
        except KeyError:
            # Cache miss: apply plugins now and memoize the result.
            config = self.routes[handle]
            callback = self.ccache[handle] = self._build_callback(config)
            return callback, args

    def _build_callback(self, config):
        ''' Apply plugins to a route and return a new callable.
        '''
        wrapped = config['callback']
        plugins = self.plugins + config['apply']
        skip = config['skip']
        try:
            # Apply in reverse so the first installed plugin is outermost.
            for plugin in reversed(plugins):
                if True in skip: break # ``skip=True`` disables all plugins.
                if plugin in skip or type(plugin) in skip: continue
                if getattr(plugin, 'name', True) in skip: continue
                if hasattr(plugin, 'apply'):
                    wrapped = plugin.apply(wrapped, config)
                else:
                    wrapped = plugin(wrapped)
                if not wrapped: break
            functools.update_wrapper(wrapped, config['callback'])
            return wrapped
        except RouteReset: # A plugin may have changed the config dict inplace.
            return self._build_callback(config) # Apply all plugins again.

    def get_url(self, routename, **kargs):
        """ Return a string that matches a named route """
        scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
        location = self.router.build(routename, **kargs).lstrip('/')
        return urljoin(urljoin('/', scriptname), location)

    def route(self, path=None, method='GET', callback=None, name=None,
              apply=None, skip=None, **config):
        """ A decorator to bind a function to a request URL. Example::

                @app.route('/hello/:name')
                def hello(name):
                    return 'Hello %s' % name

            The ``:name`` part is a wildcard. See :class:`Router` for syntax
            details.

            :param path: Request path or a list of paths to listen to. If no
              path is specified, it is automatically generated from the
              signature of the function.
            :param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
              methods to listen to. (default: `GET`)
            :param callback: An optional shortcut to avoid the decorator
              syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
            :param name: The name for this route. (default: None)
            :param apply: A decorator or plugin or a list of plugins. These are
              applied to the route callback in addition to installed plugins.
            :param skip: A list of plugins, plugin classes or names. Matching
              plugins are not installed to this route. ``True`` skips all.

            Any additional keyword arguments are stored as route-specific
            configuration and passed to plugins (see :meth:`Plugin.apply`).
        """
        if callable(path): path, callback = None, path
        plugins = makelist(apply)
        skiplist = makelist(skip)
        if 'decorate' in config:
            depr("The 'decorate' parameter was renamed to 'apply'") # 0.9
            plugins += makelist(config.pop('decorate'))
        if 'template' in config: # TODO Make plugin
            depr("The 'template' parameter is no longer used. Add the view() "\
                 "decorator to the 'apply' parameter instead.") # 0.9
            tpl, tplo = config.pop('template'), config.pop('template_opts', {})
            plugins.insert(0, view(tpl, **tplo))
        if config.pop('no_hooks', False):
            depr("The no_hooks parameter is no longer used. Add 'hooks' to the"\
                 "list of skipped plugins instead.") # 0.9
            skiplist.append('hooks')
        static = config.get('static', False) # depr 0.9
        def decorator(callback):
            # A route is added for every (rule, verb) combination.
            for rule in makelist(path) or yieldroutes(callback):
                for verb in makelist(method):
                    verb = verb.upper()
                    cfg = dict(rule=rule, method=verb, callback=callback,
                               name=name, app=self, config=config,
                               apply=plugins, skip=skiplist)
                    self.routes.append(cfg)
                    cfg['id'] = self.routes.index(cfg)
                    self.router.add(rule, verb, cfg['id'], name=name,
                                    static=static)
            return callback
        return decorator(callback) if callback else decorator

    def get(self, path=None, method='GET', **options):
        """ Equals :meth:`route`. """
        return self.route(path, method, **options)

    def post(self, path=None, method='POST', **options):
        """ Equals :meth:`route` with a ``POST`` method parameter. """
        return self.route(path, method, **options)

    def put(self, path=None, method='PUT', **options):
        """ Equals :meth:`route` with a ``PUT`` method parameter. """
        return self.route(path, method, **options)

    def delete(self, path=None, method='DELETE', **options):
        """ Equals :meth:`route` with a ``DELETE`` method parameter.
""" return self.route(path, method, **options) def error(self, code=500): """ Decorator: Register an output handler for a HTTP error code""" def wrapper(handler): self.error_handler[int(code)] = handler return handler return wrapper def hook(self, name): """ Return a decorator that attaches a callback to a hook. """ def wrapper(func): self.hooks.add(name, func) return func return wrapper def add_hook(self, name, func): depr("Call Bottle.hooks.add() instead.") #0.9 self.hooks.add(name, func) def remove_hook(self, name, func): depr("Call Bottle.hooks.remove() instead.") #0.9 self.hooks.remove(name, func) def handle(self, environ, method='GET'): """ Execute the first matching route callback and return the result. :exc:`HTTPResponse` exceptions are catched and returned. If :attr:`Bottle.catchall` is true, other exceptions are catched as well and returned as :exc:`HTTPError` instances (500). """ if isinstance(environ, str): depr("Bottle.handle() takes an environ dictionary.") # v0.9 environ = {'PATH_INFO': environ, 'REQUEST_METHOD': method.upper()} if not self.serve: return HTTPError(503, "Server stopped") try: callback, args = self.match(environ) return callback(**args) except HTTPResponse, r: return r except RouteReset: # Route reset requested by the callback or a plugin. del self.ccache[handle] return self.handle(environ) # Try again. except (KeyboardInterrupt, SystemExit, MemoryError): raise except Exception, e: if not self.catchall: raise return HTTPError(500, "Internal Server Error", e, format_exc(10)) def _cast(self, out, request, response, peek=None): """ Try to convert the parameter into something WSGI compatible and set correct HTTP headers when possible. Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like, iterable of strings and iterable of unicodes """ # Empty output is done here if not out: response.headers['Content-Length'] = 0 return [] # Join lists of byte or unicode strings. 
Mixed lists are NOT supported if isinstance(out, (tuple, list))\ and isinstance(out[0], (bytes, unicode)): out = out[0][0:0].join(out) # b'abc'[0:0] -> b'' # Encode unicode strings if isinstance(out, unicode): out = out.encode(response.charset) # Byte Strings are just returned if isinstance(out, bytes): response.headers['Content-Length'] = str(len(out)) return [out] # HTTPError or HTTPException (recursive, because they may wrap anything) if isinstance(out, HTTPError): out.apply(response) return self._cast(self.error_handler.get(out.status, repr)(out), request, response) if isinstance(out, HTTPResponse): out.apply(response) return self._cast(out.output, request, response) # File-like objects. if hasattr(out, 'read'): if 'wsgi.file_wrapper' in request.environ: return request.environ['wsgi.file_wrapper'](out) elif hasattr(out, 'close') or not hasattr(out, '__iter__'): return WSGIFileWrapper(out) # Handle Iterables. We peek into them to detect their inner type. try: out = iter(out) first = out.next() while not first: first = out.next() except StopIteration: return self._cast('', request, response) except HTTPResponse, e: first = e except Exception, e: first = HTTPError(500, 'Unhandled exception', e, format_exc(10)) if isinstance(e, (KeyboardInterrupt, SystemExit, MemoryError))\ or not self.catchall: raise # These are the inner types allowed in iterator or generator objects. if isinstance(first, HTTPResponse): return self._cast(first, request, response) if isinstance(first, bytes): return itertools.chain([first], out) if isinstance(first, unicode): return itertools.imap(lambda x: x.encode(response.charset), itertools.chain([first], out)) return self._cast(HTTPError(500, 'Unsupported response type: %s'\ % type(first)), request, response) def wsgi(self, environ, start_response): """ The bottle WSGI-interface. 
""" try: environ['bottle.app'] = self request.bind(environ) response.bind() out = self.handle(environ) out = self._cast(out, request, response) # rfc2616 section 4.3 if response.status in (100, 101, 204, 304) or request.method == 'HEAD': if hasattr(out, 'close'): out.close() out = [] status = '%d %s' % (response.status, HTTP_CODES[response.status]) start_response(status, response.headerlist) return out except (KeyboardInterrupt, SystemExit, MemoryError): raise except Exception, e: if not self.catchall: raise err = '<h1>Critical error while processing request: %s</h1>' \ % environ.get('PATH_INFO', '/') if DEBUG: err += '<h2>Error:</h2>\n<pre>%s</pre>\n' % repr(e) err += '<h2>Traceback:</h2>\n<pre>%s</pre>\n' % format_exc(10) environ['wsgi.errors'].write(err) #TODO: wsgi.error should not get html start_response('500 INTERNAL SERVER ERROR', [('Content-Type', 'text/html')]) return [tob(err)] def __call__(self, environ, start_response): return self.wsgi(environ, start_response) ############################################################################### # HTTP and WSGI Tools ########################################################## ############################################################################### class Request(threading.local, DictMixin): """ Represents a single HTTP request using thread-local attributes. The Request object wraps a WSGI environment and can be used as such. """ def __init__(self, environ=None): """ Create a new Request instance. You usually don't do this but use the global `bottle.request` instance instead. """ self.bind(environ or {},) def bind(self, environ): """ Bind a new WSGI environment. This is done automatically for the global `bottle.request` instance on every request. 
""" self.environ = environ # These attributes are used anyway, so it is ok to compute them here self.path = '/' + environ.get('PATH_INFO', '/').lstrip('/') self.method = environ.get('REQUEST_METHOD', 'GET').upper() @property def _environ(self): depr("Request._environ renamed to Request.environ") return self.environ def copy(self): ''' Returns a copy of self ''' return Request(self.environ.copy()) def path_shift(self, shift=1): ''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa. :param shift: The number of path fragments to shift. May be negative to change the shift direction. (default: 1) ''' script_name = self.environ.get('SCRIPT_NAME','/') self['SCRIPT_NAME'], self.path = path_shift(script_name, self.path, shift) self['PATH_INFO'] = self.path def __getitem__(self, key): return self.environ[key] def __delitem__(self, key): self[key] = ""; del(self.environ[key]) def __iter__(self): return iter(self.environ) def __len__(self): return len(self.environ) def keys(self): return self.environ.keys() def __setitem__(self, key, value): """ Shortcut for Request.environ.__setitem__ """ self.environ[key] = value todelete = [] if key in ('PATH_INFO','REQUEST_METHOD'): self.bind(self.environ) elif key == 'wsgi.input': todelete = ('body','forms','files','params') elif key == 'QUERY_STRING': todelete = ('get','params') elif key.startswith('HTTP_'): todelete = ('headers', 'cookies') for key in todelete: if 'bottle.' + key in self.environ: del self.environ['bottle.' + key] @property def query_string(self): """ The part of the URL following the '?'. """ return self.environ.get('QUERY_STRING', '') @property def fullpath(self): """ Request path including SCRIPT_NAME (if present). """ spath = self.environ.get('SCRIPT_NAME','').rstrip('/') + '/' rpath = self.path.lstrip('/') return urljoin(spath, rpath) @property def url(self): """ Full URL as requested by the client (computed). 
            This value is constructed out of different environment variables
            and includes scheme, host, port, scriptname, path and query string.
            Special characters are NOT escaped.
        """
        scheme = self.environ.get('wsgi.url_scheme', 'http')
        # Prefer the proxy-forwarded host, then the HTTP Host header.
        host = self.environ.get('HTTP_X_FORWARDED_HOST')
        host = host or self.environ.get('HTTP_HOST', None)
        if not host:
            host = self.environ.get('SERVER_NAME')
            port = self.environ.get('SERVER_PORT', '80')
            # Only append the port if it is not the scheme default.
            if (scheme, port) not in (('https','443'), ('http','80')):
                host += ':' + port
        parts = (scheme, host, self.fullpath, self.query_string, '')
        return urlunsplit(parts)

    @property
    def content_length(self):
        """ Content-Length header as an integer, -1 if not specified """
        return int(self.environ.get('CONTENT_LENGTH', '') or -1)

    @property
    def header(self):
        depr("The Request.header property was renamed to Request.headers")
        return self.headers

    @DictProperty('environ', 'bottle.headers', read_only=True)
    def headers(self):
        ''' Request HTTP Headers stored in a :class:`HeaderDict`. '''
        return WSGIHeaderDict(self.environ)

    @DictProperty('environ', 'bottle.get', read_only=True)
    def GET(self):
        """ The QUERY_STRING parsed into an instance of :class:`MultiDict`. """
        data = parse_qs(self.query_string, keep_blank_values=True)
        get = self.environ['bottle.get'] = MultiDict()
        for key, values in data.iteritems():
            for value in values:
                get[key] = value
        return get

    @DictProperty('environ', 'bottle.post', read_only=True)
    def POST(self):
        """ The combined values from :attr:`forms` and :attr:`files`. Values
            are either strings (form values) or instances of
            :class:`cgi.FieldStorage` (file uploads).
        """
        post = MultiDict()
        safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
        for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
            if key in self.environ: safe_env[key] = self.environ[key]
        if NCTextIOWrapper:
            # cgi.FieldStorage on py3 needs a text stream; latin1 is lossless.
            fb = NCTextIOWrapper(self.body, encoding='ISO-8859-1', newline='\n')
        else:
            fb = self.body
        data = cgi.FieldStorage(fp=fb, environ=safe_env,
                                keep_blank_values=True)
        for item in data.list or []:
            post[item.name] = item if item.filename else item.value
        return post

    @DictProperty('environ', 'bottle.forms', read_only=True)
    def forms(self):
        """ POST form values parsed into an instance of :class:`MultiDict`.

            This property contains form values parsed from an `url-encoded`
            or `multipart/form-data` encoded POST request body. The values are
            native strings.
        """
        forms = MultiDict()
        for name, item in self.POST.iterallitems():
            if not hasattr(item, 'filename'):
                forms[name] = item
        return forms

    @DictProperty('environ', 'bottle.files', read_only=True)
    def files(self):
        """ File uploads parsed into an instance of :class:`MultiDict`.

            This property contains file uploads parsed from an
            `multipart/form-data` encoded POST request body. The values are
            instances of :class:`cgi.FieldStorage`.
        """
        files = MultiDict()
        for name, item in self.POST.iterallitems():
            if hasattr(item, 'filename'):
                files[name] = item
        return files

    @DictProperty('environ', 'bottle.params', read_only=True)
    def params(self):
        """ A combined :class:`MultiDict` with values from :attr:`forms` and
            :attr:`GET`. File-uploads are not included. """
        params = MultiDict(self.GET)
        for key, value in self.forms.iterallitems():
            params[key] = value
        return params

    @DictProperty('environ', 'bottle.body', read_only=True)
    def _body(self):
        """ The HTTP request body as a seekable file-like object.

            This property returns a copy of the `wsgi.input` stream and should
            be used instead of `environ['wsgi.input']`.
""" maxread = max(0, self.content_length) stream = self.environ['wsgi.input'] body = BytesIO() if maxread < MEMFILE_MAX else TemporaryFile(mode='w+b') while maxread > 0: part = stream.read(min(maxread, MEMFILE_MAX)) if not part: break body.write(part) maxread -= len(part) self.environ['wsgi.input'] = body body.seek(0) return body @property def body(self): self._body.seek(0) return self._body @property def auth(self): #TODO: Tests and docs. Add support for digest. namedtuple? """ HTTP authorization data as a (user, passwd) tuple. (experimental) This implementation currently only supports basic auth and returns None on errors. """ return parse_auth(self.headers.get('Authorization','')) @DictProperty('environ', 'bottle.cookies', read_only=True) def COOKIES(self): """ Cookies parsed into a dictionary. Signed cookies are NOT decoded automatically. See :meth:`get_cookie` for details. """ raw_dict = SimpleCookie(self.headers.get('Cookie','')) cookies = {} for cookie in raw_dict.itervalues(): cookies[cookie.key] = cookie.value return cookies def get_cookie(self, key, secret=None): """ Return the content of a cookie. To read a `Signed Cookies`, use the same `secret` as used to create the cookie (see :meth:`Response.set_cookie`). If anything goes wrong, None is returned. """ value = self.COOKIES.get(key) if secret and value: dec = cookie_decode(value, secret) # (key, value) tuple or None return dec[1] if dec and dec[0] == key else None return value or None @property def is_ajax(self): ''' True if the request was generated using XMLHttpRequest ''' #TODO: write tests return self.header.get('X-Requested-With') == 'XMLHttpRequest' class Response(threading.local): """ Represents a single HTTP response using thread-local attributes. """ def __init__(self): self.bind() def bind(self): """ Resets the Response object to its factory defaults. 
""" self._COOKIES = None self.status = 200 self.headers = HeaderDict() self.content_type = 'text/html; charset=UTF-8' @property def header(self): depr("Response.header renamed to Response.headers") return self.headers def copy(self): ''' Returns a copy of self. ''' copy = Response() copy.status = self.status copy.headers = self.headers.copy() copy.content_type = self.content_type return copy def wsgiheader(self): ''' Returns a wsgi conform list of header/value pairs. ''' for c in self.COOKIES.values(): if c.OutputString() not in self.headers.getall('Set-Cookie'): self.headers.append('Set-Cookie', c.OutputString()) # rfc2616 section 10.2.3, 10.3.5 if self.status in (204, 304) and 'content-type' in self.headers: del self.headers['content-type'] if self.status == 304: for h in ('allow', 'content-encoding', 'content-language', 'content-length', 'content-md5', 'content-range', 'content-type', 'last-modified'): # + c-location, expires? if h in self.headers: del self.headers[h] return list(self.headers.iterallitems()) headerlist = property(wsgiheader) @property def charset(self): """ Return the charset specified in the content-type header. This defaults to `UTF-8`. """ if 'charset=' in self.content_type: return self.content_type.split('charset=')[-1].split(';')[0].strip() return 'UTF-8' @property def COOKIES(self): """ A dict-like SimpleCookie instance. Use :meth:`set_cookie` instead. """ if not self._COOKIES: self._COOKIES = SimpleCookie() return self._COOKIES def set_cookie(self, key, value, secret=None, **kargs): ''' Add a cookie or overwrite an old one. If the `secret` parameter is set, create a `Signed Cookie` (described below). :param key: the name of the cookie. :param value: the value of the cookie. :param secret: required for signed cookies. (default: None) :param max_age: maximum age in seconds. (default: None) :param expires: a datetime object or UNIX timestamp. (defaut: None) :param domain: the domain that is allowed to read the cookie. 
(default: current domain) :param path: limits the cookie to a given path (default: /) If neither `expires` nor `max_age` are set (default), the cookie lasts only as long as the browser is not closed. Signed cookies may store any pickle-able object and are cryptographically signed to prevent manipulation. Keep in mind that cookies are limited to 4kb in most browsers. Warning: Signed cookies are not encrypted (the client can still see the content) and not copy-protected (the client can restore an old cookie). The main intention is to make pickling and unpickling save, not to store secret information at client side. ''' if secret: value = touni(cookie_encode((key, value), secret)) elif not isinstance(value, basestring): raise TypeError('Secret missing for non-string Cookie.') self.COOKIES[key] = value for k, v in kargs.iteritems(): self.COOKIES[key][k.replace('_', '-')] = v def delete_cookie(self, key, **kwargs): ''' Delete a cookie. Be sure to use the same `domain` and `path` parameters as used to create the cookie. ''' kwargs['max_age'] = -1 kwargs['expires'] = 0 self.set_cookie(key, '', **kwargs) def get_content_type(self): """ Current 'Content-Type' header. 
""" return self.headers['Content-Type'] def set_content_type(self, value): self.headers['Content-Type'] = value content_type = property(get_content_type, set_content_type, None, get_content_type.__doc__) ############################################################################### # Plugins ###################################################################### ############################################################################### class JSONPlugin(object): name = 'json' def __init__(self, json_dumps=json_dumps): self.json_dumps = json_dumps def apply(self, callback, context): dumps = self.json_dumps if not dumps: return callback def wrapper(*a, **ka): rv = callback(*a, **ka) if isinstance(rv, dict): response.content_type = 'application/json' return dumps(rv) return rv return wrapper class HooksPlugin(object): name = 'hooks' def __init__(self): self.hooks = {'before_request': [], 'after_request': []} self.app = None def _empty(self): return not (self.hooks['before_request'] or self.hooks['after_request']) def setup(self, app): self.app = app def add(self, name, func): ''' Attach a callback to a hook. ''' if name not in self.hooks: raise ValueError("Unknown hook name %s" % name) was_empty = self._empty() self.hooks[name].append(func) if self.app and was_empty and not self._empty(): self.app.reset() def remove(self, name, func): ''' Remove a callback from a hook. 
''' if name not in self.hooks: raise ValueError("Unknown hook name %s" % name) was_empty = self._empty() self.hooks[name].remove(func) if self.app and not was_empty and self._empty(): self.app.reset() def apply(self, callback, context): if self._empty(): return callback before_request = self.hooks['before_request'] after_request = self.hooks['after_request'] def wrapper(*a, **ka): for hook in before_request: hook() rv = callback(*a, **ka) for hook in after_request[::-1]: hook() return rv return wrapper class TypeFilterPlugin(object): def __init__(self): self.filter = [] self.app = None def setup(self, app): self.app = app def add(self, ftype, func): if not self.filter and app: self.app.reset() if not isinstance(ftype, type): raise TypeError("Expected type object, got %s" % type(ftype)) self.filter = [(t, f) for (t, f) in self.filter if t != ftype] self.filter.append((ftype, func)) def apply(self, callback, context): filter = self.filter if not filter: return callback def wrapper(*a, **ka): rv = callback(*a, **ka) for testtype, filterfunc in filter: if isinstance(rv, testtype): rv = filterfunc(rv) return rv return wrapper ############################################################################### # Common Utilities ############################################################# ############################################################################### class MultiDict(DictMixin): """ A dict that remembers old values for each key """ # collections.MutableMapping would be better for Python >= 2.6 def __init__(self, *a, **k): self.dict = dict() for k, v in dict(*a, **k).iteritems(): self[k] = v def __len__(self): return len(self.dict) def __iter__(self): return iter(self.dict) def __contains__(self, key): return key in self.dict def __delitem__(self, key): del self.dict[key] def keys(self): return self.dict.keys() def __getitem__(self, key): return self.get(key, KeyError, -1) def __setitem__(self, key, value): self.append(key, value) def append(self, key, value): 
        self.dict.setdefault(key, []).append(value)

    def replace(self, key, value):
        # Discard all stored values and keep only the new one.
        self.dict[key] = [value]

    def getall(self, key):
        return self.dict.get(key) or []

    def get(self, key, default=None, index=-1):
        # default=KeyError is a sentinel: a missing key then raises KeyError
        # via the plain dict lookup below (used by __getitem__).
        if key not in self.dict and default != KeyError:
            return [default][index]
        return self.dict[key][index]

    def iterallitems(self):
        for key, values in self.dict.iteritems():
            for value in values:
                yield key, value


class HeaderDict(MultiDict):
    """ Same as :class:`MultiDict`, but title()s the keys and overwrites. """
    def __contains__(self, key):
        return MultiDict.__contains__(self, self.httpkey(key))
    def __getitem__(self, key):
        return MultiDict.__getitem__(self, self.httpkey(key))
    def __delitem__(self, key):
        return MultiDict.__delitem__(self, self.httpkey(key))
    def __setitem__(self, key, value): self.replace(key, value)
    def get(self, key, default=None, index=-1):
        return MultiDict.get(self, self.httpkey(key), default, index)
    def append(self, key, value):
        return MultiDict.append(self, self.httpkey(key), str(value))
    def replace(self, key, value):
        return MultiDict.replace(self, self.httpkey(key), str(value))
    def getall(self, key): return MultiDict.getall(self, self.httpkey(key))
    def httpkey(self, key): return str(key).replace('_','-').title()


class WSGIHeaderDict(DictMixin):
    ''' This dict-like class wraps a WSGI environ dict and provides convenient
        access to HTTP_* fields. Keys and values are native strings (2.x bytes
        or 3.x unicode) and keys are case-insensitive. If the WSGI environment
        contains non-native string values, these are de- or encoded using a
        lossless 'latin1' character set.

        The API will remain stable even on changes to the relevant PEPs.
        Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only
        one that uses non-native strings.)
    '''

    def __init__(self, environ):
        self.environ = environ

    def _ekey(self, key): # Translate header field name to environ key.
        return 'HTTP_' + key.replace('-','_').upper()

    def raw(self, key, default=None):
        ''' Return the header value as is (may be bytes or unicode). '''
        return self.environ.get(self._ekey(key), default)

    def __getitem__(self, key):
        return tonat(self.environ[self._ekey(key)], 'latin1')

    def __setitem__(self, key, value):
        raise TypeError("%s is read-only." % self.__class__)

    def __delitem__(self, key):
        raise TypeError("%s is read-only." % self.__class__)

    def __iter__(self):
        for key in self.environ:
            if key[:5] == 'HTTP_':
                yield key[5:].replace('_', '-').title()

    def keys(self): return list(self)
    def __len__(self): return len(list(self))
    def __contains__(self, key): return self._ekey(key) in self.environ


class AppStack(list):
    """ A stack-like list. Calling it returns the head of the stack. """

    def __call__(self):
        """ Return the current default application. """
        return self[-1]

    def push(self, value=None):
        """ Add a new :class:`Bottle` instance to the stack """
        if not isinstance(value, Bottle):
            value = Bottle()
        self.append(value)
        return value


class WSGIFileWrapper(object):
    # Minimal PEP 333 file_wrapper fallback.

    def __init__(self, fp, buffer_size=1024*64):
        self.fp, self.buffer_size = fp, buffer_size
        # Expose the underlying file's methods where available.
        for attr in ('fileno', 'close', 'read', 'readlines'):
            if hasattr(fp, attr):
                setattr(self, attr, getattr(fp, attr))

    def __iter__(self):
        read, buff = self.fp.read, self.buffer_size
        while True:
            part = read(buff)
            if not part: break
            yield part


###############################################################################
# Application Helper ###########################################################
###############################################################################


def dict2json(d):
    depr('JSONPlugin is the preferred way to return JSON.') #0.9
    response.content_type = 'application/json'
    return json_dumps(d)


def abort(code=500, text='Unknown Error: Application stopped.'):
    """ Aborts execution and causes a HTTP error.
""" raise HTTPError(code, text) def redirect(url, code=303): """ Aborts execution and causes a 303 redirect """ scriptname = request.environ.get('SCRIPT_NAME', '').rstrip('/') + '/' location = urljoin(request.url, urljoin(scriptname, url)) raise HTTPResponse("", status=code, header=dict(Location=location)) def send_file(*a, **k): #BC 0.6.4 """ Raises the output of static_file(). (deprecated) """ raise static_file(*a, **k) def static_file(filename, root, guessmime=True, mimetype=None, download=False): """ Opens a file in a safe way and returns a HTTPError object with status code 200, 305, 401 or 404. Sets Content-Type, Content-Length and Last-Modified header. Obeys If-Modified-Since header and HEAD requests. """ root = os.path.abspath(root) + os.sep filename = os.path.abspath(os.path.join(root, filename.strip('/\\'))) header = dict() if not filename.startswith(root): return HTTPError(403, "Access denied.") if not os.path.exists(filename) or not os.path.isfile(filename): return HTTPError(404, "File does not exist.") if not os.access(filename, os.R_OK): return HTTPError(403, "You do not have permission to access this file.") if not mimetype and guessmime: header['Content-Type'] = mimetypes.guess_type(filename)[0] else: header['Content-Type'] = mimetype if mimetype else 'text/plain' if download == True: download = os.path.basename(filename) if download: header['Content-Disposition'] = 'attachment; filename="%s"' % download stats = os.stat(filename) lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime)) header['Last-Modified'] = lm ims = request.environ.get('HTTP_IF_MODIFIED_SINCE') if ims: ims = ims.split(";")[0].strip() # IE sends "<date>; length=146" ims = parse_date(ims) if ims is not None and ims >= int(stats.st_mtime): header['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) return HTTPResponse(status=304, header=header) header['Content-Length'] = stats.st_size if request.method == 'HEAD': return HTTPResponse('', 
                            header=header)
    else:
        return HTTPResponse(open(filename, 'rb'), header=header)


###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################


def debug(mode=True):
    """ Change the debug level.
    There is only one debug level supported at the moment."""
    global DEBUG
    DEBUG = bool(mode)


def parse_date(ims):
    """ Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
    try:
        ts = email.utils.parsedate_tz(ims)
        # parsedate_tz returns local-time fields plus an offset (ts[9]);
        # convert to a UTC epoch by removing both offsets.
        return time.mktime(ts[:8] + (0,)) - (ts[9] or 0) - time.timezone
    except (TypeError, ValueError, IndexError, OverflowError):
        return None


def parse_auth(header):
    """ Parse rfc2617 HTTP authentication header string (basic) and return
        (user,pass) tuple or None"""
    try:
        method, data = header.split(None, 1)
        if method.lower() == 'basic':
            # Only 'Basic' is supported; the payload is base64("user:pass").
            name, pwd = base64.b64decode(data).split(':', 1)
            return name, pwd
    except (KeyError, ValueError, TypeError):
        return None


def _lscmp(a, b):
    ''' Compares two strings in a cryptographically safe way:
        Runtime is not affected by a common prefix. '''
    # Constant-time-ish comparison used for cookie signature checks.
    return not sum(0 if x == y else 1 for x, y in zip(a, b)) and len(a) == len(b)


def cookie_encode(data, key):
    ''' Encode and sign a pickle-able object. Return a (byte) string '''
    msg = base64.b64encode(pickle.dumps(data, -1))
    sig = base64.b64encode(hmac.new(key, msg).digest())
    # Wire format: '!' + signature + '?' + payload
    return tob('!') + sig + tob('?') + msg


def cookie_decode(data, key):
    ''' Verify and decode an encoded string. Return an object or None.'''
    data = tob(data)
    if cookie_is_encoded(data):
        sig, msg = data.split(tob('?'), 1)
        # Only unpickle when the HMAC signature verifies; pickle.loads on
        # unverified input would be a code-execution risk.
        if _lscmp(sig[1:], base64.b64encode(hmac.new(key, msg).digest())):
            return pickle.loads(base64.b64decode(msg))
    return None


def cookie_is_encoded(data):
    ''' Return True if the argument looks like a encoded cookie.'''
    return bool(data.startswith(tob('!')) and tob('?') in data)


def yieldroutes(func):
    """ Return a generator for routes that match the signature (name, args)
    of the func parameter. This may yield more than one route if the function
    takes optional keyword arguments. The output is best described by example::

        a()         -> '/a'
        b(x, y)     -> '/b/:x/:y'
        c(x, y=5)   -> '/c/:x' and '/c/:x/:y'
        d(x=5, y=6) -> '/d' and '/d/:x' and '/d/:x/:y'
    """
    import inspect # Expensive module. Only import if necessary.
    path = '/' + func.__name__.replace('__', '/').lstrip('/')
    spec = inspect.getargspec(func)
    # argc = number of arguments without a default value.
    argc = len(spec[0]) - len(spec[3] or [])
    path += ('/:%s' * argc) % tuple(spec[0][:argc])
    yield path
    # One extra, longer route per optional (defaulted) argument.
    for arg in spec[0][argc:]:
        path += '/:%s' % arg
        yield path


def path_shift(script_name, path_info, shift=1):
    ''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.

        :return: The modified paths.
        :param script_name: The SCRIPT_NAME path.
        :param path_info: The PATH_INFO path.
        :param shift: The number of path fragments to shift. May be negative to
          change the shift direction. (default: 1)
    '''
    if shift == 0: return script_name, path_info
    pathlist = path_info.strip('/').split('/')
    scriptlist = script_name.strip('/').split('/')
    # ''.split('/') yields [''] -- normalize to an empty list.
    if pathlist and pathlist[0] == '': pathlist = []
    if scriptlist and scriptlist[0] == '': scriptlist = []
    if shift > 0 and shift <= len(pathlist):
        # Move the first `shift` fragments from PATH_INFO to SCRIPT_NAME.
        moved = pathlist[:shift]
        scriptlist = scriptlist + moved
        pathlist = pathlist[shift:]
    elif shift < 0 and shift >= -len(scriptlist):
        # Move the last `-shift` fragments from SCRIPT_NAME back to PATH_INFO.
        moved = scriptlist[shift:]
        pathlist = moved + pathlist
        scriptlist = scriptlist[:shift]
    else:
        empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
        raise AssertionError("Cannot shift. Nothing left from %s" % empty)
    new_script_name = '/' + '/'.join(scriptlist)
    new_path_info = '/' + '/'.join(pathlist)
    # Preserve a trailing slash on PATH_INFO.
    if path_info.endswith('/') and pathlist: new_path_info += '/'
    return new_script_name, new_path_info


# Decorators
#TODO: Replace default_app() with app()

def validate(**vkargs):
    """ Validates and manipulates keyword arguments by user defined callables.
        Handles ValueError and missing arguments by raising HTTPError(403). """
    def decorator(func):
        def wrapper(**kargs):
            for key, value in vkargs.iteritems():
                if key not in kargs:
                    abort(403, 'Missing parameter: %s' % key)
                try:
                    # Each validator converts/checks its keyword argument.
                    kargs[key] = value(kargs[key])
                except ValueError:
                    abort(403, 'Wrong parameter format for: %s' % key)
            return func(**kargs)
        return wrapper
    return decorator


def auth_basic(check, realm="private", text="Access denied"):
    ''' Callback decorator to require HTTP auth (basic).
        TODO: Add route(check_auth=...) parameter. '''
    def decorator(func):
        def wrapper(*a, **ka):
            user, password = request.auth or (None, None)
            if user is None or not check(user, password):
                # Challenge the client for credentials on failure.
                response.headers['WWW-Authenticate'] = 'Basic realm="%s"' % realm
                return HTTPError(401, text)
            return func(*a, **ka)
        return wrapper
    return decorator


def make_default_app_wrapper(name):
    ''' Return a callable that relays calls to the current default app. '''
    @functools.wraps(getattr(Bottle, name))
    def wrapper(*a, **ka):
        return getattr(app(), name)(*a, **ka)
    return wrapper


# Export module-level shortcuts (route(), get(), post(), ...) that delegate to
# the current default application on the app stack.
for name in '''route get post put delete error mount
               hook install uninstall'''.split():
    globals()[name] = make_default_app_wrapper(name)
url = make_default_app_wrapper('get_url')
del name


def default():
    depr("The default() decorator is deprecated. Use @error(404) instead.")
    return error(404)


###############################################################################
# Server Adapter ###############################################################
###############################################################################


class ServerAdapter(object):
    # Base class for all server back-ends. Subclasses override run().
    quiet = False  # suppress startup messages when True

    def __init__(self, host='127.0.0.1', port=8080, **config):
        self.options = config
        self.host = host
        self.port = int(port)

    def run(self, handler): # pragma: no cover
        pass

    def __repr__(self):
        args = ', '.join(['%s=%s' % (k, repr(v)) for k, v in self.options.items()])
        return "%s(%s)" % (self.__class__.__name__, args)


class CGIServer(ServerAdapter):
    quiet = True
    def run(self, handler): # pragma: no cover
        from wsgiref.handlers import CGIHandler
        CGIHandler().run(handler) # Just ignore host and port here


class FlupFCGIServer(ServerAdapter):
    def run(self, handler): # pragma: no cover
        import flup.server.fcgi
        kwargs = {'bindAddress': (self.host, self.port)}
        kwargs.update(self.options) # allow to override bindAddress and others
        flup.server.fcgi.WSGIServer(handler, **kwargs).run()


class WSGIRefServer(ServerAdapter):
    def run(self, handler): # pragma: no cover
        from wsgiref.simple_server import make_server, WSGIRequestHandler
        if self.quiet:
            # Silence per-request logging in quiet mode.
            class QuietHandler(WSGIRequestHandler):
                def log_request(*args, **kw): pass
            self.options['handler_class'] = QuietHandler
        srv = make_server(self.host, self.port, handler, **self.options)
        srv.serve_forever()


class CherryPyServer(ServerAdapter):
    def run(self, handler): # pragma: no cover
        from cherrypy import wsgiserver
        server = wsgiserver.CherryPyWSGIServer((self.host, self.port), handler)
        server.start()


class PasteServer(ServerAdapter):
    def run(self, handler): # pragma: no cover
        from paste import httpserver
        if not self.quiet:
            from paste.translogger import TransLogger
            handler = TransLogger(handler)
        httpserver.serve(handler, host=self.host, port=str(self.port),
                         **self.options)


class MeinheldServer(ServerAdapter):
    def run(self,
            handler):
        from meinheld import server
        server.listen((self.host, self.port))
        server.run(handler)


class FapwsServer(ServerAdapter):
    """ Extremely fast webserver using libev. See http://www.fapws.org/ """
    def run(self, handler): # pragma: no cover
        import fapws._evwsgi as evwsgi
        from fapws import base, config
        port = self.port
        if float(config.SERVER_IDENT[-2:]) > 0.4:
            # fapws3 silently changed its API in 0.5
            port = str(port)
        evwsgi.start(self.host, port)
        # fapws3 never releases the GIL. Complain upstream. I tried. No luck.
        if 'BOTTLE_CHILD' in os.environ and not self.quiet:
            print "WARNING: Auto-reloading does not work with Fapws3."
            print "         (Fapws3 breaks python thread support)"
        evwsgi.set_base_module(base)
        def app(environ, start_response):
            environ['wsgi.multiprocess'] = False
            return handler(environ, start_response)
        evwsgi.wsgi_cb(('', app))
        evwsgi.run()


class TornadoServer(ServerAdapter):
    """ The super hyped asynchronous server by facebook. Untested. """
    def run(self, handler): # pragma: no cover
        import tornado.wsgi
        import tornado.httpserver
        import tornado.ioloop
        container = tornado.wsgi.WSGIContainer(handler)
        server = tornado.httpserver.HTTPServer(container)
        server.listen(port=self.port)
        tornado.ioloop.IOLoop.instance().start()


class AppEngineServer(ServerAdapter):
    """ Adapter for Google App Engine. """
    quiet = True
    def run(self, handler):
        from google.appengine.ext.webapp import util
        # A main() function in the handler script enables 'App Caching'.
        # Let's make sure it is there. This _really_ improves performance.
        module = sys.modules.get('__main__')
        if module and not hasattr(module, 'main'):
            module.main = lambda: util.run_wsgi_app(handler)
        util.run_wsgi_app(handler)


class TwistedServer(ServerAdapter):
    """ Untested. """
    def run(self, handler):
        from twisted.web import server, wsgi
        from twisted.python.threadpool import ThreadPool
        from twisted.internet import reactor
        thread_pool = ThreadPool()
        thread_pool.start()
        # Stop the pool cleanly when the reactor shuts down.
        reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
        factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
        reactor.listenTCP(self.port, factory, interface=self.host)
        reactor.run()


class DieselServer(ServerAdapter):
    """ Untested. """
    def run(self, handler):
        from diesel.protocols.wsgi import WSGIApplication
        app = WSGIApplication(handler, port=self.port)
        app.run()


class GeventServer(ServerAdapter):
    """ Untested. Options:

        * `monkey` (default: True) fixes the stdlib to use greenthreads.
        * `fast` (default: False) uses libevent's http server, but has some
          issues: No streaming, no pipelining, no SSL.
    """
    def run(self, handler):
        from gevent import wsgi as wsgi_fast, pywsgi as wsgi, monkey
        if self.options.get('monkey', True): monkey.patch_all()
        if self.options.get('fast', False): wsgi = wsgi_fast
        wsgi.WSGIServer((self.host, self.port), handler).serve_forever()


class GunicornServer(ServerAdapter):
    """ Untested. """
    def run(self, handler):
        from gunicorn.arbiter import Arbiter
        from gunicorn.config import Config
        handler.cfg = Config({'bind': "%s:%d" % (self.host, self.port), 'workers': 4})
        arbiter = Arbiter(handler)
        arbiter.run()


class EventletServer(ServerAdapter):
    """ Untested """
    def run(self, handler):
        from eventlet import wsgi, listen
        wsgi.server(listen((self.host, self.port)), handler)


class RocketServer(ServerAdapter):
    """ Untested. As requested in issue 63
        https://github.com/defnull/bottle/issues/#issue/63 """
    def run(self, handler):
        from rocket import Rocket
        server = Rocket((self.host, self.port), 'wsgi', { 'wsgi_app' : handler })
        server.start()


class BjoernServer(ServerAdapter):
    """ Screamingly fast server written in C: https://github.com/jonashaag/bjoern """
    def run(self, handler):
        from bjoern import run
        run(handler, self.host, self.port)


class Mongrel2Server(ServerAdapter):
    # Non-standard addition: bridges the app to a Mongrel2 front-end over 0MQ.
    # NOTE(review): the sender id and the two tcp endpoints are hard-coded
    # here -- confirm they match the local mongrel2 configuration.
    def run(self, app_handler):
        from mongrel2 import handler
        from mongrel2_wsgi import wsgi_server
        print "Starting 0MQ server."
        wsgi_server(app_handler, handler.Connection(
            "279a117-5be1-4da7-9c4e-702b3412baba",
            "tcp://127.0.0.1:9997", "tcp://127.0.0.1:9996"))


class AutoServer(ServerAdapter):
    """ Untested. """
    # Try each adapter in order and use the first whose back-end is installed.
    adapters = [PasteServer, CherryPyServer, TwistedServer, WSGIRefServer]
    def run(self, handler):
        for sa in self.adapters:
            try:
                return sa(self.host, self.port, **self.options).run(handler)
            except ImportError:
                pass


# Maps the `server` string accepted by run() to its adapter class.
server_names = {
    'cgi': CGIServer,
    'flup': FlupFCGIServer,
    'wsgiref': WSGIRefServer,
    'cherrypy': CherryPyServer,
    'paste': PasteServer,
    'fapws3': FapwsServer,
    'tornado': TornadoServer,
    'gae': AppEngineServer,
    'twisted': TwistedServer,
    'diesel': DieselServer,
    'meinheld': MeinheldServer,
    'gunicorn': GunicornServer,
    'eventlet': EventletServer,
    'gevent': GeventServer,
    'rocket': RocketServer,
    'bjoern' : BjoernServer,
    'mongrel2': Mongrel2Server,
    'auto': AutoServer,
}


########################################################
# Advanced reloading
########################################################

POLL_INTERVAL = 1 # check for changes every n seconds.
SPINUP_TIME = 10 # application must start within this time.
class ReloadingMonitor(object):
    # Non-standard addition: watches loaded module files (plus registered
    # extra files) for changes and signals the parent process over a queue
    # so a fresh worker can be spun up.
    instances = []              # all monitor instances (for class-level calls)
    global_extra_files = []     # extra files every future instance watches
    global_file_callbacks = []  # callbacks returning extra filenames to watch

    def __init__(self, tx=None, rx=None, poll_interval=POLL_INTERVAL):
        self.module_mtimes = {}
        self.keep_running = True
        self.poll_interval = poll_interval
        self.extra_files = list(self.global_extra_files)
        self.instances.append(self)
        self.file_callbacks = list(self.global_file_callbacks)
        self.state = 'RUN'
        # tx: queue to the parent process; rx: event set by the parent to
        # tell this worker to shut down.
        self.tx = tx
        self.rx = rx

    def periodic_reload(self):
        # Poll until the parent sets rx; on a detected change, notify the
        # parent and wait up to SPINUP_TIME for the replacement to take over.
        while not self.rx.is_set():
            if not self.check_reload():
                self.state = 'STANDBY'
                # inform code change
                self.tx.put({'pid':os.getpid(), 'status':'changed'})
                self.rx.wait(SPINUP_TIME)
                if self.rx.is_set():
                    return
                # Replacement did not come up in time: resume serving and
                # forget cached mtimes so the change can be reported again.
                self.state = 'RUN'
                self.module_mtimes = {}
            time.sleep(self.poll_interval)

    def check_reload(self):
        # Return False as soon as any watched file is newer than last seen.
        filenames = list(self.extra_files)
        for file_callback in self.file_callbacks:
            try:
                filenames.extend(file_callback())
            except:
                print >> sys.stderr, "Error calling reloader callback %r:" % file_callback
                traceback.print_exc()
        for module in sys.modules.values():
            try:
                filename = module.__file__
            except (AttributeError, ImportError), exc:
                continue
            if filename is not None:
                filenames.append(filename)
        for filename in filenames:
            try:
                stat = os.stat(filename)
                if stat:
                    mtime = stat.st_mtime
                else:
                    mtime = 0
            except (OSError, IOError):
                continue
            # Compare against the source file, not the stale bytecode.
            if filename.endswith('.pyc') and os.path.exists(filename[:-1]):
                mtime = max(os.stat(filename[:-1]).st_mtime, mtime)
            elif filename.endswith('$py.class') and \
                    os.path.exists(filename[:-9] + '.py'):
                mtime = max(os.stat(filename[:-9] + '.py').st_mtime, mtime)
            if not self.module_mtimes.has_key(filename):
                self.module_mtimes[filename] = mtime
            elif self.module_mtimes[filename] < mtime:
                print >> sys.stderr, (
                    "%s changed; reloading..." % filename)
                return False
        return True

    def watch_file(self, cls, filename):
        """Watch the named file for changes"""
        filename = os.path.abspath(filename)
        # classinstancemethod: self is None when called on the class itself;
        # then the file is registered globally and on all live instances.
        if self is None:
            for instance in cls.instances:
                instance.watch_file(filename)
            cls.global_extra_files.append(filename)
        else:
            self.extra_files.append(filename)

    watch_file = classinstancemethod(watch_file)

    def add_file_callback(self, cls, callback):
        """Add a callback -- a function that takes no parameters -- that will
        return a list of filenames to watch for changes."""
        if self is None:
            for instance in cls.instances:
                instance.add_file_callback(callback)
            cls.global_file_callbacks.append(callback)
        else:
            self.file_callbacks.append(callback)

    add_file_callback = classinstancemethod(add_file_callback)


def _reloader_new_serve(server, app, interval, tx, rx):
    # Worker-process entry point for the new reloader: serve the app in a
    # daemon thread and run the change monitor in the main thread.
    try:
        tx.put({'pid':os.getpid(), 'status':'loaded'})
        def go():
            server.run(app)
        t = threading.Thread(target=go)
        t.setDaemon(True)
        t.start()
        monitor = ReloadingMonitor(tx=tx, rx=rx, poll_interval=interval)
        monitor.periodic_reload()
    except KeyboardInterrupt:
        pass


###############################################################################
# Application Control ##########################################################
###############################################################################


def _load(target, **vars):
    """ Fetch something from a module. The exact behaviour depends on the the
        target string:

        If the target is a valid python import path (e.g. `package.module`),
        the rightmost part is returned as a module object.
        If the target contains a colon (e.g. `package.module:var`) the module
        variable specified after the colon is returned.
        If the part after the colon contains any non-alphanumeric characters
        (e.g. `package.module:func(var)`) the result of the expression
        is returned. The expression has access to keyword arguments supplied
        to this function.

        Example::
        >>> _load('bottle')
        <module 'bottle' from 'bottle.py'>
        >>> _load('bottle:Bottle')
        <class 'bottle.Bottle'>
        >>> _load('bottle:cookie_encode(v, secret)', v='foo', secret='bar')
        '!F+hN4dQxaDJ4QxxaZ+Z3jw==?gAJVA2Zvb3EBLg=='
    """
    module, target = target.split(":", 1) if ':' in target else (target, None)
    if module not in sys.modules: __import__(module)
    if not target: return sys.modules[module]
    if target.isalnum(): return getattr(sys.modules[module], target)
    # Complex target: evaluate it as an expression (trusted input only).
    package_name = module.split('.')[0]
    vars[package_name] = sys.modules[package_name]
    return eval('%s.%s' % (module, target), vars)


def load_app(target):
    """ Load a bottle application based on a target string and return the
        application object.

        If the target is an import path (e.g. package.module), the application
        stack is used to isolate the routes defined in that module.
        If the target contains a colon (e.g. package.module:myapp) the module
        variable specified after the colon is returned instead.
    """
    tmp = app.push() # Create a new "default application"
    rv = _load(target) # Import the target module
    app.remove(tmp) # Remove the temporary added default application
    return rv if isinstance(rv, Bottle) else tmp


def run(app=None, server='wsgiref', host='127.0.0.1', port=8080,
        interval=1, reloader=False, quiet=False, **kargs):
    """ Start a server instance. This method blocks until the server
        terminates.

        :param app: WSGI application or target string supported by
               :func:`load_app`. (default: :func:`default_app`)
        :param server: Server adapter to use. See :data:`server_names` keys
               for valid names or pass a :class:`ServerAdapter` subclass.
               (default: `wsgiref`)
        :param host: Server address to bind to. Pass ``0.0.0.0`` to listens on
               all interfaces including the external one. (default: 127.0.0.1)
        :param port: Server port to bind to. Values below 1024 require root
               privileges. (default: 8080)
        :param reloader: Start auto-reloading server? (default: False)
        :param interval: Auto-reloader interval in seconds (default: 1)
        :param quiet: Suppress output to stdout and stderr? (default: False)
        :param options: Options passed to the server adapter.
    """
    app = app or default_app()
    if isinstance(app, basestring):
        app = load_app(app)
    # Normalize `server` (name string -> class -> configured instance).
    if isinstance(server, basestring):
        server = server_names.get(server)
    if isinstance(server, type):
        server = server(host=host, port=port, **kargs)
    if not isinstance(server, ServerAdapter):
        raise RuntimeError("Server must be a subclass of ServerAdapter")
    server.quiet = server.quiet or quiet
    if not server.quiet and not os.environ.get('BOTTLE_CHILD'):
        print "Bottle server starting up (using %s)..." % repr(server)
        print "Listening on http://%s:%d/" % (server.host, server.port)
        print "Use Ctrl-C to quit."
        print
    try:
        if reloader:
            # old reloading !!!
            interval = min(interval, 1)
            if os.environ.get('BOTTLE_CHILD'):
                _reloader_child(server, app, interval)
            else:
                _reloader_observer(server, app, interval)
        elif False:
            # NOTE(review): dead branch -- the multiprocessing-based "new"
            # reloader below is disabled by the constant `elif False`.
            print "reloader !!!"
            interval = min(interval, 1)
            # tx, rx from the subprocess' perspective.
            tx = Queue()
            def spinup():
                rx = Event()
                worker = Process(target=_reloader_new_serve,
                                 args=(server, app, interval, tx, rx))
                worker.rx = rx
                worker.start()
                return worker
            spinup()
            while True:
                try:
                    msg = tx.get(True, 1)
                    sys.stderr.write("%r\n" % msg)
                    if msg['status'] == 'changed':
                        # Start a replacement worker for the changed code.
                        spinup()
                    elif msg['status'] == 'loaded':
                        # New worker is up: tell all other workers to exit.
                        for worker in active_children():
                            if worker.ident != msg['pid']:
                                worker.rx.set()
                except Empty:
                    if not active_children():
                        return
        else:
            server.run(app)
    except KeyboardInterrupt:
        pass
    if not server.quiet and not os.environ.get('BOTTLE_CHILD'):
        print "Shutting down..."


class FileCheckerThread(threading.Thread):
    ''' Thread that periodically checks for changed module files.
    '''

    def __init__(self, lockfile, interval):
        threading.Thread.__init__(self)
        self.lockfile, self.interval = lockfile, interval
        #1: lockfile too old; 2: lockfile missing
        #3: module file changed; 5: external exit
        self.status = 0

    def run(self):
        exists = os.path.exists
        mtime = lambda path: os.stat(path).st_mtime
        # Snapshot the mtimes of all loaded module source files once.
        files = dict()
        for module in sys.modules.values():
            path = getattr(module, '__file__', '')
            if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
            if path and exists(path): files[path] = mtime(path)
        while not self.status:
            for path, lmtime in files.iteritems():
                if not exists(path) or mtime(path) > lmtime:
                    self.status = 3
            if not exists(self.lockfile):
                self.status = 2
            elif mtime(self.lockfile) < time.time() - self.interval - 5:
                # Observer stopped touching the lockfile: assume it died.
                self.status = 1
            if not self.status:
                time.sleep(self.interval)
        if self.status != 5:
            # Break the main thread out of server.serve_forever().
            thread.interrupt_main()


def _reloader_child(server, app, interval):
    ''' Start the server and check for modified files in a background thread.
        As soon as an update is detected, KeyboardInterrupt is thrown in
        the main thread to exit the server loop. The process exits with status
        code 3 to request a reload by the observer process. If the lockfile
        is not modified in 2*interval second or missing, we assume that the
        observer process died and exit with status code 1 or 2.
    '''
    lockfile = os.environ.get('BOTTLE_LOCKFILE')
    bgcheck = FileCheckerThread(lockfile, interval)
    try:
        bgcheck.start()
        server.run(app)
    except KeyboardInterrupt:
        pass
    bgcheck.status, status = 5, bgcheck.status
    bgcheck.join() # bgcheck.status == 5 --> silent exit
    if status:
        sys.exit(status)


def _reloader_observer(server, app, interval):
    ''' Start a child process with identical commandline arguments and restart
        it as long as it exits with status code 3. Also create a lockfile and
        touch it (update mtime) every interval seconds.
    '''
    fd, lockfile = tempfile.mkstemp(prefix='bottle-reloader.', suffix='.lock')
    os.close(fd) # We only need this file to exist. We never write to it
    try:
        while os.path.exists(lockfile):
            args = [sys.executable] + sys.argv
            environ = os.environ.copy()
            environ['BOTTLE_CHILD'] = 'true'
            environ['BOTTLE_LOCKFILE'] = lockfile
            p = subprocess.Popen(args, env=environ)
            while p.poll() is None: # Busy wait...
                os.utime(lockfile, None) # I am alive!
                time.sleep(interval)
            # Exit code 3 means "source changed, please restart me".
            if p.poll() != 3:
                if os.path.exists(lockfile): os.unlink(lockfile)
                sys.exit(p.poll())
            elif not server.quiet:
                print "Reloading server..."
    except KeyboardInterrupt:
        pass
    if os.path.exists(lockfile): os.unlink(lockfile)


###############################################################################
# Template Adapters ############################################################
###############################################################################


class TemplateError(HTTPError):
    # Template failures surface as HTTP 500 responses.
    def __init__(self, message):
        HTTPError.__init__(self, 500, message)


class BaseTemplate(object):
    """ Base class and minimal API for template adapters """
    extentions = ['tpl','html','thtml','stpl']
    settings = {} #used in prepare()
    defaults = {} #used in render()

    def __init__(self, source=None, name=None, lookup=[], encoding='utf8',
                 **settings):
        """ Create a new template.
        If the source parameter (str or buffer) is missing, the name argument
        is used to guess a template filename. Subclasses can assume that
        self.source and/or self.filename are set. Both are strings.
        The lookup, encoding and settings parameters are stored as instance
        variables.
        The lookup parameter stores a list containing directory paths.
        The encoding parameter should be used to decode byte strings or files.
        The settings parameter contains a dict for engine-specific settings.
        """
        self.name = name
        self.source = source.read() if hasattr(source, 'read') else source
        self.filename = source.filename if hasattr(source, 'filename') else None
        self.lookup = map(os.path.abspath, lookup)
        self.encoding = encoding
        self.settings = self.settings.copy() # Copy from class variable
        self.settings.update(settings) # Apply
        if not self.source and self.name:
            self.filename = self.search(self.name, self.lookup)
            if not self.filename:
                raise TemplateError('Template %s not found.' % repr(name))
        if not self.source and not self.filename:
            raise TemplateError('No template specified.')
        self.prepare(**self.settings)

    @classmethod
    def search(cls, name, lookup=[]):
        """ Search name in all directories specified in lookup.
        First without, then with common extensions. Return first hit. """
        if os.path.isfile(name): return name
        for spath in lookup:
            fname = os.path.join(spath, name)
            if os.path.isfile(fname):
                return fname
            for ext in cls.extentions:
                if os.path.isfile('%s.%s' % (fname, ext)):
                    return '%s.%s' % (fname, ext)

    @classmethod
    def global_config(cls, key, *args):
        ''' This reads or sets the global settings stored in class.settings. '''
        if args:
            cls.settings[key] = args[0]
        else:
            return cls.settings[key]

    def prepare(self, **options):
        """ Run preparations (parsing, caching, ...).
        It should be possible to call this again to refresh a template or to
        update settings.
        """
        raise NotImplementedError

    def render(self, *args, **kwargs):
        """ Render the template with the specified local variables and return
        a single byte or unicode string. If it is a byte string, the encoding
        must match self.encoding. This method must be thread-safe!
        Local variables may be provided in dictionaries (*args)
        or directly, as keywords (**kwargs).
        """
        raise NotImplementedError


class MakoTemplate(BaseTemplate):
    def prepare(self, **options):
        from mako.template import Template
        from mako.lookup import TemplateLookup
        options.update({'input_encoding':self.encoding})
        options.setdefault('format_exceptions', bool(DEBUG))
        #TODO: This is a hack... https://github.com/defnull/bottle/issues#issue/8
        mylookup = TemplateLookup(directories=['.']+self.lookup, **options)
        if self.source:
            self.tpl = Template(self.source, lookup=mylookup, **options)
        else: #mako cannot guess extentions. We can, but only at top level...
            name = self.name
            if not os.path.splitext(name)[1]:
                name += os.path.splitext(self.filename)[1]
            self.tpl = mylookup.get_template(name)

    def render(self, *args, **kwargs):
        for dictarg in args: kwargs.update(dictarg)
        _defaults = self.defaults.copy()
        _defaults.update(kwargs)
        return self.tpl.render(**_defaults)


class CheetahTemplate(BaseTemplate):
    def prepare(self, **options):
        from Cheetah.Template import Template
        # Thread-local searchList so concurrent renders do not share state.
        self.context = threading.local()
        self.context.vars = {}
        options['searchList'] = [self.context.vars]
        if self.source:
            self.tpl = Template(source=self.source, **options)
        else:
            self.tpl = Template(file=self.filename, **options)

    def render(self, *args, **kwargs):
        for dictarg in args: kwargs.update(dictarg)
        self.context.vars.update(self.defaults)
        self.context.vars.update(kwargs)
        out = str(self.tpl)
        self.context.vars.clear()
        return [out]


class Jinja2Template(BaseTemplate):
    def prepare(self, filters=None, tests=None, **kwargs):
        from jinja2 import Environment, FunctionLoader
        if 'prefix' in kwargs: # TODO: to be removed after a while
            raise RuntimeError('The keyword argument `prefix` has been removed. '
                'Use the full jinja2 environment name line_statement_prefix instead.')
        self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
        if filters: self.env.filters.update(filters)
        if tests: self.env.tests.update(tests)
        if self.source:
            self.tpl = self.env.from_string(self.source)
        else:
            self.tpl = self.env.get_template(self.filename)

    def render(self, *args, **kwargs):
        for dictarg in args: kwargs.update(dictarg)
        _defaults = self.defaults.copy()
        _defaults.update(kwargs)
        return self.tpl.render(**_defaults).encode("utf-8")

    def loader(self, name):
        # FunctionLoader callback: resolve `name` via the lookup path.
        fname = self.search(name, self.lookup)
        if fname:
            with open(fname, "rb") as f:
                return f.read().decode(self.encoding)


class SimpleTALTemplate(BaseTemplate):
    ''' Untested! '''
    def prepare(self, **options):
        from simpletal import simpleTAL
        # TODO: add option to load METAL files during render
        if self.source:
            self.tpl = simpleTAL.compileHTMLTemplate(self.source)
        else:
            with open(self.filename, 'rb') as fp:
                self.tpl = simpleTAL.compileHTMLTemplate(tonat(fp.read()))

    def render(self, *args, **kwargs):
        from simpletal import simpleTALES
        from StringIO import StringIO
        for dictarg in args: kwargs.update(dictarg)
        # TODO: maybe reuse a context instead of always creating one
        context = simpleTALES.Context()
        for k,v in self.defaults.items():
            context.addGlobal(k, v)
        for k,v in kwargs.items():
            context.addGlobal(k, v)
        output = StringIO()
        self.tpl.expand(context, output)
        return output.getvalue()


class SimpleTemplate(BaseTemplate):
    # Python keywords that open an indented block in generated code.
    blocks = ('if','elif','else','try','except','finally','for','while',
              'with','def','class')
    # Keywords that continue an existing block at the parent indent level.
    dedent_blocks = ('elif', 'else', 'except', 'finally')

    @lazy_attribute
    def re_pytokens(cls):
        ''' This matches comments and all kinds of quoted strings but does
            NOT match comments (#...) within quoted strings. (trust me) '''
        return re.compile(r'''
            (''(?!')|""(?!")|'{6}|"{6} # Empty strings (all 4 types)
             |'(?:[^\\']|\\.)+?' # Single quotes (')
             |"(?:[^\\"]|\\.)+?"
             # Double quotes (")
             |'{3}(?:[^\\]|\\.|\n)+?'{3} # Triple-quoted strings (')
             |"{3}(?:[^\\]|\\.|\n)+?"{3} # Triple-quoted strings (")
             |\#.* # Comments
            )''', re.VERBOSE)

    def prepare(self, escape_func=cgi.escape, noescape=False):
        self.cache = {}
        enc = self.encoding
        # _str: coerce to unicode; _escape: coerce and HTML-escape.
        self._str = lambda x: touni(x, enc)
        self._escape = lambda x: escape_func(touni(x, enc))
        if noescape:
            # noescape inverts the default: {{x}} is raw, {{!x}} is escaped.
            self._str, self._escape = self._escape, self._str

    @classmethod
    def split_comment(cls, code):
        """ Removes comments (#...) from python code. """
        if '#' not in code: return code
        #: Remove comments only (leave quoted strings as they are)
        subf = lambda m: '' if m.group(0)[0]=='#' else m.group(0)
        return re.sub(cls.re_pytokens, subf, code)

    @cached_property
    def co(self):
        # Compiled code object for the generated python source.
        return compile(self.code, self.filename or '<string>', 'exec')

    @cached_property
    def code(self):
        # Translate the template into python source code (a mini-compiler).
        stack = [] # Current Code indentation
        lineno = 0 # Current line of code
        ptrbuffer = [] # Buffer for printable strings and token tuple instances
        codebuffer = [] # Buffer for generated python code
        multiline = dedent = oneline = False
        template = self.source if self.source else open(self.filename).read()

        def yield_tokens(line):
            # Split a text line on {{...}} expressions into TXT/RAW/CMD tokens.
            for i, part in enumerate(re.split(r'\{\{(.*?)\}\}', line)):
                if i % 2:
                    if part.startswith('!'): yield 'RAW', part[1:]
                    else: yield 'CMD', part
                else: yield 'TXT', part

        def flush(): # Flush the ptrbuffer
            # Emit buffered text/expression tokens as one _printlist() call.
            if not ptrbuffer: return
            cline = ''
            for line in ptrbuffer:
                for token, value in line:
                    if token == 'TXT': cline += repr(value)
                    elif token == 'RAW': cline += '_str(%s)' % value
                    elif token == 'CMD': cline += '_escape(%s)' % value
                    cline += ', '
                cline = cline[:-2] + '\\\n'
            cline = cline[:-2]
            if cline[:-1].endswith('\\\\\\\\\\n'):
                cline = cline[:-7] + cline[-1] # 'nobr\\\\\n' --> 'nobr'
            cline = '_printlist([' + cline + '])'
            del ptrbuffer[:] # Do this before calling code() again
            code(cline)

        def code(stmt):
            # Append statement(s) at the current indentation depth.
            for line in stmt.splitlines():
                codebuffer.append('  ' * len(stack) + line.strip())

        for line in template.splitlines(True):
            lineno += 1
            line = line if isinstance(line, unicode)\
                        else unicode(line, encoding=self.encoding)
            if lineno <= 2:
                # Honor a PEP 263 coding declaration in the first two lines,
                # then neutralize it so the generated code has no second one.
                m = re.search(r"%.*coding[:=]\s*([-\w\.]+)", line)
                if m: self.encoding = m.group(1)
                if m: line = line.replace('coding','coding (removed)')
            if line.strip()[:2].count('%') == 1:
                line = line.split('%',1)[1].lstrip() # Full line following the %
                cline = self.split_comment(line).strip()
                cmd = re.split(r'[^a-zA-Z0-9_]', cline)[0]
                flush() ##encodig (TODO: why?)
                if cmd in self.blocks or multiline:
                    cmd = multiline or cmd
                    dedent = cmd in self.dedent_blocks # "else:"
                    if dedent and not oneline and not multiline:
                        cmd = stack.pop()
                    code(line)
                    oneline = not cline.endswith(':') # "if 1: pass"
                    multiline = cmd if cline.endswith('\\') else False
                    if not oneline and not multiline:
                        stack.append(cmd)
                elif cmd == 'end' and stack:
                    code('#end(%s) %s' % (stack.pop(), line.strip()[3:]))
                elif cmd == 'include':
                    p = cline.split(None, 2)[1:]
                    if len(p) == 2:
                        code("_=_include(%s, _stdout, %s)" % (repr(p[0]), p[1]))
                    elif p:
                        code("_=_include(%s, _stdout)" % repr(p[0]))
                    else: # Empty %include -> reverse of %rebase
                        code("_printlist(_base)")
                elif cmd == 'rebase':
                    p = cline.split(None, 2)[1:]
                    if len(p) == 2:
                        code("globals()['_rebase']=(%s, dict(%s))" % (repr(p[0]), p[1]))
                    elif p:
                        code("globals()['_rebase']=(%s, {})" % repr(p[0]))
                else:
                    # Any other % line is passed through as plain python code.
                    code(line)
            else: # Line starting with text (not '%') or '%%' (escaped)
                if line.strip().startswith('%%'):
                    line = line.replace('%%', '%', 1)
                ptrbuffer.append(yield_tokens(line))
        flush()
        return '\n'.join(codebuffer) + '\n'

    def subtemplate(self, _name, _stdout, *args, **kwargs):
        # Render an %include'd template, caching compiled subtemplates.
        for dictarg in args: kwargs.update(dictarg)
        if _name not in self.cache:
            self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
        return self.cache[_name].execute(_stdout, kwargs)

    def execute(self, _stdout, *args, **kwargs):
        # Run the compiled template; output fragments accumulate in _stdout.
        for dictarg in args: kwargs.update(dictarg)
        env = self.defaults.copy()
        env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
               '_include': self.subtemplate, '_str': self._str,
               '_escape': self._escape})
        env.update(kwargs)
        eval(self.co, env)
        if '_rebase' in env:
            # The template requested a %rebase: re-render the base template
            # with the current output available as _base.
            subtpl, rargs = env['_rebase']
            subtpl = self.__class__(name=subtpl, lookup=self.lookup)
            rargs['_base'] = _stdout[:] #copy stdout
            del _stdout[:] # clear stdout
            return subtpl.execute(_stdout, rargs)
        return env

    def render(self, *args, **kwargs):
        """ Render the template using keyword arguments as local variables. """
        for dictarg in args: kwargs.update(dictarg)
        stdout = []
        self.execute(stdout, kwargs)
        return ''.join(stdout)


def template(*args, **kwargs):
    ''' Get a rendered template as a string iterator.
    You can use a name, a filename or a template string as first parameter.
    Template rendering arguments can be passed as dictionaries
    or directly (as keyword arguments).
    '''
    tpl = args[0] if args else None
    template_adapter = kwargs.pop('template_adapter', SimpleTemplate)
    # Templates are cached by their `tpl` key; DEBUG disables the cache.
    if tpl not in TEMPLATES or DEBUG:
        settings = kwargs.pop('template_settings', {})
        lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
        if isinstance(tpl, template_adapter):
            TEMPLATES[tpl] = tpl
            if settings: TEMPLATES[tpl].prepare(**settings)
        elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
            # Heuristic: template syntax in the string -> inline source.
            TEMPLATES[tpl] = template_adapter(source=tpl, lookup=lookup, **settings)
        else:
            # Otherwise treat it as a template name/filename.
            TEMPLATES[tpl] = template_adapter(name=tpl, lookup=lookup, **settings)
    if not TEMPLATES[tpl]:
        abort(500, 'Template (%s) not found' % tpl)
    for dictarg in args[1:]: kwargs.update(dictarg)
    return TEMPLATES[tpl].render(kwargs)


# Convenience shortcuts binding template() to a specific adapter.
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
simpletal_template = functools.partial(template,
                                       template_adapter=SimpleTALTemplate)


def view(tpl_name, **defaults):
    ''' Decorator: renders a template for a handler.
The handler can control its behavior like that: - return a dict of template vars to fill out the template - return something other than a dict and the view decorator will not process the template, but return the handler result as is. This includes returning a HTTPResponse(dict) to get, for instance, JSON with autojson or other castfilters. ''' def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): result = func(*args, **kwargs) if isinstance(result, (dict, DictMixin)): tplvars = defaults.copy() tplvars.update(result) return template(tpl_name, **tplvars) return result return wrapper return decorator mako_view = functools.partial(view, template_adapter=MakoTemplate) cheetah_view = functools.partial(view, template_adapter=CheetahTemplate) jinja2_view = functools.partial(view, template_adapter=Jinja2Template) simpletal_view = functools.partial(view, template_adapter=SimpleTALTemplate) ############################################################################### # Constants and Globals ######################################################## ############################################################################### TEMPLATE_PATH = ['./', './views/'] TEMPLATES = {} DEBUG = False MEMFILE_MAX = 1024*100 #: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found') HTTP_CODES = httplib.responses HTTP_CODES[418] = "I'm a teapot" # RFC 2324 #: The default template used for error pages. 
Override with @error() ERROR_PAGE_TEMPLATE = """ %try: %from bottle import DEBUG, HTTP_CODES, request, touni %status_name = HTTP_CODES.get(e.status, 'Unknown').title() <!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN"> <html> <head> <title>Error {{e.status}}: {{status_name}}</title> <style type="text/css"> html {background-color: #eee; font-family: sans;} body {background-color: #fff; border: 1px solid #ddd; padding: 15px; margin: 15px;} pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;} </style> </head> <body> <h1>Error {{e.status}}: {{status_name}}</h1> <p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt> caused an error:</p> <pre>{{e.output}}</pre> %if DEBUG and e.exception: <h2>Exception:</h2> <pre>{{repr(e.exception)}}</pre> %end %if DEBUG and e.traceback: <h2>Traceback:</h2> <pre>{{e.traceback}}</pre> %end </body> </html> %except ImportError: <b>ImportError:</b> Could not generate the error page. Please add bottle to sys.path %end """ #: A thread-save instance of :class:`Request` representing the `current` request. request = Request() #: A thread-save instance of :class:`Response` used to build the HTTP response. response = Response() #: A thread-save namepsace. Not used by Bottle. local = threading.local() # Initialize app stack (create first empty Bottle app) # BC: 0.6.4 and needed for run() app = default_app = AppStack() app.push()
nilq/baby-python
python
#!/usr/bin/env python def getinput(): return open('day23.input.txt').read() ### PART 1 import re def solve(program, a=0, hack=False): instrs = [l.split() + [''] for l in program.strip().splitlines()] tgl = { 'cpy': 'jnz', 'inc': 'dec', 'dec': 'inc', 'jnz': 'cpy', 'tgl': 'inc' } ip, regs = 0, { 'a': a, 'b': 0, 'c': 0, 'd': 0 } def get(v): return int(regs.get(v, v)) while ip < len(instrs): if hack and ip == 4: regs['a'] = get('a') + get('b') * get('d') regs['b'] = get('b') - 1 regs['c'] = 2 * get('b') ip = 16 continue instr, ip = instrs[ip], ip + 1 op, x, y = instr[0], instr[1], instr[2] if op == 'cpy': regs[y] = get(x) elif op == 'inc': regs[x] += 1 elif op == 'dec': regs[x] -= 1 elif op == 'jnz': ip += get(y) - 1 if get(x) != 0 else 0 elif op == 'tgl': ipx = ip + get(x) - 1 if 0 <= ipx < len(instrs): instrs[ipx][0] = tgl[instrs[ipx][0]] return regs['a'] # sample assert solve(''' cpy 2 a tgl a tgl a tgl a cpy 1 a dec a dec a''') == 3 # problem s1 = solve(getinput(), 7) print(s1) assert s1 == 11662 == solve(getinput(), 7, True) ### PART 2 s2 = solve(getinput(), 12, True) print(s2) assert s2 == 479008222
nilq/baby-python
python
""" 二维数组 """ # use numpy import numpy as np import pandas as pd sdarry = np.array([ [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12] ]) # different way to get element print(sdarry[1, 2]) print(sdarry[:, 2]) print(sdarry[2, :]) # get a row or a column mean of data # axis=1 is row # axis=0 is column print(sdarry.mean(), sdarry.mean(axis=1), sdarry.mean(axis=0)) # use pandas saleLog = { 'date': ['2018-01-01', '2018-01-02', '2018-01-03'], 'cardno': ['001', '002', '001'], 'name': ['vc银翘片', '清热解毒片', '感康'], 'num': [3, 2, 5], 'money': [18, 22.1, 61.6], 'actually': [16.1, 20.3, 52.1] } saleDf = pd.DataFrame(saleLog) from collections import OrderedDict as od saleLog = { 'date': ['2018-01-01', '2018-01-02', '2018-01-03'], 'cardno': ['001', '002', '001'], 'name': ['vc银翘片', '清热解毒片', '感康'], 'num': [3, 2, 5], 'money': [18, 22.1, 61.6], 'actually': [16.1, 20.3, 52.1] } saleOrderDict = od(saleLog) saleDf = pd.DataFrame(saleOrderDict) # iloc only accept int type for index print(saleDf.iloc[0, 1], saleDf[:, 1]) # loc column only accept string for index # print(saleDf.loc[0, 1]) this is error print(saleDf.loc[0, 'name']) # easy way saleDf.loc[:, 'name'] == saleDf['name']
nilq/baby-python
python
from .confirm import ShutdownConfirmationDialog


def load(manager, params):
    """Launch the shutdown confirmation dialog.

    manager: owning UI/plugin manager, forwarded to the dialog.
    params:  optional sequence; params[0], when present, is the (x, y)
             position for the dialog. Defaults to (100, 100).
    """
    pos = (100, 100)
    # Idiomatic truthiness check: a non-empty sequence is truthy, so the
    # original `params and len(params) > 0` was redundant.
    if params:
        pos = params[0]
    ShutdownConfirmationDialog(pos, manager)
nilq/baby-python
python
from handlers.chord import ChordHandler
from handlers.base import IndexHandler

# URL routing table: (regex pattern, handler class) tuples, in the style
# consumed by Tornado-like frameworks.
# NOTE(review): ChordHandler is imported but has no route below — confirm
# whether a "/chord/" pattern is missing or the import is dead.
url_patterns = [
    (r"/index/", IndexHandler),
]
nilq/baby-python
python
""" Pytools Server module Run server >> (base) C:\\Users\\ginanjar\\AppData\\Roaming\\Sublime Text 3\\Packages\\pythontools>python core\\server\\pytools """
nilq/baby-python
python
import unittest
from operator import itemgetter
from random import randint

import tonos_ts4.ts4 as ts4

from utils.wallet import create_wallet, DEFAULT_WALLET_BALANCE
from utils.nft_root import create_nft_root, mint_nft, get_nft_addr, DEFAULT_NFT_ROOT_BALANCE
from utils.nft import restore_nft_by_addr, get_nft_info

# Randomize test execution order so the cases stay order-independent.
unittest.TestLoader.sortTestMethodsUsing = lambda _, x, y: randint(-1, 1)

ts4.init('test_build', verbose = False)

ZERO_ADDRES = ts4.Address.zero_addr(0)

MINT_COMMISSION = 5 * ts4.GRAM
MIN_FOR_MINTING = 1_700_000_000
MIN_FOR_DATA_DEPLOY = 1_500_000_000

# error codes
NOT_ENOUGH_VALUE_TO_MINT = 107


def prepare_for_minting():
    """Deploy a minter wallet, agent wallets and an NFT root contract.

    Returns a dict keyed by role: 'minter', 'root', 'commission_agent'
    (the royalty agent is created but not needed by the current tests).
    """
    wallet_minter = create_wallet()
    wallet_commission_agent = create_wallet()
    wallet_royalty_agent = create_wallet()
    nft_root = create_nft_root(wallet_commission_agent, wallet_royalty_agent, MINT_COMMISSION)
    prepared_info = {
        'minter': wallet_minter,
        'root': nft_root,
        'commission_agent': wallet_commission_agent,
    }
    return prepared_info


class TestNftMinting(unittest.TestCase):
    def tearDown(self):
        # Reset the ts4 sandbox so each test deploys into a clean state.
        ts4.reset_all()

    def test_wallet_can_mint_nft(self):
        """An ordinary wallet can mint an NFT and becomes its owner."""
        wallet_minter, nft_root = itemgetter('minter', 'root')(prepare_for_minting())

        MINT_PRICE = MINT_COMMISSION + MIN_FOR_MINTING
        # NOTE(review): MINT_PRICE already includes MINT_COMMISSION, so the
        # value sent here carries the commission twice — harmless for this
        # ownership-only check, but confirm the intent.
        mint_nft(nft_root, wallet_minter, MINT_COMMISSION + MINT_PRICE)

        nft = restore_nft_by_addr(get_nft_addr(nft_root, 0))
        nft_info = get_nft_info(nft)
        self.assertEqual(nft_info['addrOwner'], wallet_minter.address.str())

    def test_minter_wallet_withdraw_with_commission(self):
        """Minting deducts deploy cost + commission; the agent receives the commission."""
        wallet_minter, wallet_commission_agent, nft_root = \
            itemgetter('minter', 'commission_agent', 'root')(prepare_for_minting())

        MINT_PRICE = 2*MINT_COMMISSION + MIN_FOR_MINTING
        mint_nft(nft_root, wallet_minter, MINT_PRICE)

        nft = restore_nft_by_addr(get_nft_addr(nft_root, 0))
        nft_info = get_nft_info(nft)
        self.assertEqual(nft_info['addrOwner'], wallet_minter.address.str())
        self.assertEqual(wallet_minter.balance,
            DEFAULT_WALLET_BALANCE - MIN_FOR_DATA_DEPLOY - MINT_COMMISSION)
        self.assertEqual(wallet_commission_agent.balance, MINT_COMMISSION + DEFAULT_WALLET_BALANCE)
        self.assertEqual(nft_root.balance, DEFAULT_NFT_ROOT_BALANCE)

    def test_agent_wallet_withdraw_with_commission(self):
        """The commission agent mints without paying the commission."""
        wallet_commission_agent, nft_root = itemgetter('commission_agent', 'root')(prepare_for_minting())

        MINT_PRICE = 2*MINT_COMMISSION + MIN_FOR_MINTING
        mint_nft(nft_root, wallet_commission_agent, MINT_PRICE)

        nft = restore_nft_by_addr(get_nft_addr(nft_root, 0))
        nft_info = get_nft_info(nft)
        self.assertEqual(nft_info['addrOwner'], wallet_commission_agent.address.str())
        self.assertEqual(wallet_commission_agent.balance, DEFAULT_WALLET_BALANCE - MIN_FOR_DATA_DEPLOY)
        self.assertEqual(nft_root.balance, DEFAULT_NFT_ROOT_BALANCE)

    def test_agent_can_mint_using_nft_root_balance(self):
        """When the agent underpays, the NFT root covers the deploy cost."""
        wallet_commission_agent, nft_root = itemgetter('commission_agent', 'root')(prepare_for_minting())

        MINT_PRICE = MIN_FOR_MINTING - 1
        mint_nft(nft_root, wallet_commission_agent, MINT_PRICE)

        nft = restore_nft_by_addr(get_nft_addr(nft_root, 0))
        nft_info = get_nft_info(nft)
        self.assertEqual(nft_info['addrOwner'], wallet_commission_agent.address.str())
        self.assertEqual(wallet_commission_agent.balance, DEFAULT_WALLET_BALANCE)
        self.assertEqual(nft_root.balance, DEFAULT_NFT_ROOT_BALANCE - MIN_FOR_DATA_DEPLOY)

    def test_error_throw_if_minting_with_low_balance(self):
        """Sending one unit less than the minimum must fail with the dedicated error."""
        wallet_minter, nft_root = itemgetter('minter', 'root')(prepare_for_minting())

        MINT_PRICE = MINT_COMMISSION + MIN_FOR_MINTING - 1
        # Use the named error-code constant instead of the magic number 107
        # it duplicated (consistency fix).
        mint_nft(nft_root, wallet_minter, MINT_PRICE, expected_err=NOT_ENOUGH_VALUE_TO_MINT)


if __name__ == '__main__':
    print('\nNftMinting testing:')
    unittest.main()
nilq/baby-python
python
# Python 2 script: drives a Raspberry Pi robot from steering predictions
# received over TCP, stopping when the ultrasonic sensor detects an obstacle.
__author__ = 'akshay'
import socket
import time
import RPi.GPIO as GPIO

GPIO.setwarnings(False)

# create a socket and bind socket to the host
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(('10.42.0.1', 8001))
buffe=1024

def measure():
    """ measure distance """
    # 10 microsecond trigger pulse starts an HC-SR04-style measurement cycle.
    GPIO.output(GPIO_TRIGGER, True)
    time.sleep(0.00001)
    GPIO.output(GPIO_TRIGGER, False)
    start = time.time()
    # Busy-wait for the echo pulse; start/stop bracket its duration.
    # NOTE(review): no timeout — a missing echo hangs here forever; confirm
    # acceptable for this setup.
    while GPIO.input(GPIO_ECHO)==0:
        start = time.time()
    while GPIO.input(GPIO_ECHO)==1:
        stop = time.time()
    elapsed = stop-start
    # Speed of sound ~34300 cm/s; halve for the round trip -> distance in cm.
    distance = (elapsed * 34300)/2
    return distance

# referring to the pins by GPIO numbers
GPIO.setmode(GPIO.BCM)
# define pi GPIO
GPIO_TRIGGER = 23
GPIO_ECHO = 24
# Motor driver inputs: l1/l2 = left motor pair, r1/r2 = right motor pair.
l1=21
l2=20
r1=16
r2=12
# output pin: Trigger
GPIO.setup(GPIO_TRIGGER,GPIO.OUT)
# input pin: Echo
GPIO.setup(GPIO_ECHO,GPIO.IN)
GPIO.setup(l1,GPIO.OUT)
GPIO.setup(l2,GPIO.OUT)
GPIO.setup(r1,GPIO.OUT)
GPIO.setup(r2,GPIO.OUT)
# initialize trigger pin to low
GPIO.output(GPIO_TRIGGER, False)
GPIO.output(l1, False)
GPIO.output(l2, False)
GPIO.output(r1, False)
GPIO.output(r2, False)

try:
    while True:
        distance = measure()
        #print "Distance : %.1f cm" % distance
        # send data to the host every 0.5 sec
        #client_socket.send(str(distance))
        time.sleep(0.01)
        # First byte of the payload encodes the steering prediction:
        # '0' forward, '1' right, '2' left, anything else = stop.
        data=client_socket.recv(buffe)
        print 'prediction:',data[0],'distance:',distance
        # Only drive when the path ahead is clear (> 10 cm).
        if distance >10:
            if data[0]=='0':
                GPIO.output(l1, False) #forward
                GPIO.output(l2, True)
                GPIO.output(r1, False)
                GPIO.output(r2, True)
            elif data[0]=='1': #right
                GPIO.output(l1, True)
                GPIO.output(l2, False)
                GPIO.output(r1, False)
                GPIO.output(r2, True)
            elif data[0]=='2':
                GPIO.output(l1, False) #left
                GPIO.output(l2, True)
                GPIO.output(r1, True)
                GPIO.output(r2, False)
            else:
                # Unknown prediction: stop all motors.
                GPIO.output(l1, False)
                GPIO.output(l2, False)
                GPIO.output(r1, False)
                GPIO.output(r2, False)
        else:
            # Obstacle too close: stop regardless of the prediction.
            GPIO.output(l1, False)
            GPIO.output(l2, False)
            GPIO.output(r1, False)
            GPIO.output(r2, False)
finally:
    client_socket.close()
    GPIO.cleanup()

# NOTE(review): this second close() is redundant — the socket was already
# closed in the finally block above.
client_socket.close()
nilq/baby-python
python
""" Function for matching delimiters in an arithmetic expression """ from ArrayStack import * def is_matched(expr): """ Return True if all delimiters are properly match; False otherwise """ lefty = '({[' righty = ')}]' S = ArrayStack() for c in expr: if c in lefty: S.push(c) elif c in righty: if S.is_empty(): return False if righty.index(c) != lefty.index(S.pop()): return False return S.is_empty() es = is_matched('[(5+x)-(y+z)]') print(es)
nilq/baby-python
python
""" File: caesar.py Name: Max Chang ------------------------------ This program demonstrates the idea of caesar cipher. Users will be asked to input a number to produce shifted ALPHABET as the cipher table. After that, any strings typed in will be encrypted. """ # This constant shows the original order of alphabetic sequence ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' def main(): """ TODO: Decipher the code using Caesar Cipher """ secret = secret_input('Secret Number: ') cipher_text = input('What\'s the ciphered string? ') print('The deciphered string is: ' + decipher(cipher_text.upper(), secret)) def secret_input(display): """ Get the shift index for Caesar Cipher :param display: (string) text to show in the console :return: (int) the Caesar Cipher shift index """ while True: secret = input(display) if secret.isdigit() or (secret[0] == '-' and secret[1:].isdigit()): secret = int(secret) while True: if 0 <= secret < len(ALPHABET): return secret elif secret < 0: # Handle for minus secret += len(ALPHABET) else: # Handle for larger than 26 secret -= len(ALPHABET) print('Not an integer number') def decipher(cipher, key): """ Decipher the encrypted code from user using key :param cipher: (string) the encrypted code :param key: (int) Caesar Cipher key :return: (string) deciphered string """ decipher_text = '' for i in range(len(cipher)): if cipher[i].isalpha(): position = ALPHABET.find(cipher[i]) if position + key < len(ALPHABET): decipher_text += ALPHABET[position + key] else: # Handle for index larger than 26 decipher_text += ALPHABET[position + key - len(ALPHABET)] else: # Handle for non-alphabet decipher_text += cipher[i] return decipher_text ##### DO NOT EDIT THE CODE BELOW THIS LINE ##### if __name__ == '__main__': main()
nilq/baby-python
python
from django.db import models
from django.contrib.auth.models import AbstractUser


class StudentUser(AbstractUser):
    """Custom Django user extending AbstractUser with student-specific fields."""
    # Matriculation number; unique per student (max 14 chars).
    matric_no = models.CharField(max_length=14, unique=True)
    # Device MAC address; 17 chars fits the colon-separated form
    # 'aa:bb:cc:dd:ee:ff' — presumably for network-based attendance;
    # TODO confirm intended format with callers.
    mac_add = models.CharField(max_length=17, unique=True)
nilq/baby-python
python
from typing import Any, Dict

import attr

from targets.config import is_in_memory_cache_target_value


@attr.s(frozen=True)
class TargetCacheKey(object):
    """Immutable, hashable cache key (target, value_type).

    Both fields are coerced to str by the converters, so arbitrary
    target/value-type objects can be used as keys interchangeably
    with their string forms.
    """
    target = attr.ib(converter=str)
    value_type = attr.ib(converter=str)


class TargetCache(object):
    """In-memory dict-like cache mapping TargetCacheKey -> value.

    All reads and writes are gated on the global enable flag
    (is_in_memory_cache_target_value); when disabled, set/get become no-ops
    but membership tests still consult the underlying dict.
    """

    def __init__(self, cache=None):
        # Share the caller-supplied dict when given (allows an external
        # owner to manage the storage); otherwise own a fresh one.
        self._cache = cache if cache is not None else {}

    def get_cache_group(self):
        # type: () -> Dict[TargetCacheKey, Any]
        # Hook point: subclasses may return a different backing dict
        # (e.g. per-context groups); callers treat None as "no cache".
        return self._cache

    def set(self, value, key):
        # Silently ignored when caching is disabled.
        if not self.enabled:
            return

        cache = self.get_cache_group()
        if cache is not None:
            cache[key] = value

    def get(self, key):
        # Returns None both when disabled and on a cache miss.
        if not self.enabled:
            return

        cache = self.get_cache_group()
        if cache is not None:
            return cache.get(key)

    def has(self, key):
        # NOTE: unlike get(), membership is NOT gated on self.enabled.
        cache = self.get_cache_group()
        if cache is not None:
            return key in cache
        return False

    def __getitem__(self, item):
        return self.get(key=item)

    def __setitem__(self, key, value):
        return self.set(value=value, key=key)

    def __contains__(self, item):
        return self.has(key=item)

    def clear_for_targets(self, targets_to_clear):
        """Drop every entry whose key.target is in targets_to_clear."""
        if not targets_to_clear:
            return

        cache = self.get_cache_group()
        if not cache:
            return

        targets_to_clear = set(targets_to_clear)
        # Iterate over a snapshot of the keys since we delete while scanning.
        for key in list(cache.keys()):
            if key.target in targets_to_clear and key in cache:
                del cache[key]

    def clear(self):
        # Clears the current cache group only.
        cache = self.get_cache_group()
        if cache:
            cache.clear()

    def clear_all(self):
        # Clears the owned backing dict directly, bypassing get_cache_group().
        self._cache.clear()

    @property
    def enabled(self):
        # Re-evaluated on every access so config changes take effect live.
        return is_in_memory_cache_target_value()


# Process-wide shared cache instance.
TARGET_CACHE = TargetCache()
nilq/baby-python
python
import datetime

from Module.MainThread_Socket_Client import MainThread_Socket_Client
from Module.SocketServer_Client import SocketServer_Client


class Socket_Client_Core():
    """Aggregates the socket-client building blocks behind one object.

    Stores the imported classes themselves (not instances) as attributes,
    so callers can instantiate them later via this core object.
    """

    def __init__(self):
        # The original wrapped these assignments in `try/except ... raise(err)`,
        # which only re-raised the same exception — a no-op removed here.
        self.MainThread_Socket_Client = MainThread_Socket_Client
        self.SocketServer_Client = SocketServer_Client
        print(datetime.datetime.now(), self.__class__, ' Ready')
nilq/baby-python
python
from tqdm import tqdm

from src.reranker.lambdamart import LambdaMart
from src.interface.corpus import Corpus
from src.interface.iohandler import InputOutputHandler
from src.interface.features import FeatureEngineer

# import src.evaluation.validate_run as validate

# BUG FIX: the original used f"...full{i}.json" with `i` undefined, raising
# NameError at import time (a leftover from a loop variable).
OUT = "./evaluation/fairRuns/submission_lambdamart-full.json"
QUERIES_EVAL = "./evaluation/fair-TREC-evaluation-sample.json"
SEQUENCE_EVAL = "./evaluation/fair-TREC-evaluation-sequences.csv"
QUERIES_TRAIN = "./training/fair-TREC-training-sample-cleaned.json"
SEQUENCE_TRAIN = "./training/training-sequence-full.tsv"

# Build the corpus-backed feature pipeline shared by train and eval inputs.
corpus = Corpus()
ft = FeatureEngineer(corpus)

input_train = InputOutputHandler(corpus,
                                 fsequence=SEQUENCE_TRAIN,
                                 fquery=QUERIES_TRAIN)

input_test = InputOutputHandler(corpus,
                                fsequence=SEQUENCE_EVAL,
                                fquery=QUERIES_EVAL)

# Train the LambdaMART reranker, score the eval set, and write the TREC run.
lambdamart = LambdaMart(ft)
lambdamart.train(input_train)
lambdamart.predict(input_test)

input_test.write_submission(lambdamart, outfile=OUT)

# Optional post-hoc validation of the produced run file (disabled).
# args = validate.Args(queries=QUERIES_EVAL, query_sequence_file = SEQUENCE_EVAL, run_file=OUT)
# validate.main(args)
nilq/baby-python
python
# coding: utf-8
import time

# Emit a heartbeat line once per second for 20 seconds, then a done banner.
for tick in range(20):
    print('-----------main--------[', tick, ']')
    time.sleep(1)

print("ok man yeaaaaahh!")

# UDP scaffolding kept for reference (not wired up in this script):
#send block --> input IP and message
#message='abcedf'
#IP='127.0.0.1'
#send_len=client.sendto(message.encode('utf-8'),(IP,recieve_port))
#recieve block -->None
#rx_message,addr=server.recvfrom(M_SIZE)
#print(rx_message.decode('utf-8'))
nilq/baby-python
python
#main.py # set up track structure # this one loops over alpha and epsilon import numpy as np from placerg.funcs import * from placerg.funcsrg import * from placerg.objects import * from placerg.runfunc import * N0 = 2048 # number of cells nstim = 10 # number of nonplace stimuli percell= 1.0 # probability that each field is accepted placeprob = 'None' bothprob = 0.5 time= np.float(0.1) phi=1.0 # multiply by this constant to adjust overall activity of # nonplace cells eta = 6.0 # multiply by this constant to increase overall activity of # network epsilon= -16./6. runsim(N0, nstim, percell, placeprob, bothprob, time, phi, eta, epsilon)
nilq/baby-python
python
#Created by Oli MacPherson and Ben O'Sullivan!
#For use with only Python 2 at the moment cause i cant be bothered changing all the raw_inputs.
# ---------------------------------------------------------------------------
# Text adventure about surviving violent climate change (Python 2: raw_input).
# Control flow is continuation style: every scene function prompts the player
# and calls the next scene directly; "play again" restarts at extreme_rain_1();
# invalid input usually recurses into the same scene.
#
# NOTE(review): throughout this file choice branches are written as
# `if x == '1': ...` then `if x == '2': ... else: ...` -- the `else` pairs
# only with the SECOND `if`, so after a '1' branch returns, control falls into
# the invalid-answer `else` and re-prompts.  Looks unintended; confirm before
# changing, since the deep mutual recursion depends on it.
# ---------------------------------------------------------------------------
import sys, os, random
import time

intro_phrases = ["The world is changing. \n", "Humanity's voracious appetite for and consumption of electricity born of burning coal and oil is releasing harmful gas into the atmosphere. \n", "You will now experience the effects of this upon the world's climate. \n", "Your objective is to survive for as long as you can, in a world of violent climate change. \n", "Good luck. \n"]


# Print the intro text (4 s between phrases) then start the game proper.
def intro():
    for phrase in intro_phrases:
        print(phrase)
        time.sleep(4)
    extreme_rain_1()


# Final scene: enter the rescue spacecraft's airlock (win) or refuse (die).
def enterdaspace():
    answerrr = raw_input()
    if answerrr == '1':
        print("You enter the airlock, and enter a brand new world. The spacecraft rockets away into space, and you live happily ever after.")
        print("Well done! You finished! Keep your eyes open for the second game, coming out in December 2014!")
        print("Of course, none of this would have been neccessary if we just looked after the planet a bit more.")
        print("Would you like to play again?")
        answertoques = raw_input().upper()
        if answertoques == "YES":
            extreme_rain_1()
        else:
            sys.exit()
    if answerrr == '2':
        print("You turn away from the airlock, unwilling to trust fate. You die of suffocation, many lonely days later.")
        print("You have died. You have failed. Your score is 1000 points.")
        print("Would you like to play again?")
        answertoques = raw_input().upper()
        if answertoques == "YES":
            extreme_rain_1()
        else:
            sys.exit()
    else:
        print("Invalid answer. Please type either 1 or 2.")
        enterdaspace()


# In flight: the earlier open-window choice decides survival here.
def spacecraftflying():
    print("The spacecraft is soaring overhead. Your head looks up at it's ceiling. You wonder whether you should go explore.")
    answerr = raw_input("Quick! Do you remember. Did you open a window (1) or not (2)? No lying please.")
    if answerr == '1':
        print("You run out of air, and the vacuum sucks you into space. You manage to grab a spacesuit, and hurredly put it on. You look back at the spacecraft, and realise, Venus is too far away. But just as you begin to despair, a new spacecraft appears. It opens it's airlock. Do you enter (1), or not (2)?")
        enterdaspace()
    if answerr == '2':
        print("The spacecraft rocks wildly, as it enters Venus's atmosphere. You run, but lose control of the craft. You crash. And. Burn.")
        print("You have died. You have failed. Your score is 950 points.")
        print("Would you like to play again?")
        answertoques = raw_input().upper()
        if answertoques == "YES":
            extreme_rain_1()
        else:
            sys.exit()
    else:
        print("Invalid answer. Please type either 1 or 2.")
        spacecraftflying()


# Launch scene: take off before the lizards destroy the craft.
def spacecrafttakeoff():
    print("The spacecraft begins to shake. There are lizards about! Do you take off (1) or not (2)?")
    spacecraft = raw_input()
    if spacecraft == '1':
        print("You strap yourself in, and key the ignition. The spacecraft rockets off into the atmosphere.")
        spacecraftflying()
    if spacecraft == '2':
        print("The spacecraft topples over as the lizards begin to rip up the landing gear. The spacecraft ignites, exploding, melting the flesh from your bones.")
        print("You have died. You have failed.Your score is 900 points.")
        print("Would you like to play again?")
        answertoques = raw_input().upper()
        if answertoques == "YES":
            extreme_rain_1()
        else:
            sys.exit()
    else:
        print("Invalid answer. Please type either 1 or 2.")
        spacecrafttakeoff()


# Choice remembered later by spacecraftflying().
# NOTE(review): `windowisopen` is a local and is never read anywhere --
# spacecraftflying() asks the player to remember instead.  Confirm intended.
def doyuodiewindow():
    windowopen = raw_input("Do you open a window (1) or not (2)?")
    if windowopen == '1':
        windowisopen = True
        spacecrafttakeoff()
    if windowopen == '2':
        windowisopen = False
        spacecrafttakeoff()
    else:
        print("Invalid answer. Plese type '1' or '2'.")
        doyuodiewindow()


# At the spacecraft: camp outside (death) or board immediately.
def campornot():
    camppls = raw_input("")
    if camppls == '1':
        print("You and Michael begin to retrieve the camping equipment from your bags, but before you can, the lizard's friends come roaring out of the desert, and ravage your camp. You are picked up and slowly ripped in two.")
        time.sleep(2)
        print("You have died. You have failed. Your score is 850 points.")
        print("Would you like to play again?")
        answertoques = raw_input().upper()
        if answertoques == "YES":
            extreme_rain_1()
        else:
            sys.exit()
    if camppls == '2':
        print("You and Michael hustle up the ramp. Once inside, you realise it is extremely humid and hot. You are sweating very much.")
        time.sleep(2)
        doyuodiewindow()
    else:
        print("Invalid answer. Please type either '1' or '2'.")
        campornot()


# Desert trek: direct route (death) or via the settlement.
def whichway():
    theway = (raw_input("Do you go towards the spacecraft to leave immediately (1), or to the next settlement, for food and water (2)?").upper()).replace(" ", "")
    if theway == '1':
        print("You point at the very far off spacecraft, and beckon Michael towards you. The two of you begin to walk.")
        time.sleep(2)
        print("After two hours in the desert, you are hopelessly lost. You fall to the ground, weakened. Michael has already given up, and died. You crawl towards the ship, determined to make it. You don't even realise it when you slip into death.")
        time.sleep(2)
        print("You have died. You have failed. Your score is 800 points.")
        print("Would you like to play again?")
        answertoques = raw_input().upper()
        if answertoques == "YES":
            extreme_rain_1()
        else:
            sys.exit()
    if theway == '2':
        print("You set off towards the settlement, and within the hour, you arrive, and replenish your supplies. By he time you eventually reach the spacecraft, it is nightfall. Do you set up camp (1) or do you go inside immediately (2)?")
        campornot()
    else:
        print("Invalid answer. Please type either '1' or '2'.")
        whichway()


# Transition scene after fetching the bullets.
def backatthehousetrekstart():
    print("Back at the house, you ready yourself and the team for the journey. Your team runs out of water, and John dies of dehydration, but you and Michael continue on.")
    time.sleep(3)
    print("You and Michael leave the house, and Michael asks you which way we should go.")
    whichway()


# Confrontation in the abandoned house.
# NOTE(review): the "2" branch ends after printing the death message --
# unlike every other death it offers no "play again" prompt and falls
# through to the invalid-answer else.  Confirm intended.
def whatthefdoyoudo():
    whatdoyoudo = (raw_input("Do you kill him (1), capture him (2), or run (3)?").upper()).replace(" ", "")
    if whatdoyoudo == "1":
        print("You move forwards, and slice down with the sword. The man's head rolls away.")
        time.sleep(2)
        print("There are some bullets on the counter. You grab them, then head back to the house.")
        backatthehousetrekstart()
    if whatdoyoudo == "2":
        print("You lower your sword, and point it at the man. He snarls, and throws himself onto your sword, killing you in the process.")
        time.sleep(2)
        print("You have died. You have failed. Your score is 750 points.")
    if whatdoyoudo == "3":
        print("You turn to run, but are cut down in two strides.")
        print("You have died. You have failed. Your score is 700 points.")
        print("Would you like to play again?")
        answertoques = raw_input().upper()
        if answertoques == "YES":
            extreme_rain_1()
        else:
            sys.exit()
    else:
        print("Invalid answer. Please type '1', '2', or '3'.")
        whatthefdoyoudo()


# Walk to the abandoned house (bullet fetch quest).
def abandonedhouse():
    print("Still grasping your bloody sword, you stride out towards the house. It does indeed appear empty.")
    time.sleep(2)
    print("You walk to the door, and enter slowly. There is a man inside that you did not hear. He turns, and hisses at you. What do you do?")
    whatthefdoyoudo()


# Weapon choice: gun (death) or sword (proceed to the house quest).
def thegun():
    gunanswer = (raw_input("Do you pick up the gun (1), or the sword that's lying beside it (2)?").upper()).replace(" ", "")
    if gunanswer == "1":
        print("You pick up the gun, load a bullet, and walk outside. The lizard is there, and he charges. You raise the gun and- \n")
        time.sleep(5)
        print("The gun misfires, and explodes in your hands. You are knocked backwards into the door. As you begin to fall unconscious, you witness the lizard begin to eat you.")
        time.sleep(2)
        print("You have died. You have failed. Your score is 650 points.")
        print("Would you like to play again?")
        answertoques = raw_input().upper()
        if answertoques == "YES":
            extreme_rain_1()
        else:
            sys.exit()
    if gunanswer == "2":
        print("You grab the sword and stride outside. ")
        print("The lizard charges, but you raise your sword and cut it's throat as it comes.")
        time.sleep(2)
        print("You skin the beast and take the skin in to Michael. He smiles and tanks you. Now he needs some bullets, he says. he points to an abandoned house ten metres from the door. There should be some in there.")
        time.sleep(3)
        abandonedhouse()
    else:
        print("Invalid answer. Please type either '1' or '2'.")
        thegun()


# Second confirmation of the quest.
# NOTE(review): the invalid-answer else here does NOT recurse -- the
# function just returns.  Inconsistent with every other scene; confirm.
def doyoustillhelp():
    doyoustillhelp1 = (raw_input("Do you still want to help? Yes (1) or no (2)").upper()).replace(" ", "")
    if doyoustillhelp1 == "1":
        print("Good. Michael says he needs some reptile skin first. He points to a gun by the door, and says to kill the one outside.")
        thegun()
    if doyoustillhelp1 == "2":
        print("The stranger shakes his head regretfully, and turns away. 'You can leave, then.' he says.")
        time.sleep(2)
        print("You turn, and stalk out, outraged. You walk right into the giant lizard still waiting outside.")
        time.sleep(2)
        print("You are eaten. You have died. You have failed. your score is 600 points.")
        print("Would you like to play again?")
        answertoques = raw_input().upper()
        if answertoques == "YES":
            extreme_rain_1()
        else:
            sys.exit()
    else:
        print("Invalid answer. Please type either '1' or '2'.")


# Meet Michael and John; accept or refuse the quest.
def insidethehouse():
    print("You turn, and take in the room around you. It is a neatly furnished home, complete with two men, sitting frozen at a table, in the midst of an afternoon tea. They are staring at you.")
    time.sleep(3)
    print("They stand, and one walks over. He puts his arm on your shoulder, and says 'You have come at a great moment, stranger. Will you undertake some tasks for me and my friend here?'.")
    time.sleep(3)
    print("Do you undertake these tasks? Yes (1) or no (2)?")
    taskchoice = (raw_input().upper()).replace(" ", "")
    if taskchoice == "1":
        print("The stranger smiles, and extends his hand. 'I'm Michael,' he says, 'and this is my friend, John. We are trying to get to venus.'.")
        time.sleep(3)
        print("John chimes in, says it's the only way to excape the now uncontrollable climate change threatening Earth. He says there's a spacecraft in the north, they only have to reach it.")
        time.sleep(3)
        print("They say they need you to find one or two things for them, then they can leave for the spacecraft.")
        time.sleep(3)
        doyoustillhelp()
    if taskchoice == "2":
        print("The stranger shakes his head regretfully, and turns away. 'You can leave, then.' he says.")
        time.sleep(2)
        print("You turn, and stalk out, outraged. You walk right into the giant lizard still waiting outside.")
        time.sleep(2)
        print("You are eaten. You have died. You have failed. your score is 600 points.")
        print("Would you like to play again?")
        answertoques = raw_input().upper()
        if answertoques == "YES":
            extreme_rain_1()
        else:
            sys.exit()
    else:
        print("Invalid answer. Please type either '1' or '2'.")
        insidethehouse()


# Chased by the lizard at the house door.
def housedoorchoice1():
    housedoorchoice = raw_input("Do you run around the side of the house (1), or do you open the door and enter (2)?").replace(" ","").upper()
    if housedoorchoice == "1":
        print("You attempt to run, but the lizard catches you before you go three steps. It eats you alive.")
        time.sleep(3)
        print("You have died. You have failed. Your score is 550 points.")
        print("Would you like to play again?")
        answertoques = raw_input().upper()
        if answertoques == "YES":
            extreme_rain_1()
        else:
            sys.exit()
    if housedoorchoice == "2":
        print("You grab the handle and turn it, running in and slamming the door behind you.")
        time.sleep(2)
        insidethehouse()
    else:
        print("Invalid answer. Please type either '1' or '2'.")
        housedoorchoice1()


# House vs. oasis.
# NOTE(review): the mirage branch compares against "T2", which the
# sanitised input ("2") can never equal -- the oasis path looks unreachable
# and choosing '2' loops as invalid input.  Confirm before fixing.
def route_choice_1():
    answer_routechoice1 = (raw_input("Which way do you go? To the house (1) or to the oasis (2)?").replace(" ","")).upper()
    if answer_routechoice1 == "T2":
        print("You run towards the glorious sight, splashing water from the bucket in your haste to reach it. Once you arrive, however, it fades. It was a mirage.")
        time.sleep(3)
        print("You gaze at your now empty bucket, then sit, and weep.")
        print("You die of thirst. You have failed. Your score is 500 points.")
        print("Would you like to play again?")
        answertoques = raw_input().upper()
        if answertoques == "YES":
            extreme_rain_1()
        else:
            sys.exit()
    if answer_routechoice1 == "1":
        print("You stroll towards the house, drinking leisurely from the bucket as you do. You have made it to the door when you hear thundering feet behind you. You turn, and see a massive lizard thundering towards you.")
        time.sleep(4)
        housedoorchoice1()
    else:
        print("Invalid answer. Please type either '1' or '2'.")
        route_choice_1()


# Escape the collapsing room: window (live) or trapdoor (die).
def stuck_in_room_2():
    print("It seems you only have a few seconds until the door collapses. Do you leave the house through the back window (1), or climb through the trapdoor in the ceiling (2)?")
    answer_stuckinroom = (raw_input().upper()).replace(" ", "")
    if answer_stuckinroom == "2":
        print("You scramble up to the trapdoor, and climb through. The roof, weakened by the downpour, cannot withstand your weight. You fall, and die in pain. You have failed. Your score is 450 points.")
        print("Would you like to play again?")
        answertoques = raw_input().upper()
        if answertoques == "YES":
            extreme_rain_1()
        else:
            sys.exit()
    if answer_stuckinroom == "1":
        print("You leap out of the window.")
        print("The sun shines down upon you. Suddenly thirsty, you drink from the bucket.")
        time.sleep(2)
        print("You begin to walk, and are faced with two routes. In the distance, their looms a beautiful oasis. To the left, however, their is a small house that seems uninhabited.")
        time.sleep(3)
        route_choice_1()
    else:
        print("Invalid answer. Please type either '1' or '2'.")
        stuck_in_room_2()


# NOTE(review): dead code -- never called, and the body is a bare name
# (missing call parentheses), so it would be a no-op anyway.
def extreme_rain_choice_bucket():
    stuck_in_room_2


# Storm escalates: stay (drown) or barricade in the other room.
def extreme_rain_2():
    print("Your door is blown off it's hinges with the force of the gale, and water pours in the door. Do you stay where you are (1), or barricade yourself in the other room (2)?")
    extreme_rain_2_answer = (raw_input().replace(" ","")).upper()
    if extreme_rain_2_answer == "1":
        print("You stay in your chair, watching as the water floods the room.")
        time.sleep(2)
        print("You doze off, peacefully.")
        time.sleep(2)
        print("You do not wake up. You have drowned. You have failed. Your score is 400 points.")
        print("Would you like to play again?")
        answertoques = raw_input().upper()
        if answertoques == "YES":
            extreme_rain_1()
        else:
            sys.exit()
    if extreme_rain_2_answer == "2":
        print("You run to the door, and dive inside, slamming it behind you.")
        stuck_in_room_2()
    else:
        print("That is not a valid answer. Please type either '1' or '2'.")
        extreme_rain_2()


# Tile-plug path: drink the leak water, then the storm escalates.
def very_thirsty_rain_big_roof_leak():
    print("You are extremely thirsty. You stumble over to the stream of water dribbling down from the tiles, and drink until your thirst is sated.")
    extreme_rain_2()


# Growing roof leak: leave it / boards / tiles.
def rain_1_roof_leak_big():
    time.sleep(1)
    print("The leak in the roof gets slowly larger. There are some wooden boards to the side of the room, and some tiles. Do you plug the hole, and if so, with what? Do you leave the hole (1), plug the hole with boards (2), or plug the hole with tiles (3)?")
    rain_1_roof_leak_big_answer = (raw_input()).upper()
    if (rain_1_roof_leak_big_answer.replace(" ","")).upper() == "2":
        print("You pick up the boards, and nail them in place. The leak stops completely.")
        time.sleep(2)
        print("An hour later, you are unbearable thirsty. You gaze at the empty bucket longingly, and at the plugged hole in the ceiling.")
        time.sleep(2)
        print("You open the door, and stumble outside, only to find the sun blinding you with it's heat. \n You fall backwards.")
        time.sleep(2)
        print("You stay there until you die of thirst. You have failed. Your score is 350 points.")
        print("Would you like to play again?")
        answertoques = raw_input().upper()
        if answertoques == "YES":
            extreme_rain_1()
        else:
            sys.exit()
    if rain_1_roof_leak_big_answer.replace(" ","") == "3":
        print("You pick up the tiles, and glue them to the roof. Some water still seeps through.")
        very_thirsty_rain_big_roof_leak()
    if rain_1_roof_leak_big_answer.replace(" ","") == "1":
        time.sleep(1)
        print("The roof, weakened by the downpour and the hole, collapses, crushing you. \n" + "You have failed. Your score is 300 points.")
        print("Would you like to play again?")
        answertoques = raw_input().upper()
        if answertoques == "YES":
            extreme_rain_1()
        else:
            sys.exit()
    else:
        print("Invalid answer. Please type '1', 'Plug the hole with tiles', or 'Leave the hole'.")
        rain_1_roof_leak_big()


# First real choice: catch the drip in a bucket or ignore it.
def rain_1_answer_function():
    rainanswer = raw_input("Do you fetch a bucket and catch the water (1), or do you let it drip(2)?").replace(" ","").upper()
    if rainanswer == "1":
        print("You get a bucket and place it under the dripping ceiling. Water begins to collect in the bottom of the bucket.")
        time.sleep(2)
        extreme_rain_2()
    if rainanswer == "2":
        print("You sit back in your chair, content to let the rain drip. It's only a small leak after all.")
        time.sleep(2)
        rain_1_roof_leak_big()
    else:
        print("Invalid response. Please type either '1' or '2'.")
        rain_1_answer_function()


# Game start / restart point.
def extreme_rain_1():
    print("The rain is bucketing down around you. You are sitting at home, snug inside your small, two-room house. The roof begins to leak, just a little. What do you do?")
    rain_1_answer_function()


# Entry point: show the intro only to first-time players.
def introfunction():
    haveyouplayed = raw_input("Have you played this game before?").upper()
    if haveyouplayed == "YES":
        extreme_rain_1()
    if haveyouplayed == "NO":
        intro()
    else:
        print("Invalid answer. Please type either 'yes' or 'no'.")
        introfunction()

introfunction()
nilq/baby-python
python
HW_SOURCE_FILE = __file__


def num_eights(x):
    """Returns the number of times 8 appears as a digit of x.

    >>> num_eights(3)
    0
    >>> num_eights(8)
    1
    >>> num_eights(88888888)
    8
    >>> num_eights(2638)
    1
    >>> num_eights(86380)
    2
    >>> num_eights(12345)
    0
    >>> from construct_check import check
    >>> # ban all assignment statements
    >>> check(HW_SOURCE_FILE, 'num_eights',
    ...       ['Assign', 'AugAssign'])
    True
    """
    # Peel off one decimal digit per recursive call.
    if x == 0:
        return 0
    if x % 10 == 8:
        return num_eights(x // 10) + 1
    return num_eights(x // 10)


def pingpong(n):
    """Return the nth element of the ping-pong sequence.

    >>> pingpong(8)
    8
    >>> pingpong(10)
    6
    >>> pingpong(15)
    1
    >>> pingpong(21)
    -1
    >>> pingpong(22)
    -2
    >>> pingpong(30)
    -2
    >>> pingpong(68)
    0
    >>> pingpong(69)
    -1
    >>> pingpong(80)
    0
    >>> pingpong(81)
    1
    >>> pingpong(82)
    0
    >>> pingpong(100)
    -6
    >>> from construct_check import check
    >>> # ban assignment statements
    >>> check(HW_SOURCE_FILE, 'pingpong', ['Assign', 'AugAssign'])
    True
    """
    # Walk the sequence from index 1, flipping direction whenever the
    # index contains an 8 or is divisible by 8.  (Inner helper renamed
    # from `help`, which shadowed the builtin.)
    def walk(i, direction, value):
        if i == n:
            return value
        if num_eights(i) > 0 or i % 8 == 0:
            return walk(i + 1, -direction, value - direction)
        return walk(i + 1, direction, value + direction)
    return walk(1, 1, 1)


def missing_digits(n):
    """Given a number a that is in sorted, increasing order,
    return the number of missing digits in n. A missing digit is
    a number between the first and last digit of a that is not in n.
    >>> missing_digits(1248) # 3, 5, 6, 7
    4
    >>> missing_digits(19) # 2, 3, 4, 5, 6, 7, 8
    7
    >>> missing_digits(1122) # No missing numbers
    0
    >>> missing_digits(123456) # No missing numbers
    0
    >>> missing_digits(3558) # 4, 6, 7
    3
    >>> missing_digits(35578) # 4, 6
    2
    >>> missing_digits(12456) # 3
    1
    >>> missing_digits(16789) # 2, 3, 4, 5
    4
    >>> missing_digits(4) # No missing numbers between 4 and 4
    0
    >>> from construct_check import check
    >>> # ban while or for loops
    >>> check(HW_SOURCE_FILE, 'missing_digits', ['While', 'For'])
    True
    """
    # Compare each adjacent digit pair; the gap between them minus one
    # is the count of digits skipped.  (Inner helper renamed from `help`.)
    def gaps(x, last):
        if x == 0:
            return 0
        if x % 10 == last:
            return gaps(x // 10, x % 10)
        return gaps(x // 10, x % 10) + last - x % 10 - 1
    return gaps(n // 10, n % 10)


def get_next_coin(coin):
    """Return the next coin.
    >>> get_next_coin(1)
    5
    >>> get_next_coin(5)
    10
    >>> get_next_coin(10)
    25
    >>> get_next_coin(2) # Other values return None
    """
    if coin == 1:
        return 5
    elif coin == 5:
        return 10
    elif coin == 10:
        return 25


def count_coins(change):
    """Return the number of ways to make change using coins of value
    of 1, 5, 10, 25.
    >>> count_coins(15)
    6
    >>> count_coins(10)
    4
    >>> count_coins(20)
    9
    >>> count_coins(100) # How many ways to make change for a dollar?
    242
    >>> from construct_check import check
    >>> # ban iteration
    >>> check(HW_SOURCE_FILE, 'count_coins', ['While', 'For'])
    True
    """
    # Classic tree recursion: either skip the smallest coin entirely, or
    # use one of it and recurse on the remaining amount.
    # (Inner helper renamed from `help`; `== None` fixed to `is None`.)
    def ways(amount, smallest_coin):
        if amount == 0:
            return 1
        if amount < 0:
            return 0
        if smallest_coin is None:
            return 0
        without_coin = ways(amount, get_next_coin(smallest_coin))
        with_coin = ways(amount - smallest_coin, smallest_coin)
        return without_coin + with_coin
    return ways(change, 1)


from operator import sub, mul


def make_anonymous_factorial():
    """Return the value of an expression that computes factorial.
    >>> make_anonymous_factorial()(5)
    120
    >>> from construct_check import check
    >>> # ban any assignments or recursion
    >>> check(HW_SOURCE_FILE, 'make_anonymous_factorial', ['Assign', 'AugAssign', 'FunctionDef', 'Recursion'])
    True
    """
    # Self-application in place of named recursion: f receives itself.
    return lambda x: (lambda f: f(f, x))(lambda f, n: 1 if n == 1 else mul(n, f(f, sub(n, 1))))


def print_move(origin, destination):
    """Print instructions to move a disk."""
    print("Move the top disk from rod", origin, "to rod", destination)


def move_stack(n, start, end):
    """Print the moves required to move n disks on the start pole to the
    end pole without violating the rules of Towers of Hanoi.

    n -- number of disks
    start -- a pole position, either 1, 2, or 3
    end -- a pole position, either 1, 2, or 3

    There are exactly three poles, and start and end must be different. Assume
    that the start pole has at least n disks of increasing size, and the end
    pole is either empty or has a top disk larger than the top n start disks.

    >>> move_stack(1, 1, 3)
    Move the top disk from rod 1 to rod 3
    >>> move_stack(2, 1, 3)
    Move the top disk from rod 1 to rod 2
    Move the top disk from rod 1 to rod 3
    Move the top disk from rod 2 to rod 3
    >>> move_stack(3, 1, 3)
    Move the top disk from rod 1 to rod 3
    Move the top disk from rod 1 to rod 2
    Move the top disk from rod 3 to rod 2
    Move the top disk from rod 1 to rod 3
    Move the top disk from rod 2 to rod 1
    Move the top disk from rod 2 to rod 3
    Move the top disk from rod 1 to rod 3
    """
    assert 1 <= start <= 3 and 1 <= end <= 3 and start != end, "Bad start/end"
    if n == 1:
        print_move(start, end)
    else:
        # The spare rod is whichever of 1+2+3=6 is neither start nor end.
        other = 6 - start - end
        move_stack(n - 1, start, other)
        print_move(start, end)
        move_stack(n - 1, other, end)
nilq/baby-python
python
from cacahuate.auth.base import BaseHierarchyProvider


class BackrefHierarchyProvider(BaseHierarchyProvider):
    """Trivial hierarchy provider that echoes the requested identifier back
    as a single user whose identifier, email and fullname are all that same
    value."""

    def find_users(self, **params):
        # The caller's 'identifier' (None when absent) is reused for every
        # field of the returned user record.
        ident = params.get('identifier')
        user_data = {
            'identifier': ident,
            'email': ident,
            'fullname': ident,
        }
        return [(ident, user_data)]
nilq/baby-python
python
# coding: utf-8

# # Categorical VAE with Gumbel-Softmax
#
# Partial implementation of the paper [Categorical Reparameterization with Gumbel-Softmax](https://arxiv.org/abs/1611.01144)
# A categorical VAE with discrete latent variables. Tensorflow version is 0.10.0.
#
# NOTE(review): this is a Python 2 script (print statements below) and uses
# the removed tf.contrib APIs -- it will only run on a TF 1.x-era install.

# # 1. Imports and Helper Functions

# In[1]:

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.insert(0, "./data")
from read_data import load_mnist, next_batch
# get_ipython().magic('matplotlib inline')
slim = tf.contrib.slim
Bernoulli = tf.contrib.distributions.Bernoulli


# In[2]:

def sample_gumbel(shape, eps=1e-20):
    """Sample from Gumbel(0, 1)"""
    # Inverse-CDF sampling: -log(-log(U)); eps guards log(0).
    U = tf.random_uniform(shape, minval=0, maxval=1)
    return -tf.log(-tf.log(U + eps) + eps)


def gumbel_softmax_sample(logits, temperature):
    """ Draw a sample from the Gumbel-Softmax distribution"""
    # Perturb the logits with Gumbel noise, then soften with the temperature.
    y = logits + sample_gumbel(tf.shape(logits))
    return tf.nn.softmax(y / temperature)


def gumbel_softmax(logits, temperature, hard=False):
    """Sample from the Gumbel-Softmax distribution and optionally discretize.
    Args:
      logits: [batch_size, n_class] unnormalized log-probs
      temperature: non-negative scalar
      hard: if True, take argmax, but differentiate w.r.t. soft sample y
    Returns:
      [batch_size, n_class] sample from the Gumbel-Softmax distribution.
      If hard=True, then the returned sample will be one-hot, otherwise it will
      be a probabilitiy distribution that sums to 1 across classes
    """
    y = gumbel_softmax_sample(logits, temperature)
    if hard:
        k = tf.shape(logits)[-1]
        #y_hard = tf.cast(tf.one_hot(tf.argmax(y,1),k), y.dtype)
        y_hard = tf.cast(tf.equal(y, tf.reduce_max(y, 1, keep_dims=True)), y.dtype)
        # Straight-through estimator: forward pass is one-hot, gradient
        # flows through the soft sample y.
        y = tf.stop_gradient(y_hard - y) + y
    return y


# # 2. Build Model

# In[3]:

K = 10  # number of classes
N = 30  # number of categorical distributions

# In[4]:

# input image x (shape=(batch_size,784))
x = tf.placeholder(tf.float32, [None, 784])
# variational posterior q(y|x), i.e. the encoder (shape=(batch_size,200))
net = slim.stack(x, slim.fully_connected, [512, 256])
# unnormalized logits for N separate K-categorical distributions (shape=(batch_size*N,K))
logits_y = tf.reshape(slim.fully_connected(net, K * N, activation_fn=None), [-1, K])
q_y = tf.nn.softmax(logits_y)
log_q_y = tf.log(q_y + 1e-20)
# temperature
tau = tf.Variable(5.0, name="temperature")
# sample and reshape back (shape=(batch_size,N,K))
# set hard=True for ST Gumbel-Softmax
y = tf.reshape(gumbel_softmax(logits_y, tau, hard=False), [-1, N, K])
# generative model p(x|y), i.e. the decoder (shape=(batch_size,200))
net = slim.stack(slim.flatten(y), slim.fully_connected, [256, 512])
logits_x = slim.fully_connected(net, 784, activation_fn=None)
# (shape=(batch_size,784))
p_x = Bernoulli(logits=logits_x)

# In[5]:

# loss and train ops
# KL divergence of q(y|x) against the uniform categorical prior 1/K,
# summed over the N distributions and K classes.
kl_tmp = tf.reshape(q_y * (log_q_y - tf.log(1.0 / K)), [-1, N, K])
KL = tf.reduce_sum(kl_tmp, [1, 2])
elbo = tf.reduce_sum(p_x.log_prob(x), 1) - KL

# In[6]:

loss = tf.reduce_mean(-elbo)
lr = tf.constant(0.001)
train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss, var_list=slim.get_model_variables())
# init_op=tf.initialize_all_variables()
init_op = tf.global_variables_initializer()
print "Build model successfully!"

# # 3. Train

# In[7]:

# get data
data = load_mnist("./data/train-images.idx3-ubyte", "./data/train-labels.idx1-ubyte")
print data[0][0]
print "Get data successful!"

# In[8]:

BATCH_SIZE = 100
NUM_ITERS = 50000
tau0 = 1.0  # initial temperature
np_temp = tau0
np_lr = 0.001
ANNEAL_RATE = 0.00003
MIN_TEMP = 0.5

# In[9]:

dat = []
sess = tf.InteractiveSession()
sess.run(init_op)
# Training loop: anneal the temperature towards MIN_TEMP and decay the
# learning rate every 1000 steps; log every 100, report every 5000.
for i in range(1, NUM_ITERS):
    np_x, np_y = next_batch(data, BATCH_SIZE)
    _, np_loss = sess.run([train_op, loss], {
        x: np_x,
        tau: np_temp,
        lr: np_lr
    })
    if i % 100 == 1:
        dat.append([i, np_temp, np_loss])
    if i % 1000 == 1:
        np_temp = np.maximum(tau0 * np.exp(-ANNEAL_RATE * i), MIN_TEMP)
        np_lr *= 0.9
    if i % 5000 == 1:
        print('Step %d, ELBO: %0.3f' % (i, -np_loss))

# The remainder (animation export, training curves, unconditional generation)
# is disabled below as one big string literal.
'''
# ## save to animation

# In[10]:

np_x1,_=data.next_batch(100)
np_x2,np_y1 = sess.run([p_x.mean(),y],{x:np_x1})

# In[11]:

import matplotlib.animation as animation

# In[12]:

def save_anim(data,figsize,filename):
    fig=plt.figure(figsize=(figsize[1]/10.0,figsize[0]/10.0))
    im = plt.imshow(data[0].reshape(figsize),cmap=plt.cm.gray,interpolation='none')
    plt.gca().set_axis_off()
    #fig.tight_layout()
    fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)
    def updatefig(t):
        im.set_array(data[t].reshape(figsize))
        return im,
    anim=animation.FuncAnimation(fig, updatefig, frames=100, interval=50, blit=True, repeat=True)
    Writer = animation.writers['imagemagick']
    writer = Writer(fps=1, metadata=dict(artist='Me'), bitrate=1800)
    anim.save(filename, writer=writer)
    return

# In[13]:

# save_anim(np_x1,(28,28),'x0.gif')
# save_anim(np_y1,(N,K),'y.gif')
# save_anim(np_x2,(28,28),'x1.gif')

# # 4. Plot Training Curves

# In[14]:

dat=np.array(dat).T

# In[15]:

f,axarr=plt.subplots(1,2)
axarr[0].plot(dat[0],dat[1])
axarr[0].set_ylabel('Temperature')
axarr[1].plot(dat[0],dat[2])
axarr[1].set_ylabel('-ELBO')

# # 5. Unconditional Generation
#
# This consists of sampling from the prior $p_\theta(y)$ and passing it through the generative model.

# In[16]:

M=100*N
np_y = np.zeros((M,K))
np_y[range(M),np.random.choice(K,M)] = 1
np_y = np.reshape(np_y,[100,N,K])

# In[17]:

x_p=p_x.mean()
np_x= sess.run(x_p,{y:np_y})

# In[18]:

np_y = np_y.reshape((10,10,N,K))
np_y = np.concatenate(np.split(np_y,10,axis=0),axis=3)
np_y = np.concatenate(np.split(np_y,10,axis=1),axis=2)
y_img = np.squeeze(np_y)

# In[19]:

np_x = np_x.reshape((10,10,28,28))
# split into 10 (1,10,28,28) images, concat along columns -> 1,10,28,280
np_x = np.concatenate(np.split(np_x,10,axis=0),axis=3)
# split into 10 (1,1,28,280) images, concat along rows -> 1,1,280,280
np_x = np.concatenate(np.split(np_x,10,axis=1),axis=2)
x_img = np.squeeze(np_x)

# In[26]:

f,axarr=plt.subplots(1,2,figsize=(15,15))
# samples
axarr[0].matshow(y_img,cmap=plt.cm.gray)
axarr[0].set_title('Z Samples')
# reconstruction
axarr[1].imshow(x_img,cmap=plt.cm.gray,interpolation='none')
axarr[1].set_title('Generated Images')

# In[31]:

f.tight_layout()
f.savefig('/Users/ericjang/Desktop/gumbel_softmax/code.png')
'''
nilq/baby-python
python
#!/usr/bin/env python3
"""
Checks if a new ts3 version is available

Compares the server version parsed from the local TeamSpeak CHANGELOG
against the newest linux/x86_64 version published at teamspeak.com, and
sends a notification mail when they differ.
"""
import re
import smtplib
import json
import sys
from email.mime.text import MIMEText
from email import utils
import argparse
import requests

"""
CONFIG = {}
CONFIG['CHANGELOG'] = ''
CONFIG['URL'] = 'https://www.teamspeak.com/versions/server.json'
CONFIG['MAIL'] = {}
CONFIG['MAIL']['HOST'] = ''
CONFIG['MAIL']['PORT'] = ''
CONFIG['MAIL']['USER'] = ''
CONFIG['MAIL']['PASSWORD'] = ''
CONFIG['MAIL']['TARGET'] = ''
"""

PARSER = argparse.ArgumentParser()
PARSER.add_argument("-c", help="-c config_file")
ARGS = PARSER.parse_args()


class Ts3Notify():
    """Fetches the published TS3 server versions and mails update notices."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.result_json = self.load_update_data()

    def load_update_data(self, retries=0):
        """Load the version JSON from teamspeak.com.

        Retries up to 5 times on HTTP errors and timeouts; exits the
        process on unrecoverable errors.  Returns the decoded JSON.
        """
        try:
            if retries < 5:
                request = requests.get(self.config['URL'])
                request.raise_for_status()
            else:
                print("Too many retries. {}".format(retries))
                sys.exit(1)
            return request.json()
        except requests.exceptions.HTTPError as err:
            print("HTTP Error. {}".format(err))
            retries += 1
            # BUG FIX: the recursive retry's result was previously discarded,
            # so any retried call returned None and crashed the caller later.
            return self.load_update_data(retries)
        except requests.exceptions.Timeout:
            print("Timeout.")
            retries += 1
            return self.load_update_data(retries)  # BUG FIX: propagate result
        except requests.exceptions.TooManyRedirects:
            print("Too many redirects. Please check the url.")
            sys.exit(1)
        except requests.exceptions.RequestException as err:
            print("Something went wrong {}".format(err))
            sys.exit(1)

    def get_local_version(self):
        """ parse and return the local server version """
        # Raw string: "\d" in a plain literal is an invalid escape sequence
        # (SyntaxWarning since Python 3.6, slated to become an error).
        pattern = re.compile(r"Server Release ((\d+\.)?(\d+\.)?(\*|\d+))")
        versions = ""
        with open(self.config['CHANGELOG'], 'r') as changelog:
            versions = re.findall(pattern, str(changelog.read()))
        # findall returns tuples of the capture groups; group 0 of the first
        # match is the full version string.
        return str(versions[0][0])

    def get_current_version(self):
        """ return the newest published linux/x86_64 version """
        return str(self.result_json['linux']['x86_64']['version'])

    def get_update_url(self):
        """ return the teamspeak.com download mirror for the newest version """
        return str(self.result_json['linux']['x86_64']['mirrors']['teamspeak.com'])

    def get_checksum(self):
        """ return the checksum of the newest version """
        return str(self.result_json['linux']['x86_64']['checksum'])

    def send_mail(self, message):
        """ send mail according to config """
        msg = MIMEText(message)
        msg['Subject'] = '[TS3] Your TS3 Server needs an update'
        msg['From'] = self.config['MAIL']['USER']
        msg['To'] = self.config['MAIL']['TARGET']
        msg['Date'] = utils.formatdate(localtime=True)
        server = smtplib.SMTP(host=self.config['MAIL']['HOST'],
                              port=self.config['MAIL']['PORT'])
        # server.set_debuglevel(1)
        server.ehlo()
        server.starttls()
        server.ehlo()
        server.login(self.config['MAIL']['USER'],
                     self.config['MAIL']['PASSWORD'])
        server.sendmail(self.config['MAIL']['USER'],
                        self.config['MAIL']['TARGET'], msg.as_string())


def main():
    """ load config file, compare versions and notify by mail """
    config = {}
    try:
        json_config = None
        with open(ARGS.c, 'r') as config_file:
            json_config = json.load(config_file)
        config['CHANGELOG'] = str(json_config['CHANGELOG'])
        config['URL'] = str(json_config['URL'])
        config['MAIL'] = {}
        config['MAIL']['HOST'] = str(json_config['MAIL']['HOST'])
        config['MAIL']['PORT'] = str(json_config['MAIL']['PORT'])
        config['MAIL']['USER'] = str(json_config['MAIL']['USER'])
        config['MAIL']['PASSWORD'] = str(json_config['MAIL']['PASSWORD'])
        config['MAIL']['TARGET'] = str(json_config['MAIL']['TARGET'])
    except ValueError:
        # json.load raises ValueError (JSONDecodeError) on malformed config.
        print("No config was found.", file=sys.stderr)
        sys.exit()
    except KeyError as key_error:
        print("Setting not found: {}".format(key_error), file=sys.stderr)
        sys.exit()
    except FileNotFoundError:
        print("No config was found.", file=sys.stderr)
        sys.exit()

    ts3_notify = Ts3Notify(config)
    local_version = ts3_notify.get_local_version()
    current_version = ts3_notify.get_current_version()
    url = ts3_notify.get_update_url()
    checksum = ts3_notify.get_checksum()
    if current_version != local_version:
        try:
            ts3_notify.send_mail(
                "Your Server has version: {}\nAvailable version is: {}\n"
                "URL: {}\nChecksum: {}".format(local_version, current_version,
                                               url, checksum))
        except smtplib.SMTPException as smtp_exception:
            print("Could not send an email: {}".format(smtp_exception),
                  file=sys.stderr)


if __name__ == '__main__':
    main()
nilq/baby-python
python
import bsddb3
import struct
import json
import flask
import time
from threading import Thread
from Queue import Queue
from StringIO import StringIO
from sqlalchemy import text
from sqlalchemy.sql.elements import TextClause
from table_pb2 import *
from itertools import product, chain, combinations
from dct import *

# NOTE(review): flask, time, Thread and Queue are imported but unused in this
# chunk; they may be needed elsewhere in the file, so they are left in place.
# This module is Python 2 (print statements, iteritems, xrange).


def powerset(iterable):
    "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
    s = list(iterable)
    return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))


def decode_table(buf):
    # Deserialize a protobuf-encoded Table (generated by table_pb2).
    table = Table()
    table.ParseFromString(buf)
    return table


def encode_table(schema, rows):
    """
    Serialize (schema, rows) into a protobuf Table, stored column-major.
    assume everything is uints
    @schema list of attr names
    @rows   sequence of equal-length row tuples
    """
    s = Table.Schema()
    s.name.extend(schema)
    table = Table(schema=s)
    # zip(*rows) transposes the row-major input into columns.
    table.cols.extend(Table.Col(val=col) for col in zip(*rows))
    return table.SerializeToString()


class DS(object):
    """
    Data structures are parameterized so that they are able to answer
    particular classes of queries.

    The encoding number specifies the _type_ of data structure (the class).
    The id specifies a particular instance of the data structure.

    For example, a datacube data structure with encoding 2 may be
    instantiated for queries grouped by (lat, lon, hour), and queries
    grouped by (hour, month).

    Given a data structure, we can represent a query simply with set of
    parameter values.

    Data structures expose methods for offline setup and online serving.

    Offline:
        setup()
    Online:
        cost_est(data)
        __call__(data)
        get_iter(data)
    """
    def __init__(self):
        # Both are expected to be assigned by concrete subclasses.
        self.id = None
        self.encoding = None

    def setup(self):
        """ setup any offline data structures """
        pass

    def __call__(self, data):
        # Answer the query described by `data`; None means "cannot answer".
        return None

    def get_iter(self, data):
        # Yield encoded result blocks for `data`; None means "cannot answer".
        return None

    @staticmethod
    def can_answer(query_template):
        """
        @query_template is the output of the client's QueryTemplate.toWire()
        method.
        """
        return False

    def cost_est(self, data):
        """
        data is the "data" attribute in the output of the client's
        Query.toWire().  Currently, it's a dictionary mapping param names to
        their values.  Returns an estimated cost, or None if unanswerable.
        """
        return None


class Precompute(DS):
    """ Helper base for data structures that pre-compute and cache results
    in an on-disk bsddb hash keyed by the serialized query parameters. """
    name = "precompute"

    def __init__(self, db, *args, **kwargs):
        super(Precompute, self).__init__()
        self.name = Precompute.name
        self.db = db
        # On-disk cache file; subclasses pass a structure-specific fname.
        self.fname = kwargs.get("fname", "precompute.cache")
        self.cache = bsddb3.hashopen(self.fname)
        print "loaded Precompute file %s" % self.fname
        print "%d items" % len(self.cache)

    def __call__(self, data, block_size=50):
        return self.lookup(self.key(data))

    def get_iter(self, data, block_size=50):
        """ XXX: note that it ignores block_size right now """
        block = self.lookup_bytes(self.key(data))
        if block:
            # Frame: [key length, ds id][key bytes][cached table bytes].
            # NOTE(review): DS.__init__ leaves self.id = None, and
            # struct.pack("2I", ...) requires an int — confirm that every
            # instantiated subclass assigns a numeric id before serving.
            key = self.key(data)
            buf = StringIO()
            buf.write(struct.pack("2I", len(key), self.id))
            buf.write(struct.pack("%ds" % len(key), key))
            buf.write(block)
            yield buf.getvalue()
            buf.close()

    def key(self, data):
        """
        This maps the query data to a unique key.
        The analagous function in javascript is js/datastruct.js:queryToKey()
        THESE FUNCTIONS MUST MATCH!!
        """
        return json.dumps(sorted(data.items())).replace(" ", "")

    def setup_cache(self, query_iterable):
        """
        This is called ahead of time to create data structures.
        query_iterable is an iterator that yields pairs of
        (key, db.execute args) to run.
        """
        for key, exec_args in query_iterable:
            cur = self.db.execute(*exec_args)
            schema = cur.keys()
            rows = cur.fetchall()
            self.cache[key] = encode_table(schema, rows)

    def lookup(self, key):
        # NOTE(review): inconsistent return shape — a miss yields the pair
        # (None, None) while a hit yields a single decoded Table; callers
        # must special-case this.
        s = self.lookup_bytes(key)
        if not s:
            return None, None
        return decode_table(s)

    def lookup_bytes(self, key):
        return self.cache.get(key, None)

    def cost_est(self, data):
        # Fixed cost of 100 when the answer is cached; None otherwise.
        if self.key(data) in self.cache:
            return 100
        return None


class GBDataStruct(Precompute):
    # Pre-computes one GROUP BY query per parameter-value combination.
    name = "gbquery"

    def __init__(self, db, spec, *args, **kwargs):
        """
        spec = {
            select: { alias: expr },
            fr: <tablename>,
            groupby: [ "expr", ... ],
            params: { attr: <data type "num" or "str"> }
        }
        """
        fname = "gb_%s.cache" % ",".join(spec["groupby"])
        kwargs['fname'] = kwargs.get("fname", fname)
        super(GBDataStruct, self).__init__(db, *args, **kwargs)
        self.name = GBDataStruct.name
        self.spec = spec
        self.encoding = 1

    def spec_to_sql(self, params):
        """Translate a dict of query parameters into a SQL text() statement.

        NOTE(review): parameter values are interpolated directly into the SQL
        string — safe only for trusted, internally generated parameters.
        """
        qtemplate = """
        SELECT %s
        FROM %s
        WHERE %s
        GROUP BY %s
        """
        s = ["%s AS %s" % (expr, alias) for alias, expr in self.spec['select'].items()]
        s = ", ".join(s)
        g = ", ".join(self.spec["groupby"])
        # "true" guarantees a syntactically valid WHERE even with no params.
        w = ["true"]
        for attr, val in params.iteritems():
            if attr in self.spec['params']:
                if self.spec['params'][attr] == "num":
                    w.append("%s = %s" % (attr, val))
                else:
                    w.append("%s = '%s'" % (attr, val))
        # w always contains "true", so this is simply the AND-join.
        w = w and " AND ".join(w)
        q = text(qtemplate % (s, self.spec["fr"], w, g))
        return q

    def setup_cache(self, param_ranges):
        # Enumerate every subset of parameters and every value combination
        # within each subset, and cache the corresponding query result.
        def f():
            all_names = param_ranges.keys()
            for names in powerset(all_names):
                print names
                iters = map(param_ranges.get, names)
                for i, vals in enumerate(product(*iters)):
                    data = dict(zip(names, vals))
                    key = self.key(data)
                    q = self.spec_to_sql(data)
                    yield key, [q]
        Precompute.setup_cache(self, f())

    @staticmethod
    def can_answer(query_template):
        """
        @query_template is the output of the client's QueryTemplate.toWire()
        method.
        """
        return query_template.get('name') == GBDataStruct.name


class ProgressiveDataStruct(Precompute):
    """
    Python version of js/progds.js.
    The signature and spec are the same as GBDataStruct, however it encodes
    data progressively (DCT-compressed, sparse).

    TODO: write a custom get_iter() in order to return blocks of partial
    results.
    """
    name = "progressive"

    def __init__(self, db, spec, *args, **kwargs):
        """
        spec = {
            select: { alias: expr },
            fr: <tablename>,
            groupby: [ "expr", ... ],
            params: { attr: <data type "num" or "str"> }
        }
        """
        # name of the file cache
        fname = "prog_%s.cache" % ",".join(spec["groupby"])
        kwargs['fname'] = kwargs.get("fname", fname)
        super(ProgressiveDataStruct, self).__init__(db, *args, **kwargs)
        self.name = ProgressiveDataStruct.name
        self.spec = spec
        self.encoding = 2

    def cost_est(self, data):
        """
        Force the cost estimate for progressive data structure to be lower
        than group by data structure (10 vs 100).
        """
        if self.key(data) in self.cache:
            return 10
        return None

    def spec_to_sql(self, params):
        """
        Translates query parameters into an actual SQL string.
        Identical to the function in GBDataStruct (see NOTE there about
        SQL interpolation).
        """
        qtemplate = """
        SELECT %s
        FROM %s
        WHERE %s
        GROUP BY %s
        """
        s = ["%s AS %s" % (expr, alias) for alias, expr in self.spec['select'].items()]
        s = ", ".join(s)
        g = ", ".join(self.spec["groupby"])
        w = ["true"]
        for attr, val in params.iteritems():
            if attr in self.spec['params']:
                if self.spec['params'][attr] == "num":
                    w.append("%s = %s" % (attr, val))
                else:
                    w.append("%s = '%s'" % (attr, val))
        w = w and " AND ".join(w)
        q = text(qtemplate % (s, self.spec["fr"], w, g))
        return q

    def setup_cache(self, param_ranges):
        def f():
            """
            This generator yields all SQL queries that should be precomputed
            """
            all_names = param_ranges.keys()
            for names in powerset(all_names):
                print names
                iters = map(param_ranges.get, names)
                for i, vals in enumerate(product(*iters)):
                    data = dict(zip(names, vals))
                    key = self.key(data)
                    q = self.spec_to_sql(data)
                    yield key, [q]
        # Unlike the base class, each result is progressively encoded.
        for key, exec_args in f():
            cur = self.db.execute(*exec_args)
            schema = cur.keys()
            rows = cur.fetchall()
            self.cache[key] = self.progressively_encode_table(schema, rows)
        print "cache contains %d items" % len(self.cache)

    def progressively_encode_table(self, schema, rows):
        """
        DCT-encode the first column of the result and keep only the nonzero
        coefficients (plus their positions from the second column), then
        serialize as a protobuf Table.

        NOTE(review): this assumes exactly two columns (values, index) and
        mutates `rows` in place via sort() — confirm callers don't reuse rows.
        """
        data = []
        index = []
        s = Table.Schema()
        s.name.extend(schema)
        table = Table(schema=s)
        cnt = 0
        temp_tuple = rows
        # Sort rows by the second column so coefficients align with indexes.
        temp_tuple.sort(key = lambda t: t[1]);
        for col in zip(*temp_tuple):
            if cnt == 0:
                # First column: collect values and DCT-compress them.
                for el in col:
                    data.append(el)
                dct = DCT(data)
                encode = dct.encodeDct2()
                dct.quantize(encode)
                data = encode
            else:
                index = list(col)
            cnt = cnt + 1
        encodedRows = zip(data, index)
        # Drop zero coefficients; keep (value, position) pairs only.
        dataNot0 = []
        indexNot0 = []
        length = len(data)
        for x in xrange(0,length):
            if data[x] != 0:
                dataNot0.append(data[x])
                indexNot0.append(index[x])
        # Sentinel pair (original length, -1) terminates the stream so the
        # decoder knows the uncompressed size.
        dataNot0.append(length)
        indexNot0.append(-1)
        encodedRowsNot0 = zip(dataNot0, indexNot0)
        table.cols.extend(Table.Col(val=col) for col in zip(*encodedRowsNot0))
        return table.SerializeToString()

    @staticmethod
    def can_answer(query_template):
        """
        @query_template is the output of the client's QueryTemplate.toWire()
        method.  Answers the same templates as GBDataStruct.
        """
        return query_template.get('name') in (GBDataStruct.name, ProgressiveDataStruct.name)


# Currently deprecated
class SQLTemplates(Precompute):
    """
    Precomputes templated queries.
    The query template is expresed as a SQL string with parameters:

        SELECT a - a%:a, avg(d)::int
        FROM data
        WHERE b = :b
        GROUP BY a - a%:a

    The above parameterized query can vary the filter condition and the
    discretization of the group by attribute.

    TODO: use this data structure
    """
    name = "templates"

    def __init__(self, db, query_templates, *args, **kwargs):
        super(SQLTemplates, self).__init__(db, *args, **kwargs)
        def to_text(qstr):
            if not isinstance(qstr, TextClause):
                return text(qstr)
            return qstr
        self.name = SQLTemplates.name
        # NOTE(review): `query_template` is undefined — the parameter is named
        # `query_templates` (plural); this raises NameError if instantiated.
        self.query_template = to_text(query_template)
        self.encoding = 2

    def key(self, data):
        # NOTE(review): `q` is undefined here; presumably this should read
        # self.query_template.get_children() — confirm before un-deprecating.
        keys = tuple([c.key for c in q.get_children()])
        return hash(tuple(map(data.get, keys)))

    def setup_cache(self, param_ranges):
        """
        This is called ahead of time to create data structures.
        @param_ranges dictionary of param name --> iterable of assignable
        values
        """
        def f():
            names = param_ranges.keys()
            iters = map(param_ranges.get, names)
            for i, vals in enumerate(product(*iters)):
                data = dict(zip(names, vals))
                yield self.key(data), [self.query_template, data]
        # NOTE(review): printed before the cache is populated, so the count
        # reflects the previous on-disk contents.
        print "cache contains %d items" % len(self.cache)
        return Precompute.setup_cache(self, f())


# register relevant data structure classes
ds_klasses = [GBDataStruct, SQLTemplates, ProgressiveDataStruct]
nilq/baby-python
python
from collections import deque

# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, val=0, left=None, right=None):
#         self.val = val
#         self.left = left
#         self.right = right
class Solution:
    # Longest path seen so far, measured in edges.
    res = 0

    def diameterOfBinaryTree(self, root: TreeNode) -> int:
        """Return the diameter of the tree: the number of edges on the
        longest path between any two nodes."""
        self.dfs(root)
        return self.res

    def dfs(self, node: TreeNode) -> int:
        """Return the height of *node* in edges (-1 for an empty subtree),
        updating self.res with the best diameter passing through it."""
        if node is None:
            return -1
        left_height = self.dfs(node.left)
        right_height = self.dfs(node.right)
        # A path through this node uses both child heights plus two edges.
        candidate = left_height + right_height + 2
        if candidate > self.res:
            self.res = candidate
        return 1 + max(left_height, right_height)
nilq/baby-python
python
def f(<caret>x): return 42
nilq/baby-python
python
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division

import scriptcontext as sc

import compas_fofin


__commandname__ = "FoFin_init"


def RunCommand(is_interactive):
    """Initialise the FoFin session state in Rhino's sticky dictionary.

    Resets the cablenet and the default drawing settings."""
    session_state = {
        'cablenet': None,
        'settings': {
            'scale.reactions': 0.1,
            'layer': "FoFin::Cablenet",
        },
    }
    sc.sticky["FoFin"] = session_state


# ==============================================================================
# Main
# ==============================================================================

if __name__ == '__main__':

    RunCommand(True)
nilq/baby-python
python
import pytest

from geocodr.keys import APIKeys
from werkzeug.test import EnvironBuilder
from werkzeug.wrappers import Request


@pytest.fixture()
def key_file(tmpdir):
    # CSV of API keys: an empty domain field means wildcard (any referrer),
    # multiple domains are separated by ';', and the '#'-prefixed line is
    # presumably skipped as a comment (no test exercises it) — confirm.
    key_file = tmpdir.join("keys.csv")
    key_file.write(
        'key,domains\n'
        'wildcard,\n'
        '# comment,with,commas,\n'
        'multi,example.org;example.com\n'
        'single,test.local'
    )
    yield key_file.strpath


@pytest.mark.parametrize("key,referrer,permitted", [
    # "single" is bound to test.local: subdomains allowed, lookalike domains
    # rejected; a missing or unparsable referrer is permitted.
    ["single", None, True],
    ["single", "", True],
    ["single", "--", True],
    ["single", "http", True],
    ["single", "http://", True],
    ["single", "test.local", True],
    ["single", "http://test.local", True],
    ["single", "https://test.local", True],
    ["single", "http://sub.test.local/path?arg=1", True],
    ["single", "http://sub.test.local", True],
    ["single", "https://subtest.local", False],
    ["single", "http://sub.test.local.com", False],
    ["single", "https://1.2.3.4", False],
    # "multi" allows exactly its two listed domains.
    ["multi", "https://example.org", True],
    ["multi", "https://example.com", True],
    ["multi", "https://example.net", False],
    # "wildcard" allows any referrer, including bare IPs.
    ["wildcard", "https://example.net", True],
    ["wildcard", "https://1.2.3.4", True],
])
def test_api_key(key_file, key, referrer, permitted):
    """APIKeys.is_permitted must match the expected decision for each
    (key, referrer) pair."""
    a = APIKeys(key_file)
    headers = []
    if referrer:
        headers.append(('Referer', referrer))
    builder = EnvironBuilder(method='GET', query_string={'key': key},
                             headers=headers)
    req = Request(builder.get_environ())
    assert a.is_permitted(req) == permitted
nilq/baby-python
python
#!/usr/bin/env python
import rospy
from std_msgs.msg import Header
from humanoid_league_msgs.msg import BallRelative, ObstaclesRelative, ObstacleRelative, Strategy, GameState, RobotControlState
from geometry_msgs.msg import Point, PoseWithCovarianceStamped, Pose2D
import math
import yaml
import rospkg
import os
import tf

# Dictionary mapping role names (as stored by pathmaker) to Strategy role ids.
actionDecoder = {'ROLE_IDLING': 0,
                 'ROLE_OTHER': 1,
                 'ROLE_STRIKER': 2,
                 'ROLE_SUPPORTER': 3,
                 'ROLE_DEFENDER': 4,
                 'ROLE_GOALIE': 5 }


# Loads the dictionary of coordinates from a pathmaker YAML file and returns
# its "positions" list (entries with 'x', 'y', and optionally 'ang'/'action').
def getCoordinates(filename):
    rp = rospkg.RosPack()
    fname = os.path.join(rp.get_path('bitbots_live_tool_rqt'), 'resource', 'paths', filename)
    with open(fname, "r") as file:
        positions = yaml.load(file)  # contains a dictionary
        file.close()  # redundant: the with-block already closes the file
    return positions.get("positions")


# Rotate the vector (x, y) by angle_rad radians counter-clockwise.
def vec_rotate(x, y, angle_rad):
    xneu = x * math.cos(angle_rad) - y * math.sin(angle_rad)
    yneu = y * math.cos(angle_rad) + x * math.sin(angle_rad)
    return [xneu, yneu]


def publisher_main():
    """Replay pathmaker trajectories as ROS messages (ball, robot pose,
    strategy, obstacles, game state, target) at 10 Hz for the live tool."""
    # initialize the publisher node
    rospy.init_node('publisher')
    print('started publisher node')

    pub = rospy.Publisher('ball_relative', BallRelative, queue_size = 10)
    pubRobo = rospy.Publisher('amcl_pose', PoseWithCovarianceStamped, queue_size = 10)
    pubTeam = rospy.Publisher('obstacles_relative', ObstaclesRelative, queue_size = 10)
    pubStrategy = rospy.Publisher('strategy', Strategy, queue_size = 10)
    pubGame = rospy.Publisher('gamestate', GameState, queue_size = 10)
    pubState = rospy.Publisher('robot_state', RobotControlState, queue_size = 10)
    pubTarget = rospy.Publisher('move_base_simple/goal', Pose2D, queue_size = 10)

    rate = rospy.Rate(10)

    # Demo counters: penalty countdown, action switch, half-time bookkeeping.
    timeCounter = 30
    roboActionCounter = 30
    firsthalf = True
    durationHalfGame = 60

    # Coordinates from pathMaker ========================================================================================

    # robo1 with pathmaker
    robo1 = getCoordinates("robo4.yaml")
    robo1Length = len(robo1)
    robo1Counter = 1

    # teammates with pathmaker
    teammate1 = getCoordinates('TeamClubMate1.yaml')
    team1Length = len(teammate1)  # number of entries
    team1Counter = 1

    teammate2 = getCoordinates('TeamClubMate2.yaml')
    team2Length = len(teammate2)
    team2Counter = 1

    # opponents with pathmaker
    opponent1 = getCoordinates('SuperScaryOpponent.yaml')
    op1Length = len(opponent1)
    op1Counter = 1

    # undefined obstacle with pathmaker
    undef = getCoordinates('undef.yaml')
    # NOTE(review): probably meant len(undef) — as written the undef path is
    # cycled modulo the *opponent* path length; confirm and fix.
    undefLength = len(opponent1)
    undefCounter = 1

    # ball with pathmaker
    ball = getCoordinates('HeartBall.yaml')
    ballLength = len(ball)
    ballCounter = 1

    # Counters index each path modulo its length, producing an endless loop.

    while not rospy.is_shutdown():

        # Ball with pathmaker; note the deliberate x/y swap between the
        # pathmaker frame and the ball_relative frame.
        msgBall = BallRelative()
        msgBall.header.stamp = rospy.Time.now()
        msgBall.header.frame_id = "base_link"
        msgBall.ball_relative.y = ball[ballCounter % ballLength].get('x')
        msgBall.ball_relative.x = ball[ballCounter % ballLength].get('y')
        msgBall.confidence = 1.0
        pub.publish(msgBall)
        ballCounter += 1

        # Robo1 with pathmaker
        msgRobo = PoseWithCovarianceStamped()
        msgRobo.header.stamp = rospy.Time.now()
        msgRobo.pose.pose.position.x = robo1[int(robo1Counter) % robo1Length].get('x')
        msgRobo.pose.pose.position.y = robo1[int(robo1Counter) % robo1Length].get('y')

        # Angle of robot converted to a quaternion (yaw only).
        angle = robo1[int(robo1Counter) % robo1Length].get('ang')
        quaternion = tf.transformations.quaternion_from_euler(0, 0, float(angle))
        msgRobo.pose.pose.orientation.x = quaternion[0]
        msgRobo.pose.pose.orientation.y = quaternion[1]
        msgRobo.pose.pose.orientation.z = quaternion[2]
        msgRobo.pose.pose.orientation.w = quaternion[3]
        pubRobo.publish(msgRobo)

        # Role of Robo1, gets information from pathMaker
        msgStrategy = Strategy()
        msgRoleString = robo1[int(robo1Counter) % robo1Length].get('action')
        # actionDecoder maps the role string to the Strategy role id.
        msgStrategy.role = actionDecoder.get(msgRoleString)

        # Action of Robo1, changes after short time (roboActionCounter)
        if roboActionCounter == 0:
            msgStrategy.action = 3  # TRYING_TO_SCORE
        else:
            msgStrategy.action = 2  # GOING_TO_BALL
        pubStrategy.publish(msgStrategy)
        roboActionCounter -= 1
        roboActionCounter = max(roboActionCounter, 0)
        robo1Counter += 1

        # Teammates with pathmaker, contains list of teammates
        msgTeam = ObstaclesRelative()
        msgTeam1 = ObstacleRelative()
        msgTeam1.color = 2  # magenta
        msgTeam1.position.x = teammate1[int(team1Counter) % team1Length].get('x')
        msgTeam1.position.y = teammate1[int(team1Counter) % team1Length].get('y')

        msgTeam2 = ObstacleRelative()
        msgTeam2.color = 2  # magenta
        msgTeam2.position.x = teammate2[int(team2Counter) % team2Length].get('x')
        msgTeam2.position.y = teammate2[int(team2Counter) % team2Length].get('y')

        # Opponents with pathmaker, contains list of opponents
        # NOTE(review): msgOp is created but never populated or published.
        msgOp = ObstaclesRelative()
        msgUndef = ObstacleRelative()
        msgUndef.color = 1  # undef
        msgUndef.position.x = undef[int(undefCounter) % undefLength].get('x')
        msgUndef.position.y = undef[int(undefCounter) % undefLength].get('y')

        msgOp1 = ObstacleRelative()
        msgOp1.color = 3  # cyan
        msgOp1.position.x = opponent1[int(op1Counter) % op1Length].get('x')
        msgOp1.position.y = opponent1[int(op1Counter) % op1Length].get('y')

        # Publish all obstacles
        msgTeam.obstacles = [msgTeam1, msgTeam2, msgOp1, msgUndef]
        pubTeam.publish(msgTeam)
        team1Counter += 1
        team2Counter += 1
        op1Counter += 1
        undefCounter += 1

        # GameState msgs ===========================================================================================
        # Penalty: Seconds till unpenalized and boolean
        msgGame = GameState()
        # NOTE(review): this restamps the already-published ball message;
        # presumably msgGame.header.stamp was intended — confirm.
        msgBall.header.stamp = rospy.Time.now()
        msgGame.secondsTillUnpenalized = timeCounter

        # Penalty boolean
        msgGame.penalized = timeCounter > 0

        # Sets halftime and rest secs
        msgGame.firstHalf = firsthalf
        msgGame.secondsRemaining = durationHalfGame

        # Sets Score
        msgGame.ownScore = 7
        msgGame.rivalScore = 1

        # team colors
        msgGame.teamColor = 1  # magenta

        pubGame.publish(msgGame)
        timeCounter -= 1
        timeCounter = max(timeCounter, 0)

        durationHalfGame -= 1
        if durationHalfGame == 0:
            # Switch to the second half and restart the countdown.
            durationHalfGame = 60
            firsthalf = False

        # Sets hardware state
        msgState = RobotControlState()
        msgState.state = 10
        pubState.publish(msgState)

        # Target: a fixed goal pose per half.
        msgTarget = Pose2D()
        if firsthalf:
            msgTarget.x = 3.5
            msgTarget.y = 2.0
        else:
            msgTarget.x = 2.0
            msgTarget.y = 1.0
        pubTarget.publish(msgTarget)

        rate.sleep()


if __name__ == '__main__':
    try:
        publisher_main()
    except rospy.ROSInterruptException:
        pass
nilq/baby-python
python
import os
import sys
sys.path.append('../')

import numpy as np
import convert_weights
import tensorflow as tf

############## REPRODUCIBILITY ############
tf.set_random_seed(0)
np.random.seed(0)
###########################################

from keras.models import load_model
from keras.models import Sequential, Model
from keras.utils.vis_utils import plot_model
from keras.layers import Dense, BatchNormalization, Input

# Build a small fully connected trunk shared by three output heads.
# NOTE(review): `input` shadows the builtin of the same name (here and below).
input = x = Input((5,))
for i in range(3):
    x = Dense(30)(x)
    x = BatchNormalization()(x)

# MULTIPLE OUTPUTS
output1 = Dense(1)(x)
output2 = Dense(1)(x)
output3 = Dense(1)(x)

# CREATE THE MODEL
multi_output_model = Model(input, outputs=[output1, output2, output3])
multi_output_model.compile(
    loss='categorical_crossentropy',
    optimizer='sgd',
    metrics=['accuracy']
)

# SAVE TO FILE FOR PARSING
multi_output_model.save('multi_output_model.h5')

# CONVERT TO TXT — presumably the converter merges the three heads into one
# model (hence "single_output"); the assert below verifies value equality.
convert_weights.h5_to_txt('multi_output_model.h5', 'single_output_model.txt')

# CONVERT TO H5
convert_weights.txt_to_h5('single_output_model.txt', 'single_output_model.h5')

single_output_model = load_model('single_output_model.h5')

# GRAPHIC PLOT OF MODEL
plot_model(multi_output_model, to_file='../../Figures/multi_output_model.png',
           show_shapes=True, show_layer_names=True)
plot_model(single_output_model, to_file='../../Figures/single_output_model.png',
           show_shapes=True, show_layer_names=True)

# TEST INPUT
input = np.array([[1, 2, 3, 4, 5]])

# COMPARE PREDICTIONS FROM MULTI OUTPUT AND SINGLE OUTPUT MODELS
multiple_output = np.array(multi_output_model.predict(input)).squeeze()
single_output = single_output_model.predict(input).squeeze()

assert np.allclose(multiple_output, single_output)

print('MULTI-OUTPUT:', multiple_output)
print('SINGLE-OUTPUT:', single_output)
nilq/baby-python
python
from django.contrib import admin # DJANGAE from djangae.contrib.gauth.sql.models import GaeUser admin.site.register(GaeUser)
nilq/baby-python
python
from .handler import handler
nilq/baby-python
python
"""OVK learning, unit tests. The :mod:`sklearn.tests.test_learningrate` tests the different learning rates. """ import operalib as ovk def test_constant(): """Test whether constant learning rate.""" eta = ovk.Constant(1) assert eta(10) == 1 def test_invscaling(): """Test whether inverse scaling learning rate.""" eta = ovk.InvScaling(1., 2.) assert eta(10) == 1. / 10. ** 2.
nilq/baby-python
python
# -*- coding: utf-8 -*- # --------------------------------------------------------------------- # Discovery # --------------------------------------------------------------------- # Copyright (C) 2007-2019 The NOC Project # See LICENSE for details # --------------------------------------------------------------------- # Third-party modules import six from mongoengine.document import Document from mongoengine.fields import ( StringField, IntField, ListField, DictField, DateTimeField, FloatField, ) # NOC modules from noc.core.mongo.fields import ForeignKeyField from noc.sa.models.managedobject import ManagedObject @six.python_2_unicode_compatible class Discovery(Document): meta = { "collection": "noc.schedules.inv.discovery", "strict": False, "auto_create_index": False, } job_class = StringField(db_field="jcls") schedule = DictField() ts = DateTimeField(db_field="ts") last = DateTimeField() last_success = DateTimeField(db_field="st") last_duration = FloatField(db_field="ldur") last_status = StringField(db_field="ls") status = StringField(db_field="s") managed_object = ForeignKeyField(ManagedObject, db_field="key") data = DictField() traceback = DictField() runs = IntField() faults = IntField(db_field="f") log = ListField() def __str__(self): return "%s: %s" % (self.managed_object, self.job_class)
nilq/baby-python
python
# coding: utf-8
from common.db import write_session_scope
from mall_spider.common.enums import TaobaoPageType, TaobaoTaskType
from mall_spider.dao.stream_handle_task_dao import get_stream_handle_task_dao
from mall_spider.dao.stream_opt_data_dao import get_stream_opt_data_dao
from mall_spider.dao.stream_unhandle_task_dao import get_stream_unhandle_task_dao
from mall_spider.model.cmm_sys_stream_unhandle_task import CmmSysStreamUnhandleTask
from mall_spider.spiders.actions.context import Context
from mall_spider.spiders.actions.default_action import DefaultAction


class TaobaoListPagePersistAction(DefaultAction):
    """Persist a crawled Taobao list page.

    Within one write session it (1) stores the raw page results as opt data,
    (2) enqueues a derived list task referencing that snapshot, and (3) moves
    the current task from the unhandled queue to the handled queue.
    """

    @staticmethod
    def _project_item(item, category):
        """Map one raw list item dict to the slim dict persisted downstream."""
        return {
            'itemId': item['item_id'],
            'title': item['title'],
            'userType': item['userType'],
            'originalPrice': item['originalPrice'],
            'price': item['price'],
            'priceWap': item['priceWap'],
            'category': category,
        }

    def build_integrate_infos(self, integrate_result):
        """Project the items of the 'integrate' result.

        A missing 'category' defaults to '0', as in the original code.
        """
        items = integrate_result.get('listItem', [])
        return [self._project_item(item, item.get('category', '0')) for item in items]

    def build_sale_infos(self, sale_result):
        """Project the items of the 'sale' result.

        Now tolerates a missing 'category' (defaults to '0') for consistency
        with build_integrate_infos; previously this raised KeyError.
        """
        items = sale_result.get('listItem', [])
        return [self._project_item(item, item.get('category', '0')) for item in items]

    def do_execute(self, context):
        """Persist the crawl results for the current task; returns True."""
        integrate_result = context.get(Context.KEY_TAOBAO_INTERGRATE_RESULT)
        sale_result = context.get(Context.KEY_TAOBAO_SALE_RESULT)
        with write_session_scope() as session:
            good = context.get(Context.KEY_GOOD_DICT)
            stream_opt_data_dao = get_stream_opt_data_dao(session=session)
            stream_unhandle_task_dao = get_stream_unhandle_task_dao(session=session)
            stream_handle_task_dao = get_stream_handle_task_dao(session=session)
            # 1) Raw page snapshot.
            opt_data_entity = {
                'raw_data': {
                    'integrateResult': integrate_result,
                    'saleResult': sale_result,
                    'goodResult': good,
                },
                'type': int(TaobaoPageType.taobao_list),
            }
            entity = stream_opt_data_dao.insert(**opt_data_entity)
            # 2) Derived task referencing the snapshot via origin_id.
            unhandle_task_entity = {
                'raw_data': {
                    'integrateInfos': self.build_integrate_infos(integrate_result),
                    'saleInfos': self.build_sale_infos(sale_result),
                    'goodResult': good,
                },
                'type': int(TaobaoTaskType.taobao_list),
                'origin_id': entity.id,
                'date': good['date'],
            }
            stream_unhandle_task_dao.insert(**unhandle_task_entity)
            # 3) Move the current task from the unhandled to the handled queue.
            task = context.get(Context.KEY_CURRENT_TASK)
            stream_unhandle_task_dao.delete(_filter=[CmmSysStreamUnhandleTask.id == task.id])
            stream_handle_task_dao.insert(**{
                'type': task.type,
                'raw_data': task.raw_data,
                'origin_id': task.origin_id,
                'date': good['date'],
            })
        return True

    def on_create(self, context):
        pass

    def on_start(self, context):
        pass

    def on_complete(self, context):
        pass

    def on_destroy(self, context):
        pass
nilq/baby-python
python
from qtpy import QtCore, QtGui


class DataTreeModel(QtCore.QAbstractItemModel):
    """Qt item model presenting a tree of data nodes (DataTreeItem wrappers)
    for a tree view; rebuilds itself when the root node signals a change."""

    def __init__(self, data_node, parent=None):
        super(DataTreeModel, self).__init__(parent)
        self.data_node = data_node
        self.rootItem = DataTreeItem(self.data_node)
        # Rebuild the item tree whenever the underlying node reports a change.
        data_node.changed.connect(self.on_node_changed, sender=data_node)

    def columnCount(self, parent):
        if parent.isValid():
            return parent.internalPointer().columnCount()
        else:
            return self.rootItem.columnCount()

    def data(self, index, role):
        """
        :type index: QtCore.QModelIndex
        :type role: int
        """
        if not index.isValid():
            return None
        item = index.internalPointer()

        if role == QtCore.Qt.DisplayRole:
            return item.data(index.column())
        elif role == QtCore.Qt.DecorationRole:
            # Prefer the node's own icon; otherwise fall back to theme icons.
            if item.data_node.icon:
                return item.data_node.icon
            else:
                if item.data_node.children:
                    return QtGui.QIcon.fromTheme("folder")
                elif item.data_node.has_subtree():
                    return QtGui.QIcon.fromTheme("package-x-generic")
                else:
                    return None
                    # TODO: provide default
        return None

    def rowCount(self, parent):
        # Per Qt convention, only column 0 has children.
        if parent.column() > 0:
            return 0
        if not parent.isValid():
            parentItem = self.rootItem
        else:
            parentItem = parent.internalPointer()
        return parentItem.childCount()

    def index(self, row, column, parent):
        if not self.hasIndex(row, column, parent):
            return QtCore.QModelIndex()
        if not parent.isValid():
            parentItem = self.rootItem
        else:
            parentItem = parent.internalPointer()
        childItem = parentItem.child(row)
        if childItem:
            return self.createIndex(row, column, childItem)
        else:
            return QtCore.QModelIndex()

    def parent(self, index):
        if not index.isValid():
            return QtCore.QModelIndex()
        childItem = index.internalPointer()
        parentItem = childItem.parent
        # The invisible root has no valid model index.
        if parentItem == self.rootItem:
            return QtCore.QModelIndex()
        return self.createIndex(parentItem.row(), 0, parentItem)

    def headerData(self, section, orientation, role):
        # NOTE(review): three headers are declared but DataTreeItem.columnCount
        # returns 1 (see the commented "return 3" there) — confirm intent.
        if (orientation == QtCore.Qt.Horizontal and
                role == QtCore.Qt.DisplayRole):
            return ["Name", "Type", "Shape"][section]
        return None

    def on_node_changed(self, sender):
        self.reload()

    def reload(self):
        # Rebuild the wrapped item tree and tell attached views to reset.
        self.rootItem.reload_items()
        self.modelReset.emit()

    @property
    def title(self):
        return self.data_node.title


class DataTreeItem:
    """Wrapper around one data node, caching its child items for the model."""

    def __init__(self, data_node, parent=None, subtrees=True):
        # NOTE(review): the `subtrees` parameter is accepted but never used.
        self.parent = parent
        self.data_node = data_node
        self.reload_items()

    def data(self, column):
        # Column 0: name; column 1: node type (plus subtree type);
        # column 2: shape of the contained data object, if any.
        if column == 0:
            return self.data_node.title
        elif column == 1:
            node_type = self.data_node.node_type
            if self.data_node.has_subtree():
                node_type += " (%s)" % self.data_node.subtree().node_type
            return node_type
        elif self.data_node.has_object():
            if column == 2:
                if self.data_node.data_object.shape:
                    return " x ".join(str(dim) for dim in self.data_node.data_object.shape)
        return ""

    def row(self):
        # Position of this item among its parent's children (0 for the root).
        if self.parent:
            return self.parent.childItems.index(self)
        return 0

    def childCount(self):
        return len(self.childItems)

    def child(self, row):
        return self.childItems[row]

    def columnCount(self):
        return 1
        # return 3

    def reload_items(self):
        # Children come from the node itself plus, if present, its subtree.
        self.childItems = []
        for node_child in self.data_node.children:
            self.childItems.append(DataTreeItem(node_child, self))
        if self.data_node.has_subtree():
            subtree = self.data_node.subtree()
            if subtree:
                for tree_child in subtree.children:
                    self.childItems.append(DataTreeItem(tree_child, self))
nilq/baby-python
python
import math

# ANSI SGR escape sequences used to colorize terminal output.
style_normal = "\033[0m"
style_great_success = "\033[1;32m"
style_success = "\033[32m"
style_error = "\033[31m"
style_warning = "\033[33m"
style_info = "\033[0m"
style_stealthy = "\033[1;37m"


def __generic_style(c):
    """Return a function that wraps a string in style `c` and resets after."""
    def _x(s):
        return c + s + style_normal
    return _x


success = __generic_style(style_success)
error = __generic_style(style_error)
warning = __generic_style(style_warning)
great_success = __generic_style(style_great_success)
stealthy = __generic_style(style_stealthy)
info = __generic_style(style_info)


def base32hex(s):
    """Map each base32hex digit of `s` (RFC 4648 "extended hex" alphabet
    0-9a-v) to its decimal value and concatenate the values as a string.

    Bug fix: the original passed the raw int indices to "".join, which
    raises TypeError on any non-empty input (str.join requires strings);
    the values are now stringified before joining.  Raises ValueError for
    characters outside the alphabet, as before.
    """
    ctable = '0123456789abcdefghijklmnopqrstuv'
    return "".join([str(ctable.index(c)) for c in s])


def alphabet(tks):
    """Return the set of characters appearing across the given strings."""
    return set("".join(tks))


def phi(z):
    """Standard normal CDF at z, computed via the error function."""
    return (1/2.) * (1 + math.erf(z/math.sqrt(2)))
nilq/baby-python
python
# pip install feedparser # pip install notify2 # [Python Desktop News Notifier in 20 lines](http://geeksforgeeks.org/python-desktop-news-notifier-in-20-lines/) # [Desktop Notifier in Python](https://www.geeksforgeeks.org/desktop-notifier-python/) import feedparser import notify2 import os import time # https://www.espncricinfo.com/ci/content/rss/feeds_rss_cricket.html LIVE_SCORES_RSS = 'http://static.cricinfo.com/rss/livescores.xml' GLOBAL_NEWS_RSS = 'https://www.espncricinfo.com/rss/content/story/feeds/0.xml' def parseFeed(): f = feedparser.parse(LIVE_SCORES_RSS) ICON_PATH = os.getcwd() + "/icon.ico" notify2.init('News Notify') for item in f['items']: n = notify2.Notification(item['title'], item['summary'], icon=ICON_PATH) n.set_urgency(notify2.URGENCY_NORMAL) n.show() n.set_timeout(15000) time.sleep(1200) if __name__ == '__main__': parseFeed()
nilq/baby-python
python
import requests

from sniplink.utils import *
from sniplink.api import API
from sniplink.objects import ShortLinkData


class Client:
    """
    The Backend Client

    Sniplink-Py is powered by a back-end client/runner, this system
    is responsible for ensuring safety among API access. Once you've registered a client,
    you can access all the Sniplink-Py API features available without worry.

    It's best to declare your client in the global scope to ensure you only
    ever have one client active.
    """

    def __init__(self):
        pass

    @staticmethod
    def get_link(public_id):
        """
        Fetches the data of shortlink with provided public ID.

        :param public_id: public identifier of an existing shortlink
        :returns ShortLinkData:
        """
        resp = requests.get(API.link_endpoint + f"/{public_id}").json()
        return ShortLinkData(resp['id'], resp['creationTime'], resp['expirationTime'],
                             resp['value'], resp['shortUrl'])

    @staticmethod
    def create_link(expires_in, url):
        """
        Creates a new shortlink with provided expires_in, url values.

        Note: expires_in value represents a unix timestamp;
        the maximum expiration time is 30 days.

        :param expires_in: unix timestamp as int, float, or numeric string
        :param url: the URL to shorten
        :returns ShortLinkData:
        :raises SnipLinkError: if expires_in is not an int, float, or str
        """
        body = {
            "value": url
        }

        # The original code had two duplicated branches (int/float and str)
        # that both executed `int(expires_in)`; they are merged here.
        # Behavior is unchanged: a non-numeric string still raises
        # ValueError from int(), and unsupported types raise SnipLinkError.
        if isinstance(expires_in, (int, float, str)):
            body["expirationTime"] = int(expires_in)
        else:
            raise SnipLinkError("Invalid expires in value passed.")

        resp = requests.post(API.link_endpoint, json=body,
                             headers={'content-type': 'application/json'}).json()
        return ShortLinkData(resp['id'], resp['creationTime'], resp['expirationTime'],
                             resp['value'], resp['shortUrl'])
nilq/baby-python
python
class AbilityChangeEvent:
    """Event that indicates an ability change."""

    def __init__(self, data, type) -> None:
        """Keep the raw event payload and the event-type string."""
        self.data = data
        self.type = type

    @property
    def sphere_id(self) -> str:
        """Id of the sphere the event belongs to."""
        sphere = self.data["sphere"]
        return sphere["id"]

    @property
    def cloud_id(self) -> str:
        """Cloud id of the stone whose ability changed."""
        stone = self.data["stone"]
        return stone["id"]

    @property
    def unique_id(self) -> str:
        """Unique id (uid) of the stone whose ability changed."""
        stone = self.data["stone"]
        return stone["uid"]

    @property
    def ability_type(self) -> str:
        """Which kind of ability changed."""
        ability = self.data["ability"]
        return ability["type"]

    @property
    def ability_enabled(self) -> bool:
        """Whether the ability is enabled."""
        ability = self.data["ability"]
        return ability["enabled"]

    @property
    def ability_synced_to_crownstone(self) -> bool:
        """Whether the ability state has been synced to the Crownstone."""
        ability = self.data["ability"]
        return ability["syncedToCrownstone"]
nilq/baby-python
python
from tkinter import messagebox
import pandas as pd
import matplotlib.pyplot as plt
import tkinter as tk
import os


def get_chart_user(date):
    # Load the per-day drowsiness CSV (one row per measurement) and plot an
    # EAR-over-time line chart for every user found in the file, one figure
    # per user.  If the file is missing, show an info dialog instead.
    # assumes the CSV has "name", "time" and "EAR" columns — TODO confirm
    if os.path.exists("re/drowsiness_files/"+date+".csv"):
        data=pd.read_csv("re/drowsiness_files/"+date+".csv")
        # Missing names would otherwise create a NaN group.
        data.fillna("Unknown",inplace=True)
        gb=data.groupby("name")
        su=gb.sum()
        # Group keys (user names) come from the aggregated index.
        l=list(su.index.values)
        # NOTE(review): `user` is filled but never read afterwards — looks
        # like dead code; each group is re-fetched into `grb` for plotting.
        user=[]
        for i in l:
            user.append(gb.get_group(i))
            grb=gb.get_group(i)
            # blocks until each figure window is closed
            grb.plot(x="time", y="EAR", rot=45, title=i)
            plt.show()
    else:
        messagebox.showinfo(title="sample", message="Report/data is not available")


def bar_chart():
    # Small Toplevel dialog: the user types a date (dd-mm-yyyy) and the
    # button generates the per-user line charts for that day.
    root = tk.Toplevel()
    title=tk.Label(root,text="Report Page(Line chart)")
    title.pack()
    hint_date=tk.Label(root,text="dd-mm-yyyy please fill in this format")
    hint_date.pack()
    date=tk.Entry(root)
    date.pack()
    # NOTE(review): "Genrate" is a typo in a user-facing string
    # ("Generate") — left unchanged here since this is a doc-only pass.
    btn=tk.Button(root,text="Genrate",command=lambda :get_chart_user(date.get()))
    btn.pack()
    root.mainloop()
nilq/baby-python
python
"""User memberships in teams.""" import dataclasses import kaptos.db import roax.schema as s from roax.resource import operation @dataclasses.dataclass class Member: """User membership in team.""" id: s.uuid(description="Identifies the membership.") team_id: s.uuid(description="Identifies the team.") user_id: s.uuid(description="Identifies the user.") status: s.str( description="Status of user's group membership.", enum={"active", "suspended", "requested", "denied"}, ) roles: s.set( description="User role(s) in team.", items=s.str(enum={"read", "submit", "admin", "owner"}), ) _required = "team_id user_id status roles" schema = s.dataclass(Member) class Members(kaptos.db.TableResource): schema = schema @operation def create(self, _body: schema) -> s.dict({"id": schema.attrs.id}): return super().create(_body) @operation def read(self, id: schema.attrs.id) -> schema: return super().read(id) @operation def update(self, id: schema.attrs.id, _body: schema) -> None: return super().update(id, _body) @operation def delete(self, id: schema.attrs.id) -> None: return super().delete(id)
nilq/baby-python
python
#
# Copyright (c) 2016, deepsense.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from neptune.internal.common import NeptuneException, NeptuneIOException


class NeptuneReadConfigException(NeptuneIOException):
    """Raised when the job configuration file cannot be read from disk."""

    def __init__(self, io_error):
        super(NeptuneReadConfigException, self).__init__(io_error)
        # Append context to the message produced by the base IO exception.
        self.message += " Failed to load job configuration file."


class ParameterYAMLInvalidDefinitionException(NeptuneException):
    """Raised for an unsupported parameter definition in the YAML config."""

    def __init__(self, parameter_name):
        super(ParameterYAMLInvalidDefinitionException, self).__init__(
            u'The provided definition in YAML configuration file for parameter \"{}\" is not supported!'. \
            format(parameter_name))


class ParameterCLIDuplicatedDefinitionException(NeptuneException):
    """Raised when the same parameter is given twice via -p/--parameter."""

    def __init__(self, parameter_name):
        super(ParameterCLIDuplicatedDefinitionException, self).__init__(
            u'The provided definition in -p/--parameter option for parameter \"{}\" is duplicated!'. \
            format(parameter_name))


class JobConfigFileNotYAMLException(NeptuneException):
    """Raised when the job config file is not parseable as YAML."""

    def __init__(self, job_config_path):
        super(JobConfigFileNotYAMLException, self).__init__(
            u'The provided job config file {} is not in YAML format!'.format(job_config_path))


class InvalidJobConfigException(NeptuneException):
    """Generic 'configuration is invalid' error carrying a cause message."""

    def __init__(self, job_config_path, cause):
        message = u'The provided job configuration {} is invalid! {}'.format(job_config_path, cause)
        super(InvalidJobConfigException, self).__init__(message)


class MetricNotDeclaredException(NeptuneException):
    """Raised when hyper-parameter notation is used without any metric declared."""

    def __init__(self, param_name):
        cause = u"Parameter '{param_name}' is declared using hyper-parameter notation but "\
                u"no metric is declared in the experiment configuration file."\
            .format(param_name=param_name)
        super(MetricNotDeclaredException, self).__init__(cause)


class NoReferenceParameterException(NeptuneException):
    """Raised when a parameter references a definition that does not exist."""

    def __init__(self, param_name):
        cause = u"Parameter '{param_name}' must reference to existing parameter definition"\
            .format(param_name=param_name)
        super(NoReferenceParameterException, self).__init__(cause)
        # Kept on the instance so callers can identify the offending parameter.
        self.param_name = param_name


class NoReferenceParameterInException(NeptuneException):
    """Like NoReferenceParameterException, but scoped to a specific argument."""

    def __init__(self, param_name, arg, message=None):
        cause = u"Parameter '{param_name}' in '{arg}' must reference to existing parameter definition.\n"\
                u"{message}"\
            .format(param_name=param_name, arg=arg, message=message or "")
        super(NoReferenceParameterInException, self).__init__(cause)


class NoValueSetException(NeptuneException):
    """Raised when a declared parameter has no value assigned."""

    def __init__(self, param_name):
        cause = u"Parameter '{param_name}' doesn't have a value".format(param_name=param_name)
        super(NoValueSetException, self).__init__(cause)


class JobConfigValidationFailException(InvalidJobConfigException):
    """Aggregates multiple validation errors into one InvalidJobConfigException."""

    def __init__(self, job_config_path, validation_errors):
        # Number each error (1-based) for readability in the final message.
        enumerated_validation_errors = [
            u'{}. {}\n'.format(index + 1, validation_error)
            for index, validation_error in enumerate(validation_errors)
        ]
        cause = 'Validation errors: ' + ', '.join(enumerated_validation_errors)
        super(JobConfigValidationFailException, self).__init__(job_config_path, cause)
nilq/baby-python
python
#!/usr/bin/env python # coding=utf-8 from setuptools import setup # setup configuration is in `setup.cfg` setup()
nilq/baby-python
python
import hashlib
# import uuid
import os
import tempfile


def get_file(logger, storage_root, doc_uuid, image_url):
    """Download ``image_url["url"]`` into a temporary file and hash it.

    :param logger: logger used for progress / error reporting
    :param storage_root: unused here (kept for interface compatibility)
    :param doc_uuid: unused here (kept for interface compatibility)
    :param image_url: dict with at least "url" and "extension" keys
    :returns: tuple ``(path, sha1_hexdigest, error)`` where ``error`` is
        True (with empty path/hash) if the download failed
    """
    # Local import keeps the stdlib hashing utilities in this module
    # usable even when `requests` is not installed.
    import requests

    # local_path = "%s/%s/images/tmp/%s.%s" % (storage_root, doc_uuid, uuid.uuid4(), image_url["extension"])
    try:
        r = requests.get(image_url["url"])
        temp = tempfile.NamedTemporaryFile(delete=False,
                                           suffix=".%s" % (image_url["extension"]))
        temp.write(r.content)
        # Bug fix: close (and thereby flush) the file before hashing it.
        # Previously hash_file() re-opened temp.name while the handle was
        # still open, so buffered bytes could be missing from the digest,
        # and the handle itself leaked.
        temp.close()
        logger.info("%s => %s" % (image_url["url"], temp.name))
    except Exception as e:
        logger.exception(e)
        return '', '', True
    return temp.name, hash_file(temp.name), False


def create_path(storage_root, doc_uuid):
    """Ensure the per-document temporary image directory exists."""
    directory = "%s/%s/images/tmp" % (storage_root, doc_uuid)
    if not os.path.exists(directory):
        os.makedirs(directory, exist_ok=True)


def hash_file(filename):
    """Return the SHA-1 hex digest of the file at ``filename``.

    Reads in 1 KiB chunks so arbitrarily large files are hashed without
    loading them fully into memory.
    """
    h = hashlib.sha1()
    with open(filename, 'rb') as file:
        # iter(callable, sentinel) yields chunks until read() returns b''.
        for chunk in iter(lambda: file.read(1024), b''):
            h.update(chunk)
    return h.hexdigest()
nilq/baby-python
python
import socket import sys import pickle import time from tkinter import * from tkinter import ttk from tkinter import messagebox, BooleanVar from tkinter import font from PIL import Image import glob import os from PIL import Image, ImageTk import argparse import pprint import random import numpy as np import struct import pdb import pandas as pd import json import matplotlib import matplotlib.pyplot as plt from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk from matplotlib.backend_bases import key_press_handler from matplotlib.figure import Figure import seaborn as sns from matplotlib import style style.use('ggplot') from sklearn.metrics import precision_recall_fscore_support, accuracy_score import os,sys,inspect currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) parentdir = os.path.join(os.path.dirname(currentdir),'src') sys.path.insert(0,parentdir) from utils.utils import get_args, get_data BLANK_IMG = Image.new('RGB', (80, 80), (200,200,200)) def setup_connection(server_address=('localhost', 8990)): # Create a TCP/IP socket sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Connect the socket to the port where the server is listening print('connecting to {} port {}'.format(*server_address)) sock.connect(server_address) return sock def send_data(sock, data): print('sending', data) pickled = pickle.dumps(data) send_one_message(sock, pickled) def send_one_message(sock, data): length = len(data) sock.sendall(struct.pack('!I', length)) time.sleep(0.5) sock.sendall(data) def recv_one_message(sock): lengthbuf = recvall(sock, 4) if not lengthbuf: return None length, = struct.unpack('!I', lengthbuf) data = recvall(sock, length) return data def recvall(sock, count): buf = b'' while count: newbuf = sock.recv(count) if not newbuf: return None buf += newbuf count -= len(newbuf) return buf def wait_for_response(sock): print('waiting for a response') data = recv_one_message(sock) if data is None: 
return None return pickle.loads(data) class Sampler(): def __init__(self, dataset, seed, num_targets): self.dataset = dataset self.num_targets = num_targets self.seed = seed self.rng = np.random.RandomState(seed) self.previous_supports = {} self.potential_supports = {} self.selected_supports = {} self.targets = {} for clss in self.dataset.class_dict: idxs = list(self.dataset.class_dict[clss]) print(type(idxs)) self.rng.shuffle(idxs) self.targets[clss] = idxs[:self.num_targets] self.potential_supports[clss] = idxs[self.num_targets:] self.previous_supports[clss] = [] self.selected_supports[clss] = [] def move_selected_to_previous(self): for clss in self.selected_supports: for idx in self.selected_supports[clss]: self.previous_supports[clss].append(idx) self.selected_supports[clss] = [] def deselect_support(self, clss, idx): self.selected_supports[clss].remove(idx) self.potential_supports[clss].append(idx) def deselect_supports(self): for clss in self.selected_supports: for idx in self.selected_supports[clss]: self.potential_supports[clss].append(idx) self.selected_supports[clss] = [] def select_support(self, clss, idx): self.selected_supports[clss].append(idx) self.potential_supports[clss].remove(idx) def sync_selected(self, selected): self.deselect_supports() for clss in selected: for idx in selected[clss]: self.select_support(clss, idx) def sample_potential_supports(self, n_by_clss=None): sample = {} for clss in self.potential_supports: potential = self.potential_supports[clss] selected = self.selected_supports[clss] if n_by_clss is None: n = len(potential) - len(selected) elif clss in n_by_clss: n = min(n_by_clss[clss], len(potential) - len(selected)) elif clss not in n_by_clss: continue self.rng.shuffle(potential) count = 0 sample[clss] = [] for i in range(n): idx = potential[i] sample[clss].append(idx) return sample def get_previous_supports(self): return self.previous_supports def get_selected_supports(self): return self.selected_supports def get_targets(self, 
target_classes): return {clss:self.targets[clss] for clss in target_classes} class MyButton(Button): def __init__(self, master, **kw): super().__init__(master, bg="red", activebackground="blue", **kw) # self.configure(bg="SlateBlue2", activebackground="SlateBlue1") # self.bind("<Enter>", self.on_enter) # self.bind("<Leave>", self.on_leave) def on_enter(self, e): print("enter") # self.configure(bg="SlateBlue1") def on_leave(self, e): print("leave") # self.configure(bg="SlateBlue2", activebackground="SlateBlue2") def _image_for_display(im): # print("type:", type(im)) if type(im) == np.ndarray: h, w, c = np.shape(im) if c == 1: im = im.squeeze() im = Image.fromarray(im, 'L') else: im = Image.fromarray(im) im = im.resize((70, 70)) im = ImageTk.PhotoImage(im) return im class MyImageButton(Frame): def __init__(self, frame): super().__init__(frame, borderwidth=4) self.is_selected = False self.image = None self.enabled = True self.button = Button(self, padx=0, bd=0, pady=0, command=self.toggle) self.button.pack() self.stats = {} def toggle(self): if self.enabled: if self.is_selected: self.deselect() else: self.select() def set_image(self, image): self.image = _image_for_display(image) self.button.configure(image=self.image) def select(self): self.is_selected = True self.configure(bg="red") def deselect(self): self.is_selected = False self.configure(bg="white") def enable(self): self.enabled = True self.configure(bg="white") def disable(self): self.enabled = False self.configure(bg="light gray") def disable2(self): self.enabled = False self.configure(bg="blue") class MyImageLabel(Frame): def __init__(self, frame): super().__init__(frame, borderwidth=4, padx=1, pady=1) self.is_selected = False self.image = None self.image_label = Label(self, padx=0, bd=0, pady=0, font=("Helvetica", 10)) self.image_label.pack(fill=BOTH, expand=True) self.caption = None self.caption_label = Label(self, padx=0, bd=0, pady=0, font=("Helvetica", 10)) self.caption_label.pack(fill=BOTH, 
expand=True) def set_image(self, image): self.image = _image_for_display(image) self.image_label.configure(image=self.image) def set_caption(self, caption): self.caption = caption self.caption_label.configure(text=caption) def get_caption(self): return self.caption def set_style_colour(self, colour): self.configure(bg=colour) self.caption_label.configure(fg=colour) class SupportSetButtonFrame(Frame): def __init__(self, master, num_rows, num_cols): super().__init__(master, borderwidth=4) self.num_rows = num_rows self.num_cols = num_cols self.rowconfigure(0, weight=0) self.rowconfigure(1, weight=1) title_frame = Frame(self) title_frame.grid(column=0, row=0, sticky="wesn") title = Label(title_frame, text='Support set', borderwidth=5, font=("Helvetica", 10)) title.pack(fill='both', expand=True) TITLE_FONT = font.Font(self, size=15) title.configure(bg="gray", fg="white", font=TITLE_FONT) self.matrix_frame = Frame(self) self.matrix_frame.grid(column=0, row=1, sticky="swen") self.init_matrix() # self.control_frame = Frame(self) # resample_button_wrapper = Frame(self.control_frame) # resample_button_wrapper.grid(column=0, row=2, sticky="swen") # self.resample_button = Button(self.control_frame, text='Resample Supports') # self.resample_button.pack(fill=BOTH, expand=True) def clear_matrix(self): for widget in self.matrix_frame.winfo_children(): widget.destroy() def init_matrix(self): self.class_labels = [] self.image_buttons = [] for i_row in range(self.num_rows): label_wrapper = Frame(self.matrix_frame, borderwidth=2) label_wrapper.grid(column=0, row=i_row, sticky="swen") label = Label(label_wrapper, text="") label.configure(bg='#EEEEEE', fg='black') label.pack(fill='both', expand=True) self.class_labels.append(label) self.image_buttons.append([]) for j_col in range(self.num_cols): button = MyImageButton(self.matrix_frame) button.set_image(BLANK_IMG) button.grid(column=j_col+1, row=i_row, sticky="swen") button.deselect() button.disable() 
self.image_buttons[i_row].append(button) def redraw_support_set(self, dataset, previous, selected, potential): classes = list() for cls in previous.keys(): if len(previous[cls]) >= 1 and cls not in classes: classes.append(cls) # print('previous classes', classes) for cls in selected.keys(): if len(selected[cls]) >= 1 and cls not in classes: classes.append(cls) # print('selected classes', classes) potential_classes = list(potential.keys()) # print('before', potential_classes) random.shuffle(potential_classes) # print('after', potential_classes) for cls in potential_classes: if cls not in classes: classes.append(cls) print('classes', classes) self.image_button_mapping = [] for i in range(min(len(classes), len(self.class_labels))): clss = classes[i] self.class_labels[i].configure(text=clss) self.image_button_mapping.append([]) j_offset = 0 if clss in previous: num_cols = min(len(previous[clss]), self.num_cols - j_offset) for j in range(num_cols): button = self.image_buttons[i][j+j_offset] idx = previous[clss][j] image = dataset.image_data[idx] button.set_image(image) button.deselect() button.disable2() self.image_button_mapping[i].append((clss, idx)) j_offset+=num_cols if clss in selected: num_cols = min(len(selected[clss]), self.num_cols - j_offset) for j in range(num_cols): button = self.image_buttons[i][j+j_offset] idx = selected[clss][j] image = dataset.image_data[idx] button.set_image(image) button.enable() button.select() self.image_button_mapping[i].append((clss, idx)) j_offset+=num_cols if clss in potential: num_cols = min(len(potential[clss]), self.num_cols - j_offset) for j in range(num_cols): button = self.image_buttons[i][j+j_offset] idx = potential[clss][j] image = dataset.image_data[idx] button.set_image(image) button.enable() button.deselect() self.image_button_mapping[i].append((clss, idx)) j_offset+=num_cols # fill up the remaining cols with blanks (i.e. 
when more columns than samples in the class) if self.num_cols - j_offset > 0: cols_remaining = self.num_cols - j_offset for j in range(cols_remaining): button = self.image_buttons[i][j+j_offset] button.set_image(BLANK_IMG) button.deselect() button.disable() self.image_button_mapping[i].append(None) # fill up the remaining rows with blanks (i.e. when more rows than classes in dataset) for i in range(len(classes), self.num_rows): self.class_labels[i].configure(text="") self.image_button_mapping.append([]) for j in range(self.num_cols): button = self.image_buttons[i][j] button.set_image(BLANK_IMG) button.deselect() button.disable() self.image_button_mapping[i].append(None) def add_row(self): self.num_rows += 1 self.clear_matrix() self.init_matrix() def add_column(self): self.num_cols += 1 self.clear_matrix() self.init_matrix() def subtract_row(self): self.num_rows -= 1 self.clear_matrix() self.init_matrix() def subtract_column(self): self.num_cols -= 1 self.clear_matrix() self.init_matrix() def get_selected(self): selected = {} for i in range(self.num_rows): for j in range(self.num_cols): button = self.image_buttons[i][j] if button.enabled and button.is_selected: tag = self.image_button_mapping[i][j] if tag is not None: clss, idx = tag if clss not in selected: selected[clss] = [idx] else: selected[clss].append(idx) return selected def deselect_all(self): selected = {} for i in range(self.num_rows): for j in range(self.num_cols): button = self.image_buttons[i][j] if button.enabled and button.is_selected: button.deselect() return selected class GraphFrame(Frame): def __init__(self, master): super().__init__(master, borderwidth=4) self.rowconfigure(0, weight=1) self.rowconfigure(1, weight=1) title_frame = Frame(self) title_frame.grid(column=0, row=0, sticky="wesn") title = Label(title_frame, text='Graphs', borderwidth=5) title.pack(fill='both', expand=True, side = LEFT) TITLE_FONT = font.Font(self, size=15) title.configure(bg="gray", fg="white", font=TITLE_FONT) 
graph_frame_wrapper = Frame(self) graph_frame_wrapper.grid(column=0, row=1, sticky="wesn") self.graph_frames = {} self.figs = {} self.graph_canvases = {} self.metrics = ['accuracy', 'precision', 'recall', 'f1'] for i, m in enumerate(self.metrics): graph_frame = Frame(graph_frame_wrapper) graph_frame.grid(column=0, row=0, sticky="wesn") fig = Figure(figsize=(8.5,5), dpi=100) graph_canvas = FigureCanvasTkAgg(fig, master=graph_frame) graph_canvas.draw() graph_canvas.get_tk_widget().pack(fill=BOTH, expand=True) self.graph_frames[m] = graph_frame self.figs[m] = fig self.graph_canvases[m] = graph_canvas if i >= 1: graph_frame.grid_remove() dropdown_frame = OptionMenuFrame(title_frame, 'Metric', self.graph_frames, choice_order=self.metrics) dropdown_frame.pack(side = RIGHT) def plot(self, stats_by_subtask): for m in self.metrics: self.figs[m].clear() latest = stats_by_subtask[-1] models = [] y = [] for model in latest: models.append(model) print(latest[model].keys()) y.append(latest[model][m]) ax = self.figs[m].add_subplot(211) x = np.arange(len(models)) ax.bar(x, y, 0.5) ax.set_xticks(x) ax.set_xticklabels(models) ax.set_ylabel(m) # ax.yticks(rotation=90) # ax.set_title('Accuracy for each algorithm') ax.yaxis.set_ticks(np.linspace(0, 1.0, 11)) ax.set_ylim([0.15, 1.05]) ax.tick_params(axis='y', which='major') for tick in ax.get_xticklabels(): tick.set_rotation(10) ax = self.figs[m].add_subplot(212) # ax.set_title('Accuracy through the subtasks') x = list(range(len(stats_by_subtask))) for model in models: accs = [] for stats in stats_by_subtask: accs.append(stats[model][m]) ax.plot(x, accs, marker='.') ax.yaxis.set_ticks(np.linspace(0, 1.0, 11)) ax.set_ylim([0.15, 1.05]) ax.tick_params(axis='y', which='major') ax.set_xticks(x) ax.set_ylabel(m) ax.set_xlabel('Subtask ID') ax.legend(models, loc='upper center', bbox_to_anchor=(0.5,1.27), ncol=4, frameon=True) plt.subplots_adjust( wspace=0.4, hspace=0.155 ) self.figs[m].tight_layout() self.graph_canvases[m].draw() def 
clear_graph(self): for m, fig in self.figs.items(): fig.clear() for m, graph in self.graph_canvases.items(): graph.draw() def set_size(w,h, ax=None): """ w, h: width, height in inches """ if not ax: ax=plt.gca() l = ax.figure.subplotpars.left r = ax.figure.subplotpars.right t = ax.figure.subplotpars.top b = ax.figure.subplotpars.bottom figw = float(w)/(r-l) figh = float(h)/(t-b) ax.figure.set_size_inches(figw, figh) class OptionMenuFrame(Frame): def __init__(self, master, title, choice_frames, choice_order=None, grid_or_pack='grid'): super().__init__(master, borderwidth=4) if choice_order is None: choice_order = choice_frames.keys() wrapper_frame = Frame(self) wrapper_frame.grid(row = 0, column = 0, sticky="wesn") tkvar = StringVar(wrapper_frame) tkvar.set(choice_order[0]) Label(wrapper_frame, text=title).grid(row = 0, column = 0) popupMenu = OptionMenu(wrapper_frame, tkvar, *choice_order) popupMenu.grid(row = 0, column = 1) # on change dropdown value def change_dropdown(*args): for algo_name in choice_frames: if grid_or_pack == 'grid': choice_frames[algo_name].grid_remove() # remove from view else: choice_frames[algo_name].pack_forget() algo_name = tkvar.get() if grid_or_pack == 'grid': choice_frames[algo_name].grid() # show in view else: choice_frames[algo_name].pack() # link function to change dropdown tkvar.trace('w', change_dropdown) class ConfMatrixFrame(Frame): def __init__(self, master, models): super().__init__(master, borderwidth=4) self.rowconfigure(0, weight=0) self.rowconfigure(1, weight=1) self.models = models self.figs = {} self.graph_canvas = {} tab_frames = {} title_frame = Frame(self) title_frame.grid(column=0, row=0, sticky="wesn") title = Label(title_frame, text='Confusion Matrix (x=pred, y=true)', borderwidth=5) title.pack(fill='both', side = LEFT, expand=True) TITLE_FONT = font.Font(self, size=15) title.configure(bg="gray", fg="white", font=TITLE_FONT) for i, model in enumerate(models): model_tab = Frame(self) model_tab.grid(column=0, row=1, 
sticky="wesn") self.figs[model] = Figure(figsize=(8,3.5), dpi=100) self.graph_canvas[model] = FigureCanvasTkAgg(self.figs[model], master=model_tab) self.graph_canvas[model].draw() self.graph_canvas[model].get_tk_widget().pack(fill=BOTH, expand=True) tab_frames[model] = model_tab if i >= 1: model_tab.grid_remove() dropdown_frame = OptionMenuFrame(title_frame, '', tab_frames, choice_order=models) dropdown_frame.pack(side = RIGHT) def clear_graph(self): for model in self.models: self.figs[model].clear() self.graph_canvas[model].draw() def plot(self, stats_by_subtask): latest = stats_by_subtask[-1] for model in latest: self.figs[model].clear() ax = self.figs[model].add_axes([0.2,0.2,0.7,0.7]) # set_size(3,3,ax) # ax.set_title('Confusion matrix ') conf_matrix = latest[model]['conf_matrix'] conf_matrix_df = pd.DataFrame(conf_matrix) conf_matrix_df.sort_index(axis=0, inplace=True, ascending=True) conf_matrix_df.sort_index(axis=1, inplace=True, ascending=True) sns.heatmap(conf_matrix_df, annot=True, ax=ax, linewidths=0.5, annot_kws={"size": 12}) ax.set_ylim(0, len(conf_matrix)) plt.setp(ax.get_xticklabels(), rotation=20, ha="right", rotation_mode="anchor", fontsize=10) plt.setp(ax.get_yticklabels(), rotation=45, ha="right", rotation_mode="anchor", fontsize=10) self.graph_canvas[model].draw() class OutputSamplesTabFrame(Frame): def __init__(self, master, num_rows, num_cols): super().__init__(master) self.num_rows = num_rows self.num_cols = num_cols self.imagelabels = [] for i in range(num_rows): self.imagelabels.append([]) for j in range(num_cols): label = MyImageLabel(self) label.set_image(BLANK_IMG) label.set_caption('n\\a') label.grid(column=j, row=i, sticky="swen") self.imagelabels[i].append(label) def clear_images(self): for r in range(self.num_rows): for c in range(self.num_cols): image = BLANK_IMG label = "n\\a" colour = "white" self.imagelabels[r][c].set_image(image) self.imagelabels[r][c].set_caption(label) self.imagelabels[r][c].set_style_colour(colour) def 
draw_images(self, dataset, stats, seed, num_correct_rows): """ num_correct_rows: -1: the correct and incorrect images proportional n: top n rows for the correct, rest for incorrect None: shuffled but the same for all algorithms """ image_pairs = list(zip(stats['targets_idx'], stats["preds"], stats["true"])) rng = np.random.RandomState(seed) rng.shuffle(image_pairs) if num_correct_rows is None: count = 0 for r in range(self.num_rows): for c in range(self.num_cols): if count < len(image_pairs): image = dataset.image_data[image_pairs[count][0]] label = "{}\n({})".format(image_pairs[count][1], image_pairs[count][2]) colour = "green" if image_pairs[count][1] == image_pairs[count][2] else "red" else: image = BLANK_IMG label = "n\\a" colour = "white" self.imagelabels[r][c].set_image(image) self.imagelabels[r][c].set_caption(label) self.imagelabels[r][c].set_style_colour(colour) count += 1 else: correct = [] incorrect = [] for i, pair in enumerate(image_pairs): if pair[1] == pair[2]: correct.append(pair) else: incorrect.append(pair) if num_correct_rows < 0: num_correct = int((len(correct) * self.num_rows * self.num_cols)/ (len(correct) + len(incorrect))) else: num_correct = self.num_cols * num_correct_rows num_correct = min(num_correct, len(correct)) num_incorrect = len(incorrect) count = 0 for r in range(self.num_rows): for c in range(self.num_cols): if count < num_correct: image = dataset.image_data[correct[count][0]] label = "{}\n({})".format(correct[count][1], correct[count][2]) colour = "green" elif count - num_correct < num_incorrect: image = dataset.image_data[incorrect[count - num_correct][0]] label = "{}\n({})".format(incorrect[count - num_correct][1], incorrect[count - num_correct][2]) colour = "red" else: image = BLANK_IMG label = "n\\a" colour = "white" self.imagelabels[r][c].set_image(image) self.imagelabels[r][c].set_caption(label) self.imagelabels[r][c].set_style_colour(colour) count += 1 class OutputSamplesFrame(Frame): def __init__(self, master, models, 
num_rows, num_cols, num_correct_rows): super().__init__(master, borderwidth=4) self.models = models self.rng = np.random.RandomState(0) self.num_correct_rows = num_correct_rows self.rowconfigure(0, weight=0) self.rowconfigure(1, weight=1) title_frame = Frame(self) title_frame.grid(column=0, row=0, sticky="wesn") title = Label(title_frame, text='Sample Outputs', borderwidth=5) title.pack(fill='both', expand=True, side=LEFT) TITLE_FONT = font.Font(self, size=15) title.configure(bg="gray", fg="white", font=TITLE_FONT) self.tabs = {} for model in models: model_tab = OutputSamplesTabFrame(self, num_rows, num_cols) model_tab.grid(column=0, row=1, sticky="wesn") self.tabs[model] = model_tab dropdown_frame = OptionMenuFrame(title_frame, '', self.tabs, choice_order=models) dropdown_frame.pack(side=RIGHT) def draw_images(self, dataset, stats_by_subtask): seed = self.rng.randint(999999) latest = stats_by_subtask[-1] for model in self.tabs: self.tabs[model].draw_images(dataset, latest[model], seed, self.num_correct_rows) def clear_images(self): for model in self.tabs: self.tabs[model].clear_images() class ControlFrame(Frame): def __init__(self, master): super().__init__(master, borderwidth=4) frame = Frame(self) frame.pack(fill=BOTH, expand=True) textfont = font.Font(self, size=15) self.resample_supports_button = Button(frame, text='Resample Supports', command=self.resample_supports, font=textfont) self.resample_supports_button.pack(side=LEFT, expand=True) # self.deselect_button = Button(frame, text='Deselect', command=self.deselect_all) # self.deselect_button.pack(side=LEFT) # self.select_button = Button(frame, text='Select All', command=self.select_all) # self.select_button.pack(side=LEFT) # self.select_random_button = Button(frame, text='Select 5 Random Rows', command=self.select_5_random_rows) # self.select_random_button.pack(side=LEFT) self.classify_button = Button(frame, fg='green', text='Classify', command=self.classify, font=textfont, width=50) 
self.classify_button.pack(side=LEFT, expand=True) self.resample_output_button = Button(frame, text='Resample Output', command=self.resample_outputs, font=textfont) self.resample_output_button.pack(side=LEFT, expand=True) self.reset_button = Button(frame, fg='red', text='Reset Task', command=self.reset_all, font=textfont) self.reset_button.pack(side=RIGHT) def resample_supports(self): print("ControlFrame.resample_supports") pass def deselect_all(self): pass def select_all(self): pass def classify(self): pass def resample_outputs(self): pass def reset_all(self): pass def disable_buttons(self): self.resample_supports_button['state'] = 'disable' self.classify_button['state'] = 'disable' # self.select_button['state'] = 'disable' # self.deselect_button['state'] = 'disable' # self.select_random_button['state'] = 'disable' self.resample_output_button['state'] = 'disable' self.reset_button['state'] = 'disable' def enable_buttons(self): self.resample_supports_button['state'] = 'normal' self.classify_button['state'] = 'normal' # self.select_button['state'] = 'normal' # self.deselect_button['state'] = 'normal' # self.select_random_button['state'] = 'normal' self.resample_output_button['state'] = 'normal' self.reset_button['state'] = 'normal' class MainWindow(Frame): def __init__(self, master, args): super().__init__(master) self.args = args self.stats = [] self.master = master # self.master.pack(expand=True, fill=BOTH) self.output_samples_on = (args.num_output_rows > 0) and (args.num_output_columns > 0) def initUI(self): self.columnconfigure(0, weight=1) self.columnconfigure(1, weight=1) self.columnconfigure(2, weight=1) self.columnconfigure(3, weight=1) self.rowconfigure(0, weight=1) self.rowconfigure(1, weight=0) self.support_frame = SupportSetButtonFrame(self.master, self.args.max_num_classes, self.args.max_labelled_num_images_per_class) self.support_frame.grid(column=0, row=0, sticky="wesn") center_frame = Frame(self.master) center_frame.grid(column=1, row=0, 
sticky="wesn") # center_frame.rowconfigure(0, weight=1) # center_frame.rowconfigure(1, weight=1) # center_frame.columnconfigure(0, weight=1) center_frame.configure(bg='black') self.graph_frame = GraphFrame(center_frame) self.graph_frame.grid(column=0, row=0, sticky="wesn") self.conf_matrix_frame = ConfMatrixFrame(center_frame, self.algorithm_names) self.conf_matrix_frame.grid(column=0, row=1, sticky="wesn") if self.output_samples_on: self.output_samples_frame = OutputSamplesFrame(self.master, self.algorithm_names, self.args.num_output_rows, self.args.num_output_columns, self.args.num_correct_output_rows) self.output_samples_frame.grid(column=2, row=0, sticky="wesn") self.control_frame = ControlFrame(self.master) self.control_frame.grid(column=0, row=1, sticky="wesn", columnspan=3) self.control_frame.resample_supports_button.configure(command=self.resample_supports) # self.control_frame.deselect_all = self.deselect_all # self.control_frame.select_all = self.select_all self.control_frame.classify_button.configure(command=self.classify) self.control_frame.resample_output_button.configure(command=self.resample_outputs) self.control_frame.reset_button.configure(command=self.reset_all) def init_with_server(self): print('setting up connection with server') address = ('localhost', self.args.port) self.sock = setup_connection(address) print('syncing with server') data = {'action':'setup'} send_data(self.sock, data) message = self.wait_for_response(loud=False) assert message['action'] == 'setup' self.algorithm_names = message['algorithms'] self.num_algorithms = len(self.algorithm_names) message['dataset_args']['train']['data_path'] = self.args.data_path message['dataset_args']['val']['data_path'] = self.args.data_path message['dataset_args']['test']['data_path'] = self.args.data_path args, excluded_args, parser = get_args(stdin_list=['--dataset', message['dataset'], '--dataset_args', json.dumps(message['dataset_args'])]) print(args) datasets = get_data(args) self.dataset = 
datasets['test'] assert message['dataset_sig'] == self.dataset.get_signature(), 'The datasets files and dataset_args must match!' self.sampler = Sampler(self.dataset, np.random.randint(99999), self.args.num_targets) def resample_supports(self): # print("MainFrame.resample_supports") selected = self.support_frame.get_selected() self.sampler.sync_selected(selected) previous = self.sampler.get_previous_supports() selected = self.sampler.get_selected_supports() potential = self.sampler.sample_potential_supports() self.support_frame.redraw_support_set(self.dataset, previous, selected, potential) pass def classify(self): selected = self.support_frame.get_selected() self.sampler.sync_selected(selected) selected = self.sampler.get_selected_supports() previous = self.sampler.get_previous_supports() classes = [clss for clss in selected if len(selected[clss]) > 0] classes += [clss for clss in previous if len(previous[clss]) > 0] targets = self.sampler.get_targets(set(classes)) selected_idx = [idx for clss in previous for idx in previous[clss]] selected_idx += [idx for clss in selected for idx in selected[clss]] if len(selected_idx) == 0: messagebox.showerror("Error", "No supports selected") return target_pairs = [(idx,clss) for clss in targets for idx in targets[clss]] targets_idx = [ pair[0] for pair in target_pairs ] target_labels = [ pair[1] for pair in target_pairs ] print("support", selected_idx) print("support labels:", set(classes)) print("target", targets_idx) print("target labels:", set(target_labels)) message = { 'action': 'classify', 'supports': selected_idx, # [3163, 3166, 1507, 1512, 1502, # 3283, 3286, 1627, 1632, 1622, # 3193, 3196, 1537, 1542, 1532, # 3253, 3256, 1597, 1602, 1592, # 1746, 2990, 112, 2128, 2199], # selected_idx, 'targets': targets_idx # [3155, 1517, 3152, 1526, 1508, 1525, 3168, 3169, 3173, 3164, 1528, 1520, 1527, 1503, 3159, # 3275, 1637, 3272, 1646, 1628, 1645, 3288, 3289, 3293, 3284, 1648, 1640, 1647, 1623, 3279, # 3185, 1547, 3182, 1556, 
1538, 1555, 3198, 3199, 3203, 3194, 1558, 1550, 1557, 1533, 3189, # 3245, 1607, 3242, 1616, 1598, 1615, 3258, 3259, 3263, 3254, 1618, 1610, 1617, 1593, 3249, # 2326, 333, 2087, 2250, 229, 201, 1085, 3072, 1352, 892, 1831, 1436, 391, 2411, 374] # targets_idx } send_data(self.sock, message) message = self.wait_for_response(loud=True) if message is None: messagebox.showerror("Error", "Server failed") return assert message['action'] == 'output' stats = {} for model_name in self.algorithm_names: model_stats = {} model_stats['preds'] = message['models'][model_name] model_stats['true'] = target_labels model_stats['targets_idx'] = targets_idx # acc = 0 conf_matrix = { label1:{ label2: 0 for label2 in target_labels} for label1 in target_labels} for pred, true in zip(model_stats['preds'], model_stats['true']): # acc += 1 if pred == true else 0 if pred not in conf_matrix: conf_matrix[pred] = { label: 0 for label in target_labels } conf_matrix[pred][true] += 1 # acc = acc * 1. / len(targets_idx) output = precision_recall_fscore_support(model_stats['true'], model_stats['preds'], beta=1.0) precision=output[0] recall=output[1] f1=output[2] acc=accuracy_score(model_stats['true'], model_stats['preds']) model_stats['accuracy']=acc model_stats['conf_matrix']=conf_matrix model_stats['precision']=precision.mean() model_stats['recall']=recall.mean() model_stats['f1']=f1.mean() stats[model_name] = model_stats self.stats.append(stats) self.draw_stats() self.resample_outputs() self.next_episode() def next_episode(self): self.sampler.move_selected_to_previous() self.support_frame.deselect_all() self.resample_supports() def draw_stats(self): self.graph_frame.plot(self.stats) self.conf_matrix_frame.plot(self.stats) def reset_all(self): self.graph_frame.clear_graph() self.conf_matrix_frame.clear_graph() if self.output_samples_on: self.output_samples_frame.clear_images() self.stats = [] self.support_frame.deselect_all() self.sampler = Sampler(self.dataset, np.random.randint(99999), 
self.args.num_targets) self.resample_supports() message = {'action':'reset_task'} send_data(self.sock, message) def resample_outputs(self): if len(self.stats) == 0: messagebox.showerror("Error", "Press 'Classify'") return if self.output_samples_on: self.output_samples_frame.draw_images(self.dataset, self.stats) def wait_for_response(self, loud=False): print('waiting for a response') if loud: self.control_frame.disable_buttons() messagebox.showerror("Info", "Wait for results from the server") data = wait_for_response(self.sock) if loud: self.control_frame.enable_buttons() return data def main(args): if args.task == 'fsl': root = Tk() root.geometry("2250x1000") app = MainWindow(root, args) app.init_with_server() app.initUI() app.resample_supports() root.mainloop() else: raise NotImplementedError() def get_demo_parser(parser=argparse.ArgumentParser()): parser.add_argument('--max_num_classes', default=11) parser.add_argument('--max_labelled_num_images_per_class', type=int, default=6, help='max number of images available for labeling') parser.add_argument('--num_output_columns', type=int, default=3, help='Number of columns to displays for output images') parser.add_argument('--num_output_rows', type=int, default=8, help='Number of incorrectly labellled images to display') parser.add_argument('--num_correct_output_rows', default=None, help='Number of correctly labellled images to display') parser.add_argument('--checkpoint_name', default=None) parser.add_argument('--experiment_folder', default=None) parser.add_argument('--num_targets', default=15) parser.add_argument('--port', type=int, default=8891) parser.add_argument('--task', default='fsl', type=str) parser.add_argument('--data_path', type=str, default="/Users/mateuszochal/Documents/University/PhD/datasets/", help="Path to folder with all datasets.") return parser def get_demo_args(): parser = get_demo_parser() args = parser.parse_args() args_dict = vars(args) pprint.pprint(args_dict, indent=4) args = 
Bunch(args_dict) return args class Bunch(object): def __init__(self, adict): self.__dict__.update(adict) def update(self, adict): self.__dict__.update(adict) if __name__ == '__main__': args = get_demo_args() main(args)
nilq/baby-python
python
import sys
import collections

# One toll-gate event: timestamp is a digits-only string (colons stripped by
# the caller), action is 'enter' or 'exit', km is the odometer reading (int).
Record = collections.namedtuple('Record', ['timestamp', 'action', 'km'])


class Car:
    """Collects the toll records of a single license plate and computes its bill."""

    def __init__(self, plate):
        self.plate = plate
        self.records = []

    def add_record(self, record):
        """Append a (timestamp, action, km) triple, converting km to int.

        Fixed: the original mutated the caller's list in place
        (``record[2] = int(record[2])``); the input is now left untouched.
        """
        timestamp, action, km = record
        self.records.append(Record(timestamp, action, int(km)))

    def order_records(self):
        """Sort records chronologically (fixed-width timestamps compare as strings)."""
        self.records.sort(key=lambda rec: rec.timestamp)

    def calculate_bill(self, fares):
        """Return this car's total bill, or 0 if no complete enter/exit trip exists.

        ``fares`` is indexed by the hour of entry (``timestamp[4:6]``); each
        completed trip costs fare/100 per km driven plus a flat 1 surcharge,
        and any valid bill gets an additional flat fee of 2.
        """
        bill = 0
        valid_bill = False
        enter = False
        for record in self.records:
            if record.action == 'enter':
                # The per-km fare for the whole trip is fixed by the entry hour.
                fare = fares[int(record.timestamp[4:6])]
                km = record.km
                enter = True
            elif enter and record.action == 'exit':
                km = abs(km - record.km)
                bill += fare / 100 * km + 1
                valid_bill = True
                enter = False
        if not valid_bill:
            return bill
        return bill + 2


def solve(fares, records):
    """Group raw [plate, timestamp, action, km] rows by plate and bill each car.

    Returns '<plate> $<bill>' lines, sorted by plate, for every plate whose
    bill is non-zero.
    """
    cars = {}
    res = []
    for record in records:
        cars.setdefault(record[0], Car(record[0])).add_record(record[1:])
    for plate in sorted(cars.keys()):
        cars[plate].order_records()
        bill = cars[plate].calculate_bill(fares)
        if bill:
            res.append('{} ${:.2f}'.format(plate, bill))
    return res


def main(file):
    """Read the multi-case input from *file* and return the output lines.

    Per case: one line of hourly fares, then one record per line until a blank
    line (or EOF). Cases are separated by blank lines in the output; the
    trailing separator is dropped.
    """
    res = []
    cases = int(file.readline())
    file.readline()  # skip the blank line after the case count
    for _ in range(cases):
        fares = [int(f) for f in file.readline().split()]
        records = []
        while True:
            record = [x.replace(':', '') for x in file.readline().split()]
            if len(record) == 0:
                break
            records.append(record)
        bills = solve(fares, records)
        res.extend(bill + '\n' for bill in bills)
        res.append('\n')
    return res[:-1]


if __name__ == '__main__':
    print(''.join(main(sys.stdin)), end='')
nilq/baby-python
python
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *
import glob


class Efivar(MakefilePackage):
    """Tools and libraries to work with EFI variables"""

    homepage = "https://github.com/rhboot/efivar"
    url = "https://github.com/rhboot/efivar/archive/37.tar.gz"

    version('37', sha256='74c52b4f479120fb6639e753e71163ba3f557a7a67c0be225593f9f05b253f36')
    version('36', sha256='24ed0cafbaf6d913e8f60e5da3cbbac1a1578e16cf5c95b21f2eb6753c13173f')
    version('35', sha256='747bc4d97b4bd74979e5356c44a172534a8a07184f130349fd201742e683d292')

    # The Makefile builds everything inside the src/ subdirectory.
    build_directory = 'src'

    def install(self, spec, prefix):
        """Copy the built shared libraries and public headers into the prefix."""
        with working_dir(self.build_directory):
            mkdirp(prefix.lib)
            # Pick up every shared-library artifact (libefivar.so, .so.1, ...).
            for shared_lib in glob.glob('*.so*'):
                install(shared_lib, prefix.lib)
            install_tree('include/efivar', prefix.include)
nilq/baby-python
python
# Copyright (c) 2019 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pytest


def pytest_addoption(parser):
    """Register the extra command-line options used by the CUDA extension tests."""
    parser.addoption('--disable-test-large-blocks',
                     action='store_true',
                     default=False,
                     help='Whether to run test_cuda_large_blocks which consumes quite large memory.')


@pytest.fixture(scope='session')
def cuda_test_opts(request):
    """Parse command-line options and expose them as a session fixture.

    Returns: NNablaExtCudaTestOpts
        A namedtuple with a single attribute, ``disable_test_large_blocks``,
        reflecting the ``--disable-test-large-blocks`` flag.

    (Fixed: the previous docstring described ``NNablaOpts`` attributes --
    ``ext``, ``ext_kwargs``, ``benchmark_output_dir``,
    ``function_benchmark_writer`` -- which do not exist on the returned
    object.)
    """
    from collections import namedtuple
    getoption = request.config.getoption
    disable_test_large_blocks = getoption("--disable-test-large-blocks")
    NNablaExtCudaTestOpts = namedtuple("NNablaExtCudaTestOpts", [
        'disable_test_large_blocks',
    ])
    return NNablaExtCudaTestOpts(disable_test_large_blocks)
nilq/baby-python
python
import os
import subprocess
import shutil
import datetime
import pandas as pd

# Folder layout expected next to this script.
SNAPSHOT_FOLDER = "big_test_snapshots_diarios"
RETROACTIVE_FOLDER = "retroactive_data"


def daterange(start_date, end_date, step=None):
    """Yield dates from start_date (inclusive) to end_date (exclusive), every *step* days.

    Fixed: *step* previously defaulted to ``None``, which made ``range()``
    raise ``TypeError`` whenever the argument was omitted; it now defaults to
    one day while keeping the original signature.
    """
    if step is None:
        step = 1
    for n in range(0, int((end_date - start_date).days), step):
        yield start_date + datetime.timedelta(days=n)


def clean_deaths():
    """Clean deaths_unclean.csv and write the result to deaths.csv.

    Normalizes each municipality to the region it is most frequently recorded
    under, and repairs common year typos in the data_ocorrencia column.
    """
    df = pd.read_csv("deaths_unclean.csv")

    # Assign to every municipality its most common region.
    g = df.groupby(["nom_municipio", "nom_regional"]).count()
    cities_regions = {c: ("", 0) for c in df["nom_municipio"].unique()}
    for c, r in g.index:
        count = g.loc[(c, r)]["data_ocorrencia"]
        if cities_regions[c][1] < count:
            cities_regions[c] = (r, count)
    for c in cities_regions:
        df.loc[df["nom_municipio"] == c, "nom_regional"] = cities_regions[c][0]

    # Repair frequent digit-swap typos of the year 2020 in the date column
    # (same replacements, in the same order, as the original chain).
    for typo in ("2002-", "2022-", "2200-", "2202-", "2220-", "2222-"):
        df["data_ocorrencia"] = df["data_ocorrencia"].str.replace(typo, "2020-")
    # Anything still starting with 200x/201x is unrecoverable; pin to 2020-01-01.
    df.loc[df["data_ocorrencia"].str.startswith("201"), "data_ocorrencia"] = "2020-01-01"
    df.loc[df["data_ocorrencia"].str.startswith("200"), "data_ocorrencia"] = "2020-01-01"

    df.to_csv("deaths.csv")


def copy_cases_file(date):
    """Stage the daily compiled snapshot for *date* as deaths_unclean.csv."""
    fname = "{date_str}_compilado.csv".format(date_str=date.strftime("%Y_%m_%d"))
    shutil.copy(os.path.join(SNAPSHOT_FOLDER, fname), "deaths_unclean.csv")


def copy_retroactive_file(date):
    """Stage the retroactive data snapshot for *date* as retroactive.csv."""
    fname = "retroactive_{date_str}.csv".format(date_str=date.strftime("%Y_%m_%d"))
    shutil.copy(os.path.join(RETROACTIVE_FOLDER, fname), "retroactive.csv")


def gen_onset_to_death(date):
    """Summarize the onset-to-death delays for *date* into onset_to_death.csv."""
    fname = "{date_str}_onset_to_death.csv".format(date_str=date.strftime("%Y_%m_%d"))
    df = pd.read_csv(os.path.join(SNAPSHOT_FOLDER, fname))
    # NOTE(review): the count includes negative delays that are filtered out of
    # the mean/std below -- presumably intentional; confirm.
    count = df.data_onset_death.count()
    df = df[df.data_onset_death >= 0]
    pd.DataFrame(
        {
            "count": [count],
            "avg_days": [df.data_onset_death.mean()],
            "std_days": [df.data_onset_death.std()],
            "coeff_variation": [df.data_onset_death.std() / df.data_onset_death.mean()],
        }
    ).to_csv("onset_to_death.csv", index=False)


def prepare_google_mobility(date):
    """Truncate the Google mobility report to dates more than 7 days before *date*."""
    fname = "Global_Mobility_Report.csv"
    df = pd.read_csv(os.path.join(SNAPSHOT_FOLDER, fname), low_memory=False)
    df.date = pd.to_datetime(df.date, format="%Y-%m-%d")
    last_mobility_datetime = datetime.datetime(date.year, date.month, date.day) - datetime.timedelta(days=7)
    df = df[df.date < last_mobility_datetime]
    # TODO: Convert date back to str
    df.to_csv(fname, index=False)


# Rscript argument templates: the first run trains from scratch; subsequent
# runs warm-start from the previous week's fitted model (-e flag).
START_OPTS = "-m DEVELOP -k 7 -i 3000 -w 2400 -c 1 -t 8 -r '{date_str}' -v TRUE >'{date_str}.out'"
SUBSEQ_OPTS = "-m DEVELOP -k 7 -i 1200 -w 600 -c 1 -t 8 -r '{date_str}' -e {previous_model_file} -v TRUE >'{date_str}.out'"


if __name__ == "__main__":
    start_date = datetime.date(2020, 5, 31)
    end_date = datetime.date(2021, 2, 1)
    opts = START_OPTS
    last_model_path = None
    for data_date in daterange(start_date, end_date, 7):
        reference_date = data_date + datetime.timedelta(days=1)
        print(reference_date.strftime("%Y-%m-%d"))

        # Stage all input files for this week's run.
        prepare_google_mobility(data_date)
        gen_onset_to_death(data_date)
        copy_cases_file(data_date)
        copy_retroactive_file(data_date)
        clean_deaths()

        # Run the R model. NOTE(review): shell=True with interpolated strings is
        # shell-injection prone; inputs here are locally generated dates/paths,
        # but prefer an argument list if this ever takes external input.
        subprocess.call(
            'Rscript run_regions.R {opts}'.format(
                opts=opts.format(
                    date_str=reference_date.strftime("%Y-%m-%d"),
                    previous_model_file=last_model_path,
                )
            ),
            shell=True
        )

        # Locate the model file this run just saved.
        results_path = "../results/{date_str}".format(
            date_str=reference_date.strftime("%Y_%m_%d"),
        )
        last_model_path = os.path.join(
            results_path,
            os.listdir(results_path)[0],
        )
        print(last_model_path)

        # Ensure we will be loading the previous model on the next step
        opts = SUBSEQ_OPTS
nilq/baby-python
python
import os
import numpy as np
import mdtraj as md
from simtk import unit
from simtk.openmm import LangevinIntegrator
from simtk.openmm.app import Simulation
from simtk.openmm.app.pdbfile import PDBFile
from cg_openmm.cg_model.cgmodel import CGModel
from cg_openmm.utilities.iotools import write_pdbfile_without_topology
from cg_openmm.utilities.helix_utils import *
from scipy.optimize import differential_evolution, root_scalar


def optimize_helix_LJ_parameters(radius, pitch, n_particle_bb, bond_dist_bb=None, bond_dist_sc=None, DE_popsize=50,
                                 pdbfile='LJ_helix_openmm_energy.pdb', plotfile='LJ_helix_openmm_energy.pdf'):
    """
    Optimize backbone and sidechain particle parameters along a helix with specified
    radius and pitch, with equal spacing of backbone particles and sidechain beads
    normal to the helix.

    :param radius: fixed helical radius
    :type radius: Quantity

    :param pitch: fixed helical pitch (c*2*pi)
    :type pitch: Quantity

    :param n_particle_bb: Number of backbone particles to model
    :type n_particle_bb: int

    :param bond_dist_bb: bond distance for bb-bb bonds. If None, bond distance will also be optimized.
    :type bond_dist_bb: Quantity

    :param bond_dist_sc: bond distance for bb-sc bonds. If None, bond distance will also be optimized.
    :type bond_dist_sc: Quantity

    :param DE_popsize: population size to use in SciPy differential_evolution solver (default=50)
    :type DE_popsize: int

    :param pdbfile: Path to pdb file for saving the helical structure (default='LJ_helix_openmm_energy.pdb')
    :type pdbfile: str

    :param plotfile: Path to pdf file for plotting the helical equations and particle positions (default='LJ_helix_openmm_energy.pdf')
    :type plotfile: str

    :returns:
      - opt_sol - Results from scipy.optimize (dict)
      - geometry - Dictionary containing key geometric parameters of the optimized helix
    """

    r_unit = radius.unit
    # Use angstrom for writing pdb file:
    radius = radius.value_in_unit(unit.angstrom)
    pitch = pitch.value_in_unit(unit.angstrom)

    r = radius
    c = pitch/(2*np.pi) # Helical rise parameter

    # t_delta is related to the specified bond distance - this must be computed at each iteration

    # Here we need to create a cgmodel
    # Set initial epsilon parameters (placeholders; sigma is what gets optimized)
    epsilon_bb = 1.0 * unit.kilojoule_per_mole
    epsilon_sc = 1.0 * unit.kilojoule_per_mole

    # Set initial sigma parameters
    sigma_bb = 1.0 * unit.angstrom
    sigma_sc = 1.0 * unit.angstrom

    cgmodel = get_helix_cgmodel(sigma_bb,sigma_sc,epsilon_bb,epsilon_sc,n_particle_bb)

    # Get particle type lists and bonded lists:
    (particle_type_list, bb_array, sc_array,
    bb_bond_list, sc_bond_list,
    b_angle_list, s_angle_list,
    bbbb_torsion_list, bbbs_torsion_list,
    sbbs_torsion_list) = get_helix_particle_bonded_lists(cgmodel)

    # Set up Simulation object beforehand (reused by every objective evaluation):
    simulation_time_step = 5.0 * unit.femtosecond
    friction = 0.0 / unit.picosecond
    integrator = LangevinIntegrator(
        0.0 * unit.kelvin, friction, simulation_time_step.in_units_of(unit.picosecond)
    )
    simulation = Simulation(cgmodel.topology, cgmodel.system, integrator)

    if bond_dist_bb is None and bond_dist_sc is None:
        #-----------------------------#
        # Unconstrained bonds version #
        #-----------------------------#

        # Set sidechain bonds to the backbone bond distance for now
        # Vary t spacing, sigma_bb, sigma_sc

        params = (simulation, bb_array, sc_array, particle_type_list, r, c, n_particle_bb)

        # Set optimization bounds [t, sigma_bb, sigma_sc]:
        bounds = [(0.01,np.pi),(r/50,15*r),(r/50,15*r)]

        opt_sol = differential_evolution(
            compute_helix_openmm_energy_vary_LJ,
            bounds,
            args=params,
            polish=True,
            popsize=DE_popsize,
        )

        t_delta_opt = opt_sol.x[0]
        sigma_bb_opt = opt_sol.x[1]
        sigma_sc_opt = opt_sol.x[2]

        # Parametric coordinate of each backbone bead along the helix:
        t_par = np.zeros(n_particle_bb)
        for i in range(n_particle_bb):
            t_par[i] = i*t_delta_opt

    else:
        #---------------------------#
        # Constrained bonds version #
        #---------------------------#

        # For now, we have to specify both bb-bb and bb-sc bond distances
        bond_dist_bb = bond_dist_bb.value_in_unit(unit.angstrom)
        bond_dist_sc = bond_dist_sc.value_in_unit(unit.angstrom)

        params = (simulation, bb_array, sc_array, particle_type_list, r, c, n_particle_bb, bond_dist_bb, bond_dist_sc)

        # Set optimization bounds [sigma_bb, sigma_sc]:
        bounds = [(r/50,15*r),(r/50,15*r)]

        opt_sol = differential_evolution(
            compute_helix_openmm_energy_vary_LJ_constrained,
            bounds,
            args=params,
            polish=True,
            popsize=DE_popsize,
        )

        sigma_bb_opt = opt_sol.x[0]
        sigma_sc_opt = opt_sol.x[1]

        # Compute particle spacing based on bond constraints
        t_delta_opt = get_t_from_bond_distance(r,c,bond_dist_bb)
        if t_delta_opt < 0:
            # NOTE(review): sign flip assumes the solver may return the
            # mirrored solution; the print looks like leftover debug output --
            # confirm against get_t_from_bond_distance.
            print(t_delta_opt)
            t_delta_opt *= -1

        t_par = np.zeros(n_particle_bb)
        for i in range(n_particle_bb):
            t_par[i] = i*t_delta_opt

    # Equilibrium LJ distance (for visual representation)
    r_eq_bb = sigma_bb_opt*np.power(2,(1/6))
    r_eq_sc = sigma_sc_opt*np.power(2,(1/6))

    # Get particle positions:
    xyz_par = get_helix_coordinates(r,c,t_par)

    # Place sidechain particles normal to helix
    if bond_dist_sc == None:  # NOTE(review): prefer 'is None'; kept as-is here
        # Use optimized bond length from first two backbone beads:
        r_bs = dist_unitless(xyz_par[0,:],xyz_par[1,:])
    else:
        # Use specified bb-sc bond distance:
        r_bs = bond_dist_sc

    # Scale x,y radially outward by r_bs so the sc bead sits on the helix normal:
    side_xyz = np.zeros((n_particle_bb,3))
    side_xyz[:,0] = (1+r_bs/r)*xyz_par[:,0]
    side_xyz[:,1] = (1+r_bs/r)*xyz_par[:,1]
    side_xyz[:,2] = xyz_par[:,2]

    # Now, set the backbone and sidechain positions to the correct bead indices:
    positions = np.zeros((2*n_particle_bb,3))

    # This assumes that the backbone and sidechain beads are ordered from end-to-end
    positions[bb_array] = xyz_par
    positions[sc_array] = side_xyz

    # Write pdb file
    cgmodel.positions = positions * unit.angstrom
    write_pdbfile_without_topology(cgmodel, pdbfile)

    # Also write dcd file (better precision)
    dcdfile = pdbfile[:-3]+'dcd'
    dcdtraj = md.Trajectory(
        xyz=positions,
        topology=md.Topology.from_openmm(cgmodel.topology),
    )
    md.Trajectory.save_dcd(dcdtraj,dcdfile)

    # Store key geometric parameters (converted back to the caller's radius unit):
    geometry = {}
    geometry['sigma_bb'] = (sigma_bb_opt*unit.angstrom).in_units_of(r_unit)
    geometry['sigma_sc'] = (sigma_sc_opt*unit.angstrom).in_units_of(r_unit)
    geometry['helical_radius'] = (r*unit.angstrom).in_units_of(r_unit)
    geometry['particle_spacing'] = t_delta_opt * unit.radian
    geometry['pitch'] = (2*np.pi*c*unit.angstrom).in_units_of(r_unit)

    # Load dcd file into mdtraj
    traj = md.load(dcdfile,top=md.Topology.from_openmm(cgmodel.topology))

    # Get bb-bb bond distance
    geometry['bb_bb_distance'] = (dist_unitless(positions[bb_bond_list[0][0],:],positions[bb_bond_list[0][1],:]) * unit.angstrom).in_units_of(r_unit)
    geometry['bb_sc_distance'] = (dist_unitless(positions[sc_bond_list[0][0],:],positions[sc_bond_list[0][1],:]) * unit.angstrom).in_units_of(r_unit)

    # Get bb-bb-bb angle
    angle_indices = np.array([[b_angle_list[0][0], b_angle_list[0][1], b_angle_list[0][2]]])
    geometry['bb_bb_bb_angle'] = (md.compute_angles(traj,angle_indices)*unit.radians).in_units_of(unit.degrees)[0][0]

    # Get bb-bb-sc angle
    angle_indices = np.array([[s_angle_list[0][0], s_angle_list[0][1], s_angle_list[0][2]]])
    geometry['bb_bb_sc_angle'] = (md.compute_angles(traj,angle_indices)*unit.radians).in_units_of(unit.degrees)[0][0]

    # Get bb-bb-bb-bb torsion
    dihedral_indices = np.array([[bbbb_torsion_list[0][0], bbbb_torsion_list[0][1], bbbb_torsion_list[0][2], bbbb_torsion_list[0][3]]])
    geometry['bb_bb_bb_bb_angle'] = (md.compute_dihedrals(traj,dihedral_indices)*unit.radians).in_units_of(unit.degrees)[0][0]

    # Get sc-bb-bb-sc torsion
    dihedral_indices = np.array([[sbbs_torsion_list[0][0], sbbs_torsion_list[0][1], sbbs_torsion_list[0][2], sbbs_torsion_list[0][3]]])
    geometry['sc_bb_bb_sc_angle'] = (md.compute_dihedrals(traj,dihedral_indices)*unit.radians).in_units_of(unit.degrees)[0][0]

    # Get bb-bb-bb-sc torsion
    dihedral_indices = np.array([[bbbs_torsion_list[0][0], bbbs_torsion_list[0][1], bbbs_torsion_list[0][2], bbbs_torsion_list[0][3]]])
    geometry['bb_bb_bb_sc_angle'] = (md.compute_dihedrals(traj,dihedral_indices)*unit.radians).in_units_of(unit.degrees)[0][0]

    # Plot helix:
    if plotfile is not None:
        plot_LJ_helix(r,c,t_par,r_eq_bb,r_eq_sc=r_eq_sc,plotfile=plotfile)

    return opt_sol, geometry


def compute_helix_openmm_energy_vary_LJ(geo, simulation, bb_array, sc_array, particle_type_list, r, c, n_particle_bb):
    """
    Internal function for computing openmm energy of Lennard-Jones 12-6 helix.
    Objective for the unconstrained differential_evolution search;
    geo = [t_delta, sigma_bb, sigma_sc].
    """

    # Particle spacing (radians)
    t_delta = geo[0]

    # Backbone sigma parameter
    sigma_bb = geo[1] * unit.angstrom

    # Sidechain sigma parameter
    sigma_sc = geo[2] * unit.angstrom

    t1 = np.zeros(n_particle_bb)
    for i in range(n_particle_bb):
        t1[i] = i*t_delta

    xyz = get_helix_coordinates(r,c,t1)

    # If the bonds, angles, and backbone torsions are at their equilibrium positions,
    # then we don't need to update any parameters in the simulation object. Just
    # the nonbonded energies need to be evaluated.

    # Place sidechain particles normal to helix with same bond length as bb_bb
    r_bs = dist_unitless(xyz[0,:],xyz[1,:])

    side_xyz = np.zeros((n_particle_bb,3))
    side_xyz[:,0] = (1+r_bs/r)*xyz[:,0]
    side_xyz[:,1] = (1+r_bs/r)*xyz[:,1]
    side_xyz[:,2] = xyz[:,2]

    # Now, set the backbone and sidechain positions to the correct bead indices:
    positions = np.zeros((2*n_particle_bb,3))

    # This assumes that the backbone and sidechain beads are ordered from end-to-end
    positions[bb_array] = xyz
    positions[sc_array] = side_xyz

    positions *= unit.angstrom

    # Update the nonbonded parameters:
    for force_index, force in enumerate(simulation.system.getForces()):
        force_name = force.__class__.__name__
        if force_name == 'NonbondedForce':
            for particle_index in range(len(particle_type_list)):
                (q,sigma_old,eps) = force.getParticleParameters(particle_index)
                # Only need to change the sigma values here:
                if particle_type_list[particle_index] == 'bb':
                    force.setParticleParameters(particle_index,q,sigma_bb,eps)
                else:
                    force.setParticleParameters(particle_index,q,sigma_sc,eps)
            force.updateParametersInContext(simulation.context)

    # Update the positions:
    simulation.context.setPositions(positions)

    potential_energy = simulation.context.getState(getEnergy=True).getPotentialEnergy()
    U_helix = potential_energy.value_in_unit(unit.kilojoules_per_mole)

    return U_helix


def compute_helix_openmm_energy_vary_LJ_constrained(
        geo, simulation, bb_array, sc_array, particle_type_list, r, c, n_particle_bb, bond_dist_bb, bond_dist_sc):
    """
    Internal function for computing openmm energy of Lennard-Jones 12-6 helix.
    Objective for the bond-constrained differential_evolution search;
    geo = [sigma_bb, sigma_sc], particle spacing fixed by bond_dist_bb.
    """

    # Backbone sigma parameter
    sigma_bb = geo[0] * unit.angstrom

    # Sidechain sigma parameter
    sigma_sc = geo[1] * unit.angstrom

    # Particle spacing (radians)
    t_delta = get_t_from_bond_distance(r,c,bond_dist_bb)

    t1 = np.zeros(n_particle_bb)
    for i in range(n_particle_bb):
        t1[i] = i*t_delta

    xyz = get_helix_coordinates(r,c,t1)

    # If the bonds, angles, and backbone torsions are at their equilibrium positions,
    # then we don't need to update any parameters in the simulation object. Just
    # the nonbonded energies need to be evaluated.

    # Place sidechain particles normal to helix with same bond length as bb_bb
    r_bs = bond_dist_sc

    side_xyz = np.zeros((n_particle_bb,3))
    side_xyz[:,0] = (1+r_bs/r)*xyz[:,0]
    side_xyz[:,1] = (1+r_bs/r)*xyz[:,1]
    side_xyz[:,2] = xyz[:,2]

    # Now, set the backbone and sidechain positions to the correct bead indices:
    positions = np.zeros((2*n_particle_bb,3))

    # This assumes that the backbone and sidechain beads are ordered from end-to-end
    positions[bb_array] = xyz
    positions[sc_array] = side_xyz

    positions *= unit.angstrom

    # Update the nonbonded parameters:
    for force_index, force in enumerate(simulation.system.getForces()):
        force_name = force.__class__.__name__
        if force_name == 'NonbondedForce':
            for particle_index in range(len(particle_type_list)):
                (q,sigma_old,eps) = force.getParticleParameters(particle_index)
                # Only need to change the sigma values here:
                if particle_type_list[particle_index] == 'bb':
                    force.setParticleParameters(particle_index,q,sigma_bb,eps)
                else:
                    force.setParticleParameters(particle_index,q,sigma_sc,eps)
            force.updateParametersInContext(simulation.context)

    # Update the positions:
    simulation.context.setPositions(positions)

    potential_energy = simulation.context.getState(getEnergy=True).getPotentialEnergy()
    U_helix = potential_energy.value_in_unit(unit.kilojoules_per_mole)

    return U_helix
nilq/baby-python
python
#!/usr/bin/env python
import asyncio
import sys
import logging
import unittest
import conf
from os.path import join, realpath
from hummingbot.market.bitcoin_com.bitcoin_com_websocket import BitcoinComWebsocket
from hummingbot.market.bitcoin_com.bitcoin_com_auth import BitcoinComAuth

sys.path.insert(0, realpath(join(__file__, "../../../")))


class BitcoinComWebsocketUnitTest(unittest.TestCase):
    """Live-connection smoke test for the bitcoin.com websocket client."""

    auth = BitcoinComAuth(conf.bitcoin_com_api_key, conf.bitcoin_com_secret_key)
    ws = BitcoinComWebsocket()

    @classmethod
    def setUpClass(cls):
        cls.ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
        cls.ev_loop.run_until_complete(cls.wait_til_ready())

    @classmethod
    async def wait_til_ready(cls):
        """Keep reconnecting until the underlying client reports an open socket."""
        connected = False
        while not connected:
            await cls.ws.connect()
            connected = cls.ws._client.open is True
            if not connected:
                await asyncio.sleep(1)
        print("Websocket connection established.")

    def test_open(self):
        """Assert that the websocket connection was opened successfully."""
        self.assertTrue(self.ws._client.open)


def main():
    logging.basicConfig(level=logging.INFO)
    unittest.main()


if __name__ == "__main__":
    main()
nilq/baby-python
python
# -*- coding: utf-8 -*- """ Created on Tue Oct 13 13:57:29 2020 @author: lentzye """ import mykeys
nilq/baby-python
python
from IBM_diffprivlib import diffprivlib_single_query
from smartnoise import smartnoise_single_query
from openmined_pydp import openmind_pydp_single_query

# Map each library name to its benchmark entry point. An explicit dispatch
# table replaces the previous eval(library_name + '_single_query'): eval on a
# constructed name is needlessly dangerous and only reports typos at runtime.
# NOTE(review): the original comment also listed 'additive_noise', but no such
# entry point is imported here -- confirm before adding it to the table.
SINGLE_QUERY_RUNNERS = {
    'diffprivlib': diffprivlib_single_query,
    'smartnoise': smartnoise_single_query,
    'openmined_pydp': openmind_pydp_single_query,
}

library_name = 'smartnoise'  # libraries: 'diffprivlib', 'smartnoise', 'openmined_pydp'
dataset_folder_path = 'E:\\MS_Thesis\\publication_stuff\\car_datasets\\gaussian_datasets\\'  # path to the dataset folder
number_of_csv_files = 7  # number of csv files in dataset folder
query = 'variance'  # query could be = count, sum, mean, var (for smartnoise: variance)
total_experiments = 500

SINGLE_QUERY_RUNNERS[library_name](dataset_folder_path=dataset_folder_path,
                                   number_of_csv_files=number_of_csv_files,
                                   query_name=query,
                                   number_of_experiments=total_experiments)
nilq/baby-python
python
# Read N, then N integers (one per line), and print how many distinct
# values appeared.
num_values = int(input())
seen = set()
for _ in range(num_values):
    seen.add(int(input()))
print(len(seen))
nilq/baby-python
python
# -*- coding: UTF-8 -*-
"""
This module provides the abstract base classes and core concepts
for the model elements in behave.
"""

import os.path
import sys
import six
from behave.capture import Captured
from behave.textutil import text as _text
from enum import Enum

# True when running on a Windows platform; used to normalize path separators.
PLATFORM_WIN = sys.platform.startswith("win")


def posixpath_normalize(path):
    """Convert Windows backslash path separators to POSIX forward slashes."""
    return path.replace("\\", "/")


# -----------------------------------------------------------------------------
# GENERIC MODEL CLASSES:
# -----------------------------------------------------------------------------
class Status(Enum):
    """Provides the (test-run) status of a model element.
    Features and Scenarios use: untested, skipped, passed, failed.
    Steps may use all enum-values.

    Enum values:
    * untested (initial state):

        Defines the initial state before a test-run.
        Sometimes used to indicate that the model element was not executed
        during a test run.

    * skipped:

        A model element is skipped because it should not run.
        This is caused by filtering mechanisms, like tags, active-tags,
        file-location arg, select-by-name, etc.

    * passed: A model element was executed and passed (without failures).
    * failed: Failures occurred while executing it.
    * undefined: Used for undefined-steps (no step implementation was found).
    * executing: Marks the steps during execution (used in a formatter)

    .. versionadded:: 1.2.6
        Superceeds string-based status values.
    """
    untested = 0
    skipped = 1
    passed = 2
    failed = 3
    undefined = 4
    executing = 5

    # NOTE(review): defining __eq__ sets __hash__ to None on Python 3, which
    # makes Status members unhashable — confirm members are never used as
    # dict/set keys, or consider restoring `__hash__ = Enum.__hash__`.
    def __eq__(self, other):
        """Comparison operator equals-to other value.
        Supports other enum-values and string (for backward compatibility).

        EXAMPLES::

            status = Status.passed
            assert status == Status.passed
            assert status == "passed"
            assert status != "failed"

        :param other:   Other value to compare (enum-value, string).
        :return: True, if both values are equal. False, otherwise.
        """
        if isinstance(other, six.string_types):
            # -- CONVENIENCE: Compare with string-name (backward-compatible)
            return self.name == other
        return super(Status, self).__eq__(other)

    @classmethod
    def from_name(cls, name):
        """Select enumeration value by using its name.

        :param name:    Name as key to the enum value (as string).
        :return: Enum value (instance)
        :raises: LookupError, if status name is unknown.
        """
        # pylint: disable=no-member
        enum_value = cls.__members__.get(name, None)
        if enum_value is None:
            known_names = ", ".join(cls.__members__.keys())
            raise LookupError("%s (expected: %s)" % (name, known_names))
        return enum_value


class Argument(object):
    """An argument found in a *feature file* step name and extracted using
    step decorator `parameters`_.

    The attributes are:

    .. attribute:: original

       The actual text matched in the step name.

    .. attribute:: value

       The potentially type-converted value of the argument.

    .. attribute:: name

       The name of the argument. This will be None if the parameter is
       anonymous.

    .. attribute:: start

       The start index in the step name of the argument. Used for display.

    .. attribute:: end

       The end index in the step name of the argument. Used for display.
    """
    def __init__(self, start, end, original, value, name=None):
        self.start = start
        self.end = end
        self.original = original
        self.value = value
        self.name = name


# @total_ordering
# class FileLocation(unicode):
class FileLocation(object):
    """
    Provides a value object for file location objects.
    A file location consists of:

      * filename
      * line (number), optional

    LOCATION SCHEMA:
      * "{filename}:{line}" or
      * "{filename}" (if line number is not present)
    """
    __pychecker__ = "missingattrs=line"     # -- Ignore warnings for 'line'.

    def __init__(self, filename, line=None):
        # On Windows, store the filename with POSIX separators so that
        # locations compare and print consistently across platforms.
        if PLATFORM_WIN:
            filename = posixpath_normalize(filename)
        self.filename = filename
        self.line = line

    def get(self):
        """Return the filename part of the location."""
        return self.filename

    def abspath(self):
        return os.path.abspath(self.filename)

    def basename(self):
        return os.path.basename(self.filename)

    def dirname(self):
        return os.path.dirname(self.filename)

    def relpath(self, start=os.curdir):
        """Compute relative path for start to filename.

        :param start: Base path or start directory (default=current dir).
        :return: Relative path from start to filename
        """
        return os.path.relpath(self.filename, start)

    def exists(self):
        return os.path.exists(self.filename)

    def _line_lessthan(self, other_line):
        # Ordering helper: a missing line number sorts before any real one.
        if self.line is None:
            # return not (other_line is None)
            return other_line is not None
        elif other_line is None:
            return False
        else:
            return self.line < other_line

    def __eq__(self, other):
        # Supports comparison both with FileLocation objects and with plain
        # filename strings (string comparison ignores the line number).
        if isinstance(other, FileLocation):
            return self.filename == other.filename and self.line == other.line
        elif isinstance(other, six.string_types):
            return self.filename == other
        else:
            raise TypeError("Cannot compare FileLocation with %s:%s" % \
                            (type(other), other))

    def __ne__(self, other):
        # return not self == other    # pylint: disable=unneeded-not
        return not self.__eq__(other)

    def __lt__(self, other):
        # Order primarily by filename, then by line number.
        if isinstance(other, FileLocation):
            if self.filename < other.filename:
                return True
            elif self.filename > other.filename:
                return False
            else:
                assert self.filename == other.filename
                return self._line_lessthan(other.line)
        elif isinstance(other, six.string_types):
            return self.filename < other
        else:
            raise TypeError("Cannot compare FileLocation with %s:%s" % \
                            (type(other), other))

    def __le__(self, other):
        # -- SEE ALSO: python2.7, functools.total_ordering
        # return not other < self     # pylint unneeded-not
        return other >= self

    def __gt__(self, other):
        # -- SEE ALSO: python2.7, functools.total_ordering
        if isinstance(other, FileLocation):
            return other < self
        else:
            return self.filename > other

    def __ge__(self, other):
        # -- SEE ALSO: python2.7, functools.total_ordering
        # return not self < other
        return not self.__lt__(other)

    def __repr__(self):
        return u'<FileLocation: filename="%s", line=%s>' % \
               (self.filename, self.line)

    def __str__(self):
        filename = self.filename
        if isinstance(filename, six.binary_type):
            filename = _text(filename, "utf-8")
        if self.line is None:
            return filename
        return u"%s:%d" % (filename, self.line)

    if six.PY2:
        # On Python 2 the textual form above becomes __unicode__ and
        # __str__ returns the UTF-8 encoded bytes.
        __unicode__ = __str__
        __str__ = lambda self: self.__unicode__().encode("utf-8")

    @classmethod
    def for_function(cls, func, curdir=None):
        """Extracts the location information from the function and builds
        the location string (schema: "{source_filename}:{line_number}").

        :param func: Function whose location should be determined.
        :return: FileLocation object
        """
        func = unwrap_function(func)
        function_code = six.get_function_code(func)
        filename = function_code.co_filename
        line_number = function_code.co_firstlineno

        curdir = curdir or os.getcwd()
        filename = os.path.relpath(filename, curdir)
        return cls(filename, line_number)


# -----------------------------------------------------------------------------
# ABSTRACT MODEL CLASSES (and concepts):
# -----------------------------------------------------------------------------
class BasicStatement(object):
    # Base class for feature-file statements (keyword + name + file location),
    # plus captured-output and error-context bookkeeping.
    def __init__(self, filename, line, keyword, name):
        filename = filename or '<string>'
        filename = os.path.relpath(filename, os.getcwd())   # -- NEEDS: abspath?
        self.location = FileLocation(filename, line)
        assert isinstance(keyword, six.text_type)
        assert isinstance(name, six.text_type)
        self.keyword = keyword
        self.name = name
        # -- SINCE: 1.2.6
        self.captured = Captured()
        # -- ERROR CONTEXT INFO:
        self.exception = None
        self.exc_traceback = None
        self.error_message = None

    @property
    def filename(self):
        # return os.path.abspath(self.location.filename)
        return self.location.filename

    @property
    def line(self):
        return self.location.line

    def reset(self):
        """Reset captured output and error context for a new test run."""
        # -- RESET: Captured output data
        self.captured.reset()
        # -- RESET: ERROR CONTEXT INFO
        self.exception = None
        self.exc_traceback = None
        self.error_message = None

    def store_exception_context(self, exception):
        # Keep the exception and its traceback for later error reporting.
        self.exception = exception
        self.exc_traceback = sys.exc_info()[2]

    def __hash__(self):
        # -- NEEDED-FOR: PYTHON3
        # return id((self.keyword, self.name))
        return id(self)

    def __eq__(self, other):
        # -- PYTHON3 SUPPORT, ORDERABLE:
        # NOTE: Ignore potential FileLocation differences.
        return (self.keyword, self.name) == (other.keyword, other.name)

    def __lt__(self, other):
        # -- PYTHON3 SUPPORT, ORDERABLE:
        # NOTE: Ignore potential FileLocation differences.
        return (self.keyword, self.name) < (other.keyword, other.name)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __le__(self, other):
        # -- SEE ALSO: python2.7, functools.total_ordering
        # return not other < self
        return other >= self

    def __gt__(self, other):
        # -- SEE ALSO: python2.7, functools.total_ordering
        assert isinstance(other, BasicStatement)
        return other < self

    def __ge__(self, other):
        # -- SEE ALSO: python2.7, functools.total_ordering
        # OR: return self >= other
        return not self < other     # pylint: disable=unneeded-not

    # def __cmp__(self, other):
    #     # -- NOTE: Ignore potential FileLocation differences.
    #     return cmp((self.keyword, self.name), (other.keyword, other.name))


class TagStatement(BasicStatement):
    # Statement that may carry tags (used for tag-based run filtering).
    def __init__(self, filename, line, keyword, name, tags):
        if tags is None:
            tags = []
        super(TagStatement, self).__init__(filename, line, keyword, name)
        self.tags = tags

    def should_run_with_tags(self, tag_expression):
        """Determines if statement should run when the tag expression is used.

        :param tag_expression:  Runner/config environment tags to use.
        :return: True, if examples should run. False, otherwise (skip it).
        """
        return tag_expression.check(self.tags)


class TagAndStatusStatement(BasicStatement):
    # Statement with tags and a (cached) test-run status.
    # final_status = ('passed', 'failed', 'skipped')
    final_status = (Status.passed, Status.failed, Status.skipped)

    def __init__(self, filename, line, keyword, name, tags):
        super(TagAndStatusStatement, self).__init__(filename, line, keyword, name)
        self.tags = tags
        self.should_skip = False
        self.skip_reason = None
        self._cached_status = Status.untested

    def should_run_with_tags(self, tag_expression):
        """Determines if statement should run when the tag expression is used.

        :param tag_expression:  Runner/config environment tags to use.
        :return: True, if examples should run. False, otherwise (skip it).
        """
        return tag_expression.check(self.tags)

    @property
    def status(self):
        if self._cached_status not in self.final_status:
            # -- RECOMPUTE: As long as final status is not reached.
            self._cached_status = self.compute_status()
        return self._cached_status

    def set_status(self, value):
        """Set the cached status; accepts a Status value or its string name."""
        if isinstance(value, six.string_types):
            value = Status.from_name(value)
        self._cached_status = value

    def clear_status(self):
        self._cached_status = Status.untested

    def reset(self):
        self.should_skip = False
        self.skip_reason = None
        self.clear_status()

    def compute_status(self):
        # Subclasses must derive a Status from their child elements.
        raise NotImplementedError


class Replayable(object):
    # Mixin: replays this model element on a formatter by dispatching to the
    # formatter method named by the class attribute `type`.
    type = None

    def replay(self, formatter):
        getattr(formatter, self.type)(self)


# -----------------------------------------------------------------------------
# UTILITY FUNCTIONS:
# -----------------------------------------------------------------------------
def unwrap_function(func, max_depth=10):
    """Unwraps a function that is wrapped with :func:`functools.partial()`"""
    # Follow the __wrapped__ chain at most `max_depth` times to avoid loops.
    iteration = 0
    wrapped = getattr(func, "__wrapped__", None)
    while wrapped and iteration < max_depth:
        func = wrapped
        wrapped = getattr(func, "__wrapped__", None)
        iteration += 1
    return func
nilq/baby-python
python
#!/usr/bin/env python3
from pybf import BloomFilter


def _run_bf_test(num_items, error, key, label):
    """Shared BloomFilter test driver.

    Builds a filter for `num_items` keys with target false-positive rate
    `error`, then checks the core Bloom-filter contract:
      * an empty filter reports nothing as present;
      * there are no false negatives after insertion;
      * the measured false-positive rate on never-inserted keys is reported.

    :param num_items: number of keys to insert.
    :param error: target false-positive probability.
    :param key: callable mapping an int i to the key to insert (e.g. int/str).
    :param label: key-type label used in the progress message.
    """
    print(f"Bloom filter test for {label} keys")
    bf = BloomFilter(num_items, error)
    sz = len(bf)
    print(f'size: {sz}')
    # checking: nothing should be present before any insertion
    for i in range(100):
        assert not (key(i) in bf)
    # insertion
    for i in range(num_items):
        bf.add(key(i))
    # no false negative (guaranteed by the Bloom-filter contract)
    for i in range(num_items):
        assert key(i) in bf
    # false positive rate, measured on keys that were never inserted
    fc = 0
    tc = 0
    for i in range(num_items, 2 * num_items):
        if key(i) in bf:
            fc += 1
        tc += 1
    fpr = 1.0 * fc / tc
    print(f"False positive rate: {fpr}")
    print("Passed!")


def test_bf_int(num_items=1000000, error=0.01):
    """Test the BloomFilter with parameters 'num_items' & `error`"""
    _run_bf_test(num_items, error, key=lambda i: i, label="integer")


def test_bf_str(num_items=1000000, error=0.01):
    """Test the BloomFilter with parameters 'num_items' & `error`"""
    _run_bf_test(num_items, error, key=str, label="str")


if __name__ == "__main__":
    test_bf_int()
    test_bf_str()
nilq/baby-python
python
# -*- coding: utf-8 -*-
"""Unit tests for :class:`easy_acl.rule.WildcardEnding`."""

from __future__ import absolute_import

import mock
import pytest

import easy_acl.rule as rule

__copyright__ = "Copyright (c) 2015-2019 Ing. Petr Jindra. All Rights Reserved."


def _always_allow():
    """Return a mock evaluator whose return_value is True (access granted)."""
    return mock.Mock(return_value=True)


def test_init_without_wildcard():
    evaluator = mock.Mock()
    wildcard_rule = rule.WildcardEnding("foo.bar.foo-bar", evaluator)

    assert not wildcard_rule.has_wildcard
    assert wildcard_rule.definition_parts == ("foo", "bar", "foo-bar")
    assert wildcard_rule.evaluator is evaluator


def test_init_with_wildcard():
    evaluator = mock.Mock()
    wildcard_rule = rule.WildcardEnding("foo.bar.foo-bar.*", evaluator)

    assert wildcard_rule.has_wildcard
    assert wildcard_rule.definition_parts == ("foo", "bar", "foo-bar", "*")
    assert wildcard_rule.evaluator is evaluator


def test_resolve_matching_without_wildcard():
    definition = "foo.bar.foo-bar"
    evaluator = _always_allow()
    wildcard_rule = rule.WildcardEnding(definition, evaluator)

    # An exact (non-wildcard) match resolves at level 0.
    outcome = wildcard_rule.resolve(mock.Mock(), definition)

    assert outcome.level == 0
    assert outcome.is_allowed == evaluator.return_value


def test_resolve_not_matching_without_wildcard():
    evaluator = _always_allow()
    wildcard_rule = rule.WildcardEnding("foo.bar.foo-bar", evaluator)

    with pytest.raises(ValueError):
        wildcard_rule.resolve(mock.Mock(), "fooo")


def test_resolve_matching_with_wildcard():
    evaluator = _always_allow()
    wildcard_rule = rule.WildcardEnding("foo.bar.foo-bar.*", evaluator)

    # Two extra segments beyond the wildcard prefix -> level 2.
    outcome = wildcard_rule.resolve(mock.Mock(), "foo.bar.foo-bar.my.resource")

    assert outcome.level == 2
    assert outcome.is_allowed == evaluator.return_value


def test_resolve_not_matching_with_wildcard():
    evaluator = _always_allow()
    wildcard_rule = rule.WildcardEnding("foo.bar.foo-bar.*", evaluator)

    with pytest.raises(ValueError):
        wildcard_rule.resolve(mock.Mock(), "bar.foo.bar-foo.not.matching")


def test_resolve_not_matching_with_wildcard_edgecase():
    # The bare prefix (no trailing segment) must NOT match "prefix.*".
    evaluator = _always_allow()
    wildcard_rule = rule.WildcardEnding("foo.bar.foo-bar.*", evaluator)

    with pytest.raises(ValueError):
        wildcard_rule.resolve(mock.Mock(), "foo.bar.foo-bar")
nilq/baby-python
python
import socket
import ssl
from urllib.parse import urlsplit

# Minimal raw-socket HTTP client: prompts for a URL, issues a GET request,
# and streams the raw response (headers + body) to stdout.
try:
    url = input('Enter - ')  # e.g. http://data.pr4e.org/romeo.txt
    parts = urlsplit(url)
    # FIX: the original extracted the host with a fragile regex and always
    # connected to port 80 — even for https:// URLs, which need TLS on 443.
    if parts.scheme not in ('http', 'https') or not parts.hostname:
        raise ValueError('unsupported or malformed URL: %r' % url)
    port = parts.port or (443 if parts.scheme == 'https' else 80)
    # Context manager guarantees the socket is closed even on errors.
    with socket.create_connection((parts.hostname, port)) as mysock:
        if parts.scheme == 'https':
            context = ssl.create_default_context()
            mysock = context.wrap_socket(mysock, server_hostname=parts.hostname)
        cmd = 'GET ' + url + ' HTTP/1.0\r\n\r\n'
        # sendall() retries partial writes; send() may write only a prefix.
        mysock.sendall(cmd.encode())
        while True:
            data = mysock.recv(512)
            if len(data) < 1:
                break
            # errors='replace' avoids crashing on a multi-byte character
            # split across recv() chunk boundaries.
            print(data.decode(errors='replace'), end='')
except Exception as e:
    print('please input correct url', e)
nilq/baby-python
python
"""Tests for envs module. Need rework. """
from __future__ import absolute_import

# import unittest
# from numpy.testing import *

import inspect
from functools import partial

import SafeRLBench.envs as envs

import numpy as np

import gym
gym.undo_logger_setup()

from mock import Mock


class TestEnvironments(object):
    """
    Test Class for Environment tests.

    Note that you really dont want to inherit from unittest.TestCase here,
    because it will break reasonable output with verbose testing.
    """

    # Environment class names to exclude from the generated tests.
    exclude = []

    # Per-environment constructor arguments, keyed by class name.
    # NOTE(review): assumes each envs submodule exposes _get_test_args() —
    # confirm against SafeRLBench.envs.
    args = {
        'GymWrap': envs.gym_wrap._get_test_args(),
        'MDP': envs.mdp._get_test_args()
    }

    @classmethod
    def setUpClass(cls):
        """Generate list of classes."""
        # Collect every class exported by the envs package for testing.
        cls.classes = []
        for name, c in inspect.getmembers(envs):
            if inspect.isclass(c):
                cls.classes.append(c)

    def test_environment_requirements(self):
        """Generate tests for environment implementations."""
        # nose-style test generator: yields (check_function, env_class) pairs,
        # one triple of checks (update/reset/rollout) per environment class.
        for c in self.classes:
            if c.__name__ in self.exclude:
                pass
            else:
                # Generate NotImplementedError Test for _update
                check_update = partial(self.check_env_update)
                check_update.description = ('Test: ' + c.__name__.upper()
                                            + ': update implementation.')
                yield check_update, c

                # Generate NotImplementedError Test for _reset
                check_reset = partial(self.check_env_reset)
                check_reset.description = ('Test: ' + c.__name__.upper()
                                           + ': reset implementation.')
                yield check_reset, c

                check_rollout = partial(self.check_env_rollout)
                check_rollout.description = ('Test: ' + c.__name__.upper()
                                             + ': rollout implementation.')
                yield check_rollout, c

    def check_env_update(self, c):
        """Check if _update is implemented."""
        args = self.args.get(c.__name__, [])
        env = c(*args)
        x = env.action_space.sample()
        try:
            env._update(x)
        except NotImplementedError:
            # The abstract base raises NotImplementedError; a concrete
            # environment must override _update.
            assert False

    def check_env_reset(self, c):
        """Check if _reset is implemented."""
        args = self.args.get(c.__name__, [])
        env = c(*args)
        try:
            env._reset()
        except NotImplementedError:
            assert False

    def check_env_rollout(self, c):
        """Check rollout correctness at random positions."""
        args = self.args.get(c.__name__, [])
        env = c(*args)

        init_state = env.state

        def policy(state):
            return env.action_space.sample()

        policy_mock = Mock(side_effect=policy)

        # First rollout with a (possibly random) policy records a trace of
        # (action, state, reward) tuples.
        trace = env._rollout(policy_mock)

        # reset the environment
        env._reset()
        env.state = init_state

        # if the environment depends on a seed, reset it.
        if hasattr(env, 'seed'):
            env.seed = env.seed

        # Replay the exact same action sequence; a deterministic environment
        # (given the same seed) must reproduce the same trace.
        actions = [t[0] for t in trace]
        policy_mock_redo = Mock(side_effect=actions)

        trace_verify = env._rollout(policy_mock_redo)

        for t, t_verify in zip(trace, trace_verify):
            print(t)
            print(t_verify)
            # t = (action, state, reward); compare element-wise with a float
            # tolerance since states/actions may be numpy arrays.
            if isinstance(t[0], np.ndarray):
                assert(all(np.isclose(t_verify[0], t[0])))
            else:
                assert(np.isclose(t_verify[0], t[0]))
            if isinstance(t[1], np.ndarray):
                print(t_verify[1] - t[1])
                assert(all(np.isclose(t_verify[1], t[1])))
            else:
                assert(np.isclose(t_verify[1], t[1]))
            assert(np.isclose(t_verify[2], t[2]))
nilq/baby-python
python
"""
The String type.

In Python a value is a string whenever it is written between single
quotes, double quotes, or triple (single or double) quotes, e.g.
'a string', "a string", '''a string'''.

Notes demonstrated below:
- .upper() / .lower() change case; .split() returns a list of words;
- slicing works on strings: s[0:14] takes a range, s[::-1] walks the
  string backwards and therefore reverses it (the Pythonic reversal);
- .replace(old, new) substitutes one substring for another;
- a palindrome reads the same forwards and backwards.
"""
nome = 'Geek University'
print(nome[::-1])              # Pythonic string reversal via slicing.
print(nome.replace('G', 'P'))  # Replace one letter with another.
print(type(nome))

texto = 'socorram me subino onibus em marrocos'  # Palindrome.
print(texto)
print(texto[::-1])
nilq/baby-python
python
# -*- coding: utf-8 -*- __version__ = '1.0.3' from .fixedrec import RecordFile, RecordFileError
nilq/baby-python
python
import json
import os
import time
from abc import ABC

import numpy as np
import torch
from agent0.common.utils import set_random_seed
from agent0.ddpg.agent import Agent
from agent0.ddpg.config import Config
from ray import tune
from ray.tune.trial import ExportFormat


class Trainer(tune.Trainable, ABC):
    """Ray Tune trainable wrapping a DDPG agent.

    Each `step()` performs one agent step and reports running statistics;
    `save_checkpoint()` additionally runs a test sweep and persists the
    best-so-far model to ./best.pth.
    """

    def __init__(self, config=None, logger_creator=None):
        # Running statistic buffers (episode rewards, losses, speed, ...).
        # NOTE(review): the second tuple assignment below re-initializes
        # Rs/Qs/TRs/ITRs and replaces Ls with VLoss/PLoss — the first
        # assignment's Ls is never used afterwards.
        self.Rs, self.Qs, self.TRs, self.Ls, self.ITRs, self.velocity = [], [], [], [], [], []
        self.cfg = None
        self.agent = None
        self.epsilon = None
        self.epsilon_schedule = None
        self.actors = None
        self.frame_count = None
        self.Rs, self.Qs, self.TRs, self.VLoss, self.PLoss, self.ITRs = [], [], [], [], [], []
        self.best = float('-inf')   # best mean test reward seen so far
        self.sample_ops = None
        # tune.Trainable.__init__ calls setup(config) internally.
        super(Trainer, self).__init__(config, logger_creator)

    def setup(self, config):
        """Build config and agent; called once by tune.Trainable."""
        self.cfg = Config(**config)
        set_random_seed(self.cfg.seed)
        print("input args:\n", json.dumps(vars(self.cfg), indent=4, separators=(",", ":")))

        self.agent = Agent(**config)
        self.frame_count = 0
        self.best = float('-inf')

    def step(self):
        """Run one agent step and return a dict of training metrics."""
        tic = time.time()
        info = self.agent.step()
        self.frame_count += 1
        # Agent reports episode reward / losses only intermittently.
        if 'rs' in info:
            self.Rs.append(info['rs'])
        if 'ploss' in info:
            self.PLoss.append(info['ploss'])
        if 'vloss' in info:
            self.VLoss.append(info['vloss'])
        toc = time.time()
        # Instantaneous steps-per-second of this iteration.
        self.velocity.append(1.0 / (toc - tic))

        # Metrics use a trailing window of 20 where applicable; `+ 1` terms
        # guard against division by zero at startup.
        result = dict(
            game=self.cfg.game,
            time_past=self._time_total,
            frames=self._iteration,
            velocity=np.mean(self.velocity[-20:]) if len(self.velocity) > 0 else 0,
            speed=self.frame_count / (self._time_total + 1),
            time_remain=(self.cfg.total_steps - self.frame_count) / ((self.frame_count + 1) / (self._time_total + 1)),
            v_loss=np.mean(self.VLoss[-20:]) if len(self.VLoss) > 0 else 0,
            p_loss=np.mean(self.PLoss[-20:]) if len(self.PLoss) > 0 else 0,
            ep_reward_test=np.mean(self.ITRs) if len(self.ITRs) > 0 else 0,
            ep_reward_train=np.mean(self.Rs[-20:]) if len(self.Rs) > 0 else 0,
            ep_reward_train_max=np.max(self.Rs) if len(self.Rs) > 0 else 0,
            ep_reward_test_max=np.max(self.TRs) if len(self.TRs) > 0 else 0,
        )
        return result

    def save_checkpoint(self, checkpoint_dir):
        """Run a test sweep, then return the checkpoint state dict.

        Also writes ./best.pth whenever the mean test reward improves.
        """
        # Collect test-episode rewards until cfg.test_episodes is exceeded.
        rs = []
        while True:
            info = self.agent.step(testing=True)
            if 'rs' in info:
                rs.append(info['rs'])
            if len(rs) > self.cfg.test_episodes:
                break

        self.ITRs = rs
        self.TRs += rs
        print(f"Iteration {self.training_iteration} test Result(mean|std|max|min|len):"
              f" {np.mean(rs)}\t{np.std(rs)}\t{np.max(rs)}\t{np.min(rs)}\t{len(rs)}")

        # Everything needed to resume training in load_checkpoint().
        data_to_save = {
            'model': self.agent.network.state_dict(),
            'model_target': self.agent.target_network.state_dict(),
            'actor_optim': self.agent.actor_optimizer.state_dict(),
            'critic_optim': self.agent.critic_optimizer.state_dict(),
            'VLoss': self.VLoss,
            'PLoss': self.PLoss,
            'Rs': self.Rs,
            'TRs': self.TRs,
            'frame_count': self.frame_count,
            'ITRs': rs,
            'best': self.best,
        }

        if np.mean(rs) > self.best:
            self.best = np.mean(rs)
            torch.save(data_to_save, './best.pth')

        return data_to_save

    def load_checkpoint(self, checkpoint):
        """Restore agent networks, optimizers, and statistics from `checkpoint`."""
        self.agent.network.load_state_dict(checkpoint['model'])
        self.agent.target_network.load_state_dict(checkpoint['model_target'])
        self.agent.actor_optimizer.load_state_dict(checkpoint['actor_optim'])
        self.agent.critic_optimizer.load_state_dict(checkpoint['critic_optim'])
        self.VLoss = checkpoint['VLoss']
        self.PLoss = checkpoint['PLoss']
        self.Rs = checkpoint['Rs']
        self.TRs = checkpoint['TRs']
        self.frame_count = checkpoint['frame_count']
        self.best = checkpoint['best']

    def _export_model(self, export_formats, export_dir):
        # Ray Tune model export hook: only the MODEL format is supported.
        if export_formats == [ExportFormat.MODEL]:
            path = os.path.join(export_dir, "exported_models")
            torch.save({
                "model": self.agent.network.state_dict(),
            }, path)
            return {ExportFormat.MODEL: path}
        else:
            raise ValueError("unexpected formats: " + str(export_formats))
nilq/baby-python
python
"""Bay Bridge simulation."""

import os
import urllib.request

from flow.core.params import SumoParams, EnvParams, NetParams, InitialConfig, \
    SumoCarFollowingParams, SumoLaneChangeParams, InFlows
from flow.core.params import VehicleParams
from flow.core.params import TrafficLightParams
from flow.core.experiment import Experiment
from flow.envs.bay_bridge.base import BayBridgeEnv
from flow.scenarios.bay_bridge import BayBridgeScenario, EDGES_DISTRIBUTION

from flow.controllers import SimCarFollowingController, BayBridgeRouter

# Local path where the downloaded SUMO network file is stored.
NETFILE = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "bay_bridge.net.xml")


def bay_bridge_example(render=None,
                       use_inflows=False,
                       use_traffic_lights=False):
    """
    Perform a simulation of vehicles on the Oakland-San Francisco Bay Bridge.

    Parameters
    ----------
    render: bool, optional
        specifies whether to use the gui during execution
    use_inflows: bool, optional
        whether to activate inflows from the peripheries of the network
    use_traffic_lights: bool, optional
        whether to activate the traffic lights in the scenario

    Returns
    -------
    exp: flow.core.experiment.Experiment
        A non-rl experiment demonstrating the performance of human-driven
        vehicles simulated by sumo on the Bay Bridge.
    """
    sim_params = SumoParams(sim_step=0.6, overtake_right=True)

    if render is not None:
        sim_params.render = render

    car_following_params = SumoCarFollowingParams(
        speedDev=0.2,
        speed_mode="all_checks",
    )
    lane_change_params = SumoLaneChangeParams(
        lc_assertive=20,
        lc_pushy=0.8,
        lc_speed_gain=4.0,
        model="LC2013",
        lane_change_mode="no_lat_collide",
        # lcKeepRight=0.8
    )

    # All vehicles are SUMO-controlled "human" drivers with a Bay Bridge
    # routing controller.
    vehicles = VehicleParams()
    vehicles.add(
        veh_id="human",
        acceleration_controller=(SimCarFollowingController, {}),
        routing_controller=(BayBridgeRouter, {}),
        car_following_params=car_following_params,
        lane_change_params=lane_change_params,
        num_vehicles=1400)

    additional_env_params = {}
    env_params = EnvParams(additional_params=additional_env_params)

    traffic_lights = TrafficLightParams()

    # Optional boundary inflows; rates (veh/h or probability) are per edge
    # and per lane. Edges are SUMO edge ids from the Bay Bridge network.
    inflow = InFlows()

    if use_inflows:
        # south
        inflow.add(
            veh_type="human",
            edge="183343422",
            vehsPerHour=528,
            departLane="0",
            departSpeed=20)
        inflow.add(
            veh_type="human",
            edge="183343422",
            vehsPerHour=864,
            departLane="1",
            departSpeed=20)
        inflow.add(
            veh_type="human",
            edge="183343422",
            vehsPerHour=600,
            departLane="2",
            departSpeed=20)

        inflow.add(
            veh_type="human",
            edge="393649534",
            probability=0.1,
            departLane="0",
            departSpeed=20)  # no data for this

        # west
        inflow.add(
            veh_type="human",
            edge="11189946",
            vehsPerHour=1752,
            departLane="0",
            departSpeed=20)
        inflow.add(
            veh_type="human",
            edge="11189946",
            vehsPerHour=2136,
            departLane="1",
            departSpeed=20)
        inflow.add(
            veh_type="human",
            edge="11189946",
            vehsPerHour=576,
            departLane="2",
            departSpeed=20)

        # north
        inflow.add(
            veh_type="human",
            edge="28413687#0",
            vehsPerHour=2880,
            departLane="0",
            departSpeed=20)
        inflow.add(
            veh_type="human",
            edge="28413687#0",
            vehsPerHour=2328,
            departLane="1",
            departSpeed=20)
        inflow.add(
            veh_type="human",
            edge="28413687#0",
            vehsPerHour=3060,
            departLane="2",
            departSpeed=20)
        inflow.add(
            veh_type="human",
            edge="11198593",
            probability=0.1,
            departLane="0",
            departSpeed=20)  # no data for this
        inflow.add(
            veh_type="human",
            edge="11197889",
            probability=0.1,
            departLane="0",
            departSpeed=20)  # no data for this

        # midway through bridge
        inflow.add(
            veh_type="human",
            edge="35536683",
            probability=0.1,
            departLane="0",
            departSpeed=20)  # no data for this

    net_params = NetParams(inflows=inflow, no_internal_links=False)
    net_params.netfile = NETFILE

    # download the netfile from AWS
    # (variant with all-green traffic lights vs. the junction-fix network)
    if use_traffic_lights:
        my_url = "https://s3-us-west-1.amazonaws.com/flow.netfiles/" \
                 "bay_bridge_TL_all_green.net.xml"
    else:
        my_url = "https://s3-us-west-1.amazonaws.com/flow.netfiles/" \
                 "bay_bridge_junction_fix.net.xml"
    my_file = urllib.request.urlopen(my_url)
    data_to_write = my_file.read()

    with open(
            os.path.join(os.path.dirname(os.path.abspath(__file__)), NETFILE),
            "wb+") as f:
        f.write(data_to_write)

    initial_config = InitialConfig(
        spacing="uniform",
        min_gap=15,
        edges_distribution=EDGES_DISTRIBUTION.copy())

    scenario = BayBridgeScenario(
        name="bay_bridge",
        vehicles=vehicles,
        traffic_lights=traffic_lights,
        net_params=net_params,
        initial_config=initial_config)

    env = BayBridgeEnv(env_params, sim_params, scenario)

    return Experiment(env)


if __name__ == "__main__":
    # import the experiment variable
    exp = bay_bridge_example(
        render=True, use_inflows=False, use_traffic_lights=False)

    # run for a set number of rollouts / time steps
    exp.run(1, 1500)
nilq/baby-python
python
import logging
from queue import Queue

from common.configuration import ConfigurationService
from common.reddit import get_relevat_subreddits
from common.settings import Setting
from common.storage import StorageService
from functools import wraps
from telegram import Bot, ChatAction, Update
from telegram.ext import CallbackContext, CommandHandler, Dispatcher

# Blob in which the chat_id -> subreddits registrations are persisted.
convos_blob_name = "registered-convos.json"


def build_dispatcher(storage_service: StorageService) -> Dispatcher:
    """Create a Telegram Dispatcher wired with the /start and /stop handlers.

    :param storage_service: storage backend used to read the bot token.
    :return: a configured Dispatcher (with its own update Queue).
    """
    configuration_service = ConfigurationService(storage_service)
    token = configuration_service.get(Setting.CFG_SECRET_TELEGRAM_BOT_TOKEN)
    dispatcher = Dispatcher(Bot(token), Queue())
    dispatcher.add_handler(CommandHandler("start", cmd_register_callback))
    dispatcher.add_handler(CommandHandler("stop", cmd_unregister_callback))
    return dispatcher


def send_action(action):
    """Sends `action` while processing func command."""

    def decorator(func):
        @wraps(func)
        def command_func(update, context, *args, **kwargs):
            # Show e.g. "typing..." in the chat while the handler runs.
            context.bot.send_chat_action(
                chat_id=update.effective_message.chat_id, action=action)
            return func(update, context, *args, **kwargs)

        return command_func

    return decorator


@send_action(ChatAction.TYPING)
def cmd_register_callback(update: Update, context: CallbackContext):
    """Handle /start: register the chat for subreddit updates."""
    message = update.message
    if not message:
        # FIX: logging.warn is a deprecated alias of logging.warning.
        logging.warning("No message found in the update")
        return

    chat_id = str(message.chat_id)
    # Lazy %s formatting defers string building until the record is emitted.
    logging.info("Registering chat %s", chat_id)
    if not register_chat(chat_id):
        text = "Already registered"
    else:
        text = "Registered 🚀"
    message.reply_text(text=text)
    logging.info("Registered chat %s", chat_id)


@send_action(ChatAction.TYPING)
def cmd_unregister_callback(update: Update, context: CallbackContext):
    """Handle /stop: remove the chat from the registrations."""
    message = update.message
    if not message:
        logging.warning("No message found in the update")
        return

    chat_id = str(message.chat_id)
    logging.info("Unregistering chat %s", chat_id)
    unregister_chat(chat_id)
    message.reply_text("Unregistered")
    logging.info("Unregistered chat %s", chat_id)


def unregister_chat(chat_id: str) -> bool:
    """Remove `chat_id` from the persisted registrations.

    :return: True if the chat was registered and has been removed,
             False if it was not registered.
    """
    storage_service = StorageService()
    convos = (
        storage_service.get_blob_data(
            StorageService.default_container,
            convos_blob_name) or {})
    if chat_id in convos:
        del convos[chat_id]
        storage_service.set_blob_data(
            StorageService.default_container,
            convos_blob_name,
            convos)
        return True
    else:
        return False


def register_chat(chat_id: str) -> bool:
    """Persist `chat_id` with the currently relevant subreddits.

    :return: True if newly registered, False if it was already registered.
    """
    storage_service = StorageService()
    subreddits = get_relevat_subreddits(storage_service)
    convos = (
        storage_service.get_blob_data(
            StorageService.default_container,
            convos_blob_name) or {})
    if chat_id not in convos:
        convos[chat_id] = subreddits
        storage_service.set_blob_data(
            StorageService.default_container,
            convos_blob_name,
            convos)
        return True
    return False
nilq/baby-python
python
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#

__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"

"""
Test that we can generate Visual Studio 10.0 project (.vcxproj) and
solution (.sln) files that contain SCC information and look correct.
"""

import os

import TestSConsMSVS

test = TestSConsMSVS.TestSConsMSVS()

# Make the test infrastructure think we have this version of MSVS installed.
test._msvs_versions = ['10.0']

# Expected file contents supplied by the shared MSVS test fixture.
expected_slnfile = TestSConsMSVS.expected_slnfile_10_0
expected_vcprojfile = TestSConsMSVS.expected_vcprojfile_10_0

# SConstruct under test: builds an MSVS project with SCC (source-code
# control) settings that must surface in both the .sln and .vcxproj.
SConscript_contents = """\
env=Environment(platform='win32', tools=['msvs'], MSVS_VERSION='10.0',
                CPPDEFINES=['DEF1', 'DEF2',('DEF3','1234')],
                CPPPATH=['inc1', 'inc2'],
                MSVS_SCC_CONNECTION_ROOT='.',
                MSVS_SCC_PROVIDER='MSSCCI:Perforce SCM',
                MSVS_SCC_PROJECT_NAME='Perforce Project')

testsrc = ['test1.cpp', 'test2.cpp']
testincs = [r'sdk_dir\\sdk.h']
testlocalincs = ['test.h']
testresources = ['test.rc']
testmisc = ['readme.txt']

env.MSVSProject(target = 'Test.vcxproj',
                srcs = testsrc,
                incs = testincs,
                localincs = testlocalincs,
                resources = testresources,
                misc = testmisc,
                buildtarget = 'Test.exe',
                variant = 'Release')
"""

# SCC section expected to be injected into the generated solution file.
expected_sln_sccinfo = """\
\tGlobalSection(SourceCodeControl) = preSolution
\t\tSccNumberOfProjects = 2
\t\tSccProjectName0 = Perforce\\u0020Project
\t\tSccLocalPath0 = .
\t\tSccProvider0 = MSSCCI:Perforce\\u0020SCM
\t\tCanCheckoutShared = true
\t\tSccProjectUniqueName1 = Test.vcxproj
\t\tSccLocalPath1 = .
\t\tCanCheckoutShared = true
\t\tSccProjectFilePathRelativizedFromConnection1 = .\\\\
\tEndGlobalSection
"""

# SCC properties expected inside the generated project file.
expected_vcproj_sccinfo = """\
\t\t<SccProjectName>Perforce Project</SccProjectName>
\t\t<SccLocalPath>.</SccLocalPath>
\t\t<SccProvider>MSSCCI:Perforce SCM</SccProvider>
"""


test.write('SConstruct', SConscript_contents)

test.run(arguments="Test.vcxproj")

test.must_exist(test.workpath('Test.vcxproj'))
vcproj = test.read('Test.vcxproj', 'r')
expect = test.msvs_substitute(expected_vcprojfile, '10.0', None, 'SConstruct',
                              vcproj_sccinfo=expected_vcproj_sccinfo)
# don't compare the pickled data
assert vcproj[:len(expect)] == expect, test.diff_substr(expect, vcproj)

test.must_exist(test.workpath('Test.sln'))
sln = test.read('Test.sln', 'r')
expect = test.msvs_substitute(expected_slnfile, '10.0', None, 'SConstruct',
                              sln_sccinfo=expected_sln_sccinfo)
# don't compare the pickled data
assert sln[:len(expect)] == expect, test.diff_substr(expect, sln)


test.pass_test()

# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
nilq/baby-python
python
import bz2
import re
import sys
from collections import namedtuple
from pathlib import Path
# NOTE: cElementTree was removed in Python 3.9; plain ElementTree has used the
# C accelerator automatically since Python 3.3.
from xml.etree import ElementTree
from urllib.parse import urljoin
from urllib.error import URLError

import mwparserfromhell
import numpy
import pycountry
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from tqdm import tqdm, tqdm_notebook

from src.text.bag_of_words.okapi_bm25_search import OkapiBM25Searcher
from src.utility.connectivity import retrieve_file
from src.utility.files import (
    ensure_directory,
    save_as_compressed_pickle_file,
    load_from_compressed_pickle_file
)

# Use the notebook progress bar when stdout is not a terminal.
if not sys.stdout.isatty():
    tqdm = tqdm_notebook

DATA_DIRECTORY = Path("data", "wikipedia")
ensure_directory(DATA_DIRECTORY)

WIKIPEDIA_PAGE_BASE_URL = "https://{}.wikipedia.org/wiki/"
WIKIPEDIA_DUMP_URL = "https://dumps.wikimedia.org/"\
    "{0}wiki/latest/"\
    "{0}wiki-latest-pages-articles-multistream.xml.bz2"

CC_BY_SA_LICENSE_URL = "https://creativecommons.org/licenses/by-sa/3.0/"
GNU_FREE_DOCUMENTATION_LICENSE_URL = "https://www.gnu.org/copyleft/fdl.html"
LICENSE_URLS = [
    CC_BY_SA_LICENSE_URL,
    GNU_FREE_DOCUMENTATION_LICENSE_URL
]


class WikipediaDocument:
    """A single Wikipedia article: title, page URL, abstract, and full text."""

    def __init__(self, title=None, url=None, abstract=None, text=None):
        self.title = title
        self.url = url
        self.abstract = abstract
        self.text = text

    def __str__(self):
        return "Document('{}')".format(self.title)

    def __repr__(self):
        return str(self)


class Wikipedia:
    """Wikipedia corpus with Okapi BM25 search over article abstracts.

    On construction, tries in order: load locally cached parsed/vectorised
    documents, download them from ``cache_directory_url``, or download the
    raw Wikipedia dump and parse/vectorise it locally.

    Parameters:
        language: language name resolvable by ``pycountry`` (default English).
        cache_directory_url: base URL (or local prefix) for pre-built caches;
            falsy to disable cache download.
        maximum_number_of_documents: stop parsing the dump after this many
            articles (``None`` for no limit).
    """

    def __init__(self, language="English", cache_directory_url="tmp",
                 maximum_number_of_documents=None):

        self.documents = None  # type: list[WikipediaDocument]

        self.__language_code = pycountry.languages.lookup(language).alpha_2
        self.__maximum_number_of_documents = maximum_number_of_documents

        # Wikipedia URLs
        self.__page_base_url = WIKIPEDIA_PAGE_BASE_URL.format(
            self.__language_code)
        self.__dump_url = WIKIPEDIA_DUMP_URL.format(self.__language_code)

        # Local files
        self.__filename = self.__dump_url.split("/")[-1]
        self.__path = Path(DATA_DIRECTORY, self.__filename)
        base_name = self.__filename.split(".")[0]
        self.__parsed_documents_filename = base_name + "-parsed.pkl.gz"
        self.__parsed_documents_path = Path(
            DATA_DIRECTORY, self.__parsed_documents_filename)
        self.__vectorised_documents_filename = base_name + "-vectorised.pkl.gz"
        self.__vectorised_documents_path = Path(
            DATA_DIRECTORY, self.__vectorised_documents_filename)

        # Load, download cache, or parse and preprocess as necessary.
        if self.__parsed_documents_path.exists():
            self._load_parsed_documents()
            if self.__vectorised_documents_path.exists():
                self._load_vectorised_documents()
            else:
                self._vectorise_documents()
        else:
            parsed_documents_loaded_succesfully = False
            if cache_directory_url:
                parsed_documents_downloaded_succesfully = False
                vectorised_documents_downloaded_succesfully = False
                try:
                    print("Downloading parsed Wikipedia documents.")
                    parsed_documents_url = urljoin(
                        cache_directory_url,
                        self.__parsed_documents_filename
                    )
                    retrieve_file(
                        url=parsed_documents_url,
                        path=self.__parsed_documents_path
                    )
                    parsed_documents_downloaded_succesfully = True
                except URLError as url_error:
                    print(f"Failed to download documents ({url_error}).")
                    print("Falling back to parsing Wikipedia locally.")
                if parsed_documents_downloaded_succesfully:
                    self._load_parsed_documents()
                    parsed_documents_loaded_succesfully = True
                    # Only worth fetching the vectorised cache when the parsed
                    # documents it was built from are available.
                    try:
                        print("Downloading preprocessed Wikipedia documents.")
                        vectorised_documents_url = urljoin(
                            cache_directory_url,
                            self.__vectorised_documents_filename
                        )
                        retrieve_file(
                            url=vectorised_documents_url,
                            path=self.__vectorised_documents_path
                        )
                        vectorised_documents_downloaded_succesfully = True
                    except URLError as url_error:
                        print(f"Failed to download documents ({url_error}).")
                        print("Falling back to preprocess Wikipedia locally.")
                    if vectorised_documents_downloaded_succesfully:
                        self._load_vectorised_documents()
                    else:
                        self._vectorise_documents()
            if not parsed_documents_loaded_succesfully:
                if not self.__path.exists():
                    print("Downloading Wikipedia documents.")
                    self._download_wikipedia()
                self._parse_documents()
                self._vectorise_documents()

        # Okapi BM25 searcher over the document-term matrix.
        self.searcher = OkapiBM25Searcher(
            tf_matrix=self.matrix_doc_term,
            idf_vector=self.idf
        )

        # Progress
        print("Wikipedia loaded.")

    @property
    def language_code(self):
        """ISO 639-1 two-letter code of the corpus language."""
        return self.__language_code

    def _load_parsed_documents(self):
        """Load parsed documents from the local compressed pickle cache."""
        print("Loading parsed documents.")
        parsed_documents = load_from_compressed_pickle_file(
            self.__parsed_documents_path)
        self.__set_documents(parsed_documents["content"])

    def _load_vectorised_documents(self):
        """Load the bag-of-words representation from the local cache."""
        print("Loading preprocessed documents.")
        vectorised_storage = load_from_compressed_pickle_file(
            self.__vectorised_documents_path)
        self._vectorised_storage = vectorised_storage["content"]

    def _parse_documents(self):
        """Parse the downloaded dump and persist the parsed documents."""
        print("Parsing Wikipedia documents.")
        documents = self._parse_wikipedia(
            maximum_number_of_documents=self.__maximum_number_of_documents
        )

        print("Saving parsed documents.")
        parsed_documents = {
            "content": documents,
            "changes": "extracted first paragraph of each article",
            "license": LICENSE_URLS
        }
        save_as_compressed_pickle_file(
            parsed_documents,
            self.__parsed_documents_path,
        )

        self.__set_documents(documents)

    def __set_documents(self, documents):
        """Wrap parsed document dicts in WikipediaDocument objects."""
        self.documents = [
            WikipediaDocument(
                title=document["title"],
                url=document["url"],
                abstract=document["abstract"],
                text=document["text"]
            )
            for document in documents
        ]

    def _vectorise_documents(self):
        """Vectorise the parsed documents and persist the result."""
        self._process_parsed_documents()

        print("Saving preprocessed documents.")
        vectorised_storage = {
            "content": self._vectorised_storage,
            "changes": "bag-of-words representation of the first "
                       "paragraph of each article",
            "license": LICENSE_URLS
        }
        # Fix: pass the path (as _parse_documents does), not an open file
        # handle that was never closed.
        save_as_compressed_pickle_file(
            vectorised_storage,
            self.__vectorised_documents_path,
        )

    @property
    def _vectorised_storage(self):
        """Tuple of everything needed to restore the vectorised state."""
        return (self.n_documents,
                self.n_words,
                self.matrix_doc_term,
                self.document_lengths,
                self._avg_document_length,
                self.idf,
                self.term_vectorizer)

    @_vectorised_storage.setter
    def _vectorised_storage(self, values):
        (self.n_documents,
         self.n_words,
         self.matrix_doc_term,
         self.document_lengths,
         self._avg_document_length,
         self.idf,
         self.term_vectorizer) = values

    def _process_parsed_documents(self):
        """Build the document-term matrix and IDF vector from the abstracts."""
        print("Preprocessing documents.")

        # Get abstracts
        documents = [document.abstract for document in self.documents]

        self.n_documents = len(documents)

        # Make vectorizer
        self.term_vectorizer = CountVectorizer(
            lowercase=True,
            preprocessor=None,
            tokenizer=None,
            stop_words=None,
            ngram_range=(1, 1),
            analyzer="word",
        )

        # Vectorize documents
        self.matrix_doc_term = self.term_vectorizer.fit_transform(documents)

        # Compute document lengths (BM25 normalisation terms).
        self.document_lengths = self.matrix_doc_term.sum(1)
        self._avg_document_length = self.document_lengths.mean()

        print("Computing TF-IDF.")

        tfidf_transformer = TfidfTransformer(
            norm="l2",
            smooth_idf=True,
        )
        # Only the fitted IDF vector is used; the transformed matrix returned
        # by fit_transform was previously computed and discarded.
        tfidf_transformer.fit(self.matrix_doc_term)
        self.idf = tfidf_transformer.idf_

        # Number of features
        self.n_words = self.matrix_doc_term.shape[1]

    def _download_wikipedia(self):
        """Download the compressed Wikipedia dump to the data directory."""
        retrieve_file(
            url=self.__dump_url,
            path=self.__path
        )

    def _parse_wikipedia(self, maximum_number_of_documents=None):
        """Stream-parse the bz2 XML dump into a list of document dicts.

        Keeps only pages in the article namespace that are not redirects.
        Returns dicts with keys: title, url, abstract, text.
        """
        assert self.__path.exists(), "Wikipedia data does not exist"

        # Size of the compressed file, for the progress bar.
        compressed_size = self.__path.stat().st_size

        documents = []

        with open(self.__path, mode="rb") as compressed_file:
            with bz2.BZ2File(compressed_file, mode="rb") as uncompressed_file:

                total_compressed_bytes_read_at_last_batch = 0
                tag_prefix = ""
                namespaces = []
                article_namespace_key = None
                in_page = False

                with tqdm(desc="", total=compressed_size, unit="B",
                          unit_scale=True) as progress_bar:
                    for event_number, (event, element) in enumerate(
                            ElementTree.iterparse(
                                uncompressed_file,
                                events=["start", "end", "start-ns", "end-ns"]
                            )):
                        if event == "start-ns":
                            namespaces.append(element)
                            namespace_id, namespace_uri = element
                            if namespace_id == "":
                                tag_prefix = f"{{{namespace_uri}}}"

                        elif event == "end-ns":
                            namespace = namespaces.pop()
                            namespace_id, namespace_uri = namespace
                            if namespace_id == "":
                                tag_prefix = ""

                        elif event == "start":
                            if element.tag == f"{tag_prefix}page":
                                # Reset per-page state.
                                in_page = True
                                title = None
                                text = None
                                page_namespace_keys = []
                                page_redirect = False

                        elif event == "end":
                            tag = element.tag
                            if tag.startswith(tag_prefix):
                                tag = tag.replace(tag_prefix, "", 1)

                            if tag == "namespace":
                                # The article namespace is the one without a
                                # name in the dump's <namespaces> declaration.
                                if element.text is None:
                                    article_namespace_key = \
                                        element.attrib["key"]
                            elif in_page and tag == "title":
                                if not title:
                                    title = element.text
                                else:
                                    progress_bar.write(
                                        "Multiple titles found for article "
                                        f"\"{title}\". First one used."
                                    )
                            elif in_page and tag == "text":
                                if not text:
                                    text = element.text
                                else:
                                    progress_bar.write(
                                        "Multiple text sections found for "
                                        f"article \"{title}\". "
                                        "First one used."
                                    )
                            elif in_page and tag == "ns":
                                page_namespace_keys.append(element.text)
                            elif in_page and tag == "redirect":
                                page_redirect = True
                            elif in_page and tag == "page":
                                in_page = False
                                # Fix: previously a `continue` here skipped
                                # element.clear() for filtered pages, growing
                                # memory while streaming.
                                if article_namespace_key \
                                        in page_namespace_keys \
                                        and not page_redirect:
                                    url = self.__page_base_url \
                                        + title.replace(" ", "_")
                                    abstract = self._parse_wikipedia_article(
                                        article_text=text,
                                        sections="first paragraph",
                                        include_header_image_captions=False,
                                        include_header_infoboxes=False
                                    )
                                    fulltext = self._parse_wikipedia_article(
                                        article_text=text,
                                        sections="all",
                                        include_header_image_captions=False,
                                        include_header_infoboxes=False
                                    )
                                    document = {
                                        "title": title,
                                        "url": url,
                                        "abstract": abstract,
                                        "text": fulltext
                                    }
                                    documents.append(document)

                            # Free the parsed element to keep memory bounded.
                            element.clear()

                        if maximum_number_of_documents and \
                                len(documents) >= maximum_number_of_documents:
                            break

                        # Update the progress bar from the compressed stream
                        # position every 1000 events.
                        if event_number % 1000 == 0:
                            total_compressed_bytes_read = \
                                compressed_file.tell()
                            compressed_bytes_read_for_batch = \
                                total_compressed_bytes_read \
                                - total_compressed_bytes_read_at_last_batch
                            total_compressed_bytes_read_at_last_batch = \
                                total_compressed_bytes_read
                            progress_bar.update(
                                compressed_bytes_read_for_batch)

        return documents

    def _parse_wikipedia_article(self, article_text,
                                 sections="first paragraph",
                                 include_header_image_captions=False,
                                 include_header_infoboxes=False):
        """Convert an article's wiki markup to plain text.

        Parameters:
            article_text: raw wiki markup of the article.
            sections: "first paragraph", "lead", or "all".
            include_header_image_captions / include_header_infoboxes: append
                captions/infobox fields extracted from the header.

        Raises:
            ValueError: for an unknown ``sections`` value.
        """
        text = ""

        if sections not in ["first paragraph", "lead", "all"]:
            raise ValueError(
                "Can only extract the first paragraph, the lead, or all sections."
            )

        def remove_footnotes_from_wikimedia_markup(markup):
            # Drop <ref> footnote tags; a ValueError means the node was
            # already removed together with a parent node.
            for element in markup.filter_tags():
                if element.tag == "ref":
                    try:
                        markup.remove(element)
                    except ValueError:
                        pass

        def add_line_break_before_lists_in_wikimedia_text(text):
            # Blank line before list items so strip_code keeps them separate.
            return re.sub(r"(?m)(^(\*|#) .+$)", r"\n\1", text)

        if sections == "all":
            article_text = add_line_break_before_lists_in_wikimedia_text(
                article_text)
            article_markup = mwparserfromhell.parse(article_text)
            sections_markup = article_markup.get_sections(
                include_lead=True, include_headings=True)
            lead_markup = sections_markup[0]
        else:
            # Remove everything after the first header including the header.
            lead_text = re.split(r"==[^=]+==", article_text, maxsplit=1)[0]
            lead_text = add_line_break_before_lists_in_wikimedia_text(lead_text)
            lead_markup = mwparserfromhell.parse(lead_text)

        remove_footnotes_from_wikimedia_markup(lead_markup)

        header_infoboxes = []
        header_image_captions = []
        nodes_to_remove = []

        # Find the first real text line, saving image captions and infoboxes
        # while removing everything else that precedes it.
        for node in lead_markup.nodes:
            if isinstance(node, mwparserfromhell.nodes.Wikilink) \
                    and node.text:
                nodes_to_remove.append(node)
                caption = node.text.strip_code().split("|")[-1]
                header_image_captions.append(caption)
            elif isinstance(node, mwparserfromhell.nodes.Template):
                nodes_to_remove.append(node)
                # Multi-line templates in the header are assumed to be
                # infoboxes.
                if "\n" in node:
                    infobox = {
                        parameter.name.strip_code().strip():
                            parameter.value.strip_code().strip()
                        for parameter in node.params
                    }
                    header_infoboxes.append(infobox)
            elif isinstance(node, mwparserfromhell.nodes.Comment):
                nodes_to_remove.append(node)
            elif isinstance(node, mwparserfromhell.nodes.Tag) \
                    and node.tag == "table":
                nodes_to_remove.append(node)
            elif isinstance(node, mwparserfromhell.nodes.Text):
                value = re.sub(r"__[A-Z]+__", "", node.value.lstrip())
                if not re.match(r"[A-Za-z0-9\"\']", value):
                    nodes_to_remove.append(node)
                else:
                    node.value = value
                    break
            else:
                break

        for node in nodes_to_remove:
            lead_markup.remove(node)

        lead = lead_markup.strip_code(
            normalize=True,
            collapse=True,
            keep_template_params=False
        )

        # Add line breaks before and after image links.
        lead = re.sub(r"(?m)(^thumb\|.+$)", r"\n\1\n", lead)
        lead = re.sub(r"(?m)(^.+)(thumb\|.+$)", r"\1\n\n\2\n", lead)

        if sections == "first paragraph":
            text = lead.split("\n\n")[0]
            text = text.replace("\n", " ")
        else:
            # Fix: "all" previously left `text` empty here, and
            # sections_markup[1:] below excludes the lead, so full texts were
            # missing their lead section entirely.
            text = lead

        if include_header_image_captions and header_image_captions:
            text += "\n\n" + "\n\n".join(header_image_captions)

        if include_header_infoboxes and header_infoboxes:
            for infobox in header_infoboxes:
                infobox_string = "\n\n"
                for infobox_key, infobox_value in infobox.items():
                    infobox_string += f"* {infobox_key}: {infobox_value}\n"
                text += infobox_string

        if sections == "all":
            remaining_sections_markup = mwparserfromhell.parse(
                sections_markup[1:])
            remove_footnotes_from_wikimedia_markup(
                remaining_sections_markup)
            text += "\n\n" + remaining_sections_markup.strip_code(
                normalize=True,
                collapse=True,
                keep_template_params=False
            )

        # Final clean-up: drop magic words, empty parentheses, and repeated
        # spaces left over from removed markup.
        text = re.sub(r"__[A-Z]+__\n?", "", text)
        text = text.replace("()", "")
        text = re.sub(r" +", " ", text)
        text = text.strip()

        return text

    @property
    def vocabulary(self):
        """Mapping of term -> column index in the document-term matrix."""
        return self.term_vectorizer.vocabulary_

    @property
    def terms(self):
        """Terms in column order of the document-term matrix."""
        return self.term_vectorizer.get_feature_names()

    def __str__(self):
        return "{}({} documents, {} words)".format(type(self).__name__,
                                                   self.n_documents,
                                                   self.n_words)

    def __repr__(self):
        return str(self)

    def search(self, query, k_1=1.5, b=0.75):
        """BM25-search the corpus for ``query``.

        Returns a named tuple with ``scores`` (per-document BM25 scores) and
        ``keywords`` (query terms present in the vocabulary).
        """
        query_vectorised = self.term_vectorizer.transform([query])
        scores = self.searcher.search(
            query_vectorised=query_vectorised,
            k_1=k_1,
            b=b
        )
        _, keyword_indices = query_vectorised.nonzero()
        keywords = numpy.array(self.terms)[keyword_indices].tolist()
        SearchResults = namedtuple("SearchResults", ["scores", "keywords"])
        results = SearchResults(scores=scores, keywords=keywords)
        return results


if __name__ == "__main__":
    wikipedia = Wikipedia()
nilq/baby-python
python
#! /usr/bin/env python3

###
# KINOVA (R) KORTEX (TM)
#
# Copyright (c) 2018 Kinova inc. All rights reserved.
#
# This software may be modified and distributed
# under the terms of the BSD 3-Clause license.
#
# Refer to the LICENSE file for details.
#
###

import sys
import os

from kortex_api.RouterClient import RouterClientSendOptions
from kortex_api.autogen.client_stubs.DeviceManagerClientRpc import DeviceManagerClient
from kortex_api.autogen.client_stubs.DeviceConfigClientRpc import DeviceConfigClient
from kortex_api.autogen.client_stubs.BaseClientRpc import BaseClient

from kortex_api.autogen.messages import Common_pb2, DeviceManager_pb2, DeviceConfig_pb2, Session_pb2, Base_pb2, ProductConfiguration_pb2

from google.protobuf import json_format


def example_routed_device_config(device_manager, device_config):
    """Print general device information for every device in the arm/base system.

    For each routed device (base, actuator, interconnect, etc.) queries type,
    firmware/bootloader versions, model/part/serial numbers, and MAC address
    via the DeviceConfig service, then prints them.
    """
    # Get all device routing information (from DeviceManagerClient service)
    all_devices_info = device_manager.ReadAllDevices()

    # Uncomment next line to print 'all_devices_info' in JSON format
    # print(json_format.MessageToJson(all_devices_info))

    options = RouterClientSendOptions()
    options.timeout_ms = 4000  # per-request timeout in milliseconds

    # Use device routing information to route to every device (base, actuator,
    # interconnect, etc.) in the arm/base system and request their general
    # device information
    for dev in all_devices_info.device_handle:
        device_info = {}
        # Each call routes to the device via its identifier; MessageToDict
        # merges the response fields into one flat dict.
        device_info.update(
            json_format.MessageToDict(
                device_config.GetDeviceType(dev.device_identifier, options)
            )
        )
        device_info.update(
            json_format.MessageToDict(
                device_config.GetFirmwareVersion(dev.device_identifier, options)
            )
        )
        device_info.update(
            json_format.MessageToDict(
                device_config.GetBootloaderVersion(dev.device_identifier, options)
            )
        )
        device_info.update(
            json_format.MessageToDict(
                device_config.GetModelNumber(dev.device_identifier, options)
            )
        )
        device_info.update(
            json_format.MessageToDict(
                device_config.GetPartNumber(dev.device_identifier, options)
            )
        )
        device_info.update(
            json_format.MessageToDict(
                device_config.GetPartNumberRevision(dev.device_identifier, options)
            )
        )
        device_info.update(
            json_format.MessageToDict(
                device_config.GetSerialNumber(dev.device_identifier, options)
            )
        )

        # Get hexadecimal representation of MAC address
        macAddress_hexstr = ""
        for b in device_config.GetMACAddress(dev.device_identifier, options).mac_address:
            macAddress_hexstr += "%02X:" % b
        macAddress_hexstr = macAddress_hexstr[:-1]  # remove last ':'
        device_info.update({"macAddress": macAddress_hexstr})

        print("-----------------------------")
        # Resolve the numeric device type to its enum name for display.
        print("-- {}: id = {} --".format(Common_pb2._DEVICETYPES.values_by_number[dev.device_type].name, dev.device_identifier))
        for key, value in device_info.items():
            print(str("%20s") % key + ": " + str(value))


def main():
    """Connect to the arm over TCP and run the device-config example."""
    # Import the utilities helper module (lives one directory up)
    sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
    import utilities

    # Parse arguments
    args = utilities.parseConnectionArguments()

    # Create connection to the device and get the router
    with utilities.DeviceConnection.createTcpConnection(args) as router:

        # Create required services
        device_manager = DeviceManagerClient(router)
        device_config = DeviceConfigClient(router)

        # Example core
        example_routed_device_config(device_manager, device_config)


if __name__ == "__main__":
    main()
nilq/baby-python
python
class Gif:
    """Empty placeholder type for a GIF image; no behaviour defined yet."""
nilq/baby-python
python