content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
""" Copyright (c) 2017-2020 ABBYY Production LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------------------------------------*/
"""
import neoml.PythonWrapper as PythonWrapper
from .Dnn import Layer
from neoml.Utils import check_input_layers
from .BatchNormalization import BatchNormalization
import neoml.Blob as Blob
class FullyConnected(Layer):
    """The fully connected layer.
    It multiplies each of the input vectors by the weight matrix
    and adds the free term vector to the result.

    :param input_layers: The input layers to be connected.
        The integer in each tuple specifies the number of the output.
        If not set, the first output will be used.
    :type input_layers: list of object, tuple(object, int)
    :param element_count: The length of each vector in the output.
    :type element_count: int, > 0
    :param is_zero_free_term: If True, the free term vector is set to all zeros and not trained.
        If False, the free term is trained together with the weights.
    :type is_zero_free_term: bool, default=False
    :param name: The layer name.
    :type name: str, default=None

    .. rubric:: Layer inputs:

    The layer can have any number of inputs.
    The dimensions:

    - **BatchLength** * **BatchWidth** * **ListSize** is the number of vectors
    - **Height** * **Width** * **Depth** * **Channels** is the vector size;
      should be the same for all inputs

    .. rubric:: Layer outputs:

    The layer returns one output for each input.
    The dimensions:

    - **BatchLength**, **BatchWidth**, **ListSize** the same as for the input
    - **Height**, **Width**, **Depth** are 1
    - **Channels** is element_count
    """

    def __init__(self, input_layers, element_count, is_zero_free_term=False, name=None):
        # Wrapping an already-created internal layer object (e.g. when a dnn is loaded).
        if type(input_layers) is PythonWrapper.FullyConnected:
            super().__init__(input_layers)
            return

        layers, outputs = check_input_layers(input_layers, 0)

        if element_count < 1:
            raise ValueError('The `element_count` must be > 0.')

        internal = PythonWrapper.FullyConnected(str(name), layers, outputs, int(element_count), bool(is_zero_free_term))
        super().__init__(internal)

    @property
    def element_count(self):
        """Gets the length of each vector in the output.
        """
        return self._internal.get_element_count()

    @property
    def zero_free_term(self):
        """Checks if the free term vector is set to all zeros and excluded from training.
        """
        return self._internal.get_zero_free_term()

    @zero_free_term.setter
    def zero_free_term(self, zero_free_term):
        """Specifies if the free term vector should be set to all zeros and excluded
        from training (True), or trained together with the weights (False).
        """
        self._internal.set_zero_free_term(bool(zero_free_term))

    def apply_batch_normalization(self, layer):
        """Applies batch normalization to this layer.
        Batch normalization must be deleted from the dnn afterwards
        and layers which were connected to the batch norm must be connected to this layer.

        :param neoml.Dnn.BatchNormalization layer: batch norm to be applied
        """
        if type(layer) is not BatchNormalization:
            raise ValueError('The `layer` must be neoml.Dnn.BatchNormalization.')
        self._internal.apply_batch_normalization(layer._internal)

    @property
    def weights(self):
        """Gets the trained weights as a blob of the dimensions:

        - **BatchLength** * **BatchWidth** * **ListSize** equal to element_count
        - **Height**, **Width**, **Depth**, **Channels** the same as for the first input
        """
        return Blob.Blob(self._internal.get_weights())

    @weights.setter
    def weights(self, blob):
        """Sets the trained weights as a blob of the dimensions:

        - **BatchLength** * **BatchWidth** * **ListSize** equal to element_count
        - **Height**, **Width**, **Depth**, **Channels** the same as for the first input
        """
        if not type(blob) is Blob.Blob:
            raise ValueError('The `blob` must be neoml.Blob.')
        self._internal.set_weights(blob._internal)

    @property
    def free_term(self):
        """Gets the free term vector, of element_count length.
        """
        return Blob.Blob(self._internal.get_free_term())

    @free_term.setter
    def free_term(self, blob):
        """Sets the free term vector, of element_count length.
        """
        if not type(blob) is Blob.Blob:
            raise ValueError('The `blob` must be neoml.Blob.')
        self._internal.set_free_term(blob._internal)
|
nilq/baby-python
|
python
|
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery

# Point Celery at the Django settings module before the app is created.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'stratahq.settings')

# NOTE(review): the app name 'strathq' differs from the settings module
# 'stratahq' — confirm the spelling is intentional.
app = Celery('strathq')

# Read all CELERY_*-prefixed settings from the Django settings file.
app.config_from_object('django.conf:settings', namespace='CELERY')

# Discover tasks.py modules in all installed Django apps.
app.autodiscover_tasks()


@app.task(bind=True)
def debug_task(self):
    # Smoke-test task: prints the request context that invoked it.
    print('Request: {0!r}'.format(self.request))
|
nilq/baby-python
|
python
|
from flask import session
import csv, pymssql, datetime
import threading
table_lock = threading.Lock()
def create_csv_rep(orgid, filename):
    """Run the dynamic-report stored procedure for one organisation and dump
    the result set to 'static/reports/<filename>' as CSV (header row first).

    :param orgid: organisation id passed to the stored procedure.
    :param filename: name of the CSV file created under static/reports/.
    """
    # Serialize report generation. The lock is now released in a finally
    # block, so a failure inside the DB/file work can no longer deadlock
    # every subsequent call (the original only released on success).
    table_lock.acquire()
    try:
        # SECURITY NOTE(review): database credentials are hard-coded in
        # source; move them to configuration / environment variables.
        host = "197.189.232.50"
        username = "FE-User"
        password = "Fourier.01"
        database = "PGAluminium"

        conn = pymssql.connect(host, username, password, database)
        try:
            cursor = conn.cursor()

            def add_headers(columns):
                # Create/overwrite the report file with the header row.
                # newline='' prevents blank lines on Windows (csv docs).
                with open('static/reports/' + filename, mode='w', newline='') as csv_file:
                    csv.writer(csv_file).writerow(columns)

            def add_data(row_data):
                # Append one data row to the report file.
                with open('static/reports/' + filename, mode='a', newline='') as csv_file:
                    csv.writer(csv_file, delimiter=",", quoting=csv.QUOTE_MINIMAL).writerow(row_data)

            # NOTE(review): orgid/forceUpdate are interpolated directly into
            # the SQL text; switch to parameterized execution if orgid can
            # come from untrusted input. (The dead sql1/sql2 experiments that
            # shadowed each other were removed.)
            force_update = 1
            sql = "EXEC [dbo].[GenerateDynamicReport] {}, {};".format(orgid, force_update)
            cursor.execute(sql)

            add_headers([column[0] for column in cursor.description])

            row = 1
            data = cursor.fetchone()
            while data is not None:
                row += 1
                add_data(data)
                data = cursor.fetchone()
            print(row)

            conn.commit()
        finally:
            # The original leaked the connection; always close it.
            conn.close()
    finally:
        table_lock.release()
|
nilq/baby-python
|
python
|
from granule_ingester.processors.EmptyTileFilter import EmptyTileFilter
from granule_ingester.processors.GenerateTileId import GenerateTileId
from granule_ingester.processors.TileProcessor import TileProcessor
from granule_ingester.processors.TileSummarizingProcessor import TileSummarizingProcessor
from granule_ingester.processors.kelvintocelsius import KelvinToCelsius
from granule_ingester.processors.Subtract180FromLongitude import Subtract180FromLongitude
from granule_ingester.processors.ForceAscendingLatitude import ForceAscendingLatitude
|
nilq/baby-python
|
python
|
import sys
# Rebind input to the raw stdin reader — faster than builtins.input for bulk reads.
input = sys.stdin.readline
# import accumulate takes too much memory
def accumulate(A):
    """Return the prefix-sum list of A with a leading zero.

    The result has len(A) + 1 entries; result[k] is the sum of A[:k].
    Hand-rolled (instead of itertools.accumulate) to keep memory low.
    """
    prefix = [0]
    running = 0
    for value in A:
        running += value
        prefix.append(running)
    return prefix
# Answer: length of the longest window of consecutive cups whose total fits in totalcup.
count = 0
length = 0
lower_bound = 0

cups, fill = map(int, input().split())

# Difference array over cup positions: +chocs at lower, -chocs just past upper.
cuplist = [0] * (cups + 2)
for i in range(fill):
    lower, upper, chocs = map(int, input().split())
    cuplist[lower] += chocs
    cuplist[upper+1] -= chocs

totalcup = int(input())

# First prefix sum turns the difference array into per-cup chocolate counts.
cuplist = list(accumulate(cuplist))
cuplist.pop(0)

cuphead = list(accumulate(cuplist)) # Prefix sum array for cuplist
cuphead.pop(0)

# A brute force method will force the upper bound to go back to l, therefore resulting in O(N^2)
# Though it may be 2 for loops, it is actually 2 counters
# Two-pointer sliding window: advance lower_bound while the window sum exceeds totalcup.
for upper_bound in range(1, cups + 1):
    while cuphead[upper_bound] - cuphead[lower_bound] > totalcup:
        lower_bound += 1
    count = max(count, upper_bound - lower_bound)

print(count)
|
nilq/baby-python
|
python
|
import PyPDF2

# Concatenate two PDFs: every page of meetingminutes.pdf followed by every
# page of meetingminutes2.pdf, written out as combinedminutes.pdf.
pdf1File = open('meetingminutes.pdf', 'rb')
pdf2File = open('meetingminutes2.pdf', 'rb')

pdfWriter = PyPDF2.PdfFileWriter()

# Copy all pages from each source document, in order.
for reader in (PyPDF2.PdfFileReader(pdf1File), PyPDF2.PdfFileReader(pdf2File)):
    for pageNum in range(reader.numPages):
        pdfWriter.addPage(reader.getPage(pageNum))

pdfOutputFile = open('combinedminutes.pdf', 'wb')
pdfWriter.write(pdfOutputFile)
pdfOutputFile.close()

# The source files must stay open until the writer has finished serializing.
pdf1File.close()
pdf2File.close()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on Nov 7, 2015
Don't blink...
@author: Juan_Insuasti
'''
import sys
import datetime
import os.path
import json
class Logger:
    """Timestamped logger that can print to stderr and/or append to a file.

    :param logName: label prepended to every record.
    :param file: path of the log file used when saveFile is True.
    :param enabled: master switch; when False, log() does nothing.
    :param printConsole: when True, records are printed to stderr.
    :param saveFile: when True, records are appended to `file`.
    :param saveCloud: stored but unused — cloud upload is not implemented here.
    """

    def __init__(self, logName="Log", file="log.txt", enabled=True, printConsole=True, saveFile=False, saveCloud=False):
        self.logName = logName
        self.file = file
        self.enabled = enabled
        self.printConsole = printConsole
        self.saveFile = saveFile
        self.saveCloud = saveCloud
        # Write a session separator so runs are easy to tell apart in the file.
        self.saveRecord('===== ' + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") + " +0000 " + '=====')

    def log(self, action, data=None):
        """Format and emit one record.

        `action` may be a %-format string; when `data` is truthy it is applied
        to `action` with the % operator (matching the original behavior).
        """
        if not self.enabled:
            return
        record = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
        record += " +0000 : "
        record += self.logName + " >> "
        if data:
            record += action % data
        else:
            record += action
        self.printLog(record)
        self.saveRecord(record)

    def printLog(self, record):
        """Print the record to stderr when console output is enabled."""
        if self.printConsole:
            print(record, file=sys.stderr)

    def saveRecord(self, record):
        """Append the record to the log file when file output is enabled."""
        if self.saveFile:
            # Fix: use a context manager so the handle is closed even if the
            # write fails (the original opened/closed the file by hand).
            with open(self.file, "a") as log_file:
                log_file.write(record + "\n")
if __name__ == '__main__':
    # Manual smoke test: one record with %-formatted data, one without.
    print('Starting Program')
    console = Logger(logName='device0', file='test.log', enabled=True, printConsole=True, saveFile=True)
    console.log('testing with data = %s',222)
    console.log('testing without data')
    pass
|
nilq/baby-python
|
python
|
from setuptools import setup
# Load in babel support, if available.
try:
    from babel.messages import frontend as babel
    # Register Babel's i18n commands (message extraction / catalog
    # management) as extra setup.py subcommands.
    cmdclass = {"compile_catalog": babel.compile_catalog,
                "extract_messages": babel.extract_messages,
                "init_catalog": babel.init_catalog,
                "update_catalog": babel.update_catalog, }
except ImportError:
    # Babel is optional: the package still installs without the
    # translation tooling.
    cmdclass = {}

setup(name="django-nudge",
      version="0.9.1",
      description="Use Nudge to (gently) push content between Django servers",
      author="Joshua Ruihley, Ross Karchner",
      author_email="joshua.ruihley@cfpb.gov",
      url="https://github.com/CFPB/django-nudge",
      zip_safe=False,
      packages=["nudge", "nudge.demo", "nudge.management", "nudge.templatetags", "nudge.management.commands"],
      package_data = {"nudge": ["templates/*.html",
                                "templates/admin/nudge/*.html",
                                "templates/admin/nudge/batch/*.html",
                                "templates/admin/nudge/setting/*.html"]},
      # Sources live under src/, not the repository root.
      package_dir={"": "src"},
      install_requires=['django', 'django-reversion', 'pycrypto',],
      cmdclass = cmdclass,
      classifiers=["Development Status :: 4 - Beta",
                   "Environment :: Web Environment",
                   "Intended Audience :: Developers",
                   "License :: Public Domain",
                   "Operating System :: OS Independent",
                   "Programming Language :: Python",
                   "Framework :: Django",])
|
nilq/baby-python
|
python
|
from typing import Union
from notion.models.annotations import Annotations
class RichText():
    """A Notion rich-text object.

    https://developers.notion.com/reference/rich-text
    """

    # Valid values for the `type` field.
    TYPES = ["text", "mention", "equation"]

    # The annotation is quoted so the class can be defined without evaluating
    # the (lazily imported) Annotations name at definition time.
    def __init__(self, plain_text: str = None, href: str = None, annotations: "Union[dict, Annotations]" = None, type: str = None) -> None:
        """Set only the attributes whose arguments are not None.

        :param plain_text: plain-text rendering of the content.
        :param href: URL the text links to, if any.
        :param annotations: styling info; a dict is converted to Annotations.
        :param type: one of RichText.TYPES.
        :raises AssertionError: if `type` is given but not in RichText.TYPES.
        """
        if plain_text is not None:
            self.plain_text = plain_text
        if href is not None:
            self.href = href
        if annotations is not None:
            self.annotations = Annotations(**annotations) if isinstance(annotations, dict) else annotations
        if type is not None:
            # Bug fix: the assertion message referenced the non-existent
            # `RichText.types`, so an invalid type raised AttributeError
            # instead of a readable AssertionError. Validate before storing.
            assert type in RichText.TYPES, f"`type` must be one of {', '.join(RichText.TYPES)}"
            self.type = type
|
nilq/baby-python
|
python
|
"""
Handles running extensions inside a sandbox, which runs outside the primary
Petronia memory space with OS specific constraints.
"""
from .module_loader import create_sandbox_module_loader
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: ps-license@tuebingen.mpg.de
import torch
import torch.nn as nn
from torch.nn.modules.utils import _pair
class LocallyConnected2d(nn.Module):
    """2D locally-connected layer: like a convolution, but every output
    location owns its own, untied kernel.

    Weight shape: (1, out_channels, in_channels, out_h, out_w, kernel_size**2).
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 output_size,
                 kernel_size,
                 stride,
                 bias=False):
        super(LocallyConnected2d, self).__init__()
        out_h, out_w = _pair(output_size)
        # One independent kernel per output pixel.
        self.weight = nn.Parameter(
            torch.randn(1, out_channels, in_channels, out_h, out_w,
                        kernel_size ** 2),
            requires_grad=True,
        )
        if bias:
            self.bias = nn.Parameter(
                torch.randn(1, out_channels, out_h, out_w),
                requires_grad=True,
            )
        else:
            # Registered as None so `self.bias is not None` works in forward().
            self.register_parameter('bias', None)
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)

    def forward(self, x):
        batch, channels, height, width = x.size()
        kh, kw = self.kernel_size
        dh, dw = self.stride
        # Extract sliding patches -> (N, C, out_h, out_w, kh, kw), then
        # flatten the kernel window into a single trailing dimension.
        patches = x.unfold(2, kh, dh).unfold(3, kw, dw)
        patches = patches.contiguous().view(*patches.size()[:-2], -1)
        # Broadcast against the per-location weights and reduce over the
        # in_channels and kernel dimensions.
        out = (patches.unsqueeze(1) * self.weight).sum([2, -1])
        if self.bias is not None:
            out = out + self.bias
        return out
|
nilq/baby-python
|
python
|
from fastai.conv_learner import *
from fastai.dataset import *
from tensorboard_cb_old import *
import cv2
import pandas as pd
import numpy as np
import os
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
import scipy.optimize as opt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import warnings
warnings.filterwarnings("ignore")
#=======================================================================================================================
# Working directories and input CSVs for the Human Protein Atlas data.
PATH = './'
TRAIN = '../input/train/'
TEST = '../input/test/'
LABELS = '../input/train.csv'
SAMPLE = '../input/sample_submission.csv'

# Class index -> human-readable name for the 28 protein localization classes.
name_label_dict = {
    0: 'Nucleoplasm',
    1: 'Nuclear membrane',
    2: 'Nucleoli',
    3: 'Nucleoli fibrillar center',
    4: 'Nuclear speckles',
    5: 'Nuclear bodies',
    6: 'Endoplasmic reticulum',
    7: 'Golgi apparatus',
    8: 'Peroxisomes',
    9: 'Endosomes',
    10: 'Lysosomes',
    11: 'Intermediate filaments',
    12: 'Actin filaments',
    13: 'Focal adhesion sites',
    14: 'Microtubules',
    15: 'Microtubule ends',
    16: 'Cytokinetic bridge',
    17: 'Mitotic spindle',
    18: 'Microtubule organizing center',
    19: 'Centrosome',
    20: 'Lipid droplets',
    21: 'Plasma membrane',
    22: 'Cell junctions',
    23: 'Mitochondria',
    24: 'Aggresome',
    25: 'Cytosol',
    26: 'Cytoplasmic bodies',
    27: 'Rods & rings' }

nw = 4   #number of workers for data loader
arch = inceptionresnet_2 #specify target architecture
#=======================================================================================================================
#=======================================================================================================================
# Data
#=======================================================================================================================
# faulty image : dc756dea-bbb4-11e8-b2ba-ac1f6b6435d0
#=================
TRAIN_IMAGES_PER_CATEGORY = 1000
image_df = pd.read_csv(LABELS)

# Drop three known-faulty image ids from the training index.
image_df = image_df[(image_df.Id != 'dc756dea-bbb4-11e8-b2ba-ac1f6b6435d0') &
                    (image_df.Id != 'c861eb54-bb9f-11e8-b2b9-ac1f6b6435d0') &
                    (image_df.Id != '7a88f200-bbc3-11e8-b2bc-ac1f6b6435d0')]

# 'Target' is a space-separated string of class ids; parse into a list of ints.
image_df['target_list'] = image_df['Target'].map(lambda x: [int(a) for a in x.split(' ')])

# Per-class frequency over the whole training set.
all_labels = list(chain.from_iterable(image_df['target_list'].values))
c_val = Counter(all_labels)
n_keys = c_val.keys()
max_idx = max(n_keys)

#==================================================================================
# visualize train distribution
# fig, ax1 = plt.subplots(1,1, figsize = (10, 5))
# ax1.bar(n_keys, [c_val[k] for k in n_keys])
# ax1.set_xticks(range(max_idx))
# ax1.set_xticklabels([name_label_dict[k] for k in range(max_idx)], rotation=90)
# plt.show()
#==================================================================================

for k,v in c_val.items():
    print(name_label_dict[k], 'count:', v)

# create a categorical vector (multi-hot booleans, one slot per class)
image_df['target_vec'] = image_df['target_list'].map(lambda ck: [i in ck for i in range(max_idx+1)])

# Stratify on a prefix of the raw target string; rows containing rare class 27
# are lumped into stratum '0' so the split does not fail on tiny classes.
raw_train_df, valid_df = train_test_split(image_df,
                 test_size = 0.15,
                 # hack to make stratification work
                 stratify = image_df['Target'].map(lambda x: x[:3] if '27' not in x else '0'),
                 random_state= 42)
print(raw_train_df.shape[0], 'training masks')
print(valid_df.shape[0], 'validation masks')

tr_n = raw_train_df['Id'].values.tolist()
val_n = valid_df['Id'].values.tolist()
tr_n = tr_n[:-2] # pytorch has problems if last batch has one sample

# Unique test ids (the first 36 chars strip the channel/extension suffix).
test_names = list({f[:36] for f in os.listdir(TEST)})
# #=================================================================================
# # # Balance data
# #================================================================================
# # keep labels with more then 50 objects
# out_df_list = []
# for k,v in c_val.items():
# if v>50:
# keep_rows = raw_train_df['target_list'].map(lambda x: k in x)
# out_df_list += [raw_train_df[keep_rows].sample(TRAIN_IMAGES_PER_CATEGORY,
# replace=True)]
# train_df = pd.concat(out_df_list, ignore_index=True)
#
# tr_n = train_df['Id'].values.tolist()
# val_n = valid_df['Id'].values.tolist()
# tr_n = tr_n[:-2] # pytorch has problems if last batch has one sample
#
# print(train_df.shape[0])
# print(len(tr_n))
# print('unique train:',len(train_df['Id'].unique().tolist()))
#
# #=========================================================================
# #show balanced class graph
# Per-class positive counts for the train/validation splits, side by side.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize = (10, 5))
train_sum_vec = np.sum(np.stack(raw_train_df['target_vec'].values, 0), 0)
valid_sum_vec = np.sum(np.stack(valid_df['target_vec'].values, 0), 0)
ax1.bar(n_keys, [train_sum_vec[k] for k in n_keys])
ax1.set_title('Training Distribution')
ax2.bar(n_keys, [valid_sum_vec[k] for k in n_keys])
ax2.set_title('Validation Distribution')
plt.show()
#=======================================================================================================================
# Dataset loading helpers
#=======================================================================================================================
def open_rgby(path,id): #a function that reads RGBY image
    """Load the four stain channels (red/green/blue/yellow) for one image id
    and stack them into an (H, W, 4) float32 array scaled to [0, 1].

    The red/blue/yellow channels are scaled by 0.85 while green keeps weight
    1.0, weighting the image towards the green channel.
    """
    #print(id)
    colors = ['red','green','blue','yellow']
    flags = cv2.IMREAD_GRAYSCALE
    # One grayscale PNG per channel, named '<id>_<color>.png'.
    img = [cv2.imread(os.path.join(path, id+'_'+color+'.png'), flags).astype(np.float32)/255
           for color in colors]
    img[0] = img[0] * 0.85
    img[1] = img[1] * 1.0
    img[2] = img[2] * 0.85
    img[3] = img[3] * 0.85
    img = np.stack(img, axis=-1)
    #print('img loaded:', id)
    return img
class pdFilesDataset(FilesDataset):
    """Dataset of 4-channel protein images with multi-hot label vectors read
    from the train CSV; test items get all-zero labels (unknown)."""

    def __init__(self, fnames, path, transform):
        self.labels = pd.read_csv(LABELS).set_index('Id')
        self.labels['Target'] = [[int(i) for i in s.split()] for s in self.labels['Target']]
        super().__init__(fnames, transform, path)

    def get_x(self, i):
        # Load the RGBY image and resize to the working size unless it is
        # already at the native 512px.
        img = open_rgby(self.path, self.fnames[i])
        if self.sz == 512:
            return img
        else:
            return cv2.resize(img, (self.sz, self.sz), cv2.INTER_AREA)

    def get_y(self, i):
        if (self.path == TEST):
            # Test labels are unknown; return an all-zero multi-hot vector.
            # Fix: np.int was removed in NumPy 1.24+; use the builtin int.
            return np.zeros(len(name_label_dict), dtype=int)
        else:
            labels = self.labels.loc[self.fnames[i]]['Target']
            # Multi-hot encode the label list.
            # Fix: np.float was removed in NumPy 1.24+; use the builtin float.
            return np.eye(len(name_label_dict), dtype=float)[labels].sum(axis=0)

    @property
    def is_multi(self):
        return True

    @property
    def is_reg(self):
        return True
    # this flag is set to remove the output sigmoid that allows log(sigmoid) optimization
    # of the numerical stability of the loss function

    def get_c(self):
        return len(name_label_dict) # number of classes
def get_data(sz,bs):
    """Build the fastai ImageData bundle (train/val/test) for image size sz
    and batch size bs."""
    #data augmentation
    aug_tfms = [RandomRotate(30, tfm_y=TfmType.NO),
                RandomDihedral(tfm_y=TfmType.NO),
                RandomLighting(0.05, 0.05, tfm_y=TfmType.NO)]
    #mean and std in of each channel in the train set
    stats = A([0.08069, 0.05258, 0.05487, 0.08282], [0.13704, 0.10145, 0.15313, 0.13814])
    #stats = A([0.08069, 0.05258, 0.05487], [0.13704, 0.10145, 0.15313])
    tfms = tfms_from_stats(stats, sz, crop_type=CropType.NO, tfm_y=TfmType.NO,
                aug_tfms=aug_tfms)
    # Trim the train list so its length divides evenly by the batch size.
    ds = ImageData.get_ds(pdFilesDataset, (tr_n[:-(len(tr_n)%bs)],TRAIN),
                (val_n,TRAIN), tfms, test=(test_names,TEST))
    md = ImageData(PATH, ds, bs, num_workers=nw, classes=None)
    return md
#=======================================================================================================================
# Sanity check: pull one 256px batch to confirm the pipeline works end to end.
bs = 16
sz = 256
md = get_data(sz,bs)

x,y = next(iter(md.trn_dl))
print(x.shape, y.shape)
#=======================================================================================================================
# Display images
#=======================================================================================================================
# def display_imgs(x):
# columns = 4
# bs = x.shape[0]
# rows = min((bs + 3) // 4, 4)
# fig = plt.figure(figsize=(columns * 4, rows * 4))
# for i in range(rows):
# for j in range(columns):
# idx = i + j * columns
# fig.add_subplot(rows, columns, idx + 1)
# plt.axis('off')
# plt.imshow((x[idx, :, :, :3] * 255).astype(np.int))
# plt.show()
#
#
# display_imgs(np.asarray(md.trn_ds.denorm(x)))
#=======================================================================================================================
# compute dataset stats
#=======================================================================================================================
# x_tot = np.zeros(4)
# x2_tot = np.zeros(4)
# for x,y in iter(md.trn_dl):
# tmp = md.trn_ds.denorm(x).reshape(16,-1)
# x = md.trn_ds.denorm(x).reshape(-1,4)
# x_tot += x.mean(axis=0)
# x2_tot += (x**2).mean(axis=0)
#
# channel_avr = x_tot/len(md.trn_dl)
# channel_std = np.sqrt(x2_tot/len(md.trn_dl) - channel_avr**2)
# print(channel_avr,channel_std)
#=======================================================================================================================
# Loss and metrics
#=======================================================================================================================
class FocalLoss(nn.Module):
    """Focal loss on raw logits for multi-label targets.

    Computes the numerically stable BCE-with-logits term element-wise and
    scales each element by (1 - p_correct)^gamma, then sums over classes
    and averages over the batch.
    """

    def __init__(self, gamma=1):
        super().__init__()
        self.gamma = gamma

    def forward(self, input, target):
        if target.size() != input.size():
            raise ValueError("Target size ({}) must be the same as input size ({})"
                             .format(target.size(), input.size()))

        # Stable BCE with logits: max(x, 0) - x*t + log(exp(-max) + exp(-x-max)).
        clamped = (-input).clamp(min=0)
        bce = input - input * target + clamped + \
            ((-clamped).exp() + (-input - clamped).exp()).log()

        # log(1 - p_correct) via logsigmoid of the signed logit.
        log_inv_p = F.logsigmoid(-input * (target * 2.0 - 1.0))
        focal = (log_inv_p * self.gamma).exp() * bce
        return focal.sum(dim=1).mean()
def acc(preds, targs, th=0.0):
    """Element-wise accuracy of thresholded predictions vs. integer targets."""
    predicted = (preds > th).int()
    return (predicted == targs.int()).float().mean()
def recall(preds, targs, thresh=0.5):
    """Micro-averaged recall: true positives / actual positives (epsilon-smoothed)."""
    positive = preds > thresh
    true_pos = ((targs.byte() == positive) * targs.byte()).sum().item()
    actual_pos = targs.sum().item()
    return float(true_pos + 0.000001) / float(actual_pos + 0.000001)
def precision(preds, targs, thresh=0.5):
    """Micro-averaged precision: true positives / predicted positives (epsilon-smoothed)."""
    positive = preds > thresh
    true_pos = ((targs.byte() == positive) * targs.byte()).sum().item()
    predicted_pos = positive.sum().item()
    return float(true_pos + 0.000001) / float(predicted_pos + 0.000001)
def fbeta(preds, targs, beta, thresh=0.5):
    """Calculates the F-beta score (the weighted harmonic mean of precision and recall).

    This is the micro averaged version where the true positives, false negatives and
    false positives are calculated globally (as opposed to on a per label basis).

    beta == 1 places equal weight on precision and recall, b < 1 emphasizes precision and
    beta > 1 favors recall.
    """
    assert beta > 0, 'beta needs to be greater than 0'
    beta_sq = beta ** 2
    r = recall(preds, targs, thresh)
    p = precision(preds, targs, thresh)
    # Small epsilon in the denominator avoids division by zero when both are 0.
    return float((1 + beta_sq) * p * r) / float(beta_sq * p + r + 0.00000001)

def f1(preds, targs, thresh=0.5):
    """F1 score: fbeta with beta == 1."""
    return float(fbeta(preds, targs, 1, thresh))
########################################################################################################################
# Training
########################################################################################################################
class ConvnetBuilder_custom():
    """Variant of fastai's ConvnetBuilder that swaps the backbone's first
    convolution for a 4-input-channel conv (RGBY images), keeping the
    pretrained RGB kernel weights and zero-initializing the extra channel.
    """

    def __init__(self, f, c, is_multi, is_reg, ps=None, xtra_fc=None, xtra_cut=0,
                 custom_head=None, pretrained=True):
        # f: backbone architecture function; c: number of output classes.
        self.f, self.c, self.is_multi, self.is_reg, self.xtra_cut = f, c, is_multi, is_reg, xtra_cut
        if xtra_fc is None: xtra_fc = [512]
        if ps is None: ps = [0.25] * len(xtra_fc) + [0.5]
        self.ps, self.xtra_fc = ps, xtra_fc

        # Where to cut the pretrained model, and the layer-group split point.
        if f in model_meta:
            cut, self.lr_cut = model_meta[f]
        else:
            cut, self.lr_cut = 0, 0
        cut -= xtra_cut
        layers = cut_model(f(pretrained), cut)

        # replace first convolutional layer by 4->32 while keeping corresponding weights
        # and initializing new weights with zeros
        w = layers[00].conv.weight
        layers[00].conv = nn.Conv2d(4, 32, kernel_size=(3, 3), stride=(2, 2), bias=False)
        layers[00].conv.weight = torch.nn.Parameter(torch.cat((w, torch.zeros(32, 1, 3, 3)), dim=1))

        # Feature count doubles because of AdaptiveConcatPool2d (avg + max).
        self.nf = model_features[f] if f in model_features else (num_features(layers) * 2)
        if not custom_head: layers += [AdaptiveConcatPool2d(), Flatten()]
        self.top_model = nn.Sequential(*layers)

        n_fc = len(self.xtra_fc) + 1
        if not isinstance(self.ps, list): self.ps = [self.ps] * n_fc

        if custom_head:
            fc_layers = [custom_head]
        else:
            fc_layers = self.get_fc_layers()
        self.n_fc = len(fc_layers)
        # The FC head is kept separately so it can be trained against
        # precomputed backbone activations.
        self.fc_model = to_gpu(nn.Sequential(*fc_layers))
        if not custom_head: apply_init(self.fc_model, kaiming_normal)
        self.model = to_gpu(nn.Sequential(*(layers + fc_layers)))

    @property
    def name(self):
        return f'{self.f.__name__}_{self.xtra_cut}'

    def create_fc_layer(self, ni, nf, p, actn=None):
        # BatchNorm -> (Dropout) -> Linear -> (activation)
        res = [nn.BatchNorm1d(num_features=ni)]
        if p: res.append(nn.Dropout(p=p))
        res.append(nn.Linear(in_features=ni, out_features=nf))
        if actn: res.append(actn)
        return res

    def get_fc_layers(self):
        res = []
        ni = self.nf
        for i, nf in enumerate(self.xtra_fc):
            res += self.create_fc_layer(ni, nf, p=self.ps[i], actn=nn.ReLU())
            ni = nf
        # Final activation depends on the problem type; regression gets none.
        final_actn = nn.Sigmoid() if self.is_multi else nn.LogSoftmax()
        if self.is_reg: final_actn = None
        res += self.create_fc_layer(ni, self.c, p=self.ps[-1], actn=final_actn)
        return res

    def get_layer_groups(self, do_fc=False):
        # Layer groups drive fastai's discriminative learning rates.
        if do_fc:
            return [self.fc_model]
        idxs = [self.lr_cut]
        c = children(self.top_model)
        if len(c) == 3: c = children(c[0]) + c[1:]
        lgs = list(split_by_idxs(c, idxs))
        return lgs + [self.fc_model]
class ConvLearner(Learner):
    """fastai Learner for convnets with optional precomputation of backbone
    activations, so only the FC head needs to be trained in that mode.
    """

    def __init__(self, data, models, precompute=False, **kwargs):
        self.precompute = False
        super().__init__(data, models, **kwargs)
        # Pick a default metric from the data's problem type if none was given.
        if hasattr(data, 'is_multi') and not data.is_reg and self.metrics is None:
            self.metrics = [accuracy_thresh(0.5)] if self.data.is_multi else [accuracy]
        if precompute: self.save_fc1()
        self.freeze()
        self.precompute = precompute

    def _get_crit(self, data):
        # Choose the loss function based on the data's problem type.
        if not hasattr(data, 'is_multi'): return super()._get_crit(data)
        return F.l1_loss if data.is_reg else F.binary_cross_entropy if data.is_multi else F.nll_loss

    @classmethod
    def pretrained(cls, f, data, ps=None, xtra_fc=None, xtra_cut=0, custom_head=None, precompute=False,
                   pretrained=True, **kwargs):
        # Build the custom 4-channel backbone + head, then wrap in a learner.
        models = ConvnetBuilder_custom(f, data.c, data.is_multi, data.is_reg,
                                       ps=ps, xtra_fc=xtra_fc, xtra_cut=xtra_cut, custom_head=custom_head,
                                       pretrained=pretrained)
        return cls(data, models, precompute, **kwargs)

    @classmethod
    def lsuv_learner(cls, f, data, ps=None, xtra_fc=None, xtra_cut=0, custom_head=None, precompute=False,
                     needed_std=1.0, std_tol=0.1, max_attempts=10, do_orthonorm=False, **kwargs):
        # NOTE(review): this path uses the stock ConvnetBuilder rather than
        # ConvnetBuilder_custom, so it keeps the original 3-channel stem —
        # confirm that is intended before using LSUV init here.
        models = ConvnetBuilder(f, data.c, data.is_multi, data.is_reg,
                                ps=ps, xtra_fc=xtra_fc, xtra_cut=xtra_cut, custom_head=custom_head, pretrained=False)
        convlearn = cls(data, models, precompute, **kwargs)
        convlearn.lsuv_init()
        return convlearn

    @property
    def model(self):
        # In precompute mode the trainable model is just the FC head.
        return self.models.fc_model if self.precompute else self.models.model

    def half(self):
        # Wrap the model (and head) in FP16 for half-precision training.
        if self.fp16: return
        self.fp16 = True
        if type(self.model) != FP16: self.models.model = FP16(self.model)
        if not isinstance(self.models.fc_model, FP16): self.models.fc_model = FP16(self.models.fc_model)

    def float(self):
        # Undo half(): unwrap back to fp32 modules.
        if not self.fp16: return
        self.fp16 = False
        if type(self.models.model) == FP16: self.models.model = self.model.module.float()
        if type(self.models.fc_model) == FP16: self.models.fc_model = self.models.fc_model.module.float()

    @property
    def data(self):
        # Precompute mode trains on cached activations instead of images.
        return self.fc_data if self.precompute else self.data_

    def create_empty_bcolz(self, n, name):
        # On-disk array used to cache backbone activations.
        return bcolz.carray(np.zeros((0, n), np.float32), chunklen=1, mode='w', rootdir=name)

    def set_data(self, data, precompute=False):
        super().set_data(data)
        if precompute:
            self.unfreeze()
            self.save_fc1()
            self.freeze()
            self.precompute = True
        else:
            self.freeze()

    def get_layer_groups(self):
        return self.models.get_layer_groups(self.precompute)

    def summary(self):
        # Temporarily disable precompute so the full model is summarized.
        precompute = self.precompute
        self.precompute = False
        res = super().summary()
        self.precompute = precompute
        return res

    def get_activations(self, force=False):
        tmpl = f'_{self.models.name}_{self.data.sz}.bc'
        # TODO: Somehow check that directory names haven't changed (e.g. added test set)
        names = [os.path.join(self.tmp_path, p + tmpl) for p in ('x_act', 'x_act_val', 'x_act_test')]
        # Reuse existing caches unless forced to rebuild.
        if os.path.exists(names[0]) and not force:
            self.activations = [bcolz.open(p) for p in names]
        else:
            self.activations = [self.create_empty_bcolz(self.models.nf, n) for n in names]

    def save_fc1(self):
        # Precompute backbone activations for train/val/test and build an
        # array-backed dataset for training the FC head on them.
        self.get_activations()
        act, val_act, test_act = self.activations
        m = self.models.top_model
        if len(self.activations[0]) != len(self.data.trn_ds):
            predict_to_bcolz(m, self.data.fix_dl, act)
        if len(self.activations[1]) != len(self.data.val_ds):
            predict_to_bcolz(m, self.data.val_dl, val_act)
        if self.data.test_dl and (len(self.activations[2]) != len(self.data.test_ds)):
            if self.data.test_dl: predict_to_bcolz(m, self.data.test_dl, test_act)

        self.fc_data = ImageClassifierData.from_arrays(self.data.path,
                                                       (act, self.data.trn_y), (val_act, self.data.val_y), self.data.bs,
                                                       classes=self.data.classes,
                                                       test=test_act if self.data.test_dl else None, num_workers=8)

    def freeze(self):
        # Train only the last layer group (the FC head).
        self.freeze_to(-1)

    def unfreeze(self):
        # Make all layer groups trainable; precomputed activations no longer apply.
        self.freeze_to(0)
        self.precompute = False

    def predict_array(self, arr):
        # Always predict with the full model, regardless of precompute mode.
        precompute = self.precompute
        self.precompute = False
        pred = super().predict_array(arr)
        self.precompute = precompute
        return pred
#=======================================================================================================================
sz = 512 #image size
bs = 8   #batch size

md = get_data(sz,bs)
learner = ConvLearner.pretrained(arch, md, ps=0.2) # dropout p=0.2 in the head
learner.opt_fn = optim.Adam
learner.clip = 1.0 #gradient clipping
learner.crit = FocalLoss()
#learner.crit = f2_loss
learner.metrics = [precision, recall, f1]
# NOTE(review): this prints the bound method object; learner.summary() was
# probably intended.
print(learner.summary)

#learner.lr_find()
#learner.sched.plot()
#plt.show()

tb_logger = TensorboardLogger(learner.model, md, "inres_512_val3", metrics_names=["precision", 'recall', 'f1'])

# Discriminative learning rates: 10x / 3x reductions for the earlier groups.
lr = 1e-3
lrs=np.array([lr/10,lr/3,lr])

#learner.fit(lr,1, best_save_name='inres_512_0.3', callbacks=[tb_logger])
learner.unfreeze()
#learner.load('wrn_512_3.3')
#learner.fit(lrs/4,4,cycle_len=2,use_clr=(10,20),best_save_name='inres_512_1.3', callbacks=[tb_logger])
#learner.fit(lrs/4,2,cycle_len=4,use_clr=(10,20), best_save_name='inres_512_2.3', callbacks=[tb_logger])
#learner.fit(lrs/16,1,cycle_len=8,use_clr=(5,20), best_save_name='inres_512_3.3', callbacks=[tb_logger])
# learner.fit(lrs/16,1,cycle_len=8,use_clr=(5,20), best_save_name='wrn_512_4.3_best', callbacks=[tb_logger] )
# learner.fit(lrs/16,1,cycle_len=8,use_clr=(5,20), best_save_name='wrn_512_5.3_best', callbacks=[tb_logger])
#learner.save('inres_512_unbalanced_grn+')

# Resume from the saved checkpoint and run two more 8-epoch CLR cycles.
learner.load('inres_512_unbalanced_grn+')
#learner.load('wrn_512_balanced')
learner.fit(lrs/16,1,cycle_len=8,use_clr=(5,20), best_save_name='inres_512_4.3_best_unbalanced_grn+', callbacks=[tb_logger] )
learner.fit(lrs/16,1,cycle_len=8,use_clr=(5,20), best_save_name='inres_512_5.3_best_unbalanced_grn+', callbacks=[tb_logger])
learner.save('inres_512_unbalanced_grn+_focalgamma1')

# swa
#learner.fit(lrs/160,1,cycle_len=8,use_clr=(5,20), best_save_name='wrn_512_4', callbacks=[tb_logger])
#learner.load('Res34_512_grn4-swa')
#learner.fit(lrs/16, n_cycle=4, cycle_len=4,use_clr=(5,20), best_save_name='Res34_512_grn4', use_swa=True, swa_start=1, swa_eval_freq=5,callbacks=[tb_logger])
#learner.load('Res34_512_grn4-swa')
#learner.load('Res34_512_grn4-swa')
#======================================================================================================================
# Validation
#=======================================================================================================================
def sigmoid_np(x):
    """Element-wise logistic sigmoid, 1 / (1 + e^-x), for numpy arrays or scalars."""
    denominator = 1.0 + np.exp(-x)
    return 1.0 / denominator
# TTA returns one set of outputs per augmentation (presumably raw logits,
# since sigmoid is applied below — TODO confirm); take the per-class max
# over augmentations as the final validation score.
preds,y = learner.TTA(n_aug=16)
preds = np.stack(preds, axis=-1)
preds = sigmoid_np(preds)
pred = preds.max(axis=-1)
def F1_soft(preds,targs,th=0.5,d=50.0):
    """Differentiable (soft) per-class F1 score.

    A steep sigmoid of slope `d` centred on threshold `th` converts raw scores
    into soft binary predictions; the usual F1 formula is then applied per
    column with a small epsilon guarding against division by zero.

    :param preds: score array (samples x classes)
    :param targs: 0/1 target array of the same shape
    :param th: scalar or per-class threshold
    :param d: sigmoid steepness
    :return: soft F1 per class (1-D array)
    """
    preds = sigmoid_np(d*(preds - th))
    # bug fix: np.float (a deprecated alias for the builtin float) was removed
    # in NumPy 1.24; astype(float) produces the same float64 array
    targs = targs.astype(float)
    score = 2.0*(preds*targs).sum(axis=0)/((preds+targs).sum(axis=0) + 1e-6)
    return score
def fit_val(x,y):
    """Fit per-class thresholds on validation data by least squares.

    Minimises (1 - soft F1) for every class simultaneously, with a tiny
    quadratic penalty (weight 1e-5) that keeps thresholds near 0.5.
    Returns the fitted threshold vector.
    """
    n_classes = len(name_label_dict)
    start = 0.5 * np.ones(n_classes)
    penalty = 1e-5

    def residuals(p):
        f1_gap = F1_soft(x, y, p) - 1.0
        regulariser = penalty * (p - 0.5)
        return np.concatenate((f1_gap, regulariser), axis=None)

    fitted, _ = opt.leastsq(residuals, start)
    return fitted
# Fit per-class thresholds on the validation predictions, clamp them to at
# least 0.1, and report F1 scores against the fitted vs. fixed 0.5 thresholds.
th = fit_val(pred,y)
th[th<0.1] = 0.1
print('Thresholds: ',th)
print('F1 macro: ',f1_score(y, pred>th, average='macro'))
print('F1 macro (th = 0.5): ',f1_score(y, pred>0.5, average='macro'))
print('F1 micro: ',f1_score(y, pred>th, average='micro'))
print('Fractions: ',(pred > th).mean(axis=0))
print('Fractions (true): ',(y > th).mean(axis=0))
#=======================================================================================================================
# Submission
#=======================================================================================================================
# Same TTA pipeline on the test set
preds_t,y_t = learner.TTA(n_aug=16,is_test=True)
preds_t = np.stack(preds_t, axis=-1)
preds_t = sigmoid_np(preds_t)
pred_t = preds_t.max(axis=-1) #max works better for F1 macro score
def save_pred(pred, th=0.5, fname='protein_classification.csv'):
    """Write a submission CSV mapping each sample Id to its predicted classes.

    For every prediction row, the indices of classes whose score exceeds `th`
    are joined into a space-separated string; rows are then re-ordered to
    match the Id order of the sample submission file before saving.
    """
    pred_list = [' '.join(str(i) for i in np.nonzero(row > th)[0]) for row in pred]
    sample_df = pd.read_csv(SAMPLE)
    sample_list = list(sample_df.Id)
    pred_dic = dict(zip(learner.data.test_ds.fnames, pred_list))
    pred_list_cor = [pred_dic[id] for id in sample_list]
    df = pd.DataFrame({'Id': sample_list, 'Predicted': pred_list_cor})
    df.to_csv(fname, header=True, index=False)
# Manual thresholds
th_t = np.array([0.565,0.39,0.55,0.345,0.33,0.39,0.33,0.45,0.38,0.39,
                 0.34,0.42,0.31,0.38,0.49,0.50,0.38,0.43,0.46,0.40,
                 0.39,0.505,0.37,0.47,0.41,0.545,0.32,0.1])
print('Fractions: ',(pred_t > th_t).mean(axis=0))
save_pred(pred_t,th_t) # From manual threshold
# Automatic fitting the thresholds based on the public LB statistics.
# lb_prob[i] is the estimated fraction of test samples containing class i.
lb_prob = [
 0.362397820,0.043841336,0.075268817,0.059322034,0.075268817,
 0.075268817,0.043841336,0.075268817,0.010000000,0.010000000,
 0.010000000,0.043841336,0.043841336,0.014198783,0.043841336,
 0.010000000,0.028806584,0.014198783,0.028806584,0.059322034,
 0.010000000,0.126126126,0.028806584,0.075268817,0.010000000,
 0.222493880,0.028806584,0.010000000]
# I replaced 0 by 0.01 since there may be a rounding error leading to 0
def Count_soft(preds,th=0.5,d=50.0):
    """Soft per-class positive fraction: mean of a steep sigmoid (slope d) around th."""
    soft_binary = sigmoid_np(d * (preds - th))
    return soft_binary.mean(axis=0)
def fit_test(x,y):
    """Fit per-class thresholds so predicted class fractions match target fractions `y`.

    Solves for thresholds that make Count_soft(x, p) approximate `y` by least
    squares, with a tiny quadratic penalty (weight 1e-5) near 0.5.
    """
    n_classes = len(name_label_dict)
    start = 0.5 * np.ones(n_classes)
    penalty = 1e-5

    def residuals(p):
        fraction_gap = Count_soft(x, p) - y
        regulariser = penalty * (p - 0.5)
        return np.concatenate((fraction_gap, regulariser), axis=None)

    fitted, _ = opt.leastsq(residuals, start)
    return fitted
# Fit thresholds so test-set class fractions match the public-LB estimates,
# clamp to at least 0.1, then write submissions for each threshold strategy.
th_t = fit_test(pred_t,lb_prob)
th_t[th_t<0.1] = 0.1
print('Thresholds: ',th_t)
print('Fractions: ',(pred_t > th_t).mean(axis=0))
print('Fractions (th = 0.5): ',(pred_t > 0.5).mean(axis=0))
save_pred(pred_t,th_t,'protein_classification_f.csv') # based on public lb stats
save_pred(pred_t,th,'protein_classification_v.csv') # based on validation
save_pred(pred_t,0.5,'protein_classification_05.csv') # based on fixed threshold 0.5
#=======================================================================================================================
# using the threshold from validation set for classes not present in the public LB:
class_list = [8,9,10,15,20,24,27]
for i in class_list:
    th_t[i] = th[i]
save_pred(pred_t,th_t,'protein_classification_c.csv')
#=======================================================================================================================
# fitting thresholds based on the frequency of classes in the train dataset:
labels = pd.read_csv(LABELS).set_index('Id')
label_count = np.zeros(len(name_label_dict))
for label in labels['Target']:
    l = [int(i) for i in label.split()]
    # one-hot rows summed: adds 1 to every class present in this sample
    label_count += np.eye(len(name_label_dict))[l].sum(axis=0)
# bug fix: np.float was removed in NumPy 1.24; float64 is what it aliased
label_fraction = label_count.astype(np.float64)/len(labels)
print(label_count, label_fraction)
th_t = fit_test(pred_t,label_fraction)
th_t[th_t<0.05] = 0.05
print('Thresholds: ',th_t)
print('Fractions: ',(pred_t > th_t).mean(axis=0))
save_pred(pred_t,th_t,'protein_classification_t.csv') # based on frequency of classes in train
#=======================================================================================================================
# res34
# F1 macro: 0.7339006427813839
# F1 macro (th = 0.5): 0.6669998135148151
# F1 micro: 0.7723082957442635
#res101xt-m4
# Thresholds: [0.54491 0.75237 0.58362 0.55942 0.56169 0.52287 0.56564 0.58306 0.50261 0.52049 0.46712 0.5479 0.57008
# 0.71485 0.59936 0.1 0.66235 0.58874 0.51545 0.51548 0.52326 0.49656 0.65905 0.54701 0.68219 0.50362
# 0.48294 0.29036]
# F1 macro: 0.7011295048856508
# F1 macro (th = 0.5): 0.6415093521306193
# F1 micro: 0.7883417085427137
# # resnet101xt-swa
# [0.52095 0.67501 0.46876 0.62209 0.52894 0.55665 0.55442 0.48154 0.46129 0.75715 0.43572 0.586 0.64507
# 0.64826 0.55982 0.1 0.83022 0.90441 0.55107 0.51155 0.52846 0.4664 0.74345 0.52408 0.79122 0.46872
# 0.55224 0.1 ]
# F1 macro: 0.6819705865476475
# F1 macro (th = 0.5): 0.6223916910357309
# F1 micro: 0.7857503279846604
# sub_0.05 lb: 0.492
########################################################################################################################
# res34 grn+ 512 revised val
# Thresholds: [0.54664 0.5475 0.55301 0.50708 0.47384 0.5289 0.44358 0.5137 0.41192 0.4685 0.42144 0.54451 0.5089
# 0.53344 0.47533 0.12029 0.39405 0.40774 0.45618 0.46368 0.40192 0.47281 0.56206 0.49217 0.51224 0.49178
# 0.1 0.34105]
# F1 macro: 0.6304911952781457 # 0.451
# F1 macro (th = 0.5): 0.5779850745288859 # 0.438
# F1 micro: 0.6733807952769578
# res34 grn+0.5 swa 4x4 revised val
# Thresholds: [0.54103 0.56479 0.53996 0.55322 0.47398 0.54292 0.46768 0.53656 0.38274 0.48946 0.41035 0.4907 0.48226
# 0.5141 0.51645 0.1427 0.38583 0.42499 0.45048 0.4634 0.41046 0.45131 0.52466 0.51523 0.67688 0.48726
# 0.45562 0.34339]
# F1 macro: 0.666934899747442
# F1 macro (th = 0.5): 0.5910769947924901
# F1 micro: 0.7727588603196666
#grn34+0.9 swa 4x4 revised val
# Thresholds: [0.54138 0.56821 0.57645 0.49649 0.45076 0.5454 0.49167 0.52807 0.4007 0.43375 0.37413 0.52472 0.52156
# 0.44734 0.54172 0.1312 0.43421 0.42853 0.46424 0.48458 0.4138 0.45056 0.55984 0.50826 0.71608 0.48222
# 0.51216 0.35996]
# F1 macro: 0.6830337665787448
# F1 macro (th = 0.5): 0.6158043015502873
# F1 micro: 0.7779433681073026
#grn34+0.5 256 revised val
# Thresholds: [0.53031 0.61858 0.58287 0.50504 0.56897 0.6039 0.48341 0.57169 0.48902 0.61432 0.53577 0.60106 0.52176
# 0.49809 0.59424 0.1445 0.46618 0.50464 0.50754 0.53109 0.51841 0.51343 0.50707 0.58757 0.57555 0.49086
# 0.53558 0.52238]
# F1 macro: 0.6661630089375489
# F1 macro (th = 0.40): 0.5571912452206964
# F1 macro (th = 0.45): 0.6171187769631362
# F1 macro (th = 0.50): 0.6415479086760095 # 0.470
# F1 macro (th = 0.55): 0.6489516914324852
# F1 macro (th = 0.60): 0.6026006073694331
# F1 macro (th = 0.65): 0.553418550910492
# F1 micro: 0.7577577577577577
# Fractions: [0.4482 0.03153
# wrn - validation-stratified run 1
# Thresholds: [0.54149 0.55812 0.56723 0.58857 0.55518 0.59766 0.5105 0.59866 0.73868 0.69084 0.56855 0.66444 0.57103
# 0.73118 0.5686 0.63132 0.61924 0.57267 0.54284 0.48375 0.53864 0.47911 0.57397 0.56129 0.78236 0.1
# 0.55899 0.36679]
# F1 macro: 0.7199049787804883
# F1 macro (th = 0.5): 0.6730420723769626 # 0.
# F1 micro: 0.688504734639947
# wrn - validation-stratified run 2 -long -16 more epoch
# Thresholds: [0.56716 0.63661 0.59092 0.62034 0.52683 0.56752 0.50399 0.61647 0.65823 0.57482 0.52132 0.68148 0.60175
# 0.57967 0.5999 0.61468 0.48772 0.56341 0.5741 0.49707 0.5276 0.49113 0.57197 0.54825 0.62061 0.49563
# 0.624 0.27953]
# F1 macro: 0.7361542316545664
# F1 macro (th = 0.5): 0.6932206090395802
# F1 micro: 0.7887470695493618
# wrn - validation-stratified run 3 -long -balanced train - 16 more epoch --overfit
# 7
# 0.220173
# 0.761697
# 0.856499
# 0.610779
# 0.706326
# Thresholds: [0.55793 0.78152 0.66333 0.6825 0.65572 0.75289 0.74526 0.63238 0.53035 0.36548 0.42722 0.72039 0.71662
# 0.75241 0.66181 0.40304 0.78582 0.87507 0.72847 0.61643 0.81325 0.5994 0.64994 0.60596 0.8597 0.52862
# 0.84814 0.13508]
# F1 macro: 0.720740962219489
# F1 macro (th = 0.5): 0.6314560071256061
# F1 micro: 0.7730109204368174
# wrn - validation-stratified run 4 -long -unbalanced train - 16 more epoch --overfit Augmentation+
# 7 0.257098 0.746422 0.866529 0.583308 0.688742
# Thresholds: [0.5979 0.73537 0.634 0.71561 0.69005 0.69933 0.65443 0.63613 0.8177 0.42255 0.4162 0.7414 0.76869
# 0.78629 0.73087 0.45202 0.83717 0.77659 0.72559 0.63488 0.70433 0.60525 0.71827 0.61926 0.76373 0.52041
# 0.78393 0.16647]
# F1 macro: 0.7102068815363216
# F1 macro (th = 0.5): 0.6060142764385398
# F1 micro: 0.
#
# wrn - validation-stratified run 4 -long -unbalanced train - 16 more epoch -grn+
# 7 0.309054 0.610084 0.917235 0.591056 0.710322
#Thresholds: [0.56554 0.67176 0.63116 0.58562 0.54196 0.63124 0.54626 0.61112 0.60392 0.51268 0.4329 0.70066 0.63997
# 0.71733 0.63157 0.55963 0.64437 0.76405 0.69452 0.5257 0.57838 0.52668 0.55164 0.60304 0.77591 0.50272
#0.53615 0.41276]
#F1 macro: 0.747663269393283
#F1 macro (th = 0.5): 0.6990677584721688
#F1 micro: 0.7917927134026042
# Thresholds: [0.51905 0.63727 0.59806 0.61262 0.61216 0.60486 0.54142 0.60487 0.42234 0.58749 0.52914 0.6674 0.58175
# 0.69042 0.48655 0.1 0.56512 0.56398 0.53958 0.53107 0.45948 0.51422 0.57877 0.62183 0.51312 0.50313
# 0.54336 0.48171]
# F1 macro: 0.7065067795265598
# F1 macro (th = 0.5): 0.6705134909023859
# F1 micro: 0.7804733141895237
# incepres focal loss grn+
# 7 0.320347 0.61997 0.90574 0.5458 0.672016
#Thresholds: [0.57556 0.59779 0.62277 0.54471 0.49556 0.61897 0.57477 0.57172 0.6501 0.68403 0.46683 0.64066 0.58307
#0.64869 0.65064 0.63917 0.71538 0.78358 0.53561 0.57432 0.57465 0.51983 0.53168 0.61909 0.83321 0.52462
#0.73072 0.49235]
#F1 macro: 0.7212613994045415
#F1 macro (th = 0.5): 0.6801362915766137
#F1 micro: 0.7713873968295559
# incepres grn+ f1 loss
# 7 0.087745 0.224351 0.756631 0.7749 0.761466
# Thresholds: [0.89364 0.90076 0.91226 0.60601 0.9128 0.91639 0.90742 0.90624 0.58812 0.64672 0.60698 0.92495 0.91406
# 0.92322 0.8833 0.68635 0.77239 0.90479 0.92075 0.91763 0.75379 0.8955 0.92912 0.90515 0.79271 0.91636
# 0.94046 0.5 ]
# F1 macro: 0.6629372155000963
# F1 macro (th = 0.5): 0.6038595182923132
# F1 micro: 0.7442261289210618
|
nilq/baby-python
|
python
|
"""Reverse-engineered client for the LG SmartThinQ API.
"""
from .core import * # noqa
from .client import * # noqa
from .ac import * # noqa
from .dishwasher import * # noqa
from .dryer import * # noqa
from .refrigerator import * # noqa
from .washer import * # noqa
__version__ = '1.3.0'
|
nilq/baby-python
|
python
|
# dic = {'key': 'value', 'key2': 'value2'}
import json
#
# ret = json.dumps(dic) # 序列化
# print(dic, type(dic))
# print(ret, type(ret))
#
# res = json.loads(ret) # 反序列化
# print(res, type(res))
# 问题1
# dic = {1: 'value', 2: 'value2'}
# ret = json.dumps(dic) # 序列化
# print(dic, type(dic))
# print(ret, type(ret))
#
# res = json.loads(ret) # 反序列化
# print(res, type(res))
# 问题2
# dic = {1: [1, 2, 3], 2: (4, 5, 'aa')}
# ret = json.dumps(dic) # 序列化
# print(dic, type(dic))
# print(ret, type(ret))
#
# res = json.loads(ret) # 反序列化
# print(res, type(res))
# 问题3
# s = {1, 2, 'aaa'}
# json.dumps(s)
# 问题4 # TypeError: keys must be a string
# json.dumps({(1, 2, 3): 123})
# json 在所有的语言之间都通用 : json序列化的数据 在python上序列化了 那在java中也可以反序列化
# 能够处理的数据类型是非常有限的 : 字符串 列表 字典 数字
# 字典中的key只能是字符串
# 后端语言 java c c++ c#
# 前端语言 在网页上展示
# 向文件中记录字典
import json
# dic = {'key': 'value', 'key2': 'value2'}
# ret = json.dumps(dic) # 序列化
# with open('json_file', 'a') as f:
# f.write('\n')
# f.write(ret)
# 从文件中读取字典
# with open('json_file', 'r') as f:
# str_dic = f.read()
# dic = json.loads(str_dic)
# print(dic.keys())
# dump load 是直接操作文件的
# dic = {'key1': 'value1', 'key2': 'value2'}
# with open('json_file', 'a') as f:
# json.dump(dic, f)
# with open('json_file', 'r') as f:
# dic = json.load(f)
# print(dic.keys())
# 问题5 不支持连续的存 取
# dic = {'key1': 'value1', 'key2': 'value2'}
# with open('json_file', 'a') as f:
# json.dump(dic, f)
# json.dump(dic, f)
# json.dump(dic, f)
# with open('json_file', 'r') as f:
# dic = json.load(f)
# print(dic.keys())
# 需求 :就是想要把一个一个的字典放到文件中,再一个一个取出来???
# dic = {'key1': 'value1', 'key2': 'value2'}
#
# with open('json_file', 'a') as f:
# str_dic = json.dumps(dic)
# f.write(str_dic + '\n')
# str_dic = json.dumps(dic)
# f.write(str_dic + '\n')
# str_dic = json.dumps(dic)
# f.write(str_dic + '\n')
#
# with open('json_file', 'r') as f:
# for line in f:
# dic = json.loads(line.strip())
# print(dic.keys())
# json
# dumps loads
# 在内存中做数据转换 :
# dumps 数据类型 转成 字符串 序列化
# loads 字符串 转成 数据类型 反序列化
# dump load
# 直接将数据类型写入文件,直接从文件中读出数据类型
# dump 数据类型 写入 文件 序列化
# load 文件 读出 数据类型 反序列化
# json是所有语言都通用的一种序列化格式
# 只支持 列表 字典 字符串 数字
# 字典的key必须是字符串
# dic = {'key': '你好'}
# print(json.dumps(dic, ensure_ascii=False))
import json

# Pretty-print a dict as JSON: keys sorted, 4-space indent, custom separators,
# and ensure_ascii=False so the Chinese strings are emitted verbatim.
data = {'username': ['李华', '二愣子'], 'sex': 'male', 'age': 16}
json_dic2 = json.dumps(data, sort_keys=True, indent=4, separators=(',', ':'), ensure_ascii=False)
print(json_dic2)
# write to a file / send over the network
|
nilq/baby-python
|
python
|
__author__ = 'xubinggui'
class Student(object):
    """A student with a name and a numeric score."""

    def __init__(self, name, score):
        self.name = name
        self.score = score

    def print_score(self):
        # side effect only: writes the score to stdout
        print(self.score)
# Demo: create a student and print the score (prints "59")
bart = Student('Bart Simpson', 59)
bart.print_score()
|
nilq/baby-python
|
python
|
import unittest
from app.models import Pitch, User
from flask_login import current_user
from app import db
class TestPitch(unittest.TestCase):
    """Unit tests for the Pitch model and its link to a User."""

    def setUp(self):
        """Create a user and a pitch fixture before each test."""
        self.user_joe = User(
            username='jack', password='password', email='xyz@gmail.com')
        self.new_pitch = Pitch(title="Test", pitch=' This is a test')
        # bug fix: the pitch was never linked to its author, so the user
        # assertion below could not pass
        self.new_pitch.user = self.user_joe

    def tearDown(self):
        """Remove all rows created by a test."""
        Pitch.query.delete()
        User.query.delete()

    def test_instance(self):
        self.assertTrue(isinstance(self.new_pitch, Pitch))

    def test_check_instance_variables(self):
        # assertEquals is a deprecated alias; use assertEqual
        self.assertEqual(self.new_pitch.title, "Test")
        # bug fix: expected value now matches the fixture string
        # (was 'This is test' vs the stored ' This is a test')
        self.assertEqual(self.new_pitch.pitch, ' This is a test')
        self.assertEqual(self.new_pitch.user, self.user_joe)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
from .unique_identifiable import CloudioUniqueIdentifiable
class CloudioObjectContainer(CloudioUniqueIdentifiable, metaclass=ABCMeta):
    """Interface to be implemented by all classes that can hold cloud.iO objects."""
    # bug fix: the Python-2 style `__metaclass__ = ABCMeta` attribute is silently
    # ignored by Python 3, so @abstractmethod was not enforced. Declaring the
    # metaclass in the class header restores enforcement.
    # NOTE(review): assumes CloudioUniqueIdentifiable's metaclass is compatible
    # with ABCMeta — confirm against its definition.

    @abstractmethod
    def attribute_has_changed_by_endpoint(self, attribute):
        """Called when an attribute changed on the endpoint side.

        :param attribute: Attribute which has changed.
        :type attribute: CloudioAttribute
        """
        pass

    @abstractmethod
    def attribute_has_changed_by_cloud(self, attribute):
        """The attribute has changed from the cloud.

        :param attribute: Attribute which has changed.
        :type attribute: CloudioAttribute
        """
        pass

    @abstractmethod
    def is_node_registered_within_endpoint(self):
        """Returns true if the node the attribute is part of is registered within an endpoint, false otherwise.

        :return: True if the node is registered within the endpoint, false if not.
        :rtype: bool
        """
        pass

    @abstractmethod
    def get_objects(self):
        """Returns the list of child objects contained inside this container.

        :return: Child objects
        :rtype: {CloudioObject}
        """
        pass

    @abstractmethod
    def get_parent_object_container(self):
        """Returns the object container's parent object container. Note that if the actual
        object container is not embedded into another object controller, the method returns null.
        """
        pass

    @abstractmethod
    def set_parent_object_container(self, object_container):
        """Sets the parent object container of the object container. Note that object containers
        can not be moved, so this method throws a runtime exception if someone tries to move the
        object container to a new parent or in the case the actual container is a node, which can
        not be part of another object container.
        """
        pass

    @abstractmethod
    def get_parent_node_container(self):
        """Returns the object container's parent node container. Note that if the actual object
        container is not a node, the method returns null.
        """
        pass

    @abstractmethod
    def set_parent_node_container(self, node_container):
        """Sets the parent node container of the object container (node). Note that object
        containers can not be moved, so this method throws a runtime exception if someone tries
        to move the object container to a new parent or in the case the actual container is not
        a node.
        """
        pass

    @abstractmethod
    def find_attribute(self, location):
        """Finds the given attribute inside the child objects using the given location
        path (stack). If an attribute was found at the given location, a reference to that
        attribute is returned, otherwise null is returned.
        """
        pass

    @abstractmethod
    def find_object(self, location):
        """Finds the given object inside the objects tree using the given location
        path (stack). If the object was found at the given location, a reference to
        that object is returned, otherwise null is returned.
        """
        pass
|
nilq/baby-python
|
python
|
import os, sys; sys.path.append(os.path.join("..", "..", ".."))
from pattern.en import parse, Text
# The easiest way to analyze the output of the parser is to create a Text.
# A Text is a "parse tree" of linked Python objects.
# A Text is essentially a list of Sentence objects.
# Each Sentence is a list of Word objects.
# Each Word can be part of a Chunk object, accessible with Word.chunk.
# NOTE: Python 2 script (uses `print` statements) — keep running under Python 2.
s = "I eat pizza with a silver fork."
s = parse(s)   # tag and chunk the sentence
s = Text(s)    # wrap the parser output in a Text parse tree
print s[0].words # A list of all the words in the first sentence.
print s[0].chunks # A list of all the chunks in the first sentence.
print s[0].chunks[-1].words
print
# Walk the tree: Text -> Sentence -> Word, printing per-word annotations.
for sentence in s:
    for word in sentence:
        print word.string, \
              word.type, \
              word.chunk, \
              word.pnp
# A Text can be exported as an XML-string (among other).
print
print s.xml
|
nilq/baby-python
|
python
|
import re
from enum import Enum
from operator import attrgetter
from re import RegexFlag
from typing import List, Union, Match, Dict, Optional
from annotation.models.models import Citation
from annotation.models.models_enums import CitationSource
from library.log_utils import report_message
from ontology.models import OntologyService, OntologyTerm
class MatchType(Enum):
    """How the ID portion following a recognised database prefix is parsed."""

    NUMERIC = 1             # digits, optionally followed by comma-separated repeats
    ALPHA_NUMERIC = 2       # letters, digits, '_' and '-'
    SIMPLE_NUMBERS = 3      # a plain run of three or more digits
    ENTIRE_UNTIL_SPACE = 4  # everything up to whitespace, ')', end of text or '. '
class DbRefRegex:
    """Describes how to detect references to one external database in free text.

    Every instance automatically registers itself in the class-level list
    `_all_db_ref_regexes`, so the complete collection can later be compiled
    into a single scanner (see DbRefRegexes).
    """

    _all_db_ref_regexes: List['DbRefRegex'] = []

    def __init__(self,
                 db: str,
                 prefixes: Union[str, List[str]],
                 link: str,
                 match_type: MatchType = MatchType.NUMERIC,
                 min_length: int = 3,
                 expected_length: Optional[int] = None):
        """
        Creates an instance of a external id/link detection and automatically registers it with the complete collection.
        The end result allowing us to scan text for any number of kinds of links.
        :param db: An identifier uniquely associated with the DB
        :param prefixes: A single string or array of strings that will be scanned for in text - IMPORTANT - these will be interpreted in regex
        :param link: The URL the link will go to with ${1} being replaced with the value found after the prefix
        :param match_type: Determines if the link is a series of numbers, alpha-numeric etc - be specific to avoid false positives
        :param min_length: How long the ID part must be after the prefix, helps avoid false positives such as the gene rs1 being mistaken for SNP
        :param expected_length: If set, IDs are left-padded with '0' to this length (see fix_id)
        """
        if isinstance(prefixes, str):
            prefixes = [prefixes]
        self.db = db
        self.prefixes = prefixes
        self.link = link
        self.match_type = match_type
        self.min_length = min_length or 1  # guard: an explicit 0/None still means "at least 1"
        self.expected_length = expected_length
        self._all_db_ref_regexes.append(self)

    def link_for(self, idx: int) -> str:
        """Return the URL for the given ID (zero-padded when expected_length is set)."""
        id_str = self.fix_id(str(idx))
        return self.link.replace("${1}", id_str)

    def fix_id(self, id_str: str) -> str:
        """Left-pad the ID with '0' up to expected_length, when configured."""
        if self.expected_length:
            id_str = id_str.rjust(self.expected_length, '0')
        return id_str

    def __eq__(self, other):
        # db should be unique in DbRefRegex
        # bug fix: comparing against a non-DbRefRegex raised AttributeError;
        # returning NotImplemented lets Python fall back to identity comparison
        if not isinstance(other, DbRefRegex):
            return NotImplemented
        return self.db == other.db

    def __hash__(self):
        return hash(self.db)
class DbRegexes:
    """Catalogue of every supported external database pattern.

    NOTE: each DbRefRegex registers itself on construction, so the order of
    these definitions fixes both the registry order and the prefix-matching
    priority of the compiled scanner — do not reorder.
    """
    CLINGEN = DbRefRegex(db="ClinGen", prefixes="CA", link="http://reg.clinicalgenome.org/redmine/projects/registry/genboree_registry/by_caid?caid=CA${1}", match_type=MatchType.SIMPLE_NUMBERS)
    CLINVAR = DbRefRegex(db="Clinvar", prefixes="VariationID", link="https://www.ncbi.nlm.nih.gov/clinvar/variation/${1}")
    COSMIC = DbRefRegex(db="COSMIC", prefixes="COSM", link="https://cancer.sanger.ac.uk/cosmic/mutation/overview?id=${1}")
    DOID = DbRefRegex(db="DOID", prefixes="DOID", link=OntologyService.URLS[OntologyService.DOID], min_length=OntologyService.EXPECTED_LENGTHS[OntologyService.DOID], expected_length=OntologyService.EXPECTED_LENGTHS[OntologyService.DOID])
    GTR = DbRefRegex(db="GTR", prefixes="GTR", link="https://www.ncbi.nlm.nih.gov/gtr/tests/${1}/overview/")
    HP = DbRefRegex(db="HP", prefixes=["HPO", "HP"], link=OntologyService.URLS[OntologyService.HPO], expected_length=OntologyService.EXPECTED_LENGTHS[OntologyService.HPO])
    HGNC = DbRefRegex(db="HGNC", prefixes="HGNC", link=OntologyService.URLS[OntologyService.HGNC], expected_length=OntologyService.EXPECTED_LENGTHS[OntologyService.HGNC])
    MEDGEN = DbRefRegex(db="MedGen", prefixes="MedGen", link="https://www.ncbi.nlm.nih.gov/medgen/?term=${1}", match_type=MatchType.ALPHA_NUMERIC)
    MONDO = DbRefRegex(db="MONDO", prefixes="MONDO", link=OntologyService.URLS[OntologyService.MONDO], expected_length=OntologyService.EXPECTED_LENGTHS[OntologyService.MONDO])
    NCBIBookShelf = DbRefRegex(db="NCBIBookShelf", prefixes=["NCBIBookShelf"], link="https://www.ncbi.nlm.nih.gov/books/${1}", match_type=MatchType.ALPHA_NUMERIC)
    NIHMS = DbRefRegex(db="NIHMS", prefixes="NIHMS", link="https://www.ncbi.nlm.nih.gov/pubmed/?term=NIHMS${1}")
    # smallest OMIM starts with a 1, so there's no 0 padding there, expect min length
    OMIM = DbRefRegex(db="OMIM", prefixes=["OMIM", "MIM"], link=OntologyService.URLS[OntologyService.OMIM], min_length=OntologyService.EXPECTED_LENGTHS[OntologyService.OMIM], expected_length=OntologyService.EXPECTED_LENGTHS[OntologyService.OMIM])
    ORPHA = DbRefRegex(db="Orphanet", prefixes=["ORPHANET", "ORPHA"], link=OntologyService.URLS[OntologyService.ORPHANET], expected_length=OntologyService.EXPECTED_LENGTHS[OntologyService.ORPHANET])
    PMC = DbRefRegex(db="PMC", prefixes="PMCID", link="https://www.ncbi.nlm.nih.gov/pubmed/?term=PMC${1}")
    PUBMED = DbRefRegex(db="PubMed", prefixes=["PubMed", "PMID", "PubMedCentral"], link="https://www.ncbi.nlm.nih.gov/pubmed/?term=${1}")
    SNP = DbRefRegex(db="SNP", prefixes="rs", link="https://www.ncbi.nlm.nih.gov/snp/${1}", match_type=MatchType.SIMPLE_NUMBERS)
    SNOMEDCT = DbRefRegex(db="SNOMED-CT", prefixes=["SNOMED-CT", "SNOMEDCT"], link="https://snomedbrowser.com/Codes/Details/${1}")
    UNIPROTKB = DbRefRegex(db="UniProtKB", prefixes="UniProtKB", link="https://www.uniprot.org/uniprot/${1}", match_type=MatchType.ALPHA_NUMERIC)
    # bare URLs are captured whole (everything until whitespace)
    HTTP = DbRefRegex(db="HTTP", prefixes="http:", link="http:${1}", match_type=MatchType.ENTIRE_UNTIL_SPACE)
    HTTPS = DbRefRegex(db="HTTPS", prefixes="https:", link="https:${1}", match_type=MatchType.ENTIRE_UNTIL_SPACE)
    FTP = DbRefRegex(db="FTP", prefixes="ftp:", link="ftp:${1}", match_type=MatchType.ENTIRE_UNTIL_SPACE)
class DbRefRegexResult:
    """A single external-database reference found in text.

    On construction the reference is resolved (best-effort) against local
    data: ontology terms provide a summary, citation sources an internal pk.
    """

    def __init__(self, cregx: DbRefRegex, idx: str, match: Match):
        self.cregx = cregx
        self.idx = cregx.fix_id(idx)
        self.match = match
        self.internal_id = None
        self.summary = None
        # this is where we check our database to see if we know what this reference is about
        if self.db in OntologyService.LOCAL_ONTOLOGY_PREFIXES:
            term_id = f"{self.db}:{self.idx}"
            if term := OntologyTerm.objects.filter(id=term_id).first():
                self.summary = term.name
        try:
            if source := CitationSource.CODES.get(self.db):
                citation, _ = Citation.objects.get_or_create(citation_source=source, citation_id=idx)
                self.internal_id = citation.pk
        except Exception:
            # bug fix: was a bare `except:` which also swallowed
            # SystemExit/KeyboardInterrupt; resolution stays best-effort
            report_message(message=f"Could not resolve external DB reference for {self.db}:{self.idx}")

    @property
    def id_fixed(self):
        """Canonical 'DB:paddedId' form."""
        return f"{self.db}:{self.cregx.fix_id(self.idx)}"

    @property
    def url(self):
        """The fully-resolved link for this reference."""
        return self.cregx.link.replace('${1}', self.idx)

    @property
    def idx_num(self):
        """
        Attempt to convert the id to a number, only use for sorting.
        Some ids have a version suffix, so using float for the sake of decimals
        """
        try:
            return float(self.idx)
        except (TypeError, ValueError):
            # bug fix: narrowed from a bare `except:`; non-numeric ids sort as 0
            return 0

    @property
    def db(self):
        return self.cregx.db

    def to_json(self):
        jsonny = {'id': '%s: %s' % (self.db, self.idx), 'db': self.db, 'idx': self.idx, 'url': self.url}
        if self.summary:
            jsonny['summary'] = self.summary
        if self.internal_id:
            jsonny['internal_id'] = self.internal_id
        return jsonny

    def __str__(self):
        return f'{self.cregx.db}:{self.idx}'
# Patterns that capture the ID portion right after a matched prefix
# (dispatched on MatchType in DbRefRegexes.search):
_simple_numbers = re.compile('([0-9]{3,})')  # SIMPLE_NUMBERS: a plain run of 3+ digits
_num_regex = re.compile('[:#\\s]*([0-9]+)')  # NUMERIC: digits after optional ':', '#' or whitespace
_num_repeat_regex = re.compile('\\s*,[:#\\s]*([0-9]+)')  # comma-separated continuation of NUMERIC ids
_word_regex = re.compile('[:# ]*([A-Za-z0-9_-]+)') # no repeats for words, too risky
_entire_until_space = re.compile('(.*?)(?:[)]|\\s|$|[.] )')  # ENTIRE_UNTIL_SPACE: up to ')', whitespace, end, or '. '
class DbRefRegexes:
    """Scans text for external-database references using a set of DbRefRegex entries."""

    def __init__(self, regexes: List[DbRefRegex]):
        """Compile one case-insensitive alternation over all prefixes.

        :param regexes: the DbRefRegex entries to scan for (order sets priority)
        """
        self.regexes = regexes
        # lower-cased prefix -> the DbRefRegex that owns it
        self.prefix_map: Dict[str, DbRefRegex] = dict()
        prefixes: List[str] = list()
        for regex in self.regexes:
            for prefix in regex.prefixes:
                prefix = prefix.lower()
                self.prefix_map[prefix] = regex
                prefixes.append(prefix)
        self.prefix_regex = re.compile('(' + '|'.join(prefixes) + ')', RegexFlag.IGNORECASE)

    def link_html(self, text: str) -> str:
        """Return `text` with every recognised reference wrapped in an <a> tag."""
        # process matches right-to-left so earlier spans stay valid while text grows
        db_matches = reversed(self.search(text, sort=False))
        for db_match in db_matches:
            span = db_match.match.span()
            # keep a leading separator character outside the link text
            if text[span[0]] in (':', ',', ' ', '#'):
                span = [span[0]+1, span[1]]
            before, middle, after = text[0:span[0]], text[span[0]:span[1]], text[span[1]:]
            text = f"{before}<a href='{db_match.url}'>{middle}</a>{after}"
        return text

    def search(self, text: str, default_regex: DbRefRegex = None, sort: bool = True) -> List[DbRefRegexResult]:
        """
        @param text The text to be searched for ID patterns
        @param default_regex If the field is expected to be a specific kind of id
        (e.g. db_rs_id should default to SNP). Only gets used if no match can be found
        and will look for just the number part, e.g. if db_rs_id is "23432" instead of "rs23432"
        it will still work).
        @param sort If true sorts the results by database and id, otherwise leaves them in order of discovery
        """
        results: List[DbRefRegexResult] = list()

        def append_result_if_length(db_regex: DbRefRegex, match: Optional[Match]) -> bool:
            """
            :param db_regex: The Database Regex we were searching for
            :param match: The regex match
            :return: True if the ID looked valid and was recorded, False otherwise
            """
            nonlocal results
            if match and len(match.group(1)) >= db_regex.min_length:
                results.append(DbRefRegexResult(cregx=db_regex, idx=match.group(1), match=match))
                return True
            return False

        for match in re.finditer(self.prefix_regex, text):
            prefix = match.group(1).lower()
            db_regex = self.prefix_map[prefix]
            find_from = match.end(0)  # ID parsing starts immediately after the prefix
            if db_regex.match_type == MatchType.SIMPLE_NUMBERS:
                match = _simple_numbers.match(text, find_from)
                append_result_if_length(db_regex, match)
            elif db_regex.match_type == MatchType.ALPHA_NUMERIC:
                match = _word_regex.match(text, find_from)
                append_result_if_length(db_regex, match)
            elif db_regex.match_type == MatchType.ENTIRE_UNTIL_SPACE:
                match = _entire_until_space.match(text, find_from)
                append_result_if_length(db_regex, match)
            else:
                match = _num_regex.match(text, find_from)
                if append_result_if_length(db_regex, match):
                    find_from = match.end(0)
                    # consume comma-separated follow-on IDs, e.g. "PMID: 123, 456"
                    while True:
                        match = _num_repeat_regex.match(text, find_from)
                        if append_result_if_length(db_regex, match):
                            find_from = match.end(0)
                        else:
                            break
        if not results and default_regex:
            match = None
            if default_regex.match_type == MatchType.SIMPLE_NUMBERS:
                # NOTE(review): uses _word_regex here rather than _simple_numbers —
                # looks intentional for bare-field ids, but confirm
                match = _word_regex.match(text)
            else:
                match = _num_regex.match(text)
            append_result_if_length(default_regex, match)
        if sort:
            results.sort(key=attrgetter('db', 'idx_num', 'idx'))
        return results
# Module-level scanner built from every DbRefRegex registered at import time
db_ref_regexes = DbRefRegexes(DbRefRegex._all_db_ref_regexes)
|
nilq/baby-python
|
python
|
import pprint
from flask import Flask
from flask import request
app = Flask(__name__)
@app.route('/', methods=['POST'])
def hello_world():
    """Echo the JSON body of a POST request back to the caller.

    NOTE(review): with silent=True, get_json returns None when the body is
    missing or not valid JSON, and returning None from a Flask view fails —
    confirm callers always POST JSON.
    """
    content = request.get_json(silent=True)
    pprint.pprint(content)  # log the received payload to stdout
    return content
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
from __future__ import print_function
import tensorflow as tf
import os, collections, sys, subprocess, io
from abc import abstractmethod
import numpy as np
def flattern(A):
    '''
    Recursively flatten a list whose elements may themselves be lists
    (of strings, or anything else), preserving left-to-right order.
    Based on https://stackoverflow.com/questions/17864466/flatten-a-list-of-strings-and-lists-of-strings-and-lists-in-python.
    '''
    flat = []
    for element in A:
        part = flattern(element) if isinstance(element, list) else [element]
        flat.extend(part)
    return flat
def save_item_to_id(item_to_id, file, encoding):
    '''
    Saves a item_to_id mapping to file, one "<item>\\t<id>" line per entry.

    Empty or single-space items are reported to stdout because they will not
    survive the whitespace-based reload in load_item_to_id.
    '''
    # bug fix: dict.iteritems() is Python 2 only — items() works on both;
    # also close the file deterministically via a context manager
    with io.open(file, 'w', encoding=encoding) as out:
        for item, id_ in item_to_id.items():
            if item == '':
                print('EMPTY ELEMENT')
            if item == ' ':
                print('SPACE')
            out.write(u'{0}\t{1}\n'.format(item, id_))
def load_item_to_id(file, encoding):
    '''
    Loads an item_to_id mapping and corresponding id_to_item mapping from file.

    Each line is split on whitespace into (item, id) — items containing
    spaces are therefore unsupported (see the warnings in save_item_to_id).
    :return: (item_to_id dict, id_to_item dict)
    '''
    item_to_id = {}
    id_to_item = {}
    # bug fix: the file handle was never closed (iterated io.open directly);
    # a context manager releases it deterministically
    with io.open(file, 'r', encoding=encoding) as f:
        for line in f:
            l = line.strip().split()
            item_to_id[l[0]] = int(l[1])
            id_to_item[int(l[1])] = l[0]
    return item_to_id, id_to_item
class LMData(object):
    '''
    The input data: words, batches across sentence boundaries.

    Reads the train/validation/test files, builds (or loads) the
    item-to-id vocabulary mapping and serves (input, target) batches.
    '''

    def __init__(self, config, eval_config, TRAIN, VALID, TEST):
        '''
        Arguments:
            config: configuration dictionary, specifying all parameters used for training
            eval_config: configuration dictionary, specifying all parameters used for testing
            TRAIN: boolean indicating whether we want to train or not
            VALID: boolean indicating whether we want to validate or not
            TEST: boolean indicating whether we want to test or not
        '''
        self.config = config
        self.eval_config = eval_config
        self.TRAIN = TRAIN
        self.VALID = VALID
        self.TEST = TEST
        # if we want to train with a limited vocabulary, words not in the vocabulary
        # should already be mapped to UNK
        # data files should be of format train_50k-unk.txt etc. for a 50k vocabulary
        if config['vocab']:
            train_file = "train_" + str(config['vocab']) + "k-unk.txt"
            valid_file = "valid_" + str(config['vocab']) + "k-unk.txt"
            test_file = "test_" + str(config['vocab']) + "k-unk.txt"
            self.train_path = os.path.join(config['data_path'], train_file)
            self.valid_path = os.path.join(config['data_path'], valid_file)
            self.test_path = os.path.join(config['data_path'], test_file)
        else:
            self.train_path = os.path.join(config['data_path'], "train.txt")
            self.valid_path = os.path.join(config['data_path'], "valid.txt")
            self.test_path = os.path.join(config['data_path'], "test.txt")
        self.batch_size = config['batch_size']
        self.num_steps = config['num_steps']
        self.eval_batch_size = eval_config['batch_size']
        self.eval_num_steps = eval_config['num_steps']
        # position of the batching cursor within the current dataset
        self.iterator = 0
        self.end_reached = False
        # default encoding = utf-8, specify in config file if otherwise
        if 'encoding' in self.config:
            self.encoding = self.config['encoding']
        else:
            self.encoding = "utf-8"
        self.id_to_item = {}
        self.item_to_id = {}
        # by default, unknown words are represented with <unk>
        # if this is not the case for a certain dataset, add it here
        if 'CGN' in self.config['data_path'] or \
                'WSJ/88' in self.config['data_path']:
            self.unk = '<UNK>'
            self.replace_unk = '<unk>'
        else:
            self.unk = '<unk>'
            self.replace_unk = '<UNK>'
        # certain modes (rescoring, prediction, debugging) read their test data
        # from a file named in the config instead of the default test set
        if 'rescore' in self.config and isinstance(self.config['rescore'], str):
            self.test_path = self.config['rescore']
        elif 'predict_next' in self.config and isinstance(self.config['predict_next'], str):
            self.test_path = self.config['predict_next']
        elif 'debug2' in self.config and isinstance(self.config['debug2'], str):
            self.test_path = self.config['debug2']
        elif 'other_test' in self.config:
            self.test_path = self.config['other_test']
        if 'valid_as_test' in self.config:
            self.test_path = self.valid_path
        # default symbol used to pad sentences to a fixed length
        self.PADDING_SYMBOL = '@'

    def read_items(self, filename):
        '''
        Returns a list of all WORDS in filename.
        '''
        with tf.gfile.GFile(filename, "r") as f:
            # Wikitext: more than 1 sentence per line, also introduce <eos> at ' . '
            # add here other datasets that contain more than 1 sentence per line
            if "WikiText" in self.config['data_path']:
                data = f.read().decode(self.encoding).replace("\n", " <eos> ").replace(" . "," <eos> ").split()
            elif 'no_eos' in self.config:
                data = f.read().decode(self.encoding).replace("\n", " ").split()
            else:
                data = f.read().decode(self.encoding).replace("\n", " <eos> ").split()
            # make sure there is only 1 symbol for unknown words
            data = [self.unk if word==self.replace_unk else word for word in data]
        return data

    @abstractmethod
    def calc_longest_sent(self, all_data):
        raise NotImplementedError("Abstract class.")

    @abstractmethod
    def padding(self, dataset, total_length):
        raise NotImplementedError("Abstract class.")

    @abstractmethod
    def pad_data(self, all_data, max_length):
        raise NotImplementedError("Abstract class.")

    def build_vocab(self, filename):
        '''
        Returns an item-to-id and id-to-item mapping for all words (or characters) in filename.
        Arguments:
            filename: name of file for which the mapping will be built
        Returns:
            item_to_id mapping and id_to_item mapping
        '''
        data = self.read_items(filename)
        counter = collections.Counter(data)
        # counter.items() = list of the words in data + their frequencies, then sorted according to decreasing frequency
        count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
        # words = list of all the words (in decreasing frequency)
        items, _ = list(zip(*count_pairs))
        # make a dictionary with a mapping from each word to an id; word with highest frequency gets lowest id etc.
        item_to_id = dict(zip(items, range(len(items))))
        # remove empty element and space
        if '' in item_to_id:
            item_to_id.pop('')
        if ' ' in item_to_id and not 'char' in self.config:
            item_to_id.pop(' ')
        # reverse dictionary
        id_to_item = dict(zip(range(len(items)), items))
        # make sure there is a special token for unknown words
        if not self.unk in item_to_id:
            item_to_id[self.unk] = len(item_to_id)
            id_to_item[len(id_to_item)] = self.unk
        # add <bos>: used for sentence-level batches, or
        # for discourse-level models that are use for e.g. rescoring
        item_to_id['<bos>'] = len(item_to_id)
        id_to_item[len(id_to_item)] = '<bos>'
        return item_to_id, id_to_item

    def extend_vocab(self, filename):
        '''
        If there already is a vocabulary, this function extends the vocabulary with words
        found in the data file 'filename'.
        '''
        data = self.read_items(filename)
        vocab_curr = set(data)
        for word in vocab_curr:
            if word not in self.item_to_id:
                print(u'word {0} not yet seen'.format(word).encode(self.encoding))
                self.item_to_id[word] = len(self.item_to_id)
                self.id_to_item[len(self.id_to_item)] = word

    def add_padding_symbol(self):
        '''
        Add the correct padding symbol to the vocabulary.
        Raises ValueError when no usable (unused) padding symbol can be found.
        '''
        if self.PADDING_SYMBOL not in self.item_to_id:
            self.item_to_id[self.PADDING_SYMBOL] = len(self.item_to_id)
            self.id_to_item[len(self.id_to_item)] = self.PADDING_SYMBOL
        # if the default symbol for padding is already in the vocabulary
        else:
            # another symbol should be specified in the config file
            if not 'padding_symbol' in self.config:
                raise ValueError("{0} used as padding symbol but occurs in text. " \
                    "Specify another padding symbol with 'padding_symbol' in the config file.".format(
                    self.PADDING_SYMBOL))
            else:
                self.PADDING_SYMBOL = self.config['padding_symbol']
                # check whether the padding symbol specified in the config file occurs in the data or not
                if self.PADDING_SYMBOL not in self.item_to_id:
                    self.item_to_id[self.PADDING_SYMBOL] = len(self.item_to_id)
                    self.id_to_item[len(self.id_to_item)] = self.PADDING_SYMBOL
                else:
                    raise ValueError("The padding symbol specified in the config file ({0}) " \
                        "already occurs in the text.".format(self.PADDING_SYMBOL))

    @abstractmethod
    def build_ngram_vocab(self, filename):
        raise NotImplementedError("Abstract class.")

    @abstractmethod
    def build_skipgram_vocab(self, filename, skip):
        raise NotImplementedError("Abstract class.")

    def file_to_item_ids(self, filename, item_to_id=None):
        '''
        Returns list of all words/characters (mapped to their ids) in the file,
        either one long list or a list of lists per sentence.
        Arguments:
            filename: name of file for which the words should be mapped to their ids
        Optional:
            item_to_id: dictionary that should be used for the mapping (otherwise self.item_to_id is used)
        '''
        # NOTE(review): '== None' works, but 'is None' is the idiomatic identity test
        if item_to_id == None:
            item_to_id = self.item_to_id
        data = self.read_items(filename)
        # tmp_l only serves to print items missing from the mapping;
        # the returned list is rebuilt in the comprehension below
        tmp_l = []
        for w in data:
            if w in item_to_id:
                tmp_l.append(item_to_id[w])
            else:
                print(u'{0} not in item_to_id'.format(w).encode('utf-8'))
        # out-of-vocabulary items fall back to the unknown-word id
        return [item_to_id[item] if item in item_to_id else item_to_id[self.unk] for item in data]

    @abstractmethod
    def file_to_ngram_ids(self, filename):
        raise NotImplementedError("Abstract class.")

    @abstractmethod
    def file_to_skipgram_ids(self, filename):
        raise NotImplementedError("Abstract class.")

    def read_data(self):
        '''
        Makes sure there is a vocabulary and reads all necessary data.
        Returns:
            all_data: tuple of three lists : train_data, valid_data and test_data
        '''
        if 'read_vocab_from_file' in self.config:
            # read vocabulary mapping from file
            self.item_to_id, self.id_to_item = load_item_to_id(self.config['read_vocab_from_file'], self.encoding)
            # check whether the data file contains words that are not yet in the vocabulary mapping
            self.extend_vocab(self.train_path)
            if 'per_sentence' in self.config:
                self.add_padding_symbol()
        else:
            # if the vocabulary mapping is not saved on disk, make one based on the training data
            self.item_to_id, self.id_to_item = self.build_vocab(self.train_path)
            # sentence-level model or model that will be used for rescoring: needs padding symbol in vocabulary
            if 'rescore_later' in self.config or 'per_sentence' in self.config:
                self.add_padding_symbol()
        # save the item_to_id mapping such that it can be re-used
        if 'save_dict' in self.config:
            save_item_to_id(self.item_to_id, '{0}.dict'.format(self.config['save_dict']), self.encoding)
        # make a label file to visualize the embeddings
        # with the correct labels (= words instead of ids) in tensorboard
        self.label_file = os.path.join(self.config['save_path'], "labels.tsv")
        # write label file
        with io.open(self.label_file, 'w', encoding=self.encoding) as f:
            for i in range(len(self.id_to_item)):
                f.write(u'{0}\n'.format(self.id_to_item[i]))
        # list of all words in training data converted to their ids
        if self.TRAIN:
            train_data = self.file_to_item_ids(self.train_path)
        else:
            train_data = []
        # list of all words in validation data converted to their ids
        if self.VALID:
            valid_data = self.file_to_item_ids(self.valid_path)
        else:
            valid_data = []
        # list of all words in test data converted to their ids
        if self.TEST:
            test_data = self.file_to_item_ids(self.test_path)
        else:
            test_data = []
        all_data = (train_data, valid_data, test_data)
        return all_data

    def get_data(self):
        '''
        Retrieve the necessary data and vocabulary size.
        Returns:
            all_data: tuple of (train, valid, test) id lists
            vocabulary size
            0 (placeholder; sentence-level subclasses return a maximum length here)
        '''
        all_data = self.read_data()
        return all_data, len(self.id_to_item), 0

    def init_batching(self, data, test=False):
        '''
        Prepare for batching.
        Arguments:
            data: flat list of item ids
            test: if True, use the evaluation batch size and number of steps
        '''
        if test:
            batch_size = self.eval_batch_size
            self.num_steps = self.eval_num_steps
        else:
            batch_size = self.batch_size
        # beginning of data set: set self.end_reached to False (was set to True if another data set is already processed)
        if self.iterator == 0:
            self.end_reached = False
        data_len = len(data)
        # to divide data in batch_size batches, each of length batch_len
        batch_len = data_len // batch_size
        # number of samples that can be taken from the batch_len slices
        self.num_samples = (batch_len // self.num_steps) - 1
        # remove last part of the data that doesn't fit in the batch_size x num_steps samples
        data = data[:batch_size * batch_len]
        # convert to numpy array: batch_size x batch_len
        self.data_array = np.array(data).reshape(batch_size, batch_len)

    def get_batch(self):
        '''
        Gets a single batch.
        Returns:
            x: input data
            y: target data
            end_reached: boolean marking whether the end of the data file has been reached or not
        '''
        # take slice of batch_size x num_steps
        x = self.data_array[:, self.iterator * self.num_steps :
            (self.iterator * self.num_steps) + self.num_steps]
        # targets = same slice but shifted one step to the right
        y = self.data_array[:, (self.iterator * self.num_steps) +1 :
            (self.iterator * self.num_steps) + self.num_steps + 1]
        # if iterated over the whole dataset, set iterator to 0 to start again
        if self.iterator >= self.num_samples:
            self.iterator = 0
            self.end_reached = True
        # otherwise, increase count
        else:
            self.iterator += 1
        return x, y, self.end_reached
class charData(LMData):
    '''
    Train on character level rather than word level.
    '''

    def __init__(self, config, eval_config, TRAIN, VALID, TEST):
        super(charData, self).__init__(config, eval_config, TRAIN, VALID, TEST)

    def read_items(self, filename):
        '''
        Returns a list of all CHARACTERS in filename.
        '''
        with tf.gfile.GFile(filename, "r") as f:
            text = f.read().decode(self.encoding)
        # every newline becomes an end-of-sentence token;
        # all other characters are kept as individual items
        data = []
        for char in text:
            data.append('<eos>' if char == '\n' else char)
        return data
class wordSentenceData(LMData):
    '''
    Feed sentence per sentence to the network,
    each sentence padded until the length of the longest sentence.
    '''

    def __init__(self, config, eval_config, TRAIN, VALID, TEST):
        super(wordSentenceData, self).__init__(config, eval_config, TRAIN, VALID, TEST)
        # index of the sentence currently served in test mode
        self.sentence_iterator = 0

    def read_sentences(self, filename):
        '''
        Returns a list with all sentences in filename, each sentence is split in words.
        '''
        with tf.gfile.GFile(filename, "r") as f:
            if "WikiText" in self.config['data_path']:
                all_sentences = f.read().decode(self.encoding).replace("\n", "<eos>").replace(" . "," <eos> ").split("<eos>")
            # this assumes that all other datasets contain 1 sentence per line
            else:
                all_sentences = f.read().decode(self.encoding).replace("\n", "<eos>").split("<eos>")
        # remove empty element at the end
        if all_sentences[-1] == '':
            all_sentences = all_sentences[:-1]
        # split sentence in words
        for i in xrange(len(all_sentences)):
            all_sentences[i] = all_sentences[i].split()
        return all_sentences

    def calc_longest_sent(self, all_data):
        '''
        Returns length of longest sentence occurring in all_data.
        '''
        max_length = 0
        for dataset in all_data:
            for sentence in dataset:
                if len(sentence) > max_length:
                    max_length = len(sentence)
        return max_length

    def padding(self, dataset, total_length):
        '''
        Add <bos> and <eos> to each sentence in dataset + pad until max_length.
        Returns the (in-place) padded dataset and the list of true sequence lengths.
        '''
        seq_lengths = []
        for sentence in dataset:
            #seq_lengths.append(len(sentence)+1) # +1 ONLY <eos>
            seq_lengths.append(len(sentence)+2) # +2 <bos> + <eos>
            if 'hyp_with_ids' in self.config:
                # first element is the hypothesis id: insert <bos> after it
                sentence.insert(1, self.item_to_id['<bos>']) # CHANGED
            else:
                sentence.insert(0, self.item_to_id['<bos>']) # CHANGED
            # end of sentence symbol
            sentence.append(self.item_to_id['<eos>'])
            # pad rest of sentence until maximum length
            num_pads = total_length - len(sentence)
            for pos in xrange(num_pads):
                if 'not_trained_with_padding' in self.config:
                    sentence.append(self.item_to_id[self.unk])
                else:
                    try:
                        sentence.append(self.item_to_id[self.PADDING_SYMBOL])
                    except KeyError:
                        print("No padding symbol ({0}) in the dictionary. Either add 'not_trained_with_padding' " \
                            "in the config file if the model is trained without padding or " \
                            "specify the correct symbol with 'padding_symbol' in the config.".format(
                            self.PADDING_SYMBOL))
                        sys.exit(1)
        return dataset, seq_lengths

    def pad_data(self, all_data, max_length):
        '''
        Pad each dataset in all_data.
        '''
        # + 2 because <bos> and <eos> should be added
        # + 1 for extra padding symbol to avoid having target sequences
        # which end on the beginning of the next sentence
        #total_length = max_length + 2
        total_length = max_length + 3
        if isinstance(all_data, tuple):
            padded_all = ()
            seq_lengths_all = ()
            for dataset in all_data:
                padded_dataset, seq_length = self.padding(dataset, total_length)
                padded_all += (padded_dataset,)
                seq_lengths_all += (seq_length,)
        else:
            padded_all, seq_lengths_all = self.padding(all_data, total_length)
        return padded_all, seq_lengths_all

    def file_to_item_ids(self, filename):
        '''
        Maps every sentence in the file to a list of item ids.
        '''
        data = self.read_sentences(filename)
        data_ids = []
        for sentence in data:
            if 'hyp_with_ids' in self.config:
                # do not convert hypothesis id to integer
                hyp = [self.item_to_id[item] if item in self.item_to_id else self.item_to_id[self.unk] for item in sentence[1:]]
                data_ids.append([sentence[0]] + hyp)
            else:
                data_ids.append([self.item_to_id[item] if item in self.item_to_id else self.item_to_id[self.unk] for item in sentence])
        return data_ids

    def get_data(self):
        '''
        Reads and pads the data.
        Returns: padded data, vocabulary size, useful sequence length, true sequence lengths.
        '''
        all_data = self.read_data()
        if not '<bos>' in self.item_to_id:
            self.item_to_id['<bos>'] = len(self.item_to_id)
            self.id_to_item[len(self.id_to_item)] = '<bos>'
        if 'max_length' in self.config:
            max_length = self.config['max_length']
        else:
            max_length = self.calc_longest_sent(all_data)
        # + 2 for <eos> and extra padding symbol at the end
        #self.num_steps = max_length + 2
        self.num_steps = max_length + 3
        padded_data, seq_lengths = self.pad_data(all_data, max_length)
        # return max_length+1 and not +2 because the last padding symbol is only there
        # to make sure that the target sequence does not end with the beginning of the next sequence
        #return padded_data, len(self.id_to_item), max_length+1, seq_lengths
        return padded_data, len(self.id_to_item), max_length+2, seq_lengths

    def init_batching(self, data, test=False):
        '''
        Prepare for sentence-level batching.
        Arguments:
            data: tuple of (sentences as id lists, true sequence lengths)
            test: if True, batching assumes batch_size and num_steps of 1
        '''
        if test:
            self.batch_size = self.eval_batch_size
            self.num_steps = self.eval_num_steps
        length_sentence = self.num_steps
        if self.iterator == 0:
            self.end_reached = False
        self.test = test
        words = data[0]
        seq_lengths = data[1]
        if not self.test:
            data_len = len(words)*len(words[0])
            # to divide data in batch_size batches, each of length batch_len
            batch_len = data_len // self.batch_size
            # number of sentences that fit in 1 batch_len
            self.num_sentences_batch = batch_len // (length_sentence+1)
            # we want batch_len to be a multiple of num_steps (=size of padded sentence)
            batch_len = self.num_sentences_batch * (length_sentence+1)
            # remove last part of the data that doesn't fit in the batch_size x num_steps samples
            words = words[:self.batch_size * self.num_sentences_batch]
            # convert to numpy array: batch_size x batch_len*num_steps
            self.data_array = np.array(words).reshape(
                self.batch_size, self.num_sentences_batch*length_sentence)
            # convert seq_lengths to numpy array
            self.seql_array = np.array(seq_lengths)
        else:
            # only for testing, this assumes that batch_size and num_steps are 1!
            self.len_data = len(words)*len(words[0])
            self.len_sentence = len(words[0])
            self.data_array = np.array(words).reshape(len(words), len(words[0]))

    def get_batch(self):
        '''
        Gets a single batch plus the sequence lengths (for dynamic rnn).
        Returns: x, y, end_reached, seql
        '''
        if not self.test:
            # take slice of batch_size x num_steps
            x = self.data_array[:, self.iterator * self.num_steps :
                (self.iterator * self.num_steps) + self.num_steps - 1]
            # targets = same slice but shifted one step to the right
            y = self.data_array[:, (self.iterator * self.num_steps) +1 :
                (self.iterator * self.num_steps) + self.num_steps ]
            # take slice of sequence lengths for all elements in the batch
            seql = self.seql_array[self.iterator * self.batch_size : (self.iterator+1) * self.batch_size]
            # if iterated over the whole dataset, set iterator to 0 to start again
            if self.iterator >= self.num_sentences_batch:
                self.iterator = 0
                self.end_reached = True
            # otherwise, increase count
            else:
                self.iterator += 1
        else:
            # test mode: serve one position of one sentence at a time
            x = self.data_array[self.sentence_iterator, self.iterator: self.iterator + 1]
            y = self.data_array[self.sentence_iterator, self.iterator + 1 : self.iterator + 2]
            # num_steps = 1 so no sequence length needed
            seql = [1]
            # NOTE(review): '/' is floor division only on Python 2 — confirm intended interpreter
            if self.sentence_iterator == self.len_data / self.len_sentence and self.iterator == self.len_sentence - 1:
                self.end_reached = True
            # otherwise, increase count
            else:
                self.iterator += 1
                if self.iterator == self.len_sentence - 1:
                    # end of file reached
                    if self.sentence_iterator >= (self.len_data / self.len_sentence) - 1:
                        self.end_reached = True
                    # end of sentence reached
                    else:
                        self.iterator = 0
                        self.sentence_iterator += 1
            x = [x]
            y = [y]
        return x, y, self.end_reached, seql
class charSentenceData(wordSentenceData):
    '''
    Same as wordSentenceData, except that the input unit is a character.
    '''

    def __init__(self, config, eval_config, TRAIN, VALID, TEST):
        super(charSentenceData, self).__init__(config, eval_config, TRAIN, VALID, TEST)

    def read_sentences(self, filename):
        '''Returns a list with all sentences in filename, each sentence is split in characters.'''
        with tf.gfile.GFile(filename, "r") as f:
            text = f.read().decode(self.encoding)
        if "WikiText" in self.config['data_path']:
            # WikiText has several sentences per line: also break at ' . '
            all_sentences = text.replace("\n", "<eos>").replace(" . "," <eos> ").split("<eos>")
        else:
            all_sentences = text.replace("\n", "<eos>").split("<eos>")
        # drop the empty trailing element produced by a final newline
        if all_sentences[-1] == '':
            all_sentences = all_sentences[:-1]
        # break each sentence into its individual characters
        for idx, sentence in enumerate(all_sentences):
            all_sentences[idx] = list(sentence)
        return all_sentences

    def read_items(self, filename):
        '''
        Returns a list of all CHARACTERS in filename.
        '''
        with tf.gfile.GFile(filename, "r") as f:
            raw = f.read().decode(self.encoding)
        return ['<eos>' if ch == '\n' else ch for ch in raw]
class wordSentenceDataStream(wordSentenceData):
    '''
    Same as wordSentenceData but reads the data batch per batch instead of all at once.
    '''

    def __init__(self, config, eval_config, TRAIN, VALID, TEST):
        super(wordSentenceDataStream, self).__init__(config, eval_config, TRAIN, VALID, TEST)

    def calc_longest_sent(self, list_files):
        '''
        Calculates longest sentence based on list of files instead of already read data.
        '''
        max_length = 0
        for f in list_files:
            if os.path.isfile(f):
                for line in io.open(f, 'r', encoding=self.encoding):
                    curr_length = len(line.strip().split(' '))
                    if curr_length > max_length:
                        max_length = curr_length
        return max_length

    def get_batch(self, f, test=False):
        '''
        Reads the next batch of sentences directly from the open file handle f.
        Returns: x, y, end_reached, sequence lengths
        (all None except end_reached when the end of the file is reached).
        '''
        if test:
            self.batch_size = self.eval_batch_size
        end_reached = False
        curr_batch = []
        seq_lengths = []
        for i in xrange(self.batch_size):
            curr_sentence = f.readline().replace('\n',' <eos>')
            if not curr_sentence:
                end_reached = True
                break
            # if end of file is reached
            if curr_sentence == '':
                end_reached = True
                f.close()
                return None, None, end_reached, None
            # input batch: convert words to indices
            curr_sentence_idx = [self.item_to_id['<bos>']]
            for w in curr_sentence.split(' '):
                # ignore blanks
                if w == '':
                    continue
                elif w in self.item_to_id:
                    curr_sentence_idx.append(self.item_to_id[w])
                # map OOV words to UNK-symbol
                else:
                    curr_sentence_idx.append(self.item_to_id[self.unk])
            # length of sentence (for dynamic rnn)
            seq_lengths.append(len(curr_sentence_idx))
            # pad up to max_length (+1 extra so targets can be shifted one step)
            number_pads = self.max_length - len(curr_sentence_idx) + 1
            padding = [self.item_to_id[self.PADDING_SYMBOL]]*number_pads
            curr_sentence_idx.extend(padding)
            curr_batch.append(curr_sentence_idx)
        if end_reached:
            return None, None, end_reached, None
        else:
            curr_batch_array = np.array(curr_batch)
            # inputs = everything except the last position; targets = shifted by one
            x = curr_batch_array[:,:-1]
            y = curr_batch_array[:,1:]
            seq_lengths_array = np.array(seq_lengths)
            return x, y, False, seq_lengths_array

    def prepare_data(self):
        '''
        Makes sure there is a vocabulary (with padding symbol) and determines
        the maximum sentence length.
        Returns: (train, valid, test) paths, vocabulary size and maximum sentence length.
        '''
        if 'read_vocab_from_file' in self.config:
            # read vocabulary mapping and maximum sentence length from file
            self.item_to_id, self.id_to_item = load_item_to_id(self.config['read_vocab_from_file'], self.encoding)
            if len(self.item_to_id) != self.config['vocab_size']:
                raise IOError("The vocabulary size specified by 'vocab_size' ({0}) does not correspond \
to the size of the vocabulary file given ({1}).".format(
                    self.config['vocab_size'], len(self.item_to_id)))
            self.max_length = int(open(os.path.join(self.config['data_path'], "max_sentence_length")).readlines()[0].strip())
        else:
            # build input vocabulary
            self.item_to_id, self.id_to_item = self.build_vocab(self.train_path)
            # get maximum length of sentence in all files
            self.max_length = self.calc_longest_sent([self.train_path, self.valid_path, self.test_path])
        # padding symbol needed
        self.add_padding_symbol()
        return (self.train_path, self.valid_path, self.test_path), len(self.item_to_id), self.max_length

    def init_batching(self, data_path):
        '''
        Opens the data file for streaming; the returned handle is consumed by get_batch.
        NOTE(review): signature differs from the parent's init_batching(data, test) — confirm callers.
        '''
        self.end_reached = False
        data_file = io.open(data_path,"r", encoding=self.encoding)
        return data_file
class charWordData(wordSentenceData):
    '''
    Character-level data, but per word (padded until the maximum word length).
    Used for lm_char_rnn.
    '''

    def __init__(self, config, eval_config, TRAIN, VALID, TEST):
        super(charWordData, self).__init__(config, eval_config, TRAIN, VALID, TEST)

    def read_items(self, filename):
        '''
        Returns a list of all CHARACTERS in filename.
        '''
        with tf.gfile.GFile(filename, "r") as f:
            # Wikitext: more than 1 sentence per line, also introduce <eos> at ' . '
            if "WikiText" in self.config['data_path']:
                data = [list(x) if (x != '<eos>' and x != self.unk) else x for x in f.read().decode(
                    self.encoding).replace("\n", " <eos> ").replace(" . "," <eos> ").split(" ")]
            else:
                data = [list(x) if (x != '<eos>' and x != self.unk) else x for x in f.read().decode(
                    self.encoding).replace("\n", " <eos> ").split(" ")]
        # words were split into characters; <eos>/<unk> stayed whole — flatten to one list
        data = flattern(data)
        return data # single list with all characters in the file

    def read_sentences(self, filename):
        '''
        Returns a list with all words in filename, each words is split in characters.
        '''
        with tf.gfile.GFile(filename, "r") as f:
            if "WikiText" in self.config['data_path']:
                all_words = [list(word) if (word != self.unk and word != '<eos>') else word for word in f.read().decode(
                    self.encoding).replace("\n", " <eos> ").replace(" . "," <eos> ").split(" ")]
            else:
                # split word in characters if it is not <unk> or <eos>
                all_words = [list(word) if (word != self.unk and word != '<eos>') else word for word in f.read().decode(
                    self.encoding).replace("\n", " <eos> ").split(" ")]
        # remove empty elements
        all_words = [word for word in all_words if word != []]
        return all_words

    def padding(self, dataset, total_length):
        '''
        Pad until max_length without adding <eos> symbol first.
        Returns the (in-place) padded dataset and the list of true word lengths.
        '''
        seq_lengths = []
        # total_length = max_length + 2 (inherited from wordSentenceData),
        # but since we did not add <eos> in addition to the padding symbols, the actual length is -1
        # if no extra padding symbol is used to ensure the last padding symbol still has a 'target', -2
        total_length = total_length - 2
        for word in dataset:
            seq_lengths.append(len(word))
            # pad rest of word until maximum length
            num_pads = total_length - len(word)
            for pos in xrange(num_pads):
                word.append(self.item_to_id[self.PADDING_SYMBOL])
        return dataset, seq_lengths

    def file_to_item_ids(self, filename):
        '''
        Maps every word in the file to a list of character ids
        (<eos> and <unk> map to a single-element list).
        '''
        data = self.read_sentences(filename)
        data_ids = []
        for word in data:
            if word == '<eos>' or word == self.unk:
                data_ids.append([self.item_to_id[word]])
            else:
                data_ids.append([self.item_to_id[char] for char in word if char in self.item_to_id])
        return data_ids

    def get_data(self):
        '''
        Reads and pads the data on word level.
        Returns: padded data, vocabulary size, maximum word length, true word lengths.
        '''
        all_data = self.read_data()
        self.add_padding_symbol()
        max_length = self.calc_longest_sent(all_data)
        self.max_length = max_length
        #self.num_steps = max_length
        #self.eval_num_steps = max_length
        padded_data, seq_lengths = self.pad_data(all_data, max_length)
        # return max_length+1 and not +2 because the last padding symbol is only there
        # to make sure that the target sequence does not end with the beginning of the next sequence
        return padded_data, len(self.id_to_item), max_length, seq_lengths

    def init_batching(self, data, test=False):
        '''
        Prepare for batching of character-per-word data.
        Arguments:
            data: tuple of (words as character-id lists, true word lengths)
            test: if True, use the evaluation batch size and number of steps
        '''
        if test:
            self.batch_size = self.eval_batch_size
            self.num_steps = self.eval_num_steps
        #else:
            #batch_size = self.batch_size
            #num_steps = self.num_steps
        if self.iterator == 0:
            self.end_reached = False
        self.test = test
        words = data[0]
        seq_lengths = data[1]
        data_len = len(words)*len(words[0])
        # to divide data in batch_size batches, each of length batch_len
        batch_len = data_len // self.batch_size
        # number of items in 1 batch_len = self.max_length (length of word) * self.num_steps (number of words)
        # subtract one because there is not target for the last word
        self.num_words_batch = batch_len // (self.max_length*self.num_steps) - 1
        # we want batch_len to be a multiple of num_steps (=size of padded sentence)
        #batch_len = self.num_words_batch * self.num_steps #v1
        batch_len = self.num_words_batch * self.max_length * self.num_steps
        # only batch_size x batch_len words fit,
        # divide by self.max_length because 'words' = list of lists (each max.length long)
        # NOTE(review): '/' yields an integer index on Python 2 only; on Python 3 this
        # produces a float and would fail as a slice bound — confirm intended interpreter
        words = words[:(self.batch_size * batch_len)/self.max_length]
        # convert to numpy array
        #self.data_array = np.array(words).reshape(self.batch_size, self.num_words_batch*self.num_steps) #v1
        self.data_array = np.array(words).reshape(self.batch_size, batch_len)
        # convert seq_lengths to numpy array
        seq_lengths = seq_lengths[:(self.batch_size * batch_len)/self.max_length]
        self.seql_array = np.array(seq_lengths).reshape(self.batch_size, self.num_steps*self.num_words_batch)

    def get_batch(self):
        '''
        Gets a single batch of shape batch_size x num_steps x max_length.
        Returns: x, y, end_reached, seql
        '''
        # take slice of batch_size x num_steps
        x = self.data_array[:, self.iterator * self.num_steps : (self.iterator * self.num_steps) + (self.num_steps*self.max_length)]
        x = x.reshape(self.batch_size, self.num_steps, self.max_length)
        y = self.data_array[:, (self.iterator * self.num_steps)+1 : (self.iterator * self.num_steps) + (self.num_steps*self.max_length) +1]
        y = y.reshape(self.batch_size, self.num_steps, self.max_length)
        # !!! TO DO: last element of each word is first character of next word --> correct this
        # take slice of sequence lengths for all elements in the batch
        seql = self.seql_array[:, self.iterator * self.num_steps : (self.iterator+1) * self.num_steps]
        # if iterated over the whole dataset, set iterator to 0 to start again
        if self.iterator >= self.num_words_batch:
            self.iterator = 0
            self.end_reached = True
        # otherwise, increase count
        else:
            self.iterator += 1
        return x, y, self.end_reached, seql
class wordSentenceDataRescore(wordSentenceData):
    '''
    Rescore N-best lists with model trained across sentence boundaries.
    '''

    def __init__(self, config, eval_config, TRAIN, VALID, TEST):
        super(wordSentenceDataRescore, self).__init__(config, eval_config, TRAIN, VALID, TEST)

    def get_data(self):
        '''Read the data and pad it to the fixed length implied by num_steps.'''
        dataset = self.read_data()
        # num_steps must cover <bos>, <eos> and one extra padding symbol,
        # so the longest usable sentence is num_steps - 3
        sentence_cap = self.config['num_steps'] - 3
        padded, _ = self.pad_data(dataset, sentence_cap)
        # return sentence_cap+2 and not +3 because the last padding symbol is only there
        # to make sure that the target sequence does not end with the beginning of the next sequence
        return padded, len(self.id_to_item), sentence_cap+2
class charSentenceDataRescore(charSentenceData, wordSentenceDataRescore):
    '''
    Same as wordSentenceDataRescore but on character level.
    '''

    def __init__(self, config, eval_config, TRAIN, VALID, TEST):
        super(charSentenceDataRescore, self).__init__(config, eval_config, TRAIN, VALID, TEST)

    def file_to_item_ids(self, filename):
        # explicitly dispatch through wordSentenceDataRescore rather than the
        # default MRO lookup on this class
        return wordSentenceDataRescore.file_to_item_ids(self, filename)

    def get_data(self):
        # explicitly dispatch through wordSentenceDataRescore (fixed-length padding)
        return wordSentenceDataRescore.get_data(self)
class charNGramData(LMData):
'''
Feed character n-grams to the network (but still predict words).
'''
def __init__(self, config, eval_config, TRAIN, VALID, TEST):
    '''
    Arguments: see LMData.__init__.
    Requires 'char_ngram' (the n of the character n-grams) in the config;
    'add_word' additionally requires 'word_size' and 'input_vocab_size'.
    '''
    super(charNGramData, self).__init__(config, eval_config, TRAIN, VALID, TEST)
    if not isinstance(self.config['char_ngram'],int):
        raise IOError("Specify what n should be used for the character n-grams.")
    else:
        self.n = self.config['char_ngram']
    # tokens kept whole instead of being decomposed into character n-grams
    self.special_symbols = ['<UNK>', '<unk>', '<eos>']
    self.ngram_to_id = {}
    self.id_to_ngram = {}
    #if 'add_word' in self.config and 'input_vocab' in self.config:
    if 'add_word' in self.config:
        if not 'word_size' in self.config:
            raise IOError("Specify the size that should be assigned to the word input (word_size).")
        if not 'input_vocab_size' in self.config:
            raise IOError("Specify the size of the word input vocabulary (input_vocab_size).")
        # separate word-level input vocabulary used next to the n-gram input
        self.input_item_to_id = {}
        self.input_id_to_item = {}
def find_ngrams(self, data):
    '''
    Finds all ngrams in data.
    Arguments:
        data: list of all words in the training file
    Returns:
        freq_ngrams: dictionary containing all n-grams found + their frequency
    '''
    freq_ngrams = dict()
    for word in data:
        # add the special symbols as 1
        if word in self.special_symbols:
            if word in freq_ngrams:
                freq_ngrams[word] += 1
            else:
                freq_ngrams[word] = 1
        else:
            # first ngram: append <bow> to the beginning of the word
            first_ngram = '<bow>'+word[:self.n-1]
            if 'capital' in self.config:
                first_ngram = first_ngram.lower()
            if first_ngram in freq_ngrams:
                freq_ngrams[first_ngram] += 1
            else:
                freq_ngrams[first_ngram] = 1
            # n-grams in the middle of the word
            for pos in xrange(len(word)):
                # only add the ngram if it is long enough (end of the word: not long enough)
                if len(word[pos:pos+self.n]) == self.n:
                    curr_ngram = word[pos:pos+self.n]
                    # if special marker for capital: only use lower case n-grams
                    if 'capital' in self.config:
                        curr_ngram = curr_ngram.lower()
                    # add ngram if not yet in set
                    if curr_ngram in freq_ngrams:
                        freq_ngrams[curr_ngram] += 1
                    else:
                        freq_ngrams[curr_ngram] = 1
            # last n-gram: append '<eow>' to end of word
            last_ngram = word[-1-self.n+2:]+'<eow>'
            if 'capital' in self.config:
                last_ngram = last_ngram.lower()
            if last_ngram in freq_ngrams:
                freq_ngrams[last_ngram] += 1
            else:
                freq_ngrams[last_ngram] = 1
    return freq_ngrams
def find_skipgrams(self, data, skip):
    '''
    Finds all skipgrams in data.
    Arguments:
        data: list of all words in the training file
        skip: number of characters that should be skipped
    Returns:
        freq_skipgrams: dictionary mapping every skipgram found to its frequency
    '''
    freq_skipgrams = dict()
    lower = 'capital' in self.config
    for word in data:
        # special symbols are counted as single units
        if word in self.special_symbols:
            freq_skipgrams[word] = freq_skipgrams.get(word, 0) + 1
        # NOTE: one-character words that are not special symbols are
        # deliberately skipped here (they are handled as unigrams elsewhere)
        elif len(word) > 1:
            # first skipgram: <bow> marker + the character after the skip
            first_skipgram = '<bow>' + word[skip]
            if lower:
                first_skipgram = first_skipgram.lower()
            freq_skipgrams[first_skipgram] = freq_skipgrams.get(first_skipgram, 0) + 1
            # skipgrams in the middle of the word; range (not py2-only xrange)
            # keeps the code runnable on both Python 2 and 3
            for pos in range(len(word)):
                # only add the skipgram if enough characters remain
                if len(word[pos:]) >= skip + 2:
                    curr_skipgram = word[pos] + word[pos + 1 + skip]
                    # if special marker for capital: only use lower case grams
                    if lower:
                        curr_skipgram = curr_skipgram.lower()
                    freq_skipgrams[curr_skipgram] = freq_skipgrams.get(curr_skipgram, 0) + 1
            # last skipgram: skip-th character from the end + <eow> marker
            last_skipgram = word[-1 - skip] + '<eow>'
            if lower:
                last_skipgram = last_skipgram.lower()
            freq_skipgrams[last_skipgram] = freq_skipgrams.get(last_skipgram, 0) + 1
    return freq_skipgrams
def build_ngram_vocab(self, filename, skip=None):
    '''
    Reads the data and builds ngram-to-id mapping and id-to-ngram mapping.
    Arguments:
        filename: data file from which the vocabulary is read
        skip: if None, n-grams are read; if not None, 'skip' characters are skipped
    '''
    data = self.read_items(filename)
    # find all n-grams/skipgrams + their frequency
    if skip is not None:
        freq_ngrams = self.find_skipgrams(data, skip)
    else:
        freq_ngrams = self.find_ngrams(data)
    # for words that consist of only 1 character: add unigrams
    # possible TO DO: if n > 2, what to do with words of 2 characters?
    all_chars = set(''.join(data))
    for word in data:
        if word in all_chars:
            freq_ngrams[word] = freq_ngrams.get(word, 0) + 1
    # keep only n-grams above the cutoff frequency, if one is configured
    if 'ngram_cutoff' in self.config:
        if not isinstance(self.config['ngram_cutoff'], int):
            raise ValueError("Specify what cutoff frequency should be used for the character n-grams.")
        cutoff = self.config['ngram_cutoff']
        freq_ngrams = {ngram: freq for ngram, freq in freq_ngrams.items() if freq > cutoff}
    # list() is required: dict.keys() returns a view on Python 3, which has
    # no append(); the previous code only worked on Python 2
    ngrams = list(freq_ngrams.keys())
    if 'capital' in self.config:
        # special symbol to indicate whether the word contains (a) capital(s) or not
        ngrams.append('<cap>')
        # remove n-grams with capitals from the vocabulary
        ngrams = [gram for gram in ngrams if gram.islower()]
    # unknown n-gram symbol
    ngrams.append('<UNKngram>')
    self.ngram_to_id = dict(zip(ngrams, range(len(ngrams))))
    self.id_to_ngram = dict(zip(range(len(ngrams)), ngrams))
    print('Size of n-gram vocabulary: {0}'.format(len(self.ngram_to_id)))
def map_ngrams_to_ids(self, ngram_repr, word):
    '''
    Maps all n-grams in the word to a count on the input vector.
    Arguments:
        ngram_repr: input vector
        word: word that should be mapped to n-grams
    Returns:
        ngram_repr: input vector, with counts for all n-grams in 'word' added
    '''
    lower = 'capital' in self.config
    unk_id = self.ngram_to_id['<UNKngram>']

    def _count(ngram):
        # increase the count at the n-gram's index, or at <UNKngram> for OOV
        ngram_repr[self.ngram_to_id.get(ngram, unk_id)] += 1

    # first n-gram: <bow> marker + first n-1 characters
    first_ngram = '<bow>' + word[:self.n - 1]
    _count(first_ngram.lower() if lower else first_ngram)
    # middle n-grams; range (not py2-only xrange) works on Python 2 and 3
    for pos in range(len(word)):
        curr_ngram = word[pos:pos + self.n]
        # skip tails shorter than n at the end of the word
        if len(curr_ngram) == self.n:
            _count(curr_ngram.lower() if lower else curr_ngram)
    # last n-gram: last n-1 characters + <eow> marker
    last_ngram = word[-1 - self.n + 2:] + '<eow>'
    _count(last_ngram.lower() if lower else last_ngram)
    return ngram_repr
def map_skipgrams_to_ids(self, ngram_repr, word, skip):
    '''
    Maps all skipgrams in the word to a count on the input vector.
    Arguments:
        ngram_repr: input vector
        word: word that should be mapped to skipgrams
        skip: number of characters that should be skipped
    Returns:
        ngram_repr: input vector, with counts for all skipgrams in 'word' added
    '''
    lower = 'capital' in self.config
    unk_id = self.ngram_to_id['<UNKngram>']

    def _count(gram):
        # increase the count at the skipgram's index, or at <UNKngram> for OOV
        ngram_repr[self.ngram_to_id.get(gram, unk_id)] += 1

    # first skipgram: <bow> marker + the character after the skip
    first_skipgram = '<bow>' + word[skip]
    _count(first_skipgram.lower() if lower else first_skipgram)
    # middle skipgrams; range (not py2-only xrange) works on Python 2 and 3
    for pos in range(len(word)):
        # only count the skipgram if enough characters remain
        if len(word[pos:]) >= skip + 2:
            curr_skipgram = word[pos] + word[pos + 1 + skip]
            _count(curr_skipgram.lower() if lower else curr_skipgram)
    # last skipgram: skip-th character from the end + <eow> marker
    last_skipgram = word[-1 - skip] + '<eow>'
    _count(last_skipgram.lower() if lower else last_skipgram)
    return ngram_repr
def file_to_ngram_ids(self, filename):
    '''
    Generates occurrence vectors for all words in the file.
    Arguments:
        filename: name of data file
    Returns:
        ngrams: a list of ngram_repr, which are numpy arrays containing the counts of each n-gram
    '''
    vocab_size = len(self.ngram_to_id)
    ngrams = []
    for word in self.read_items(filename):
        # one zero-initialized count vector per word, sized to the n-gram vocabulary
        counts = np.zeros(vocab_size, dtype=np.float32)
        if len(word) == 1 or word in self.special_symbols:
            # single characters and special symbols are looked up whole,
            # falling back to the unknown-n-gram slot
            if word in self.ngram_to_id:
                counts[self.ngram_to_id[word]] += 1
            else:
                counts[self.ngram_to_id['<UNKngram>']] += 1
        else:
            if 'capital' in self.config:
                # record the number of capitals on the dedicated <cap> slot
                num_capitals = sum(1 for char in word if char.isupper())
                if num_capitals > 0:
                    counts[self.ngram_to_id['<cap>']] += num_capitals
            if 'skipgram' in self.config:
                counts = self.map_skipgrams_to_ids(counts, word, self.config['skipgram'])
            else:
                counts = self.map_ngrams_to_ids(counts, word)
        ngrams.append(counts)
    return ngrams
def read_data(self):
    '''
    Builds the n-gram (input) and word (output) vocabularies and converts
    the train/validation/test files to lists of ids.
    Returns:
        all_data: ((train_ngrams, train_words), (valid_ngrams, valid_words),
                   (test_ngrams, test_words)); each *_words element is a
                   (output_words, input_words) pair when both 'add_word' and
                   'input_vocab' are configured
    '''
    # n-gram input: use data with full vocabulary, where words are not converted to <UNK>
    train_path_full_vocab = os.path.join(self.config['data_path'], "train.txt")
    valid_path_full_vocab = os.path.join(self.config['data_path'], "valid.txt")
    test_path_full_vocab = os.path.join(self.config['data_path'], "test.txt")
    if 'skipgram' in self.config:
        # skipgrams are only supported for the bigram/skip-1 configuration
        if self.config['char_ngram'] != 2 or self.config['skipgram'] != 1:
            raise NotImplementedError("Skipgrams have only been implemented for char_ngram = 2 and skipgram = 1.")
        self.build_ngram_vocab(train_path_full_vocab, self.config['skipgram'])
    else:
        self.build_ngram_vocab(train_path_full_vocab)
    # output vocabulary: use reduced vocabulary
    self.item_to_id, self.id_to_item = self.build_vocab(self.train_path)
    # combine character n-grams with word input
    if 'add_word' in self.config:
        # if input vocabulary is different from output vocabulary
        if 'input_vocab' in self.config:
            train_file = "train_" + str(self.config['input_vocab']) + "k-unk.txt"
            valid_file = "valid_" + str(self.config['input_vocab']) + "k-unk.txt"
            test_file = "test_" + str(self.config['input_vocab']) + "k-unk.txt"
        else:
            train_file = "train.txt"
            valid_file = "valid.txt"
            test_file = "test.txt"
        input_train_path = os.path.join(self.config['data_path'], train_file)
        input_valid_path = os.path.join(self.config['data_path'], valid_file)
        input_test_path = os.path.join(self.config['data_path'], test_file)
        # build vocab for input word representation
        self.input_item_to_id, self.input_id_to_item = self.build_vocab(input_train_path)
        # make a label file to visualize the embeddings
        # with the correct labels (= words instead of ids) in tensorboard
        self.label_file = os.path.join(self.config['save_path'], "labels.tsv")
        # Write label file
        with open(self.label_file,"w") as f:
            for i in range(len(self.input_id_to_item)):
                f.write('{0}\n'.format(self.input_id_to_item[i]))
    # lists of all ngrams/words in training data converted to their ids
    if self.TRAIN:
        #if 'skipgram' in self.config:
        #    train_ngrams = self.file_to_skipgram_ids(train_path_full_vocab, self.config['skipgram'])
        #else:
        train_ngrams = self.file_to_ngram_ids(train_path_full_vocab)
        if 'add_word' in self.config and 'input_vocab' in self.config:
            train_input_words = self.file_to_item_ids(input_train_path, item_to_id=self.input_item_to_id)
        train_words = self.file_to_item_ids(self.train_path)
    else:
        train_ngrams = []
        train_words = []
        train_input_words = []
    # lists of all ngrams/words in validation data converted to their ids
    if self.VALID:
        #if 'skipgram' in self.config:
        #    valid_ngrams = self.file_to_skipgram_ids(valid_path_full_vocab, self.config['skipgram'])
        #else:
        valid_ngrams = self.file_to_ngram_ids(valid_path_full_vocab)
        if 'add_word' in self.config and 'input_vocab' in self.config:
            valid_input_words = self.file_to_item_ids(input_valid_path, item_to_id=self.input_item_to_id)
        valid_words = self.file_to_item_ids(self.valid_path)
    else:
        valid_ngrams = []
        valid_words = []
        valid_input_words = []
    # lists of all ngrams/words in test data converted to their ids
    if self.TEST:
        #if 'skipgram' in self.config:
        #    test_ngrams = self.file_to_skipgram_ids(test_path_full_vocab, self.config['skipgram'])
        #else:
        test_ngrams = self.file_to_ngram_ids(test_path_full_vocab)
        if 'add_word' in self.config and 'input_vocab' in self.config:
            test_input_words = self.file_to_item_ids(input_test_path, item_to_id=self.input_item_to_id)
        test_words = self.file_to_item_ids(self.test_path)
    else:
        test_ngrams = []
        test_words = []
        test_input_words = []
    # with a separate input vocabulary, pair output and input word ids
    if 'add_word' in self.config and 'input_vocab' in self.config:
        train_words = (train_words, train_input_words)
        valid_words = (valid_words, valid_input_words)
        test_words = (test_words, test_input_words)
    all_data = ((train_ngrams,train_words), (valid_ngrams,valid_words),(test_ngrams,test_words))
    return all_data
def get_data(self):
    '''
    Reads all data and reports the vocabulary sizes.
    Returns:
        a (all_data, (ngram_vocab_size, word_vocab_size), 0) tuple; the
        trailing 0 is a placeholder kept for interface compatibility
    '''
    all_items = self.read_data()
    # the vocabulary sizes must be read AFTER read_data(), which builds them
    vocab_sizes = (len(self.id_to_ngram), len(self.id_to_item))
    return all_items, vocab_sizes, 0
def init_batching(self, data, test=False):
    '''
    Prepares the numpy data arrays for batched iteration with get_batch().
    Arguments:
        data: (ngram_data, word_data) tuple as produced by read_data()
        test: if True, use the evaluation batch size and number of steps
    '''
    if test:
        batch_size = self.eval_batch_size
        self.num_steps = self.eval_num_steps
    else:
        batch_size = self.batch_size
        #self.num_steps = self.num_steps
    ngram_data, word_data = data
    if 'add_word' in self.config and 'input_vocab' in self.config:
        # in this configuration word_data carries (output_words, input_words)
        word_data, input_word_data = word_data
    input_size = self.config['input_size']
    # starting a fresh pass over the data: reset the end-of-data flag
    if self.iterator == 0:
        self.end_reached = False
    data_len = len(word_data)
    # to divide data in batch_size batches, each of length batch_len
    batch_len = data_len // batch_size
    # number of samples that can be taken from the batch_len slices
    if self.num_steps != 1:
        self.num_samples = batch_len // self.num_steps
    else:
        # num_steps == 1: reserve one position for the shifted targets
        self.num_samples = (batch_len // self.num_steps) - 1
    # remove last part of the data that doesn't fit in the batch_size x num_steps samples
    ngram_data = ngram_data[:batch_size * batch_len]
    word_data = word_data[:batch_size * batch_len]
    # for n-gram inputs: convert to numpy array: batch_size x batch_len x input_size
    self.data_array_ngrams = np.array(ngram_data).reshape(batch_size, batch_len, input_size)
    # for word outputs: convert to numpy array: batch_size x batch_len
    self.data_array_words = np.array(word_data).reshape(batch_size, batch_len)
    # if word representation is added to the input and input and output vocabulary are not the same
    if 'add_word' in self.config and 'input_vocab' in self.config:
        input_word_data = input_word_data[:batch_size * batch_len]
        self.data_array_input_words = np.array(input_word_data).reshape(batch_size, batch_len)
    self.batching_initialized = True
def get_batch(self):
    '''
    Returns the next (input, target) slice of the batched data.
    Returns:
        x: n-gram inputs (optionally a tuple with the word inputs)
        y: target words, the same slice shifted one step to the right
        end_reached: True once the whole dataset has been consumed
    '''
    if not self.batching_initialized:
        raise ValueError("Batching is not yet initialized.")
    start = self.iterator * self.num_steps
    stop = start + self.num_steps
    # inputs = n-grams (slice of batch_size x num_steps)
    x = self.data_array_ngrams[:, start:stop]
    if 'add_word' in self.config:
        if 'input_vocab' in self.config:
            # separate input vocabulary: word ids come from the input array
            x = (x, self.data_array_input_words[:, start:stop])
        else:
            # shared vocabulary for input and output words
            x = (x, self.data_array_words[:, start:stop])
    # targets = words (same slice but shifted one step to the right)
    y = self.data_array_words[:, start + 1:stop + 1]
    self.iterator += 1
    # wrap around once the whole dataset has been seen
    if self.iterator >= self.num_samples:
        self.iterator = 0
        self.end_reached = True
    return x, y, self.end_reached
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
""" RandOm Convolutional KErnel Transform (ROCKET)
"""
__author__ = ["Matthew Middlehurst", "Oleksii Kachaiev"]
__all__ = ["ROCKETClassifier"]
import numpy as np
from joblib import delayed, Parallel
from sklearn.base import clone
from sklearn.ensemble._base import _set_random_states
from sklearn.linear_model import RidgeClassifierCV
from sklearn.pipeline import make_pipeline
from sklearn.utils import check_random_state
from sklearn.utils.multiclass import class_distribution
from sktime.classification.base import BaseClassifier
from sktime.transformations.panel.rocket import Rocket
from sktime.utils.validation import check_n_jobs
from sktime.utils.validation.panel import check_X
from sktime.utils.validation.panel import check_X_y
import warnings
class ROCKETClassifier(BaseClassifier):
    """
    Classifier wrapped for the ROCKET transformer using RidgeClassifierCV as the
    base classifier.
    Allows the creation of an ensemble of ROCKET classifiers to allow for
    generation of probabilities at the expense of scalability.
    Parameters
    ----------
    num_kernels : int, number of kernels for ROCKET transform
    (default=10,000)
    n_estimators : int, ensemble size, optional (default=None). When set
    to None (default) or 1, the classifier uses a single estimator rather
    than an ensemble
    random_state : int or None, seed for random, integer,
    optional (default to no seed)
    n_jobs : int, the number of jobs to run in parallel for `fit`,
    optional (default=1)
    Attributes
    ----------
    estimators_ : array of individual classifiers
    weights : weight of each classifier in the ensemble
    weight_sum : sum of all weights
    n_classes : extracted from the data
    Notes
    -----
    @article{dempster_etal_2019,
      author = {Dempster, Angus and Petitjean, Francois and Webb,
      Geoffrey I},
      title = {ROCKET: Exceptionally fast and accurate time series
      classification using random convolutional kernels},
      year = {2019},
      journal = {arXiv:1910.13051}
    }
    Java version
    https://github.com/uea-machine-learning/tsml/blob/master/src/main/java/
    tsml/classifiers/hybrids/ROCKETClassifier.java
    """

    # Capability tags
    capabilities = {
        "multivariate": True,
        "unequal_length": False,
        "missing_values": False,
    }

    def __init__(
        self,
        num_kernels=10000,
        ensemble=None,
        ensemble_size=25,
        random_state=None,
        n_estimators=None,
        n_jobs=1,
    ):
        self.num_kernels = num_kernels
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.n_estimators = n_estimators
        # 'ensemble'/'ensemble_size' kept for backwards compatibility only
        self.ensemble = ensemble
        self.ensemble_size = ensemble_size
        if ensemble is not None and n_estimators is None:
            self.n_estimators = ensemble_size
            warnings.warn(
                "ensemble and ensemble_size params are deprecated and will be "
                "removed in future releases, use n_estimators instead",
                PendingDeprecationWarning,
            )
        self.estimators_ = []
        self.weights = []
        self.weight_sum = 0
        self.n_classes = 0
        self.classes_ = []
        self.class_dictionary = {}
        super(ROCKETClassifier, self).__init__()

    def _is_ensemble(self):
        # fit() only builds an ensemble (and its weights) for n_estimators > 1.
        # predict/predict_proba previously branched on 'is not None' alone, so
        # n_estimators == 1 read weights that were never computed (division by
        # weight_sum == 0). All three methods now share this single condition.
        return self.n_estimators is not None and self.n_estimators > 1

    def fit(self, X, y):
        """
        Build a single or ensemble of pipelines containing the ROCKET transformer and
        RidgeClassifierCV classifier.
        Parameters
        ----------
        X : nested pandas DataFrame of shape [n_instances, 1]
            Nested dataframe with univariate time-series in cells.
        y : array-like, shape = [n_instances] The class labels.
        Returns
        -------
        self : object
        """
        X, y = check_X_y(X, y)
        n_jobs = check_n_jobs(self.n_jobs)
        self.n_classes = np.unique(y).shape[0]
        self.classes_ = class_distribution(np.asarray(y).reshape(-1, 1))[0][0]
        for index, class_val in enumerate(self.classes_):
            self.class_dictionary[class_val] = index
        if self._is_ensemble():
            base_estimator = _make_estimator(self.num_kernels, self.random_state)
            self.estimators_ = Parallel(n_jobs=n_jobs)(
                delayed(_fit_estimator)(
                    _clone_estimator(base_estimator, self.random_state), X, y
                )
                for _ in range(self.n_estimators)
            )
            # each pipeline is weighted by its ridge cross-validation score
            for rocket_pipeline in self.estimators_:
                weight = rocket_pipeline.steps[1][1].best_score_
                self.weights.append(weight)
                self.weight_sum += weight
        else:
            base_estimator = _make_estimator(self.num_kernels, self.random_state)
            self.estimators_ = [_fit_estimator(base_estimator, X, y)]
        self._is_fitted = True
        return self

    def predict(self, X):
        """Predict class labels; weighted vote with random tie-breaking in
        ensemble mode, direct prediction otherwise."""
        if self._is_ensemble():
            rng = check_random_state(self.random_state)
            # break ties between equally probable classes at random
            return np.array(
                [
                    self.classes_[int(rng.choice(np.flatnonzero(prob == prob.max())))]
                    for prob in self.predict_proba(X)
                ]
            )
        else:
            self.check_is_fitted()
            return self.estimators_[0].predict(X)

    def predict_proba(self, X):
        """Return class probability estimates for each instance in X."""
        self.check_is_fitted()
        X = check_X(X)
        if self._is_ensemble():
            # accumulate each estimator's weight on its predicted class
            sums = np.zeros((X.shape[0], self.n_classes))
            for n, clf in enumerate(self.estimators_):
                preds = clf.predict(X)
                for i in range(0, X.shape[0]):
                    sums[i, self.class_dictionary[preds[i]]] += self.weights[n]
            dists = sums / (np.ones(self.n_classes) * self.weight_sum)
        else:
            # single estimator: one-hot distribution from the hard prediction
            dists = np.zeros((X.shape[0], self.n_classes))
            preds = self.estimators_[0].predict(X)
            for i in range(0, X.shape[0]):
                dists[i, np.where(self.classes_ == preds[i])] = 1
        return dists

    # for compatibility
    @property
    def classifiers(self):
        warnings.warn(
            "classifiers attribute is deprecated and will be removed "
            "in future releases, use estimators_ instead",
            PendingDeprecationWarning,
        )
        return self.estimators_
def _fit_estimator(estimator, X, y):
    # fit and return the estimator; a picklable top-level function so it can
    # be dispatched through joblib.Parallel/delayed
    return estimator.fit(X, y)
def _make_estimator(num_kernels, random_state):
    # pipeline: ROCKET transform followed by a cross-validated ridge classifier;
    # the ridge step's best_score_ later serves as the ensemble weight
    return make_pipeline(
        Rocket(num_kernels=num_kernels, random_state=random_state),
        RidgeClassifierCV(alphas=np.logspace(-3, 3, 10), normalize=True),
    )
def _clone_estimator(base_estimator, random_state=None):
    """Return an unfitted copy of base_estimator, reseeded when requested."""
    cloned = clone(base_estimator)
    if random_state is None:
        return cloned
    _set_random_states(cloned, random_state)
    return cloned
|
nilq/baby-python
|
python
|
# Generated by Django 2.2.1 on 2019-05-29 20:37
import json
from django.db import migrations
def normalize_webhook_values(apps, schema_editor):
    """Convert legacy webhook channel values to the unified JSON schema.

    Two legacy formats are handled:
    * plain text: url_down, url_up and post_data separated by newlines
    * early JSON: a dict carrying "post_data" and a single shared "headers"
    Values already in the new schema (JSON without "post_data") are left as-is.
    """
    Channel = apps.get_model("api", "Channel")
    for ch in Channel.objects.filter(kind="webhook").only("value"):
        # The old format of url_down, url_up, post_data separated by newlines:
        if not ch.value.startswith("{"):
            parts = ch.value.split("\n")
            url_down = parts[0]
            url_up = parts[1] if len(parts) > 1 else ""
            post_data = parts[2] if len(parts) > 2 else ""
            ch.value = json.dumps(
                {
                    # presence of a request body implies POST, otherwise GET
                    "method_down": "POST" if post_data else "GET",
                    "url_down": url_down,
                    "body_down": post_data,
                    "headers_down": {},
                    "method_up": "POST" if post_data else "GET",
                    "url_up": url_up,
                    "body_up": post_data,
                    "headers_up": {},
                }
            )
            ch.save()
            continue
        doc = json.loads(ch.value)
        # Legacy "post_data" in doc -- use the legacy fields
        if "post_data" in doc:
            ch.value = json.dumps(
                {
                    "method_down": "POST" if doc["post_data"] else "GET",
                    "url_down": doc["url_down"],
                    "body_down": doc["post_data"],
                    # the legacy format had one headers dict for both directions
                    "headers_down": doc["headers"],
                    "method_up": "POST" if doc["post_data"] else "GET",
                    "url_up": doc["url_up"],
                    "body_up": doc["post_data"],
                    "headers_up": doc["headers"],
                }
            )
            ch.save()
            continue
class Migration(migrations.Migration):
    """Data migration: normalize legacy webhook channel values to JSON."""
    dependencies = [("api", "0060_tokenbucket")]
    operations = [
        # reverse operation is a no-op: the normalized JSON stays in place
        migrations.RunPython(normalize_webhook_values, migrations.RunPython.noop)
    ]
|
nilq/baby-python
|
python
|
from easyidp.io.tests import test
import easyidp.io.metashape
import easyidp.io.pix4d
import easyidp.io.pcd
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# (C) 2005 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: kamaelia-list-owner@lists.sourceforge.net
# to discuss alternative licensing.
# -------------------------------------------------------------------------
#
# Following script is tested on python-2.4.3 and pycrypto-2.0.1
# It adds a new class securedComponent which is subclass of component
# All other classes are the same as explained in MiniAxon tutorial
from Crypto.Cipher import AES
class microprocess(object):
    """Base class for cooperatively scheduled generator processes."""

    def __init__(self):
        super(microprocess, self).__init__()

    def main(self):
        """Default generator body: yield control once, then finish."""
        yield 1
class scheduler(microprocess):
    """Round-robin scheduler that drives activated microprocess generators."""
    def __init__(self):
        super(scheduler, self).__init__()
        self.active = []   # generators being run this round
        #self.queue = []
        self.newqueue = [] # generators scheduled for the next round
    def main(self):
        # run a fixed 100 rounds over the active generators
        for i in range(100):
            for current in self.active:
                # yield so the scheduler itself can be driven as a generator
                yield 1
                try:
                    # Python 2 generator protocol (next(current) on Python 3)
                    ret = current.next()
                    if ret != -1:
                        # keep the process scheduled unless it signalled -1
                        self.newqueue.append(current)
                except StopIteration:
                    # generator finished: silently drop it from the schedule
                    pass
            self.active = self.newqueue
            self.newqueue = []
    def activateMicroprocess(self, someprocess):
        # store the generator object produced by the process's main()
        ret = someprocess.main()
        self.newqueue.append(ret)
class component(microprocess):
    """Microprocess with named message boxes for communication."""

    def __init__(self):
        super(component, self).__init__()
        # every component starts with an empty inbox and outbox
        self.boxes = {"inbox": [], "outbox": []}

    def send(self, value, boxname):
        """Append value to the named box."""
        box = self.boxes[boxname]
        box.append(value)

    def recv(self, boxname):
        """Pop and return the most recently added value from the named box."""
        box = self.boxes[boxname]
        return box.pop()

    def dataReady(self, boxname):
        """Return the number of values waiting in the named box."""
        return len(self.boxes[boxname])
class secureComponent(component):  # New class
    """Component that AES-encrypts outgoing and decrypts incoming messages."""
    def __init__(self):
        super(secureComponent, self).__init__()
        self.key = 'A simple testkey'
        self.crypt_obj = AES.new(self.key, AES.MODE_ECB)  # Simplest mode for testing
    def send(self, value, boxname):
        """Pad value to a 16-byte multiple, encrypt it, and send it on."""
        diff = len(value) % 16  # Data required in blocks of 16 bytes
        # '!=' rather than 'is not': identity comparison of ints only works by
        # accident (CPython small-int caching) and warns on Python 3.8+
        if diff != 0:
            value = value + ('~' * (16 - diff))  # '~' marks the padding
        encrypted_value = self.crypt_obj.encrypt(value)
        super(secureComponent, self).send(encrypted_value, boxname)
    def recv(self, boxname):
        """Receive a message, decrypt it, and strip the '~' padding."""
        encrypted_value = super(secureComponent, self).recv(boxname)
        value = self.crypt_obj.decrypt(encrypted_value)
        # look for padding in the final block only; a plaintext whose length
        # was an exact multiple of 16 carries no padding, so find() == -1 must
        # leave the value intact (the old code sliced to -1, chopping a byte)
        pad_start = value.find('~', len(value) - 16)
        if pad_start != -1:
            value = value[:pad_start]
        return value
class postman(microprocess):
    """Delivers data from a source component's box to a sink component's box."""
    def __init__(self, source, sourcebox, sink, sinkbox):
        super(postman, self).__init__()
        self.source = source        # component to read from
        self.sourcebox = sourcebox  # box name on the source (e.g. "outbox")
        self.sink = sink            # component to deliver to
        self.sinkbox = sinkbox      # box name on the sink (e.g. "inbox")
    def main(self):
        # forward at most one item per scheduler round, if any is waiting
        while 1:
            yield 1
            if self.source.dataReady(self.sourcebox):
                data = self.source.recv(self.sourcebox)
                self.sink.send(data, self.sinkbox)
#-------------------------------------------------------
# Testing
class Producer(secureComponent):
    """Emits numbered messages on its (encrypted) outbox, one per round."""
    def __init__(self, message):
        super(Producer, self).__init__()
        # message: prefix prepended to every emitted item
        self.message = message
    def main(self):
        count = 0
        while 1:
            yield 1
            count += 1
            # a running counter makes every emitted message distinct
            msg = self.message + str(count)
            self.send(msg, "outbox")
class Consumer(secureComponent):
    """Prints every (decrypted) message that arrives on its inbox."""
    def main(self):
        while 1:
            yield 1
            if self.dataReady("inbox"):
                data = self.recv("inbox")
                # Python 2 print statement (script targets python-2.4)
                print data
# Wire a producer to a consumer through a postman, then run the scheduler.
p = Producer("Hello World - test ")
c = Consumer()
delivery_girl = postman(p, "outbox", c, "inbox")
myscheduler = scheduler()
myscheduler.activateMicroprocess(p)
myscheduler.activateMicroprocess(c)
myscheduler.activateMicroprocess(delivery_girl)
# drive the scheduler's generator to completion (100 rounds)
for _ in myscheduler.main():
    pass
## class printer(microprocess):
## def __init__(self, string):
## super(printer, self).__init__()
## self.string = string #String to be printed
## def main(self):
## while 1:
## yield 1
## print self.string
## X = printer("Hello World")
## Y = printer("Game Over")
## myscheduler = scheduler()
## myscheduler.activateMicroprocess(X)
## myscheduler.activateMicroprocess(Y)
## for _ in myscheduler.main():
## pass
|
nilq/baby-python
|
python
|
import pytest
from channels.generic.websocket import (
AsyncJsonWebsocketConsumer, AsyncWebsocketConsumer, JsonWebsocketConsumer, WebsocketConsumer,
)
from channels.testing import WebsocketCommunicator
# @pytest.mark.asyncio
# async def test_websocket_consumer():
# """
# Tests that WebsocketConsumer is implemented correctly.
# """
# results = {}
#
# class TestConsumer(WebsocketConsumer):
# def connect(self):
# results["connected"] = True
# self.accept()
#
# def receive(self, text_data=None, bytes_data=None):
# results["received"] = (text_data, bytes_data)
# self.send(text_data=text_data, bytes_data=bytes_data)
#
# def disconnect(self, code):
# results["disconnected"] = code
#
# # Test a normal connection
# communicator = WebsocketCommunicator(TestConsumer, "/testws/")
# connected, _ = await communicator.connect()
# assert connected
# assert "connected" in results
# # Test sending text
# await communicator.send_to(text_data="hello")
# response = await communicator.receive_from()
# assert response == "hello"
# assert results["received"] == ("hello", None)
# # Test sending bytes
# await communicator.send_to(bytes_data=b"w\0\0\0")
# response = await communicator.receive_from()
# assert response == b"w\0\0\0"
# assert results["received"] == (None, b"w\0\0\0")
# # Close out
# await communicator.disconnect()
# assert "disconnected" in results
@pytest.mark.asyncio
async def test_async_websocket_consumer():
    """
    Tests that AsyncWebsocketConsumer is implemented correctly.
    """
    # collects the consumer callbacks' side effects for assertions below
    results = {}
    class TestConsumer(AsyncWebsocketConsumer):
        async def connect(self):
            results["connected"] = True
            await self.accept()
        async def receive(self, text_data=None, bytes_data=None):
            # echo whatever arrives straight back to the client
            results["received"] = (text_data, bytes_data)
            await self.send(text_data=text_data, bytes_data=bytes_data)
        async def disconnect(self, code):
            results["disconnected"] = code
    # Test a normal connection
    communicator = WebsocketCommunicator(TestConsumer, "/testws/")
    connected, _ = await communicator.connect()
    assert connected
    assert "connected" in results
    # Test sending text
    await communicator.send_to(text_data="hello")
    response = await communicator.receive_from()
    assert response == "hello"
    assert results["received"] == ("hello", None)
    # Test sending bytes
    await communicator.send_to(bytes_data=b"w\0\0\0")
    response = await communicator.receive_from()
    assert response == b"w\0\0\0"
    assert results["received"] == (None, b"w\0\0\0")
    # Close out
    await communicator.disconnect()
    assert "disconnected" in results
# @pytest.mark.asyncio
# async def test_json_websocket_consumer():
# """
# Tests that JsonWebsocketConsumer is implemented correctly.
# """
# results = {}
#
# class TestConsumer(JsonWebsocketConsumer):
# def connect(self):
# self.accept()
#
# def receive_json(self, data=None):
# results["received"] = data
# self.send_json(data)
#
# # Open a connection
# communicator = WebsocketCommunicator(TestConsumer, "/testws/")
# connected, _ = await communicator.connect()
# assert connected
# # Test sending
# await communicator.send_json_to({"hello": "world"})
# response = await communicator.receive_json_from()
# assert response == {"hello": "world"}
# assert results["received"] == {"hello": "world"}
# # Test sending bytes breaks it
# await communicator.send_to(bytes_data=b"w\0\0\0")
# with pytest.raises(ValueError):
# await communicator.wait()
#
#
@pytest.mark.asyncio
async def test_async_json_websocket_consumer():
    """
    Tests that AsyncJsonWebsocketConsumer is implemented correctly.
    """
    # collects the consumer callbacks' side effects for assertions below
    results = {}
    class TestConsumer(AsyncJsonWebsocketConsumer):
        async def connect(self):
            await self.accept()
        async def receive_json(self, data=None):
            # echo the decoded JSON payload straight back
            results["received"] = data
            await self.send_json(data)
    # Open a connection
    communicator = WebsocketCommunicator(TestConsumer, "/testws/")
    connected, _ = await communicator.connect()
    assert connected
    # Test sending
    await communicator.send_json_to({"hello": "world"})
    response = await communicator.receive_json_from()
    assert response == {"hello": "world"}
    assert results["received"] == {"hello": "world"}
    # Test sending bytes breaks it (binary frames are not valid JSON input)
    await communicator.send_to(bytes_data=b"w\0\0\0")
    with pytest.raises(ValueError):
        await communicator.wait()
|
nilq/baby-python
|
python
|
from PySide6.QtCore import QAbstractTableModel, Qt
class PandasModel(QAbstractTableModel):
    """Qt table model exposing a pandas DataFrame to Qt item views.

    The DataFrame is held by reference, so edits made through setData
    mutate the caller's DataFrame in place.
    """

    def __init__(self, data):
        super().__init__()
        # data: the pandas DataFrame backing this model (held by reference)
        self._data = data

    def rowCount(self, index=None):
        """Return the number of DataFrame rows.

        A default for *index* lets the method also be called without a
        parent index, matching columnCount.
        """
        return self._data.shape[0]

    def columnCount(self, parent=None):
        """Return the number of DataFrame columns ('parnet' typo fixed)."""
        return self._data.shape[1]

    def data(self, index, role=Qt.DisplayRole):
        """Return the cell value as a string for display/edit roles."""
        if index.isValid():
            if role == Qt.DisplayRole or role == Qt.EditRole:
                value = self._data.iloc[index.row(), index.column()]
                return str(value)
        # other roles (and invalid indexes) fall through to None

    def setData(self, index, value, role):
        """Write *value* into the DataFrame cell for edit-role changes."""
        if role == Qt.EditRole:
            self._data.iloc[index.row(), index.column()] = value
            # notify attached views so the edited cell repaints; the original
            # omitted this, leaving views stale after programmatic edits
            self.dataChanged.emit(index, index, [role])
            return True
        return False

    def headerData(self, col, orientation, role):
        """Use the DataFrame's column labels as horizontal headers."""
        if orientation == Qt.Horizontal and role == Qt.DisplayRole:
            return self._data.columns[col]

    def flags(self, index):
        """Every cell is selectable, enabled, and editable."""
        return Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2019 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.1 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied.
"""
__author__ = "Gabriel Zapodeanu TME, ENB"
__email__ = "gzapodea@cisco.com"
__version__ = "0.1.0"
__copyright__ = "Copyright (c) 2019 Cisco and/or its affiliates."
__license__ = "Cisco Sample Code License, Version 1.1"
import difflib
import urllib3
from urllib3.exceptions import InsecureRequestWarning # for insecure https warnings
urllib3.disable_warnings(InsecureRequestWarning) # disable insecure https warnings
def compare_configs(cfg1, cfg2):
    """
    Compare two configuration files with difflib's unified diff and return
    the '!'-delimited configuration sections that contain changes.

    Changed lines are prefixed with '+' or '-'.

    :param cfg1: old configuration file path and filename
    :param cfg2: new configuration file path and filename
    :return: text with the configuration lines that changed, including the
        full sections that contain those changes
    """
    # read both configuration files; context managers guarantee the
    # handles are closed even if reading fails (original leaked on error)
    with open(cfg1, 'r') as f1:
        old_cfg = f1.readlines()
    with open(cfg2, 'r') as f2:
        new_cfg = f2.readlines()
    # compare the two specified config files {cfg1} and {cfg2};
    # n=9 keeps nine context lines so whole sections survive the diff
    d = difflib.unified_diff(old_cfg, new_cfg, n=9)
    # diff_list collects the changed ('+'/'-') lines; diff_output collects
    # the raw generator output so sections can be reconstructed later
    diff_list = []
    diff_output = ''
    # substrings that mark diff bookkeeping or volatile metadata, not
    # real configuration changes
    ignored = ('Current configuration', 'Last configuration change',
               '+++', '---', '-!', '+!')
    for line in d:
        diff_output += line
        if all(line.find(marker) == -1 for marker in ignored):
            if line.startswith('+') or line.startswith('-'):
                diff_list.append('\n' + line)
    # normalize the section separators, then split the diff into sections
    diff_output = diff_output.replace('+!', '!')
    diff_output = diff_output.replace('-!', '!')
    diff_output_list = diff_output.split('!')
    # keep each section that contains at least one changed line, once
    all_changes = []
    for changes in diff_list:
        for config_changes in diff_output_list:
            if changes in config_changes:
                if config_changes not in all_changes:
                    all_changes.append(config_changes)
    # concatenate the changed sections into the final text
    return ''.join(all_changes)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import re
import logging
from rapidsms.apps.base import AppBase
from .models import Location
logger = logging.getLogger(__name__)
class App(AppBase):
    """RapidSMS app that extracts a trailing "at LOCATION" clause from an
    incoming message, resolves it to a Location, and strips it from the
    message text so downstream apps see only the payload."""

    # Captures "<anything> at <location>"; group(1) is the message body and
    # group(2) the location text.  NOTE(review): group(1) is greedy, so a
    # message containing "at" twice splits at the LAST occurrence — confirm
    # that is the intended behavior.
    PATTERN = re.compile(r"^(.+)\b(?:at)\b(.+?)$")

    def __find_location(self, text):
        """Return the Location whose slug matches *text* (case-insensitive),
        or None when no such location exists."""
        try:
            # check for a location code first
            return Location.objects.get(slug__iexact=text)
        # nothing else is supported, for now!
        except Location.DoesNotExist:
            return None

    def parse(self, msg):
        """Attach ``msg.location`` (Location or None) and remove the
        trailing "at ..." clause from ``msg.text``."""
        # if this message ends in "at SOMEWHERE",
        # we have work to do. otherwise, ignore it
        m = self.PATTERN.match(msg.text)
        if m is not None:
            # resolve the string into a Location object
            # (or None), and attach it to msg for other
            # apps to deal with
            text = m.group(2).strip()
            # split the text by space to find if it has a village
            # locCode,village = text.split()
            # location = self.__find_location(locCode)
            # location.village = village
            # msg.location = location
            msg.location = self.__find_location(text)
            # strip the location tag from the message,
            # so other apps don't have to deal with it
            msg.text = m.group(1)
            # we should probably log this crazy behavior...
            logger.info("Stripped Location code: %s" % text)
            logger.info("Message is now: %s" % msg.text)
|
nilq/baby-python
|
python
|
class Weapon:
    """A weapon with a name, per-hit damage and a maximum reach.

    :param name: display name of the weapon
    :param damage: hit points subtracted from the target on a successful hit
    :param range: maximum Manhattan distance at which the weapon reaches
    """

    def __init__(self, name, damage, range):
        self.name = name
        self.damage = damage
        self.range = range

    def hit(self, actor, target):
        """Strike *target* from *actor*'s position if it is alive and in range.

        Bug fix: distance is now the Manhattan distance |dx| + |dy|.  The
        original summed the raw coordinate deltas, so a target below/left
        of the actor contributed negative terms and could be "hit" from
        arbitrarily far away.
        """
        if target.is_alive():
            distance = (abs(target.pos_x - actor.pos_x) +
                        abs(target.pos_y - actor.pos_y))
            if self.range >= distance:
                print(f'Врагу нанесен урон оружием {self.name} в размере {self.damage}')
                target.hp -= self.damage
            else:
                print(f'Враг слишком далеко для оружия {self.name}')
        else:
            print('Враг уже повержен')

    def __str__(self):
        return self.name
class BaseCharacter:
    """Common state and behavior for anything on the map: a 2-D position
    plus a hit-point pool."""

    def __init__(self, x, y, hp):
        self.pos_x = x
        self.pos_y = y
        self.hp = hp

    def move(self, delta_x, delta_y):
        """Shift the character by the given offsets."""
        self.pos_x = self.pos_x + delta_x
        self.pos_y = self.pos_y + delta_y

    def is_alive(self):
        """True while the character still has hit points left."""
        return self.hp > 0

    def get_damage(self, amount):
        """Reduce hit points by *amount*, but only while still alive."""
        if not self.is_alive():
            return
        self.hp -= amount

    def get_coords(self):
        """Current position as an (x, y) tuple."""
        return (self.pos_x, self.pos_y)
class BaseEnemy(BaseCharacter):
    """An enemy combatant: a character carrying one weapon, only allowed
    to attack the main hero."""

    def __init__(self, pos_x, pos_y, weapon, hp):
        super().__init__(pos_x, pos_y, hp)
        self.weapon = weapon

    def hit(self, target):
        """Attack *target* with the carried weapon; any target that is
        not the MainHero is refused."""
        if target.__class__.__name__ != 'MainHero':
            print('Могу ударить только Главного героя')
            return
        self.weapon.hit(self, target)

    def __str__(self):
        return f'Враг на позиции ({self.pos_x}, {self.pos_y}) с оружием {self.weapon.name}'
class MainHero(BaseCharacter):
    """The player character: carries an arsenal of weapons, can switch
    between them, attack enemies and heal up to a 200 HP cap."""

    def __init__(self, pos_x, pos_y, name, hp):
        super().__init__(pos_x, pos_y, hp)
        self.name = name
        self.weapons = []
        self.current_weapon = 0

    def hit(self, target):
        """Attack *target* with the currently selected weapon; only
        BaseEnemy instances are valid targets."""
        if not self.weapons:
            print('Я безоружен')
            return
        if target.__class__.__name__ != 'BaseEnemy':
            print('Могу ударить только Врага')
            return
        self.weapons[self.current_weapon].hit(self, target)

    def add_weapon(self, weapon):
        """Pick up *weapon* if it really is a Weapon instance."""
        if weapon.__class__.__name__ != 'Weapon':
            print('Это не оружие')
            return
        self.weapons.append(weapon)
        print(f'Подобрал {weapon}')

    def next_weapon(self):
        """Cycle to the next weapon in the arsenal (wraps around)."""
        count = len(self.weapons)
        if count == 1:
            print('У меня только одно оружие')
        elif count > 1:
            self.current_weapon = (self.current_weapon + 1) % count
            print(f'Сменил оружие на {self.weapons[self.current_weapon]}')
        else:
            print('Я безоружен')

    def heal(self, amount):
        """Restore *amount* hit points, capped at 200."""
        self.hp = min(self.hp + amount, 200)
        print(f'Полечился, теперь здоровья {self.hp}')
# --- demo scenario -------------------------------------------------------
# An arsenal of weapons with varying damage and reach.
weapon1 = Weapon("Короткий меч", 5, 1)
weapon2 = Weapon("Длинный меч", 7, 2)
weapon3 = Weapon("Лук", 3, 10)
weapon4 = Weapon("Лазерная орбитальная пушка", 1000, 1000)
# A neutral character and two enemies armed with different weapons.
princess = BaseCharacter(100, 100, 100)
archer = BaseEnemy(50, 50, weapon3, 100)
armored_swordsman = BaseEnemy(10, 10, weapon2, 500)
# Enemies may only attack the MainHero, so this attack is refused.
archer.hit(armored_swordsman)
armored_swordsman.move(10, 10)
print(armored_swordsman.get_coords())
# The hero starts unarmed: attacking and switching weapons are refused.
main_hero = MainHero(0, 0, "Король Артур", 200)
main_hero.hit(armored_swordsman)
main_hero.next_weapon()
# Pick up weapons and attack; only BaseEnemy targets are accepted.
main_hero.add_weapon(weapon1)
main_hero.hit(armored_swordsman)
main_hero.add_weapon(weapon4)
main_hero.hit(armored_swordsman)
main_hero.next_weapon()
main_hero.hit(princess)
main_hero.hit(armored_swordsman)
main_hero.hit(armored_swordsman)
|
nilq/baby-python
|
python
|
# flake8: noqa
#
# Root of the SAM package where we expose public classes & methods for other consumers of this SAM Translator to use.
# This is essentially our Public API
#
|
nilq/baby-python
|
python
|
class Solution:
    def trap(self, height: 'list[int]') -> int:
        """Compute trapped rain water (LeetCode 42, "Trapping Rain Water").

        Monotonic-stack approach: the stack holds indices of bars of
        decreasing height; when a taller bar arrives, each popped bar is
        the floor of a water pocket bounded by the new bar and the new
        stack top.  O(n) time, O(n) space.

        Fix: the original annotated ``height: List[int]`` without
        importing ``typing.List``, which raises NameError as soon as the
        class body executes; the annotation is now a string literal so it
        is never evaluated at runtime.
        """
        n = len(height)
        # fewer than 3 bars can never enclose water
        if n <= 2:
            return 0
        stack = []  # indices of bars, heights strictly decreasing
        ans = 0
        for i, num in enumerate(height):
            # resolve every pocket whose floor is lower than the new bar
            while stack and height[stack[-1]] < num:
                cur = stack.pop()
                if stack:
                    # width between the two walls times the water depth
                    ans += (min(height[stack[-1]], num) - height[cur]) * (i - stack[-1] - 1)
            stack.append(i)
        return ans
|
nilq/baby-python
|
python
|
import random
def int_to_list(n):
    """Return the decimal digits of *n* as a list of one-character strings."""
    return list(str(n))
class CowsAndBulls:
    """State and helpers for a game of Cows and Bulls.

    Fixes relative to the original:

    * ``compareNumbers`` was declared inside the class without ``self``,
      so calling it on an instance raised TypeError; it is now a
      ``@staticmethod`` (still callable as ``CowsAndBulls.compareNumbers``).
    * ``random.sample`` was given a ``set``, which raises TypeError since
      Python 3.11; the population is now sorted into a sequence first.
    """

    def __init__(self):
        self.number = ""      # the secret number, once chosen
        self.digits = 0       # number of digits in play
        self.active = False   # whether a game is in progress

    def makeRandom(self, digit):
        """Return a random *digit*-long number string with unique digits
        and a non-zero leading digit."""
        digits = set(range(10))
        first = random.randint(1, 9)
        # random.sample requires a sequence (not a set) as of Python 3.11
        second_to_last = random.sample(sorted(digits - {first}), digit - 1)
        botNumber = str(first) + ''.join(map(str, second_to_last))
        return botNumber

    @staticmethod
    def compareNumbers(numA, numB):
        """Compare two number strings and return (bulls, cows).

        Bulls are matching digits in matching positions; cows are digits
        present in both numbers at different positions.  Returns (-1, -1)
        when the inputs have different lengths.
        """
        if len(numA) != len(numB):
            return -1, -1
        bulls = 0
        cows = 0
        # normalize through int(), then split into digit characters
        l1 = list(str(int(numA)))
        l2 = list(str(int(numB)))
        for i, digit in enumerate(l1):
            if digit == l2[i]:
                bulls += 1
                l2[i] = 'a'  # consume so a bull cannot also count as a cow
        for digit in l1:
            for dig in l2:
                if dig == digit:
                    cows += 1
        return bulls, cows
|
nilq/baby-python
|
python
|
"""
Build a parse tree to evaluate a fully parenthesised mathematical expression, ((7+3)∗(5−2)) = ?
*
/ \
+ -
/ \ / \
7 3 5 2
"""
from datastruct.collections import HashTable
from datastruct.abstract import Stack
from datastruct.tree import BinaryTree
import operator
def build_parse_tree(fpexp):
    """
    Build a binary parse tree from a fully parenthesised expression.

    :param fpexp: fully parenthesised expression with space-separated
        tokens, e.g. "( ( 10 + 5 ) * 3 )"
    :return: BinaryTree whose inner nodes hold operators and whose
        leaves hold integer operands
    """
    tokens = fpexp.split()
    tree = BinaryTree()
    parents = Stack()  # remembers the path back up to each node's parent
    parents.push(tree)
    node = tree
    for token in tokens:
        if token == '(':
            # open a new left subtree and descend into it
            node.insertLeft(None)
            parents.push(node)
            node = node.left
        elif token in '+-*/':
            # operator: label the current node, then descend right
            node.key = token
            node.insertRight(None)
            parents.push(node)
            node = node.right
        elif token == ')':
            # subtree complete: climb back to its parent
            node = parents.pop()
        else:
            # operand leaf: store the value and climb back up
            node.key = int(token)
            node = parents.pop()
    return tree
def evaluate(parseTree: BinaryTree):
    """Recursively evaluate a parse tree built by build_parse_tree.

    Inner nodes hold one of '+', '-', '*', '/' and are applied to the
    values of their two subtrees; leaves hold integers and evaluate to
    themselves.
    """
    operators = HashTable(11)
    operators['+'] = operator.add
    operators['-'] = operator.sub
    operators['*'] = operator.mul
    operators['/'] = operator.truediv
    left, right = parseTree.left, parseTree.right
    if not (left and right):
        # leaf node: the stored key is the operand itself
        return parseTree.key
    # inner node: apply the operator to both evaluated subtrees
    return operators[parseTree.key](evaluate(left), evaluate(right))
if __name__ == '__main__':
    # Demo: parse and evaluate ((10 + 5) * 3), expecting 45.
    pt = build_parse_tree("( ( 10 + 5 ) * 3 )")
    print(pt)
    print(evaluate(pt))
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
import re
import csv
import click
import numpy as np
from scipy.stats import spearmanr
from hivdbql import app
from hivdbql.utils import dbutils
from hivdbql.models.isolate import CRITERIA_SHORTCUTS
np.seterr(divide='raise', invalid='raise')
db = app.db
models = app.models
GENE2DRUGCLASS = {
'PR': 'PI',
'RT': 'RTI',
'IN': 'INSTI'
}
MUTATION_PATTERN = re.compile(r'^[A-Z]?(\d+)([A-Z*_-]+)$')
def read_mutations(fp):
    """Parse a mutations file into an ordering map.

    Each non-empty line matching MUTATION_PATTERN (e.g. "M184V")
    contributes a (position, amino-acids) tuple.

    Improvement: the original called ``orderedmuts.index(m)`` per element
    (O(n^2) overall); ``enumerate`` over the sorted list gives the same
    ranks in O(n log n).

    :param fp: iterable of text lines (typically an open file)
    :return: {(pos, aa): rank} where rank is the index in sorted order
    """
    mutations = set()
    for line in fp:
        line = line.strip()
        match = MUTATION_PATTERN.match(line)
        if line and match:
            pos, aa = match.groups()
            mutations.add((int(pos), aa))
    return {m: i for i, m in enumerate(sorted(mutations))}
def calc_spearman(both, m0only, m1only, none):
    """Spearman correlation for a 2x2 presence/absence contingency.

    Expands the four cell counts into paired binary observations and
    returns scipy.stats.spearmanr's (rho, p-value) result.
    """
    pairs = []
    for pair, count in (((1, 1), both), ((1, 0), m0only),
                        ((0, 1), m1only), ((0, 0), none)):
        pairs.extend([pair] * count)
    return spearmanr(pairs)
# CLI entry point: for every pair of given mutations, count how many
# patients carry both / only one / neither, then report the pairwise
# Spearman correlation as CSV rows.
@click.command()
@click.argument('input_mutations_file', type=click.File('r'))
@click.argument('output_file', type=click.File('w'))
@click.option('--include-mixture', is_flag=True,
              help='Include specified mutations from mixtures')
@click.option('--include-zeros', is_flag=True,
              help='Include sequence without any of the specified mutations')
@click.option('--species', type=click.Choice(['HIV1', 'HIV2']),
              default='HIV1', help='specify an HIV species')
@click.option('--gene', type=click.Choice(['PR', 'RT', 'IN']),
              help='specify an HIV gene')
@click.option('--filter', type=click.Choice(CRITERIA_SHORTCUTS.keys()),
              multiple=True, default=('NO_CLONES', 'NO_QA_ISSUES',
                                      'SANGER_ONLY'),
              show_default=True, help='specify filter criteria')
def mutation_corellation(input_mutations_file, output_file,
                         include_mixture, include_zeros,
                         species, gene, filter):
    # mutations maps each (pos, aa) to its sorted rank; mutationitems is
    # the same data as a rank-ordered list of pairs
    mutations = read_mutations(input_mutations_file)
    mutationitems = sorted(mutations.items(), key=lambda i: i[1])
    nummuts = len(mutations)
    writer = csv.writer(output_file)
    # third axis indexes the 2x2 contingency cell encoded as two bits:
    # 0b11 = both mutations, 0b10 = only X, 0b01 = only Y, 0b00 = neither
    matrix = np.zeros([nummuts, nummuts, 0b100], dtype=np.int64)
    writer.writerow(['MutX', 'MutY', '#XY', '#X',
                     '#Y', '#Null', 'Rho', 'P'])
    drugclass = GENE2DRUGCLASS[gene]
    # query = models.Isolate.make_query(
    #     'HIV1', 'INSTI', 'all', ['NO_CLONES',
    #                              'NO_QA_ISSUES',
    #                              'PUBLISHED_ONLY'])
    # eager-load isolates -> sequences -> insertions/mixtures to avoid
    # per-row lazy loading while iterating patients
    query = (
        models.Patient.query
        .filter(models.Patient.isolates.any(db.and_(
            *models.Isolate.make_criteria(species, drugclass, 'art', filter)
        )))
        .options(db.selectinload(models.Patient.isolates)
                 .selectinload(models.Isolate.sequences)
                 .selectinload(models.Sequence.insertions))
        .options(db.selectinload(models.Patient.isolates)
                 .selectinload(models.Isolate.sequences)
                 .selectinload(models.Sequence.mixtures))
    )
    # stream patients in chunks of 500 with console progress reporting
    patients = dbutils.chunk_query(
        query, models.Patient.id, chunksize=500,
        on_progress=(lambda o, t:
                     print('{0}/{1} patients...'.format(o, t), end='\r')),
        on_finish=(lambda t:
                   print('{0} patients. '.format(t)))
    )
    patcount = 0
    seqcount = 0
    for patient in patients:
        # per-patient 0/1 matrix, so each patient contributes at most one
        # observation per mutation pair regardless of isolate count
        patmatrix = np.zeros_like(matrix)
        patflag = False
        for isolate in patient.isolates:
            if isolate.gene != gene:
                continue
            seq = isolate.get_or_create_consensus()
            first_aa = seq.first_aa
            last_aa = seq.last_aa
            # Here we ignored mixtures
            if include_mixture:
                seqmuts = {(pos, aa)
                           for pos, aas in seq.aas
                           for aa in aas if (pos, aa) in mutations}
            else:
                seqmuts = {m for m in seq.aas if m in mutations}
            if not include_zeros and not seqmuts:
                continue
            seqcount += 1
            patflag = True
            # iterate only the upper triangle (m1 ranks above m0)
            for m0, m0idx in mutationitems:
                if m0[0] < first_aa or m0[0] > last_aa:
                    # disqualified because of out of range
                    continue
                for m1, m1idx in mutationitems[m0idx + 1:]:
                    if m1[0] < first_aa or m1[0] > last_aa:
                        # disqualified because of out of range
                        continue
                    hasm0 = m0 in seqmuts
                    hasm1 = m1 in seqmuts
                    if hasm0 and hasm1:
                        # contains both
                        patmatrix[m0idx, m1idx, 0b11] = 1
                    elif hasm0 and not hasm1:
                        # contains m0
                        patmatrix[m0idx, m1idx, 0b10] = 1
                    elif not hasm0 and hasm1:
                        # contains m1
                        patmatrix[m0idx, m1idx, 0b01] = 1
                    else:  # elif not hasm0 and not hasm1:
                        # contains none
                        patmatrix[m0idx, m1idx, 0b00] = 1
        matrix += patmatrix
        patcount += patflag
    print('{} patients ({} sequences) have at least one given mutation.'
          .format(patcount, seqcount))
    # emit one CSV row per mutation pair; Spearman only when there is
    # signal (some co-occurrence, or both exclusive cells non-empty)
    for m0, m0idx in mutationitems:
        for m1, m1idx in mutationitems[m0idx + 1:]:
            both = matrix[m0idx, m1idx, 0b11]
            m0only = matrix[m0idx, m1idx, 0b10]
            m1only = matrix[m0idx, m1idx, 0b01]
            none = matrix[m0idx, m1idx, 0b00]
            if both != 0 or m0only * m1only != 0:
                rho, p = calc_spearman(both, m0only, m1only, none)
            else:
                rho = p = ''
            writer.writerow([
                '{}{}'.format(*m0),
                '{}{}'.format(*m1),
                both, m0only, m1only, none, rho, p
            ])
if __name__ == '__main__':
    # all DB access above requires an active Flask application context
    with app.app_context():
        mutation_corellation()
|
nilq/baby-python
|
python
|
""" Manages drawing of the game """
from typing import List, Tuple, Any
import colorsys
import random
import pygame
import settings
from game_state import GameState, Snake, Pizza
Color = Tuple[int, int, int]
class Colors:
    """ Basic colors used by the renderer and player palettes """
    # background / generic colors
    CLEAR_COLOR = (240, 240, 240)
    BLACK = (0, 0, 0)
    DARK_YELLOW = (200, 200, 0)
    HOT_PINK = (220, 0, 127)
    PINK = (255, 192, 203)
    FUCHSIA = (255, 130, 255)
    LIME = (0, 255, 0)
    # per-player gradient endpoints (players 1-4)
    P1_GREEN = (100, 255, 10)
    P1_YELLOW = (255, 255, 10)
    P2_RED = (255, 10, 10)
    P2_ORANGE = (255, 200, 10)
    P3_BLUE = (10, 10, 255)
    P3_CYAN = (10, 200, 200)
    P4_VIOLET = (150, 50, 255)
    P4_BLUE = (50, 50, 100)
    # extra colors used for the remaining player slots
    MINT = (170, 255, 195)
    GOLD = (249, 166, 2)
    ROYAL = (250, 218, 94)
# (primary, secondary) gradient endpoint pairs for up to eight players;
# consumed by SnakeGraphics, which feeds each pair to generate_gradient()
PLAYER_COLORS = [(Colors.P1_GREEN, Colors.P1_YELLOW),
                 (Colors.P2_RED, Colors.P2_ORANGE),
                 (Colors.P3_BLUE, Colors.P3_CYAN),
                 (Colors.P4_VIOLET, Colors.P4_BLUE),
                 (Colors.HOT_PINK, Colors.PINK),
                 (Colors.BLACK, Colors.DARK_YELLOW),
                 (Colors.ROYAL, Colors.GOLD), (Colors.FUCHSIA, Colors.MINT)]
def generate_gradient(colors: Tuple[Color, Color], steps: int) -> List[Color]:
    """ Generate a color gradient with 2*steps entries: the first half
    fades from colors[0] to colors[1], the second half fades back """
    def blend(src: Color, dst: Color, scale: float) -> Color:
        """ component-wise linear interpolation between src and dst """
        return tuple(int(a + (b - a) * scale) for a, b in zip(src, dst))

    first, second = colors
    palette = [blend(first, second, i / steps) for i in range(steps)]
    palette += [blend(second, first, i / steps) for i in range(steps)]
    return palette
class SnakeGraphics:
    """ Implements Snake drawing with 8-bit texture
    and palette color rotations """
    def __init__(self) -> None:
        def hsl_color_pair(seed: float,
                           player_index: int) -> Tuple[Color, Color]:
            """ Generate a hsl color with unique hue for each player """
            def hsl_color(hue: float, saturation: float,
                          lightness: float) -> Color:
                """ Convert hsl to rgb """
                # wrap hue back into [0, 1]
                hue = hue - 1 if hue > 1 else hue
                # NOTE(review): int(256 * i) can reach 256 when a channel
                # is exactly 1.0 — confirm pygame accepts that value
                red, green, blue = (
                    int(256 * i)
                    for i in colorsys.hls_to_rgb(hue, lightness, saturation))
                return (red, green, blue)
            # spread hues evenly across the player slots
            pidx = player_index / settings.MAX_PLAYERS
            return (hsl_color(seed + pidx, 0.99,
                              0.5), hsl_color(seed + pidx, 0.7, 0.3))
        # 8-bit palettized texture the snakes are drawn into; color
        # index 0 is the colorkey, i.e. fully transparent
        self.image = pygame.Surface(settings.PLAY_AREA, 0, 8)
        self.image.fill((0, 0, 0))
        self.image.set_colorkey((0, 0, 0))
        # one gradient per player: predefined PLAYER_COLORS pairs first,
        # random hue-derived pairs for any players beyond that list
        self.gradients = [
            generate_gradient(
                PLAYER_COLORS[index] if index < len(PLAYER_COLORS) else
                hsl_color_pair(random.random(), index),
                settings.PLAYER_COLOR_GRADIENT_SIZE // 2)
            for index in range(settings.MAX_PLAYERS)
        ]
        assert len(self.gradients) == settings.MAX_PLAYERS
        # full 256-entry palette shared by all players
        self.palette = [(0, 0, 0)] * 256
        # fractional rotation accumulator for the palette animation
        self.rotate: float = 0.0
        self.update_palette()
    def rotate_palette(self) -> None:
        """ Rotate the color gradients for each player to create animation """
        self.rotate += settings.SNAKE_COLOR_ROT
        rot = int(self.rotate)
        size = settings.PLAYER_COLOR_GRADIENT_SIZE
        # each player owns a contiguous palette span starting at index 1
        # (index 0 stays the transparent colorkey)
        for pidx in range(settings.MAX_PLAYERS):
            base = 1 + pidx * size
            for i in range(size):
                self.palette[base + i] = self.gradients[pidx][(i + rot) % size]
    def update_palette(self) -> None:
        """ Animate color palette and apply it to the snake texture """
        self.rotate_palette()
        self.image.set_palette(self.palette)
    def draw_snake(self, player_idx: int, snake: Snake) -> None:
        """ Apply updates to the snake texture """
        def player_color_index(pidx: int, value: int) -> int:
            """ return player color index in the shared palette """
            size = settings.PLAYER_COLOR_GRADIENT_SIZE
            return 1 + pidx * size + value % size
        # paint newly grown parts with the player's palette indices
        for part in snake.new_parts:
            index = player_color_index(player_idx, part[2])
            pygame.draw.circle(self.image, index, [part[0], part[1]],
                               settings.SNAKE_RADIUS)
        snake.new_parts.clear()
        # erase removed parts by painting the transparent index 0
        for part in snake.removed_parts:
            pygame.draw.circle(self.image, 0, [part[0], part[1]],
                               settings.SNAKE_RADIUS)
        snake.removed_parts.clear()
        # Replace last part as it was partially removed,
        # clearing could be implemented better with masking
        if len(snake.parts) > 0:
            part = snake.parts[0]
            corr_col_index = player_color_index(player_idx, part[2])
            pygame.draw.circle(self.image, corr_col_index, [part[0], part[1]],
                               settings.SNAKE_RADIUS)
    def draw_snakes(self, screen: Any, snakes: List[Snake]) -> None:
        """ Draw all provided snake objects and rotate palette """
        for snake_id, snake in enumerate(snakes):
            self.draw_snake(snake_id, snake)
        self.update_palette()
        screen.blit(self.image, (0, 0))
class GameRenderer:
    """ Handles game state rendering """
    def __init__(self) -> None:
        self.snake_graphics = SnakeGraphics()
        self.screen = pygame.display.set_mode(settings.PLAY_AREA)
    def draw_pizza(self, pizza: Pizza) -> None:
        """ Draw a pizza object to the screen """
        # three concentric circles of decreasing radius (outer rim,
        # middle ring, center)
        pygame.draw.circle(self.screen, (180, 160, 10), [pizza.x, pizza.y],
                           pizza.radius)
        pygame.draw.circle(self.screen, (255, 210, 10), [pizza.x, pizza.y],
                           pizza.radius - 3)
        pygame.draw.circle(self.screen, (255, 100, 10), [pizza.x, pizza.y],
                           pizza.radius - 6)
    def draw_pizzas(self, pizzas: List[Pizza]) -> None:
        """ Draw all pizzas in a list """
        for pizza in pizzas:
            self.draw_pizza(pizza)
    def draw_game(self, game_state: GameState) -> None:
        """ Draw one frame: clear, then pizzas, then snakes on top """
        self.screen.fill(Colors.CLEAR_COLOR)
        self.draw_pizzas(game_state.pizzas)
        self.snake_graphics.draw_snakes(self.screen, game_state.snakes)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from re import findall
from string import printable
from struct import unpack
from src.capturePkt.networkProtocol import NetworkProtocol
class Telnet(NetworkProtocol):
    """Parser for a Telnet payload: splits the byte stream into IAC
    command sequences and a trailing run of printable data, exposing the
    results as two parallel tuples (field names and parsed values)."""
    # "Interpret As Command" escape byte that introduces a Telnet command
    IAC = 0xff
    # Telnet command codes (RFC 854)
    codeDict = {236: 'EOF',
                237: 'SUSP',
                238: 'ABORT',
                239: 'EOR',
                240: 'SE',
                241: 'NOP',
                242: 'DM',
                243: 'BRK',
                244: 'IP',
                245: 'AO',
                246: 'AYT',
                247: 'EC',
                248: 'EL',
                249: 'GA',
                250: 'SB',
                251: 'WILL',
                252: 'WONT',
                253: 'DO',
                254: 'DONT',
                255: 'IAC',
                }
    # https://www.iana.org/assignments/telnet-options/telnet-options.xhtml
    optionDict = {0: 'Binary Transmission',
                  1: 'Echo',
                  2: 'Reconnection',
                  3: 'Suppress Go Ahead',
                  4: 'Approx Message Size Negotiation',
                  5: 'Status',
                  6: 'Timing Mark',
                  7: 'Remote Controlled Trans and Echo',
                  8: 'Output Line Width',
                  9: 'Output Page Size',
                  10: 'Output Carriage-Return Disposition',
                  11: 'Output Horizontal Tab Stops',
                  12: 'Output Horizontal Tab Disposition',
                  13: 'Output Formfeed Disposition',
                  14: 'Output Vertical Tabstops',
                  15: 'Output Vertical Tab Disposition',
                  16: 'Output Linefeed Disposition',
                  17: 'Extended ASCII',
                  18: 'Logout',
                  19: 'Byte Macro',
                  20: 'Data Entry Terminal',
                  21: 'SUPDUP',
                  22: 'SUPDUP Output',
                  23: 'Send Location',
                  24: 'Terminal Type',
                  25: 'End of Record',
                  26: 'TACACS User Identification',
                  27: 'Output Marking',
                  28: 'Terminal Location Number',
                  29: 'Telnet 3270 Regime',
                  30: 'X.3 PAD',
                  31: 'Negotiate About Window Size',
                  32: 'Terminal Speed',
                  33: 'Remote Flow Control',
                  34: 'Linemode',
                  35: 'X Display Location',
                  36: 'Environment Option',
                  37: 'Authentication Option',
                  38: 'Encryption Option',
                  39: 'New Environment Option',
                  40: 'TN3270E',
                  41: 'XAUTH',
                  42: 'CHARSET',
                  43: 'Telnet Remote Serial Port (RSP)',
                  44: 'Com Port Control Option',
                  45: 'Telnet Suppress Local Echo',
                  46: 'Telnet Start TLS',
                  47: 'KERMIT',
                  48: 'SEND-URL',
                  49: 'FORWARD_X',
                  138: 'TELOPT PRAGMA LOGON',
                  139: 'TELOPT SSPI LOGON',
                  140: 'TELOPT PRAGMA HEARTBEAT',
                  255: 'Extended-Options-List',
                  }
    def __init__(self, packet):
        """Parse *packet* (bytes): leading IAC sequences become
        'Telnet Command' fields; the first non-IAC byte starts the
        'Data' field and ends the parse."""
        self.extendField = tuple()
        self.extendParse = tuple()
        while packet:
            if packet[0] == self.IAC:
                # NOTE(review): every IAC sequence is consumed as exactly
                # 3 bytes (IAC + code + option); single-byte commands such
                # as NOP/AYT (240-249) carry no option byte per RFC 854 —
                # confirm payloads parsed here always use the 3-byte form.
                telnet = unpack('!B B', packet[1:3])
                self.code = Telnet.codeDict.get(telnet[0], 'Unknown')
                self.option = Telnet.optionDict.get(telnet[1], 'Unknown')
                commandStr = '{} {}'.format(self.code, self.option)
                self.extendField = self.extendField + ('Telnet Command',)
                self.extendParse = self.extendParse + (commandStr,)
                packet = packet[3:]
                continue
            else:
                # decode the remainder, keeping only printable characters
                data = packet.decode('utf-8', 'ignore')
                data = list(filter(lambda x: x in printable, data))
                data = ''.join(data)
                if len(data) > 80:
                    # wrap into 80-character lines
                    # NOTE(review): findall(r'.{80}') drops a trailing
                    # chunk shorter than 80 characters — confirm intended.
                    data = '\n'.join(findall(r'.{80}', data))
                else:
                    data = data.replace('\r\n', '')
                self.extendField = self.extendField + ('Data',)
                self.extendParse = self.extendParse + (data,)
                break
    def getFields(self):
        # field names, parallel to getParses()
        return self.extendField
    def getParses(self):
        # parsed values, parallel to getFields()
        return self.extendParse
|
nilq/baby-python
|
python
|
#-*- coding: utf-8 -*-
'''
Created on 2017. 11. 06
Updated on 2017. 11. 06
'''
from __future__ import print_function
import os
import cgi
import re
import time
import codecs
import sys
import subprocess
import math
import dateutil.parser
from datetime import datetime
from commons import Subjects
from xml.etree import ElementTree
from features.Corpus import Corpus
from bs4 import BeautifulSoup
from unidiff import PatchSet
from utils import Progress
from repository.GitLog import GitLog
from repository.BugFilter import BugFilter
from repository.GitVersion import GitVersion
###############################################################
# make bug information
###############################################################
def load_file_corpus(_filepath):
    """Load a file corpus: one "<identifier>\\t<word word ...>" record per line.

    Identifiers have '/' replaced by '.' and are trimmed to start at the
    first 'org.' occurrence when present.

    Fixes: the file handle is now closed deterministically (context
    manager), and the trailing newline is stripped so the last word of a
    record is no longer contaminated with '\\n'.

    :param _filepath: path of the corpus file
    :return: {identifier: [word, ...]}
    """
    data = {}
    with open(_filepath, 'r') as f:
        for line in f:
            identifier, words = line.rstrip('\n').split('\t')
            identifier = identifier.replace('/', '.')
            idx = identifier.find('org.')
            if idx >= 0:
                identifier = identifier[idx:]
            data[identifier] = words.split(' ')
    return data
def load_bug_corpus(_filename):
    '''
    Return words for each item (ex. file, bug report...).

    Each line is "<int id>\\t<space separated words>"; empty tokens caused
    by consecutive spaces are dropped.

    Improvements: the file is read with a context manager (the original
    handle leaked on exception), and the O(n^2) in-place deletion loop is
    replaced with a linear comprehension.

    :param _filename: corpus file path
    :return: {'itemID1':['word1', 'word2',....], 'itemID2':[]....}
    '''
    corpus = {}
    with open(_filename, 'r') as f:
        for line in f:
            idx = line.find('\t')
            key = int(line[:idx])
            # drop the trailing newline character, as the original did
            words = line[idx + 1:-1]
            words = words.strip().split(' ') if len(words) > 0 else []
            # remove blank tokens produced by repeated spaces
            corpus[key] = [w for w in words if w]
    return corpus
###############################################################
# make comment information
###############################################################
def load_bug_xml(_filepath):
    '''
    Parse one bug-report XML file (JIRA RSS style: channel > item) into a dict.

    :param _filepath: path of the XML file to parse
    :return: {'title': str, 'desc': str,
              'comments': [{'id', 'timestamp', 'author', 'text'}, ...]};
        partially filled (possibly empty) when parsing fails — any error
        is printed and swallowed
    '''
    bug = {}
    try:
        root = ElementTree.parse(_filepath).getroot()
        itemtag = root[0].find('item') #channel > item
        bug['title'] = itemtag.find('title').text
        bug['desc'] = itemtag.find('description').text
        bug['comments'] = []
        comments = itemtag.find('comments')
        if comments is not None:
            for comment in comments:
                cID = int(comment.attrib['id'])
                # creation time string -> unix timestamp (seconds)
                cTime = dateutil.parser.parse(comment.attrib['created'])
                cTime = time.mktime(cTime.timetuple())
                # cTime = datetime.strptime(comment.attrib['created'], "%a, %d %b %Y %H:%M:%S")
                cAuthor = comment.attrib['author']
                cText = comment.text
                bug['comments'].append({'id':cID, 'timestamp':cTime, 'author':cAuthor, 'text':cText})
    except Exception as e:
        # best-effort parse: report and return whatever was collected
        print(e)
    return bug
def make_comment_corpus(_project, _bugIDs, _bugPath, _featurePath):
    """Build 'comments.corpus': one tab-separated record per bug comment
    (bugID, commentID, timestamp, author, space-joined corpus terms).
    Skips all work when the output file already exists."""
    corpusPath = os.path.join(_featurePath, 'bugs', '_corpus')
    if os.path.exists(corpusPath) is False:
        os.makedirs(corpusPath)
    result_file = os.path.join(corpusPath, 'comments.corpus')
    # already built: nothing to do
    if os.path.exists(result_file) is True:
        return True
    corpus = Corpus(_camelSplit=False)
    f = codecs.open(result_file, 'w', 'UTF-8')
    count = 0
    progress = Progress(u'[%s] making comment corpus' % _project, 2, 10, True)
    progress.set_upperbound(len(_bugIDs))
    progress.start()
    for bugID in _bugIDs:
        # print(u'[%s] Working %d ...' % (_project, bugID), end=u'')
        # load XML
        filepath = os.path.join(_bugPath, 'bugs', '%s-%d.xml'% (_project, bugID))
        bug = load_bug_xml(filepath)
        if len(bug['comments']) == 0:
            print(u'!', end=u'')  # bug report without any comment
            count +=1
        # make Corpus
        for comment in bug['comments']:
            # Convert some formats (date and text...)
            # re.sub: strip non-ASCII compound characters, keeping English
            # characters, numbers and some special characters
            # NOTE(review): cgi.escape was removed in Python 3.8 — this
            # module appears to target an older interpreter; confirm.
            text = BeautifulSoup(comment['text'], "html.parser").get_text()
            text = cgi.escape(re.sub(r'[^\x00-\x80]+', '', text))
            text = cgi.escape(re.sub(chr(27), '', text))
            comment_corpus = corpus.make_text_corpus(text)
            corpus_text = ' '.join(comment_corpus)
            f.write('%d\t%d\t%d\t%s\t%s\n' % (bugID, comment['id'], comment['timestamp'], comment['author'], corpus_text))
        progress.check()
    f.close()
    progress.done()
    print(u'missed bugs : %d' % count)
    pass
def load_comment_corpus(_project, _bugIDs, _bugPath, _featurePath, _force=False):
    """Load the per-bug comment corpus, building it first when forced or
    when the corpus file does not exist yet.

    :return: {bugID: [{'id', 'timestamp', 'author', 'corpus': [...]}]}
    """
    corpusPath = os.path.join(_featurePath, 'bugs', '_corpus', 'comments.corpus')
    if _force is True or os.path.exists(corpusPath) is False:
        make_comment_corpus(_project, _bugIDs, _bugPath, _featurePath)
    data = {}
    f = codecs.open(corpusPath, 'r', 'UTF-8')
    for raw in f:
        fields = raw[:-1].split('\t')
        bugID = int(fields[0])
        entry = {'id': fields[1],
                 'timestamp': fields[2],
                 'author': fields[3],
                 'corpus': fields[4].split(' ') if len(fields[4]) > 0 else []}
        data.setdefault(bugID, []).append(entry)
    f.close()
    return data
###############################################################
# make comment information
###############################################################
def get_patches(_hash, _gitPath):
    '''
    Load the patch (diff) information of the commit *_hash*.
    The commit log message itself is excluded.

    :param _hash: commit hash to inspect
    :param _gitPath: working directory of the git repository
    :return: unidiff.PatchSet on success, False when git returned nothing
    '''
    # check this branch
    command = [u'git', u'log', u'-1', u'-U', _hash]
    result = subprocess.check_output(command, stderr=sys.stderr, cwd=_gitPath)
    # NOTE(review): check_output raises CalledProcessError on failure and
    # never returns None, so this guard looks like dead code — confirm.
    if result is None:
        print(u'Failed')
        return False
    # if the head is not up-to-date, checkout up-to-date
    # common_log_msg = result[:result.find('diff --git ')]  # the log message
    # precedes the first diff header; parse it here if ever needed
    # keep only the diff body, drop non-ASCII sequences, then decode
    # NOTE(review): calling re.sub with a str pattern and then .decode on
    # the result only works on Python 2 (where check_output returns str);
    # this module appears to target Python 2 — confirm before porting.
    result = result[result.find('diff --git '):].strip()
    result = re.sub(r'[^\x00-\x80]+', '', result)
    result = result.decode('UTF-8', 'ignore')
    patch = PatchSet(result.split('\n'))
    return patch
def make_hunk(_bug, _gitPath):
    '''
    Collect the changed hunks of each fixed file across a bug's commits.

    :param _bug: {'commits':[list of commit hash], 'files':[list of related files]}
    :param _gitPath: git working directory
    :return: {'classpath': hunk text (section headers + changed lines), .... }
    '''
    changes = {}
    fixed_files = [item['name'] for item in _bug['files']]
    for commit in _bug['commits']:
        patches = get_patches(commit, _gitPath)
        for patch in patches:
            # normalize the file path into a dotted classpath rooted at 'org.'
            classpath = patch.path.replace('/', '.')
            classpath = classpath[classpath.find('org.'):]
            # only keep files recorded as fixed for this bug
            if classpath not in fixed_files:continue
            hunk_text = u''
            for hunk in patch:
                # related method name + codes with linesep
                # NOTE(review): bare `reduce` is a builtin only on Python 2;
                # Python 3 needs functools.reduce — confirm target version.
                hunk_text += hunk.section_header + reduce(lambda x, y: str(x) + os.linesep + str(y), hunk) + os.linesep
            changes[classpath] = hunk_text
    return changes
def make_hunk_corpus(_project, _bugs, _gitPath, _featurePath):
    """Build 'hunk.corpus': one record per (bug, fixed file) holding the
    corpus terms of that file's changed hunks.

    Source versions are not considered: the hunks come straight from the
    commit logs, so versioning is irrelevant here.
    """
    # create hunk corpus path
    corpusPath = os.path.join(_featurePath, 'bugs', '_corpus')
    if os.path.exists(corpusPath) is False:
        os.makedirs(corpusPath)
    f = codecs.open(os.path.join(corpusPath, 'hunk.corpus'), 'w', 'UTF-8')
    # create hunk and save
    progress = Progress(u'[%s] making hunk corpus' % _project, 2, 10, True)
    progress.set_upperbound(len(_bugs))
    progress.start()
    # NOTE(review): iteritems() is Python 2 only — confirm target version.
    for bugID, info in _bugs.iteritems():
        #_bugs : {bugID:{'hash':[], 'files':[{'type:'M', 'name':'fileclass'}, ...]
        #print('[Hunk] working %d' % bugID)
        # make hunk
        hunks = make_hunk(info, _gitPath)
        # make hunk corpus
        corpus = Corpus(_camelSplit=False)
        for classpath, hunk_text in hunks.iteritems():
            terms = corpus.make_text_corpus(hunk_text)
            terms_text = ' '.join(terms)
            f.write('%d\t%s\t%s\n' % (bugID, classpath, terms_text))
        progress.check()
    f.close()
    progress.done()
    pass
def load_hunk_corpus(_project, _bugs, _gitPath, _featurePath, _force=False):
    '''
    Load (building first when forced or missing) the corpus of changed
    hunks of the answer files of each bug report.

    :param _bugs: bug info, used only when the corpus must be rebuilt
    :param _gitPath:
    :param _featurePath:
    :return: {bugID: {classpath: [term, ...]}}
    '''
    # check the path; rebuild the corpus file when needed
    corpusPath = os.path.join(_featurePath, 'bugs', '_corpus', 'hunk.corpus')
    if _force is True or os.path.exists(corpusPath) is False:
        make_hunk_corpus(_project, _bugs, _gitPath, _featurePath)
    # load hunk corpus
    data = {}
    f = codecs.open(corpusPath, 'r', 'UTF-8')
    for raw in f:
        fields = raw[:-1].split('\t')
        bugID = int(fields[0])
        terms = fields[2].split(' ') if len(fields[2]) > 0 else []
        data.setdefault(bugID, {})[fields[1]] = terms
    f.close()
    return data
def make_bug_hash(_project, _bugIDs, _bugPath, _gitPath, _featurePath):
    '''
    Build and cache (in 'bugs/.bug.hash') the mapping from bug ID to the
    commits that fixed it and the non-test files those commits touched.

    :param _project:
    :param _bugIDs: only bugs whose numeric ID is in this collection are kept
    :param _bugPath:
    :param _gitPath:
    :param _featurePath:
    :return: {bugID:{'commits':[commit hash list], 'files':[{'type':'M', 'name':'fileclass'}, ...]}
    '''
    gitlogPath = os.path.join(_featurePath, 'bugs', '_corpus', u'.git.log')
    gitversionPath = os.path.join(_featurePath, 'bugs', '_corpus', u'.git_version.txt')
    gitLog = GitLog(_project, _gitPath, gitlogPath)
    gitVersion = GitVersion(_project, _gitPath, gitversionPath)
    bugFilter = BugFilter(_project, os.path.join(_bugPath, u'bugs'))
    print(u'[%s] start making bug infromation *************' % (_project))
    logs = gitLog.load()
    tagmaps = gitVersion.load()
    items, dupgroups = bugFilter.run(logs, tagmaps)
    print(u'[%s] start making bug infromation ************* Done' % (_project))
    # making bugs
    bugs = {}
    for item in items:
        # numeric part of an id like "PROJECT-1234"
        bugID = int(item['id'][item['id'].find('-')+1:])
        if bugID not in _bugIDs: continue
        if item['id'] not in logs:
            print('**********item ID %s not exists in logs! It\'s wired' % item['id'])
            bugs[bugID] = {'commits':[] , 'files':[]}
            continue
        commits = logs[item['id']]
        hash_list = [commit['hash'] for commit in commits]
        files = []
        for fileitem in item['fixedFiles']:
            # exclude test sources from the fixed-file list
            if fileitem['name'].find('test') >= 0: continue
            if fileitem['name'].find('Test') >= 0: continue
            files.append(fileitem)
        # keep only bugs that have both commits and surviving files
        if len(hash_list)==0 or len(files) ==0: continue
        bugs[bugID] = {'commits':hash_list , 'files':files}
    # the intermediate git dumps are no longer needed
    os.remove(gitlogPath)
    os.remove(gitversionPath)
    print(u'[%s] making hash info for bugs ************* Done' % (_project))
    # serialize the result as a pretty-printed dict literal
    from utils.PrettyStringBuilder import PrettyStringBuilder
    builder = PrettyStringBuilder()
    text = builder.get_dicttext(bugs, _indent=1)
    f = codecs.open(os.path.join(_featurePath, 'bugs', '.bug.hash'), 'w', 'UTF-8')
    f.write(text)
    f.close()
    return bugs
def load_bug_hash(_project, _bugIDs, _bugPath, _gitPath, _featurePath, _force=False):
    """Load the cached bug->commit-hash map, rebuilding when forced or absent.

    Security fix: the cache file was previously parsed with ``eval``,
    which executes arbitrary code if the file is tampered with.  It is
    now parsed with ``ast.literal_eval``, which only accepts Python
    literals (the cache is expected to be a pretty-printed dict of
    lists/strings/ints — confirm PrettyStringBuilder emits only literals).

    :return: {bugID: {'commits': [...], 'files': [...]}}
    """
    import ast  # local import keeps the module's top-level imports untouched
    bugHashPath = os.path.join(_featurePath, 'bugs', '.bug.hash')
    if _force is True or os.path.exists(bugHashPath) is False:
        make_bug_hash(_project, _bugIDs, _bugPath, _gitPath, _featurePath)
    f = codecs.open(bugHashPath, 'r', 'UTF-8')
    text = f.read()
    f.close()
    return ast.literal_eval(text)
###############################################################
# make comment information
###############################################################
def make_IDF(descriptions, comments, hunks):
    '''
    Count document frequencies over bug descriptions, comments and hunks.
    Each description, each individual comment and each per-file hunk corpus
    counts as one document; a term is counted once per document it occurs in.
    :param descriptions: {bugID: [term, ...]}
    :param comments: {bugID: [{'corpus': [term, ...], ...}, ...]}
    :param hunks: {bugID: {filepath: [term, ...], ...}}
    :return: (document_count, IDF) where IDF maps term -> number of documents containing it
    '''
    document_count = 0
    IDF = {}
    # description documents (one per bug report)
    for bugID in descriptions:
        document_count += 1
        for term in set(descriptions[bugID]):
            IDF[term] = IDF.get(term, 0) + 1
    # comment documents (one per comment, not per bug)
    for bugID in comments:
        for comment in comments[bugID]:
            document_count += 1
            for term in set(comment['corpus']):
                IDF[term] = IDF.get(term, 0) + 1
    # hunk documents (one per changed file)
    for bugID in hunks:
        # .items() works on both Python 2 and 3; the original .iteritems() is Py2-only
        for filepath, corpus in hunks[bugID].items():
            document_count += 1
            for term in set(corpus):
                IDF[term] = IDF.get(term, 0) + 1
    print('len of all tokens is : %d' % len(IDF))
    return document_count, IDF
def get_TF(_corpus):
    '''
    Build the term-frequency map of a single corpus.
    :param _corpus: iterable of terms
    :return: {term: occurrence count}
    '''
    counts = {}
    for token in _corpus:
        counts[token] = counts.get(token, 0) + 1
    return counts
def get_TFIDF(_TF, _IDF, _nD):
    '''
    Compute basic TF-IDF weights: tf * (1 + log(N / df)).
    :param _TF: {term: count} term frequencies of one document
    :param _IDF: {term: document frequency} over the whole collection;
                 must contain every term of _TF
    :param _nD: total number of documents in the collection
    :return: {term: tf-idf weight}
    '''
    TFIDF = {}
    # .items() works on both Python 2 and 3; the original .iteritems() is Py2-only
    for term, count in _TF.items():
        TFIDF[term] = float(count) * (1.0 + math.log(float(_nD) / _IDF[term]))  # basic TF-IDF
    return TFIDF
def get_similarity(_vectorA, _vectorB):
    '''
    Return the cosine similarity between two sparse vectors.
    Vectors are dicts mapping term -> weight; absent terms count as 0.
    :param _vectorA: {term: weight}
    :param _vectorB: {term: weight}
    :return: cosine similarity (1.0 for identical non-zero vectors);
             0 when either vector is empty or all-zero
    '''
    common_set = set(_vectorA.keys()) & set(_vectorB.keys())
    # A . B
    product = sum(_vectorA[term] * _vectorB[term] for term in common_set)
    # |A|^2, |B|^2
    normA_sq = sum(value * value for value in _vectorA.values())
    normB_sq = sum(value * value for value in _vectorB.values())
    if normA_sq == 0 or normB_sq == 0:
        return 0
    # Bug fix: cosine similarity divides by |A|*|B| (the square roots of the
    # squared norms). The original divided by |A|^2 * |B|^2, so even identical
    # vectors did not score 1.0. float() also avoids Py2 integer division.
    return float(product) / math.sqrt(normA_sq * normB_sq)
def calculate_similarity(_group, _project, _descs, _comments, _hunks, _featurePath, _outputPath):
    '''
    For every bug report, compare its description and each of its comments
    against the corpora of its fixed-file hunks, and record which text is the
    most similar to the code changes.

    Writes one CSV row per bug to <_outputPath>/_translation/<project>-translation-relation.csv
    and dumps the term-frequency tables of the winning text and the file
    corpora next to it (via print_tf).

    :param _group: project group name (currently unused in the output rows)
    :param _project: project name
    :param _descs: {bugID: [term, ...]} report description corpora
    :param _comments: {bugID: [{'id', 'timestamp', 'author', 'corpus'}, ...]}
    :param _hunks: {bugID: {classpath: [term, ...]}} fixed-file hunk corpora
    :param _featurePath: feature base path (unused here, kept for interface compatibility)
    :param _outputPath: root path for the similarity output
    '''
    # number of documents and document frequencies over the whole collection
    nD, nIDF = make_IDF(_descs, _comments, _hunks)

    def get_average_similarity(_bugID, _vecA, _hunks):
        # Average similarity between _vecA and every hunk of bug _bugID.
        # Bug fix: use the _bugID parameter here; the original read the
        # outer-loop variable 'bugID', which only worked by accident because
        # both call sites happened to pass that same variable.
        countFiles = len(_hunks[_bugID])
        similarity = 0.0
        # .items() works on both Python 2 and 3
        for classpath, corpus in _hunks[_bugID].items():
            tfH = get_TF(corpus)
            vecH = get_TFIDF(tfH, nIDF, nD)
            similarity += get_similarity(_vecA, vecH)
        # average similarity over all files related to this bug
        similarity = similarity / countFiles
        return similarity

    progress = Progress("[%s] calculating similarity for reports and file hunks" % _project, 2, 10, True)
    progress.set_upperbound(len(_descs))
    progress.start()
    translationPath = os.path.join(_outputPath, '_translation')
    if os.path.exists(translationPath) is False: os.makedirs(translationPath)
    output = codecs.open(os.path.join(translationPath, '%s-translation-relation.csv'%_project), 'w', 'UTF-8')
    # for each bug report,
    for bugID in _descs:
        # skip reports which have no hunks
        if bugID not in _hunks: continue
        tfR = get_TF(_descs[bugID])
        vectorR = get_TFIDF(tfR, nIDF, nD)
        # average similarity between the report description and its fixed files
        simR = get_average_similarity(bugID, vectorR, _hunks)
        simMax = {'id':bugID, 'time':'', 'author':'description', 'similarity':simR}
        if bugID not in _comments:
            # the bug has no comments: fall back to the description similarity
            output.write(u'%s,%s,%d,%d,%.8f\n' % (_project, bugID, 0, 0, simMax['similarity']))
            output.flush()
            continue
        # find the comment whose similarity to the files is the largest
        for comment in _comments[bugID]:
            tfC = get_TF(comment['corpus'])
            vectorC = get_TFIDF(tfC, nIDF, nD)
            simC = get_average_similarity(bugID, vectorC, _hunks)
            if simMax['similarity'] < simC:
                simMax = {'id': comment['id'],
                          'time': datetime.fromtimestamp(int(comment['timestamp'])).strftime("%Y-%m-%d %H:%M:%S"),
                          'author': comment['author'], 'similarity': simC, 'corpus':comment['corpus']}
        # also collect the merged file corpus for dumping
        fcorpus = []
        for item in _hunks[bugID].values():
            fcorpus += item
        # decide whether the description or a comment won
        if simMax['author'] == 'description' and simMax['id'] == bugID:
            simpath = os.path.join(translationPath, _project + 'desc')
            if os.path.exists(simpath) is False: os.makedirs(simpath)
            print_tf(_descs[bugID], os.path.join(simpath,'%s-%d$desc.csv' % (_project, bugID)))
            print_tf(fcorpus, os.path.join(simpath, '%s-%d$files.csv' % (_project, bugID)))
            output.write(u'%s,%s,%d,%d,%.8f\n' % (_project, bugID, 0, 0, simMax['similarity']))
        else:
            simpath = os.path.join(translationPath, _project)
            if os.path.exists(simpath) is False: os.makedirs(simpath)
            print_tf(_descs[bugID], os.path.join(simpath, '%s-%d$desc.csv' % (_project, bugID)))
            print_tf(simMax['corpus'], os.path.join(simpath, '%s-%d$comment-%s.csv' % (_project, bugID, simMax['id'])))
            print_tf(fcorpus, os.path.join(simpath, '%s-%d$files.csv' % (_project, bugID)))
            output.write(u'%s,%s,%d,%s,%.8f\n' % (_project, bugID, 1, simMax['id'], simMax['similarity']))
        output.flush()
        progress.check()
    output.close()
    progress.done()
    pass
def print_tf(_corpus, _filename):
    '''
    Write the term-frequency table of _corpus to _filename as "term,count"
    lines, most frequent terms first.
    :param _corpus: iterable of terms
    :param _filename: output file path (written as UTF-8)
    '''
    from collections import Counter
    # Counter replaces the hand-rolled counting loop
    TF = Counter(_corpus)
    # sort by frequency, descending (stable, so insertion order breaks ties
    # exactly like the original sorted(..., itemgetter(1), reverse=True))
    sorted_value = sorted(TF.items(), key=lambda kv: kv[1], reverse=True)
    # context manager guarantees the file handle is closed
    with codecs.open(_filename, 'w', 'UTF-8') as f:
        f.write(u'\n'.join(u'%s,%d' % (key, value) for key, value in sorted_value))
###############################################################
# main routine
###############################################################
def make(_group, _project, _bugIDs, _bugPath, _gitPath, _featurePath, _outputPath, _isDesc, _isCamel, _force=False):
    '''
    Load every cached corpus for one project and run the report/comment-to-hunk
    similarity analysis.

    :param _group: project group name (forwarded to calculate_similarity)
    :param _project: project name
    :param _bugIDs: bug IDs to process
    :param _bugPath: bug repository path
    :param _gitPath: git repository path
    :param _featurePath: feature base path where corpus caches live
    :param _outputPath: root path for the similarity output
    :param _isDesc: not used in this body -- TODO confirm whether it should be forwarded
    :param _isCamel: not used in this body -- TODO confirm whether it should be forwarded
    :param _force: if True, rebuild the cached corpora instead of loading them
    '''
    # NOTE(review): shouldn't bug reports with no comments or no mapped files
    # be filtered out beforehand?
    # descriptions = {bugID:[corpus], bugID:[corpus], ...}
    descriptions = load_bug_corpus(os.path.join(_featurePath, 'bugs', '_corpus', 'desc.corpus'))
    # 151 bug reports that have no comments are excluded (by the loader).
    # comments = {bugID:{'id':CommentID, 'timestamp':timestamp, 'author':Author, 'corpus':[corpus]}, ...}
    comments = load_comment_corpus(_project, _bugIDs, _bugPath, _featurePath, _force=_force)
    # bugs = {bugID:{'commits':[], 'files':[{'type':'M', 'name':'org....java'}]}, ...}
    bugs = load_bug_hash(_project, _bugIDs, _bugPath, _gitPath, _featurePath, _force=_force)
    # hunks = {bugID:{'classpath':[corpus], 'classpath':[corpus], ...}, ...}
    hunks = load_hunk_corpus(_project, bugs, _gitPath, _featurePath, _force = _force)
    # compare descriptions/comments against hunks and write the results
    calculate_similarity(_group, _project, descriptions, comments, hunks, _featurePath, _outputPath)
    pass
#####################################
# command
#####################################
def work():
    """Run the similarity pipeline for the configured groups and projects.

    Currently hard-wired to the 'Apache' group and the 'CAMEL' project; the
    commented alternatives iterate over every configured group/project.
    """
    S = Subjects()
    desc = True
    camel = False
    for group in ['Apache']:  # or: S.groups
        for project in ['CAMEL']:  # or: S.projects[group]
            make(group, project, S.bugs[project]['all'],
                 S.getPath_bugrepo(group, project),
                 S.getPath_gitrepo(group, project),
                 S.getPath_featurebase(group, project),
                 S.root_feature,
                 _isDesc=desc,
                 _isCamel=camel,
                 _force=False)
###############################################################################################################
###############################################################################################################
if __name__ == "__main__":
    # Script entry point: run the similarity pipeline.
    work()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
#
# Copyright (c) 2018 Sébastien RAMAGE
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
#
"""Run a ZiGate gateway from the command line, optionally with the admin panel."""
import logging
import argparse
import time
from zigate import connect

logging.basicConfig(level=logging.INFO)

parser = argparse.ArgumentParser()
parser.add_argument('-d', '--debug', help='Debug',
                    default=False, action='store_true')
parser.add_argument('--port', help='ZiGate usb port',
                    default=None)
parser.add_argument('--host', help='Wifi ZiGate host:port',
                    default=None)
parser.add_argument('--path', help='ZiGate state file path',
                    default='~/.zigate.json')
parser.add_argument('--gpio', help='Enable PiZigate', default=False, action='store_true')
parser.add_argument('--channel', help='Zigbee channel', default=None)
# NOTE(review): with default=True and action='store_true' this flag is always
# True and cannot be switched off from the command line -- confirm whether an
# opt-out (e.g. argparse.BooleanOptionalAction) was intended; kept as-is for
# backward compatibility.
parser.add_argument('--admin_panel', help='Enable Admin panel', default=True, action='store_true')
# Bug fix: help text previously said 'Admin panel url prefix' (copy-pasted
# from --admin_panel_prefix below); this option is the TCP port.
parser.add_argument('--admin_panel_port', help='Admin panel port', default=9998)
parser.add_argument('--admin_panel_mount', help='Admin panel url mount point', default=None)
parser.add_argument('--admin_panel_prefix', help='Admin panel url prefix', default=None)
args = parser.parse_args()

if args.debug:
    logging.root.setLevel(logging.DEBUG)

# The two literal True positionals match zigate.connect's signature --
# presumably auto-start/auto-save; confirm against the zigate documentation.
z = connect(args.port, args.host, args.path, True, True, args.channel, args.gpio)

if args.admin_panel:
    logging.root.info('Starting Admin Panel on port %s', args.admin_panel_port)
    if args.admin_panel_mount:
        logging.root.info('Mount point is %s', args.admin_panel_mount)
    if args.admin_panel_prefix:
        logging.root.info('URL prefix is %s', args.admin_panel_prefix)
    z.start_adminpanel(port=int(args.admin_panel_port), mount=args.admin_panel_mount, prefix=args.admin_panel_prefix,
                       debug=args.debug)

print('Press Ctrl+C to quit')
try:
    # Idle until interrupted; the gateway runs in the background.
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    print('Interrupted by user')

# Persist device state and shut the connection down cleanly.
z.save_state()
z.close()
|
nilq/baby-python
|
python
|
import os
import sys
import argparse
from cuttsum.event import read_events_xml
from cuttsum.nuggets import read_nuggets_tsv
from cuttsum.util import gen_dates
import streamcorpus as sc
import numpy as np
from sklearn.feature_extraction import DictVectorizer
import codecs
def main():
    """Extract bag-of-words sentence features for one event.

    For every hour in the event's window, reads the matching relevance chunk,
    builds a binary bag-of-words per 'article-clf' sentence, vectorizes all
    sentences at once, and writes one tab-separated line per sentence:
    hour, stream id, sentence id, sentence text, then the active feature
    column indices separated by spaces.
    """
    event_file, rc_dir, event_title, ofile = parse_args()
    event = load_event(event_title, event_file)
    hours = [dth for dth in gen_dates(event.start, event.end)]
    meta_data = []
    bow_dicts = []
    for h, hour in enumerate(hours, 1):
        path = os.path.join(rc_dir, '{}.sc.gz'.format(hour))
        for si in sc.Chunk(path=path):
            # Map each 'serif' sentence's text to its index so 'article-clf'
            # sentences can be matched back to their sentence ids.
            uni2id = {}
            for sid, sentence in enumerate(si.body.sentences[u'serif'], 0):
                uni2id[sentence_uni(sentence)] = sid
            for sent in si.body.sentences[u'article-clf']:
                # Binary bag-of-words over lowercased tokens.
                bow_dict = {}
                for token in sent.tokens:
                    t = token.token.decode(u'utf-8').lower()
                    bow_dict[t] = 1
                bow_dicts.append(bow_dict)
                uni = sentence_uni(sent)
                sent_id = uni2id[uni]
                meta_data.append((hour, si.stream_id, sent_id, uni))
    vctr = DictVectorizer()
    X = vctr.fit_transform(bow_dicts)
    with codecs.open(ofile, 'w', 'utf-8') as f:
        for i, (hour, stream_id, sent_id, uni) in enumerate(meta_data):
            # Sanitize the sentence text so it cannot break the TSV format.
            uni = uni.replace(u'\n', u' ').replace(u'\t', u' ')
            f.write(u'{}\t{}\t{}\t{}\t'.format(hour, stream_id, sent_id, uni))
            x = u' '.join([unicode(col) for col in X[i,:].indices])
            f.write(x)
            f.write(u'\n')
            f.flush()
def sentence_uni(sent):
    """Return the sentence as a single unicode string of space-joined tokens."""
    decoded = [tok.token.decode(u'utf-8') for tok in sent.tokens]
    return u' '.join(decoded)
def load_event(event_title, event_xml):
    """Return the event from event_xml whose title equals event_title.

    Raises ValueError when no event in the file matches.
    """
    for candidate in read_events_xml(event_xml):
        if candidate.title == event_title:
            return candidate
    raise ValueError(("No event title matches \"{}\" " \
        + "in file: {}").format(event_title, event_xml))
def parse_args():
    """Parse and validate command-line arguments.

    Creates the output directory if needed. Exits with status 1 when the
    event file or the relevance-chunks directory is invalid.
    :return: (event_file, rc_dir, event_title, ofile)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--event-file',
                        help=u'Event xml file.',
                        type=unicode, required=True)
    parser.add_argument('-r', '--rel-chunks-dir',
                        help=u'Relevance Chunks dir',
                        type=str, required=True)
    parser.add_argument('-t', '--event-title',
                        help=u'Event title',
                        type=unicode, required=True)
    parser.add_argument('-o', '--output-file',
                        help=u'Location to write sims',
                        type=unicode, required=True)
    args = parser.parse_args()
    event_file = args.event_file
    rc_dir = args.rel_chunks_dir
    event_title = args.event_title
    ofile = args.output_file
    odir = os.path.dirname(ofile)
    if odir != u'' and not os.path.exists(odir):
        os.makedirs(odir)
    if not os.path.exists(event_file) or os.path.isdir(event_file):
        sys.stderr.write((u'--event-file argument {} either does not exist' \
            + u' or is a directory!\n').format(event_file))
        sys.stderr.flush()
        # Bug fix: exit non-zero so callers can detect the failure
        # (bare sys.exit() exits with status 0).
        sys.exit(1)
    if not os.path.exists(rc_dir) or not os.path.isdir(rc_dir):
        sys.stderr.write((u'--rel-chunks-dir argument {} either does not' \
            + u' exist or is not a directory!\n').format(rc_dir))
        sys.stderr.flush()
        sys.exit(1)
    return (event_file, rc_dir, event_title, ofile)
# Script entry point.
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
# coding: utf-8
# 2020/1/3 @ tongshiwei
__all__ = ["get_net", "get_bp_loss"]
from mxnet import gluon
from .WCLSTM import WCLSTM
from .WCRLSTM import WCRLSTM
from .WRCLSTM import WRCLSTM
def get_net(model_type, class_num, embedding_dim, net_type="lstm", **kwargs):
    """Instantiate the network class selected by model_type.

    Supported model types: "wclstm", "wcrlstm", "wrclstm".
    Raises TypeError for anything else.
    """
    if model_type == "wclstm":
        net_cls = WCLSTM
    elif model_type == "wcrlstm":
        net_cls = WCRLSTM
    elif model_type == "wrclstm":
        net_cls = WRCLSTM
    else:
        raise TypeError("unknown model_type: %s" % model_type)
    # Single construction site keeps the keyword plumbing in one place.
    return net_cls(net_type=net_type, class_num=class_num, embedding_dim=embedding_dim, **kwargs)
def get_bp_loss(**kwargs):
    """Return the back-propagation loss functions keyed by name.

    Extra keyword arguments are accepted for interface compatibility and ignored.
    """
    loss_fn = gluon.loss.SoftmaxCrossEntropyLoss()
    return {"cross-entropy": loss_fn}
|
nilq/baby-python
|
python
|
"""LiteDRAM BankMachine (Rows/Columns management)."""
import math
from migen import *
from litex.soc.interconnect import stream
from litedram.common import *
from litedram.core.multiplexer import *
class _AddressSlicer:
def __init__(self, colbits, address_align):
self.colbits = colbits
self.address_align = address_align
def row(self, address):
split = self.colbits - self.address_align
return address[split:]
def col(self, address):
split = self.colbits - self.address_align
return Cat(Replicate(0, self.address_align), address[:split])
class BankMachine(Module):
    """Per-bank command generator.

    Buffers incoming requests, tracks the currently opened row and emits
    read/write (CAS), ACTIVATE and PRECHARGE commands on the `cmd` endpoint,
    enforcing the tWTP, tRC and tRAS timings. A refresh request closes the
    row and grants the bank to the refresher. tRRD/tFAW/tCCD/tWTR are
    enforced downstream by the multiplexer.
    """
    def __init__(self, n, aw, address_align, nranks, settings):
        # n: bank number presented on cmd.ba; aw: request address width.
        self.req = req = Record(cmd_layout(aw))
        self.refresh_req = refresh_req = Signal()
        self.refresh_gnt = refresh_gnt = Signal()
        a = settings.geom.addressbits
        ba = settings.geom.bankbits + log2_int(nranks)
        self.cmd = cmd = stream.Endpoint(cmd_request_rw_layout(a, ba))

        # # #

        auto_precharge = Signal()

        # Command buffer
        cmd_buffer_layout = [("we", 1), ("addr", len(req.addr))]
        cmd_buffer_lookahead = stream.SyncFIFO(
            cmd_buffer_layout, settings.cmd_buffer_depth,
            buffered=settings.cmd_buffer_buffered)
        cmd_buffer = stream.Buffer(cmd_buffer_layout) # 1 depth buffer to detect row change
        self.submodules += cmd_buffer_lookahead, cmd_buffer
        self.comb += [
            req.connect(cmd_buffer_lookahead.sink, keep={"valid", "ready", "we", "addr"}),
            cmd_buffer_lookahead.source.connect(cmd_buffer.sink),
            # A request is retired once its data phase completes.
            cmd_buffer.source.ready.eq(req.wdata_ready | req.rdata_valid),
            req.lock.eq(cmd_buffer_lookahead.source.valid | cmd_buffer.source.valid),
        ]

        slicer = _AddressSlicer(settings.geom.colbits, address_align)

        # Row tracking
        row = Signal(settings.geom.rowbits)
        row_opened = Signal()
        row_hit = Signal()
        row_open = Signal()
        row_close = Signal()
        self.comb += row_hit.eq(row == slicer.row(cmd_buffer.source.addr))
        self.sync += \
            If(row_close,
                row_opened.eq(0)
            ).Elif(row_open,
                row_opened.eq(1),
                row.eq(slicer.row(cmd_buffer.source.addr))
            )

        # Address generation
        row_col_n_addr_sel = Signal()
        self.comb += [
            cmd.ba.eq(n),
            If(row_col_n_addr_sel,
                # ACTIVATE presents the row address.
                cmd.a.eq(slicer.row(cmd_buffer.source.addr))
            ).Else(
                # Reads/writes present the column address; A10 requests auto-precharge.
                cmd.a.eq((auto_precharge << 10) | slicer.col(cmd_buffer.source.addr))
            )
        ]

        # tWTP (write-to-precharge) controller
        write_latency = math.ceil(settings.phy.cwl / settings.phy.nphases)
        precharge_time = write_latency + settings.timing.tWR + settings.timing.tCCD # AL=0
        self.submodules.twtpcon = twtpcon = tXXDController(precharge_time)
        self.comb += twtpcon.valid.eq(cmd.valid & cmd.ready & cmd.is_write)

        # tRC (activate-activate) controller
        self.submodules.trccon = trccon = tXXDController(settings.timing.tRC)
        self.comb += trccon.valid.eq(cmd.valid & cmd.ready & row_open)

        # tRAS (activate-precharge) controller
        self.submodules.trascon = trascon = tXXDController(settings.timing.tRAS)
        self.comb += trascon.valid.eq(cmd.valid & cmd.ready & row_open)

        # Auto Precharge generation
        if settings.with_auto_precharge:
            # Request auto-precharge when the lookahead targets a different row
            # than the current command (and the row is not already closing).
            self.comb += \
                If(cmd_buffer_lookahead.source.valid & cmd_buffer.source.valid,
                    If(slicer.row(cmd_buffer_lookahead.source.addr) !=
                       slicer.row(cmd_buffer.source.addr),
                        auto_precharge.eq(row_close == 0)
                    )
                )

        # Control and command generation FSM
        # Note: tRRD, tFAW, tCCD, tWTR timings are enforced by the multiplexer
        self.submodules.fsm = fsm = FSM()
        fsm.act("REGULAR",
            If(refresh_req,
                NextState("REFRESH")
            ).Elif(cmd_buffer.source.valid,
                If(row_opened,
                    If(row_hit,
                        # Row already open and matching: issue read/write directly.
                        cmd.valid.eq(1),
                        If(cmd_buffer.source.we,
                            req.wdata_ready.eq(cmd.ready),
                            cmd.is_write.eq(1),
                            cmd.we.eq(1),
                        ).Else(
                            req.rdata_valid.eq(cmd.ready),
                            cmd.is_read.eq(1)
                        ),
                        cmd.cas.eq(1),
                        If(cmd.ready & auto_precharge,
                            NextState("AUTOPRECHARGE")
                        )
                    ).Else(
                        # Row conflict: close the open row first.
                        NextState("PRECHARGE")
                    )
                ).Else(
                    # No row open yet.
                    NextState("ACTIVATE")
                )
            )
        )
        fsm.act("PRECHARGE",
            # Note: we are presenting the column address, A10 is always low
            If(twtpcon.ready & trascon.ready,
                cmd.valid.eq(1),
                If(cmd.ready,
                    NextState("TRP")
                ),
                cmd.ras.eq(1),
                cmd.we.eq(1),
                cmd.is_cmd.eq(1)
            ),
            row_close.eq(1)
        )
        fsm.act("AUTOPRECHARGE",
            # The precharge was bundled with the CAS via A10; only wait for timings.
            If(twtpcon.ready & trascon.ready,
                NextState("TRP")
            ),
            row_close.eq(1)
        )
        fsm.act("ACTIVATE",
            If(trccon.ready,
                row_col_n_addr_sel.eq(1),
                row_open.eq(1),
                cmd.valid.eq(1),
                cmd.is_cmd.eq(1),
                If(cmd.ready,
                    NextState("TRCD")
                ),
                cmd.ras.eq(1)
            )
        )
        fsm.act("REFRESH",
            # Hold the grant until the refresher deasserts its request.
            If(twtpcon.ready,
                refresh_gnt.eq(1),
            ),
            row_close.eq(1),
            cmd.is_cmd.eq(1),
            If(~refresh_req,
                NextState("REGULAR")
            )
        )
        fsm.delayed_enter("TRP", "ACTIVATE", settings.timing.tRP - 1)
        fsm.delayed_enter("TRCD", "REGULAR", settings.timing.tRCD - 1)
|
nilq/baby-python
|
python
|
from django.contrib import admin
from mptt.admin import MPTTModelAdmin
from .models import Business, Category, Request
@admin.register(Business)
class BusinessAdmin(admin.ModelAdmin):
    """Admin for Business: list name/location/main category sorted by name;
    edit the many-to-many fields with the horizontal filter widget."""
    list_display = ('name', 'location', 'main_category')
    ordering = ('name',)
    filter_horizontal = ('other_categories', 'delivers_to')
@admin.register(Category)
class CategoryAdmin(MPTTModelAdmin):
    """Admin for the Category tree; indent each MPTT level by 20 pixels."""
    list_display = ('name',)
    mptt_level_indent = 20
@admin.register(Request)
class RequestAdmin(admin.ModelAdmin):
    """Default admin for Request; registered with no customization."""
    pass
|
nilq/baby-python
|
python
|
import uuid
from cinderclient import exceptions as cinder_exceptions
from ddt import ddt, data
from django.conf import settings
from django.test import override_settings
from novaclient import exceptions as nova_exceptions
from rest_framework import status, test
import mock
from six.moves import urllib
from waldur_openstack.openstack.tests.unittests import test_backend
from waldur_core.structure.tests import factories as structure_factories
from . import factories, fixtures
from .. import models, views
@ddt
class InstanceCreateTest(test.APITransactionTestCase):
    """API tests for OpenStack instance creation.

    Covers quota accounting on the service settings / SPL / project /
    customer scopes, quota-limit validation, and subnet / floating-IP
    assignment rules. Requests are made as the tenant owner.
    """
    def setUp(self):
        self.openstack_tenant_fixture = fixtures.OpenStackTenantFixture()
        self.openstack_settings = self.openstack_tenant_fixture.openstack_tenant_service_settings
        self.openstack_settings.options = {'external_network_id': uuid.uuid4().hex}
        self.openstack_settings.save()
        self.openstack_spl = self.openstack_tenant_fixture.spl
        self.project = self.openstack_tenant_fixture.project
        self.customer = self.openstack_tenant_fixture.customer
        # Image constraints are used by the valid payload below.
        self.image = factories.ImageFactory(settings=self.openstack_settings, min_disk=10240, min_ram=1024)
        self.flavor = factories.FlavorFactory(settings=self.openstack_settings)
        self.subnet = self.openstack_tenant_fixture.subnet
        self.client.force_authenticate(user=self.openstack_tenant_fixture.owner)
        self.url = factories.InstanceFactory.get_list_url()

    def get_valid_data(self, **extra):
        # Minimal valid creation payload; callers override fields via **extra.
        subnet_url = factories.SubNetFactory.get_url(self.subnet)
        default = {
            'service_project_link': factories.OpenStackTenantServiceProjectLinkFactory.get_url(self.openstack_spl),
            'flavor': factories.FlavorFactory.get_url(self.flavor),
            'image': factories.ImageFactory.get_url(self.image),
            'name': 'Valid name',
            'system_volume_size': self.image.min_disk,
            'internal_ips_set': [{'subnet': subnet_url}],
        }
        default.update(extra)
        return default

    def test_quotas_update(self):
        # Creating an instance must bump usage on settings and SPL quotas.
        response = self.client.post(self.url, self.get_valid_data())
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        instance = models.Instance.objects.get(uuid=response.data['uuid'])
        Quotas = self.openstack_settings.Quotas
        self.assertEqual(self.openstack_settings.quotas.get(name=Quotas.ram).usage, instance.ram)
        self.assertEqual(self.openstack_settings.quotas.get(name=Quotas.storage).usage, instance.disk)
        self.assertEqual(self.openstack_settings.quotas.get(name=Quotas.vcpu).usage, instance.cores)
        self.assertEqual(self.openstack_settings.quotas.get(name=Quotas.instances).usage, 1)
        self.assertEqual(self.openstack_spl.quotas.get(name=self.openstack_spl.Quotas.ram).usage, instance.ram)
        self.assertEqual(self.openstack_spl.quotas.get(name=self.openstack_spl.Quotas.storage).usage, instance.disk)
        self.assertEqual(self.openstack_spl.quotas.get(name=self.openstack_spl.Quotas.vcpu).usage, instance.cores)

    def test_project_quotas_updated_when_instance_is_created(self):
        response = self.client.post(self.url, self.get_valid_data())
        instance = models.Instance.objects.get(uuid=response.data['uuid'])
        self.assertEqual(self.project.quotas.get(name='os_cpu_count').usage, instance.cores)
        self.assertEqual(self.project.quotas.get(name='os_ram_size').usage, instance.ram)
        self.assertEqual(self.project.quotas.get(name='os_storage_size').usage, instance.disk)

    def test_customer_quotas_updated_when_instance_is_created(self):
        response = self.client.post(self.url, self.get_valid_data())
        instance = models.Instance.objects.get(uuid=response.data['uuid'])
        self.assertEqual(self.customer.quotas.get(name='os_cpu_count').usage, instance.cores)
        self.assertEqual(self.customer.quotas.get(name='os_ram_size').usage, instance.ram)
        self.assertEqual(self.customer.quotas.get(name='os_storage_size').usage, instance.disk)

    def test_spl_quota_updated_by_signal_handler_when_instance_is_removed(self):
        response = self.client.post(self.url, self.get_valid_data())
        instance = models.Instance.objects.get(uuid=response.data['uuid'])
        instance.delete()
        self.assertEqual(self.openstack_spl.quotas.get(name=self.openstack_spl.Quotas.vcpu).usage, 0)
        self.assertEqual(self.openstack_spl.quotas.get(name=self.openstack_spl.Quotas.ram).usage, 0)
        self.assertEqual(self.openstack_spl.quotas.get(name=self.openstack_spl.Quotas.storage).usage, 0)

    def test_project_quotas_updated_when_instance_is_deleted(self):
        response = self.client.post(self.url, self.get_valid_data())
        instance = models.Instance.objects.get(uuid=response.data['uuid'])
        instance.delete()
        self.assertEqual(self.project.quotas.get(name='os_cpu_count').usage, 0)
        self.assertEqual(self.project.quotas.get(name='os_ram_size').usage, 0)
        self.assertEqual(self.project.quotas.get(name='os_storage_size').usage, 0)

    def test_customer_quotas_updated_when_instance_is_deleted(self):
        response = self.client.post(self.url, self.get_valid_data())
        instance = models.Instance.objects.get(uuid=response.data['uuid'])
        instance.delete()
        self.assertEqual(self.customer.quotas.get(name='os_cpu_count').usage, 0)
        self.assertEqual(self.customer.quotas.get(name='os_ram_size').usage, 0)
        self.assertEqual(self.customer.quotas.get(name='os_storage_size').usage, 0)

    # Parametrized over SPL quota names (ddt).
    @data('storage', 'ram', 'vcpu')
    def test_instance_cannot_be_created_if_service_project_link_quota_has_been_exceeded(self, quota):
        payload = self.get_valid_data()
        self.openstack_spl.set_quota_limit(quota, 0)
        response = self.client.post(self.url, payload)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    @data('instances')
    def test_quota_validation(self, quota_name):
        self.openstack_settings.quotas.filter(name=quota_name).update(limit=0)
        response = self.client.post(self.url, self.get_valid_data())
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_user_can_provision_instance(self):
        response = self.client.post(self.url, self.get_valid_data())
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)

    def test_user_can_define_instance_subnets(self):
        subnet = self.openstack_tenant_fixture.subnet
        data = self.get_valid_data(internal_ips_set=[{'subnet': factories.SubNetFactory.get_url(subnet)}])
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        instance = models.Instance.objects.get(uuid=response.data['uuid'])
        self.assertTrue(models.InternalIP.objects.filter(subnet=subnet, instance=instance).exists())

    def test_user_cannot_assign_subnet_from_other_settings_to_instance(self):
        # SubNetFactory() with no settings creates one under foreign settings.
        data = self.get_valid_data(internal_ips_set=[{'subnet': factories.SubNetFactory.get_url()}])
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_user_can_define_instance_floating_ips(self):
        subnet_url = factories.SubNetFactory.get_url(self.subnet)
        floating_ip = self.openstack_tenant_fixture.floating_ip
        data = self.get_valid_data(
            floating_ips=[{'subnet': subnet_url, 'url': factories.FloatingIPFactory.get_url(floating_ip)}],
        )
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
        instance = models.Instance.objects.get(uuid=response.data['uuid'])
        self.assertIn(floating_ip, instance.floating_ips)

    def test_user_cannot_assign_floating_ip_from_other_settings_to_instance(self):
        subnet_url = factories.SubNetFactory.get_url(self.subnet)
        floating_ip = factories.FloatingIPFactory()
        data = self.get_valid_data(
            floating_ips=[{'subnet': subnet_url, 'url': factories.FloatingIPFactory.get_url(floating_ip)}],
        )
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_user_cannot_assign_floating_ip_to_disconnected_subnet(self):
        # Subnet belongs to the same settings but is not in internal_ips_set.
        disconnected_subnet = factories.SubNetFactory(
            settings=self.openstack_tenant_fixture.openstack_tenant_service_settings)
        disconnected_subnet_url = factories.SubNetFactory.get_url(disconnected_subnet)
        floating_ip = self.openstack_tenant_fixture.floating_ip
        data = self.get_valid_data(
            floating_ips=[{'subnet': disconnected_subnet_url, 'url': factories.FloatingIPFactory.get_url(floating_ip)}],
        )
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_user_cannot_use_floating_ip_assigned_to_other_instance(self):
        subnet_url = factories.SubNetFactory.get_url(self.subnet)
        # A floating IP with an internal_ip is already attached elsewhere.
        internal_ip = factories.InternalIPFactory(subnet=self.subnet)
        floating_ip = factories.FloatingIPFactory(
            settings=self.openstack_settings,
            runtime_state='ACTIVE',
            internal_ip=internal_ip
        )
        data = self.get_valid_data(
            floating_ips=[{'subnet': subnet_url, 'url': factories.FloatingIPFactory.get_url(floating_ip)}],
        )
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn('floating_ips', response.data)

    def test_user_can_assign_active_floating_ip(self):
        subnet_url = factories.SubNetFactory.get_url(self.subnet)
        floating_ip = factories.FloatingIPFactory(settings=self.openstack_settings, runtime_state='ACTIVE')
        data = self.get_valid_data(
            floating_ips=[{'subnet': subnet_url, 'url': factories.FloatingIPFactory.get_url(floating_ip)}],
        )
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_user_can_allocate_floating_ip(self):
        # Omitting 'url' asks the backend to allocate a floating IP itself.
        subnet_url = factories.SubNetFactory.get_url(self.subnet)
        self.openstack_tenant_fixture.floating_ip.status = 'ACTIVE'
        self.openstack_tenant_fixture.floating_ip.save()
        data = self.get_valid_data(
            floating_ips=[{'subnet': subnet_url}],
        )
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        instance = models.Instance.objects.get(uuid=response.data['uuid'])
        self.assertEqual(instance.floating_ips.count(), 1)

    def test_user_cannot_allocate_floating_ip_if_quota_limit_is_reached(self):
        self.openstack_settings.quotas.filter(name=self.openstack_settings.Quotas.floating_ip_count).update(limit=0)
        subnet_url = factories.SubNetFactory.get_url(self.subnet)
        self.openstack_tenant_fixture.floating_ip.status = 'ACTIVE'
        self.openstack_tenant_fixture.floating_ip.save()
        data = self.get_valid_data(
            floating_ips=[{'subnet': subnet_url}],
        )
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_cannot_create_instance_without_internal_ips(self):
        data = self.get_valid_data()
        del data['internal_ips_set']
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn('internal_ips_set', response.data)
class InstanceDeleteTest(test_backend.BaseBackendTestCase):
def setUp(self):
super(InstanceDeleteTest, self).setUp()
self.instance = factories.InstanceFactory(
state=models.Instance.States.OK,
runtime_state=models.Instance.RuntimeStates.SHUTOFF,
backend_id='VALID_ID'
)
self.instance.increase_backend_quotas_usage()
self.mocked_nova().servers.get.side_effect = nova_exceptions.NotFound(code=404)
views.InstanceViewSet.async_executor = False
def tearDown(self):
super(InstanceDeleteTest, self).tearDown()
views.InstanceViewSet.async_executor = True
def mock_volumes(self, delete_data_volume=True):
self.data_volume = self.instance.volumes.get(bootable=False)
self.data_volume.backend_id = 'DATA_VOLUME_ID'
self.data_volume.state = models.Volume.States.OK
self.data_volume.save()
self.data_volume.increase_backend_quotas_usage()
self.system_volume = self.instance.volumes.get(bootable=True)
self.system_volume.backend_id = 'SYSTEM_VOLUME_ID'
self.system_volume.state = models.Volume.States.OK
self.system_volume.save()
self.system_volume.increase_backend_quotas_usage()
def get_volume(backend_id):
if not delete_data_volume and backend_id == self.data_volume.backend_id:
mocked_volume = mock.Mock()
mocked_volume.status = 'available'
return mocked_volume
raise cinder_exceptions.NotFound(code=404)
self.mocked_cinder().volumes.get.side_effect = get_volume
def delete_instance(self, query_params=None):
staff = structure_factories.UserFactory(is_staff=True)
self.client.force_authenticate(user=staff)
url = factories.InstanceFactory.get_url(self.instance)
if query_params:
url += '?' + urllib.parse.urlencode(query_params)
with override_settings(CELERY_ALWAYS_EAGER=True, CELERY_EAGER_PROPAGATES_EXCEPTIONS=True):
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED, response.data)
def assert_quota_usage(self, quotas, name, value):
self.assertEqual(quotas.get(name=name).usage, value)
def test_nova_methods_are_called_if_instance_is_deleted_with_volumes(self):
self.mock_volumes(True)
self.delete_instance()
nova = self.mocked_nova()
nova.servers.delete.assert_called_once_with(self.instance.backend_id)
nova.servers.get.assert_called_once_with(self.instance.backend_id)
self.assertFalse(nova.volumes.delete_server_volume.called)
def test_database_models_deleted(self):
self.mock_volumes(True)
self.delete_instance()
self.assertFalse(models.Instance.objects.filter(id=self.instance.id).exists())
for volume in self.instance.volumes.all():
self.assertFalse(models.Volume.objects.filter(id=volume.id).exists())
def test_quotas_updated_if_instance_is_deleted_with_volumes(self):
self.mock_volumes(True)
self.delete_instance()
self.instance.service_project_link.service.settings.refresh_from_db()
quotas = self.instance.service_project_link.service.settings.quotas
self.assert_quota_usage(quotas, 'instances', 0)
self.assert_quota_usage(quotas, 'vcpu', 0)
self.assert_quota_usage(quotas, 'ram', 0)
self.assert_quota_usage(quotas, 'volumes', 0)
self.assert_quota_usage(quotas, 'storage', 0)
def test_backend_methods_are_called_if_instance_is_deleted_without_volumes(self):
self.mock_volumes(False)
self.delete_instance({
'delete_volumes': False
})
nova = self.mocked_nova()
nova.volumes.delete_server_volume.assert_called_once_with(
self.instance.backend_id, self.data_volume.backend_id)
nova.servers.delete.assert_called_once_with(self.instance.backend_id)
nova.servers.get.assert_called_once_with(self.instance.backend_id)
def test_system_volume_is_deleted_but_data_volume_exists(self):
self.mock_volumes(False)
self.delete_instance({
'delete_volumes': False
})
self.assertFalse(models.Instance.objects.filter(id=self.instance.id).exists())
self.assertTrue(models.Volume.objects.filter(id=self.data_volume.id).exists())
self.assertFalse(models.Volume.objects.filter(id=self.system_volume.id).exists())
    def test_quotas_updated_if_instance_is_deleted_without_volumes(self):
        # Compute quotas are released, but the surviving data volume still
        # consumes one 'volumes' slot and its size in 'storage'.
        self.mock_volumes(False)
        self.delete_instance({
            'delete_volumes': False
        })
        settings = self.instance.service_project_link.service.settings
        settings.refresh_from_db()
        self.assert_quota_usage(settings.quotas, 'instances', 0)
        self.assert_quota_usage(settings.quotas, 'vcpu', 0)
        self.assert_quota_usage(settings.quotas, 'ram', 0)
        self.assert_quota_usage(settings.quotas, 'volumes', 1)
        self.assert_quota_usage(settings.quotas, 'storage', self.data_volume.size)
    def test_instance_cannot_be_deleted_if_it_has_backups(self):
        # An instance with an OK backup must not be deletable, even by staff —
        # the API should answer 409 Conflict.
        self.instance = factories.InstanceFactory(
            state=models.Instance.States.OK,
            runtime_state=models.Instance.RuntimeStates.SHUTOFF,
            backend_id='VALID_ID'
        )
        staff = structure_factories.UserFactory(is_staff=True)
        self.client.force_authenticate(user=staff)
        factories.BackupFactory(instance=self.instance, state=models.Backup.States.OK)
        url = factories.InstanceFactory.get_url(self.instance)
        response = self.client.delete(url)
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT, response.data)
    def test_neutron_methods_are_called_if_instance_is_deleted_with_floating_ips(self):
        # Explicitly asking for floating IP release must trigger the Neutron
        # delete_floatingip call for the attached IP.
        fixture = fixtures.OpenStackTenantFixture()
        internal_ip = factories.InternalIPFactory.create(instance=self.instance, subnet=fixture.subnet)
        settings = self.instance.service_project_link.service.settings
        floating_ip = factories.FloatingIPFactory.create(internal_ip=internal_ip, settings=settings)
        self.delete_instance({'release_floating_ips': True})
        self.mocked_neutron().delete_floatingip.assert_called_once_with(floating_ip.backend_id)
    def test_neutron_methods_are_not_called_if_instance_does_not_have_any_floating_ips_yet(self):
        # No floating IPs attached: nothing to release, so Neutron is untouched.
        self.delete_instance({'release_floating_ips': True})
        self.assertEqual(self.mocked_neutron().delete_floatingip.call_count, 0)
    def test_neutron_methods_are_not_called_if_user_did_not_ask_for_floating_ip_removal_explicitly(self):
        # With release_floating_ips=False the attached IP must be kept, so
        # delete_floatingip is never invoked.
        self.mocked_neutron().show_floatingip.return_value = {'floatingip': {'status': 'DOWN'}}
        fixture = fixtures.OpenStackTenantFixture()
        internal_ip = factories.InternalIPFactory.create(instance=self.instance, subnet=fixture.subnet)
        settings = self.instance.service_project_link.service.settings
        factories.FloatingIPFactory.create(internal_ip=internal_ip, settings=settings)
        self.delete_instance({'release_floating_ips': False})
        self.assertEqual(self.mocked_neutron().delete_floatingip.call_count, 0)
class InstanceCreateBackupSchedule(test.APITransactionTestCase):
    """Tests for the instance 'create_backup_schedule' action endpoint."""

    action_name = 'create_backup_schedule'

    def setUp(self):
        # Authenticate as staff so permission checks never interfere.
        self.user = structure_factories.UserFactory.create(is_staff=True)
        self.client.force_authenticate(user=self.user)
        backupable = factories.InstanceFactory(state=models.Instance.States.OK)
        self.create_url = factories.InstanceFactory.get_url(backupable, action=self.action_name)
        # A valid schedule payload; individual tests tweak fields as needed.
        self.backup_schedule_data = {
            'name': 'test schedule',
            'retention_time': 3,
            'schedule': '0 * * * *',  # crontab syntax: hourly
            'maximal_number_of_resources': 3,
        }

    def test_staff_can_create_backup_schedule(self):
        response = self.client.post(self.create_url, self.backup_schedule_data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # The created schedule should echo back the submitted fields.
        self.assertEqual(response.data['retention_time'], self.backup_schedule_data['retention_time'])
        self.assertEqual(
            response.data['maximal_number_of_resources'], self.backup_schedule_data['maximal_number_of_resources'])
        self.assertEqual(response.data['schedule'], self.backup_schedule_data['schedule'])

    def test_backup_schedule_default_state_is_OK(self):
        response = self.client.post(self.create_url, self.backup_schedule_data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        backup_schedule = models.BackupSchedule.objects.first()
        self.assertIsNotNone(backup_schedule)
        self.assertEqual(backup_schedule.state, backup_schedule.States.OK)

    def test_backup_schedule_can_not_be_created_with_wrong_schedule(self):
        # wrong schedule:
        self.backup_schedule_data['schedule'] = 'wrong schedule'
        response = self.client.post(self.create_url, self.backup_schedule_data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        # The validation error should mention the offending field.
        self.assertIn('schedule', response.content)

    def test_backup_schedule_creation_with_correct_timezone(self):
        backupable = factories.InstanceFactory(state=models.Instance.States.OK)
        create_url = factories.InstanceFactory.get_url(backupable, action=self.action_name)
        backup_schedule_data = {
            'name': 'test schedule',
            'retention_time': 3,
            'schedule': '0 * * * *',
            'timezone': 'Europe/London',
            'maximal_number_of_resources': 3,
        }
        response = self.client.post(create_url, backup_schedule_data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data['timezone'], 'Europe/London')

    def test_backup_schedule_creation_with_incorrect_timezone(self):
        backupable = factories.InstanceFactory(state=models.Instance.States.OK)
        create_url = factories.InstanceFactory.get_url(backupable, action=self.action_name)
        backup_schedule_data = {
            'name': 'test schedule',
            'retention_time': 3,
            'schedule': '0 * * * *',
            'timezone': 'incorrect',
            'maximal_number_of_resources': 3,
        }
        response = self.client.post(create_url, backup_schedule_data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn('timezone', response.data)

    def test_backup_schedule_creation_with_default_timezone(self):
        backupable = factories.InstanceFactory(state=models.Instance.States.OK)
        create_url = factories.InstanceFactory.get_url(backupable, action=self.action_name)
        backup_schedule_data = {
            'name': 'test schedule',
            'retention_time': 3,
            'schedule': '0 * * * *',
            'maximal_number_of_resources': 3,
        }
        response = self.client.post(create_url, backup_schedule_data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # Falls back to the Django project's TIME_ZONE setting.
        self.assertEqual(response.data['timezone'], settings.TIME_ZONE)
class InstanceUpdateInternalIPsSetTest(test.APITransactionTestCase):
    """Tests for the instance 'update_internal_ips_set' action."""

    action_name = 'update_internal_ips_set'

    def setUp(self):
        self.fixture = fixtures.OpenStackTenantFixture()
        self.client.force_authenticate(user=self.fixture.admin)
        self.instance = self.fixture.instance
        self.url = factories.InstanceFactory.get_url(self.instance, action=self.action_name)

    def test_user_can_update_instance_internal_ips_set(self):
        # instance had 2 internal IPs
        ip_to_keep = factories.InternalIPFactory(instance=self.instance, subnet=self.fixture.subnet)
        ip_to_delete = factories.InternalIPFactory(instance=self.instance)
        # instance should be connected to new subnet
        subnet_to_connect = factories.SubNetFactory(settings=self.fixture.openstack_tenant_service_settings)
        response = self.client.post(self.url, data={
            'internal_ips_set': [
                {'subnet': factories.SubNetFactory.get_url(self.fixture.subnet)},
                {'subnet': factories.SubNetFactory.get_url(subnet_to_connect)},
            ]
        })
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
        # The listed subnets are kept/added; IPs on unlisted subnets are removed.
        self.assertTrue(self.instance.internal_ips_set.filter(pk=ip_to_keep.pk).exists())
        self.assertFalse(self.instance.internal_ips_set.filter(pk=ip_to_delete.pk).exists())
        self.assertTrue(self.instance.internal_ips_set.filter(subnet=subnet_to_connect).exists())

    # NOTE(review): "intenal" below is a typo for "internal"; left as-is since
    # this change only touches documentation.
    def test_user_cannot_add_intenal_ip_from_different_settings(self):
        # A subnet belonging to different service settings must be rejected.
        subnet = factories.SubNetFactory()
        response = self.client.post(self.url, data={
            'internal_ips_set': [
                {'subnet': factories.SubNetFactory.get_url(subnet)},
            ]
        })
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertFalse(self.instance.internal_ips_set.filter(subnet=subnet).exists())

    def test_user_cannot_connect_instance_to_one_subnet_twice(self):
        # Listing the same subnet twice is a validation error.
        response = self.client.post(self.url, data={
            'internal_ips_set': [
                {'subnet': factories.SubNetFactory.get_url(self.fixture.subnet)},
                {'subnet': factories.SubNetFactory.get_url(self.fixture.subnet)},
            ]
        })
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertFalse(self.instance.internal_ips_set.filter(subnet=self.fixture.subnet).exists())
class InstanceUpdateFloatingIPsTest(test.APITransactionTestCase):
    """Tests for the instance 'update_floating_ips' action."""

    action_name = 'update_floating_ips'

    def setUp(self):
        self.fixture = fixtures.OpenStackTenantFixture()
        # Floating IP allocation requires an external network to be configured.
        self.fixture.openstack_tenant_service_settings.options = {'external_network_id': uuid.uuid4().hex}
        self.fixture.openstack_tenant_service_settings.save()
        self.client.force_authenticate(user=self.fixture.admin)
        self.instance = self.fixture.instance
        # The instance needs an internal IP on the subnet to attach floating IPs.
        factories.InternalIPFactory.create(instance=self.instance, subnet=self.fixture.subnet)
        self.url = factories.InstanceFactory.get_url(self.instance, action=self.action_name)
        self.subnet_url = factories.SubNetFactory.get_url(self.fixture.subnet)

    def test_user_can_update_instance_floating_ips(self):
        floating_ip_url = factories.FloatingIPFactory.get_url(self.fixture.floating_ip)
        data = {
            'floating_ips': [
                {'subnet': self.subnet_url, 'url': floating_ip_url},
            ]
        }
        response = self.client.post(self.url, data=data)
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
        self.assertEqual(self.instance.floating_ips.count(), 1)
        self.assertIn(self.fixture.floating_ip, self.instance.floating_ips)

    def test_when_floating_ip_is_attached_action_details_are_updated(self):
        floating_ip_url = factories.FloatingIPFactory.get_url(self.fixture.floating_ip)
        data = {
            'floating_ips': [
                {'subnet': self.subnet_url, 'url': floating_ip_url},
            ]
        }
        self.client.post(self.url, data=data)
        self.instance.refresh_from_db()
        # action_details records what was attached/detached for auditing.
        self.assertEqual(self.instance.action_details, {
            'message': 'Attached floating IPs: %s.' % self.fixture.floating_ip.address,
            'attached': [self.fixture.floating_ip.address],
            'detached': [],
        })

    def test_when_floating_ip_is_detached_action_details_are_updated(self):
        # Pre-attach the floating IP, then post an empty set to detach it.
        self.fixture.floating_ip.internal_ip = self.instance.internal_ips_set.first()
        self.fixture.floating_ip.save()
        self.client.post(self.url, data={
            'floating_ips': []
        })
        self.instance.refresh_from_db()
        self.assertEqual(self.instance.action_details, {
            'message': 'Detached floating IPs: %s.' % self.fixture.floating_ip.address,
            'attached': [],
            'detached': [self.fixture.floating_ip.address],
        })

    def test_user_can_not_assign_floating_ip_used_by_other_instance(self):
        # A floating IP already bound to another internal IP is unavailable.
        internal_ip = factories.InternalIPFactory(subnet=self.fixture.subnet)
        floating_ip = factories.FloatingIPFactory(
            settings=self.fixture.openstack_tenant_service_settings,
            runtime_state='DOWN',
            internal_ip=internal_ip,
        )
        floating_ip_url = factories.FloatingIPFactory.get_url(floating_ip)
        data = {
            'floating_ips': [
                {'subnet': self.subnet_url, 'url': floating_ip_url},
            ]
        }
        response = self.client.post(self.url, data=data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn('floating_ips', response.data)

    def test_user_cannot_add_floating_ip_via_subnet_that_is_not_connected_to_instance(self):
        subnet_url = factories.SubNetFactory.get_url()
        data = {'floating_ips': [{'subnet': subnet_url}]}
        response = self.client.post(self.url, data=data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_user_can_remove_floating_ip_from_instance(self):
        self.fixture.floating_ip.internal_ip = self.instance.internal_ips_set.first()
        self.fixture.floating_ip.save()
        data = {'floating_ips': []}
        response = self.client.post(self.url, data=data)
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
        self.assertEqual(self.instance.floating_ips.count(), 0)

    def test_free_floating_ip_is_used_for_allocation(self):
        # When only a subnet is given, an existing free IP on the external
        # network should be reused instead of allocating a new one.
        external_network_id = self.fixture.openstack_tenant_service_settings.options['external_network_id']
        self.fixture.floating_ip.backend_network_id = external_network_id
        self.fixture.floating_ip.save()
        data = {'floating_ips': [{'subnet': self.subnet_url}]}
        response = self.client.post(self.url, data=data)
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
        self.assertIn(self.fixture.floating_ip, self.instance.floating_ips)

    def test_user_cannot_use_same_subnet_twice(self):
        data = {'floating_ips': [{'subnet': self.subnet_url}, {'subnet': self.subnet_url}]}
        response = self.client.post(self.url, data=data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class InstanceBackupTest(test.APITransactionTestCase):
    """Tests for the instance 'backup' action."""

    action_name = 'backup'

    def setUp(self):
        self.fixture = fixtures.OpenStackTenantFixture()
        self.client.force_authenticate(self.fixture.owner)

    def test_backup_can_be_created_for_instance_with_2_volumes(self):
        url = factories.InstanceFactory.get_url(self.fixture.instance, action='backup')
        payload = self.get_payload()
        response = self.client.post(url, payload)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # One snapshot per volume (system + data).
        self.assertEqual(models.Backup.objects.get(name=payload['name']).snapshots.count(), 2)

    def test_backup_can_be_created_for_instance_only_with_system_volume(self):
        instance = self.fixture.instance
        # Drop the non-bootable data volume, leaving only the system volume.
        instance.volumes.filter(bootable=False).delete()
        url = factories.InstanceFactory.get_url(instance, action='backup')
        payload = self.get_payload()
        response = self.client.post(url, payload)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
        self.assertEqual(models.Backup.objects.get(name=payload['name']).snapshots.count(), 1)

    def test_backup_can_be_created_for_instance_with_3_volumes(self):
        instance = self.fixture.instance
        instance.volumes.add(factories.VolumeFactory(service_project_link=instance.service_project_link))
        url = factories.InstanceFactory.get_url(instance, action='backup')
        payload = self.get_payload()
        response = self.client.post(url, payload)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
        self.assertEqual(models.Backup.objects.get(name=payload['name']).snapshots.count(), 3)

    def test_user_cannot_backup_unstable_instance(self):
        # Instances not in a stable state must answer 409 Conflict.
        instance = self.fixture.instance
        instance.state = models.Instance.States.UPDATING
        instance.save()
        url = factories.InstanceFactory.get_url(instance, action='backup')
        response = self.client.post(url, data={'name': 'test backup'})
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)

    def get_payload(self):
        # Minimal valid backup payload.
        return {
            'name': 'backup_name'
        }
class BaseInstanceImportTest(test.APITransactionTestCase):
    """Shared fixture setup for the instance import test cases."""

    def setUp(self):
        self.fixture = fixtures.OpenStackTenantFixture()

    def _generate_backend_instances(self, count=1):
        """Return *count* instances that exist only on the backend.

        Each instance is created through the factory and then deleted from the
        database, so only its in-memory representation (with backend_id) remains.
        """
        def _backend_only_instance():
            created = factories.InstanceFactory()
            created.delete()
            return created

        return [_backend_only_instance() for _ in range(count)]
class InstanceImportableResourcesTest(BaseInstanceImportTest):
    """Tests for listing backend instances that are available for import."""

    def setUp(self):
        super(InstanceImportableResourcesTest, self).setUp()
        self.url = factories.InstanceFactory.get_list_url('importable_resources')
        self.client.force_authenticate(self.fixture.owner)

    @mock.patch('waldur_openstack.openstack_tenant.backend.OpenStackTenantBackend.get_instances_for_import')
    def test_importable_instances_are_returned(self, get_instances_for_import_mock):
        backend_instances = self._generate_backend_instances()
        get_instances_for_import_mock.return_value = backend_instances
        data = {'service_project_link': factories.OpenStackTenantServiceProjectLinkFactory.get_url(self.fixture.spl)}
        response = self.client.get(self.url, data=data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Fixed: assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(len(response.data), len(backend_instances))
        returned_backend_ids = [item['backend_id'] for item in response.data]
        expected_backend_ids = [item.backend_id for item in backend_instances]
        # NOTE(review): assertItemsEqual exists only on Python 2 (it became
        # assertCountEqual in Python 3); kept since this file's super(...)
        # style suggests a Python 2 codebase — confirm before porting.
        self.assertItemsEqual(returned_backend_ids, expected_backend_ids)
        get_instances_for_import_mock.assert_called()
class InstanceImportTest(BaseInstanceImportTest):
    """Tests for the instance 'import_resource' endpoint."""

    def setUp(self):
        super(InstanceImportTest, self).setUp()
        self.url = factories.InstanceFactory.get_list_url('import_resource')
        self.client.force_authenticate(self.fixture.owner)

    def _get_payload(self, backend_id):
        # Minimal valid payload for the import endpoint.
        return {
            'backend_id': backend_id,
            'service_project_link': factories.OpenStackTenantServiceProjectLinkFactory.get_url(self.fixture.spl),
        }

    @mock.patch('waldur_openstack.openstack_tenant.executors.InstancePullExecutor.execute')
    @mock.patch('waldur_openstack.openstack_tenant.backend.OpenStackTenantBackend.import_instance')
    def test_instance_can_be_imported(self, import_instance_mock, resource_import_execute_mock):
        backend_id = 'backend_id'

        def import_instance(backend_id, save, service_project_link):
            # Mimic the backend: return an instance that is not yet in the DB.
            return self._generate_backend_instances()[0]

        import_instance_mock.side_effect = import_instance
        payload = self._get_payload(backend_id)
        response = self.client.post(self.url, payload)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
        resource_import_execute_mock.assert_called()

    def test_existing_instance_cannot_be_imported(self):
        # A backend_id already present in the DB must be rejected.
        payload = self._get_payload(factories.InstanceFactory().backend_id)
        response = self.client.post(self.url, payload)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
|
nilq/baby-python
|
python
|
##########################################################################
#
# Copyright (c) 2008-2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
class RandomTest( unittest.TestCase ) :
	"""Tests for the IECore random number generators.

	Fixed: the deprecated ``TestCase.assert_`` alias (removed in
	Python 3.12) is replaced by ``assertTrue`` — identical behavior.
	"""

	def testCosineHemisphere( self ) :
		"""Cosine-weighted hemisphere samples must lie on the upper unit hemisphere."""
		r = IECore.Rand32()
		v = r.cosineHemispherefVector( 1000 )
		for i in range( 0, v.size() ) :
			# z >= 0: the sample is in the upper hemisphere.
			self.assertTrue( v[i].z >= 0 )
			# Unit length up to single-precision accuracy.
			self.assertAlmostEqual( v[i].length(), 1, 6 )

	def testBarycentric( self ) :
		"""Barycentric coordinates must sum to one, for scalar and vector variants."""
		r = IECore.Rand32()
		# NOTE(review): these use exact float equality — presumably the
		# generator guarantees the components sum to exactly 1.0; if this
		# ever flakes, switch to assertAlmostEqual.
		f = r.barycentricf()
		self.assertTrue( ( f[0] + f[1] + f[2] ) == 1.0 )
		d = r.barycentricd()
		self.assertTrue( ( d[0] + d[1] + d[2] ) == 1.0 )
		fvs = r.barycentricfVector( IECore.IntVectorData( [ 1, 2, 3, 4, 5 ] ) )
		for i in range( 0, fvs.size() ) :
			self.assertTrue( ( fvs[i][0] + fvs[i][1] + fvs[i][2] ) == 1.0 )
		fv = r.barycentricfVector( 5 )
		for i in range( 0, fv.size() ) :
			self.assertTrue( ( fv[i][0] + fv[i][1] + fv[i][2] ) == 1.0 )
		dvs = r.barycentricdVector( IECore.IntVectorData( [ 1, 2, 3, 4, 5 ] ) )
		for i in range( 0, dvs.size() ) :
			self.assertTrue( ( dvs[i][0] + dvs[i][1] + dvs[i][2] ) == 1.0 )
		dv = r.barycentricdVector( 5 )
		for i in range( 0, dv.size() ) :
			self.assertTrue( ( dv[i][0] + dv[i][1] + dv[i][2] ) == 1.0 )


if __name__ == "__main__":
	unittest.main()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import codecs
import numpy as np
import tensorflow as tf
from Transformer.config.hyperparams import Hyperparams as pm
class Data_helper(object):
    """Load, index, pad and batch the parallel de->en corpus for the Transformer.

    NOTE(review): vocabulary index 1 is used as the OOV fallback in generate()
    and index 0 as padding — presumably <UNK> and <PAD>; confirm against the
    vocabulary files.
    """

    def __init__(self):
        # Batch cursor used by next()/reset_pointer() during evaluation.
        self.pointer = 0

    def mini_batch(self):
        """Build a shuffled training batch pipeline (TF1 queue-runner API).

        Returns (x, y, num_batch): batch tensors of token ids and the number
        of batches per epoch.
        """
        X, Y = self.load_train_datasets()
        num_batch = len(X) // pm.batch_size
        X = tf.convert_to_tensor(X, tf.int32)
        Y = tf.convert_to_tensor(Y, tf.int32)
        # Input Queue by CPU
        input_queues = tf.train.slice_input_producer([X, Y])
        # Get mini batch from Queue
        x, y = tf.train.shuffle_batch(input_queues,
                                      num_threads=8,
                                      batch_size=pm.batch_size,
                                      capacity=pm.batch_size * 64,  # Max_number of batches in queue
                                      min_after_dequeue=pm.batch_size * 32,  # Min_number of batches in queue after dequeue
                                      allow_smaller_final_batch=False)
        return x, y, num_batch

    def load_train_datasets(self):
        """Read the training corpus and return padded id matrices (x, y)."""
        de_sents = [line for line in codecs.open(pm.source_train, 'r', 'utf-8').read().split("\n") if line]
        en_sents = [line for line in codecs.open(pm.target_train, 'r', 'utf-8').read().split("\n") if line]
        x, y, sources, targets = self.generate(de_sents, en_sents)
        return x, y

    def load_test_datasets(self):
        """Read the test corpus; return source ids plus the raw sentence pairs."""
        de_sents = [line for line in codecs.open(pm.source_test, 'r', 'utf-8').read().split("\n") if line]
        en_sents = [line for line in codecs.open(pm.target_test, 'r', 'utf-8').read().split("\n") if line]
        x, y, sources, targets = self.generate(de_sents, en_sents)
        return x, sources, targets

    def generate(self, source_sents, target_sents):
        """Convert sentence pairs to fixed-length id arrays.

        Appends " <EOS>" to every sentence, drops pairs longer than pm.maxlen,
        and zero-pads the rest. Returns (x_np, y_np, Sources, Targets).

        NOTE(review): the source side is indexed with DECODER_VOCAB and the
        target side with ENCODER_VOCAB — verify that naming is intentional.
        """
        de2idx, idx2de = self.load_vocab(pm.DECODER_VOCAB)
        en2idx, idx2en = self.load_vocab(pm.ENCODER_VOCAB)
        x_list, y_list, Sources, Targets = [], [], [], []
        for source_sent, target_sent in zip(source_sents, target_sents):
            # Unknown words fall back to index 1.
            x = [de2idx.get(word, 1) for word in (source_sent + " <EOS>").split()]
            y = [en2idx.get(word, 1) for word in (target_sent + " <EOS>").split()]
            if max(len(x), len(y)) <= pm.maxlen:
                x_list.append(np.array(x))
                y_list.append(np.array(y))
                Sources.append(source_sent)
                Targets.append(target_sent)
        # Padding 0(<PAD>)
        x_np = np.zeros([len(x_list), pm.maxlen], np.int32)
        y_np = np.zeros([len(y_list), pm.maxlen], np.int32)
        for i, (x, y) in enumerate(zip(x_list, y_list)):
            x_np[i] = np.lib.pad(x, [0, pm.maxlen - len(x)], 'constant', constant_values=(0, 0))
            y_np[i] = np.lib.pad(y, [0, pm.maxlen - len(y)], 'constant', constant_values=(0, 0))
        return x_np, y_np, Sources, Targets

    def load_vocab(self, file):
        """Read a '<word> <count>' vocabulary file, keep words with count >=
        pm.min_cnt, and return (word2idx, idx2word)."""
        vocab = [line.split()[0] for line in codecs.open(file, 'r', encoding='utf-8').read().splitlines() if int(line.split()[1]) >= pm.min_cnt]
        word2idx = {word: idx for idx, word in enumerate(vocab)}
        idx2word = {word2idx[word]: word for word in word2idx}
        return word2idx, idx2word

    def next(self, X, Sources, Targets, num_batch):
        """Return the current evaluation batch and advance the cursor, wrapping
        around after num_batch batches."""
        x = X[self.pointer * pm.batch_size: (self.pointer + 1) * pm.batch_size]
        sources = Sources[self.pointer * pm.batch_size: (self.pointer + 1) * pm.batch_size]
        targets = Targets[self.pointer * pm.batch_size: (self.pointer + 1) * pm.batch_size]
        self.pointer = (self.pointer + 1) % num_batch
        return x, sources, targets

    def reset_pointer(self):
        # Rewind the evaluation cursor to the first batch.
        self.pointer = 0
|
nilq/baby-python
|
python
|
import os
import glob
def delete_given_file(image_name):
    """Delete the image, txt label and xml label files belonging to *image_name*.

    *image_name* is a file name such as ``"naver_0074.jpg"``; its stem is used
    to locate the matching files under the three annotated_dataset folders.

    Fixed: the original wrapped all three os.remove calls in one ``try`` with a
    bare ``except``, so the first missing file silently aborted the remaining
    deletions. Each file is now removed independently and only OSError (the
    error os.remove actually raises) is caught.
    """
    file_name = image_name.split(".")[0]
    IMG_PATH = "./annotated_dataset/img"
    TXT_PATH = "./annotated_dataset/txt_label"
    XML_PATH = "./annotated_dataset/xml_label"
    files_list = [
        f"{IMG_PATH}/{file_name}.jpg",
        f"{TXT_PATH}/{file_name}.txt",
        f"{XML_PATH}/{file_name}.xml",
    ]
    removed = 0
    for item in files_list:
        try:
            os.remove(item)
            removed += 1
        except OSError:
            print(f"FAIL: removing {file_name} failed")
    if removed == len(files_list):
        print(f"removed {removed} file of {file_name}")
def remove_img_file(image_name):
    """Delete the unlabeled image file corresponding to *image_name*.

    Fixed: catch only OSError (what os.remove raises) instead of a bare
    ``except`` that would also hide unrelated failures such as KeyboardInterrupt.
    """
    file_name = image_name.split(".")[0]
    IMG_PATH = "./unlabeled-imgs"
    img_file = f"{IMG_PATH}/{file_name}.jpg"
    try:
        os.remove(img_file)
        print(f"removed {img_file} file")
    except OSError:
        print(f"FAIL: removing {img_file} failed")
# delete_given_file("naver_0074.jpg")
# Batch cleanup: images flagged for removal from the unlabeled pool.
removable_file_list = [
    "naver_0615.jpg",
    "naver_0508.jpg",
    "naver_0353.jpg",
    "naver_0677.jpg",
    "naver_0592.jpg",
    "naver_0443.jpg",
    "naver_0904.jpg",
    "naver_0460.jpg",
    "naver_0832.jpg",
    "naver_0388.jpg",
    "naver_0408.jpg",
    "naver_0513.jpg",
    "naver_0429.jpg",
]
for file_item in removable_file_list:
    # delete_given_file(file_item)
    # Only the unlabeled copy is removed; annotated files are left alone.
    remove_img_file(file_item)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Copyright (c) 2020 - for information on the respective copyright owner
# see the NOTICE file and/or the repository
# <https://github.com/boschresearch/amira-blender-rendering>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bpy
from mathutils import Vector
from amira_blender_rendering.utils.blender import clear_orphaned_materials, remove_material_nodes, add_default_material
from amira_blender_rendering.utils import material as mutil
# from amira_blender_rendering.utils.logging import get_logger
# TODO: change into MaterialNodesMetalToolCap class
# TODO: it is really tedious and error-prone to set up materials this way. We
# should invest the time to write a blender plugin that generates
# python-code for us, or loads node setups from a configuration file, or
# something along the lines...
def setup_material(material: bpy.types.Material, empty: bpy.types.Object = None):
    """Setup material nodes for the metal tool cap.

    Builds a procedural shader node graph on *material*: noise-driven base
    color, roughness and a ring-like bump map, all driven by the distance
    from *empty*.

    :param material: material whose node tree is populated; it must already
        carry the default Output/Principled BSDF nodes
        (mutil.check_default_material raises/fails otherwise — see that helper).
    :param empty: reference object used as the origin for the distance-based
        textures. If None, a new plain-axes empty is created at the top of the
        currently active object and parented to it.
    """
    # TODO: refactor into smaller node-creation functions that can be re-used elsewhere
    # logger = get_logger()
    tree = material.node_tree
    nodes = tree.nodes

    # check if we have default nodes
    n_output, n_bsdf = mutil.check_default_material(material)

    # set BSDF default values
    n_bsdf.inputs['Subsurface'].default_value = 0.6
    n_bsdf.inputs['Subsurface Color'].default_value = (0.8, 0.444, 0.444, 1.0)
    n_bsdf.inputs['Metallic'].default_value = 1.0

    # thin metallic surface lines (used primarily for normal/bump map computation)
    n_texcoord_bump = nodes.new('ShaderNodeTexCoord')

    # setup empty (reference for distance computations)
    if empty is None:
        # get currently selected object
        obj = bpy.context.object
        # add empty
        bpy.ops.object.empty_add(type='PLAIN_AXES')
        empty = bpy.context.object
        # locate at the top of the object
        v0 = Vector(obj.bound_box[1])
        v1 = Vector(obj.bound_box[2])
        v2 = Vector(obj.bound_box[5])
        v3 = Vector(obj.bound_box[6])
        empty.location = (v0 + v1 + v2 + v3) / 4
        # rotate into object space. afterwards we'll have linkage via parenting
        empty.location = obj.matrix_world @ empty.location
        # copy rotation
        empty.rotation_euler = obj.rotation_euler
        # deselect all
        bpy.ops.object.select_all(action='DESELECT')
        # take care to re-select everything
        empty.select_set(state=True)
        obj.select_set(state=True)
        # make obj active again (will become parent of all selected objects)
        bpy.context.view_layer.objects.active = obj
        # make parent, keep transform
        bpy.ops.object.parent_set(type='OBJECT', xmirror=False, keep_transform=True)
    # set the empty as input for the texture
    n_texcoord_bump.object = empty

    # (dot)^2 (distance from empty)
    n_dot = nodes.new('ShaderNodeVectorMath')
    n_dot.operation = 'DOT_PRODUCT'
    tree.links.new(n_texcoord_bump.outputs['Object'], n_dot.inputs[0])
    tree.links.new(n_texcoord_bump.outputs['Object'], n_dot.inputs[1])
    n_pow = nodes.new('ShaderNodeMath')
    n_pow.operation = 'POWER'
    tree.links.new(n_dot.outputs[1], n_pow.inputs[0])

    # mapping input from empty to noise
    n_mapping = nodes.new('ShaderNodeMapping')
    tree.links.new(n_texcoord_bump.outputs['Object'], n_mapping.inputs[0])

    # generate and link up required noise textures
    n_noise0 = nodes.new('ShaderNodeTexNoise')
    n_noise0.inputs['Scale'].default_value = 1.0
    n_noise0.inputs['Detail'].default_value = 1.0
    n_noise0.inputs['Distortion'].default_value = 2.0
    tree.links.new(n_pow.outputs[0], n_noise0.inputs[0])
    n_noise1 = nodes.new('ShaderNodeTexNoise')
    n_noise1.inputs['Scale'].default_value = 300.0
    n_noise1.inputs['Detail'].default_value = 0.0
    n_noise1.inputs['Distortion'].default_value = 0.0
    tree.links.new(n_pow.outputs[0], n_noise1.inputs[0])
    # XXX: is this noise required?
    n_noise2 = nodes.new('ShaderNodeTexNoise')
    n_noise2.inputs['Scale'].default_value = 0.0
    n_noise2.inputs['Detail'].default_value = 0.0
    n_noise2.inputs['Distortion'].default_value = 0.1
    tree.links.new(n_mapping.outputs['Vector'], n_noise2.inputs[0])
    n_noise3 = nodes.new('ShaderNodeTexNoise')
    n_noise3.inputs['Scale'].default_value = 5.0
    n_noise3.inputs['Detail'].default_value = 2.0
    n_noise3.inputs['Distortion'].default_value = 0.0
    tree.links.new(n_mapping.outputs['Vector'], n_noise3.inputs[0])

    # color output
    n_colorramp_col = nodes.new('ShaderNodeValToRGB')
    n_colorramp_col.color_ramp.color_mode = 'RGB'
    n_colorramp_col.color_ramp.interpolation = 'LINEAR'
    n_colorramp_col.color_ramp.elements[0].position = 0.118
    n_colorramp_col.color_ramp.elements[1].position = 0.727
    tree.links.new(n_noise0.outputs['Fac'], n_colorramp_col.inputs['Fac'])
    n_output_color = nodes.new('ShaderNodeMixRGB')
    n_output_color.inputs['Fac'].default_value = 0.400
    n_output_color.inputs['Color1'].default_value = (0.485, 0.485, 0.485, 1.0)
    tree.links.new(n_colorramp_col.outputs['Color'], n_output_color.inputs['Color2'])

    # roughness finish
    n_mul_r = nodes.new('ShaderNodeMath')
    n_mul_r.operation = 'MULTIPLY'
    n_mul_r.inputs[1].default_value = 0.100
    tree.links.new(n_noise3.outputs['Fac'], n_mul_r.inputs[0])
    n_output_roughness = nodes.new('ShaderNodeMath')
    n_output_roughness.operation = 'ADD'
    n_output_roughness.inputs[1].default_value = 0.050
    tree.links.new(n_mul_r.outputs[0], n_output_roughness.inputs[0])

    # math nodes to mix noise with distance and get ring-effect (modulo), leading to bump map
    n_add0 = nodes.new('ShaderNodeMath')
    n_add0.operation = 'ADD'
    tree.links.new(n_pow.outputs[0], n_add0.inputs[0])
    tree.links.new(n_noise2.outputs['Fac'], n_add0.inputs[1])
    n_mul0 = nodes.new('ShaderNodeMath')
    n_mul0.operation = 'MULTIPLY'
    n_mul0.inputs[1].default_value = 300.000
    tree.links.new(n_add0.outputs[0], n_mul0.inputs[0])
    n_mod0 = nodes.new('ShaderNodeMath')
    n_mod0.operation = 'MODULO'
    n_mod0.inputs[1].default_value = 2.000
    tree.links.new(n_mul0.outputs[0], n_mod0.inputs[0])
    n_mul1 = nodes.new('ShaderNodeMath')
    n_mul1.operation = 'MULTIPLY'
    tree.links.new(n_noise1.outputs['Fac'], n_mul1.inputs[0])
    tree.links.new(n_mod0.outputs[0], n_mul1.inputs[1])
    n_min_n = nodes.new('ShaderNodeMath')
    n_min_n.operation = 'MINIMUM'
    tree.links.new(n_noise1.outputs['Fac'], n_min_n.inputs[0])
    tree.links.new(n_mul1.outputs[0], n_min_n.inputs[1])
    n_colorramp_rough = nodes.new('ShaderNodeValToRGB')
    n_colorramp_rough.color_ramp.color_mode = 'RGB'
    n_colorramp_rough.color_ramp.interpolation = 'LINEAR'
    n_colorramp_rough.color_ramp.elements[0].position = 0.159
    n_colorramp_rough.color_ramp.elements[1].position = 0.541
    tree.links.new(n_min_n.outputs[0], n_colorramp_rough.inputs[0])
    n_output_normal = nodes.new('ShaderNodeBump')
    n_output_normal.inputs['Strength'].default_value = 0.075
    n_output_normal.inputs['Distance'].default_value = 1.000
    tree.links.new(n_colorramp_rough.outputs['Color'], n_output_normal.inputs['Height'])

    # output nodes:
    # n_output_color -> color / outputs['Color']
    # n_output_roughness -> roughness / outputs['Value']
    # n_output_normal -> normal / outputs['Normal']

    # hook to bsdf shader node
    tree.links.new(n_output_color.outputs['Color'], n_bsdf.inputs['Base Color'])
    tree.links.new(n_output_roughness.outputs['Value'], n_bsdf.inputs['Roughness'])
    tree.links.new(n_output_normal.outputs['Normal'], n_bsdf.inputs['Normal'])
# TODO: this should become a unit test
def main():
    """First tear down any material assigned with the object, then create everything from scratch."""
    remove_material_nodes()
    clear_orphaned_materials()
    mat = add_default_material()
    setup_material(mat)


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
"""
Transitions (Perturbation Kernels)
==================================
Perturbation strategies. The classes defined here transition the current
population to the next one. pyABC implements global and local transitions.
Proposals for the subsequent generation are generated from the current
generation density estimates of the current generations.
This is equivalent to perturbing randomly chosen particles.
These can be passed to :class:`pyabc.smc.ABCSMC` via the ``transitions``
keyword argument.
"""
from .base import Transition, DiscreteTransition
from .multivariatenormal import (MultivariateNormalTransition,
silverman_rule_of_thumb,
scott_rule_of_thumb)
from .exceptions import NotEnoughParticles
from .model_selection import GridSearchCV
from .local_transition import LocalTransition
from .randomwalk import DiscreteRandomWalkTransition
__all__ = [
"Transition",
"DiscreteTransition",
"MultivariateNormalTransition",
"GridSearchCV",
"NotEnoughParticles",
"LocalTransition",
"scott_rule_of_thumb",
"silverman_rule_of_thumb",
"DiscreteRandomWalkTransition",
]
|
nilq/baby-python
|
python
|
from test.ga.ga import GaTestCase
from test.ga.population import PopulationTestCase
from test.ga.individual import IndividualTestCase
__all__ = (
"GaTestCase", "PopulationTestCase",
"IndividualTestCase"
)
|
nilq/baby-python
|
python
|
def get_key():
if isinstance(self.instance,
def clean():
pass
|
nilq/baby-python
|
python
|
from screen.drawing.color import *
from screen.drawing.color import __all__ as _color__all__
from screen.drawing.colorinterpolationmethod import *
from screen.drawing.colorinterpolationmethod import __all__ as _colorinterpolationmethod__all__
from screen.drawing.style import *
from screen.drawing.style import __all__ as _style__all__
__all__ = [
*_color__all__,
*_colorinterpolationmethod__all__,
*_style__all__,
]
|
nilq/baby-python
|
python
|
from types import FunctionType
import backends
__storage = backends.default()  # active cache backend; replaceable via set_storage()
def set_storage(BackendInstance):
    """Replace the module-wide cache backend used by all cached functions and classes."""
    global __storage
    __storage = BackendInstance
def make_cached(make_key, f):
    """Wrap *f* so its results are memoised in the module storage backend.

    *make_key* maps the call's ``args``/``kwargs`` to a storage key. On a
    cache hit the stored value is returned; on a miss *f* is invoked, its
    result stored, and that result returned directly.
    """
    def cached(*args, **kwargs):
        key = make_key(args=args, kwargs=kwargs)
        if __storage.has(key):
            return __storage.get(key)
        result = f(*args, **kwargs)
        __storage.set(key, result)
        return result
    return cached
def cache_function(function_or_key):
    """Caching decorator for functions.

    Used bare (``@cache_function``) the storage key is derived from the
    function name plus the call arguments. Called with a string
    (``@cache_function("mykey")``) automatic key creation is overridden and
    that fixed key is always used.
    """
    prefix = 'function:'
    if type(function_or_key) is FunctionType:
        # No args to the decorator: the first argument is the function
        # to be decorated.
        func = function_or_key
        full_key = prefix + func.__name__
        def make_key(args=None, kwargs=None):
            return full_key + func.__name__ + str(args) + str(kwargs)
        return make_cached(make_key, func)
    # A key was passed to the decorator: always use that same key.
    fixed_key = prefix + function_or_key
    def make_decorator(f):
        def make_key(args=None, kwargs=None):
            return fixed_key + ':' + f.__name__
        return make_cached(make_key, f)
    return make_decorator
# Module-level state for the class-caching machinery below.
__register = []         # queued (id, instance, provider) tuples awaiting cache fill
__open_queue = False    # True while __register holds unprocessed entries
__in_init = False       # module-level default; instances carry their own '__in_init' flag
__cache = {}            # reserved cache dict (NOTE(review): not referenced below — confirm)
__next_provider = None  # provider captured by @cache_provider for the next @cache_class
__update = []           # queued (id, values) pairs flushed by do_updates()
def __register_update(id_, values):
    """Queue (*id_*, *values*) for a later flush to storage via do_updates()."""
    __update.append((id_, values))
def do_updates():
    """Flush every queued (id, values) pair to the storage backend, then clear the queue."""
    global __update
    for entry_id, values in __update:
        __storage.set(entry_id, values)
    __update = []
def __do_queue():
    """Populate the cache for every queued instance.

    For each registered (id, instance, provider) entry: fill the storage
    backend from the provider if the id is not cached yet, then attach the
    stored values to the instance as ``__cached__``. Closes the queue first
    so attribute access during processing cannot re-enter.
    """
    global __register
    global __open_queue
    __open_queue = False
    for entry_id, instance, provider in __register:
        if not __storage.has(entry_id):
            __storage.set(entry_id, provider(instance))
        instance.__cached__ = __storage.get(entry_id)
    __register = []
def __register_class(id_, self, provider):
    """Queue an instance for lazy cache population and mark the queue open."""
    global __open_queue
    __register.append((id_, self, provider))
    __open_queue = True
def __make_id(cls, self, id_attribute):
    """Build the storage key 'class:<ClassName><id value>' for *self*."""
    return 'class:' + cls.__name__ + str(self.__dict__[id_attribute])
def __should_do_queue(self):
    """Return True when the registration queue may be processed for *self*.

    Processing is allowed only while the queue is open, *self* carries the
    '__in_init' flag, and that flag is falsy (i.e. construction finished).
    """
    if not __open_queue:
        return False
    if '__in_init' not in self.__dict__:
        return False
    return not self.__dict__['__in_init']
def cache_class(id_attribute):
    """Cachable attributes don't have to be specified since
    self.__cached__.keys() will provide all attributes that were
    retrieved from cache (and could subsequently be updated).
    """
    # Class decorator factory: instances of the decorated class are keyed on
    # their *id_attribute* value and lazily backed by the storage backend.
    def make_class(cls):
        global __next_provider
        # The provider must have been declared immediately beforehand via
        # @cache_provider; it is consumed (reset to None) here.
        if __next_provider is None:
            raise LookupError("No provider function declared. Put"
                              + " the 'cache_provider' decorator on the"
                              + " function that returns data for the"
                              + " instance")
        provider_function = __next_provider
        __next_provider = None
        old_init = cls.__init__
        def new_init(self, *args, **kwargs):
            # The flag suppresses queue processing during construction (see
            # __should_do_queue); afterwards the instance is queued for fill.
            self.__in_init = True
            old_init(self, *args, **kwargs)
            self.__in_init = False
            __register_class(__make_id(cls, self, id_attribute),
                             self, provider_function)
        cls.__init__ = new_init
        old_getattribute = cls.__getattribute__
        def new_getattribute(self, key):
            # Serve reads from __cached__ when present; '__dict__' and
            # '__cached__' themselves bypass this to avoid recursion.
            # NOTE: `hasattr` here resolves to the custom function defined
            # below in this scope, not the builtin.
            if key != '__dict__' and key != '__cached__':
                if __should_do_queue(self):
                    __do_queue()
                if hasattr(self, '__cached__') and key in self.__cached__:
                    return self.__cached__[key]
            return old_getattribute(self, key)
        cls.__getattribute__ = new_getattribute
        old_setattr = cls.__setattr__
        def new_setattr(self, key, value):
            if key != '__cache__':
                if __should_do_queue(self):
                    __do_queue()
                if hasattr(self, '__cached__'):
                    """Only check for updatable cache values
                    when a cache dict exists"""
                    # Remember which attributes originally came from cache.
                    if not hasattr(self, '__cachable_attrs'):
                        self.__dict__['__cachable_attrs'] = \
                            self.__dict__['__cached__'].keys()
                    if key in self.__dict__['__cachable_attrs']:
                        # NOTE(review): compares the attribute *name* against
                        # the cached value — likely intended `value != ...`;
                        # preserved as-is.
                        if key != self.__dict__['__cached__'][key]:
                            self.__dict__['__cached__'][key] = value
                            __register_update(
                                __make_id(cls, self, id_attribute),
                                self.__cached__)
                            return
            old_setattr(self, key, value)
        cls.__setattr__ = new_setattr
        def hasattr(self, key):
            # Custom membership check over the instance dict; shadows the
            # builtin hasattr for every closure defined in this scope.
            # NOTE(review): probes '__cache__' while the rest of the module
            # uses '__cached__' — possible inconsistency; preserved as-is.
            if __should_do_queue(self):
                __do_queue()
            if '__cache__' in self.__dict__:
                if key in self.__dict__['__cache__']:
                    return True
            if key in self.__dict__:
                return True
            return False
        cls.__hasattr__ = hasattr
        return cls
    return make_class
def cache_provider(f):
    """Declare *f* as the data provider consumed by the next @cache_class.

    Stores *f* in the module-level slot read by cache_class and returns it
    unchanged so it remains usable as a normal function.
    """
    global __next_provider
    __next_provider = f
    return f
|
nilq/baby-python
|
python
|
"""
Created on Wed Jan 15 11:17:10 2020
@author: mesch
"""
from colorama import init, Fore, Back
init(autoreset=True) #to convert termcolor to wins color
import copy
from pyqum.instrument.benchtop import RSA5 as MXA
from pyqum.instrument.benchtop import PSGA
from pyqum.instrument.modular import AWG
from pyqum.instrument.logger import status_code
from pyqum.instrument.analyzer import curve
from numpy import sin, cos, pi, array, float64, sum, dot
# Initialize instruments:
# PSGA: RF signal generator — 5.5 GHz carrier at 13 dBm, output on.
saga = PSGA.Initiate()
PSGA.rfoutput(saga, action=['Set', 1])
PSGA.frequency(saga, action=['Set', "5.5" + "GHz"])
PSGA.power(saga, action=['Set', "13" + "dBm"])
# SA: spectrum analyzer window around the 5.5 GHz carrier, externally triggered.
mxa = MXA.Initiate()
MXA.frequency(mxa, action=['Set','5.525GHz'])
MXA.fspan(mxa, action=['Set','150MHz'])
MXA.rbw(mxa, action=['Set','1MHz'])
MXA.vbw(mxa, action=['Set','100kHz'])
MXA.trigger_source(mxa, action=['Set','EXTernal1'])
# AWG: arbitrary waveform generator producing the I/Q drive.
awgsess = AWG.InitWithOptions()
AWG.Abort_Gen(awgsess)
AWG.ref_clock_source(awgsess, action=['Set',int(1)]) # External 10MHz clock-reference
AWG.predistortion_enabled(awgsess, action=['Set',True])
AWG.output_mode_adv(awgsess, action=['Set',int(2)]) # Sequence output mode
AWG.arb_sample_rate(awgsess, action=['Set',float(1250000000)]) # maximum sampling rate
AWG.active_marker(awgsess, action=['Set','3']) # master
AWG.marker_delay(awgsess, action=['Set',float(0)])
AWG.marker_pulse_width(awgsess, action=['Set',float(1e-7)])
AWG.marker_source(awgsess, action=['Set',int(7)])
samplingrate = AWG.arb_sample_rate(awgsess)[1]
dt = 1e9/samplingrate # sample period in ns
# PRESET Output: single-ended, no filtering, 0.5 gain, 50 Ohm.
for ch in range(2):
    channel = str(ch + 1)
    AWG.output_config(awgsess, RepCap=channel, action=["Set", 0]) # Single-ended
    AWG.output_filter_bandwidth(awgsess, RepCap=channel, action=["Set", 0])
    AWG.arb_gain(awgsess, RepCap=channel, action=["Set", 0.5])
    AWG.output_impedance(awgsess, RepCap=channel, action=["Set", 50])
# output settings: enable both channels in amplified differential mode.
for ch in range(2):
    channel = str(ch + 1)
    AWG.output_enabled(awgsess, RepCap=channel, action=["Set", int(1)]) # ON
    AWG.output_filter_enabled(awgsess, RepCap=channel, action=["Set", True])
    AWG.output_config(awgsess, RepCap=channel, action=["Set", int(2)]) # Amplified 1:2
    AWG.output_filter_bandwidth(awgsess, RepCap=channel, action=["Set", 0])
    AWG.arb_gain(awgsess, RepCap=channel, action=["Set", 0.5])
    AWG.output_impedance(awgsess, RepCap=channel, action=["Set", 50])
def AWG_Sinewave(ifreq, IQparams):
    '''Program the AWG with continuous I/Q sinewaves and start generation.

    ifreq: IF frequency in MHz.
    IQparams: [Ioffset, Qoffset, ampratio, Iphase, Qphase] — DC offsets (V),
        Q/I amplitude ratio, and phases (degrees).
    Channel mapping: channel 1 carries sin (Q settings), channel 2 carries
    cos (I settings).
    '''
    AWG.Clear_ArbMemory(awgsess)
    WAVE = []
    Ioffset, Qoffset, ampratio, Iphase, Qphase = IQparams
    # Keep the larger of the two channels at 1 V and scale the other by the ratio.
    if (ampratio > -1.0) and (ampratio < 1.0):
        Iamp = 1
        Qamp = Iamp * ampratio
    else:
        Qamp = 1
        Iamp = Qamp/ampratio
    ifvoltag = [min(abs(Qamp),1), min(abs(Iamp),1)] # contain amplitude within 1V
    # Direct function references instead of the previous string-built
    # eval() call: identical waveforms, without the eval overhead/risk.
    iffunction = [sin, cos]
    iffreq = [ifreq, ifreq]
    ifoffset = [Qoffset, Ioffset]
    ifphase = [Qphase, Iphase]
    # construct waveform:
    for ch in range(2):
        Nperiod = int(1000/iffreq[ch]/dt) # points per IF period (dt in ns, freq in MHz)
        Nperiod *= 8 # store 8 periods per waveform
        wavefom = [ifvoltag[ch] * iffunction[ch](x*dt*iffreq[ch]/1000*2*pi + ifphase[ch]/180*pi) + ifoffset[ch] for x in range(Nperiod)]
        createdwave = AWG.CreateArbWaveform(awgsess, wavefom)
        WAVE.append(createdwave[1])
    # Building Sequences:
    for ch in range(2):
        channel = str(ch + 1)
        createdseqhandl = AWG.CreateArbSequence(awgsess, [WAVE[ch]], [1]) # loop# canbe >1 if longer sequence is needed in the future!
        # Channel Assignment:
        AWG.arb_sequence_handle(awgsess, RepCap=channel, action=["Set", createdseqhandl[1]])
    # Trigger Settings:
    for ch in range(2):
        channel = str(ch + 1)
        AWG.operation_mode(awgsess, RepCap=channel, action=["Set", 0])
        AWG.trigger_source_adv(awgsess, RepCap=channel, action=["Set", 0])
    AWG.Init_Gen(awgsess)
    AWG.Send_Pulse(awgsess, 1)
    return
class IQ_Cal:
    """Nelder-Mead calibration of an IQ mixer using live instrument feedback.

    Depending on *suppression*, the optimizer tunes either the DC offsets
    (IQparams[0:2], to suppress LO leakage) or the amplitude ratio and
    phases (IQparams[2:5], to suppress the mirror image). Each candidate
    parameter set is programmed into the AWG via AWG_Sinewave and scored by
    reading power (dBm) from the spectrum analyzer ``mxa``.

    IQparams layout: [Ioffset, Qoffset, ampratio, Iphase, Qphase].
    """
    # NOTE(review): the ndarray default arguments are mutable objects shared
    # across calls; safe only while callers pass their own arrays — confirm.
    def __init__(self, suppression='LO', IQparams=array([0.,0.,1.,0.,0.]), STEP=array([-0.5,-0.5,0.5,12,12]), ratio=1):
        self.IQparams = IQparams
        self.STEP = STEP
        self.suppression = suppression
        if self.suppression == 'LO':
            # Optimize the two DC offsets; steps shrink by 10**(ratio+1).
            self.var = copy.copy(self.IQparams[:2])
            self.step = self.STEP[:2]/(10**(ratio+1))
        elif self.suppression == 'MR':
            # Optimize amp-ratio and phases; steps shrink by 2**(ratio+1).
            self.var = copy.copy(self.IQparams[2:])
            self.step = self.STEP[2:]/(2**(ratio+1))
    def nelder_mead(self, no_improve_thr=10e-6, no_improv_break=10, max_iter=0,
                    alpha=1., gamma=2., rho=-0.5, sigma=0.5, time=0):
        '''
        Pure Python/Numpy implementation of the Nelder-Mead algorithm.
        Reference: https://en.wikipedia.org/wiki/Nelder%E2%80%93Mead_method
        '''
        '''
        @param f (function): function to optimize, must return a scalar score
            and operate over a numpy array of the same dimensions as x_start
        @param x_start (numpy array): initial position
        @param step (float): look-around radius in initial step
        @no_improv_thr,  no_improv_break (float, int): break after no_improv_break iterations with
            an improvement lower than no_improv_thr
        @max_iter (int): always break after this number of iterations.
            Set it to 0 to loop indefinitely.
        @alpha, gamma, rho, sigma (floats): parameters of the algorithm
            (see Wikipedia page for reference)
        return: tuple (best parameter array, best score)
        '''
        # Even calls (index 0) score raw power at 5.5 GHz; odd calls score
        # the lower-sideband power minus the upper-sideband power.
        index = time%2
        dim = len(self.var)
        "tell AWG to apply DC offset(x) on I & Q"
        AWG_Sinewave(25, self.IQparams)
        "read signal amplitude at LO frequency in and assign it as score"
        MXA.preamp(mxa, action=['Set','OFF'])
        # MXA.preamp_band(mxa, action=['Set','FULL'])
        # MXA.attenuation(mxa, action=['Set','14dB'])
        MXA.attenuation_auto(mxa, action=['Set','ON'])
        power = float((MXA.fpower(mxa, str(5.5 - 0.025*index)+'GHz')).split('dBm')[0]) - index*float((MXA.fpower(mxa, str(5.5 + 0.025*index)+'GHz')).split('dBm')[0])
        prev_best = power
        no_improv = 0
        res = [[self.var, prev_best]]
        # while True:
        #     print("LOPower: %s" %power)
        #     if bool(input('hello')): break
        # Build the initial simplex: perturb each dimension by its step.
        for i in range(dim):
            x = copy.copy(self.var)
            x[i] = x[i] + self.step[i]
            "tell AWG to apply DC offset(x) on I & Q"
            if self.suppression == 'LO': self.IQparams[:2] = x
            elif self.suppression == 'MR': self.IQparams[2:] = x
            AWG_Sinewave(25, self.IQparams)
            "read signal amplitude at LO frequency in and assign it as score"
            power = float((MXA.fpower(mxa, str(5.5 - 0.025*index)+'GHz')).split('dBm')[0]) - index*float((MXA.fpower(mxa, str(5.5 + 0.025*index)+'GHz')).split('dBm')[0])
            score = power
            res.append([x, score])
        # simplex iter
        iters = 0
        while 1:
            # order
            res.sort(key=lambda x: x[1])
            if self.suppression == 'LO': self.IQparams[:2] = res[0][0]
            elif self.suppression == 'MR': self.IQparams[2:] = res[0][0]
            # print(Fore.YELLOW + "\rProgress time#%s: %s" %(time, self.IQparams), end='\r', flush=True)
            best = res[0][1]
            # break after max_iter
            if max_iter and iters >= max_iter:
                return res[0]
            iters += 1
            # AWG_Sinewave(25, self.IQparams)
            # if float((RSA5.fpower(rsa, str(5.5)+'GHz')).split('dBm')[0]) < -65. and float((RSA5.fpower(rsa, str(5.475)+'GHz')).split('dBm')[0]) < -65.:
            #     return array([self.IQparams, best, 0.])
            # Track stagnation; stop after no_improv_break non-improving rounds.
            if best < prev_best - no_improve_thr or best == prev_best:
                no_improv = 0
                prev_best = best
            else:
                no_improv += 1
            if no_improv >= no_improv_break:
                AWG_Sinewave(25, self.IQparams)
                print("Rest at Optimized IQ Settings: %s" %self.IQparams)
                return array([self.IQparams, best]) # Optimized parameters
            # centroid
            x0 = [0.] * dim
            for tup in res[:-1]:
                for i, c in enumerate(tup[0]):
                    x0[i] += c / (len(res)-1)
            # reflection
            xr = x0 + alpha*(x0 - res[-1][0])
            if self.suppression == 'LO': self.IQparams[:2] = xr
            elif self.suppression == 'MR': self.IQparams[2:] = xr
            "tell AWG to apply DC offset(x) on I & Q"
            AWG_Sinewave(25, self.IQparams)
            "read signal amplitude at LO frequency in and assign it as score"
            power = float((MXA.fpower(mxa, str(5.5 - 0.025*index)+'GHz')).split('dBm')[0]) - index*float((MXA.fpower(mxa, str(5.5 + 0.025*index)+'GHz')).split('dBm')[0])
            rscore = power
            if res[0][1] <= rscore < res[-2][1]:
                del res[-1]
                res.append([xr, rscore])
                continue
            # expansion
            if rscore < res[0][1]:
                xe = x0 + gamma*(x0 - res[-1][0])
                if self.suppression == 'LO': self.IQparams[:2] = xe
                elif self.suppression == 'MR': self.IQparams[2:] = xe
                "tell AWG to apply DC offset(x) on I & Q"
                AWG_Sinewave(25, self.IQparams)
                "read signal amplitude at LO frequency in and assign it as score"
                power = float((MXA.fpower(mxa, str(5.5 - 0.025*index)+'GHz')).split('dBm')[0]) - index*float((MXA.fpower(mxa, str(5.5 + 0.025*index)+'GHz')).split('dBm')[0])
                escore = power
                if escore < rscore:
                    del res[-1]
                    res.append([xe, escore])
                    continue
                else:
                    del res[-1]
                    res.append([xr, rscore])
                    continue
            # contraction
            xc = x0 + rho*(x0 - res[-1][0])
            if self.suppression == 'LO': self.IQparams[:2] = xc
            elif self.suppression == 'MR': self.IQparams[2:] = xc
            "tell AWG to apply DC offset(x) on I & Q"
            AWG_Sinewave(25, self.IQparams)
            "read signal amplitude at LO frequency in and assign it as score"
            power = float((MXA.fpower(mxa, str(5.5 - 0.025*index)+'GHz')).split('dBm')[0]) - index*float((MXA.fpower(mxa, str(5.5 + 0.025*index)+'GHz')).split('dBm')[0])
            cscore = power
            if cscore < res[-1][1]:
                del res[-1]
                res.append([xc, cscore])
                continue
            # reduction
            x1 = res[0][0]
            nres = []
            for tup in res:
                redx = x1 + sigma*(tup[0] - x1)
                if self.suppression == 'LO': self.IQparams[:2] = redx
                elif self.suppression == 'MR': self.IQparams[2:] = redx
                "tell AWG to apply DC offset(x) on I & Q"
                AWG_Sinewave(25, self.IQparams)
                "read signal amplitude at LO frequency in and assign it as score"
                power = float((MXA.fpower(mxa, str(5.5 - 0.025*index)+'GHz')).split('dBm')[0]) - index*float((MXA.fpower(mxa, str(5.5 + 0.025*index)+'GHz')).split('dBm')[0])
                score = power
                nres.append([redx, score])
            res = nres
if __name__ == "__main__":
    # Baseline LO-leakage and mirror-image powers before calibration (dBm).
    LO_0 = float((MXA.fpower(mxa, str(5.5)+'GHz')).split('dBm')[0])
    Mirror_0 = float((MXA.fpower(mxa, str(5.475)+'GHz')).split('dBm')[0])
    Initial = [0., 0., 1., 0., 0.]
    time = 0
    OPT = IQ_Cal()
    OPT.IQparams = array(Initial,dtype=float64) #overwrite initial values
    result = OPT.nelder_mead(time = time)
    prev = result[0]
    no_improv, no_improv_thr, no_improv_break = 0, 1e-5, 6
    LO, Mirror, T = [], [], []
    while True:
        time += 1
        # Alternate mirror-image ('MR') and LO-leakage ('LO') passes, with
        # progressively finer steps (ratio grows with time).
        if time%2: OPT = IQ_Cal('MR',result[0], ratio = time)
        else: OPT = IQ_Cal('LO',result[0], ratio = time)
        result = OPT.nelder_mead(time = time)
        # if len(result) == 3:
        #     print("Optimized IQ parameters:\n %s" %result)
        #     break
        LO.append(float((MXA.fpower(mxa, str(5.5)+'GHz')).split('dBm')[0]) - LO_0)
        Mirror.append(float((MXA.fpower(mxa, str(5.475)+'GHz')).split('dBm')[0]) - Mirror_0)
        print(Back.BLUE + Fore.WHITE + "Mirror has been suppressed for %s from %s" %(Mirror[-1],Mirror_0))
        T.append(time)
        # Stop after no_improv_break consecutive passes whose parameters
        # barely move (sum of squared parameter changes below threshold).
        ssq = sum((result[0] - prev)**2)
        if ssq > no_improv_thr:
            no_improv = 0
            prev = result[0]
        else:
            no_improv += 1
        if no_improv >= no_improv_break:
            AWG_Sinewave(25, OPT.IQparams)
            print(type(OPT.IQparams))
            print("Optimized IQ parameters:\n %s" %result)
            print("Amplitude Imbalance:\n %s" %OPT.IQparams[2])
            if OPT.IQparams[3] > OPT.IQparams[4] and OPT.IQparams[3]-OPT.IQparams[4] < 180:
                print("phase skew I-Q:\n %s" %(OPT.IQparams[3]-OPT.IQparams[4]))
            if OPT.IQparams[3] > OPT.IQparams[4] and OPT.IQparams[3]-OPT.IQparams[4] > 180:
                print("phase skew Q-I:\n %s" %(360-(OPT.IQparams[3]-OPT.IQparams[4])))
            if (OPT.IQparams[4] > OPT.IQparams[3] and OPT.IQparams[4]-OPT.IQparams[3] < 180) or (OPT.IQparams[3] > OPT.IQparams[4] and OPT.IQparams[3]-OPT.IQparams[4] > 180):
                print("phase skew Q-I:\n %s" %(OPT.IQparams[4]-OPT.IQparams[3]))
            # Recover per-channel amplitudes from the final ratio (same rule
            # as AWG_Sinewave).
            if (OPT.IQparams[2] > -1.0) and (OPT.IQparams[2] < 1.0):
                Iamp = 1
                Qamp = Iamp * OPT.IQparams[2]
            else:
                Qamp = 1
                Iamp = Qamp/OPT.IQparams[2]
            print("Ioffset:\n %s" %OPT.IQparams[0])
            print("Qoffset:\n %s" %OPT.IQparams[1])
            print("Iamp:\n %s" %Iamp)
            print("Qamp:\n %s" %Qamp)
            print("Iphase:\n %s" %OPT.IQparams[3])
            print("Qphase:\n %s" %OPT.IQparams[4])
            break
    curve(T,LO,'LO Leakage vs time','T(#)','DLO(dB)')
    curve(T,Mirror,'Mirror Image vs time','T(#)','DMirror(dB)')
    # closing instruments:
    ans = input("Press any keys to close AWG, PSGA and RSA-5 ")
    AWG.Abort_Gen(awgsess)
    AWG.close(awgsess)
    PSGA.rfoutput(saga, action=['Set', 0])
    PSGA.close(saga, False)
    MXA.close(mxa,False)
|
nilq/baby-python
|
python
|
from datetime import datetime
import hashlib
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from markdown import markdown
import bleach
from flask import current_app, request, url_for
from flask_login import UserMixin, AnonymousUserMixin
from app.exceptions import ValidationError
from . import db, login_manager
class Permission:
    """Bit-flag permission constants; Role.permissions stores their OR-ed mask."""
    VIEW = 0x01
    SEARCH = 0x02
    EDIT = 0x04
    #WRITE_ARTICLES = 0x04
    # MODERATE_COMMENTS = 0x08
    ADMINISTER = 0x80
class Role(db.Model):
    """User role; `permissions` is a bitmask of Permission flags."""
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    default = db.Column(db.Boolean, default=False, index=True)  # role assigned to new users
    permissions = db.Column(db.Integer)
    users = db.relationship('User', backref='role', lazy='dynamic')
    @staticmethod
    def insert_roles():
        """Create or update the built-in roles (idempotent seed helper)."""
        roles = {
            'Visitor':(Permission.VIEW,True),
            'Inneruser': (Permission.SEARCH|Permission.VIEW , False),
            'Manager': (Permission.SEARCH |
                        Permission.EDIT , False),
            'Administrator': (0xff, False)
        }
        for r in roles:
            role = Role.query.filter_by(name=r).first()
            if role is None:
                role = Role(name=r)
            role.permissions = roles[r][0]
            role.default = roles[r][1]
            db.session.add(role)
        db.session.commit()
    def __repr__(self):
        return '<Role %r>' % self.name
# Login module: user model and account creation
class User(UserMixin, db.Model):
    """Registered user account; permissions come from the linked Role."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(64), unique=True, index=True)
    username = db.Column(db.String(64), unique=True, index=True)
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
    password_hash = db.Column(db.String(128))
    # NOTE(review): extra password column kept alongside password_hash —
    # confirm whether it stores plaintext and can be removed.
    passwd=db.Column(db.String(32))
    confirmed = db.Column(db.Boolean, default=False)
    name = db.Column(db.String(64))
    location = db.Column(db.String(64))
    about_me = db.Column(db.Text())
    member_since = db.Column(db.DateTime(), default=datetime.utcnow) # account creation time
    last_seen = db.Column(db.DateTime(), default=datetime.utcnow) # last login time
    avatar_hash = db.Column(db.String(32))
    # related bidding records
    bids = db.relationship('Auction_data', backref='author', lazy='dynamic') # one-to-one alternative: lazy='immediate', uselist=False
    actions = db.relationship('BID_action', backref='author', lazy='dynamic') # one-to-one alternative: uselist=False
    @property # @property turns this method into a read-only attribute
    def password(self):
        raise AttributeError('password is not a readable attribute')
    # store only the salted hash of the password
    @password.setter # with both @property and @x.setter the attribute is read/write (add @x.deleter for deletion)
    def password(self, password):
        self.password_hash = generate_password_hash(password)
    def verify_password(self, password):
        """Check *password* against the stored hash."""
        return check_password_hash(self.password_hash, password)
    def generate_confirmation_token(self, expiration=3600):
        """Return a signed account-confirmation token valid for *expiration* seconds."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'confirm': self.id})
    def confirm(self, token):
        """Validate a confirmation token and mark this account confirmed."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except:  # NOTE(review): bare except also hides unrelated errors
            return False
        if data.get('confirm') != self.id:
            return False
        self.confirmed = True
        db.session.add(self)
        return True
    # check whether the user's role grants all of the given permission bits
    def can(self, permissions):
        return self.role is not None and \
            (self.role.permissions & permissions) == permissions
    def is_administrator(self):
        return self.can(Permission.ADMINISTER)
    # refresh the user's last-seen timestamp
    def ping(self):
        self.last_seen = datetime.utcnow() # UTC time
        db.session.add(self)
    ### user avatar
    def gravatar(self, size=100, default='identicon', rating='g'):
        """Build a Gravatar URL derived from the username's MD5 hash."""
        if request.is_secure:
            url = 'https://secure.gravatar.com/avatar'
        else:
            url = 'http://www.gravatar.com/avatar'
        hash = self.avatar_hash or hashlib.md5(
            self.username.encode('utf-8')).hexdigest()
        return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(
            url=url, hash=hash, size=size, default=default, rating=rating)
    # generate a signed API token embedding the encoded user id
    def generate_auth_token(self, expiration):
        s = Serializer(current_app.config['SECRET_KEY'],
                       expires_in=expiration)
        return s.dumps({'id': self.id}).decode('ascii')
    @staticmethod
    def verify_auth_token(token):
        """Return the User for a valid auth token, else None."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except:
            return None
        return User.query.get(data['id'])
    def __repr__(self):
        return '<User %r>' % self.username
##### License-plate auction (bidding) database
class Auction_data(db.Model):
    """Bidding credentials registered by a user (one bid sheet per row)."""
    __tablename__ = 'bids'
    id = db.Column(db.Integer, primary_key=True)
    IDnumber = db.Column(db.Integer)
    BIDnumber = db.Column(db.Integer)
    BIDpassword = db.Column(db.Integer)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id')) # matches the 'author' backref on User.bids
    # action_id =db.Column(db.Integer, db.ForeignKey('actions.id'))
    def __repr__(self):
        return '<Auction %r>' % self.IDnumber
    def to_json(self):
        """Serialize this record, with an API URL for its owner, to a dict."""
        json_post = {
            'IDnumber': self.IDnumber,
            'BIDnumber': self.BIDnumber,
            'BIDpassword': self.BIDpassword,
            'author': url_for('api.get_user', id=self.author_id,
                              _external=True),
        }
        return json_post
class BID_action(db.Model):
    """Per-user bidding strategy parameters."""
    __tablename__ = 'actions'
    id = db.Column(db.Integer, primary_key=True)
    diff = db.Column(db.Integer) # price differential at the reference time
    refer_time = db.Column(db.Integer) # reference time
    bid_time = db.Column(db.Integer) # bid submission deadline
    delay_time = db.Column(db.Float) # bid delay time, 0.1~0.9
    ahead_price = db.Column(db.Integer) # price lead added when bidding
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    # auctions = db.relationship('Auction_data', backref='action', lazy='immediate') # one-to-one
    def __repr__(self):
        return '<BID %r>' % self.diff
# Bidding-site login information
class login_user(db.Model):
    """Bidding-account login record (mirrors a User by name).

    Tracks the bid sheet in use and the session's login state.
    """
    __tablename__='Account'
    id=db.Column(db.Integer,primary_key=True)
    name=db.Column(db.String)       # same as the User's name
    password=db.Column(db.String)   # same as the user's password, stored hashed
    login=db.Column(db.Integer)     # login state
    CODE=db.Column(db.String)       # bid sheet number in use
    codepsd=db.Column(db.String)    # bid sheet login password
    ID_number=db.Column(db.Integer)
    # Bug fix: was `IP=db.Column` (no call), which assigned the Column class
    # itself instead of declaring a mapped column.
    IP=db.Column(db.String)         # login IP address
    MAC=db.Column(db.String)        # login MAC address
    COUNT=db.Column(db.Integer)     # login count/state
# Subclass of Flask-Login's AnonymousUserMixin, installed as current_user when nobody is logged in,
# so the app can call current_user.can() and current_user.is_administrator() without checking login state first.
class AnonymousUser(AnonymousUserMixin):
    """Stand-in for unauthenticated visitors: denies every permission check."""

    def can(self, permissions):
        """An anonymous user never holds any permission."""
        return False

    def is_administrator(self):
        """An anonymous user is never an administrator."""
        return False

# Hand out AnonymousUser for requests with no login, so permission checks
# work uniformly on current_user.
login_manager.anonymous_user = AnonymousUser
# Callback that loads a user from the given identifier
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login user-loader callback: fetch a User by its (string) id."""
    return User.query.get(int(user_id))
|
nilq/baby-python
|
python
|
from typing import Any, Dict, Iterable, List
import pandas as pd
from fugue.dataframe import ArrayDataFrame
from fugue.exceptions import FugueInterfacelessError
from fugue.extensions.transformer import (
Transformer,
_to_transformer,
transformer,
register_transformer,
)
from pytest import raises
from triad.collections.schema import Schema
from triad.utils.hash import to_uuid
def test_transformer():
    """@transformer-decorated functions are Transformers whose output schema follows the declaration."""
    assert isinstance(t1, Transformer)
    df = ArrayDataFrame([[0]], "a:int")
    t1._output_schema = t1.get_output_schema(df)
    assert t1.output_schema == "a:int,b:int"
    t2._output_schema = t2.get_output_schema(df)
    assert t2.output_schema == "b:int,a:int"
    t3._output_schema = t3.get_output_schema(df)
    assert t3.output_schema == "a:int,b:int"
    assert [[0, 1]] == list(t3(df.as_array_iterable()))
def test__to_transformer():
    """_to_transformer accepts classes, instances, functions and names, always returning a fresh Transformer."""
    a = _to_transformer(MockTransformer)
    assert isinstance(a, MockTransformer)
    b = _to_transformer("MockTransformer")
    assert isinstance(b, MockTransformer)
    a = _to_transformer(t1, None)
    assert isinstance(a, Transformer)
    a._x = 1
    # every parse should produce a different transformer even the input is
    # a transformer instance
    b = _to_transformer(t1, None)
    assert isinstance(b, Transformer)
    assert "_x" not in b.__dict__
    c = _to_transformer("t1", None)
    assert isinstance(c, Transformer)
    assert "_x" not in c.__dict__
    c._x = 1
    d = _to_transformer("t1", None)
    assert isinstance(d, Transformer)
    assert "_x" not in d.__dict__
    # t4 declares no schema anywhere, so one must be supplied explicitly.
    raises(FugueInterfacelessError, lambda: _to_transformer(t4, None))
    raises(FugueInterfacelessError, lambda: _to_transformer("t4", None))
    e = _to_transformer("t4", "*,b:int")
    assert isinstance(e, Transformer)
    f = _to_transformer("t5")
    assert isinstance(f, Transformer)
    g = _to_transformer("t6", "*,b:int")
    assert isinstance(g, Transformer)
    h = _to_transformer("t7")
    assert isinstance(h, Transformer)
    i = _to_transformer("t8")
    assert isinstance(i, Transformer)
    j = _to_transformer("t9")
    assert isinstance(j, Transformer)
    k = _to_transformer("t10")
    assert isinstance(k, Transformer)
def test__register():
    """Registered aliases resolve via _to_transformer, beat source parsing, and refuse overwrite with on_dup='raise'."""
    register_transformer("t_x", MockTransformer)
    b = _to_transformer("t_x")
    assert isinstance(b, MockTransformer)
    register_transformer("t_t3", t3)
    register_transformer("t_t4", t4)
    register_transformer("t_t5", t5)
    assert isinstance(_to_transformer("t_t3"), Transformer)
    assert isinstance(_to_transformer("t_t4", "*,x:int"), Transformer)
    assert isinstance(_to_transformer("t_t5"), Transformer)
    # schema: *
    def register_temp(df: pd.DataFrame) -> pd.DataFrame:
        return df
    t = _to_transformer("register_temp")
    assert isinstance(t, Transformer)
    assert not isinstance(t, MockTransformer)
    # registered alias has the highest priority
    register_transformer("register_temp", MockTransformer)
    t = _to_transformer("register_temp")
    assert isinstance(t, MockTransformer)
    # can't overwrite
    raises(
        KeyError,
        lambda: register_transformer("register_temp", MockTransformer, on_dup="raise"),
    )
def test__to_transformer_determinism():
    """Parsing the same source twice yields distinct objects with identical to_uuid fingerprints."""
    a = _to_transformer(t1, None)
    b = _to_transformer(t1, None)
    c = _to_transformer("t1", None)
    assert a is not b
    assert to_uuid(a) == to_uuid(b)
    assert a is not c
    assert to_uuid(a) == to_uuid(c)
    a = _to_transformer(t4, "*,b:int")
    b = _to_transformer("t4", "*,b:int")
    assert a is not b
    assert to_uuid(a) == to_uuid(b)
    # The schema may be given as a string or a Schema object equivalently.
    a = _to_transformer(t4, "a:int,b:int")
    b = _to_transformer("t4", Schema("a:int,b:int"))
    assert a is not b
    assert to_uuid(a) == to_uuid(b)
    a = _to_transformer(MockTransformer)
    b = _to_transformer("MockTransformer")
    assert a is not b
    assert to_uuid(a) == to_uuid(b)
    a = _to_transformer(t10)
    b = _to_transformer("t10")
    assert a is not b
    assert to_uuid(a) == to_uuid(b)
def test_to_transformer_validation():
    """Validation rules come from decorator kwargs, comment hints, or a validation_rules property."""
    @transformer(["*", None, "b:int"], input_has=" a , b ")
    def tv1(df: Iterable[Dict[str, Any]]) -> Iterable[Dict[str, Any]]:
        for r in df:
            r["b"] = 1
            yield r
    # input_has: a , b
    # schema: *,b:int
    def tv2(df: Iterable[Dict[str, Any]]) -> Iterable[Dict[str, Any]]:
        for r in df:
            r["b"] = 1
            yield r
    class MockTransformerV(Transformer):
        @property
        def validation_rules(self):
            return {"input_is": "a:int,b:int"}
        def get_output_schema(self, df):
            pass
        def transform(self, df):
            pass
    a = _to_transformer(tv1, None)
    assert {"input_has": ["a", "b"]} == a.validation_rules
    b = _to_transformer(tv2, None)
    assert {"input_has": ["a", "b"]} == b.validation_rules
    c = _to_transformer(MockTransformerV)
    assert {"input_is": "a:int,b:int"} == c.validation_rules
def test_inside_class():
    """Comment hints (schema / input_is) are honored for bound methods too."""
    class Test(object):
        # schema: *
        # input_is: a:int , b :int
        def t1(self, df: pd.DataFrame) -> pd.DataFrame:
            return df
    test = Test()
    a = _to_transformer(test.t1)
    assert isinstance(a, Transformer)
    assert {"input_is": "a:int,b:int"} == a.validation_rules
@transformer(["*", None, "b:int"])
def t1(df: Iterable[Dict[str, Any]]) -> Iterable[Dict[str, Any]]:
    """Yield every row with column ``b`` set to 1 (mutates the row dict)."""
    for row in df:
        row["b"] = 1
        yield row
@transformer([Schema("b:int"), "*"])
def t2(df: Iterable[Dict[str, Any]]) -> Iterable[Dict[str, Any]]:
    """Yield every row with column ``b`` set to 1; declared schema puts b first."""
    for row in df:
        row["b"] = 1
        yield row
@transformer(Schema("a:int, b:int"))
def t3(df: Iterable[List[Any]]) -> Iterable[List[Any]]:
    """Yield every row list with 1 appended (mutates the row in place)."""
    for row in df:
        row.append(1)
        yield row
def t4(df: Iterable[List[Any]]) -> Iterable[List[Any]]:
    """Yield every row list with 1 appended (mutates the row in place); declares no schema."""
    for row in df:
        row.append(1)
        yield row
# schema: *,b:int
def t5(df: Iterable[Dict[str, Any]]) -> Iterable[Dict[str, Any]]:
    """Yield every row dict with ``b`` set to 1; fugue reads the schema comment above."""
    for row in df:
        row["b"] = 1
        yield row
def t6(df: Iterable[Dict[str, Any]]) -> Iterable[Dict[str, Any]]:
    """Yield every row dict with ``b`` set to 1; schema is supplied by the caller."""
    for row in df:
        row["b"] = 1
        yield row
# schema: *
def t7(df: pd.DataFrame) -> Iterable[pd.DataFrame]:
    """Identity transformer: yield the input dataframe unchanged."""
    yield df
# schema: *
def t8(df: Iterable[pd.DataFrame]) -> pd.DataFrame:
    """Concatenate the incoming dataframes into a single dataframe."""
    frames = list(df)
    return pd.concat(frames)
# schema: *
def t9(df: pd.DataFrame) -> Iterable[pd.DataFrame]:
    """Identity transformer: yield the input dataframe unchanged."""
    yield df
# schema: *
def t10(df: pd.DataFrame, c: callable) -> pd.DataFrame:
    """Identity transformer taking a callable parameter; note it is a generator despite the annotated return type."""
    yield df
class MockTransformer(Transformer):
    """Minimal concrete Transformer used as a registration/instantiation target in these tests."""
    def get_output_schema(self, df):
        # Not exercised by these tests.
        pass
    def transform(self, df):
        # Not exercised by these tests.
        pass
|
nilq/baby-python
|
python
|
from header_common import *
from header_dialogs import *
from header_operations import *
from module_constants import *
####################################################################################################################
# During a dialog, the dialog lines are scanned from top to bottom.
# If the dialog-line is spoken by the player, all the matching lines are displayed for the player to pick from.
# If the dialog-line is spoken by another, the first (top-most) matching line is selected.
#
# Each dialog line contains the following fields:
# 1) Dialogue partner: This should match the person player is talking to.
# Usually this is a troop-id.
# You can also use a party-template-id by appending '|party_tpl' to this field.
# Use the constant 'anyone' if you'd like the line to match anybody.
# Appending '|plyr' to this field means that the actual line is spoken by the player
# Appending '|other(troop_id)' means that this line is spoken by a third person on the scene.
# (You must make sure that this third person is present on the scene)
#
# 2) Starting dialog-state:
# During a dialog there's always an active Dialog-state.
# A dialog-line's starting dialog state must be the same as the active dialog state, for the line to be a possible candidate.
# If the dialog is started by meeting a party on the map, initially, the active dialog state is "start"
# If the dialog is started by speaking to an NPC in a town, initially, the active dialog state is "start"
# If the dialog is started by helping a party defeat another party, initially, the active dialog state is "party_relieved"
# If the dialog is started by liberating a prisoner, initially, the active dialog state is "prisoner_liberated"
# If the dialog is started by defeating a party led by a hero, initially, the active dialog state is "enemy_defeated"
# If the dialog is started by a trigger, initially, the active dialog state is "event_triggered"
# 3) Conditions block (list): This must be a valid operation block. See header_operations.py for reference.
# 4) Dialog Text (string):
# 5) Ending dialog-state:
# If a dialog line is picked, the active dialog-state will become the picked line's ending dialog-state.
# 6) Consequences block (list): This must be a valid operation block. See header_operations.py for reference.
# 7) Voice-over (string): sound filename for the voice over. Leave here empty for no voice over
####################################################################################################################
# Companion "keep out of fighting" dialog chain: lets the player toggle a
# hero's slot_troop_skill_companion flag so they hold back in battle.
dialogs = [
  [anyone|plyr,"member_talk", [
    (troop_get_slot, ":is_skill_companion", "$g_talk_troop", slot_troop_skill_companion),
    (eq, ":is_skill_companion", 0),
    ], "I'd like you to try to keep out of the fighting.", "member_keep_out_fighting",[]],
  [anyone,"member_keep_out_fighting", [], "Oh? Are you sure?", "member_keep_out_fighting_confirm",[]],
  [anyone|plyr,"member_keep_out_fighting_confirm", [], "Yes, you have other skills that are too valuable for me to risk losing you in battle.", "member_keep_out_fighting_yes",[
    (troop_set_slot, "$g_talk_troop", slot_troop_skill_companion, 1),
    ]],
  [anyone|plyr,"member_keep_out_fighting_confirm", [], "Actually, never mind.", "member_keep_out_fighting_no",[]],
  [anyone,"member_keep_out_fighting_yes", [
    (store_conversation_troop,"$g_talk_troop"),
    (troop_is_hero,"$g_talk_troop"),
    (troop_get_slot, ":honorific", "$g_talk_troop", slot_troop_honorific),
    (str_store_string, s5, ":honorific"),
    ], "As you say {s5}. Unless you order me otherwise, I will try to be the last to enter the battle. Anything else?", "member_talk",[]],
  [anyone,"member_keep_out_fighting_no", [], "Very well. Anything else?", "member_talk",[]],
  [anyone|plyr,"member_talk", [
    (troop_get_slot, ":is_skill_companion", "$g_talk_troop", slot_troop_skill_companion),
    (eq, ":is_skill_companion", 1),
    ], "I'd like you to take an active role in battles from now on.", "member_join_in_fighting",[]],
  [anyone,"member_join_in_fighting", [], "I see. Is this definitely what you want?", "member_join_in_fighting_confirm",[]],
  [anyone|plyr,"member_join_in_fighting_confirm", [], "Yes, your skill on the battlefield is what we need now.", "member_join_in_fighting_yes",[
    (troop_set_slot, "$g_talk_troop", slot_troop_skill_companion, 0),
    ]],
  [anyone|plyr,"member_join_in_fighting_confirm", [], "Actually, never mind.", "member_join_in_fighting_no",[]],
  [anyone,"member_join_in_fighting_yes", [
    (store_conversation_troop,"$g_talk_troop"),
    (troop_is_hero,"$g_talk_troop"),
    (troop_get_slot, ":honorific", "$g_talk_troop", slot_troop_honorific),
    (str_store_string, s5, ":honorific"),
    ], "As you command {s5}. I will take my position with the rest of the troops from now on. Anything else?", "member_talk",[]],
  [anyone,"member_join_in_fighting_no", [], "Very well. Anything else?", "member_talk",[]],
]
def add_dialog(dialogs, new_dialog, bottom_offset):
    """Insert new_dialog into dialogs relative to entries with the same state.

    new_dialog[1] is the dialog's input state.  bottom_offset == 0 simply
    appends.  Otherwise the new dialog is inserted in front of the
    bottom_offset-th entry (counted from the bottom) that shares its state;
    if there are fewer matches than bottom_offset it goes before the first
    match, and if there are none it is appended at the end.
    """
    if bottom_offset == 0:
        dialogs.append(new_dialog)
        return
    state = new_dialog[1]
    # enumerate() instead of the Python-2-only xrange(len(...)) loop;
    # works identically on Python 2 and 3.
    indices = [i for i, dialog in enumerate(dialogs) if dialog[1] == state]
    if not indices:
        index = len(dialogs)
    elif len(indices) < bottom_offset:
        index = indices[0]
    else:
        index = indices[len(indices) - bottom_offset]
    dialogs.insert(index, new_dialog)
def modmerge(var_set):
    """Merge this module's dialogs into the game's "dialogs" variable.

    "member_talk" player options are inserted just above the bottom-most
    existing "member_talk" entry so they appear in the menu; everything
    else is appended at the end.

    Raises ValueError if var_set lacks the "dialogs" entry.
    """
    var_name_1 = "dialogs"
    try:
        orig_scripts = var_set[var_name_1]
    except KeyError:
        errstring = "Variable set does not contain expected variable: \"%s\"." % var_name_1
        raise ValueError(errstring)
    for dialog in dialogs:
        offset = 1 if dialog[1] == "member_talk" else 0
        add_dialog(orig_scripts, dialog, offset)
|
nilq/baby-python
|
python
|
#!/usr/bin/python
import sys
import h5py
if __name__ == "__main__":
    # File names come from the command line first, then one per stdin line.
    files = sys.argv[1:]
    files.extend(sys.stdin.readlines())
    for filename in files:  # renamed from 'file' to avoid shadowing the builtin
        filename = filename.strip()
        # BUG FIX: mode must be 'r+' (read/write).  The original opened the
        # file with 'r' (read-only), so the dataset assignment below would
        # fail with "unable to modify read-only file".
        with h5py.File(filename, 'r+') as f:
            # Patch the detector calibration parameter in place.
            f['/entry1/instrument/parameters/y_pixels_per_mm'][0] = 0.321
|
nilq/baby-python
|
python
|
"""
Tests CoreML Scaler converter.
"""
import unittest
import numpy
import coremltools
from sklearn.preprocessing import StandardScaler
from onnxmltools.convert.coreml.convert import convert
from onnxmltools.utils import dump_data_and_model
class TestCoreMLScalerConverter(unittest.TestCase):
    """Round-trip a scikit-learn StandardScaler through CoreML into ONNX."""

    def test_scaler(self):
        """Fit a scaler, convert via CoreML, and check an ONNX model results."""
        training_data = numpy.array(
            [[0, 0, 3], [1, 1, 0], [0, 2, 1], [1, 0, 2]], dtype=numpy.float32
        )
        scaler = StandardScaler()
        scaler.fit(training_data)
        coreml_model = coremltools.converters.sklearn.convert(scaler)
        onnx_model = convert(coreml_model.get_spec())
        self.assertTrue(onnx_model is not None)
        # Persist inputs/model so the runtime comparison harness can replay them.
        dump_data_and_model(
            training_data, scaler, onnx_model, basename="CmlStandardScalerFloat32"
        )
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
|
nilq/baby-python
|
python
|
# Bill-splitting calculator: add a 12% tip to the bill, then split the
# result evenly between the diners.
total_bill = 124.56
procent_10 = 0.10  # alternative tip rates kept for easy experimentation
procent_12 = 0.12
procent_15 = 0.15
split_people = 7

# Bill total including the 12% tip.
tip = total_bill * procent_12 + total_bill
# BUG FIX: the total was printed twice in a row; print it once.
print(tip)

# Each person's share, rounded to cents.
total = tip / float(split_people)
print(round(total, 2))
|
nilq/baby-python
|
python
|
from guardian.core import ObjectPermissionChecker
class ObjectPermissionCheckerViewSetMixin:
    """add a ObjectPermissionChecker based on the accessing user to the serializer context."""

    def get_serializer_context(self):
        """Extend the serializer context with a prefetched 'perm_checker'."""
        ctx = super().get_serializer_context()
        if not self.request:
            return ctx
        checker = ObjectPermissionChecker(user_or_group=self.request.user)
        # One bulk query up front instead of a permission lookup per object.
        checker.prefetch_perms(self.get_queryset())
        ctx['perm_checker'] = checker
        return ctx
|
nilq/baby-python
|
python
|
# --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2015, 2016
# --------------------------------------------------------------------------
"""
This is a problem of building five houses in different locations.
The masonry, roofing, painting, etc. must be scheduled.
Some tasks must necessarily take place before others and these requirements are
expressed through precedence constraints.
There are three workers, and each task requires a worker.
There is also a cash budget which starts with a given balance.
Each task costs a given amount of cash per day which must be available at the start of the task.
A cash payment is received periodically.
The objective is to minimize the overall completion date.
Please refer to documentation for appropriate setup of solving configuration.
"""
from docplex.cp.model import CpoModel, CpoStepFunction, INTERVAL_MAX, INT_MAX
import docplex.cp.utils_visu as visu
#-----------------------------------------------------------------------------
# Initialize the problem data
#-----------------------------------------------------------------------------
# House building task descriptor
class BuildingTask(object):
    """Descriptor for one house-building task: a name and a duration in days."""

    def __init__(self, name, duration):
        # Task label, also used to name the interval variables ("H{house}-{name}").
        self.name = name
        # Processing time of the task, in days.
        self.duration = duration
# List of tasks to be executed for each house
MASONRY = BuildingTask('masonry', 35)
CARPENTRY = BuildingTask('carpentry', 15)
PLUMBING = BuildingTask('plumbing', 40)
CEILING = BuildingTask('ceiling', 15)
ROOFING = BuildingTask('roofing', 5)
PAINTING = BuildingTask('painting', 10)
WINDOWS = BuildingTask('windows', 5)
FACADE = BuildingTask('facade', 10)
GARDEN = BuildingTask('garden', 5)
MOVING = BuildingTask('moving', 5)

# Tasks precedence constraints (each tuple (X, Y) means X ends before start of Y)
PRECEDENCES = ( (MASONRY, CARPENTRY),
                (MASONRY, PLUMBING),
                (MASONRY, CEILING),
                (CARPENTRY, ROOFING),
                (CEILING, PAINTING),
                (ROOFING, WINDOWS),
                (ROOFING, FACADE),
                (PLUMBING, FACADE),
                (ROOFING, GARDEN),
                (PLUMBING, GARDEN),
                (WINDOWS, MOVING),
                (FACADE, MOVING),
                (GARDEN, MOVING),
                (PAINTING, MOVING),
              )

# Number of workers
NB_WORKERS = 3

# List of houses to build. Value is the minimum start date
HOUSES = (31, 0, 90, 120, 90)

# Cash parameters
NB_PAYMENTS = 5
PAYMENT_AMOUNT = 30000
PAYMENT_INTERVAL = 60

#-----------------------------------------------------------------------------
# Prepare the data for modeling
#-----------------------------------------------------------------------------

# Assign an index to tasks
# (the .id attribute is used below to index each house's list of interval vars)
ALL_TASKS = (MASONRY, CARPENTRY, PLUMBING, CEILING, ROOFING, PAINTING, WINDOWS, FACADE, GARDEN, MOVING)
for i in range(len(ALL_TASKS)):
    ALL_TASKS[i].id = i

#-----------------------------------------------------------------------------
# Build the model
#-----------------------------------------------------------------------------

# Create model
mdl = CpoModel()

# Initialize model variable sets
all_tasks = []  # Array of all tasks
desc = dict()  # Dictionary task interval var -> task descriptor
house = dict()  # Dictionary task interval var -> id of the corresponding house
workers_usage = mdl.step_at(0, 0)  # Total worker usage

# Initialize cash function:
# cumulative income of PAYMENT_AMOUNT received every PAYMENT_INTERVAL days.
cash = mdl.step_at(0, 0)
for p in range(NB_PAYMENTS):
    cash += mdl.step_at(PAYMENT_INTERVAL * p, PAYMENT_AMOUNT)
# Utility function
def make_house(loc, rd):
    ''' Create model elements corresponding to the building of one house
    loc: Identification (index) of the house to build
    rd: Min start date

    Appends the house's interval variables to the module-level all_tasks /
    desc / house collections and accumulates into the module-level
    workers_usage and cash step functions.
    '''
    # Create interval variable for each task for this house
    tasks = [mdl.interval_var(size=t.duration,
                              start=(rd, INTERVAL_MAX),
                              name="H{}-{}".format(loc, t.name)) for t in ALL_TASKS]
    all_tasks.extend(tasks)

    # Add precedence constraints
    for p, s in PRECEDENCES:
        mdl.add(mdl.end_before_start(tasks[p.id], tasks[s.id]))

    # The worker/cash step functions are rebound (+=, -=) below, hence the
    # explicit global declarations.
    global workers_usage
    global cash

    # Allocate tasks to workers
    for t in ALL_TASKS:
        desc[tasks[t.id]] = t
        house[tasks[t.id]] = loc
        # Each task occupies one worker for its whole duration...
        workers_usage += mdl.pulse(tasks[t.id], 1)
        # ...and consumes 200 cash per day, paid at the task's start.
        cash -= mdl.step_at_start(tasks[t.id], 200 * t.duration)
# Make houses: one call per house, each with its minimum start date
for i, sd in enumerate(HOUSES):
    make_house(i, sd)

# Number of workers should not be greater than the limit
mdl.add(workers_usage <= NB_WORKERS)

# Cash should not be negative
mdl.add(cash >= 0)

# Minimize overall completion date (makespan over all houses' tasks)
mdl.add(mdl.minimize(mdl.max([mdl.end_of(task) for task in all_tasks])))
#-----------------------------------------------------------------------------
# Solve the model and display the result
#-----------------------------------------------------------------------------
def compact(name):
    """Shorten an interval name like 'H3-garden' to 'G3'.

    Result is the task's first letter, upper-cased, followed by the house
    number taken from after the leading 'H'.
    """
    house_id, task_name = name[1:].split('-', 1)
    return task_name[0].upper() + house_id
# Solve model
print("Solving model....")
msol = mdl.solve(FailLimit=10000, TimeLimit=10)
print("Solution: ")
msol.print_solution()

# Display result
if msol and visu.is_visu_enabled():
    workersF = CpoStepFunction()
    cashF = CpoStepFunction()
    # Rebuild the cash income profile from the same payment parameters used
    # by the model.  CONSISTENCY FIX: this was hard-coded as 5 / 60 / 30000,
    # which would silently drift out of sync with NB_PAYMENTS /
    # PAYMENT_INTERVAL / PAYMENT_AMOUNT above.
    for p in range(NB_PAYMENTS):
        cashF.add_value(PAYMENT_INTERVAL * p, INT_MAX, PAYMENT_AMOUNT)
    for task in all_tasks:
        itv = msol.get_var_solution(task)
        workersF.add_value(itv.get_start(), itv.get_end(), 1)
        # Each task spends 200 cash per day of duration at its start date
        # (get_start() used consistently instead of the .start attribute).
        cashF.add_value(itv.get_start(), INT_MAX, -200 * desc[task].duration)
    visu.timeline('Solution SchedCumul')
    visu.panel(name="Schedule")
    for task in all_tasks:
        visu.interval(msol.get_var_solution(task), house[task], compact(task.get_name()))
    visu.panel(name="Workers")
    visu.function(segments=workersF, style='area')
    visu.panel(name="Cash")
    visu.function(segments=cashF, style='area', color='gold')
    visu.show()
|
nilq/baby-python
|
python
|
import os
from minicps.devices import PLC
from temperature_simulator import TemperatureSimulator
from Logger import hlog
import time
class EnipPLC1(PLC):  # builds upon the tags of the swat example
    """Simulated PLC that publishes a temperature reading as ENIP tag LIT101.

    The main loop repeatedly reads the TemperatureSimulator and sends the
    value out through the minicps ENIP server.
    """

    # These constants are used mostly during setting up of topology
    NAME = 'plc1'
    # BUG FIX: the address previously contained a leading space
    # (' 10.0.2.110'), which is not a valid IP for the ENIP server.
    IP = '10.0.2.110'
    MAC = '00:1D:9C:C7:B0:10'

    # PLC1_PROTOCOL defines type of this PLC (see PLC class in minicps package)
    PLC1_PROTOCOL = {
        'name': 'enip',
        'mode': 1,
        'server': {
            'address': IP,
            # Three instances of the LIT101 tag (ids 1..3); main_loop writes id 3.
            'tags': (
                ('LIT101', 1, 'REAL'),
                ('LIT101', 2, 'REAL'),
                ('LIT101', 3, 'REAL'),
            )
        }
    }

    # This PLC doesn't use data yet
    PLC1_DATA = {
        'TODO': 'TODO',
    }

    # State of this PLC is stored in Sqlite database on this path
    STATE = {
        'name': 'swat_s1',
        'path': 'swat_s1_db.sqlite'
    }

    def __init__(self):
        # Simulator arguments look like (min, max, start) -- TODO confirm
        # against temperature_simulator.TemperatureSimulator.
        self.temperature_simulator = TemperatureSimulator(0.0, 50.0, 5.0)
        PLC.__init__(
            self,
            name='plc1',
            state=EnipPLC1.STATE,
            protocol=EnipPLC1.PLC1_PROTOCOL,
            memory=EnipPLC1.PLC1_DATA,
            disk=EnipPLC1.PLC1_DATA)

    # Executed before main loop is started
    def pre_loop(self, sleep=0.1):
        # NOTE: the original had a bare `print` statement here, which on
        # Python 3 is a no-op expression; it has been removed.
        hlog('DEBUG:plc1 enters pre_loop')
        time.sleep(sleep)

    # Main loop keeps sending ENIP messages with one LIT101 tag and value
    # obtained from temperature simulator.
    def main_loop(self):
        hlog('DEBUG: plc1 enters main_loop.')
        count = 0
        # Bounded loop so the simulation eventually terminates.
        while count <= 1000000:
            lit101 = float(self.temperature_simulator.get_next())
            hlog('DEBUG plc1 lit101: %.5f' % lit101)
            self.send(('LIT101', 3), lit101, EnipPLC1.IP)
            count += 1
        hlog('DEBUG plc1 shutdown')
if __name__ == "__main__":
    hlog('DEBUG plc1 start')
    # Constructing the PLC starts it: the minicps PLC base class drives the
    # pre_loop / main_loop cycle from its constructor.
    plc1 = EnipPLC1()
|
nilq/baby-python
|
python
|
from setuptools import setup, find_packages
# Package metadata for the donkeypart_keras_behavior_cloning part.
setup(name='donkeypart_keras_behavior_cloning',
      version='0.1.3',
      # NOTE(review): description, url and the 'donkey' console script appear
      # to be copied from other donkeycar packages (PCA9685 actuators /
      # donkeycar CLI) — confirm they are intended for this package.
      description='Library to control steering and throttle actuators.',
      long_description="no long description given",
      long_description_content_type="text/markdown",
      url='https://github.com/autorope/donkeypart_PCA9685_actuators',
      author='Will Roscoe',
      author_email='wroscoe@gmail.com',
      license='MIT',
      entry_points={
          'console_scripts': [
              'donkey=donkeycar.management.base:execute_from_command_line',
          ],
      },
      # TensorFlow pinned to 1.11 — this package predates the TF 2.x API.
      install_requires=['numpy',
                        'tensorflow==1.11',
                        ],
      extras_require={'dev': ['pytest-cov']},
      classifiers=[
          'Development Status :: 3 - Alpha',
          'Intended Audience :: Developers',
          'Topic :: Scientific/Engineering :: Artificial Intelligence',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
      ],
      keywords='selfdriving cars donkeycar diyrobocars datastore',
      packages=find_packages(exclude=(['tests', 'docs', 'site', 'env'])),
      )
|
nilq/baby-python
|
python
|
from trading_bot import app, create_app
def init_app():
    """Create the trading-bot application (test configuration not wired up yet)."""
    # TODO add test config class
    create_app()
def reset_managers():
    """Re-create the app and wipe every manager registry back to empty.

    Intended for test isolation between cases.

    NOTE(review): this reaches into the managers' private attributes; a
    public reset()/clear() method on each manager would be more robust.
    """
    create_app()
    app.symbol_manager._symbols = []
    app.exchange_manager._exchanges = []
    app.exchange_manager._exchanges_by_name = {}
    app.indicator_manager._indicators = []
    app.indicator_manager._indicators_by_name = {}
    app.indicator_value_manager._indicator_values = []
    app.indicator_value_manager._indicator_values_by_key = {}
    app.trading_system_manager._trading_systems = []
|
nilq/baby-python
|
python
|
import cv2
import numpy as np
import math
import time
import testAAE
import testAAEWithClassifier
import region
def quantizeAngle(angle):
    """Quantize a gradient orientation (degrees) into one of 8 one-hot codes.

    45 degrees per bin: positive angles map to 1/2/4/8 for
    [0,45) / [45,90) / [90,135) / >=135, negative angles map to
    128/64/32/16 for (-45,0) / (-90,-45] / (-135,-90] / <=-135.

    BUG FIX: the original positive branch tested `angle >= 45` only after
    `angle >= 90` had already succeeded and had an unreachable
    `elif angle >= 135`, so codes 1 and 4 could never be produced.  The
    branch is rewritten to mirror the (correct) negative branch.
    """
    if angle >= 0:
        if angle >= 135:
            quantized = 8
        elif angle >= 90:
            quantized = 4
        elif angle >= 45:
            quantized = 2
        else:
            quantized = 1
    elif angle <= -135:
        quantized = 16
    elif angle <= -90:
        quantized = 32
    elif angle <= -45:
        quantized = 64
    else:
        quantized = 128
    return int(quantized)
def angleFilter(mask, quantized, quantized_flag=False):
    """Keep only edge pixels whose 9x9 neighbourhood has a consistent
    gradient orientation.

    mask           -- binary (0/1) edge mask of shape (m, n)
    quantized      -- per-pixel orientation image; raw angles in degrees, or
                      already-quantized one-hot codes when quantized_flag=True
    quantized_flag -- True when `quantized` already holds quantized codes

    Returns a binary (0/1) uint8 contour image of the retained pixels.
    """
    temp_angle = mask * quantized
    kernal = 9  # neighbourhood window size
    m, n = mask.shape
    hist = {}
    hist_sorted = []
    strong_angle = np.zeros(mask.shape, np.uint8)
    contour = np.zeros(mask.shape, np.uint8)
    # Pairwise similarity score between the 8 orientation bins
    # (5 = same bin, decreasing with angular distance, wrapping around).
    score_map = np.array([[5,3,1,0,0,0,1,3],
                          [3,5,3,1,0,0,0,1],
                          [1,3,5,3,1,0,0,0],
                          [0,1,3,5,3,1,0,0],
                          [0,0,1,3,5,3,1,0],
                          [0,0,0,1,3,5,3,1],
                          [1,0,0,0,1,3,5,3],
                          [3,1,0,0,0,1,3,5]])
    bias = math.floor(kernal / 2)
    for i in range(m):
        for j in range(n):
            if mask[i,j] > 0:
                # Clamp the window to the image borders.
                if i-bias < 0:
                    h_t = 0
                else:
                    h_t = i-bias
                if i+bias > m-1:
                    h_b = m-1
                else:
                    h_b = i+bias
                if j-bias < 0:
                    w_l = 0
                else:
                    w_l = j-bias
                # BUG FIX: the column bound must be clamped against n (width),
                # not m (height) as the original did.
                if j+bias > n-1:
                    w_r = n-1
                else:
                    w_r = j+bias
                temp = temp_angle[h_t:h_b+1, w_l:w_r+1]
                a, b = temp.shape
                temp = temp.flat[:]
                if not quantized_flag:
                    # Quantize raw angles on the fly.
                    for k in range(a*b):
                        if temp[k] > 0:
                            temp[k] = quantizeAngle(temp[k])
                temp = temp.astype(np.uint8)
                temp_ = temp[temp.nonzero()]
                bcounts = np.bincount(temp_)
                strong_temp = np.zeros(a*b)
                score_temp = np.zeros(a*b)
                # BUG FIX: the original wrote `hist.clear` / `hist_sorted.clear`
                # (missing parentheses) — a no-op attribute access.
                hist.clear()
                hist_sorted = []
                # Histogram of orientation codes in the window, most frequent first.
                hist = dict(zip(np.unique(temp_), bcounts[bcounts.nonzero()]))
                hist_sorted = sorted(hist.items(), key=lambda x: x[1], reverse=True)
                max_count = hist_sorted[0][1]
                strong_angle[i,j] = hist_sorted[0][0]
                count = 0
                for c in range(a*b):
                    if temp[c] > 0:
                        # Similarity of each neighbour to this pixel's own
                        # orientation, and to the window's dominant orientation.
                        score_temp[c] = score_map[int(math.log2(quantizeAngle(temp_angle[i,j]))), int(math.log2(temp[c]))]
                        strong_temp[c] = score_map[int(math.log2(strong_angle[i,j])), int(math.log2(temp[c]))]
                        count += 1
                pix_score = np.sum(score_temp)/count
                strong_score = np.sum(strong_temp)/count
                # Keep the pixel when the dominant orientation is well
                # supported and the average similarity is high enough.
                if max_count > 5 and (pix_score > 2 or strong_score > 2):
                    contour[i,j] = 1
    return contour
def preprocessing():
    """Load sample images and compute thresholded Sobel edge masks.

    Returns (sobel_mask_vect, src_vect):
      sobel_mask_vect -- per-image binary Sobel magnitude masks (threshold 160)
      src_vect        -- corresponding illumination-normalized (SQI) images
    """
    total_num = 28          # process images 1..27
    sample_id = 0           # sample directory index
    threshold = 160         # Sobel magnitude threshold for the edge mask
    exposure = 6            # exposure sub-directory ({:03d}-formatted)
    write_flag = False      # True: crop/pad the raw .jpg inputs and write .png
    sobel_mask_vect = []
    src_vect = []
    # 3x3 horizontal / vertical gradient kernels.
    sobel_x =np.array([[-1, 0, 1],[-1, 0, 1],[-1, 0, 1]], dtype=np.float32)
    sobel_y =np.array([[1, 1, 1],[0, 0, 0],[-1, -1, -1]], dtype=np.float32)
    new_img = np.zeros((256,256), np.uint8)
    for pic_num in range(1, total_num):
        if write_flag:
            # First-time path: crop the raw .jpg to a square, pad it into a
            # 256x256 canvas and write it back out as .png.
            src_file = '../data/sample_' + str(sample_id) + '/{:03d}'.format(exposure) + '/' + str(pic_num) + '.jpg'
            output_file = '../data/sample_' + str(sample_id) + '/{:03d}'.format(exposure) + '/' + str(pic_num) + '.png'
            IN_src_file = '../data/sample_' + str(sample_id) + '/{:03d}'.format(exposure) + '_IN/' + 'SQI' + '/' + '{:02d}'.format(pic_num) + '.png'
            # output_file = '../data/sample_' + str(sample_id) + '/{:03d}'.format(exposure) + '_IN/' + 'TT' + '/' + '{:02d}'.format(pic_num) + '.png'
            # region_file = './roi/region_' + str(pic_num) + '.png'
            print(src_file)
            img = cv2.imread(src_file)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            m,n = img.shape
            img = img[0:n]
            new_img[3:253,3:253] = img
            cv2.imwrite(output_file, new_img)
            new_img_copy = new_img.copy()
            IN_img = cv2.imread(IN_src_file)
            IN_img = cv2.cvtColor(IN_img, cv2.COLOR_BGR2GRAY)
            src_vect.append(IN_img)
        else:
            # Normal path: the padded .png already exists, just load it.
            src_file = '../data/sample_' + str(sample_id) + '/{:03d}'.format(exposure) + '/' + str(pic_num) + '.png'
            IN_src_file = '../data/sample_' + str(sample_id) + '/{:03d}'.format(exposure) + '_IN/' + 'SQI' + '/' + '{:02d}'.format(pic_num) + '.png'
            new_img = cv2.imread(src_file)
            new_img = cv2.cvtColor(new_img,cv2.COLOR_BGR2GRAY)
            IN_img = cv2.imread(IN_src_file)
            IN_img = cv2.cvtColor(IN_img, cv2.COLOR_BGR2GRAY)
            src_vect.append(IN_img)
        # NOTE(review): np.float is removed in NumPy >= 1.24; these buffers are
        # overwritten below anyway, so plain float (or np.float64) would do.
        sobel_mag = np.zeros(new_img.shape, np.float)
        sobel_angle = np.zeros(new_img.shape, np.float)
        quantized_angle = np.zeros(new_img.shape, np.uint8)
        sobel_mask = np.zeros(new_img.shape, np.uint8)
        # img_Guassian = cv2.GaussianBlur(new_img,(5,5),0)
        # img_Guassian.astype(np.uint8)
        # m,n = img_Guassian.shape
        # m,n = new_img.shape
        # for i in range(2,m-1):
        # for j in range(2,n-1):
        # Gx = np.sum(new_img[i-1:i+2, j-1:j+2] * sobel_x)
        # Gy = np.sum(new_img[i-1:i+2, j-1:j+2] * sobel_y)
        # sobel_mag[i,j] = math.sqrt(math.pow(Gx,2) + math.pow(Gy,2))
        # sobel_angle[i,j] = math.atan2(Gy, Gx) * 180 / math.pi
        # # quantized_angle[i,j] = quantizeAngle(sobel_angle[i,j])
        # if sobel_mag[i,j] >= threshold:
        # sobel_mask[i,j] = 1
        # contour = angleFilter(sobel_mask, quantized_angle)
        # contour = cv2.blur(contour, (3,3))
        # sobelx = cv2.Sobel(new_img,cv2.CV_32F,1,0) # default ksize=3
        # sobely = cv2.Sobel(new_img,cv2.CV_32F,0,1)
        # Vectorized gradient: filter2D with the hand-built kernels, then
        # magnitude/orientation per pixel.
        sobelx = cv2.filter2D(new_img, cv2.CV_32F, sobel_x)
        sobely = cv2.filter2D(new_img, cv2.CV_32F, sobel_y)
        sobel_mag = np.sqrt(pow(sobelx,2) + pow(sobely,2))
        sobel_angle = np.arctan2(sobely,sobelx) * 180 /math.pi
        sobel_mag = cv2.convertScaleAbs(sobel_mag)
        # Binary mask of strong edges (values 0 / 255).
        _, sobel_mask = cv2.threshold(sobel_mag, threshold, 255, 0)
        # contour = angleFilter(sobel_mask, sobel_angle)
        # contour = cv2.blur(contour, (3,3))
        # sobel_mask = cv2.blur(sobel_mask, (3,3))
        # contour_vect.append(contour)
        # cv2.imshow('sobel', sobel_mask)
        # cv2.waitKey(0)
        sobel_mask_vect.append(sobel_mask)
    return sobel_mask_vect, src_vect
if __name__ == "__main__":
    # Pipeline: Sobel edge masks -> AAE reconstruction -> region/ROI cleanup.
    time_start = time.time()
    sobel_mask_vect, src_vect = preprocessing()
    time_end = time.time()
    print('Proprecessing time cost:{:.3f}'.format(time_end - time_start))
    # for sobel_mask in sobel_mask_vect:
    # # cv2.imshow("sobel",255*sobel_mask.astype(np.uint8))
    # cv2.imshow("sobel",sobel_mask)
    # # cv2.imshow("extend", 255*contour.astype(np.uint8))
    # # cv2.imshow("sub",255*(sobel_mask - contour).astype(np.uint8))
    # cv2.waitKey(0)
    # Run the (adversarial) auto-encoder over the edge masks.
    output_img_vect = testAAE.AEprocessing(sobel_mask_vect)
    # output_img_vect = testAAEWithClassifier.AEprocessing(sobel_mask_vect)
    print('AAE time cost:{:.3f}'.format(time.time() - time_end))
    for i, singleimg in enumerate(output_img_vect):
        # singleimg = np.squeeze(singleimg, axis=(2,))
        singleimg = singleimg.astype(np.uint8)
        src = src_vect[i]
        # cv2.imshow('src',src)
        # cv2.waitKey(0)
        region_file = '../roi/region_{:02d}'.format(i) + '.png'
        mask_file = '../Template/bin_mask/region_{:02d}'.format(i) + '.png'
        # Build a filled region mask from the AAE output, then erode it to
        # trim the boundary.
        mask = region.regionGenerate(singleimg)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3, 3))
        eroded = cv2.erode(mask,kernel)
        eroded_2 = cv2.erode(eroded,kernel)
        eroded_3 = cv2.erode(eroded_2,kernel)
        roi = cv2.bitwise_and(src, src, mask=eroded)
        # Ring between the 1x and 3x erosions: drop dark border pixels there.
        sub = eroded - eroded_3
        m,n = sub.shape
        for row in range(m):
            for col in range(n):
                if sub[row, col] and roi[row, col] < 80:
                    roi[row,col] = 0
                    eroded[row, col] = 0
        background = cv2.bitwise_not(eroded)
        # cv2.imwrite(region_file, roi)
        # cv2.imwrite(mask_file, eroded)
        # cv2.imshow('region', roi+background)
        # cv2.waitKey(0)
    print('Totally time cost:{:.3f}'.format(time.time() - time_start))
|
nilq/baby-python
|
python
|
def read_input():
    """Yield each non-empty line of input.txt, stripped of whitespace.

    For puzzles where each input line is one object.
    """
    with open('input.txt') as fh:
        for raw in fh:
            stripped = raw.strip()
            if stripped:
                yield stripped
def read_input_objs():
    """Yield one string per blank-line-separated object in input.txt.

    Each object's lines are stripped and joined with single spaces.

    BUG FIX: the trailing object (when the file does not end with a blank
    line) was yielded as a raw *list* of lines instead of the joined string
    every other object gets; it is now joined the same way.
    """
    with open('input.txt') as fh:
        obj = []
        for line in fh.readlines():
            if not line.strip():
                # Blank line terminates the current object.
                yield ' '.join(obj).strip()
                obj = []
            else:
                obj.append(line.strip())
        if obj:
            yield ' '.join(obj).strip()
# Advent of Code 2021 day 2, part 1: follow submarine movement commands.
fwd = 0    # horizontal position
depth = 0  # current depth
i = 0      # (unused)
for obj in read_input():
    d, num = obj.split(' ')
    num = int(num)
    if d == 'forward':
        fwd += num
    if d == 'down':
        depth += num
    if d == 'up':
        depth -= num
# Puzzle answer: product of final horizontal position and depth.
print(fwd * depth)
|
nilq/baby-python
|
python
|
import PySimpleGUI as sg
use_custom_titlebar = False  # read by make_window() to choose Menu vs MenubarCustom
def make_window(theme=None):
    """Build and return a demo window showing (nearly) every PySimpleGUI
    element, optionally themed, with either a standard or custom menubar
    depending on the module-level use_custom_titlebar flag."""
    NAME_SIZE = 23

    def name(name):
        # Label followed by a dotted leader, fixed width so the element
        # column lines up across rows.
        dots = NAME_SIZE - len(name) - 2
        return sg.Text(
            name + ' ' + '•' * dots,
            size=(NAME_SIZE, 1),
            justification='r',
            pad=(0, 0),
            font='Courier 10',
        )

    sg.theme(theme)

    # Sample data for the Tree element.
    treedata = sg.TreeData()
    treedata.Insert(
        '',
        '_A_',
        'Tree Item 1',
        [1234],
    )
    treedata.Insert('', '_B_', 'B', [])
    treedata.Insert(
        '_A_',
        '_A1_',
        'Sub Item 1',
        ['can', 'be', 'anything'],
    )

    # Left column: input-style elements.
    layout_l = [
        [name('Text'), sg.Text('Text')],
        [name('Input'), sg.Input(s=15)],
        [name('Multiline'), sg.Multiline(s=(15, 2))],
        [name('Output'), sg.Output(s=(15, 2))],
        [
            name('Combo'),
            sg.Combo(
                sg.theme_list(),
                default_value=sg.theme(),
                s=(15, 22),
                enable_events=True,
                readonly=True,
                k='-COMBO-',
            ),
        ],
        [
            name('OptionMenu'),
            sg.OptionMenu(
                [
                    'OptionMenu',
                ],
                s=(15, 2),
            ),
        ],
        [name('Checkbox'), sg.Checkbox('Checkbox')],
        [name('Radio'), sg.Radio('Radio', 1)],
        [
            name('Spin'),
            sg.Spin(
                [
                    'Spin',
                ],
                s=(15, 2),
            ),
        ],
        [name('Button'), sg.Button('Button')],
        [
            name('ButtonMenu'),
            sg.ButtonMenu('ButtonMenu', sg.MENU_RIGHT_CLICK_EDITME_EXIT),
        ],
        [name('Slider'), sg.Slider((0, 10), orientation='h', s=(10, 15))],
        [
            name('Listbox'),
            sg.Listbox(['Listbox', 'Listbox 2'], no_scrollbar=True, s=(15, 2)),
        ],
        [name('Image'), sg.Image(sg.EMOJI_BASE64_HAPPY_THUMBS_UP)],
        [name('Graph'), sg.Graph((125, 50), (0, 0), (125, 50), k='-GRAPH-')],
    ]

    # Right column: display/container elements.
    layout_r = [
        [
            name('Canvas'),
            sg.Canvas(
                background_color=sg.theme_button_color()[1], size=(125, 50)
            ),
        ],
        [
            name('ProgressBar'),
            sg.ProgressBar(100, orientation='h', s=(10, 20), k='-PBAR-'),
        ],
        [
            name('Table'),
            sg.Table(
                [[1, 2, 3], [4, 5, 6]], ['Col 1', 'Col 2', 'Col 3'], num_rows=2
            ),
        ],
        [
            name('Tree'),
            sg.Tree(
                treedata,
                [
                    'Heading',
                ],
                num_rows=3,
            ),
        ],
        [name('Horizontal Separator'), sg.HSep()],
        [name('Vertical Separator'), sg.VSep()],
        [name('Frame'), sg.Frame('Frame', [[sg.T(s=15)]])],
        [name('Column'), sg.Column([[sg.T(s=15)]])],
        [
            name('Tab, TabGroup'),
            sg.TabGroup(
                [[sg.Tab('Tab1', [[sg.T(s=(15, 2))]]), sg.Tab('Tab2', [[]])]]
            ),
        ],
        [
            name('Pane'),
            sg.Pane([sg.Col([[sg.T('Pane 1')]]), sg.Col([[sg.T('Pane 2')]])]),
        ],
        [name('Push'), sg.Push(), sg.T('Pushed over')],
        [name('VPush'), sg.VPush()],
        [name('Sizer'), sg.Sizer(1, 1)],
        [name('StatusBar'), sg.StatusBar('StatusBar')],
        [name('Sizegrip'), sg.Sizegrip()],
    ]

    # First row is a conditional expression: custom menubar when the flag is
    # set, otherwise the native Menu element.
    layout = [
        [
            sg.MenubarCustom(
                [
                    ['File', ['Exit']],
                    [
                        'Edit',
                        [
                            'Edit Me',
                        ],
                    ],
                ],
                k='-CUST MENUBAR-',
                p=0,
            )
        ]
        if use_custom_titlebar
        else [
            sg.Menu(
                [
                    ['File', ['Exit']],
                    [
                        'Edit',
                        [
                            'Edit Me',
                        ],
                    ],
                ],
                k='-CUST MENUBAR-',
                p=0,
            )
        ],
        [
            sg.Checkbox(
                'Use Custom Titlebar & Menubar',
                use_custom_titlebar,
                enable_events=True,
                k='-USE CUSTOM TITLEBAR-',
            )
        ],
        [
            sg.T(
                'PySimpleGUI Elements - Use Combo to Change Themes',
                font='_ 18',
                justification='c',
                expand_x=True,
            )
        ],
        [sg.Col(layout_l), sg.Col(layout_r)],
    ]

    window = sg.Window(
        'The PySimpleGUI Element List',
        layout,
        finalize=True,
        right_click_menu=sg.MENU_RIGHT_CLICK_EDITME_VER_EXIT,
        keep_on_top=True,
        use_custom_titlebar=use_custom_titlebar,
    )

    window['-PBAR-'].update(30)  # Show 30% complete on ProgressBar
    window['-GRAPH-'].draw_image(
        data=sg.EMOJI_BASE64_HAPPY_JOY, location=(0, 50)
    )  # Draw something in the Graph Element
    return window
# Start of the program: show the window once, wait for the first event,
# then close (no event loop in this trimmed-down demo).
window = make_window()
window.read()
window.close()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for :mod:`orion.algo.tpe`."""
import numpy
import pytest
from scipy.stats import norm
from orion.algo.space import Categorical, Fidelity, Integer, Real, Space
from orion.algo.tpe import (
TPE,
CategoricalSampler,
GMMSampler,
adaptive_parzen_estimator,
compute_max_ei_point,
ramp_up_weights,
)
from orion.core.worker.transformer import build_required_space
@pytest.fixture()
def space():
    """Return an optimization space with one real, one integer and one
    categorical dimension."""
    space = Space()
    dim1 = Real("yolo1", "uniform", -10, 20)
    space.register(dim1)
    dim2 = Integer("yolo2", "uniform", -5, 10)
    space.register(dim2)
    categories = ["a", 0.1, 2, "c"]
    dim3 = Categorical("yolo3", categories)
    space.register(dim3)

    return space
@pytest.fixture
def tpe(space):
    """Return an instance of TPE, seeded for reproducibility."""
    return TPE(space, seed=1)
def test_compute_max_ei_point():
    """Test that max ei point is computed correctly"""
    points = numpy.linspace(-3, 3, num=10)
    below_likelis = numpy.linspace(0.5, 0.9, num=10)
    above_likes = numpy.linspace(0.2, 0.5, num=10)

    numpy.random.shuffle(below_likelis)
    numpy.random.shuffle(above_likes)
    # The EI-maximizing point is the one with the largest (log-)likelihood
    # difference between the "below" and "above" models.
    max_ei_index = (below_likelis - above_likes).argmax()

    max_ei_point = compute_max_ei_point(points, below_likelis, above_likes)
    assert max_ei_point == points[max_ei_index]
def test_ramp_up_weights():
    """Test TPE adjust observed points correctly"""
    # equal_weight=True: every observation gets full weight.
    weights = ramp_up_weights(25, 15, True)
    assert len(weights) == 25
    assert numpy.all(weights == 1.0)

    # More observations than flat_num: the oldest ones ramp up linearly,
    # only the most recent flat_num (here 15) get full weight.
    weights = ramp_up_weights(25, 15, False)
    assert len(weights) == 25
    assert numpy.all(weights[:10] == (numpy.linspace(1.0 / 25, 1.0, num=10)))
    assert numpy.all(weights[10:] == 1.0)

    # Fewer observations than flat_num: everything gets full weight.
    weights = ramp_up_weights(10, 15, False)
    assert len(weights) == 10
    assert numpy.all(weights == 1.0)

    # flat_num=0: a pure linear ramp over all observations.
    weights = ramp_up_weights(25, 0, False)
    assert len(weights) == 25
    assert numpy.all(weights == (numpy.linspace(1.0 / 25, 1.0, num=25)))
def test_adaptive_parzen_normal_estimator():
    """Test adaptive parzen estimator"""
    low = -1
    high = 5

    # Single observation: the estimator is the observation plus the prior
    # component (mu = midpoint 2, sigma = range 6), equally weighted.
    obs_mus = [1.2]
    mus, sigmas, weights = adaptive_parzen_estimator(
        obs_mus, low, high, prior_weight=1.0, equal_weight=False, flat_num=25
    )

    assert list(mus) == [1.2, 2]
    assert list(sigmas) == [3, 6]
    assert list(weights) == [1.0 / 2, 1.0 / 2]

    # prior_weight scales only the prior component's weight.
    obs_mus = [3.4]
    mus, sigmas, weights = adaptive_parzen_estimator(
        obs_mus, low, high, prior_weight=0.5, equal_weight=False, flat_num=25
    )

    assert list(mus) == [2, 3.4]
    assert list(sigmas) == [6, 3]
    assert list(weights) == [0.5 / 1.5, 1.0 / 1.5]

    # 30 observations with flat_num=25: the 5 oldest ramp up linearly,
    # the rest (plus the prior) get full weight; sigmas are clipped.
    obs_mus = numpy.linspace(-1, 5, num=30, endpoint=False)
    mus, sigmas, weights = adaptive_parzen_estimator(
        obs_mus, low, high, prior_weight=1.0, equal_weight=False, flat_num=25
    )

    ramp = numpy.linspace(1.0 / 30, 1.0, num=30 - 25)
    full = numpy.ones(25 + 1)
    all_weights = numpy.concatenate([ramp, full])

    assert len(mus) == len(sigmas) == len(weights) == 30 + 1
    assert numpy.all(weights[: 30 - 25] == ramp / all_weights.sum())
    assert numpy.all(weights[30 - 25 :] == 1 / all_weights.sum())
    assert numpy.all(sigmas == 6 / 10)
def test_adaptive_parzen_normal_estimator_weight():
    """Test the weight for the normal components"""
    obs_mus = numpy.linspace(-1, 5, num=30, endpoint=False)
    low = -1
    high = 5

    # equal weight: every component (including the prior) weighs the same.
    mus, sigmas, weights = adaptive_parzen_estimator(
        obs_mus, low, high, prior_weight=1.0, equal_weight=True, flat_num=25
    )
    assert numpy.all(weights == 1 / 31)
    assert numpy.all(sigmas == 6 / 10)

    # prior weight: the prior component carries prior_weight, the 5 oldest
    # observations ramp up linearly, the rest get full weight.
    mus, sigmas, weights = adaptive_parzen_estimator(
        obs_mus, low, high, prior_weight=0.5, equal_weight=False, flat_num=25
    )

    ramp = numpy.linspace(1.0 / 30, 1.0, num=30 - 25)
    full = numpy.ones(25 + 1)
    all_weights = numpy.concatenate([ramp, full])
    prior_pos = numpy.searchsorted(mus, 2)
    all_weights[prior_pos] = 0.5

    assert numpy.all(
        weights[: 30 - 25]
        == (numpy.linspace(1.0 / 30, 1.0, num=30 - 25) / all_weights.sum())
    )
    # TYPO FIX: this slice previously started at `33 - 25` (= 8), silently
    # skipping indices 5-7; every sibling assertion uses `30 - 25` (= 5).
    assert numpy.all(weights[30 - 25 : prior_pos] == 1 / all_weights.sum())
    assert weights[prior_pos] == 0.5 / all_weights.sum()
    assert numpy.all(weights[prior_pos + 1 :] == 1 / all_weights.sum())
    assert numpy.all(sigmas == 6 / 10)

    # full weights number: a smaller flat_num widens the linear ramp.
    mus, sigmas, weights = adaptive_parzen_estimator(
        obs_mus, low, high, prior_weight=1.0, equal_weight=False, flat_num=15
    )

    ramp = numpy.linspace(1.0 / 30, 1.0, num=30 - 15)
    full = numpy.ones(15 + 1)
    all_weights = numpy.concatenate([ramp, full])
    prior_pos = numpy.searchsorted(mus, 2)
    all_weights[prior_pos] = 1.0

    assert numpy.all(
        weights[: 30 - 15]
        == (numpy.linspace(1.0 / 30, 1.0, num=30 - 15) / all_weights.sum())
    )
    assert numpy.all(weights[30 - 15 :] == 1 / all_weights.sum())
    assert numpy.all(sigmas == 6 / 10)
def test_adaptive_parzen_normal_estimator_sigma_clip():
    """Test that the magic clip of sigmas for parzen estimator"""
    low = -1
    high = 5

    # Few observations (8): all weights equal, sigma clipped at range/8.
    obs_mus = numpy.linspace(-1, 5, num=8, endpoint=False)
    mus, sigmas, weights = adaptive_parzen_estimator(
        obs_mus, low, high, prior_weight=1.0, equal_weight=False, flat_num=25
    )
    assert len(mus) == len(sigmas) == len(weights) == 8 + 1
    assert numpy.all(weights == 1 / 9)
    assert numpy.all(sigmas == 6 / 8)

    # 30 observations: sigmas bounded within [range/10, range].
    obs_mus = numpy.random.uniform(-1, 5, 30)
    mus, sigmas, weights = adaptive_parzen_estimator(
        obs_mus, low, high, prior_weight=1.0, equal_weight=False, flat_num=25
    )

    assert len(mus) == len(sigmas) == len(weights) == 30 + 1
    assert numpy.all(weights[-25:] == weights[-1])
    assert numpy.all(sigmas <= 6) and numpy.all(sigmas >= 6 / 10)

    # 400 observations: lower clip tightens to range/20.
    obs_mus = numpy.random.uniform(-1, 5, 400)
    mus, sigmas, weights = adaptive_parzen_estimator(
        obs_mus, low, high, prior_weight=1.0, equal_weight=False, flat_num=25
    )

    assert len(mus) == len(sigmas) == len(weights) == 400 + 1
    assert numpy.all(weights[-25:] == weights[-1])
    assert numpy.all(sigmas <= 6) and numpy.all(sigmas >= 6 / 20)

    # 10000 observations: lower clip tightens further to range/100.
    obs_mus = numpy.random.uniform(-1, 5, 10000)
    mus, sigmas, weights = adaptive_parzen_estimator(
        obs_mus, low, high, prior_weight=1.0, equal_weight=False, flat_num=25
    )

    assert len(mus) == len(sigmas) == len(weights) == 10000 + 1
    assert numpy.all(weights[-25:] == weights[-1])
    assert numpy.all(sigmas <= 6) and numpy.all(sigmas >= 6 / 100)
class TestCategoricalSampler:
    """Tests for TPE Categorical Sampler"""

    def test_cat_sampler_creation(self, tpe):
        """Test CategoricalSampler creation"""
        obs = [0, 3, 9]
        choices = list(range(-5, 5))
        cat_sampler = CategoricalSampler(tpe, obs, choices)
        assert len(cat_sampler.weights) == len(choices)

        obs = [0, 3, 9]
        choices = ["a", "b", 11, 15, 17, 18, 19, 20, 25, "c"]
        cat_sampler = CategoricalSampler(tpe, obs, choices)

        assert len(cat_sampler.weights) == len(choices)

        # equal_weight=True: weights are smoothed observation counts
        # (+prior_weight per choice), normalized.
        tpe.equal_weight = True
        tpe.prior_weight = 1.0
        obs = numpy.random.randint(0, 10, 100)
        cat_sampler = CategoricalSampler(tpe, obs, choices)
        counts_obs = numpy.bincount(obs) + 1.0
        weights = counts_obs / counts_obs.sum()

        assert numpy.all(cat_sampler.weights == weights)

        # equal_weight=False: older observations get linearly ramped-down
        # weights; only the last full_weight_num observations count fully.
        tpe.equal_weight = False
        tpe.prior_weight = 0.5
        tpe.full_weight_num = 30
        obs = numpy.random.randint(0, 10, 100)

        cat_sampler = CategoricalSampler(tpe, obs, choices)

        ramp = numpy.linspace(1.0 / 100, 1.0, num=100 - 30)
        full = numpy.ones(30)
        ramp_weights = numpy.concatenate([ramp, full])

        counts_obs = numpy.bincount(obs, weights=ramp_weights) + 0.5
        weights = counts_obs / counts_obs.sum()

        assert numpy.all(cat_sampler.weights == weights)

    def test_sample(self, tpe):
        """Test CategoricalSampler sample function"""
        obs = numpy.random.randint(0, 10, 100)
        choices = ["a", "b", 11, 15, 17, 18, 19, 20, 25, "c"]
        cat_sampler = CategoricalSampler(tpe, obs, choices)
        points = cat_sampler.sample(25)

        assert len(points) == 25
        assert numpy.all(points >= 0)
        assert numpy.all(points < 10)

        # With strongly skewed weights, empirical sampling frequencies must
        # follow the weight ordering.
        weights = numpy.linspace(1, 10, num=10) ** 3
        numpy.random.shuffle(weights)
        weights = weights / weights.sum()
        cat_sampler = CategoricalSampler(tpe, obs, choices)
        cat_sampler.weights = weights

        points = cat_sampler.sample(10000)
        points = numpy.array(points)
        hist = numpy.bincount(points)

        assert numpy.all(hist.argsort() == weights.argsort())
        assert len(points) == 10000
        assert numpy.all(points >= 0)
        assert numpy.all(points < 10)

    def test_get_loglikelis(self, tpe):
        """Test to get log likelis of points"""
        obs = numpy.random.randint(0, 10, 100)
        choices = ["a", "b", 11, 15, 17, 18, 19, 20, 25, "c"]
        cat_sampler = CategoricalSampler(tpe, obs, choices)

        points = cat_sampler.sample(25)

        # Log-likelihood of a sampled index is simply log(weight[index]).
        likelis = cat_sampler.get_loglikelis(points)

        assert numpy.all(
            likelis == numpy.log(numpy.asarray(cat_sampler.weights)[points])
        )
class TestGMMSampler:
    """Tests for TPE GMM Sampler"""

    def test_gmm_sampler_creation(self, tpe):
        """Test GMMSampler creation"""
        mus = numpy.linspace(-3, 3, num=12, endpoint=False)
        sigmas = [0.5] * 12

        gmm_sampler = GMMSampler(tpe, mus, sigmas, -3, 3)

        assert len(gmm_sampler.weights) == 12
        assert len(gmm_sampler.pdfs) == 12

    def test_sample(self, tpe):
        """Test GMMSampler sample function"""
        mus = numpy.linspace(-3, 3, num=12, endpoint=False)
        sigmas = [0.5] * 12

        gmm_sampler = GMMSampler(tpe, mus, sigmas, -3, 3)
        points = gmm_sampler.sample(25)
        points = numpy.array(points)

        assert len(points) <= 25
        assert numpy.all(points >= -3)
        assert numpy.all(points < 3)

        # With near-delta components and skewed weights, sampling
        # frequencies must follow the weight ordering.
        mus = numpy.linspace(-10, 10, num=10, endpoint=False)
        sigmas = [0.00001] * 10
        weights = numpy.linspace(1, 10, num=10) ** 3
        numpy.random.shuffle(weights)
        weights = weights / weights.sum()

        gmm_sampler = GMMSampler(tpe, mus, sigmas, -11, 9, weights)
        points = gmm_sampler.sample(10000)
        points = numpy.array(points)
        hist = numpy.histogram(points, bins=[-11, -9, -7, -5, -3, -1, 1, 3, 5, 7, 9])

        assert numpy.all(hist[0].argsort() == numpy.array(weights).argsort())
        assert numpy.all(points >= -11)
        assert numpy.all(points < 9)

    def test_get_loglikelis(self, tpe):
        """Test to get log likelis of points

        BUG FIX: this test previously did not request the ``tpe`` fixture, so
        the module-level fixture *function* object was passed to GMMSampler.
        It also compared a list of likelihoods against a bare scalar (which
        only passed by accident via numpy broadcasting).
        """
        mus = numpy.linspace(-10, 10, num=10, endpoint=False)
        weights = numpy.linspace(1, 10, num=10) ** 3
        numpy.random.shuffle(weights)
        weights = weights / weights.sum()

        # Near-delta components: likelihood at a mean is dominated by its
        # own weighted pdf.
        sigmas = [0.00001] * 10
        gmm_sampler = GMMSampler(tpe, mus, sigmas, -11, 9, weights)

        points = [mus[7]]
        pdf = norm(mus[7], sigmas[7])
        point_likeli = numpy.log(pdf.pdf(mus[7]) * weights[7])
        likelis = gmm_sampler.get_loglikelis(points)

        assert list(likelis) == [point_likeli]
        assert likelis[0] == point_likeli

        # Wide components: likelihood is the log of the weighted pdf sum.
        sigmas = [2] * 10
        gmm_sampler = GMMSampler(tpe, mus, sigmas, -11, 9, weights)

        log_pdf = []
        pdfs = []
        for i in range(10):
            pdfs.append(norm(mus[i], sigmas[i]))
        for pdf, weight in zip(pdfs, weights):
            log_pdf.append(numpy.log(pdf.pdf(0) * weight))
        point_likeli = numpy.log(numpy.sum(numpy.exp(log_pdf)))

        points = numpy.random.uniform(-11, 9, 30)
        points = numpy.insert(points, 10, 0)
        likelis = gmm_sampler.get_loglikelis(points)

        # Compare at 10 significant digits to tolerate float summation order.
        point_likeli = numpy.format_float_scientific(point_likeli, precision=10)
        gmm_likeli = numpy.format_float_scientific(likelis[10], precision=10)
        assert point_likeli == gmm_likeli
        assert len(likelis) == len(points)
class TestTPE:
    """Tests for the algo TPE."""

    def test_seed_rng(self, tpe):
        """Test that algo is seeded properly"""
        tpe.seed_rng(1)
        a = tpe.suggest(1)[0]
        # Consecutive suggestions differ, but re-seeding replays the first.
        assert not numpy.allclose(a, tpe.suggest(1)[0])

        tpe.seed_rng(1)
        assert numpy.allclose(a, tpe.suggest(1)[0])

    def test_set_state(self, tpe):
        """Test that state is reset properly"""
        tpe.seed_rng(1)
        state = tpe.state_dict
        a = tpe.suggest(1)[0]
        assert not numpy.allclose(a, tpe.suggest(1)[0])

        # Restoring the saved state must replay the same suggestion.
        tpe.set_state(state)
        assert numpy.allclose(a, tpe.suggest(1)[0])

    def test_unsupported_space(self):
        """Test tpe only work for supported search space"""
        # uniform / reciprocal / categorical / fidelity dims are accepted
        space = Space()
        dim1 = Real("yolo1", "uniform", -10, 10)
        space.register(dim1)
        dim2 = Real("yolo2", "reciprocal", 10, 20)
        space.register(dim2)
        categories = ["a", 0.1, 2, "c"]
        dim3 = Categorical("yolo3", categories)
        space.register(dim3)
        dim4 = Fidelity("epoch", 1, 9, 3)
        space.register(dim4)
        TPE(space)

        # a normal-distributed dimension is unsupported and must raise
        space = Space()
        dim = Real("yolo1", "norm", 0.9)
        space.register(dim)

        with pytest.raises(ValueError) as ex:
            tpe = TPE(space)
            tpe.space = build_required_space(
                space, shape_requirement=TPE.requires_shape
            )

        assert (
            "TPE now only supports uniform, loguniform, uniform discrete and choices"
            in str(ex.value)
        )

    def test_split_trials(self, tpe):
        """Test observed trials can be split based on TPE gamma"""
        space = Space()
        dim1 = Real("yolo1", "uniform", -3, 6)
        space.register(dim1)
        tpe.space = space

        points = numpy.linspace(-3, 3, num=10, endpoint=False)
        results = numpy.linspace(0, 1, num=10, endpoint=False)
        points_results = list(zip(points, results))
        # shuffle so the split depends on objectives, not insertion order
        numpy.random.shuffle(points_results)
        points, results = zip(*points_results)
        for point, result in zip(points, results):
            tpe.observe([[point]], [{"objective": result}])

        # gamma = 0.25 -> the best 3 of 10 trials fall "below"
        tpe.gamma = 0.25
        below_points, above_points = tpe.split_trials()

        assert below_points == [[-3.0], [-2.4], [-1.8]]
        assert len(above_points) == 7

        # gamma = 0.2 -> the best 2 of 10 trials fall "below"
        tpe.gamma = 0.2
        below_points, above_points = tpe.split_trials()

        assert below_points == [[-3.0], [-2.4]]
        assert len(above_points) == 8

    def test_sample_int_dimension(self):
        """Test sample values for a integer dimension"""
        space = Space()
        dim1 = Integer("yolo1", "uniform", -10, 20)
        space.register(dim1)

        dim2 = Integer("yolo2", "uniform", -5, 10, shape=(2))
        space.register(dim2)

        tpe = TPE(space)

        obs_points = numpy.random.randint(-10, 10, 100)
        below_points = [obs_points[:25]]
        above_points = [obs_points[25:]]
        points = tpe.sample_one_dimension(
            dim1, 1, below_points, above_points, tpe._sample_int_point
        )
        points = numpy.asarray(points)
        assert len(points) == 1
        assert all(points >= -10)
        assert all(points < 10)

        # with all "good" observations negative, samples stay negative
        obs_points_below = numpy.random.randint(-10, 0, 25).reshape(1, 25)
        obs_points_above = numpy.random.randint(0, 10, 75).reshape(1, 75)
        points = tpe.sample_one_dimension(
            dim1, 1, obs_points_below, obs_points_above, tpe._sample_int_point
        )
        points = numpy.asarray(points)
        assert len(points) == 1
        assert all(points >= -10)
        assert all(points < 0)

        # shape-(2) dimension: one sampled value per shape entry
        obs_points = numpy.random.randint(-5, 5, 100)
        below_points = [obs_points[:25], obs_points[25:50]]
        above_points = [obs_points[50:75], obs_points[75:]]
        points = tpe.sample_one_dimension(
            dim2, 2, below_points, above_points, tpe._sample_int_point
        )
        points = numpy.asarray(points)
        assert len(points) == 2
        assert all(points >= -10)
        assert all(points < 10)

        # without EI candidates nothing can be sampled
        tpe.n_ei_candidates = 0
        points = tpe.sample_one_dimension(
            dim2, 2, below_points, above_points, tpe._sample_int_point
        )
        assert len(points) == 0

    def test_sample_categorical_dimension(self):
        """Test sample values for a categorical dimension"""
        space = Space()
        categories = ["a", "b", 11, 15, 17, 18, 19, 20, 25, "c"]
        dim1 = Categorical("yolo1", categories)
        space.register(dim1)
        dim2 = Categorical("yolo2", categories, shape=(2))
        space.register(dim2)

        tpe = TPE(space)

        obs_points = numpy.random.randint(0, 10, 100)
        obs_points = [categories[point] for point in obs_points]
        below_points = [obs_points[:25]]
        above_points = [obs_points[25:]]
        points = tpe.sample_one_dimension(
            dim1, 1, below_points, above_points, tpe._sample_categorical_point
        )
        assert len(points) == 1
        assert points[0] in categories

        # when "good" observations cover only the first 3 categories,
        # the sample comes from those categories
        obs_points_below = numpy.random.randint(0, 3, 25)
        obs_points_above = numpy.random.randint(3, 10, 75)
        below_points = [[categories[point] for point in obs_points_below]]
        above_points = [[categories[point] for point in obs_points_above]]
        points = tpe.sample_one_dimension(
            dim1, 1, below_points, above_points, tpe._sample_categorical_point
        )
        assert len(points) == 1
        assert points[0] in categories[:3]

        # shape-(2) dimension: one sampled category per shape entry
        obs_points = numpy.random.randint(0, 10, 100)
        obs_points = [categories[point] for point in obs_points]
        below_points = [obs_points[:25], obs_points[25:50]]
        above_points = [obs_points[50:75], obs_points[75:]]
        points = tpe.sample_one_dimension(
            dim2, 2, below_points, above_points, tpe._sample_categorical_point
        )
        assert len(points) == 2
        assert points[0] in categories
        assert points[1] in categories

        # without EI candidates nothing can be sampled
        tpe.n_ei_candidates = 0
        points = tpe.sample_one_dimension(
            dim2, 2, below_points, above_points, tpe._sample_categorical_point
        )
        assert len(points) == 0

    def test_sample_real_dimension(self):
        """Test sample values for a real dimension"""
        space = Space()
        dim1 = Real("yolo1", "uniform", -10, 20)
        space.register(dim1)
        dim2 = Real("yolo2", "uniform", -5, 10, shape=(2))
        space.register(dim2)
        dim3 = Real("yolo3", "reciprocal", 1, 20)
        space.register(dim3)

        tpe = TPE(space)
        points = numpy.random.uniform(-10, 10, 20)
        below_points = [points[:8]]
        above_points = [points[8:]]
        points = tpe._sample_real_dimension(dim1, 1, below_points, above_points)
        points = numpy.asarray(points)
        assert len(points) == 1
        assert all(points >= -10)
        assert all(points < 10)

        # reciprocal (log-uniform) dimension
        points = numpy.random.uniform(1, 20, 20)
        below_points = [points[:8]]
        above_points = [points[8:]]
        points = tpe._sample_real_dimension(dim3, 1, below_points, above_points)
        points = numpy.asarray(points)
        assert len(points) == 1
        assert all(points >= 1)
        assert all(points < 20)

        # with all "good" observations negative, samples stay negative
        below_points = numpy.random.uniform(-10, 0, 25).reshape(1, 25)
        above_points = numpy.random.uniform(0, 10, 75).reshape(1, 75)
        points = tpe._sample_real_dimension(dim1, 1, below_points, above_points)
        points = numpy.asarray(points)
        assert len(points) == 1
        assert all(points >= -10)
        assert all(points < 0)

        # shape-(2) dimension: one sampled value per shape entry
        points = numpy.random.uniform(-5, 5, 32)
        below_points = [points[:8], points[8:16]]
        above_points = [points[16:24], points[24:]]
        points = tpe._sample_real_dimension(dim2, 2, below_points, above_points)
        points = numpy.asarray(points)
        assert len(points) == 2
        assert all(points >= -10)
        assert all(points < 10)

        # without EI candidates nothing can be sampled
        tpe.n_ei_candidates = 0
        points = tpe._sample_real_dimension(dim2, 2, below_points, above_points)
        assert len(points) == 0

    def test_suggest(self, tpe):
        """Test suggest with no shape dimensions"""
        tpe.n_initial_points = 10
        results = numpy.random.random(10)
        # exhaust the random-initialisation phase while observing results
        for i in range(10):
            point = tpe.suggest(1)
            assert len(point) == 1
            assert len(point[0]) == 3
            assert not isinstance(point[0][0], tuple)
            tpe.observe(point, [{"objective": results[i]}])

        # first model-based suggestion has the same structure
        point = tpe.suggest(1)
        assert len(point) == 1
        assert len(point[0]) == 3
        assert not isinstance(point[0][0], tuple)

    def test_1d_shape(self, tpe):
        """Test suggest with 1D shape dimensions"""
        space = Space()
        dim1 = Real("yolo1", "uniform", -3, 6, shape=(2))
        space.register(dim1)
        dim2 = Real("yolo2", "uniform", -2, 4)
        space.register(dim2)
        tpe.space = space

        tpe.n_initial_points = 10
        results = numpy.random.random(10)
        for i in range(10):
            point = tpe.suggest(1)
            assert len(point) == 1
            assert len(point[0]) == 2
            # the shaped dimension yields a 2-element value
            assert len(point[0][0]) == 2
            tpe.observe(point, [{"objective": results[i]}])

        point = tpe.suggest(1)
        assert len(point) == 1
        assert len(point[0]) == 2
        assert len(point[0][0]) == 2

    def test_suggest_initial_points(self, tpe, monkeypatch):
        """Test that initial points can be sampled correctly"""
        points = [(i, i - 6, "c") for i in range(1, 12)]

        global index
        index = 0

        def sample(num=1, seed=None):
            # deterministic stand-in for space.sample handing out `points`
            # in order; `index` tracks how many have been consumed
            global index
            pts = points[index : index + num]
            index += num
            return pts

        monkeypatch.setattr(tpe.space, "sample", sample)

        tpe.n_initial_points = 10
        results = numpy.random.random(10)
        # the first n_initial_points suggestions come straight from space.sample
        for i in range(1, 11):
            point = tpe.suggest(1)[0]
            assert point == (i, i - 6, "c")
            tpe.observe([point], [{"objective": results[i - 1]}])

        # afterwards the model (not space.sample) produces suggestions
        point = tpe.suggest(1)[0]
        assert point != (11, 5, "c")

    def test_suggest_ei_candidates(self, tpe):
        """Test suggest with no shape dimensions"""
        tpe.n_initial_points = 2
        # with zero EI candidates the model phase cannot propose anything
        tpe.n_ei_candidates = 0

        results = numpy.random.random(2)
        for i in range(2):
            point = tpe.suggest(1)
            assert len(point) == 1
            assert len(point[0]) == 3
            assert not isinstance(point[0][0], tuple)
            tpe.observe(point, [{"objective": results[i]}])

        point = tpe.suggest(1)
        assert not point

        # restoring a positive candidate count makes suggestions work again
        tpe.n_ei_candidates = 24
        point = tpe.suggest(1)
        assert len(point) > 0
|
nilq/baby-python
|
python
|
# Simple "dodge the falling chickens" pygame script.
import pygame
import random

# Fixed window: 360 px wide, 600 px tall.
screen_size = [360, 600]
screen = pygame.display.set_mode(screen_size)
pygame.font.init()

# Art assets are loaded from the working directory.
background = pygame.image.load('background.png')
user = pygame.image.load('user.png')
chicken = pygame.image.load('chicken.png')
# Font cache: the original built a new SysFont on every call, i.e. on every
# frame of the game loop, which is needlessly expensive.
_score_font = None


def display_score(score):
    """Render the current score in the top-left corner of the screen.

    :param score: current integer score to display
    """
    global _score_font
    if _score_font is None:
        _score_font = pygame.font.SysFont('Comic Sans MS', 30)
    score_text = 'Score: ' + str(score)
    text_img = _score_font.render(score_text, True, (0, 255, 0))
    screen.blit(text_img, [20, 10])
def random_offset():
    """Return a random negative y offset in [-1500, -100] so a chicken
    respawns somewhere above the visible screen."""
    return -random.randint(100, 1500)
# Vertical positions of the three chickens; each starts at a random negative
# offset so they enter the screen at staggered times.
chicken_y = [random_offset(), random_offset(), random_offset()]
user_x = 150  # player's horizontal position in pixels
score = 0  # running score: dodges add points, crashes subtract
def crashed(idx):
    """Handle a collision with chicken ``idx``: apply the score penalty,
    respawn that chicken above the screen, and end the game once the
    score drops below -500."""
    global score
    global keep_alive

    score -= 50
    chicken_y[idx] = random_offset()
    if score < -500:
        keep_alive = False
def update_chicken_pos(idx):
    """Advance chicken ``idx`` by one step; when it has fallen past the
    bottom of the screen, award points and respawn it above the top."""
    global score
    if chicken_y[idx] <= 600:
        # still on (or above) screen: keep falling
        chicken_y[idx] += 5
    else:
        # successfully dodged: respawn and score
        chicken_y[idx] = random_offset()
        score += 5
        print('score', score)
keep_alive = True  # main-loop flag; cleared by crashed() on game over
clock = pygame.time.Clock()
while keep_alive:
    # drain the event queue (keeps the window responsive); input is read
    # from the key state instead of individual events
    pygame.event.get()
    keys = pygame.key.get_pressed()
    if keys[pygame.K_RIGHT] and user_x < 280:
        user_x = user_x + 10
    elif keys[pygame.K_LEFT] and user_x > 0:
        user_x = user_x - 10
    # advance all three chickens
    update_chicken_pos(0)
    update_chicken_pos(1)
    update_chicken_pos(2)
    # redraw the scene back-to-front
    screen.blit(background, [0, 0])
    screen.blit(user, [user_x, 520])
    screen.blit(chicken, [0, chicken_y[0]])
    screen.blit(chicken, [150, chicken_y[1]])
    screen.blit(chicken, [280, chicken_y[2]])
    # collision checks: a chicken low enough plus the player in its column
    if chicken_y[0] > 500 and user_x < 70:
        crashed(0)
    if chicken_y[1] > 500 and user_x > 80 and user_x < 200:
        crashed(1)
    if chicken_y[2] > 500 and user_x > 220:
        crashed(2)
    display_score(score)
    pygame.display.update()
    clock.tick(60)  # cap at 60 FPS
|
nilq/baby-python
|
python
|
from flask import request, jsonify, Blueprint
from flask_jwt_extended import (
create_access_token,
create_refresh_token,
jwt_refresh_token_required,
get_jwt_identity
)
from flasgger import swag_from
from myapi.models import User
from myapi.extensions import pwd_context, jwt
from myapi.api.doc.login_doc import login_post
blueprint = Blueprint('auth', __name__, url_prefix='/auth')
@blueprint.route('/login', methods=['POST'])
@swag_from(login_post)
def login():
    """Authenticate user and return token
    ---
    tags:
      - Login
    """
    # Credentials arrive as form fields (not JSON); dead commented-out JSON
    # handling was removed.
    username = request.form.get('username', None)
    password = request.form.get('password', None)
    if not username or not password:
        return jsonify({"msg": "Missing username or password"}), 400

    user = User.query.filter_by(username=username).first()
    # NOTE(review): 400 is kept for backward compatibility with existing
    # clients, although 401 is the conventional status for bad credentials.
    if user is None or not pwd_context.verify(password, user.password):
        return jsonify({"msg": "Bad credentials"}), 400

    # Issue a short-lived access token plus a refresh token.
    access_token = create_access_token(identity=user.id)
    refresh_token = create_refresh_token(identity=user.id)
    ret = {
        'access_token': access_token,
        'refresh_token': refresh_token
    }
    return jsonify(ret), 200
@blueprint.route('/refresh', methods=['POST'])
@jwt_refresh_token_required
def refresh():
    # Exchange a valid refresh token for a fresh access token.
    identity = get_jwt_identity()
    payload = {'access_token': create_access_token(identity=identity)}
    return jsonify(payload), 200
@jwt.user_loader_callback_loader
def user_loader_callback(identity):
    # Resolve the JWT identity (the user id stored in the token) to the full
    # User object so views can access the current user directly.
    return User.query.get(identity)
|
nilq/baby-python
|
python
|
import psycopg2
import os
from dotenv import load_dotenv
import sqlite3
import pandas as pd
import datetime
from psycopg2.extras import execute_values
load_dotenv()

# connecting to our elephant sql rpg database
# credentials come from the environment (.env); the 'oops' defaults make a
# missing variable obvious when the connection fails
RPG_DB_NAME = os.getenv('RPG_DB_NAME', default='oops')
RPG_DB_USER = os.getenv('RPG_DB_USER', default='oops')
RPG_DB_PASSWORD = os.getenv('RPG_DB_PASSWORD', default='oops')
RPG_DB_HOST = os.getenv('RPG_DB_HOST', default='oops')
postgresql_connection = psycopg2.connect(dbname=RPG_DB_NAME, user=RPG_DB_USER, password=RPG_DB_PASSWORD, host=RPG_DB_HOST)

# connecting to local rpg database
DB_FILEPATH = os.path.join(os.path.dirname(__file__), '..', '..', 'module1-introduction-to-sql', 'rpg_db.sqlite3')
sqlite_connection = sqlite3.connect(DB_FILEPATH)

# creating cursors for both of the databases
sqlite_cursor = sqlite_connection.cursor()
postgresql_cursor = postgresql_connection.cursor()

# getting all of the tables names from the local rpg database
query = "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name"
table_names = sqlite_cursor.execute(query).fetchall()

query = 'SELECT * FROM armory_item'
result = sqlite_cursor.execute(query).fetchall()

total_query = ''
# copy every table's rows from sqlite to postgres (tables themselves are
# assumed to already exist on the postgres side)
for table in table_names:
    # getting all of the schema for the tables from the local db
    query = f"SELECT sql from sqlite_master WHERE name = \'{table[0]}\'"
    result = sqlite_cursor.execute(query).fetchall()
    '''
    was going to implement this using python code but had to hard code it in using sql queries in tables plus due to differences between sqlite and postgres
    # creating the tables in the elephant db
    query = result[0][0].replace('integer NOT NULL PRIMARY KEY AUTOINCREMENT', 'SERIAL PRIMARY KEY')
    query = query.replace('CREATE TABLE ', 'CREATE TABLE IF NOT EXISTS ')
    query = query.replace('datetime', 'date')
    total_query += query + ';'
    #print(total_query)
    postgresql_cursor.execute(total_query)
    '''
    # getting data to insert into table from local db
    query = f'SELECT * from {table[0]}'
    result = sqlite_cursor.execute(query).fetchall()
    print(f'\n{table[0]}')
    insertion_query = f'INSERT INTO {table[0]} VALUES %s'
    # character tables store a flag in column 1; sqlite returns 0/1 ints, so
    # convert to bool before inserting into postgres boolean columns
    if table[0] in ['charactercreator_cleric', 'charactercreator_fighter', 'charactercreator_mage',
                    'charactercreator_necromancer', 'charactercreator_thief']:
        new_result = []
        for each in result:
            each = list(each)
            each[1] = bool(each[1])
            new_result.append(each)
        execute_values(postgresql_cursor, insertion_query, new_result)
    elif table[0] != 'sqlite_sequence':  # not sure how to create this table with an unsigned data type
        execute_values(postgresql_cursor, insertion_query, result)

# commit once at the end, then release both postgres resources
postgresql_connection.commit()
postgresql_cursor.close()
postgresql_connection.close()
|
nilq/baby-python
|
python
|
import time
def time_training(fitter):
    """Print the time taken for a machine learning algorithm to train.

    Parameters:
        fitter(function): zero-argument callable used to train the model

    Returns: None
    """
    start = time.time()
    fitter()
    end = time.time()
    diff = end - start
    # time.time() returns seconds, so the difference is in seconds — the
    # original message mislabelled the unit as "miliseconds".
    print(f'Training time: {diff} seconds.')
    return None
|
nilq/baby-python
|
python
|
################################################################################
# Module: decision.py
# Description: Agent decision function templates
# Rafal Kucharski @ TU Delft, The Netherlands
################################################################################
from math import exp
import random
import pandas as pd
from dotmap import DotMap
from numpy.random.mtrand import choice
from MaaSSim.driver import driverEvent
from MaaSSim.traveller import travellerEvent
#################
# DUMMIES #
#################
def dummy_False(*args, **kwargs):
    """Default decision hook: accept any arguments and always answer False.

    Used as the default implementation inside a functionality when the
    behaviour is not modelled.
    """
    return False
def dummy_True(*args, **kwargs):
    """Default decision hook: accept any arguments and always answer True."""
    return True
def f_dummy_repos(*args, **kwargs):
    """Default repositioning hook for vehicles that become IDLE (after
    completing a request or entering the system): never reposition.

    Returns a DotMap whose ``flag`` is False, i.e. "stay where you are";
    no ``pos``/``time`` fields are needed in that case.
    """
    decision = DotMap()
    decision.flag = False
    return decision
################
# DRIVER #
################
def f_driver_out(*args, **kwargs):
    """Decide whether a driver opts out of the system for the coming day.

    Uses yesterday's simulation results (``sim.res``): drivers who worked
    compare their rides to the lower quantile of the fleet; drivers who
    stayed out compare the market average to their own history, with a
    small chance (``anneal``) of returning when demand went unserved.
    Returns True when the vehicle opts out.
    """
    # returns boolean True if vehicle decides to opt out
    leave_threshold = 0.25  # ride-count quantile below which a working driver leaves
    back_threshold = 0.5  # quantile used as the "average" market benchmark
    unserved_threshold = 0.005  # share of unserved demand that may attract drivers back
    anneal = 0.2  # probability of actually coming back to serve unserved demand

    veh = kwargs.get('veh', None)  # input
    sim = veh.sim  # input
    flag = False  # output
    if len(sim.runs) == 0:  # first day
        msg = 'veh {} stays on'.format(veh.id)
    else:
        last_run = sim.run_ids[-1]
        avg_yesterday = sim.res[last_run].veh_exp.nRIDES.quantile(
            back_threshold)  # how many rides was there on average
        quant_yesterday = sim.res[last_run].veh_exp.nRIDES.quantile(
            leave_threshold)  # what was the lower quantile of rides
        prev_rides = pd.Series([sim.res[_].veh_exp.loc[veh.id].nRIDES for _ in
                                sim.run_ids]).mean()  # how many rides did I have on average before
        rides_yesterday = sim.res[last_run].veh_exp.loc[veh.id].nRIDES  # how many rides did I have yesterday
        unserved_demand_yesterday = sim.res[last_run].pax_exp[sim.res[last_run].pax_exp.LOSES_PATIENCE > 0].shape[0] / \
                                    sim.res[last_run].pax_exp.shape[0]  # what is the share of unserved demand
        did_i_work_yesterday = sim.res[last_run].veh_exp.loc[veh.id].ENDS_SHIFT > 0
        if not did_i_work_yesterday:
            if avg_yesterday < prev_rides:
                msg = 'veh {} stays out'.format(veh.id)
                flag = True
            elif unserved_demand_yesterday > unserved_threshold:
                # NOTE(review): both outcomes of the anneal draw leave flag
                # False — the draw only changes the log message; confirm
                # whether the "someone else" branch should keep flag True.
                if random.random() < anneal:
                    msg = 'veh {} comes to serve unserved'.format(veh.id)
                    flag = False
                else:
                    msg = 'veh {} someone else come to serve unserved'.format(veh.id)
                    flag = False
            else:
                msg = 'veh {} comes back'.format(veh.id)
                flag = False
                pass
        else:
            if rides_yesterday > quant_yesterday:
                msg = 'veh {} stays in'.format(veh.id)
                flag = False
            else:
                msg = 'veh {} leaves'.format(veh.id)
                flag = True
    sim.logger.info('DRIVER OUT: ' + msg)
    return flag
def f_repos(*args, **kwargs):
    """
    handles the vehicles when they become IDLE (after completing the request or entering the system)

    In 20% of cases the driver repositions to a random neighbouring node;
    from a dead-end node (no neighbours) it "teleports" to a random node
    with a fixed 300 s penalty instead.

    :param args:
    :param kwargs: vehicle and simulation object (veh.sim)
    :return: structure with flag = bool, position to reposition to and time that it will take to reposition there.
    """
    # Fixed: removed a redundant function-local ``import random`` that
    # shadowed the module-level import.
    repos = DotMap()
    if random.random() > 0.8:  # 20% of cases driver will repos
        driver = kwargs.get('veh', None)
        sim = driver.sim
        neighbors = list(sim.inData.G.neighbors(driver.veh.pos))
        if len(neighbors) == 0:
            # escape from dead-end (teleport)
            repos.pos = sim.inData.nodes.sample(1).squeeze().name
            repos.time = 300
        else:
            repos.pos = random.choice(neighbors)
            repos.time = driver.sim.skims.ride[repos.pos][driver.veh.pos]
        repos.flag = True
    else:
        repos.flag = False
    return repos
def f_decline(*args, **kwargs):
    """Decide whether the driver rejects the offer currently made to them.

    The driver declines when the pickup wait reaches ``wait_limit`` seconds
    or the fare falls below ``fare_limit``; if no pending offer (status 0,
    addressed to this vehicle) exists, nothing is declined.
    """
    wait_limit = 200
    fare_limit = 0.1

    veh = kwargs.get('veh', None)
    # first offer in the platform's book that is pending for this vehicle
    pending = next(
        (
            offer
            for offer in veh.platform.offers.values()
            if offer['status'] == 0 and offer['veh_id'] == veh.id
        ),
        None,
    )
    if pending is None:
        return False

    # decline on a long wait or a low fare
    return pending['wait_time'] >= wait_limit or pending['fare'] < fare_limit
# ######### #
# PLATFORM #
# ######### #
def f_match(**kwargs):
    """
    for each platfrom, whenever one of the queues changes (new idle vehicle or new unserved request)
    this procedure handles the queue and prepares transactions between drivers and travellers
    it operates based on nearest vehicle and prepares and offer to accept by traveller/vehicle
    :param kwargs:
    :return:
    """
    platform = kwargs.get('platform')  # platform for which we perform matching
    vehQ = platform.vehQ  # queue of idle vehicles
    reqQ = platform.reqQ  # queue of unserved requests
    sim = platform.sim  # reference to the simulation object

    while min(len(reqQ), len(vehQ)) > 0:  # loop until one of queues is empty (i.e. all requests handled)
        requests = sim.inData.requests.loc[reqQ]  # queued schedules of requests
        vehicles = sim.vehicles.loc[vehQ]  # vehicle agents
        skimQ = sim.skims.ride[requests.origin].loc[vehicles.pos].copy().stack()  # travel times between
        # requests and vehicles in the column vector form
        skimQ = skimQ.drop(platform.tabu, errors='ignore')  # drop already rejected matches
        if skimQ.shape[0] == 0:
            sim.logger.warn("Nobody likes each other, "
                            "Qs {}veh; {}req; tabu {}".format(len(vehQ), len(reqQ), len(platform.tabu)))
            break  # nobody likes each other - wait until new request or new vehicle

        vehPos, reqPos = skimQ.idxmin()  # find the closest ones
        mintime = skimQ.min()  # and the travel time
        vehicle = vehicles[vehicles.pos == vehPos].iloc[0]
        veh_id = vehicle.name
        veh = sim.vehs[veh_id]  # vehicle agent
        request = requests[requests.origin == reqPos].iloc[0]
        req_id = request.name
        # all travellers sharing this (possibly pooled) request
        simpaxes = request.sim_schedule.req_id.dropna().unique()
        simpax = sim.pax[simpaxes[0]]  # first traveller of shared ride (he is a leader and decision maker)

        veh.update(event=driverEvent.RECEIVES_REQUEST)
        for i in simpaxes:
            sim.pax[i].update(event=travellerEvent.RECEIVES_OFFER)

        if simpax.veh is not None:  # the traveller already assigned (to a different platform)
            if req_id in platform.reqQ:  # we were too late, forget about it
                platform.reqQ.pop(platform.reqQ.index(req_id))  # pop this request (vehicle still in the queue)
        else:
            # build one offer per co-traveller of the shared ride
            for i in simpaxes:
                offer_id = i
                pax_request = sim.pax[i].request
                # travel time may be stored as seconds (int) or a timedelta
                if isinstance(pax_request.ttrav, int):
                    ttrav = pax_request.ttrav
                else:
                    ttrav = pax_request.ttrav.total_seconds()
                offer = {'pax_id': i,
                         'req_id': pax_request.name,
                         'simpaxes': simpaxes,
                         'veh_id': veh_id,
                         'status': 0,  # 0 - offer made, 1 - accepted, -1 rejected by traveller, -2 rejected by veh
                         'request': pax_request,
                         'wait_time': mintime,
                         'travel_time': ttrav,
                         'fare': platform.platform.fare * sim.pax[i].request.dist / 1000}  # make an offer
                platform.offers[offer_id] = offer  # bookkeeping of offers made by platform
                sim.pax[i].offers[platform.platform.name] = offer  # offer transferred to
            if veh.f_driver_decline(veh=veh):  # allow driver reject the request
                veh.update(event=driverEvent.REJECTS_REQUEST)
                platform.offers[offer_id]['status'] = -2
                for i in simpaxes:
                    sim.pax[i].update(event=travellerEvent.IS_REJECTED_BY_VEHICLE)
                    sim.pax[i].offers[platform.platform.name]['status'] = -2
                sim.logger.warning("pax {:>4} {:40} {}".format(request.name,
                                                               'got rejected by vehicle ' + str(veh_id),
                                                               sim.print_now()))
                platform.tabu.append((vehPos, reqPos))  # they are unmatchable
            else:
                # wake up every co-traveller waiting for an offer
                for i in simpaxes:
                    if not sim.pax[i].got_offered.triggered:
                        sim.pax[i].got_offered.succeed()
                vehQ.pop(vehQ.index(veh_id))  # pop offered ones
                reqQ.pop(reqQ.index(req_id))  # from the queues
        platform.updateQs()
# ######### #
# TRAVELLER #
# ######### #
def f_platform_opt_out(*args, **kwargs):
    """Traveller opts out when their request targets no platform (-1)."""
    traveller = kwargs.get('pax', None)
    return traveller.request.platform == -1
def f_out(*args, **kwargs):
    """Decide whether a traveller opts out based on the previous day.

    ``prev_exp`` is a single-row pd.DataFrame of the traveller's previous
    experience (columns like ['wait_pickup', 'wait_match', 'tt', ...]);
    without any previous experience the traveller stays in. Returns True
    to opt out.
    """
    prev_exp = kwargs.get('prev_exp', None)

    # first day: nothing to judge by, stay in
    if prev_exp is None:
        return False

    # stay only when yesterday's outcome was a success (1)
    return False if prev_exp.iloc[0].outcome == 1 else True
def f_mode(*args, **kwargs):
    """Return True when the traveller refuses MaaS because the offer is bad.

    The offer is refused when the access time (walking vs. vehicle pickup,
    whichever dominates) plus the time already spent matching exceeds the
    fraction ``delta`` of the in-vehicle travel time.

    :param kwargs: ``trip`` - traveller agent carrying request/vehicle context
    :return: bool - True to refuse the offer
    """
    # Fixed: removed the unused local ``offer = kwargs.get('offer', None)``.
    delta = 0.5  # access+matching time may be at most 50% of travel time
    trip = kwargs.get('trip')
    pass_walk_time = trip.pass_walk_time
    veh_pickup_time = trip.sim.skims.ride.T[trip.veh.pos][trip.request.origin]
    pass_matching_time = trip.sim.env.now - trip.t_matching
    tt = trip.request.ttrav
    return (max(pass_walk_time, veh_pickup_time) + pass_matching_time) / tt.seconds > delta
def f_platform_choice(*args, **kwargs):
    """Multinomial-logit choice of platform (or opt-out) for a traveller.

    Computes a utility per received offer plus one opt-out alternative
    (derived from the first offer with doubled times and halved cost),
    samples the choice with logit probabilities, then reports acceptance/
    rejection back to the platforms. Returns True when the traveller opts out.
    """
    traveller = kwargs.get('traveller')
    sim = traveller.sim
    betas = sim.params.platform_choice
    offers = traveller.offers
    # calc utilities
    exps = list()
    add_opt_out = True
    for platform, offer in offers.items():
        if add_opt_out:
            # opt-out alternative: twice the times, half the cost of this offer
            u = offer['wait_time'] * 2 * betas.Beta_wait + \
                offer['travel_time'] * 2 * betas.Beta_time + \
                offer['fare'] / 2 * betas.Beta_cost
            exps.append(exp(u))
            add_opt_out = False

        u = offer['wait_time'] * betas.Beta_wait + \
            offer['travel_time'] * betas.Beta_time + \
            offer['fare'] * betas.Beta_cost
        exps.append(exp(u))

    p = [_ / sum(exps) for _ in exps]  # logit probabilities

    platform_chosen = choice([-1] + list(offers.keys()), 1, p=p)[0]  # random choice with p
    if platform_chosen == -1:
        sim.logger.info("pax {:>4} {:40} {}".format(traveller.id, 'chosen to opt out',
                                                    sim.print_now()))
    else:
        sim.logger.info("pax {:>4} {:40} {}".format(traveller.id, 'chosen platform ' + str(platform_chosen),
                                                    sim.print_now()))
        sim.logger.info("pax {:>4} {:40} {}".format(traveller.id, 'platform probs: ' + str(p),
                                                    sim.print_now()))

    # handle requests: accept the chosen platform's offer, reject the rest
    for platform_id, offer in offers.items():
        if int(platform_id) == platform_chosen:
            sim.plats[platform_id].handle_accepted(offer['pax_id'])
        else:
            sim.plats[platform_id].handle_rejected(offer['pax_id'])
        sim.logger.info("pax {:>4} {:40} {}".format(traveller.id,
                                                    "wait: {}, travel: {}, fare: {}".format(offer['wait_time'],
                                                                                            int(offer['travel_time']),
                                                                                            int(offer[
                                                                                                    'fare'] * 100) / 100),
                                                    sim.print_now()))
    return platform_chosen == -1
#############
# SIMULATOR #
#############
def f_stop_crit(*args, **kwargs):
    """
    Decision whether to stop experiment after current iteration

    Convergence is measured as the relative day-to-day change of the mean
    pickup waiting time; the experiment stops once it drops below the
    threshold (and never before two runs exist).

    :param args:
    :param kwargs: sim object
    :return: boolean flag
    """
    sim = kwargs.get('sim', None)
    convergence_threshold = 0.001  # relative change considered converged
    _ = sim.run_ids[-1]
    # report how many vehicles actually worked (ended a shift) yesterday
    sim.logger.warning(sim.res[_].veh_exp[sim.res[_].veh_exp.ENDS_SHIFT > 0].shape[0])
    if len(sim.runs) < 2:
        sim.logger.warning('Early days')
        return False
    else:
        # example of convergence on waiting times
        convergence = abs((sim.res[sim.run_ids[-1]].pax_kpi['MEETS_DRIVER_AT_PICKUP']['mean'] -
                           sim.res[sim.run_ids[-2]].pax_kpi['MEETS_DRIVER_AT_PICKUP']['mean']) /
                          sim.res[sim.run_ids[-2]].pax_kpi['MEETS_DRIVER_AT_PICKUP']['mean'])
        # Fixed: use Logger.warning (Logger.warn is a deprecated alias),
        # consistent with the calls above.
        if convergence < convergence_threshold:
            sim.logger.warning('CONVERGED to {} after {} days'.format(convergence, sim.run_ids[-1]))
            return True
        else:
            sim.logger.warning('NOT CONVERGED to {} after {} days'.format(convergence, sim.run_ids[-1]))
            return False
|
nilq/baby-python
|
python
|
import boto3
from aws_cdk import (
core as cdk,
aws_events as events,
aws_events_targets as events_targets,
aws_glue as glue,
aws_iam as iam,
aws_lambda as lmb,
aws_lambda_python as lambda_python,
aws_logs as logs,
aws_s3 as s3,
aws_s3_notifications as s3_notifications,
aws_kinesisfirehose as kinesisfirehose,
custom_resources
)
from os import path
class SecurityHub(cdk.Construct):
    """Security Hub Construct designed to act like an L2 CDK Construct.

    Instantiating it enables AWS Security Hub for the account/region via a
    Lambda-backed custom resource; helper methods wire findings export to
    S3 and enable findings import for third-party products.
    """

    def __init__(self, scope: cdk.Construct, identifier: str):
        super().__init__(scope, identifier)
        self.this_dir = path.dirname(__file__)

        # Lambda that toggles Security Hub on create/delete of the custom resource.
        enable_disable_function = lmb.Function(self, 'EnableSHFunction',
                                               code=lmb.Code.from_asset(path.join(self.this_dir,
                                                                                  '../assets/lambdas/enable_security_hub_resource')),
                                               handler='index.handler',
                                               runtime=lmb.Runtime.PYTHON_3_8)
        enable_disable_function.add_to_role_policy(iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                'securityhub:EnableSecurityHub',
                'securityhub:DisableSecurityHub'
            ],
            resources=['*']
        ))
        enable_provider = custom_resources.Provider(self, 'EnableSHProvider',
                                                    on_event_handler=enable_disable_function,
                                                    log_retention=logs.RetentionDays.ONE_DAY)
        # RETAIN keeps Security Hub enabled even when the stack is deleted.
        cdk.CustomResource(self, 'EnableSH',
                           service_token=enable_provider.service_token,
                           removal_policy=cdk.RemovalPolicy.RETAIN)
        self.__enabled = True

    @property
    def is_enabled(self):
        # True once the enabling custom resource has been defined.
        return self.__enabled

    def stream_raw_findings_to_s3(self,
                                  bucket_name: str,
                                  bucket_arn: str,
                                  bucket_region=None,
                                  raw_prefix='raw/firehose'):
        """Stream imported Security Hub findings to an S3 bucket.

        An EventBridge rule matches 'Security Hub Findings - Imported'
        events and forwards them to a Kinesis Firehose delivery stream that
        buffers (900 s / 128 MB) and writes them under ``raw_prefix``.
        """
        if bucket_region is None:
            bucket_region = cdk.Aws.REGION
        target_bucket = s3.Bucket.from_bucket_attributes(self, 'TargetBucket',
                                                         bucket_name=bucket_name,
                                                         bucket_arn=bucket_arn,
                                                         region=bucket_region
                                                         )
        role = iam.Role(self, 'DeliveryRole',
                        assumed_by=iam.ServicePrincipal('firehose.amazonaws.com'))
        target_bucket.grant_read_write(role)
        delivery_stream = kinesisfirehose.CfnDeliveryStream(self, 'SHDeliveryStream',
                                                            delivery_stream_type='DirectPut',
                                                            extended_s3_destination_configuration=kinesisfirehose.CfnDeliveryStream.ExtendedS3DestinationConfigurationProperty(
                                                                role_arn=role.role_arn,
                                                                bucket_arn=target_bucket.bucket_arn,
                                                                buffering_hints=kinesisfirehose.CfnDeliveryStream.BufferingHintsProperty(
                                                                    interval_in_seconds=900,
                                                                    size_in_m_bs=128
                                                                ),
                                                                compression_format='UNCOMPRESSED',
                                                                prefix=raw_prefix
                                                            ))
        stream_rule = events.Rule(self, 'StreamFromKinesisToS3',
                                  event_pattern=events.EventPattern(
                                      source=['aws.securityhub'],
                                      detail_type=['Security Hub Findings - Imported'],
                                  ))
        target = events_targets.KinesisFirehoseStream(
            stream=delivery_stream,
        )
        stream_rule.add_target(target)

    def enable_import_findings_for_product(self, product_arn):
        """Enable import of findings from the given product ARN via a
        Lambda-backed custom resource.

        Fixed: dropped the unused local ``this_dir`` — the asset path is
        built from ``self.this_dir`` set in __init__.
        """
        enable_disable_function = lmb.Function(self, 'EnableSHImportFunction',
                                               code=lmb.Code.from_asset(path.join(self.this_dir,
                                                                                  '../assets/lambdas/enable_import_prowler_findings')),
                                               handler='index.handler',
                                               runtime=lmb.Runtime.PYTHON_3_8)
        enable_provider = custom_resources.Provider(self, 'EnableSHImportProvider',
                                                    on_event_handler=enable_disable_function,
                                                    log_retention=logs.RetentionDays.ONE_DAY)
        cdk.CustomResource(self, 'EnableSHImport',
                           service_token=enable_provider.service_token,
                           properties={
                               'product_arn': product_arn
                           },
                           removal_policy=cdk.RemovalPolicy.RETAIN)
|
nilq/baby-python
|
python
|
import matplotlib.pyplot as plt
from typing import List, Tuple, Union
from mathplotlib.base import BaseElement, Curve2D
from mathplotlib.style import Style
from mathplotlib.utils import update_with_default
class Text(BaseElement):
    """
    Draws a bit of text
    """

    # default style overrides applied when the text is placed on a curve
    on_curve_params = dict(
        horizontal_alignment="center",
        backgroundcolor=None,
        outlined=True,
        strokecolor="white",
        strokewidth=5,
    )

    def __init__(
        self,
        x: float,
        y: float,
        text: str,
        size: int = 12,
        rotation: int = 0,
        horizontal_alignment: str = "center",
        vertical_alignment: str = "center",
        **kwargs,
    ):
        """Place ``text`` at data coordinates (x, y).

        :param size: font size in points
        :param rotation: rotation in degrees
        :param horizontal_alignment: matplotlib ``ha`` value
        :param vertical_alignment: matplotlib ``va`` value
        :param kwargs: style options forwarded to Style (textcolor, alpha, ...)
        """
        super().__init__("Text", nolegend=True)
        self.x, self.y = x, y
        self.size, self.rotation = size, rotation
        self.horizontal_alignment = horizontal_alignment
        self.vertical_alignment = vertical_alignment
        self.text = text
        self.style = Style(**kwargs)

    def __repr__(self) -> str:
        return f"Text @ ({self.x:.2f}, {self.y:.2f})"

    def draw(self, ax: plt.Axes):
        """Render the text onto ``ax`` using the stored Style."""
        text_actor = ax.text(
            self.x,
            self.y,
            self.text,
            size=self.size,
            rotation=self.rotation,
            ha=self.horizontal_alignment,
            va=self.vertical_alignment,
            color=self.style.textcolor,
            alpha=self.style.alpha,
            bbox=dict(
                pad=2,
                color=self.style.backgroundcolor,
                joinstyle="round",
                alpha=0.95,
            ),
            fontweight=self.style.fontweight,
            zorder=self.style.zorder,
        )

        # hide the background box entirely when no background color is set
        if self.style.backgroundcolor is None:
            text_actor.set_bbox(dict(alpha=0))

        if self.style.outlined:
            self.outline(text_actor, lw=2)

    @classmethod
    def on_curve(cls, curve: Curve2D, text: str, at: float = 1, **kwargs):
        """
        Adds a text on a curve at a given X position along it
        """
        # the curve must expose y_func so the text's y coordinate can be
        # reconstructed from its x position
        try:
            y_func = curve.y_func  # type: ignore
        except AttributeError:
            raise AttributeError(
                f'The curve object {curve} has no "y_func" method, cant reconstruct text position'
            )

        # compute the angle of the curve at the point
        curve_angle = curve.angle_at_point(at)
        rotation = kwargs.pop("rotation", curve_angle)

        # get the color based on the curve
        color = kwargs.pop("textcolor", curve.style.linecolor)

        kwargs = update_with_default(kwargs, Text.on_curve_params)
        return Text(
            at, y_func(at), text, rotation=rotation, textcolor=color, **kwargs
        )
class Annotation(BaseElement):
    """Draws a text label connected by an arrow to one or more target points."""

    # Default matplotlib arrow-patch parameters for the annotation arrow.
    _default_arrow_params = dict(
        arrowstyle="-|>",
        connectionstyle="arc3,rad=-0.25",
        shrinkA=4,
        shrinkB=4,
        lw=2,
        fc="w",
        mutation_scale=20,
    )

    def __init__(
        self,
        x: float,
        y: float,
        text: str,
        x_shift: float = 1,
        y_shift: float = 1,
        size: Union[int, str] = "medium",
        textcoords: str = "data",
        arrow_params: dict = None,
        additional_points: List[Tuple[float, float]] = None,
        **kwargs,
    ):
        """
        :param x: x coordinate of the annotated (arrow target) point.
        :param y: y coordinate of the annotated point.
        :param text: the annotation text.
        :param x_shift: horizontal offset of the text from the target point.
        :param y_shift: vertical offset of the text from the target point.
        :param size: font size (int or a matplotlib size name like "medium").
        :param textcoords: coordinate system used for the text position.
        :param arrow_params: overrides for the default arrow appearance;
            missing keys are filled in from ``_default_arrow_params``.
        :param additional_points: extra (x, y) points that get their own
            arrows, all sharing the same text label.
        :param kwargs: forwarded to :class:`Style` (textcolor, zorder, ...).
        """
        super().__init__("Annotation", nolegend=True)
        self.style = Style(**kwargs)
        # get/set arrow paramters
        if arrow_params is None:
            arrow_params = self._default_arrow_params.copy()
        # fill any keys the caller left out with the defaults
        arrow_params = update_with_default(
            arrow_params, self._default_arrow_params
        )
        # arrow color follows the text color unless "textcolor" is passed
        arrow_params["color"] = kwargs.pop("textcolor", self.style.textcolor)
        self.x, self.y = x, y
        self.x_shift, self.y_shift = x_shift, y_shift
        self.size = size
        self.textcoords = textcoords
        self.arrow_params = arrow_params
        self.text = text
        # NOTE(review): "color" was already assigned above, so this
        # pop/re-assign looks redundant — confirm before simplifying.
        self.arrow_params["color"] = self.arrow_params.pop(
            "color", self.style.textcolor
        )
        self.additional_points = additional_points

    def draw(self, ax: plt.Axes):
        """Render the annotation text plus its arrow(s) onto the axes."""
        # draw arrow + add text
        actors = [
            ax.annotate(
                self.text,
                (self.x, self.y),
                size=self.size,
                color=self.style.textcolor,
                xytext=(self.x + self.x_shift, self.y + self.y_shift),
                textcoords=self.textcoords,
                arrowprops=self.arrow_params,
                zorder=self.style.zorder,
            )
        ]
        # add additional arrows; each repeats the text but renders it
        # invisibly so only one visible label remains
        if self.additional_points is not None:
            for xy in self.additional_points:
                actors.append(
                    ax.annotate(
                        self.text,
                        xy,
                        size=self.size,
                        color=self.style.textcolor,
                        xytext=(self.x + self.x_shift, self.y + self.y_shift),
                        textcoords=self.textcoords,
                        arrowprops=self.arrow_params,
                        fontweight=0,  # make the text invisible
                    )
                )
        if self.style.outlined:
            for actor in actors:
                self.outline(actor, lw=2)

    @classmethod
    def at_curve(cls, curve: BaseElement, text: str, at: float = 1, **kwargs):
        """
        Draws an annotation pointing at a point along a curve
        """
        try:
            y_func = curve.y_func  # type: ignore
        except AttributeError:
            raise AttributeError(
                f'The curve object {curve} has no "y_func" method, cant reconstruct text position'
            )
        # get color
        color = kwargs.pop("textcolor", curve.style.linecolor)
        return Annotation(at, y_func(at), text, textcolor=color, **kwargs)
|
nilq/baby-python
|
python
|
import argparse
import math
import PIL.Image
import PIL.ImageDraw
import sys
def choose_guideline_style(guideline_mod):
    """Pick a (color, width) pair for a guideline from its offset.

    Strong blue lines every 16 voxels, medium green every 8, light pink
    every 4.  Values not divisible by 4 yield None — callers only ever
    pass multiples of 4, so that never happens in practice.
    """
    styles = (
        (16, ('#1f32ff', 3)),
        (8, ('#80f783', 2)),
        (4, ('#f4bffb', 1)),
    )
    for divisor, style in styles:
        if guideline_mod % divisor == 0:
            return style
    return None
def in_ellipsoid(x, y, z, rad_x, rad_y, rad_z, center_x=None, center_y=None, center_z=None):
    '''
    Given a point (x, y, z), return whether that point lies strictly inside
    the ellipsoid ((x-cx)/a)^2 + ((y-cy)/b)^2 + ((z-cz)/c)^2 = 1.

    The center defaults to (rad_x, rad_y, rad_z), i.e. the ellipsoid sits
    inscribed in the box [0, 2*rad_x] x [0, 2*rad_y] x [0, 2*rad_z].
    '''
    if center_x is None:
        center_x = rad_x
    if center_y is None:
        center_y = rad_y
    if center_z is None:
        center_z = rad_z
    # Normalized squared distance from the center; < 1 means inside.
    norm_sq = (
        ((x - center_x) / rad_x) ** 2
        + ((y - center_y) / rad_y) ** 2
        + ((z - center_z) / rad_z) ** 2
    )
    return norm_sq < 1
def voxelspheregenerator(WIDTH, HEIGH, DEPTH, WALL_THICKNESS=None, specific=None):
    """Render one PNG plan image per Z layer of a voxel ellipsoid.

    WIDTH/HEIGH/DEPTH are the bounding-box dimensions in voxels.
    WALL_THICKNESS, if truthy, hollows the ellipsoid leaving a shell of
    that thickness.  `specific` restricts output to one layer (int) or an
    iterable of layer indices; None renders all DEPTH layers.
    Reads dot_highlight.png / dot_normal.png / dot_corner.png from the
    current directory and writes one PNG per layer there.
    """
    ODD_W = WIDTH % 2 == 1
    ODD_H = HEIGH % 2 == 1
    ODD_D = DEPTH % 2 == 1
    RAD_X = WIDTH / 2
    RAD_Y = HEIGH / 2
    RAD_Z = DEPTH / 2
    if WALL_THICKNESS:
        INNER_RAD_X = RAD_X - WALL_THICKNESS
        INNER_RAD_Y = RAD_Y - WALL_THICKNESS
        INNER_RAD_Z = RAD_Z - WALL_THICKNESS
    # Center voxel indices per axis: one index for odd sizes, two for even.
    X_CENTER = {WIDTH // 2} if ODD_W else {WIDTH // 2, (WIDTH // 2) - 1}
    Y_CENTER = {HEIGH // 2} if ODD_H else {HEIGH // 2, (HEIGH // 2) - 1}
    Z_CENTER = {DEPTH // 2} if ODD_D else {DEPTH // 2, (DEPTH // 2) - 1}
    layer_digits = len(str(DEPTH))
    filename_form = '{w}x{h}x{d}w{wall}-{{layer:0{digits}}}.png'
    filename_form = filename_form.format(
        w=WIDTH,
        h=HEIGH,
        d=DEPTH,
        wall=WALL_THICKNESS if WALL_THICKNESS else 0,
        digits=layer_digits,
    )
    dot_highlight = PIL.Image.open('dot_highlight.png')
    dot_normal = PIL.Image.open('dot_normal.png')
    dot_corner = PIL.Image.open('dot_corner.png')
    # assumes all dot sprites are square and equally sized — TODO confirm
    pixel_scale = dot_highlight.size[0]
    # Space between each pixel
    PIXEL_MARGIN = 7
    # Space between the pixel area and the canvas
    PIXELSPACE_MARGIN = 2
    # Space between the canvas area and the image edge
    CANVAS_MARGIN = 2
    LABEL_HEIGH = 20
    FINAL_IMAGE_SCALE = 1
    # Derived pixel geometry of the output image.
    PIXELSPACE_WIDTH = (WIDTH * pixel_scale) + ((WIDTH - 1) * PIXEL_MARGIN)
    PIXELSPACE_HEIGH = (HEIGH * pixel_scale) + ((HEIGH - 1) * PIXEL_MARGIN)
    CANVAS_WIDTH = PIXELSPACE_WIDTH + (2 * PIXELSPACE_MARGIN * pixel_scale)
    CANVAS_HEIGH = PIXELSPACE_HEIGH + (2 * PIXELSPACE_MARGIN * pixel_scale)
    IMAGE_WIDTH = CANVAS_WIDTH + (2 * CANVAS_MARGIN * pixel_scale)
    IMAGE_HEIGH = CANVAS_HEIGH + (2 * CANVAS_MARGIN * pixel_scale) + LABEL_HEIGH
    CANVAS_START_X = CANVAS_MARGIN * pixel_scale
    CANVAS_START_Y = CANVAS_MARGIN * pixel_scale
    CANVAS_END_X = CANVAS_START_X + CANVAS_WIDTH
    CANVAS_END_Y = CANVAS_START_Y + CANVAS_HEIGH
    PIXELSPACE_START_X = CANVAS_START_X + (PIXELSPACE_MARGIN * pixel_scale)
    PIXELSPACE_START_Y = CANVAS_START_Y + (PIXELSPACE_MARGIN * pixel_scale)
    PIXELSPACE_END_X = PIXELSPACE_START_X + PIXELSPACE_WIDTH
    PIXELSPACE_END_Y = PIXELSPACE_START_Y + PIXELSPACE_HEIGH
    GUIDELINE_MOD_X = math.ceil(RAD_X)
    GUIDELINE_MOD_Y = math.ceil(RAD_Y)

    def pixel_coord(x, y):
        # Map a voxel (x, y) to the top-left pixel of its dot sprite.
        x = PIXELSPACE_START_X + (x * pixel_scale) + (x * PIXEL_MARGIN)
        y = PIXELSPACE_START_Y + (y * pixel_scale) + (y * PIXEL_MARGIN)
        return (x, y)

    def make_layer_matrix(z):
        # Build only the upper-left quadrant; make_layer_image mirrors it
        # into the other three quadrants.
        layer_matrix = [[None for y in range(math.ceil(RAD_Y))] for x in range(math.ceil(RAD_X))]
        # Generate the upper left corner.
        furthest_x = RAD_X
        furthest_y = RAD_Y
        for y in range(math.ceil(RAD_Y)):
            for x in range(math.ceil(RAD_X)):
                # Sample the voxel at its center point.
                ux = x + 0.5
                uy = y + 0.5
                uz = z + 0.5
                within = in_ellipsoid(ux, uy, uz, RAD_X, RAD_Y, RAD_Z)
                if WALL_THICKNESS:
                    # Hollow shell: exclude voxels inside the inner ellipsoid.
                    in_hole = in_ellipsoid(
                        ux, uy, uz,
                        INNER_RAD_X, INNER_RAD_Y, INNER_RAD_Z,
                        RAD_X, RAD_Y, RAD_Z
                    )
                    within = within and not in_hole
                if within:
                    # Highlight center rows/columns, except on center layers
                    # where the roles are swapped.
                    if x in X_CENTER or y in Y_CENTER:
                        if z in Z_CENTER:
                            dot = dot_normal
                        else:
                            dot = dot_highlight
                    else:
                        if z in Z_CENTER:
                            dot = dot_highlight
                        else:
                            dot = dot_normal
                    layer_matrix[x][y] = dot
                    furthest_x = min(x, furthest_x)
                    furthest_y = min(y, furthest_y)
                    #layer_image.paste(dot, box=(pixel_coord_x, pixel_coord_y))
        # Mark the corner pieces
        furthest_y = math.floor(furthest_y)
        for y in range(furthest_y, math.ceil(RAD_Y-1)):
            for x in range(furthest_x, math.ceil(RAD_X-1)):
                is_corner = (
                    layer_matrix[x][y] is not None and
                    layer_matrix[x-1][y+1] is not None and
                    layer_matrix[x+1][y-1] is not None and
                    (
                        # Outer corners
                        (layer_matrix[x][y-1] is None and layer_matrix[x-1][y] is None) or
                        # Inner corners, if hollow
                        (layer_matrix[x][y+1] is None and layer_matrix[x+1][y] is None)
                    )
                )
                if is_corner:
                    layer_matrix[x][y] = dot_corner
        return layer_matrix

    def make_layer_image(layer_matrix):
        # NOTE(review): reads `layer_filename` from the enclosing loop below
        # via closure — only call after that variable is set.
        layer_image = PIL.Image.new('RGBA', size=(IMAGE_WIDTH, IMAGE_HEIGH), color=(0, 0, 0, 0))
        draw = PIL.ImageDraw.ImageDraw(layer_image)
        # Plot.
        LABEL_Y = (2 * math.ceil(RAD_Y))  # NOTE(review): appears unused
        for y in range(math.ceil(RAD_Y)):
            bottom_y = (HEIGH - 1) - y
            for x in range(math.ceil(RAD_X)):
                right_x = (WIDTH - 1) - x
                if layer_matrix[x][y] is not None:
                    # Mirror the quadrant into all four corners.
                    layer_image.paste(layer_matrix[x][y], box=pixel_coord(x, y))
                    layer_image.paste(layer_matrix[x][y], box=pixel_coord(right_x, y))
                    layer_image.paste(layer_matrix[x][y], box=pixel_coord(x, bottom_y))
                    layer_image.paste(layer_matrix[x][y], box=pixel_coord(right_x, bottom_y))
        # Draw the counting helpers along the bottom.
        # Start at the center top of the circle and walk along the edge.
        # Every time the walker 'falls' down, mark the distance.
        def put_counterhelper(start_x, end_x, y):
            if start_x > end_x:
                return
            y = (HEIGH + 1) - y
            span = end_x - start_x
            center = start_x + 1
            draw.text(pixel_coord(center, y), str(span), fill='#000')
        y = 0
        x = math.floor(RAD_X) - 1
        end_x = x
        start_y = None
        while x >= y and y < RAD_Y:
            #print(x, y, start_y)
            pixel = layer_matrix[x][y]
            if pixel is None:
                # The edge 'fell' down one step; record the run length.
                y += 1
                if x != end_x:
                    put_counterhelper(x, end_x, y)
                if start_y is None:
                    start_y = y
                else:
                    put_counterhelper(x, end_x, start_y)
                end_x = x
                continue
            x -= 1
            y += 1
        put_counterhelper(x, end_x, y)
        # To draw the guidelines, start from
        for x in range(GUIDELINE_MOD_X % 4, WIDTH + 4, 4):
            # Vertical guideline
            as_if = GUIDELINE_MOD_X - x
            #print(x, as_if)
            line_x = PIXELSPACE_START_X + (x * pixel_scale) + (x * PIXEL_MARGIN)
            line_x = line_x - PIXEL_MARGIN + (PIXEL_MARGIN // 2)
            if line_x >= PIXELSPACE_END_X:
                continue
            (color, width) = choose_guideline_style(as_if)
            draw.line((line_x, CANVAS_START_Y, line_x, CANVAS_END_Y - 1), fill=color, width=width)
            draw.text((line_x, CANVAS_END_X), str(x), fill='#000')
        for y in range(GUIDELINE_MOD_Y % 4, HEIGH + 4, 4):
            # Horizontal guideline
            as_if = GUIDELINE_MOD_Y - y
            #print(y, as_if)
            line_y = PIXELSPACE_START_Y + (y * pixel_scale) + (y * PIXEL_MARGIN)
            line_y = line_y - PIXEL_MARGIN + (PIXEL_MARGIN // 2)
            if line_y >= PIXELSPACE_END_Y:
                continue
            (color, width) = choose_guideline_style(as_if)
            draw.line((CANVAS_START_X, line_y, CANVAS_END_X - 1, line_y), fill=color, width=width)
            draw.text((CANVAS_END_X, line_y), str(y), fill='#000')
        draw.rectangle((CANVAS_START_X, CANVAS_START_Y, CANVAS_END_X - 1, CANVAS_END_Y - 1), outline='#000')
        draw.text((CANVAS_START_X, IMAGE_HEIGH - LABEL_HEIGH), layer_filename, fill='#000')
        print(layer_filename)
        if FINAL_IMAGE_SCALE != 1:
            layer_image = layer_image.resize((FINAL_IMAGE_SCALE * IMAGE_WIDTH, FINAL_IMAGE_SCALE * IMAGE_HEIGH))
        return layer_image

    # Select which Z layers to render.
    if specific is None:
        zrange = range(DEPTH)
    elif isinstance(specific, int):
        zrange = [specific]
    else:
        zrange = specific
    layer_matrices = []
    for z in zrange:
        if z < math.ceil(RAD_Z):
            layer_matrix = make_layer_matrix(z)
            layer_matrices.append(layer_matrix)
        else:
            # The sphere is symmetric in Z: reuse the mirrored layer.
            layer_matrix = layer_matrices[(DEPTH - 1) - z]
        layer_filename = filename_form.format(layer=z)
        layer_image = make_layer_image(layer_matrix)
        layer_image.save(layer_filename)
def voxelsphere_argparse(args):
    """Validate the parsed CLI arguments and invoke the sphere generator.

    Height and depth must be given together; when both are omitted they
    default to the width (a perfect sphere).
    """
    if bool(args.height) != bool(args.depth):
        raise ValueError('Must provide both or neither of height+depth. Not just one.')
    if args.height is None and args.depth is None:
        args.height = args.width
        args.depth = args.width
    voxelspheregenerator(
        WIDTH=int(args.width),
        HEIGH=int(args.height),
        DEPTH=int(args.depth),
        WALL_THICKNESS=int(args.wall_thickness) if args.wall_thickness else None,
        specific=int(args.specific) if args.specific else None,
    )
def main(argv):
    """Build the CLI parser and dispatch to the configured handler.

    width is required; height and depth are optional positionals (their
    both-or-neither constraint is enforced in voxelsphere_argparse).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('width')
    parser.add_argument('height', nargs='?', default=None)
    parser.add_argument('depth', nargs='?', default=None)
    parser.add_argument('--wall', dest='wall_thickness', default=None)
    parser.add_argument('--specific', dest='specific', default=None)
    parser.set_defaults(func=voxelsphere_argparse)
    args = parser.parse_args(argv)
    return args.func(args)


if __name__ == '__main__':
    raise SystemExit(main(sys.argv[1:]))
|
nilq/baby-python
|
python
|
class GQL:
    """Message-type constants for a GraphQL-over-WebSocket transport.

    NOTE(review): the values look like the Apollo
    subscriptions-transport-ws protocol vocabulary — confirm against the
    consuming transport before changing anything.
    """

    # Client -> Server message types.
    CONNECTION_INIT = "connection_init"
    START = "start"
    STOP = "stop"
    CONNECTION_TERMINATE = "connection_terminate"

    # Server -> Client message types.
    CONNECTION_ERROR = "connection_error"
    CONNECTION_ACK = "connection_ack"
    DATA = "data"
    ERROR = "error"
    COMPLETE = "complete"
    CONNECTION_KEEP_ALIVE = "ka"  # periodic keep-alive message
|
nilq/baby-python
|
python
|
# -*- coding:utf-8 -*-
import time
from tqdm import tqdm
import requests
from lxml import etree
# Browser-like request headers sent with every HTTP request below
# (Firefox user agent to avoid trivial bot filtering).
headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
           'Accept-Encoding': 'gb2312,utf-8',
           'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0',
           'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
           'Connection': 'Keep-alive'
           }
def run(url):
    """Fetch one article-list page and print (publish_time, title) for each article on it.

    Best-effort: any network or parsing failure for this page is reported
    and skipped instead of aborting the whole crawl.
    """
    try:
        res_text = requests.get(url=url, headers=headers)
        res = etree.HTML(res_text.text)
        # Extract the article page links from the list page and crawl each one.
        article_urls = res.xpath('//div[@class="article-list"]/div/h4/a/@href')
        for article_url in article_urls:
            article_text = requests.get(url=article_url, headers=headers)
            article_result = etree.HTML(article_text.text)
            title = article_result.xpath('//h1[@class="title-article"]/text()')[0]
            publish_time = article_result.xpath('//div[@class="bar-content"]/span[@class="time"]/text()')[0]
            print(publish_time, title)
    except Exception as exc:
        # Was a bare `except: pass`, which silently hid every failure and
        # even swallowed KeyboardInterrupt. Keep the best-effort behavior
        # but surface the error for debugging.
        print('failed to crawl {}: {}'.format(url, exc))
if __name__ == '__main__':
    start = time.time()
    # Build the task links (list pages 1..9) and crawl each one.
    for page in range(1, 10):
        run(url='https://blog.csdn.net/cui_yonghua/article/list/{}'.format(page))
    print('time cost:{}'.format(time.time()-start))
|
nilq/baby-python
|
python
|
# coding: utf-8
"""
Eclipse Kapua REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class DataMetricsApi(object):
    """Swagger-generated client for the Eclipse Kapua data-metrics REST endpoints.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """

    def __init__(self, api_client=None):
        # Fall back to a default-configured ApiClient when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def data_metric_count(self, scope_id, body, **kwargs):  # noqa: E501
        """Counts the MetricInfos  # noqa: E501

        Counts the MetricInfos with the given MetricInfoQuery parameter returning the number of matching MetricInfos  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.data_metric_count(scope_id, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str scope_id: The ScopeId in which to search results (required)
        :param MetricInfoQuery body: The MetricInfoQuery to use to filter count results (required)
        :return: CountResult
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Always return just the deserialized data, not the full HTTP response.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.data_metric_count_with_http_info(scope_id, body, **kwargs)  # noqa: E501
        else:
            (data) = self.data_metric_count_with_http_info(scope_id, body, **kwargs)  # noqa: E501
            return data

    def data_metric_count_with_http_info(self, scope_id, body, **kwargs):  # noqa: E501
        """Counts the MetricInfos  # noqa: E501

        Counts the MetricInfos with the given MetricInfoQuery parameter returning the number of matching MetricInfos  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.data_metric_count_with_http_info(scope_id, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str scope_id: The ScopeId in which to search results (required)
        :param MetricInfoQuery body: The MetricInfoQuery to use to filter count results (required)
        :return: CountResult
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Reject any keyword argument not in the generated whitelist.
        all_params = ['scope_id', 'body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method data_metric_count" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'scope_id' is set
        if ('scope_id' not in params or
                params['scope_id'] is None):
            raise ValueError("Missing the required parameter `scope_id` when calling `data_metric_count`")  # noqa: E501
        # verify the required parameter 'body' is set
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `data_metric_count`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'scope_id' in params:
            path_params['scopeId'] = params['scope_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/xml', 'application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/xml', 'application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['kapuaAccessToken']  # noqa: E501

        return self.api_client.call_api(
            '/{scopeId}/data/metrics/_count', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='CountResult',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def data_metric_find(self, scope_id, metric_info_id, **kwargs):  # noqa: E501
        """Gets an MetricInfo  # noqa: E501

        Gets the MetricInfo specified by the metricInfoId path parameter  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.data_metric_find(scope_id, metric_info_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str scope_id: The ScopeId of the requested MetricInfo. (required)
        :param str metric_info_id: The id of the requested MetricInfo (required)
        :return: MetricInfo
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Always return just the deserialized data, not the full HTTP response.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.data_metric_find_with_http_info(scope_id, metric_info_id, **kwargs)  # noqa: E501
        else:
            (data) = self.data_metric_find_with_http_info(scope_id, metric_info_id, **kwargs)  # noqa: E501
            return data

    def data_metric_find_with_http_info(self, scope_id, metric_info_id, **kwargs):  # noqa: E501
        """Gets an MetricInfo  # noqa: E501

        Gets the MetricInfo specified by the metricInfoId path parameter  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.data_metric_find_with_http_info(scope_id, metric_info_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str scope_id: The ScopeId of the requested MetricInfo. (required)
        :param str metric_info_id: The id of the requested MetricInfo (required)
        :return: MetricInfo
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Reject any keyword argument not in the generated whitelist.
        all_params = ['scope_id', 'metric_info_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method data_metric_find" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'scope_id' is set
        if ('scope_id' not in params or
                params['scope_id'] is None):
            raise ValueError("Missing the required parameter `scope_id` when calling `data_metric_find`")  # noqa: E501
        # verify the required parameter 'metric_info_id' is set
        if ('metric_info_id' not in params or
                params['metric_info_id'] is None):
            raise ValueError("Missing the required parameter `metric_info_id` when calling `data_metric_find`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'scope_id' in params:
            path_params['scopeId'] = params['scope_id']  # noqa: E501
        if 'metric_info_id' in params:
            path_params['metricInfoId'] = params['metric_info_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/xml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['kapuaAccessToken']  # noqa: E501

        return self.api_client.call_api(
            '/{scopeId}/data/metrics/{metricInfoId}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='MetricInfo',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def data_metric_query(self, scope_id, body, **kwargs):  # noqa: E501
        """Queries the MetricInfos  # noqa: E501

        Queries the MetricInfos with the given MetricInfoQuery parameter returning all matching MetricInfos  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.data_metric_query(scope_id, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str scope_id: The ScopeId in which to search results (required)
        :param MetricInfoQuery body: The MetricInfoQuery to use to filter results (required)
        :return: MetricInfoListResult
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Always return just the deserialized data, not the full HTTP response.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.data_metric_query_with_http_info(scope_id, body, **kwargs)  # noqa: E501
        else:
            (data) = self.data_metric_query_with_http_info(scope_id, body, **kwargs)  # noqa: E501
            return data

    def data_metric_query_with_http_info(self, scope_id, body, **kwargs):  # noqa: E501
        """Queries the MetricInfos  # noqa: E501

        Queries the MetricInfos with the given MetricInfoQuery parameter returning all matching MetricInfos  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.data_metric_query_with_http_info(scope_id, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str scope_id: The ScopeId in which to search results (required)
        :param MetricInfoQuery body: The MetricInfoQuery to use to filter results (required)
        :return: MetricInfoListResult
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Reject any keyword argument not in the generated whitelist.
        all_params = ['scope_id', 'body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method data_metric_query" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'scope_id' is set
        if ('scope_id' not in params or
                params['scope_id'] is None):
            raise ValueError("Missing the required parameter `scope_id` when calling `data_metric_query`")  # noqa: E501
        # verify the required parameter 'body' is set
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `data_metric_query`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'scope_id' in params:
            path_params['scopeId'] = params['scope_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/xml', 'application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/xml', 'application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['kapuaAccessToken']  # noqa: E501

        return self.api_client.call_api(
            '/{scopeId}/data/metrics/_query', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='MetricInfoListResult',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def data_metric_simple_query(self, scope_id, **kwargs):  # noqa: E501
        """Gets the MetricInfo list in the scope  # noqa: E501

        Returns the list of all the metricInfos associated to the current selected scope.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.data_metric_simple_query(scope_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str scope_id: The ScopeId in which to search results (required)
        :param str client_id: The client id to filter results
        :param str channel: The channel to filter results. It allows '#' wildcard in last channel level
        :param str name: The metric name to filter results
        :param int offset: The result set offset
        :param int limit: The result set limit
        :return: MetricInfoListResult
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Always return just the deserialized data, not the full HTTP response.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.data_metric_simple_query_with_http_info(scope_id, **kwargs)  # noqa: E501
        else:
            (data) = self.data_metric_simple_query_with_http_info(scope_id, **kwargs)  # noqa: E501
            return data

    def data_metric_simple_query_with_http_info(self, scope_id, **kwargs):  # noqa: E501
        """Gets the MetricInfo list in the scope  # noqa: E501

        Returns the list of all the metricInfos associated to the current selected scope.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.data_metric_simple_query_with_http_info(scope_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str scope_id: The ScopeId in which to search results (required)
        :param str client_id: The client id to filter results
        :param str channel: The channel to filter results. It allows '#' wildcard in last channel level
        :param str name: The metric name to filter results
        :param int offset: The result set offset
        :param int limit: The result set limit
        :return: MetricInfoListResult
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Reject any keyword argument not in the generated whitelist.
        all_params = ['scope_id', 'client_id', 'channel', 'name', 'offset', 'limit']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method data_metric_simple_query" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'scope_id' is set
        if ('scope_id' not in params or
                params['scope_id'] is None):
            raise ValueError("Missing the required parameter `scope_id` when calling `data_metric_simple_query`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'scope_id' in params:
            path_params['scopeId'] = params['scope_id']  # noqa: E501

        # Optional filters are passed as query-string parameters.
        query_params = []
        if 'client_id' in params:
            query_params.append(('clientId', params['client_id']))  # noqa: E501
        if 'channel' in params:
            query_params.append(('channel', params['channel']))  # noqa: E501
        if 'name' in params:
            query_params.append(('name', params['name']))  # noqa: E501
        if 'offset' in params:
            query_params.append(('offset', params['offset']))  # noqa: E501
        if 'limit' in params:
            query_params.append(('limit', params['limit']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/xml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['kapuaAccessToken']  # noqa: E501

        return self.api_client.call_api(
            '/{scopeId}/data/metrics', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='MetricInfoListResult',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
|
nilq/baby-python
|
python
|
import datetime
import sys
import pathlib
current_dir = pathlib.Path(__file__).resolve().parent
sys.path.append( str(current_dir) + '/../' )
from lib.config import get_camera_config
from service.camera import take_pictures
"""
cronによって毎分実行される
その分にタイマーが設定されているカメラの撮影リクエストを送信する
python3 cameras.py
"""
def main():
    """Send a capture request to every camera whose timer matches the current minute.

    Intended to be run once per minute from cron (see the module note above
    the function in the original file).
    """
    now = datetime.datetime.now()
    hour, minute = now.hour, now.minute
    cameras = get_camera_config()
    target_camera_ids = []
    # Check every configured camera.
    for camera in cameras:
        # Skip cameras that have no timer configured.
        if 'timer' not in camera:
            continue
        timers = camera['timer']
        # Check whether this camera should take a picture right now.
        for timer in timers:
            if timer['hour'] == hour and timer['minute'] == minute:
                target_camera_ids.append(camera['camera_id'])
                # Bug fix: this was `continue`, which kept scanning the
                # remaining timers and appended the same camera id once per
                # matching timer. One match is enough — stop scanning.
                break
    take_pictures(target_camera_ids)


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
"""Dataset specification for hit graphs using pytorch_geometric formulation"""
# System imports
import os
# External imports
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, random_split
import torch_geometric
def load_graph(filename):
    """Load a hit graph from an .npz file and rebuild its edge index.

    The file stores the incoming (Ri) and outgoing (Ro) segment matrices
    in sparse row/col form; the returned edge_index has shape
    (2, n_edges) with row 0 = source hit and row 1 = target hit.
    """
    with np.load(filename) as data:
        x, y = data['X'], data['y']
        ri_rows, ri_cols = data['Ri_rows'], data['Ri_cols']
        ro_rows, ro_cols = data['Ro_rows'], data['Ro_cols']
    edge_index = np.zeros((2, ri_cols.shape[0]), dtype=int)
    edge_index[0, ro_cols] = ro_rows
    edge_index[1, ri_cols] = ri_rows
    return x, edge_index, y
def load_sparse(filename):
    """Load an already-sparsified hit graph (features X, edge list e, labels y) from .npz."""
    with np.load(filename, allow_pickle=True) as data:
        return data['X'], data['e'], data['y']
class HitGraphDataset(Dataset):
    """PyTorch dataset specification for hit graphs.

    Loads pre-built sparse hit-graph .npz files, either from a CSV file
    list (must have a `file` column) or by scanning a directory for
    `event*` files (excluding `*_ID.npz`).
    """

    def __init__(self, input_dir=None, filelist=None, n_samples=None, real_weight=1.0):
        # An explicit file list takes precedence over a directory scan.
        if filelist is not None:
            self.metadata = pd.read_csv(os.path.expandvars(filelist))
            filenames = self.metadata.file.values
        elif input_dir is not None:
            input_dir = os.path.expandvars(input_dir)
            filenames = sorted([os.path.join(input_dir, f) for f in os.listdir(input_dir)
                                if f.startswith('event') and not f.endswith('_ID.npz')])
        else:
            raise Exception('Must provide either input_dir or filelist to HitGraphDataset')
        # Optionally truncate to the first n_samples files.
        self.filenames = filenames if n_samples is None else filenames[:n_samples]
        # Per-edge loss weights: true edges get real_weight, fake edges 1.
        self.real_weight = real_weight
        self.fake_weight = 1 #real_weight / (2 * real_weight - 1)

    def __getitem__(self, index):
        """ We choose to load an already sparsified graph """
        # x, edge_index, y = load_graph(self.filenames[index])
        # print(self.filenames[index])
        x, edge_index, y = load_sparse(self.filenames[index])
        # Compute weights
        w = y * self.real_weight + (1-y) * self.fake_weight
        return torch_geometric.data.Data(x=torch.from_numpy(x),
                                         edge_index=torch.from_numpy(edge_index),
                                         y=torch.from_numpy(y), w=torch.from_numpy(w),
                                         i=index)

    def get_filelist(self):
        # Expose the resolved file list (useful for logging/reproducibility).
        return self.filenames

    def __len__(self):
        return len(self.filenames)
def get_datasets(n_train, n_valid, input_dir=None, filelist=None, real_weight=1.0):
    """Build one HitGraphDataset of ``n_train + n_valid`` samples and split
    it randomly into (train, validation) subsets of those sizes."""
    full_dataset = HitGraphDataset(input_dir=input_dir, filelist=filelist,
                                   n_samples=n_train + n_valid,
                                   real_weight=real_weight)
    # random_split shuffles the indices before carving out the two subsets.
    train_split, valid_split = random_split(full_dataset, [n_train, n_valid])
    return train_split, valid_split
|
nilq/baby-python
|
python
|
"""
author: Rene Pickhardt (rene.m.pickhardt@ntnu.no)
Date: 15.1.2020
License: MIT
Checks which nodes are currently online by establishing a connection to those nodes. Results can later be studied with `lightning-cli listpeers` or when `jq` is installed with `lcli getinfo | jq ".num_peers"`
This tool is intended to be used to make routing decisions in which we we chose a path to the destination in which all hops are online. This should reduce the failed routing attempts and the latency.
=== Support:
If you like my work consider a donation at https://patreon.com/renepickhardt or https://tallyco.in/s/lnbook
"""
from lightning import LightningRpc, RpcError
from multiprocessing import Process
from time import sleep

# RPC handle to the local c-lightning node (hard-coded socket path).
rpc = LightningRpc("/home/rpickhardt/.lightning/bitcoin/lightning-rpc")

def connect(nodeid):
    """Attempt one peer connection to `nodeid` via the local node's RPC."""
    try:
        res = rpc.connect(nodeid)
        print(nodeid, res)
    except RpcError as e:
        print("could not connect to", nodeid, str(e))

# Collect gossip-known nodes that advertise at least one IPv4 address.
# NOTE(review): a node advertising several ipv4 addresses is appended once
# per address -- deduplication may be intended; confirm.
nodes = rpc.listnodes()["nodes"]
potential_nodes = []
for node in nodes:
    if "nodeid" in node:
        nodeid = node["nodeid"]
        if "addresses" not in node:
            continue
        addresses = node["addresses"]
        for addr in addresses:
            if addr["type"] == "ipv4":
                potential_nodes.append(nodeid)
print("known nodes:", len(nodes), "nodes with ipv4 addr:", len(potential_nodes))

# Fan out connection attempts in separate processes, throttled to ~5 per
# second so the local node's RPC is not flooded.
for r, nodeid in enumerate(potential_nodes):
    p = Process(target=connect, args=(nodeid,))
    print("go", nodeid)
    p.start()
    sleep(0.2)
    print(r)
|
nilq/baby-python
|
python
|
"""Helper methods."""
from typing import Tuple, Optional
from rest_framework import status
from rest_framework.response import Response
def validate_request_body(
    request,
) -> Tuple[Optional[Response], Optional[dict]]:
    """
    Validate the json body of a request.
    :param request: django request
    :return: None or error response, None or json dict
    """
    try:
        body = request.data
    except Exception:
        error = Response(
            data={"message": "Sent data is not valid"},
            status=status.HTTP_400_BAD_REQUEST,
        )
        return error, None
    return None, body
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 3 20:07:19 2020
https://zhuanlan.zhihu.com/p/78452993
@author: lenovo
"""
import torch
from torch.nn import Sequential as Seq, Linear as Lin, ReLU
from torch_geometric.nn import MessagePassing
from torch_geometric.datasets import TUDataset
# dataset = TUDataset(root='/tmp/ENZYMES', name='ENZYMES')
class EdgeConv(MessagePassing):
    """Edge convolution layer built on torch_geometric's MessagePassing.

    Messages are computed from each edge's endpoint features and aggregated
    per node with an element-wise max.
    """
    def __init__(self, F_in, F_out):
        super(EdgeConv, self).__init__(aggr='max') # "Max" aggregation.
        # Edge MLP: maps concatenated [x_i, x_j - x_i] (2*F_in) to F_out.
        self.mlp = Seq(Lin(2 * F_in, F_out), ReLU(), Lin(F_out, F_out))
    def forward(self, x, edge_index):
        # x has shape [N, F_in]
        # edge_index has shape [2, E]
        return self.propagate(edge_index, x=x) # shape [N, F_out]
    def message(self, x_i, x_j):
        # x_i has shape [E, F_in]
        # x_j has shape [E, F_in]
        edge_features = torch.cat([x_i, x_j - x_i], dim=1) # shape [E, 2 * F_in]
        return self.mlp(edge_features) # shape [E, F_out]
|
nilq/baby-python
|
python
|
class Web3ClientException(Exception):
    """Base class for web3 client errors.

    Derives from ``Exception`` rather than ``BaseException``: generic
    ``except Exception`` handlers should catch client errors, while
    ``BaseException`` is reserved for interpreter-exit signals such as
    ``KeyboardInterrupt`` and ``SystemExit``. Existing handlers that catch
    ``Web3ClientException`` (or broader) continue to work unchanged.
    """


class MissingParameter(Web3ClientException):
    """A required parameter was not supplied."""


class TransactionTooExpensive(Web3ClientException):
    """A transaction's estimated cost exceeds the allowed limit."""


class NetworkNotFound(Web3ClientException):
    """The requested network is not known/configured."""
|
nilq/baby-python
|
python
|
import gdo
def f():
    # Python callable used as one of the graph tasks: just sleeps 10s.
    import time
    time.sleep(10)

# Run four named tasks concurrently; .req declares that "slee" must wait
# for "true" to finish first.
gdo.concurrent(
    gdo.RunGraph(
        "slee", "sleep 2",
        "slle2", "sleep 5",
        "pysleep", f,
        "true", "true")
    .req("slee", "true")
)
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# Configuration constants (heating element, brew/steam switches, PID, server)
# -- appears to drive an espresso-machine controller.
#PIN 0-8 3v3 pull-up default, 9-27 pull-down default
# Pin # for relay connected to heating element (Note: GPIO pin#)
he_pin = 26
# Additional GPIO pin assignments (brew, steam, status LED)
brew_pin = 22
steam_pin = 27
led_pin = 13
# Default goal temperature (Celsius; display unit controlled below)
set_temp = 96.
set_steam_temp = 145.
#Use Fahrenheit?
use_fahrenheit = False
# Default alarm time (HH:MM)
snooze = '07:00'
# Pressure gauge
pressure_enable = True
#circuit breaker time in minutes convert to seconds
circuitBreakerTime = 20 * 60
#temp lowpoint and high point (Celsius); _b = brew range, _s = steam range
low_temp_b = 0
high_temp_b = 110
low_temp_s = 130
high_temp_s = 160
# Main loop sample rate in seconds
sample_time = 0.1
# PID Proportional, Integral, and Derivative value
P = 10
I = 1.5
D = 20.0
#Web/REST Server Options
host = '0.0.0.0'
port = 8080
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import os
from codecs import open
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))

# load the package's __version__.py module as a dictionary
# (codecs.open: the third positional argument is the encoding)
about = {}
with open(os.path.join(here, "profiler", "__version__.py"), "r", "utf-8") as f:
    exec(f.read(), about)

# Use the README as the long description; fall back to the short
# description when building in a context without README.md.
try:
    with open("README.md", "r") as f:
        readme = f.read()
except FileNotFoundError:
    readme = about["__description__"]

packages = ["profiler"]
requires = ["tox==3.24.4", "coverage-badge==1.1.0", "scapy==2.4.5", "manuf==1.1.3"]
# Package metadata (title, version, author, ...) comes from
# profiler/__version__.py, loaded above into `about`.
setup(
    name=about["__title__"],
    version=about["__version__"],
    description=about["__description__"],
    long_description=readme,
    long_description_content_type="text/markdown",
    author=about["__author__"],
    author_email=about["__author_email__"],
    url=about["__url__"],
    # FIX: was "~=3.7," -- the stray trailing comma produced an empty
    # version specifier that packaging silently discards.
    python_requires="~=3.7",
    license=about["__license__"],
    classifiers=[
        "Natural Language :: English",
        "Development Status :: 3 - Alpha",
        "Programming Language :: Python :: 3.7",
        "Intended Audience :: System Administrators",
        "Topic :: Utilities",
    ],
    packages=packages,
    project_urls={
        "Documentation": "https://docs.wlanpi.com",
        "Source": "https://github.com/wlan-pi/profiler",
    },
    include_package_data=True,
    install_requires=requires,
    entry_points={"console_scripts": ["profiler=profiler.__main__:main"]},
)
|
nilq/baby-python
|
python
|
"""recode entities
Revision ID: 4212acfa7aec
Revises: 235fd19bb942
Create Date: 2016-12-01 10:24:07.638773
"""
import logging
# from pprint import pprint
from alembic import op
import sqlalchemy as sa
import uuid
log = logging.getLogger('migrate')
revision = '4212acfa7aec'
down_revision = '235fd19bb942'
SCHEMA = {
'/entity/person.json#': 'Person',
'/entity/organization.json#': 'Organization',
'/entity/entity.json#': 'LegalEntity',
'/entity/company.json#': 'Company'
}
def upgrade():
    """Move entities onto collections, cloning them per linked collection.

    For every entity row:
      * hard-delete rows already soft-deleted (plus their alerts, collection
        links and references),
      * rewrite the legacy JSON-schema ``type`` URL to the new schema name,
      * normalise the ``data`` payload (rename keys, collect aliases, drop
        empty values),
      * write one entity row per linked collection -- the first collection
        updates the row in place, further collections get fresh UUID clones
        tied together through a shared ``entity_identity`` record, and
      * re-point or copy alerts and references so each clone only carries
        rows its collection's permissions allow.
    """
    op.alter_column('document', 'collection_id', existing_type=sa.INTEGER(), nullable=False)  # noqa
    op.add_column('entity', sa.Column('collection_id', sa.Integer, nullable=True))  # noqa
    op.create_index(op.f('ix_entity_collection_id'), 'entity', ['collection_id'], unique=False)  # noqa
    op.create_foreign_key(None, 'entity', 'collection', ['collection_id'], ['id'])  # noqa
    op.create_table('entity_identity',
                    sa.Column('id', sa.Integer, nullable=False),
                    sa.Column('created_at', sa.DateTime(), nullable=True),
                    sa.Column('updated_at', sa.DateTime(), nullable=True),
                    sa.Column('entity_id', sa.Unicode(255), nullable=True),
                    sa.Column('identity', sa.Unicode(255), nullable=True),
                    sa.PrimaryKeyConstraint('id')
                    )
    bind = op.get_bind()
    meta = sa.MetaData()
    meta.bind = bind
    meta.reflect()
    entity_table = meta.tables['entity']
    entity_identity_table = meta.tables['entity_identity']
    document_table = meta.tables['document']
    collection_entity_table = meta.tables['collection_entity']
    reference_table = meta.tables['reference']
    permission_table = meta.tables['permission']
    alert_table = meta.tables['alert']
    q = sa.select([entity_table])
    rp = bind.execute(q)
    entities_all = rp.fetchall()
    for i, entity in enumerate(entities_all):
        log.info("Process [%s: %s]: %s", i, entity.id, entity.name)
        if entity.deleted_at is not None:
            # Soft-deleted: purge the entity and every row referencing it.
            cq = sa.delete(alert_table)
            cq = cq.where(alert_table.c.entity_id == entity.id)
            bind.execute(cq)
            cq = sa.delete(collection_entity_table)
            cq = cq.where(collection_entity_table.c.entity_id == entity.id)
            bind.execute(cq)
            cq = sa.delete(reference_table)
            cq = cq.where(reference_table.c.entity_id == entity.id)
            bind.execute(cq)
            cq = sa.delete(entity_table)
            cq = cq.where(entity_table.c.id == entity.id)
            bind.execute(cq)
            continue
        # Normalise the JSON payload to the new field names.
        data = entity['data']
        data.pop('identifiers', None)
        data['country'] = data.pop('jurisdiction_code', None)
        data['birthDate'] = data.pop('birth_date', None)
        data['deathDate'] = data.pop('death_date', None)
        data['alias'] = []
        for on in data.pop('other_names', []):
            name = on.get('name')
            if name is None:
                continue
            data['alias'].append(name)
        # BUGFIX: iterate over a snapshot of the items. Popping keys while
        # iterating the live dict raises RuntimeError on Python 3.
        for k, v in list(data.items()):
            if v is None or v == '':
                data.pop(k)
        # Map the legacy schema URL to the new schema name.
        # NOTE(review): unknown types map to None and are written as-is.
        schema = SCHEMA.get(entity.type)
        cq = sa.select([alert_table])
        cq = cq.where(alert_table.c.entity_id == entity.id)
        alerts = bind.execute(cq).fetchall()
        cq = sa.select([reference_table, document_table.c.collection_id])
        cq = cq.select_from(reference_table.join(document_table, reference_table.c.document_id == document_table.c.id))  # noqa
        cq = cq.where(reference_table.c.entity_id == entity.id)
        references = bind.execute(cq).fetchall()
        cq = sa.select([collection_entity_table])
        cq = cq.where(collection_entity_table.c.entity_id == entity.id)
        colls = bind.execute(cq).fetchall()
        identity = uuid.uuid4().hex
        # BUGFIX: the inner loop previously reused `i`, clobbering the outer
        # progress counter used in the log line above.
        for coll_idx, coll in enumerate(colls):
            coll_id = coll.collection_id
            eid = entity.id
            if coll_idx == 0:
                # First collection: rewrite the existing row in place.
                q = sa.update(entity_table)
                q = q.where(entity_table.c.id == entity.id)
                q = q.values(type=schema, data=data, collection_id=coll_id)
                bind.execute(q)
            else:
                # Further collections: insert a clone under a fresh UUID.
                eid = uuid.uuid4().hex
                ent = {
                    'id': eid,
                    'name': entity.name,
                    'state': entity.state,
                    'type': schema,
                    'data': data,
                    'collection_id': coll_id,
                    'created_at': entity.created_at,
                    'updated_at': entity.updated_at
                }
                q = sa.insert(entity_table).values(ent)
                bind.execute(q)
            if len(colls) > 1:
                # Tie the original and all clones together via one identity.
                q = sa.insert(entity_identity_table).values({
                    'created_at': entity.updated_at,
                    'updated_at': entity.updated_at,
                    'entity_id': eid,
                    'identity': identity
                })
                bind.execute(q)
            for alert in alerts:
                # Keep an alert on this row only when its owner may read the
                # collection; copy alerts onto readable clones.
                cq = sa.select([permission_table])
                cq = cq.where(permission_table.c.collection_id == coll_id)
                cq = cq.where(permission_table.c.role_id == alert.role_id)
                cq = cq.where(permission_table.c.read == True)  # noqa
                perm = bind.execute(cq).fetchone()
                if perm is None and eid == entity.id:
                    q = sa.delete(alert_table)
                    q = q.where(alert_table.c.id == alert.id)
                    bind.execute(q)
                if perm is not None and eid != entity.id:
                    ad = dict(alert)
                    ad.pop('id', None)
                    ad['entity_id'] = eid
                    q = sa.insert(alert_table).values(ad)
                    bind.execute(q)
            for ref in references:
                refdata = dict(ref)
                collection_id = refdata.pop('collection_id')
                # Pending entities: move references into the matching
                # collection's row; active entities: copy to clones.
                if entity.state == 'pending' and coll_id == collection_id:
                    q = sa.update(reference_table)
                    q = q.where(reference_table.c.id == ref.id)
                    q = q.values(entity_id=eid)
                    bind.execute(q)
                if entity.state == 'active' and eid != ref.entity_id:
                    refdata.pop('id', None)
                    refdata['entity_id'] = eid
                    q = sa.insert(reference_table).values(refdata)
                    bind.execute(q)
    op.drop_table('collection_document')
    op.drop_table('collection_entity')
    # op.alter_column('entity', 'collection_id', nullable=False) # noqa
def downgrade():
    # Irreversible data migration (entities are fanned out per collection),
    # so no downgrade is implemented.
    pass
|
nilq/baby-python
|
python
|
import os
# Reserves disk space by saving binary zeros to a file a given size
class DiskSpaceReserver:
    """Reserves disk space by writing binary zeros to a file of a given size.

    The zeros are written in fixed-size chunks so reserving a large amount
    of space does not require holding one `size`-byte buffer in memory
    (the original built the entire payload with ``b'\\0' * size``).
    """

    # Write granularity; 1 MiB keeps memory bounded for large reservations.
    _CHUNK_SIZE = 1024 * 1024

    def __init__(self, path: str, size: int):
        self.path = path
        self.size = size

    def reserve(self):
        """Create (or truncate) the file and fill it with `size` zero bytes."""
        with open(self.path, 'wb') as f:
            remaining = max(self.size, 0)
            chunk = b'\0' * min(self._CHUNK_SIZE, remaining)
            while remaining > 0:
                n = min(remaining, self._CHUNK_SIZE)
                f.write(chunk[:n])
                remaining -= n

    def release(self):
        """Delete the reservation file; a missing file is ignored."""
        try:
            os.remove(self.path)
        except OSError:
            pass
|
nilq/baby-python
|
python
|
import uuid
from core import db, logging, plugin, model
from core.models import conduct, trigger, webui
from plugins.occurrence.models import action
class _occurrence(plugin._plugin):
    """Plugin lifecycle handler for the occurrence models.

    install() registers the action models and wires up a single
    conduct -> trigger -> action flow named "occurrenceCore".
    """
    version = 5.0
    def install(self):
        # Register models
        model.registerModel("occurrence","_occurrence","_action","plugins.occurrence.models.action")
        model.registerModel("occurrence clean","_occurrenceClean","_action","plugins.occurrence.models.action")
        model.registerModel("occurrenceUpdate","_occurrenceUpdate","_action","plugins.occurrence.models.action")
        # Finding conduct: create on first install, reuse on reinstall.
        foundConducts = conduct._conduct().query(query={"name" : "occurrenceCore" })["results"]
        if len(foundConducts) == 0:
            # Install
            c = conduct._conduct().new("occurrenceCore")
            c = conduct._conduct().get(c.inserted_id)
        elif len(foundConducts) == 1:
            # Reinstall
            c = conduct._conduct().get(foundConducts[0]["_id"])
        else:
            # Count invalid - more than one match is ambiguous; abort.
            return False
        # Finding trigger: same create-or-reuse pattern.
        foundTriggers = trigger._trigger(False).query(query={"name" : "occurrenceCore" })["results"]
        if len(foundTriggers) == 0:
            # Install
            t = trigger._trigger().new("occurrenceCore")
            t = trigger._trigger().get(t.inserted_id)
        elif len(foundTriggers) == 1:
            # Reinstall
            t = trigger._trigger().get(foundTriggers[0]["_id"])
        else:
            # Count invalid
            return False
        # Finding action: same create-or-reuse pattern.
        foundActions = action._occurrenceClean().query(query={"name" : "occurrenceCore" })["results"]
        if len(foundActions) == 0:
            # Install
            a = action._occurrenceClean().new("occurrenceCore")
            a = action._occurrenceClean().get(a.inserted_id)
        elif len(foundActions) == 1:
            # Reinstall
            a = action._occurrenceClean().get(foundActions[0]["_id"])
        else:
            # Count invalid
            return False
        # Build the flow: trigger node -> action node.
        c.triggers = [t._id]
        flowTriggerID = str(uuid.uuid4())
        flowActionID = str(uuid.uuid4())
        c.flow = [
            {
                "flowID" : flowTriggerID,
                "type" : "trigger",
                "triggerID" : t._id,
                "next" : [
                    {"flowID": flowActionID, "logic": True }
                ]
            },
            {
                "flowID" : flowActionID,
                "type" : "action",
                "actionID" : a._id,
                "next" : []
            }
        ]
        # Place the two flow nodes on the web UI canvas (x=0 and x=100).
        webui._modelUI().new(c._id,{ "ids":[ { "accessID":"0","delete": True,"read": True,"write": True } ] },flowTriggerID,0,0,"")
        webui._modelUI().new(c._id,{ "ids":[ { "accessID":"0","delete": True,"read": True,"write": True } ] },flowActionID,100,0,"")
        # Grant full access to accessID "0" and enable everything.
        c.acl = { "ids":[ { "accessID":"0","delete": True,"read": True,"write": True } ] }
        c.enabled = True
        c.update(["triggers","flow","enabled","acl"])
        t.acl = { "ids":[ { "accessID":"0","delete": True,"read": True,"write": True } ] }
        # Run the trigger on a randomized 60-90 second schedule.
        t.schedule = "60-90s"
        t.enabled = True
        t.update(["schedule","enabled","acl"])
        a.acl = { "ids":[ { "accessID":"0","delete": True,"read": True,"write": True } ] }
        a.enabled = True
        a.update(["enabled","acl"])
        # Hide Created Models
        temp = model._model().getAsClass(query={ "name" : "occurrence clean" })
        if len(temp) == 1:
            temp = temp[0]
            temp.hidden = True
            temp.update(["hidden"])
        return True
    def uninstall(self):
        # deregister models and delete every "occurrenceCore" object created
        # by install().
        model.deregisterModel("occurrence","_occurrence","_action","plugins.occurrence.models.action")
        model.deregisterModel("occurrence clean","_occurrenceClean","_action","plugins.occurrence.models.action")
        model.deregisterModel("occurrenceUpdate","_occurrenceUpdate","_action","plugins.occurrence.models.action")
        conduct._conduct().api_delete(query={"name" : "occurrenceCore" })
        trigger._trigger().api_delete(query={"name" : "occurrenceCore" })
        action._occurrenceClean().api_delete(query={"name" : "occurrenceCore" })
        return True
    def upgrade(self,LatestPluginVersion):
        # No migration steps needed below version 5.
        if self.version < 5:
            pass
        return True
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import imp
import io
import os
from setuptools import setup, find_packages
def read(*filenames, **kwargs):
    """Concatenate the contents of *filenames* into one string.

    Keyword arguments: ``encoding`` (default "utf-8") used to open each
    file, and ``sep`` (default newline) joined between file contents.
    """
    encoding = kwargs.get("encoding", "utf-8")
    sep = kwargs.get("sep", "\n")

    def _slurp(path):
        with io.open(path, encoding=encoding) as handle:
            return handle.read()

    return sep.join(_slurp(name) for name in filenames)
name = 'nengolib'
root = os.path.dirname(os.path.realpath(__file__))
# NOTE(review): the `imp` module is deprecated (removed in Python 3.12);
# this loads nengolib/version.py directly so setup.py does not have to
# import the not-yet-installed package.
version_module = imp.load_source(
    'version', os.path.join(root, name, 'version.py'))

deps = [  # https://github.com/nengo/nengo/issues/508
    "nengo>=2.2.0,<3.0",
    "numpy>=1.13",
    "scipy>=0.19.0",
]

download_url = (
    'https://github.com/arvoelke/nengolib/archive/v%s.tar.gz' % (
        version_module.version))

setup(
    name=name,
    version=version_module.version,
    author="Aaron R. Voelker",
    author_email="arvoelke@gmail.com",
    description="Tools for robust dynamics in Nengo",
    long_description=read("README.rst", "CHANGES.rst"),
    url="https://github.com/arvoelke/nengolib/",
    download_url=download_url,
    license="Free for non-commercial use (see Nengo license)",
    packages=find_packages(),
    setup_requires=deps,
    install_requires=deps,
    keywords=[
        'Neural Engineering Framework',
        'Nengo',
        'Dynamical Spiking Networks',
        'Neural Dynamics',
        'Reservoir Computing',
    ],
    classifiers=[  # https://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Development Status :: 5 - Production/Stable',
        'Framework :: Nengo',
        'Intended Audience :: Science/Research',
        'License :: Free for non-commercial use',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
    ]
)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, nishta and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import getdate, validate_email_add, today
from frappe.model.document import Document
from planning.planning.myfunction import mail_format_pms,daily_summary_mail
import datetime
class NNTask(Document):
    """Frappe document controller for NN Task records."""
    def autoname(self):
        # Name the document "<task>-<zero-padded running counter>", based on
        # the highest max_count stored so far.
        max_no_old="0"
        max_no_result=frappe.db.sql("""select max(max_count) from `tabNNTask`""")
        if(max_no_result):
            max_no_old=max_no_result[0][0]
        # NOTE(review): max_no_old may be the string "0" or None (empty
        # table); comparing it with <=0 works on Python 2 but raises
        # TypeError on Python 3 -- confirm the target runtime.
        if max_no_old<=0:
            max_no_old=0
        max_no_new=int(max_no_old)+int(1)
        # Zero-pad the counter to four digits.
        count_zero=""
        if max_no_new<1000:
            count_zero="0"
        if max_no_new<100:
            count_zero="00"
        if max_no_new<10:
            count_zero="000"
        self.max_count=max_no_new
        new_naming=str("-")+str(count_zero)+str(max_no_new)
        self.task=self.task+new_naming
        self.name=self.task
    def validate(self):
        # Reject duplicate members in the assign_to child table.
        # NOTE(review): `i` is never incremented, so the reported row number
        # is always 1 -- confirm intent.
        allocate_to_arr=[]
        i=1
        for d in self.assign_to:
            if d.members in allocate_to_arr:
                frappe.msgprint("Allocate to "+ str(d.members) +" Already Exists ( Row No : "+ str(i) +")",raise_exception=1)
            else:
                allocate_to_arr.append(d.members)
    def after_insert(self):
        # Send the "task created" notification mail (mode 0).
        task_name=self.task
        mode=0
        mail_format_pms(task_name,mode)
@frappe.whitelist()
def employee_values_load(naming_series=None):
    # Whitelisted RPC endpoint: return (employee_name, hourly_rate) rows for
    # the given employee id.
    return_values=frappe.db.sql("""select employee_name,hourly_rate from tabEmployee where employee=%s""",naming_series)
    return return_values
|
nilq/baby-python
|
python
|
import logging
from impala.dbapi import connect
from .settings import ImpalaConstants, NEED_CERTIFICATE
from .error import ImpalaConnectError, ImpalaQueryError
class ImpalaWrapper:
    """Thin wrapper around impyla that runs EXPLAIN for a given query."""
    def __init__(self, host=ImpalaConstants.HOST, port=ImpalaConstants.PORT,
                 user=ImpalaConstants.USER, database=None, sql=None,
                 auth_required=NEED_CERTIFICATE):
        self.host = host
        # Port may arrive as a string; normalise to int for connect().
        self.port = int(port)
        self.user = user
        self.database = database
        # Only the query PLAN is ever requested, never the query itself.
        self.sql = "explain %s" % sql
        self.auth_required = auth_required
    def cursor(self):
        """Open a connection and return a cursor.

        Uses Kerberos (GSSAPI) when authentication is required, NOSASL
        otherwise. Raises ImpalaConnectError on any connection failure.
        """
        if self.auth_required:
            auth_mechanism = 'GSSAPI'
        else:
            auth_mechanism = 'NOSASL'
        try:
            return connect(self.host, self.port,
                           auth_mechanism=auth_mechanism).cursor()
        except Exception as err:
            logging.error(err)
            raise ImpalaConnectError(message=str(err))
    def explain(self):
        """Yield the EXPLAIN plan line by line (generator).

        Raises ImpalaQueryError when switching database or running the
        explain statement fails; the cursor is closed in all cases.
        """
        cursor = self.cursor()
        try:
            cursor.execute("use %s" % self.database)
            # Verbosity level 2 gives the extended plan.
            cursor.execute("set explain_level=2")
            cursor.execute(self.sql)
        except Exception as err:
            logging.warning(err)
            raise ImpalaQueryError(message=str(err))
        else:
            # Each result row is a one-column tuple holding a plan line.
            for line in cursor:
                yield line[0]
        finally:
            cursor.close()
|
nilq/baby-python
|
python
|
from quixstreaming import QuixStreamingClient
from flask import Flask, request
from datetime import datetime
from waitress import serve
import os
import json
import hmac
import hashlib
# Quix injects credentials automatically to the client.
# Alternatively, you can always pass an SDK token manually as an argument.
client = QuixStreamingClient()

# Open the output topic where to write data out
output_topic = client.open_output_topic(os.environ["output"])
stream = output_topic.create_stream()
stream.properties.name = "Segment Data"

app = Flask("Segment Webhook")

# this is unauthenticated, anyone could post anything to you!
@app.route("/webhook", methods=['POST'])
def webhook():
    """Receive a Segment webhook, verify its HMAC signature, forward to Quix."""
    # get the shared secret from environment variables
    secret = os.environ["shared_secret"]
    # convert to a byte array
    secret_bytes = bytearray(secret, "utf-8")
    # get the signature from the headers
    header_sig = request.headers['x-signature']
    # compute a hash-based message authentication code (HMAC)
    hex_digest = hmac.new(secret_bytes, request.get_data(), hashlib.sha1).hexdigest()
    # compare the HMAC to the header signature provided by Segment
    # NOTE(review): plain != is not a constant-time comparison;
    # hmac.compare_digest would resist timing attacks -- confirm before
    # changing behavior.
    if(header_sig != hex_digest):
        # if they don't match its no bueno
        return "ERROR", 401
    # if they do then fly me to the moon: publish the event payload keyed by
    # its Segment event type.
    stream.events.add_timestamp(datetime.now())\
        .add_value(request.json["type"], json.dumps(request.json))\
        .write()
    return "OK", 200

print("CONNECTED!")

# you can use app.run for dev, but its not secure, stable or particularly efficient
# app.run(debug=True, host="0.0.0.0", port=80)
# use waitress instead for production
serve(app, host='0.0.0.0', port=80)
|
nilq/baby-python
|
python
|
import re
import json
from ..extractor.common import InfoExtractor
from ..utils import (
js_to_json
)
class sexixnetIE(InfoExtractor):
    """Extractor for sexix.net video pages.

    The page embeds a player iframe (http://sexix.net/v.php?u=...) whose
    jwplayer config points at an XML playlist; that playlist lists one
    <jwplayer:source> element per quality.
    """
    #http://www.txxx.com/videos/2631606/stepmom-seduces-teen-babe/
    _VALID_URL = r'https?://(?:www\.)?sexix\.net'

    def _real_extract(self, url):
        webpage = self._download_webpage(url, url)
        # Prefer the OpenGraph title, falling back to the <title> tag.
        title = self._og_search_title(webpage, default=None) or self._html_search_regex(
            r'(?s)<title>(.*?)</title>', webpage, 'video title',
            default='video')
        thumbnail = self._search_regex(r'image: \'([^\']+)', webpage, 'thumbnail', default=None)
        vid = self._search_regex(r'<iframe src="http://sexix.net/v.php\?u=([^"]+)', webpage, 'emb')
        embUrl = 'http://sexix.net/v.php?u=%s' % vid
        headers = {'Referer': url}
        webpage = self._download_webpage(embUrl, vid, headers=headers)
        jw_config = self._parse_json(
            self._search_regex(
                r'(?s)jwplayer\(([\'"])(?:(?!\1).)+\1\)\.setup\s*\((?P<options>.+?)\);',
                webpage, 'jw config', group='options'),
            '', transform_source=js_to_json)
        playlist_url = jw_config['playlist']
        webpage = self._download_webpage(playlist_url, vid, headers=headers)
        # Playlist entries look like:
        # <jwplayer:source file="..." type="mp4" label="480p"/>
        # (renamed from `list`, which shadowed the builtin)
        sources = re.findall(r'file="(.+)"\s*type="(.+)"\s*label="([^"]+)p', webpage)
        formats = []
        for file_url, ext, height in sources:
            if file_url == '':
                continue
            try:
                formats.append({
                    'url': file_url,
                    # FIX: was the raw label string; downstream sorting
                    # expects a numeric height.
                    'height': int(height),
                    'ext': ext,
                })
            except ValueError:
                # Non-numeric quality label: skip the entry instead of
                # aborting. (FIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt.)
                pass
        self._sort_formats(formats)
        return {
            # FIX: was the empty string; an empty video id breaks output
            # filename templates. Use the embed token as the id.
            'id': vid,
            'title': title,
            'thumbnail': thumbnail,
            'formats': formats,
        }
|
nilq/baby-python
|
python
|
"""
https://www.hackerrank.com/challenges/no-idea
There is an array of integers. There are also disjoint sets, and , each containing integers. You like all the integers in set and dislike all the integers in set . Your initial happiness is . For each integer in the array, if , you add to your happiness. If , you add to your happiness. Otherwise, your happiness does not change. Output your final happiness at the end.
Note: Since and are sets, they have no repeated elements. However, the array might contain duplicate elements.
Constraints
Input Format
The first line contains integers and separated by a space.
The second line contains integers, the elements of the array.
The third and fourth lines contain integers, and , respectively.
Output Format
Output a single integer, your total happiness.
Sample Input
3 2
1 5 3
3 1
5 7
Sample Output
1
Explanation
You gain unit of happiness for elements and in set . You lose unit for in set . The element in set does not exist in the array so it is not included in the calculation.
Hence, the total happiness is .
"""
#!/bin/python3
# Enter your code here. Read input from STDIN. Print output to STDOUT
def get_points(array, like_list, dislike_list):
    """Return the net happiness score for `array`.

    Each element adds 1 if it belongs to `like_list` and subtracts 1 if it
    belongs to `dislike_list`; all other elements contribute nothing.
    """
    return sum((n in like_list) - (n in dislike_list) for n in array)
if __name__ == "__main__":
    # Input format: line 1 = "n m" (sizes, not otherwise used), line 2 = the
    # array, lines 3 and 4 = the liked and disliked sets.
    n,m = tuple(map(int, input().split()))
    array = tuple(map(int, input().split()))
    like_list = set(map(int, input().split()))
    dislike_list = set(map(int, input().split()))
    print(get_points(array, like_list, dislike_list))
|
nilq/baby-python
|
python
|
from entities import FeedEntity as FE
# Feed registry: FeedEntity(kind, title, url[, data=...]). The kind selects
# the parser ('rss' or 'hn'); commented-out entries are kept as candidates.
FEEDS = (
    FE('rss', 'The Verge', 'https://www.theverge.com/rss/index.xml'),
    FE('rss', 'VB', 'https://feeds.feedburner.com/venturebeat/SZYF'),
    # FE('rss', 'TNW', 'https://thenextweb.com/feed/'),
    FE('rss', 'ARS Technica', 'http://feeds.arstechnica.com/arstechnica/index'),
    FE('rss', 'Wired', 'https://www.wired.com/feed/rss'),
    FE('rss', 'The Atlantic', 'https://www.theatlantic.com/feed/all/.rss'),
    # FE('rss', 'TechCrunch', 'http://feeds.feedburner.com/TechCrunch/'),
    # FE('rss', 'addmeto (telegram)', 'https://addmeto.cc/rss/'),
    FE('hn', 'Hacker News', 'https://news.ycombinator.com/', data={'max_news': 20}),
    # FE('rss', 'BBC Tech', 'http://feeds.bbci.co.uk/news/technology/rss.xml'),
    # FE('rss', 'NYT Tech', 'https://rss.nytimes.com/services/xml/rss/nyt/Technology.xml'),
    FE('rss', 'Engadged', 'https://www.engadget.com/rss.xml'),
    # FE('rss', 'WSJ Tech', 'https://feeds.a.dj.com/rss/RSSWSJD.xml'),
    FE('rss', 'BBC Science & Environment', 'http://feeds.bbci.co.uk/news/science_and_environment/rss.xml'),
    FE('rss', 'dev.by', 'https://dev.by/rss'),
    # FE('rss', 'NYT Home Page', 'https://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml'),
    FE('rss', 'python PEP', 'https://www.python.org/dev/peps/peps.rss'),
    FE('rss', 'tut.by', 'https://news.tut.by/rss/index.rss'),
)
|
nilq/baby-python
|
python
|
class RoomAlreadyEmpty(Exception):
    """Signals an operation against a room that is already empty."""


class CannotAllocateRoom(Exception):
    """Signals that no room could be allocated."""
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.